cubek_convolution/components/selection.rs

use cubecl::{
    Runtime,
    client::ComputeClient,
    ir::{StorageType, VectorSize},
};
use cubek_matmul::components::stage::PartitionBuffering;

use cubek_matmul::definition::{
    MatmulAvailabilityError, MatmulElems, MatmulVectorSizes, SwizzleModes, TilingBlueprint,
    TilingScheme, adjust_dtypes,
};
use cubek_matmul::{
    components::tile::TileMatmulFamily,
    routines::{NUM_SM_APPROX, NUM_TENSOR_CORES_APPROX, find_instruction_size},
};
use cubek_std::stage::SwizzleMode;

use crate::components::ConvolutionProblem;

/// A heuristic to find the number of tiles in the stage.
///
/// Maximizes tensor core usage unless doing so would significantly impair
/// parallelization across SMs, keeping the number of cubes as close as
/// possible to the number of available SMs.
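///
/// As a rough worked example (illustrative numbers, not tied to any specific
/// GPU): with `m = n = 256`, a 16x16 instruction, `stage_size_k = 2`,
/// `num_sm = 108` and `max_tensor_cores = 8`, the search starts at 8x8 tiles
/// per stage (4 cubes) and halves the m dimension until the cube count is
/// closest to the SM count, ending at `(1, 8)` with 32 cubes.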
pub(crate) fn find_stage_size_m_n(
    m: usize,
    n: usize,
    num_sm: usize,
    max_tensor_cores: usize,
    instruction_m: usize,
    instruction_n: usize,
    stage_size_k: usize,
) -> (usize, usize) {
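    // A note on the caps computed below: each stage dimension is limited to
    // 256 elements (256 / instruction size, in tiles) and to 16 / stage_size_k
    // tiles, in addition to the tensor core count.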
    let max_tiles_elems_m = 256 / instruction_m;
    let max_tiles_elems_n = 256 / instruction_n;
    let max_tiles_total_stage = 16 / stage_size_k;

    let mut dim_num_tiles_m = max_tensor_cores
        .min(max_tiles_elems_m)
        .min(max_tiles_total_stage);

    let mut dim_num_tiles_n = max_tensor_cores
        .min(max_tiles_elems_n)
        .min(max_tiles_total_stage);

    let total_tiles_m = m.div_ceil(instruction_m);
    let total_tiles_n = n.div_ceil(instruction_n);

    while total_tiles_n < dim_num_tiles_n && dim_num_tiles_n > 1 {
        dim_num_tiles_n /= 2;
    }

    let total_tiles = total_tiles_m * total_tiles_n;

    let mut stage_num_tiles = dim_num_tiles_m * dim_num_tiles_n;
    let mut num_cubes_expected = total_tiles.div_ceil(stage_num_tiles);

    // We keep track of two configurations to select the closest to `num_sm`,
    // whether it's a bit over or under
    let mut previous_dim_num_tiles = dim_num_tiles_m;
    let mut previous_num_cubes = num_cubes_expected;

    // Refine tensor core usage to stay as close as possible to `num_sm`
    while num_cubes_expected < num_sm && dim_num_tiles_m > 1 {
        previous_dim_num_tiles = dim_num_tiles_m;
        previous_num_cubes = num_cubes_expected;

        // Reduce tensor core usage
        dim_num_tiles_m = dim_num_tiles_m.div_ceil(2);
        stage_num_tiles = dim_num_tiles_m * dim_num_tiles_n;

        // Number of cubes grows as a consequence of the smaller stage
        num_cubes_expected = total_tiles.div_ceil(stage_num_tiles);
    }

    // Compare previous and current values to determine the closest to `num_sm`
    if (previous_num_cubes as isize - num_sm as isize).abs()
        <= (num_cubes_expected as isize - num_sm as isize).abs()
    {
        (previous_dim_num_tiles, dim_num_tiles_n)
    } else {
        (dim_num_tiles_m, dim_num_tiles_n)
    }
}

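/// Selects a [`TilingBlueprint`] for the given convolution problem: picks a
/// supported instruction size for the tile matmul family, derives stage sizes
/// from the hardware's SM and tensor core counts, and optionally enables
/// shared-memory swizzling.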
pub fn convolution_matmul_selection<TMM: TileMatmulFamily, R: Runtime>(
    client: &ComputeClient<R>,
    problem: &ConvolutionProblem,
    plane_dim: u32,
    swizzle: bool,
    vector_sizes: &MatmulVectorSizes,
    dtypes: &mut MatmulElems,
) -> Result<TilingBlueprint, MatmulAvailabilityError> {
    adjust_dtypes(client, dtypes, TMM::requires_accelerator());

    // Rough heuristic based on previous benchmark results: 512 channels with a 3x3 kernel
    // (k = 512 * 3 * 3 = 4608) seemed to be the approximate cutoff for the k = 4 stage size.
    let stage_k = if problem.k >= 4096 { 4 } else { 2 };

    let tile_size = find_instruction_size::<R, _, _>(
        client,
        (
            dtypes.lhs_register,
            dtypes.rhs_register,
            dtypes.acc_register,
        ),
        (problem.m, problem.n, problem.k).into(),
        (None, None, None),
        TMM::is_supported,
        TMM::supported_sizes,
    )?;

    let hardware = &client.properties().hardware;
    let num_sm = hardware
        .num_streaming_multiprocessors
        .unwrap_or(NUM_SM_APPROX);
    let max_tensor_cores = hardware.num_tensor_cores.unwrap_or(NUM_TENSOR_CORES_APPROX);

    let (stage_size_m, stage_size_n) = find_stage_size_m_n(
        problem.m,
        problem.n,
        num_sm as usize,
        max_tensor_cores as usize,
        tile_size.m() as usize,
        tile_size.n() as usize,
        stage_k as usize,
    );

    let tiling_scheme = TilingScheme::builder()
        .with_stage_size((stage_size_m as u32, 1, 1).into())
        .with_tile_size(tile_size)
        .with_partition_size((1, stage_size_n as u32, stage_k).into())
        .build()
        .unwrap();

    let mut builder =
        TilingBlueprint::builder(tiling_scheme, plane_dim, &problem.as_matmul_problem())
            .partition_buffering(PartitionBuffering::Single);

    if swizzle {
        let swizzle_dim = tiling_scheme.elements_per_stage_along_k() as usize;

        let lhs = select_swizzle(swizzle_dim, dtypes.lhs_stage, vector_sizes.lhs);
        let rhs = select_swizzle(swizzle_dim, dtypes.rhs_stage, vector_sizes.rhs);
        builder = builder.shared_swizzle(SwizzleModes {
            lhs,
            rhs,
            ..Default::default()
        });
    }

    Ok(builder.build())
}

/// All modes currently use an atom size of 16 bytes.
const SWIZZLE_ATOM: usize = 16;

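/// Picks a shared-memory swizzle mode from the stage's k extent in bytes,
/// falling back to [`SwizzleMode::None`] when the vector size exceeds the
/// swizzle atom or the byte width is not a power of two.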
fn select_swizzle(swizzle_dim: usize, elem: StorageType, vector_size: VectorSize) -> SwizzleMode {
    // Vector size exceeds swizzle atom
    if elem.size() * vector_size > SWIZZLE_ATOM {
        return SwizzleMode::None;
    }
    let swizzle_dim_bytes = swizzle_dim * elem.size();
    if !swizzle_dim_bytes.is_power_of_two() {
        return SwizzleMode::None;
    }
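    // For example (assuming `StorageType::size` is in bytes): a 32-element
    // k-stage of f16 is 64 bytes wide and maps to `SwizzleMode::B64`.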
    match swizzle_dim_bytes {
        32 => SwizzleMode::B32,
        64 => SwizzleMode::B64,
        _ => SwizzleMode::B128,
        //_ => SwizzleMode::None,
    }
}
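
// A minimal test sketch for the stage-size heuristic (hypothetical hardware
// numbers chosen for illustration, not taken from a real device query).
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn shrinks_m_to_approach_the_sm_count() {
        // 256x256 problem, 16x16 instruction, k-stage of 2, 108 SMs, 8 tensor cores:
        // the heuristic halves the m dimension until the cube count is closest to 108.
        assert_eq!(find_stage_size_m_n(256, 256, 108, 8, 16, 16, 2), (1, 8));
    }

    #[test]
    fn keeps_the_tensor_core_cap_when_cubes_are_plentiful() {
        // A 4096x4096 problem already produces far more cubes than SMs, so both
        // stage dimensions stay at the tensor core cap of 4.
        assert_eq!(find_stage_size_m_n(4096, 4096, 108, 4, 16, 16, 2), (4, 4));
    }
}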