cubecl_reduce/
error.rs

1use core::fmt;
2
3use cubecl_core::{ir::StorageType, server::LaunchError};
4
/// Errors that can occur when preparing or launching a reduce kernel.
#[derive(Debug, PartialEq, Eq, Clone, Hash)]
pub enum ReduceError {
    /// Indicate that the hardware / API doesn't support SIMT plane instructions.
    PlanesUnavailable,
    /// When the cube count is bigger than the max supported.
    CubeCountTooLarge,
    /// Indicate that min_plane_dim != max_plane_dim, thus the exact plane_dim is not fixed.
    ImprecisePlaneDim,
    /// Indicate the axis is too large (i.e. `axis >= rank`).
    InvalidAxis { axis: usize, rank: usize },
    /// Indicate that the shape of the output tensor is invalid for the given input and axis.
    MismatchShape {
        // Shape the output tensor should have for the given input and axis.
        expected_shape: Vec<usize>,
        // Shape the caller actually provided.
        output_shape: Vec<usize>,
    },
    /// Indicate that we can't launch a shared sum because atomic addition is not supported
    /// by the client for the given storage type.
    MissingAtomicAdd(StorageType),

    /// An error happened during launch.
    Launch(LaunchError),
}
26
27impl fmt::Display for ReduceError {
28    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
29        match self {
30            Self::PlanesUnavailable => write!(
31                f,
32                "Trying to launch a kernel using plane instructions, but there are not supported by the hardware."
33            ),
34            Self::CubeCountTooLarge => {
35                write!(f, "The cube count is larger than the max supported.")
36            }
37            Self::ImprecisePlaneDim => write!(
38                f,
39                "Trying to launch a kernel using plane instructions, but the min and max plane dimensions are different."
40            ),
41            Self::InvalidAxis { axis, rank } => write!(
42                f,
43                "The provided axis ({axis}) must be smaller than the input tensor rank ({rank})."
44            ),
45            Self::MismatchShape {
46                expected_shape,
47                output_shape,
48            } => {
49                write!(
50                    f,
51                    "The output shape (currently {output_shape:?}) should be {expected_shape:?}."
52                )
53            }
54            Self::MissingAtomicAdd(elem) => {
55                write!(f, "Atomic add not supported by the client for {elem}")
56            }
57            Self::Launch(err) => {
58                write!(f, "An error happened during launch: {err}")
59            }
60        }
61    }
62}