tensorlogic_scirs_backend/error.rs

use std::fmt;

use thiserror::Error;

/// Errors produced by the tensorlogic backend.
#[derive(Error, Debug)]
pub enum TlBackendError {
    /// Tensor shapes do not match what the operation expects.
    #[error("Shape mismatch: {0}")]
    ShapeMismatch(ShapeMismatchError),

    /// An einsum specification could not be parsed or is inconsistent.
    #[error("Invalid einsum spec: {0}")]
    InvalidEinsumSpec(String),

    /// A tensor referenced by name does not exist.
    #[error("Tensor not found: {0}")]
    TensorNotFound(String),

    /// An operation was called with invalid arguments or in an invalid state.
    #[error("Invalid operation: {0}")]
    InvalidOperation(String),

    /// A device-level failure (availability, allocation, synchronization).
    #[error("Device error: {0}")]
    DeviceError(DeviceError),

    /// A memory allocation failed.
    #[error("Out of memory: {0}")]
    OutOfMemory(String),

    /// A numerical problem such as NaN or infinity was detected.
    #[error("Numerical error: {0}")]
    NumericalError(NumericalError),

    /// A failure during gradient computation.
    #[error("Gradient error: {0}")]
    GradientError(String),

    /// A failure while building or manipulating the computation graph.
    #[error("Graph error: {0}")]
    GraphError(String),

    /// A failure while executing the computation graph.
    #[error("Execution error: {0}")]
    ExecutionError(String),

    /// The requested feature or operation is not supported.
    #[error("Unsupported: {0}")]
    Unsupported(String),

    /// An internal invariant was violated.
    #[error("Internal error: {0}")]
    Internal(String),
}
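
// A minimal sketch of how callers might branch on the variants above; the
// `retryable` policy shown here is purely illustrative and is not something
// the backend itself defines.
#[cfg(test)]
mod variant_matching_example {
    use super::*;

    // Hypothetical policy: treat memory and device failures as retryable.
    fn retryable(err: &TlBackendError) -> bool {
        matches!(
            err,
            TlBackendError::OutOfMemory(_) | TlBackendError::DeviceError(_)
        )
    }

    #[test]
    fn classifies_variants() {
        assert!(retryable(&TlBackendError::OutOfMemory("pool exhausted".into())));
        assert!(!retryable(&TlBackendError::Internal("bug".into())));
    }
}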

/// Details of a shape mismatch between expected and actual tensor shapes.
#[derive(Debug, Clone)]
pub struct ShapeMismatchError {
    /// The operation in which the mismatch was detected.
    pub operation: String,
    /// The shapes the operation expected, one per input.
    pub expected: Vec<Vec<usize>>,
    /// The shapes that were actually provided, one per input.
    pub actual: Vec<Vec<usize>>,
    /// Optional extra context, e.g. which input tensor was involved.
    pub context: Option<String>,
}

impl fmt::Display for ShapeMismatchError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "Shape mismatch in {}: expected {:?}, got {:?}",
            self.operation, self.expected, self.actual
        )?;
        if let Some(ctx) = &self.context {
            write!(f, " ({})", ctx)?;
        }
        Ok(())
    }
}

impl ShapeMismatchError {
    /// Creates a new shape mismatch error for the given operation and shapes.
    pub fn new(
        operation: impl Into<String>,
        expected: Vec<Vec<usize>>,
        actual: Vec<Vec<usize>>,
    ) -> Self {
        Self {
            operation: operation.into(),
            expected,
            actual,
            context: None,
        }
    }

    /// Attaches additional context to the error (builder style).
    pub fn with_context(mut self, context: impl Into<String>) -> Self {
        self.context = Some(context.into());
        self
    }
}
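
// A minimal sketch of how the builder above is intended to be used; the
// operation and context strings here are illustrative only.
#[cfg(test)]
mod shape_mismatch_example {
    use super::*;

    #[test]
    fn builds_error_with_context() {
        let err = ShapeMismatchError::new("matmul", vec![vec![2, 3]], vec![vec![2, 4]])
            .with_context("right-hand operand");
        // Wrapping into the backend-wide error type preserves the message.
        let wrapped = TlBackendError::ShapeMismatch(err);
        assert!(wrapped.to_string().contains("right-hand operand"));
    }
}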

/// Device-level failures (GPU availability, allocation, synchronization).
#[derive(Error, Debug, Clone)]
pub enum DeviceError {
    #[error("GPU not available: {0}")]
    GpuUnavailable(String),

    #[error("Device memory allocation failed: {0}")]
    AllocationFailed(String),

    #[error("Device synchronization failed: {0}")]
    SyncFailed(String),

    #[error("Unsupported device: {0}")]
    UnsupportedDevice(String),
}

/// A numerical problem detected during computation.
#[derive(Debug, Clone)]
pub struct NumericalError {
    /// The kind of numerical problem.
    pub kind: NumericalErrorKind,
    /// Where the problem was detected (operation or code location).
    pub location: String,
    /// The offending values, if they were captured.
    pub values: Option<Vec<f64>>,
}

/// The kind of numerical problem that was detected.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum NumericalErrorKind {
    NaN,
    Infinity,
    Overflow,
    Underflow,
    DivisionByZero,
    PrecisionLoss,
}

impl fmt::Display for NumericalError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?} detected in {}", self.kind, self.location)?;
        if let Some(vals) = &self.values {
            write!(f, " (values: {:?})", vals)?;
        }
        Ok(())
    }
}

impl NumericalError {
    /// Creates a new numerical error of the given kind at `location`.
    pub fn new(kind: NumericalErrorKind, location: impl Into<String>) -> Self {
        Self {
            kind,
            location: location.into(),
            values: None,
        }
    }

    /// Attaches the offending values to the error (builder style).
    pub fn with_values(mut self, values: Vec<f64>) -> Self {
        self.values = Some(values);
        self
    }
}
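
// Sketch only: demonstrates attaching the offending values so they show up
// in the rendered message; the location string is illustrative.
#[cfg(test)]
mod numerical_error_example {
    use super::*;

    #[test]
    fn renders_attached_values() {
        let err = NumericalError::new(NumericalErrorKind::Infinity, "softmax")
            .with_values(vec![f64::INFINITY]);
        let msg = err.to_string();
        assert!(msg.contains("softmax"));
        assert!(msg.contains("values"));
    }
}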

/// Convenience alias for results returned by the backend.
pub type TlBackendResult<T> = Result<T, TlBackendError>;

/// Convenience constructors for the most common error cases.
impl TlBackendError {
    pub fn shape_mismatch(
        operation: impl Into<String>,
        expected: Vec<Vec<usize>>,
        actual: Vec<Vec<usize>>,
    ) -> Self {
        TlBackendError::ShapeMismatch(ShapeMismatchError::new(operation, expected, actual))
    }

    pub fn invalid_einsum(spec: impl Into<String>) -> Self {
        TlBackendError::InvalidEinsumSpec(spec.into())
    }

    pub fn tensor_not_found(name: impl Into<String>) -> Self {
        TlBackendError::TensorNotFound(name.into())
    }

    pub fn invalid_operation(msg: impl Into<String>) -> Self {
        TlBackendError::InvalidOperation(msg.into())
    }

    pub fn numerical(kind: NumericalErrorKind, location: impl Into<String>) -> Self {
        TlBackendError::NumericalError(NumericalError::new(kind, location))
    }

    pub fn gpu_unavailable(msg: impl Into<String>) -> Self {
        TlBackendError::DeviceError(DeviceError::GpuUnavailable(msg.into()))
    }

    pub fn unsupported(msg: impl Into<String>) -> Self {
        TlBackendError::Unsupported(msg.into())
    }

    pub fn execution(msg: impl Into<String>) -> Self {
        TlBackendError::ExecutionError(msg.into())
    }

    pub fn gradient(msg: impl Into<String>) -> Self {
        TlBackendError::GradientError(msg.into())
    }
}
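
// Sketch of a typical call site for the constructors above: a hypothetical
// lookup helper. `find_tensor` and the tensor names are illustrative only,
// not part of the backend API.
#[cfg(test)]
mod constructor_usage_example {
    use super::*;

    // Looks up a tensor id by name, mapping a miss to `TensorNotFound`.
    fn find_tensor(known: &[(&str, usize)], name: &str) -> TlBackendResult<usize> {
        known
            .iter()
            .find(|(n, _)| *n == name)
            .map(|(_, id)| *id)
            .ok_or_else(|| TlBackendError::tensor_not_found(name))
    }

    #[test]
    fn missing_tensor_maps_to_error() {
        let table = [("weights", 0), ("bias", 1)];
        assert_eq!(find_tensor(&table, "bias").unwrap(), 1);
        assert!(matches!(
            find_tensor(&table, "logits"),
            Err(TlBackendError::TensorNotFound(_))
        ));
    }
}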

/// Returns an error if `value` is NaN or infinite.
pub fn validate_numeric_value(value: f64, location: &str) -> TlBackendResult<()> {
    if value.is_nan() {
        Err(TlBackendError::numerical(NumericalErrorKind::NaN, location))
    } else if value.is_infinite() {
        Err(TlBackendError::numerical(
            NumericalErrorKind::Infinity,
            location,
        ))
    } else {
        Ok(())
    }
}

/// Returns an error for the first NaN or infinite value found in `values`,
/// attaching the offending value to the error.
pub fn validate_numeric_values(values: &[f64], location: &str) -> TlBackendResult<()> {
    for &value in values.iter() {
        if value.is_nan() {
            return Err(TlBackendError::NumericalError(
                NumericalError::new(NumericalErrorKind::NaN, location).with_values(vec![value]),
            ));
        }
        if value.is_infinite() {
            return Err(TlBackendError::NumericalError(
                NumericalError::new(NumericalErrorKind::Infinity, location)
                    .with_values(vec![value]),
            ));
        }
    }
    Ok(())
}
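
// Sketch of how the validators above might guard an operation's output; the
// `checked_relu` helper here is illustrative, not part of the backend.
#[cfg(test)]
mod validation_example {
    use super::*;

    fn checked_relu(input: &[f64]) -> TlBackendResult<Vec<f64>> {
        // Reject NaN/infinite inputs before doing any work.
        validate_numeric_values(input, "relu input")?;
        Ok(input.iter().map(|x| x.max(0.0)).collect())
    }

    #[test]
    fn rejects_non_finite_input() {
        assert_eq!(checked_relu(&[-1.0, 2.0]).unwrap(), vec![0.0, 2.0]);
        assert!(checked_relu(&[1.0, f64::NAN]).is_err());
    }
}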

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_shape_mismatch_error() {
        let err = TlBackendError::shape_mismatch(
            "matmul",
            vec![vec![2, 3], vec![3, 4]],
            vec![vec![2, 3], vec![2, 4]],
        );
        assert!(matches!(err, TlBackendError::ShapeMismatch(_)));
        assert!(err.to_string().contains("matmul"));
    }

    #[test]
    fn test_numerical_error() {
        let err = TlBackendError::numerical(NumericalErrorKind::NaN, "relu operation");
        assert!(matches!(err, TlBackendError::NumericalError(_)));
        assert!(err.to_string().contains("NaN"));
    }

    #[test]
    fn test_validate_numeric_value() {
        assert!(validate_numeric_value(0.0, "test").is_ok());
        assert!(validate_numeric_value(1.5, "test").is_ok());
        assert!(validate_numeric_value(-10.0, "test").is_ok());

        assert!(validate_numeric_value(f64::NAN, "test").is_err());
        assert!(validate_numeric_value(f64::INFINITY, "test").is_err());
        assert!(validate_numeric_value(f64::NEG_INFINITY, "test").is_err());
    }

    #[test]
    fn test_validate_numeric_values() {
        let valid = vec![0.0, 1.0, -1.0, 100.0];
        assert!(validate_numeric_values(&valid, "test").is_ok());

        let invalid_nan = vec![0.0, f64::NAN, 1.0];
        assert!(validate_numeric_values(&invalid_nan, "test").is_err());

        let invalid_inf = vec![0.0, 1.0, f64::INFINITY];
        assert!(validate_numeric_values(&invalid_inf, "test").is_err());
    }

    #[test]
    fn test_error_display() {
        let err = TlBackendError::invalid_einsum("abc,def->xyz");
        assert_eq!(err.to_string(), "Invalid einsum spec: abc,def->xyz");

        let err = TlBackendError::tensor_not_found("tensor_x");
        assert_eq!(err.to_string(), "Tensor not found: tensor_x");
    }

    #[test]
    fn test_device_error() {
        let err = TlBackendError::gpu_unavailable("CUDA not installed");
        assert!(matches!(err, TlBackendError::DeviceError(_)));
        assert!(err.to_string().contains("GPU not available"));
    }

    #[test]
    fn test_shape_mismatch_with_context() {
        let mut err = ShapeMismatchError::new("einsum", vec![vec![2, 3]], vec![vec![3, 4]]);
        err = err.with_context("input tensor 'x'");
        let err_str = err.to_string();
        assert!(err_str.contains("einsum"));
        assert!(err_str.contains("input tensor 'x'"));
    }
}
345}