// oxigdal_bench/error.rs
//! Error types for benchmarking operations.
//!
//! This module provides comprehensive error handling for all benchmarking
//! and profiling operations in oxigdal-bench.

use std::fmt;
use std::io;
use std::path::PathBuf;

/// Result type alias for benchmarking operations.
///
/// Shorthand for `std::result::Result` with [`BenchError`] as the error type,
/// used throughout oxigdal-bench.
pub type Result<T> = std::result::Result<T, BenchError>;
12
/// Comprehensive error type for benchmarking operations.
///
/// Conversions from [`io::Error`], [`serde_json::Error`], and (behind the
/// `raster` feature) `oxigdal_core` errors are provided via `#[from]`, so
/// the `?` operator works directly on those results. All other variants are
/// constructed through the helper methods on [`BenchError`].
#[derive(Debug, thiserror::Error)]
pub enum BenchError {
    /// I/O error occurred during benchmarking.
    #[error("I/O error: {0}")]
    Io(#[from] io::Error),

    /// Serialization/deserialization error.
    #[error("Serialization error: {0}")]
    Serialization(#[from] serde_json::Error),

    /// Profiler initialization or execution error.
    ///
    /// The field is named `source`, so thiserror wires it up as the
    /// underlying cause returned by `Error::source()`.
    #[error("Profiler error: {source}")]
    Profiler {
        /// The underlying error source
        source: ProfilerErrorKind,
    },

    /// Benchmark execution error.
    // NOTE(review): `benchmark_name` is captured but does not appear in the
    // Display output below — confirm whether that is intentional.
    #[error("Benchmark execution error: {message}")]
    BenchmarkExecution {
        /// Description of the error
        message: String,
        /// Optional benchmark name
        benchmark_name: Option<String>,
    },

    /// Report generation error.
    #[error("Report generation error: {format} - {message}")]
    ReportGeneration {
        /// Report format that failed
        format: String,
        /// Error message
        message: String,
    },

    /// Regression detection error.
    #[error("Regression detection error: {0}")]
    RegressionDetection(String),

    /// Baseline file error.
    #[error("Baseline error for '{path}': {message}")]
    Baseline {
        /// Path to the baseline file
        path: PathBuf,
        /// Error message
        message: String,
    },

    /// Comparison error.
    #[error("Comparison error: {0}")]
    Comparison(String),

    /// Invalid configuration error.
    #[error("Invalid configuration: {0}")]
    InvalidConfiguration(String),

    /// Missing dependency error (e.g., feature not enabled).
    #[error("Missing dependency: {dependency} (enable feature: {feature})")]
    MissingDependency {
        /// Name of the missing dependency
        dependency: String,
        /// Feature flag to enable
        feature: String,
    },

    /// Scenario execution error.
    #[error("Scenario '{scenario}' failed: {message}")]
    ScenarioFailed {
        /// Scenario name
        scenario: String,
        /// Error message
        message: String,
    },

    /// System resource error.
    #[error("System resource error: {0}")]
    SystemResource(String),

    /// Memory profiling error.
    #[error("Memory profiling error: {0}")]
    MemoryProfiling(String),

    /// CPU profiling error.
    #[error("CPU profiling error: {0}")]
    CpuProfiling(String),

    /// Flamegraph generation error.
    #[error("Flamegraph generation error: {0}")]
    Flamegraph(String),

    /// Data validation error.
    #[error("Data validation error: {0}")]
    DataValidation(String),

    /// Timeout error.
    #[error("Operation timed out after {seconds} seconds")]
    Timeout {
        /// Timeout duration in seconds
        seconds: u64,
    },

    /// oxigdal-core error.
    ///
    /// Only available when the `raster` feature is enabled.
    #[cfg(feature = "raster")]
    #[error("OxiGDAL core error: {0}")]
    Core(#[from] oxigdal_core::error::OxiGdalError),

    /// Generic error for other cases.
    #[error("{0}")]
    Other(String),
}
124
/// Specific profiler error kinds.
///
/// Each variant carries a free-form message describing the failure; the
/// phase of profiling that failed is encoded by the variant itself. Used as
/// the `source` of [`BenchError::Profiler`].
#[derive(Debug)]
pub enum ProfilerErrorKind {
    /// Failed to initialize profiler.
    InitializationFailed(String),

    /// Failed to start profiling.
    StartFailed(String),

    /// Failed to stop profiling.
    StopFailed(String),

    /// Failed to collect profiling data.
    CollectionFailed(String),

    /// Failed to generate report.
    ReportGenerationFailed(String),

    /// Unsupported profiler feature.
    UnsupportedFeature(String),
}
146
147impl fmt::Display for ProfilerErrorKind {
148    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
149        match self {
150            Self::InitializationFailed(msg) => write!(f, "initialization failed: {msg}"),
151            Self::StartFailed(msg) => write!(f, "start failed: {msg}"),
152            Self::StopFailed(msg) => write!(f, "stop failed: {msg}"),
153            Self::CollectionFailed(msg) => write!(f, "collection failed: {msg}"),
154            Self::ReportGenerationFailed(msg) => write!(f, "report generation failed: {msg}"),
155            Self::UnsupportedFeature(msg) => write!(f, "unsupported feature: {msg}"),
156        }
157    }
158}
159
160impl std::error::Error for ProfilerErrorKind {}
161
162impl BenchError {
163    /// Creates a new profiler initialization error.
164    pub fn profiler_init<S: Into<String>>(message: S) -> Self {
165        Self::Profiler {
166            source: ProfilerErrorKind::InitializationFailed(message.into()),
167        }
168    }
169
170    /// Creates a new profiler start error.
171    pub fn profiler_start<S: Into<String>>(message: S) -> Self {
172        Self::Profiler {
173            source: ProfilerErrorKind::StartFailed(message.into()),
174        }
175    }
176
177    /// Creates a new profiler stop error.
178    pub fn profiler_stop<S: Into<String>>(message: S) -> Self {
179        Self::Profiler {
180            source: ProfilerErrorKind::StopFailed(message.into()),
181        }
182    }
183
184    /// Creates a new profiler collection error.
185    pub fn profiler_collect<S: Into<String>>(message: S) -> Self {
186        Self::Profiler {
187            source: ProfilerErrorKind::CollectionFailed(message.into()),
188        }
189    }
190
191    /// Creates a new benchmark execution error.
192    pub fn benchmark_execution<S: Into<String>>(message: S) -> Self {
193        Self::BenchmarkExecution {
194            message: message.into(),
195            benchmark_name: None,
196        }
197    }
198
199    /// Creates a new benchmark execution error with name.
200    pub fn benchmark_execution_with_name<S1: Into<String>, S2: Into<String>>(
201        name: S1,
202        message: S2,
203    ) -> Self {
204        Self::BenchmarkExecution {
205            message: message.into(),
206            benchmark_name: Some(name.into()),
207        }
208    }
209
210    /// Creates a new report generation error.
211    pub fn report_generation<S1: Into<String>, S2: Into<String>>(format: S1, message: S2) -> Self {
212        Self::ReportGeneration {
213            format: format.into(),
214            message: message.into(),
215        }
216    }
217
218    /// Creates a new baseline error.
219    pub fn baseline<P: Into<PathBuf>, S: Into<String>>(path: P, message: S) -> Self {
220        Self::Baseline {
221            path: path.into(),
222            message: message.into(),
223        }
224    }
225
226    /// Creates a new scenario failed error.
227    pub fn scenario_failed<S1: Into<String>, S2: Into<String>>(scenario: S1, message: S2) -> Self {
228        Self::ScenarioFailed {
229            scenario: scenario.into(),
230            message: message.into(),
231        }
232    }
233
234    /// Creates a new missing dependency error.
235    pub fn missing_dependency<S1: Into<String>, S2: Into<String>>(
236        dependency: S1,
237        feature: S2,
238    ) -> Self {
239        Self::MissingDependency {
240            dependency: dependency.into(),
241            feature: feature.into(),
242        }
243    }
244
245    /// Creates a new timeout error.
246    pub fn timeout(seconds: u64) -> Self {
247        Self::Timeout { seconds }
248    }
249}
250
#[cfg(test)]
mod tests {
    use super::*;

    /// Asserts that the Display output of `err` contains every fragment.
    fn assert_display_contains(err: &BenchError, fragments: &[&str]) {
        let rendered = err.to_string();
        for fragment in fragments {
            assert!(
                rendered.contains(fragment),
                "expected `{rendered}` to contain `{fragment}`"
            );
        }
    }

    #[test]
    fn test_error_display() {
        assert_display_contains(&BenchError::benchmark_execution("Test failed"), &["Test failed"]);
        assert_display_contains(
            &BenchError::benchmark_execution_with_name("my_bench", "Test failed"),
            &["Test failed"],
        );
        assert_display_contains(
            &BenchError::report_generation("HTML", "Template error"),
            &["HTML", "Template error"],
        );
        assert_display_contains(
            &BenchError::missing_dependency("criterion", "benchmarking"),
            &["criterion", "benchmarking"],
        );
        assert_display_contains(&BenchError::timeout(30), &["30"]);
    }

    #[test]
    fn test_profiler_errors() {
        assert_display_contains(&BenchError::profiler_init("Failed to initialize"), &["initialization"]);
        assert_display_contains(&BenchError::profiler_start("Failed to start"), &["start"]);
        assert_display_contains(&BenchError::profiler_stop("Failed to stop"), &["stop"]);
        assert_display_contains(&BenchError::profiler_collect("Failed to collect"), &["collection"]);
    }

    #[test]
    fn test_scenario_error() {
        assert_display_contains(
            &BenchError::scenario_failed("raster_read", "File not found"),
            &["raster_read", "File not found"],
        );
    }

    #[test]
    fn test_baseline_error() {
        assert_display_contains(
            &BenchError::baseline("/tmp/baseline.json", "Corrupted file"),
            &["baseline.json", "Corrupted"],
        );
    }
}
303}