scouter_types/
error.rs

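//! Error types for the `scouter_types` crate.
//!
//! Each enum below derives `thiserror::Error` and converts into `pyo3::PyErr`
//! (raised as a Python `RuntimeError`), so errors can cross the PyO3 boundary
//! with `?`.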
use pyo3::exceptions::PyRuntimeError;
use pyo3::PyErr;
use thiserror::Error;

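/// Errors raised by filesystem and serialization helpers (path and directory
/// creation, I/O, and JSON handling).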
#[derive(Error, Debug)]
pub enum UtilError {
    #[error("Failed to get parent path")]
    GetParentPathError,

    #[error("Failed to create directory")]
    CreateDirectoryError,

    #[error("Failed to create path")]
    CreatePathError,

    #[error(transparent)]
    IoError(#[from] std::io::Error),

    #[error(transparent)]
    SerdeJsonError(#[from] serde_json::Error),
}

impl From<UtilError> for PyErr {
    fn from(err: UtilError) -> PyErr {
        let msg = err.to_string();
        PyRuntimeError::new_err(msg)
    }
}

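// Because `UtilError` converts into `PyErr`, fallible helpers can be returned
// straight from `#[pyfunction]`s and surface as Python `RuntimeError`s. A
// minimal sketch (the `save_json` function and its arguments are hypothetical,
// not part of this crate):
//
// #[pyfunction]
// fn save_json(path: std::path::PathBuf, value: &str) -> Result<(), UtilError> {
//     let parent = path.parent().ok_or(UtilError::GetParentPathError)?;
//     std::fs::create_dir_all(parent)?; // io::Error -> UtilError via #[from]
//     std::fs::write(&path, value)?;
//     Ok(())
// }

/// Errors raised while validating and converting user-supplied types
/// (schedules, drift and alert configuration, features, metrics, and
/// Python objects).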
#[derive(Error, Debug)]
pub enum TypeError {
    #[error("Start time must be before end time")]
    StartTimeError,

    #[error("Invalid schedule")]
    InvalidScheduleError,

    #[error("Invalid PSI threshold configuration")]
    InvalidPsiThresholdError,

    #[error("Invalid alert dispatch configuration")]
    InvalidDispatchConfigError,

    #[error("Invalid equal width binning method")]
    InvalidEqualWidthBinningMethodError,

    #[error("Missing space argument")]
    MissingSpaceError,

    #[error("Missing name argument")]
    MissingNameError,

    #[error("Missing version argument")]
    MissingVersionError,

    #[error("Missing alert_config argument")]
    MissingAlertConfigError,

    #[error("No metrics found")]
    NoMetricsError,

    #[error(transparent)]
    SerdeJsonError(#[from] serde_json::Error),

    #[error("Invalid number")]
    InvalidNumberError,

    #[error("Root must be an object")]
    RootMustBeObject,

    #[error("Unsupported type: {0}")]
    UnsupportedTypeError(String),

    #[error("Failed to downcast Python object: {0}")]
    DowncastError(String),

    #[error("Invalid data type")]
    InvalidDataType,

    #[error("Missing value for string feature")]
    MissingStringValueError,

    #[error("{0}")]
    PyError(String),

    #[error(
        "Invalid prompt response type. Expected Score as the output type for the LLMDriftMetric prompt"
    )]
    InvalidResponseType,

    #[error(
        "Unsupported feature type. Feature must be an integer, float or string. Received: {0}"
    )]
    UnsupportedFeatureTypeError(String),

    #[error("Unsupported features type. Features must be a list of Feature instances or a dictionary of key value pairs. Received: {0}")]
    UnsupportedFeaturesTypeError(String),

    #[error("Unsupported metrics type. Metrics must be a list of Metric instances or a dictionary of key value pairs. Received: {0}")]
    UnsupportedMetricsTypeError(String),

    #[error("{0}")]
    InvalidParameterError(String),

    #[error("{0}")]
    InvalidBinCountError(String),

    #[error("{0}")]
    InvalidValueError(String),

    #[error("Empty Array Detected: {0}")]
    EmptyArrayError(String),

    #[error("Invalid binning strategy")]
    InvalidBinningStrategyError,

    #[error("Unsupported status. Status must be one of: All, Pending or Processed. Received: {0}")]
    InvalidStatusError(String),

    #[error("Failed to supply either input or response for the llm record")]
    MissingInputOrResponse,

    #[error("Invalid context type. Context must be a PyDict or a Pydantic BaseModel")]
    MustBeDictOrBaseModel,

    #[error("Failed to check if the context is a Pydantic BaseModel. Error: {0}")]
    FailedToCheckPydanticModel(String),

    #[error("Failed to import pydantic module. Error: {0}")]
    FailedToImportPydantic(String),

    #[error("Unsupported Python object type for conversion")]
    UnsupportedPyObjectType,

135    #[error("Invalid dictionary key type. Dictionary keys must be strings, int, float or bool")]
136    InvalidDictKeyType,
137
138    #[error("Invalid compressions type")]
139    InvalidCompressionTypeError,
140
141    #[error("Compression type not supported: {0}")]
142    CompressionTypeNotSupported(String),
143}
144
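// The conversion below lets `?` map a failed `downcast` on a `Bound<PyAny>`
// into `TypeError::DowncastError`. A minimal sketch (the `require_dict` helper
// is hypothetical, not part of this crate):
//
// use pyo3::prelude::*;
// use pyo3::types::PyDict;
//
// fn require_dict<'py>(obj: &Bound<'py, PyAny>) -> Result<Bound<'py, PyDict>, TypeError> {
//     Ok(obj.downcast::<PyDict>()?.clone())
// }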
impl<'a> From<pyo3::DowncastError<'a, 'a>> for TypeError {
    fn from(err: pyo3::DowncastError) -> Self {
        TypeError::DowncastError(err.to_string())
    }
}

impl From<TypeError> for PyErr {
    fn from(err: TypeError) -> PyErr {
        let msg = err.to_string();
        PyRuntimeError::new_err(msg)
    }
}

impl From<PyErr> for TypeError {
    fn from(err: PyErr) -> TypeError {
        TypeError::PyError(err.to_string())
    }
}

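/// Errors raised while building contracts; wraps `TypeError` and errors
/// propagated from the Python side.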
#[derive(Error, Debug)]
pub enum ContractError {
    #[error(transparent)]
    TypeError(#[from] TypeError),

    #[error("{0}")]
    PyError(String),
}

impl From<ContractError> for PyErr {
    fn from(err: ContractError) -> PyErr {
        let msg = err.to_string();
        PyRuntimeError::new_err(msg)
    }
}

impl From<PyErr> for ContractError {
    fn from(err: PyErr) -> ContractError {
        ContractError::PyError(err.to_string())
    }
}

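/// Errors raised while constructing or extracting server records
/// (including LLM records).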
#[derive(Error, Debug)]
pub enum RecordError {
    #[error("Unable to extract record into any known ServerRecord variant")]
    ExtractionError,

    #[error("No server records found")]
    EmptyServerRecordsError,

    #[error(transparent)]
    SerdeJsonError(#[from] serde_json::Error),

    #[error("Unexpected record type")]
    InvalidDriftTypeError,

    #[error("{0}")]
    PyError(String),

    #[error("Failed to supply either input or response for the llm record")]
    MissingInputOrResponse,
}

impl From<RecordError> for PyErr {
    fn from(err: RecordError) -> PyErr {
        let msg = err.to_string();
        PyRuntimeError::new_err(msg)
    }
}

impl From<PyErr> for RecordError {
    fn from(err: PyErr) -> RecordError {
        RecordError::PyError(err.to_string())
    }
}

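/// Errors raised while creating and validating drift profiles, including
/// LLM metric and workflow validation via `potato_head`.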
#[derive(Error, Debug)]
pub enum ProfileError {
    #[error(transparent)]
    SerdeJsonError(#[from] serde_json::Error),

    #[error("Features and array are not the same length")]
    FeatureArrayLengthError,

    #[error("Unexpected record type")]
    InvalidDriftTypeError,

    #[error(transparent)]
    UtilError(#[from] UtilError),

    #[error(transparent)]
    TypeError(#[from] TypeError),

    #[error(transparent)]
    IoError(#[from] std::io::Error),

    #[error("Missing sample argument")]
    MissingSampleError,

    #[error("Missing sample size argument")]
    MissingSampleSizeError,

    #[error("Custom alert thresholds have not been set")]
    CustomThresholdNotSetError,

    #[error("Custom alert threshold not found")]
    CustomAlertThresholdNotFound,

    #[error("{0}")]
    PyError(String),

    #[error("Invalid binning strategy")]
    InvalidBinningStrategyError,

    #[error("Missing evaluation workflow")]
    MissingWorkflowError,

    #[error("Invalid argument for workflow. Argument must be a Workflow object")]
    InvalidWorkflowType,

    #[error(transparent)]
    AgentError(#[from] potato_head::AgentError),

    #[error(transparent)]
    WorkflowError(#[from] potato_head::WorkflowError),

    #[error("Invalid metric name found: {0}")]
    InvalidMetricNameError(String),

    #[error("No metrics provided for workflow validation")]
    EmptyMetricsList,

276    #[error("LLM Metric requires at least one bound parameter")]
277    NeedAtLeastOneBoundParameterError(String),
278
279    #[error(
280        "Missing prompt in LLM Metric. If providing a list of metrics, prompt must be present"
281    )]
282    MissingPromptError(String),
283
284    #[error("No tasks found in the workflow when validating: {0}")]
285    NoTasksFoundError(String),
286
287    #[error(
288        "Invalid prompt response type. Expected Score as the output type for the LLMDriftMetric prompt. Id: {0}"
289    )]
290    InvalidResponseType(String),
291
292    #[error("No metrics found for the output task: {0}")]
293    MetricNotFoundForOutputTask(String),
294
295    #[error("Metric not found in profile LLM metrics: {0}")]
296    MetricNotFound(String),
297
298    #[error(transparent)]
299    PotatoTypeError(#[from] potato_head::TypeError),
300}
301
302impl From<ProfileError> for PyErr {
303    fn from(err: ProfileError) -> PyErr {
304        let msg = err.to_string();
305        PyRuntimeError::new_err(msg)
306    }
307}
308
309impl From<PyErr> for ProfileError {
310    fn from(err: PyErr) -> ProfileError {
311        ProfileError::PyError(err.to_string())
312    }
313}