use pyo3::exceptions::PyRuntimeError;
use pyo3::PyErr;
use thiserror::Error;

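/// Errors returned by filesystem and serialization utility helpers.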
#[derive(Error, Debug)]
pub enum UtilError {
    #[error("Failed to get parent path")]
    GetParentPathError,

    #[error("Failed to create directory")]
    CreateDirectoryError,

    #[error("Failed to create path")]
    CreatePathError,

    #[error(transparent)]
    IoError(#[from] std::io::Error),

    #[error(transparent)]
    SerdeJsonError(#[from] serde_json::Error),
}

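// Converting each error type into `PyErr` lets `#[pyfunction]` code propagate
// failures with `?` and surface them in Python as a `RuntimeError`.
// Illustrative sketch (the function below is hypothetical, not part of this module):
//
//     #[pyfunction]
//     fn ensure_dir(path: &str) -> Result<(), UtilError> {
//         std::fs::create_dir_all(path)?; // std::io::Error -> UtilError via #[from]
//         Ok(())
//     }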
impl From<UtilError> for PyErr {
    fn from(err: UtilError) -> PyErr {
        let msg = err.to_string();
        PyRuntimeError::new_err(msg)
    }
}

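/// Errors raised while validating configuration values and converting
/// Python inputs (features, metrics, context) into their typed forms.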
#[derive(Error, Debug)]
pub enum TypeError {
    #[error("Start time must be before end time")]
    StartTimeError,

    #[error("Invalid schedule")]
    InvalidScheduleError,

    #[error("Invalid PSI threshold configuration")]
    InvalidPsiThresholdError,

    #[error("Invalid alert dispatch configuration")]
    InvalidDispatchConfigError,

    #[error("Missing space argument")]
    MissingSpaceError,

    #[error("Missing name argument")]
    MissingNameError,

    #[error("Missing version argument")]
    MissingVersionError,

    #[error("Missing alert_config argument")]
    MissingAlertConfigError,

    #[error("No metrics found")]
    NoMetricsError,

    #[error(transparent)]
    SerdeJsonError(#[from] serde_json::Error),

    #[error("Invalid number")]
    InvalidNumberError,

    #[error("Root must be an object")]
    RootMustBeObject,

    #[error("Unsupported type: {0}")]
    UnsupportedTypeError(String),

    #[error("Failed to downcast Python object: {0}")]
    DowncastError(String),

    #[error("Invalid data type")]
    InvalidDataType,

    #[error("Missing value for string feature")]
    MissingStringValueError,

    #[error("{0}")]
    PyError(String),

    #[error(
        "Invalid prompt response type. Expected Score as the output type for the LLMMetric prompt"
    )]
    InvalidResponseType,

    #[error(
        "Unsupported feature type. Feature must be an integer, float, or string. Received: {0}"
    )]
    UnsupportedFeatureTypeError(String),

    #[error("Unsupported features type. Features must be a list of Feature instances or a dictionary of key-value pairs. Received: {0}")]
    UnsupportedFeaturesTypeError(String),

    #[error("Unsupported metrics type. Metrics must be a list of Metric instances or a dictionary of key-value pairs. Received: {0}")]
    UnsupportedMetricsTypeError(String),

    #[error("Unsupported status. Status must be one of: All, Pending, or Processed. Received: {0}")]
    InvalidStatusError(String),

    #[error("Failed to supply either input or response for the LLM record")]
    MissingInputOrResponse,

    #[error("Invalid context type. Context must be a PyDict or a Pydantic BaseModel")]
    MustBeDictOrBaseModel,

    #[error("Failed to check if the context is a Pydantic BaseModel. Error: {0}")]
    FailedToCheckPydanticModel(String),

    #[error("Failed to import pydantic module. Error: {0}")]
    FailedToImportPydantic(String),
}

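// `pyo3::DowncastError` borrows from the Python objects involved, so it is
// flattened into an owned `String` here rather than stored with its lifetimes.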
impl<'a> From<pyo3::DowncastError<'a, 'a>> for TypeError {
    fn from(err: pyo3::DowncastError<'a, 'a>) -> Self {
        TypeError::DowncastError(err.to_string())
    }
}

impl From<TypeError> for PyErr {
    fn from(err: TypeError) -> PyErr {
        let msg = err.to_string();
        PyRuntimeError::new_err(msg)
    }
}

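// The reverse conversion wraps Python-raised errors in the `PyError` variant,
// letting Rust code absorb them with `?` as well.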
impl From<PyErr> for TypeError {
    fn from(err: PyErr) -> TypeError {
        TypeError::PyError(err.to_string())
    }
}

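/// Errors raised during contract handling; wraps `TypeError` and Python errors.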
#[derive(Error, Debug)]
pub enum ContractError {
    #[error(transparent)]
    TypeError(#[from] TypeError),

    #[error("{0}")]
    PyError(String),
}

impl From<ContractError> for PyErr {
    fn from(err: ContractError) -> PyErr {
        let msg = err.to_string();
        PyRuntimeError::new_err(msg)
    }
}

impl From<PyErr> for ContractError {
    fn from(err: PyErr) -> ContractError {
        ContractError::PyError(err.to_string())
    }
}

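/// Errors raised while extracting records into `ServerRecord` variants.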
#[derive(Error, Debug)]
pub enum RecordError {
    #[error("Unable to extract record into any known ServerRecord variant")]
    ExtractionError,

    #[error("No server records found")]
    EmptyServerRecordsError,

    #[error(transparent)]
    SerdeJsonError(#[from] serde_json::Error),

    #[error("Unexpected record type")]
    InvalidDriftTypeError,

    #[error("{0}")]
    PyError(String),

    #[error("Failed to supply either input or response for the LLM record")]
    MissingInputOrResponse,
}

impl From<RecordError> for PyErr {
    fn from(err: RecordError) -> PyErr {
        let msg = err.to_string();
        PyRuntimeError::new_err(msg)
    }
}

impl From<PyErr> for RecordError {
    fn from(err: PyErr) -> RecordError {
        RecordError::PyError(err.to_string())
    }
}

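/// Errors raised while building and validating drift profiles, including
/// LLM metric workflow validation.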
#[derive(Error, Debug)]
pub enum ProfileError {
    #[error(transparent)]
    SerdeJsonError(#[from] serde_json::Error),

    #[error("Features and array are not the same length")]
    FeatureArrayLengthError,

    #[error("Unexpected record type")]
    InvalidDriftTypeError,

    #[error(transparent)]
    UtilError(#[from] UtilError),

    #[error(transparent)]
    TypeError(#[from] TypeError),

    #[error(transparent)]
    IoError(#[from] std::io::Error),

    #[error("Missing sample argument")]
    MissingSampleError,

    #[error("Missing sample size argument")]
    MissingSampleSizeError,

    #[error("Custom alert thresholds have not been set")]
    CustomThresholdNotSetError,

    #[error("Custom alert threshold not found")]
    CustomAlertThresholdNotFound,

    #[error("{0}")]
    PyError(String),

    #[error("Missing evaluation workflow")]
    MissingWorkflowError,

    #[error("Invalid argument for workflow. Argument must be a Workflow object")]
    InvalidWorkflowType,

    #[error(transparent)]
    AgentError(#[from] potato_head::AgentError),

    #[error(transparent)]
    WorkflowError(#[from] potato_head::WorkflowError),

    #[error("Invalid metric name found: {0}")]
    InvalidMetricNameError(String),

    #[error("No metrics provided for workflow validation")]
    EmptyMetricsList,

    #[error("LLM Metric requires at least one bound parameter: {0}")]
    NeedAtLeastOneBoundParameterError(String),

    #[error(
        "Missing prompt in LLM Metric. If providing a list of metrics, a prompt must be present: {0}"
    )]
    MissingPromptError(String),

    #[error("No tasks found in the workflow when validating: {0}")]
    NoTasksFoundError(String),

    #[error(
        "Invalid prompt response type. Expected Score as the output type for the LLMMetric prompt. Id: {0}"
    )]
    InvalidResponseType(String),

    #[error("No metrics found for the output task: {0}")]
    MetricNotFoundForOutputTask(String),

    #[error("Metric not found in profile LLM metrics: {0}")]
    MetricNotFound(String),

    #[error(transparent)]
    PotatoTypeError(#[from] potato_head::TypeError),
}

impl From<ProfileError> for PyErr {
    fn from(err: ProfileError) -> PyErr {
        let msg = err.to_string();
        PyRuntimeError::new_err(msg)
    }
}

impl From<PyErr> for ProfileError {
    fn from(err: PyErr) -> ProfileError {
        ProfileError::PyError(err.to_string())
    }
}