1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
/*
* OpenAI API
*
* The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
*
* The version of the OpenAPI document: 2.3.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
use serde::{Deserialize, Serialize};
/// Eval : An Eval object with a data source config and testing criteria. An Eval represents a task to be done for your LLM integration. Like: - Improve the quality of my chatbot - See how well my chatbot handles customer support - Check if o4-mini is better at my usecase than gpt-4o
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, bon::Builder)]
pub struct Eval {
/// The object type.
// Constrained to the single-variant `Object` enum, so it always
// round-trips as the literal string "eval".
#[serde(rename = "object")]
pub object: Object,
/// Unique identifier for the evaluation.
#[serde(rename = "id")]
pub id: String,
/// The name of the evaluation.
#[serde(rename = "name")]
pub name: String,
// Boxed to keep `Eval` small; the config enum can be large.
#[serde(rename = "data_source_config")]
pub data_source_config: Box<models::EvalDataSourceConfig>,
/// A list of testing criteria.
#[serde(rename = "testing_criteria")]
pub testing_criteria: Vec<models::EvalTestingCriteriaInner>,
/// The Unix timestamp (in seconds) for when the eval was created.
// NOTE(review): `i32` seconds overflows in 2038; the generator emitted
// this type, and widening it would break the public API — flagging only.
#[serde(rename = "created_at")]
pub created_at: i32,
/// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.
// `deserialize_with = "Option::deserialize"` makes the field required in
// the JSON payload while still accepting an explicit `null`.
#[serde(rename = "metadata", deserialize_with = "Option::deserialize")]
pub metadata: Option<std::collections::HashMap<String, String>>,
}
impl Eval {
/// Constructs a new [`Eval`].
///
/// An Eval object with a data source config and testing criteria. An Eval represents a task to be done for your LLM integration. Like: - Improve the quality of my chatbot - See how well my chatbot handles customer support - Check if o4-mini is better at my usecase than gpt-4o
///
/// The `data_source_config` value is boxed internally to match the
/// struct's field type; callers pass it by value.
pub fn new(
object: Object,
id: String,
name: String,
data_source_config: models::EvalDataSourceConfig,
testing_criteria: Vec<models::EvalTestingCriteriaInner>,
created_at: i32,
metadata: Option<std::collections::HashMap<String, String>>,
) -> Eval {
Eval {
metadata,
created_at,
testing_criteria,
// `From<T> for Box<T>` performs the allocation.
data_source_config: data_source_config.into(),
name,
id,
object,
}
}
}
/// The object type.
///
/// Single-variant enum: always serialized as the literal string `"eval"`.
// The hand-written `impl Default` was redundant; `#[derive(Default)]`
// with the `#[default]` variant attribute (stable since Rust 1.62)
// produces the identical implementation.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Object {
/// The only valid value.
#[default]
#[serde(rename = "eval")]
Eval,
}
impl std::fmt::Display for Eval {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match serde_json::to_string(self) {
Ok(s) => write!(f, "{}", s),
Err(_) => Err(std::fmt::Error),
}
}
}