vapi_client/models/
success_evaluation_plan.rs

1/*
2 * Vapi API
3 *
4 * Voice AI for developers.
5 *
6 * The version of the OpenAPI document: 1.0
7 *
8 * Generated by: https://openapi-generator.tech
9 */
10
11use crate::models;
12use serde::{Deserialize, Serialize};
13
/// Plan controlling whether and how a call's success evaluation is generated;
/// the result is stored in `call.analysis.successEvaluation`.
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct SuccessEvaluationPlan {
    /// This enforces the rubric of the evaluation. The output is stored in
    /// `call.analysis.successEvaluation`.
    ///
    /// Options include:
    /// - 'NumericScale': A scale of 1 to 10.
    /// - 'DescriptiveScale': A scale of Excellent, Good, Fair, Poor.
    /// - 'Checklist': A checklist of criteria and their status.
    /// - 'Matrix': A grid that evaluates multiple criteria across different performance levels.
    /// - 'PercentageScale': A scale of 0% to 100%.
    /// - 'LikertScale': A scale of Strongly Agree, Agree, Neutral, Disagree, Strongly Disagree.
    /// - 'AutomaticRubric': Automatically break down evaluation into several criteria, each with its own score.
    /// - 'PassFail': A simple 'true' if call passed, 'false' if not.
    ///
    /// Default is 'PassFail'.
    #[serde(rename = "rubric", skip_serializing_if = "Option::is_none")]
    pub rubric: Option<RubricTrue>,
    /// These are the messages used to generate the success evaluation.
    ///
    /// @default: ``` [   {     \"role\": \"system\",     \"content\": \"You are an expert call evaluator. You will be given a transcript of a call and the system prompt of the AI participant. Determine if the call was successful based on the objectives inferred from the system prompt. DO NOT return anything except the result.\\n\\nRubric:\\\\n{{rubric}}\\n\\nOnly respond with the result.\"   },   {     \"role\": \"user\",     \"content\": \"Here is the transcript:\\n\\n{{transcript}}\\n\\n\"   },   {     \"role\": \"user\",     \"content\": \"Here was the system prompt of the call:\\n\\n{{systemPrompt}}\\n\\n. Here is the ended reason of the call:\\n\\n{{endedReason}}\\n\\n\"   } ]```
    ///
    /// You can customize by providing any messages you want.
    ///
    /// Here are the template variables available:
    /// - {{transcript}}: the transcript of the call from `call.artifact.transcript`
    /// - {{systemPrompt}}: the system prompt of the call from `assistant.model.messages[type=system].content`
    /// - {{rubric}}: the rubric of the success evaluation from `successEvaluationPlan.rubric`
    /// - {{endedReason}}: the ended reason of the call from `call.endedReason`
    #[serde(rename = "messages", skip_serializing_if = "Option::is_none")]
    pub messages: Option<Vec<serde_json::Value>>,
    /// This determines whether a success evaluation is generated and stored in
    /// `call.analysis.successEvaluation`. Defaults to true.
    ///
    /// Usage:
    /// - If you want to disable the success evaluation, set this to false.
    ///
    /// @default true
    #[serde(rename = "enabled", skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
    /// This is how long the request is tried before giving up. When the request
    /// times out, `call.analysis.successEvaluation` will be empty.
    ///
    /// Usage:
    /// - To guarantee the success evaluation is generated, set this value high.
    ///   Note, this will delay the end of call report in cases where the model
    ///   is slow to respond.
    ///
    /// @default 5 seconds
    #[serde(rename = "timeoutSeconds", skip_serializing_if = "Option::is_none")]
    pub timeout_seconds: Option<f64>,
}
29
30impl SuccessEvaluationPlan {
31    pub fn new() -> SuccessEvaluationPlan {
32        SuccessEvaluationPlan {
33            rubric: None,
34            messages: None,
35            enabled: None,
36            timeout_seconds: None,
37        }
38    }
39}
40/// This enforces the rubric of the evaluation. The output is stored in `call.analysis.successEvaluation`.  Options include: - 'NumericScale': A scale of 1 to 10. - 'DescriptiveScale': A scale of Excellent, Good, Fair, Poor. - 'Checklist': A checklist of criteria and their status. - 'Matrix': A grid that evaluates multiple criteria across different performance levels. - 'PercentageScale': A scale of 0% to 100%. - 'LikertScale': A scale of Strongly Agree, Agree, Neutral, Disagree, Strongly Disagree. - 'AutomaticRubric': Automatically break down evaluation into several criteria, each with its own score. - 'PassFail': A simple 'true' if call passed, 'false' if not.  Default is 'PassFail'.
41#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
42pub enum RubricTrue {
43    #[serde(rename = "NumericScale")]
44    NumericScale,
45    #[serde(rename = "DescriptiveScale")]
46    DescriptiveScale,
47    #[serde(rename = "Checklist")]
48    Checklist,
49    #[serde(rename = "Matrix")]
50    Matrix,
51    #[serde(rename = "PercentageScale")]
52    PercentageScale,
53    #[serde(rename = "LikertScale")]
54    LikertScale,
55    #[serde(rename = "AutomaticRubric")]
56    AutomaticRubric,
57    #[serde(rename = "PassFail")]
58    PassFail,
59}
60
61impl Default for RubricTrue {
62    fn default() -> RubricTrue {
63        Self::NumericScale
64    }
65}