/*
 * Vapi API
 *
 * Voice AI for developers.
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */

use crate::models;
use serde::{Deserialize, Serialize};

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct InflectionAiModel {
    /// This is the starting state for the conversation.
    #[serde(rename = "messages", skip_serializing_if = "Option::is_none")]
    pub messages: Option<Vec<models::OpenAiMessage>>,
    /// These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`. Both `tools` and `toolIds` can be used together.
    #[serde(rename = "tools", skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<models::AnyscaleModelToolsInner>>,
    /// These are the tools that the assistant can use during the call. To use transient tools, use `tools`. Both `tools` and `toolIds` can be used together.
    #[serde(rename = "toolIds", skip_serializing_if = "Option::is_none")]
    pub tool_ids: Option<Vec<String>>,
    #[serde(rename = "knowledgeBase", skip_serializing_if = "Option::is_none")]
    pub knowledge_base: Option<models::CreateCustomKnowledgeBaseDto>,
    /// This is the ID of the knowledge base the model will use.
    #[serde(rename = "knowledgeBaseId", skip_serializing_if = "Option::is_none")]
    pub knowledge_base_id: Option<String>,
    /// This is the name of the model. Ex. inflection_3_pi
    #[serde(rename = "model")]
    pub model: ModelTrue,
    #[serde(rename = "provider")]
    pub provider: ProviderTrue,
    /// This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.
    #[serde(rename = "temperature", skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f64>,
    /// This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.
    #[serde(rename = "maxTokens", skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<f64>,
    /// This determines whether we detect the user's emotion while they speak and send it as additional info to the model. Defaults to `false` because the model is usually good at understanding the user's emotion from text. @default false
    #[serde(
        rename = "emotionRecognitionEnabled",
        skip_serializing_if = "Option::is_none"
    )]
    pub emotion_recognition_enabled: Option<bool>,
    /// This sets how many turns at the start of the conversation use a smaller, faster model from the same provider before switching to the primary model. For example, gpt-3.5-turbo if the provider is openai. Default is 0. @default 0
    #[serde(rename = "numFastTurns", skip_serializing_if = "Option::is_none")]
    pub num_fast_turns: Option<f64>,
}

impl InflectionAiModel {
    pub fn new(model: ModelTrue, provider: ProviderTrue) -> InflectionAiModel {
        InflectionAiModel {
            messages: None,
            tools: None,
            tool_ids: None,
            knowledge_base: None,
            knowledge_base_id: None,
            model,
            provider,
            temperature: None,
            max_tokens: None,
            emotion_recognition_enabled: None,
            num_fast_turns: None,
        }
    }
}
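
// A minimal usage sketch (not part of the generated file; assumes `serde_json`
// is available as a dev-dependency): construct the config with the required
// `model` and `provider` variants, set one optional field, and serialize.
// Fields left as `None` are omitted from the JSON via `skip_serializing_if`,
// and the keys come from the `rename` attributes above.
#[cfg(test)]
mod inflection_ai_model_usage {
    use super::*;

    #[test]
    fn serializes_with_renamed_keys() {
        let mut config =
            InflectionAiModel::new(ModelTrue::Inflection3Pi, ProviderTrue::InflectionAi);
        config.max_tokens = Some(250.0);

        let json = serde_json::to_string(&config).expect("serialization should succeed");
        // `maxTokens` comes from the serde rename; all `None` fields are skipped.
        assert_eq!(
            json,
            r#"{"model":"inflection_3_pi","provider":"inflection-ai","maxTokens":250.0}"#
        );
    }
}
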
/// This is the name of the model. Ex. inflection_3_pi
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ModelTrue {
    #[serde(rename = "inflection_3_pi")]
    Inflection3Pi,
}

impl Default for ModelTrue {
    fn default() -> ModelTrue {
        Self::Inflection3Pi
    }
}
/// This is the provider of the model.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ProviderTrue {
    #[serde(rename = "inflection-ai")]
    InflectionAi,
}

impl Default for ProviderTrue {
    fn default() -> ProviderTrue {
        Self::InflectionAi
    }
}
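
// A small round-trip sketch (not part of the generated file; assumes
// `serde_json` is available as a dev-dependency): the variant renames above
// mean the wire values "inflection_3_pi" and "inflection-ai" deserialize into
// the single variant of each enum, and `Default` picks that same variant, so
// `InflectionAiModel::default()` also yields a valid config.
#[cfg(test)]
mod provider_enum_roundtrip {
    use super::*;

    #[test]
    fn deserializes_wire_values() {
        // JSON string literals map onto the renamed unit variants.
        let provider: ProviderTrue = serde_json::from_str(r#""inflection-ai""#).unwrap();
        assert_eq!(provider, ProviderTrue::InflectionAi);
        // The Default impls select the only variant of each enum.
        assert_eq!(ModelTrue::default(), ModelTrue::Inflection3Pi);
    }
}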