vapi_client/models/
google_model.rs

1/*
2 * Vapi API
3 *
4 * Voice AI for developers.
5 *
6 * The version of the OpenAPI document: 1.0
7 *
8 * Generated by: https://openapi-generator.tech
9 */
10
11use crate::models;
12use serde::{Deserialize, Serialize};
13
/// Request/response model for a Google (Gemini) LLM configuration.
/// Generated from the Vapi OpenAPI spec; field names are camelCase on the
/// wire (see the `serde(rename)` attributes) and snake_case in Rust.
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GoogleModel {
    /// This is the starting state for the conversation.
    #[serde(rename = "messages", skip_serializing_if = "Option::is_none")]
    pub messages: Option<Vec<models::OpenAiMessage>>,
    /// These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.  Both `tools` and `toolIds` can be used together.
    #[serde(rename = "tools", skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<models::AnyscaleModelToolsInner>>,
    /// These are the tools that the assistant can use during the call. To use transient tools, use `tools`.  Both `tools` and `toolIds` can be used together.
    #[serde(rename = "toolIds", skip_serializing_if = "Option::is_none")]
    pub tool_ids: Option<Vec<String>>,
    // Inline (transient) knowledge base definition; presumably mutually
    // exclusive with `knowledge_base_id` — TODO confirm against the API spec.
    #[serde(rename = "knowledgeBase", skip_serializing_if = "Option::is_none")]
    pub knowledge_base: Option<models::CreateCustomKnowledgeBaseDto>,
    /// This is the ID of the knowledge base the model will use.
    #[serde(rename = "knowledgeBaseId", skip_serializing_if = "Option::is_none")]
    pub knowledge_base_id: Option<String>,
    /// This is the Google model that will be used.
    #[serde(rename = "model")]
    pub model: ModelTrue,
    // Required discriminator; the only valid value is `google` (see `ProviderTrue`).
    #[serde(rename = "provider")]
    pub provider: ProviderTrue,
    /// This is the session configuration for the Gemini Flash 2.0 Multimodal Live API. Only applicable if the model `gemini-2.0-flash-realtime-exp` is selected.
    #[serde(rename = "realtimeConfig", skip_serializing_if = "Option::is_none")]
    pub realtime_config: Option<models::GoogleRealtimeConfig>,
    /// This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.
    #[serde(rename = "temperature", skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f64>,
    /// This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.
    // NOTE(review): typed f64 (not an integer) because the OpenAPI spec declares
    // it as `number`; the generator maps that to f64.
    #[serde(rename = "maxTokens", skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<f64>,
    /// This determines whether we detect the user's emotion while they speak and send it as additional info to the model.  Default is `false` because the model is usually good at understanding the user's emotion from text.  @default false
    #[serde(
        rename = "emotionRecognitionEnabled",
        skip_serializing_if = "Option::is_none"
    )]
    pub emotion_recognition_enabled: Option<bool>,
    /// This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai.  Default is 0.  @default 0
    #[serde(rename = "numFastTurns", skip_serializing_if = "Option::is_none")]
    pub num_fast_turns: Option<f64>,
}
54
55impl GoogleModel {
56    pub fn new(model: ModelTrue, provider: ProviderTrue) -> GoogleModel {
57        GoogleModel {
58            messages: None,
59            tools: None,
60            tool_ids: None,
61            knowledge_base: None,
62            knowledge_base_id: None,
63            model,
64            provider,
65            realtime_config: None,
66            temperature: None,
67            max_tokens: None,
68            emotion_recognition_enabled: None,
69            num_fast_turns: None,
70        }
71    }
72}
/// This is the Google model that will be used.
///
/// Each variant serializes to the exact Gemini model-ID string shown in its
/// `serde(rename)` attribute (e.g. `Gemini2Period0Flash` ⇄ "gemini-2.0-flash").
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ModelTrue {
    #[serde(rename = "gemini-2.5-pro-preview-05-06")]
    Gemini2Period5ProPreview0506,
    #[serde(rename = "gemini-2.5-flash-preview-05-20")]
    Gemini2Period5FlashPreview0520,
    #[serde(rename = "gemini-2.5-flash-preview-04-17")]
    Gemini2Period5FlashPreview0417,
    #[serde(rename = "gemini-2.0-flash-thinking-exp")]
    Gemini2Period0FlashThinkingExp,
    #[serde(rename = "gemini-2.0-pro-exp-02-05")]
    Gemini2Period0ProExp0205,
    #[serde(rename = "gemini-2.0-flash")]
    Gemini2Period0Flash,
    #[serde(rename = "gemini-2.0-flash-lite")]
    Gemini2Period0FlashLite,
    #[serde(rename = "gemini-2.0-flash-lite-preview-02-05")]
    Gemini2Period0FlashLitePreview0205,
    #[serde(rename = "gemini-2.0-flash-exp")]
    Gemini2Period0FlashExp,
    // Only this variant enables `GoogleModel::realtime_config` (Multimodal Live API).
    #[serde(rename = "gemini-2.0-flash-realtime-exp")]
    Gemini2Period0FlashRealtimeExp,
    #[serde(rename = "gemini-1.5-flash")]
    Gemini1Period5Flash,
    #[serde(rename = "gemini-1.5-flash-002")]
    Gemini1Period5Flash002,
    #[serde(rename = "gemini-1.5-pro")]
    Gemini1Period5Pro,
    #[serde(rename = "gemini-1.5-pro-002")]
    Gemini1Period5Pro002,
    #[serde(rename = "gemini-1.0-pro")]
    Gemini1Period0Pro,
}
107
108impl Default for ModelTrue {
109    fn default() -> ModelTrue {
110        Self::Gemini2Period5ProPreview0506
111    }
112}
/// Provider discriminator for [`GoogleModel`]; the only accepted wire value
/// is the string "google".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ProviderTrue {
    #[serde(rename = "google")]
    Google,
}
119
120impl Default for ProviderTrue {
121    fn default() -> ProviderTrue {
122        Self::Google
123    }
124}