// vapi_client/models/google_model.rs

1/*
2 * Vapi API
3 *
4 * API for building voice assistants
5 *
6 * The version of the OpenAPI document: 1.0
7 *
8 * Generated by: https://openapi-generator.tech
9 */
10
11use serde::{Deserialize, Serialize};
12
13use crate::models;
14
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GoogleModel {
    /// This is the starting state for the conversation.
    #[serde(rename = "messages", skip_serializing_if = "Option::is_none")]
    pub messages: Option<Vec<models::OpenAiMessage>>,
    /// These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.  Both `tools` and `toolIds` can be used together.
    #[serde(rename = "tools", skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<models::AnyscaleModelToolsInner>>,
    /// These are the tools that the assistant can use during the call. To use transient tools, use `tools`.  Both `tools` and `toolIds` can be used together.
    #[serde(rename = "toolIds", skip_serializing_if = "Option::is_none")]
    pub tool_ids: Option<Vec<String>>,
    /// NOTE(review): presumably an inline knowledge-base configuration, as opposed to
    /// referencing an existing one via `knowledge_base_id` — confirm against the OpenAPI spec.
    #[serde(rename = "knowledgeBase", skip_serializing_if = "Option::is_none")]
    pub knowledge_base: Option<models::AnyscaleModelKnowledgeBase>,
    /// This is the ID of the knowledge base the model will use.
    #[serde(rename = "knowledgeBaseId", skip_serializing_if = "Option::is_none")]
    pub knowledge_base_id: Option<String>,
    /// This is the Google model that will be used.
    #[serde(rename = "model")]
    pub model: Model,
    #[serde(rename = "provider")]
    pub provider: Provider,
    /// This is the session configuration for the Gemini Flash 2.0 Multimodal Live API. Only applicable if the model `gemini-2.0-flash-realtime-exp` is selected.
    #[serde(rename = "realtimeConfig", skip_serializing_if = "Option::is_none")]
    pub realtime_config: Option<models::GoogleRealtimeConfig>,
    /// This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.
    #[serde(rename = "temperature", skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f64>,
    /// This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.
    #[serde(rename = "maxTokens", skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<f64>,
    /// This determines whether we detect the user's emotion while they speak and send it as additional info to the model.  Default `false` because the model is usually good at understanding the user's emotion from text.  @default false
    #[serde(
        rename = "emotionRecognitionEnabled",
        skip_serializing_if = "Option::is_none"
    )]
    pub emotion_recognition_enabled: Option<bool>,
    /// This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai.  Default is 0.  @default 0
    #[serde(rename = "numFastTurns", skip_serializing_if = "Option::is_none")]
    pub num_fast_turns: Option<f64>,
}
55
56impl GoogleModel {
57    pub fn new(model: Model, provider: Provider) -> GoogleModel {
58        GoogleModel {
59            messages: None,
60            tools: None,
61            tool_ids: None,
62            knowledge_base: None,
63            knowledge_base_id: None,
64            model,
65            provider,
66            realtime_config: None,
67            temperature: None,
68            max_tokens: None,
69            emotion_recognition_enabled: None,
70            num_fast_turns: None,
71        }
72    }
73}
/// This is the Google model that will be used.
///
/// Variant names are generator-mangled: dots in the wire value become
/// `Period` (e.g. `gemini-2.0-flash` -> `Gemini2Period0Flash`); the exact
/// wire strings are preserved in the `#[serde(rename = ...)]` attributes.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Model {
    #[serde(rename = "gemini-2.0-flash-thinking-exp")]
    Gemini2Period0FlashThinkingExp,
    #[serde(rename = "gemini-2.0-pro-exp-02-05")]
    Gemini2Period0ProExp0205,
    #[serde(rename = "gemini-2.0-flash")]
    Gemini2Period0Flash,
    #[serde(rename = "gemini-2.0-flash-lite-preview-02-05")]
    Gemini2Period0FlashLitePreview0205,
    #[serde(rename = "gemini-2.0-flash-exp")]
    Gemini2Period0FlashExp,
    #[serde(rename = "gemini-2.0-flash-realtime-exp")]
    Gemini2Period0FlashRealtimeExp,
    #[serde(rename = "gemini-1.5-flash")]
    Gemini1Period5Flash,
    #[serde(rename = "gemini-1.5-flash-002")]
    Gemini1Period5Flash002,
    #[serde(rename = "gemini-1.5-pro")]
    Gemini1Period5Pro,
    #[serde(rename = "gemini-1.5-pro-002")]
    Gemini1Period5Pro002,
    #[serde(rename = "gemini-1.0-pro")]
    Gemini1Period0Pro,
}
100
101impl Default for Model {
102    fn default() -> Model {
103        Self::Gemini2Period0FlashThinkingExp
104    }
105}
/// This is the provider discriminator; the only accepted wire value is `"google"`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Provider {
    #[serde(rename = "google")]
    Google,
}
112
113impl Default for Provider {
114    fn default() -> Provider {
115        Self::Google
116    }
117}