vapi_client/models/groq_model.rs

/*
 * Vapi API
 *
 * Voice AI for developers.
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */

use crate::models;
use serde::{Deserialize, Serialize};

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GroqModel {
    /// This is the starting state for the conversation.
    #[serde(rename = "messages", skip_serializing_if = "Option::is_none")]
    pub messages: Option<Vec<models::OpenAiMessage>>,
    /// These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`. Both `tools` and `toolIds` can be used together.
    #[serde(rename = "tools", skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<models::AnyscaleModelToolsInner>>,
    /// These are the tools that the assistant can use during the call. To use transient tools, use `tools`. Both `tools` and `toolIds` can be used together.
    #[serde(rename = "toolIds", skip_serializing_if = "Option::is_none")]
    pub tool_ids: Option<Vec<String>>,
    #[serde(rename = "knowledgeBase", skip_serializing_if = "Option::is_none")]
    pub knowledge_base: Option<models::CreateCustomKnowledgeBaseDto>,
    /// This is the ID of the knowledge base the model will use.
    #[serde(rename = "knowledgeBaseId", skip_serializing_if = "Option::is_none")]
    pub knowledge_base_id: Option<String>,
    /// This is the name of the model. Ex. llama-3.3-70b-versatile
    #[serde(rename = "model")]
    pub model: ModelTrue,
    #[serde(rename = "provider")]
    pub provider: ProviderTrue,
    /// This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.
    #[serde(rename = "temperature", skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f64>,
    /// This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.
    #[serde(rename = "maxTokens", skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<f64>,
    /// This determines whether we detect the user's emotion while they speak and send it as additional info to the model. Default is `false` because the model is usually good at understanding the user's emotion from text. @default false
    #[serde(
        rename = "emotionRecognitionEnabled",
        skip_serializing_if = "Option::is_none"
    )]
    pub emotion_recognition_enabled: Option<bool>,
    /// This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. For example, `gpt-3.5-turbo` if the provider is `openai`. Default is 0. @default 0
    #[serde(rename = "numFastTurns", skip_serializing_if = "Option::is_none")]
    pub num_fast_turns: Option<f64>,
}

impl GroqModel {
    pub fn new(model: ModelTrue, provider: ProviderTrue) -> GroqModel {
        GroqModel {
            messages: None,
            tools: None,
            tool_ids: None,
            knowledge_base: None,
            knowledge_base_id: None,
            model,
            provider,
            temperature: None,
            max_tokens: None,
            emotion_recognition_enabled: None,
            num_fast_turns: None,
        }
    }
}
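
// A minimal usage sketch, assuming `serde_json` is available as a dev
// dependency: construct a `GroqModel` via `new` with the two required fields
// and check that the serde renames apply and that unset optional fields are
// skipped by `skip_serializing_if` during serialization.
#[cfg(test)]
mod groq_model_tests {
    use super::*;

    #[test]
    fn serializes_required_fields_and_skips_unset_options() {
        let m = GroqModel::new(ModelTrue::Llama3Period370bVersatile, ProviderTrue::Groq);
        let json = serde_json::to_value(&m).expect("serialization should succeed");
        // `model` and `provider` serialize as their `#[serde(rename = ...)]` strings.
        assert_eq!(json["model"], "llama-3.3-70b-versatile");
        assert_eq!(json["provider"], "groq");
        // Optional fields are `None`, so they are omitted from the output.
        assert!(json.get("temperature").is_none());
        assert!(json.get("maxTokens").is_none());
    }
}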
/// This is the name of the model. Ex. llama-3.3-70b-versatile
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ModelTrue {
    #[serde(rename = "deepseek-r1-distill-llama-70b")]
    DeepseekR1DistillLlama70b,
    #[serde(rename = "llama-3.3-70b-versatile")]
    Llama3Period370bVersatile,
    #[serde(rename = "llama-3.1-405b-reasoning")]
    Llama3Period1405bReasoning,
    #[serde(rename = "llama-3.1-8b-instant")]
    Llama3Period18bInstant,
    #[serde(rename = "llama3-8b-8192")]
    Llama38b8192,
    #[serde(rename = "llama3-70b-8192")]
    Llama370b8192,
    #[serde(rename = "gemma2-9b-it")]
    Gemma29bIt,
    #[serde(rename = "meta-llama/llama-4-maverick-17b-128e-instruct")]
    MetaLlamaSlashLlama4Maverick17b128eInstruct,
    #[serde(rename = "meta-llama/llama-4-scout-17b-16e-instruct")]
    MetaLlamaSlashLlama4Scout17b16eInstruct,
    #[serde(rename = "mistral-saba-24b")]
    MistralSaba24b,
    #[serde(rename = "compound-beta")]
    CompoundBeta,
    #[serde(rename = "compound-beta-mini")]
    CompoundBetaMini,
}

impl Default for ModelTrue {
    fn default() -> ModelTrue {
        Self::DeepseekR1DistillLlama70b
    }
}
/// This is the provider of the model (always `groq`).
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ProviderTrue {
    #[serde(rename = "groq")]
    Groq,
}

impl Default for ProviderTrue {
    fn default() -> ProviderTrue {
        Self::Groq
    }
}