vapi_client/models/groq_model.rs

/*
 * Vapi API
 *
 * API for building voice assistants
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */

use serde::{Deserialize, Serialize};
use utoipa::ToSchema;

use crate::models;
16
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct GroqModel {
    /// This is the starting state for the conversation.
    #[serde(rename = "messages", skip_serializing_if = "Option::is_none")]
    pub messages: Option<Vec<models::OpenAiMessage>>,
    /// These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.  Both `tools` and `toolIds` can be used together.
    #[serde(rename = "tools", skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<models::AnyscaleModelToolsInner>>,
    /// These are the tools that the assistant can use during the call. To use transient tools, use `tools`.  Both `tools` and `toolIds` can be used together.
    #[serde(rename = "toolIds", skip_serializing_if = "Option::is_none")]
    pub tool_ids: Option<Vec<String>>,
    #[serde(rename = "knowledgeBase", skip_serializing_if = "Option::is_none")]
    pub knowledge_base: Option<models::AnyscaleModelKnowledgeBase>,
    /// This is the ID of the knowledge base the model will use.
    #[serde(rename = "knowledgeBaseId", skip_serializing_if = "Option::is_none")]
    pub knowledge_base_id: Option<String>,
    /// This is the name of the model. Ex. llama-3.3-70b-versatile
    #[serde(rename = "model")]
    pub model: Model,
    #[serde(rename = "provider")]
    pub provider: Provider,
    /// This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.
    #[serde(rename = "temperature", skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f64>,
    /// This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.
    #[serde(rename = "maxTokens", skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<f64>,
    /// This determines whether we detect the user's emotion while they speak and send it as additional info to the model.  Defaults to `false` because the model is usually good at understanding the user's emotion from text.  @default false
    #[serde(
        rename = "emotionRecognitionEnabled",
        skip_serializing_if = "Option::is_none"
    )]
    pub emotion_recognition_enabled: Option<bool>,
    /// This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. For example, gpt-3.5-turbo if the provider is openai.  Default is 0.  @default 0
    #[serde(rename = "numFastTurns", skip_serializing_if = "Option::is_none")]
    pub num_fast_turns: Option<f64>,
}

impl GroqModel {
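    /// Creates a `GroqModel` with only the required `model` and `provider`
    /// fields set; every optional field starts as `None` and is skipped
    /// during serialization.
    ///
    /// A minimal usage sketch; the `vapi_client` module path is assumed from
    /// this file's location, and `serde_json` is assumed to be available:
    ///
    /// ```ignore
    /// use vapi_client::models::groq_model::{GroqModel, Model, Provider};
    ///
    /// let model = GroqModel::new(Model::Llama3Period370bVersatile, Provider::Groq);
    /// // Unset `Option` fields are omitted, so only `model` and `provider` appear.
    /// assert_eq!(
    ///     serde_json::to_string(&model).unwrap(),
    ///     r#"{"model":"llama-3.3-70b-versatile","provider":"groq"}"#
    /// );
    /// ```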
    pub fn new(model: Model, provider: Provider) -> GroqModel {
        GroqModel {
            messages: None,
            tools: None,
            tool_ids: None,
            knowledge_base: None,
            knowledge_base_id: None,
            model,
            provider,
            temperature: None,
            max_tokens: None,
            emotion_recognition_enabled: None,
            num_fast_turns: None,
        }
    }
}
/// This is the name of the model. Ex. llama-3.3-70b-versatile
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, ToSchema)]
pub enum Model {
    #[serde(rename = "deepseek-r1-distill-llama-70b")]
    DeepseekR1DistillLlama70b,
    #[serde(rename = "llama-3.3-70b-versatile")]
    Llama3Period370bVersatile,
    #[serde(rename = "llama-3.1-405b-reasoning")]
    Llama3Period1405bReasoning,
    #[serde(rename = "llama-3.1-70b-versatile")]
    Llama3Period170bVersatile,
    #[serde(rename = "llama-3.1-8b-instant")]
    Llama3Period18bInstant,
    #[serde(rename = "mixtral-8x7b-32768")]
    Mixtral8x7b32768,
    #[serde(rename = "llama3-8b-8192")]
    Llama38b8192,
    #[serde(rename = "llama3-70b-8192")]
    Llama370b8192,
    #[serde(rename = "gemma2-9b-it")]
    Gemma29bIt,
}

impl Default for Model {
    fn default() -> Model {
        Self::DeepseekR1DistillLlama70b
    }
}
/// This is the provider of the model, which is always `groq`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, ToSchema)]
pub enum Provider {
    #[serde(rename = "groq")]
    Groq,
}

impl Default for Provider {
    fn default() -> Provider {
        Self::Groq
    }
}
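
// A small round-trip sketch of the serde behaviour declared above. These
// tests assume `serde_json` is available as a dev-dependency of the crate;
// that is an assumption, not something this file itself declares.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn optional_fields_are_skipped_when_none() {
        // Every `Option` field uses `skip_serializing_if`, so a freshly
        // constructed value serializes to just `model` and `provider`.
        let m = GroqModel::new(Model::Llama3Period18bInstant, Provider::Groq);
        let json = serde_json::to_string(&m).unwrap();
        assert_eq!(json, r#"{"model":"llama-3.1-8b-instant","provider":"groq"}"#);
    }

    #[test]
    fn camel_case_keys_round_trip() {
        // The serde renames map camelCase JSON keys onto snake_case fields.
        let json = r#"{"model":"gemma2-9b-it","provider":"groq","maxTokens":250.0,"numFastTurns":0.0}"#;
        let m: GroqModel = serde_json::from_str(json).unwrap();
        assert_eq!(m.model, Model::Gemma29bIt);
        assert_eq!(m.max_tokens, Some(250.0));
        assert_eq!(m.num_fast_turns, Some(0.0));
    }
}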