vapi_client/models/custom_llm_model.rs
/*
 * Vapi API
 *
 * Voice AI for developers.
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */

use crate::models;
use serde::{Deserialize, Serialize};

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct CustomLlmModel {
    /// This is the starting state for the conversation.
    #[serde(rename = "messages", skip_serializing_if = "Option::is_none")]
    pub messages: Option<Vec<models::OpenAiMessage>>,
    /// These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`. Both `tools` and `toolIds` can be used together.
    #[serde(rename = "tools", skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<models::AnyscaleModelToolsInner>>,
    /// These are the tools that the assistant can use during the call. To use transient tools, use `tools`. Both `tools` and `toolIds` can be used together.
    #[serde(rename = "toolIds", skip_serializing_if = "Option::is_none")]
    pub tool_ids: Option<Vec<String>>,
    #[serde(rename = "knowledgeBase", skip_serializing_if = "Option::is_none")]
    pub knowledge_base: Option<models::CreateCustomKnowledgeBaseDto>,
    /// This is the ID of the knowledge base the model will use.
    #[serde(rename = "knowledgeBaseId", skip_serializing_if = "Option::is_none")]
    pub knowledge_base_id: Option<String>,
    /// This is the provider that will be used for the model. Any service, including your own server, that is compatible with the OpenAI API can be used.
    #[serde(rename = "provider")]
    pub provider: ProviderTrue,
    /// This determines whether metadata is sent in requests to the custom provider.
    ///
    /// - `off` will not send any metadata. Payload will look like `{ messages }`.
    /// - `variable` will send `assistant.metadata` as a variable on the payload. Payload will look like `{ messages, metadata }`.
    /// - `destructured` will send `assistant.metadata` fields directly on the payload. Payload will look like `{ messages, ...metadata }`.
    ///
    /// Further, `variable` and `destructured` will send `call`, `phoneNumber`, and `customer` objects in the payload.
    ///
    /// Default is `variable`.
    #[serde(rename = "metadataSendMode", skip_serializing_if = "Option::is_none")]
    pub metadata_send_mode: Option<MetadataSendModeTrue>,
    /// This is the URL we'll use for the OpenAI client's `baseURL`. Ex. https://openrouter.ai/api/v1
    #[serde(rename = "url")]
    pub url: String,
    /// This sets the timeout for the connection to the custom provider without needing to stream any tokens back. Default is 20 seconds.
    #[serde(rename = "timeoutSeconds", skip_serializing_if = "Option::is_none")]
    pub timeout_seconds: Option<f64>,
    /// This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b
    #[serde(rename = "model")]
    pub model: String,
    /// This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.
    #[serde(rename = "temperature", skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f64>,
    /// This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.
    #[serde(rename = "maxTokens", skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<f64>,
    /// This determines whether we detect the user's emotion while they speak and send it as additional info to the model. Default is `false` because models are usually good at understanding the user's emotion from text. @default false
    #[serde(
        rename = "emotionRecognitionEnabled",
        skip_serializing_if = "Option::is_none"
    )]
    pub emotion_recognition_enabled: Option<bool>,
    /// This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. For example, gpt-3.5-turbo if the provider is openai. Default is 0. @default 0
    #[serde(rename = "numFastTurns", skip_serializing_if = "Option::is_none")]
    pub num_fast_turns: Option<f64>,
}

impl CustomLlmModel {
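    /// Builds a `CustomLlmModel` from its three required fields (`provider`,
    /// `url`, and `model`), leaving every optional field unset.
    ///
    /// A minimal construction sketch, assuming the crate's generated module
    /// layout (`vapi_client::models::custom_llm_model`); the URL and model
    /// name are the illustrative values from the field docs above:
    ///
    /// ```
    /// use vapi_client::models::custom_llm_model::{CustomLlmModel, ProviderTrue};
    ///
    /// let model = CustomLlmModel::new(
    ///     ProviderTrue::CustomLlm,
    ///     "https://openrouter.ai/api/v1".to_string(),
    ///     "cognitivecomputations/dolphin-mixtral-8x7b".to_string(),
    /// );
    /// assert!(model.messages.is_none());
    /// assert!(model.temperature.is_none());
    /// ```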
    pub fn new(provider: ProviderTrue, url: String, model: String) -> CustomLlmModel {
        CustomLlmModel {
            messages: None,
            tools: None,
            tool_ids: None,
            knowledge_base: None,
            knowledge_base_id: None,
            provider,
            metadata_send_mode: None,
            url,
            timeout_seconds: None,
            model,
            temperature: None,
            max_tokens: None,
            emotion_recognition_enabled: None,
            num_fast_turns: None,
        }
    }
}

/// This is the provider that will be used for the model. Any service, including your own server, that is compatible with the OpenAI API can be used.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ProviderTrue {
    #[serde(rename = "custom-llm")]
    CustomLlm,
}

impl Default for ProviderTrue {
    fn default() -> ProviderTrue {
        Self::CustomLlm
    }
}

/// This determines whether metadata is sent in requests to the custom provider.
///
/// - `off` will not send any metadata. Payload will look like `{ messages }`.
/// - `variable` will send `assistant.metadata` as a variable on the payload. Payload will look like `{ messages, metadata }`.
/// - `destructured` will send `assistant.metadata` fields directly on the payload. Payload will look like `{ messages, ...metadata }`.
///
/// Further, `variable` and `destructured` will send `call`, `phoneNumber`, and `customer` objects in the payload.
///
/// Default is `variable`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum MetadataSendModeTrue {
    #[serde(rename = "off")]
    Off,
    #[serde(rename = "variable")]
    Variable,
    #[serde(rename = "destructured")]
    Destructured,
}

impl Default for MetadataSendModeTrue {
    fn default() -> MetadataSendModeTrue {
        // The API documents `variable` as the default send mode, so mirror
        // that here instead of falling back to the first declared variant.
        Self::Variable
    }
}
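
// A minimal round-trip sketch of the serde behavior above (assumes
// `serde_json` is available as a dev-dependency, which generated crates
// normally include). It checks that fields serialize under their camelCase
// wire names and that unset `Option` fields are omitted entirely via
// `skip_serializing_if`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn serializes_wire_names_and_skips_unset_fields() {
        let mut model = CustomLlmModel::new(
            ProviderTrue::CustomLlm,
            "https://openrouter.ai/api/v1".to_string(),
            "cognitivecomputations/dolphin-mixtral-8x7b".to_string(),
        );
        model.metadata_send_mode = Some(MetadataSendModeTrue::Destructured);

        let json = serde_json::to_value(&model).unwrap();

        // Required fields appear under their camelCase wire names.
        assert_eq!(json["provider"], "custom-llm");
        assert_eq!(json["url"], "https://openrouter.ai/api/v1");
        assert_eq!(json["model"], "cognitivecomputations/dolphin-mixtral-8x7b");
        assert_eq!(json["metadataSendMode"], "destructured");

        // Unset optional fields are not serialized at all.
        assert!(json.get("maxTokens").is_none());
        assert!(json.get("emotionRecognitionEnabled").is_none());
    }
}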