vapi_client/models/assistant_overrides.rs

/*
 * Vapi API
 *
 * API for building voice assistants
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */

use serde::{Deserialize, Serialize};
use utoipa::ToSchema;


use crate::models;

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct AssistantOverrides {
    #[serde(rename = "transcriber", skip_serializing_if = "Option::is_none")]
    pub transcriber: Option<models::CreateAssistantDtoTranscriber>,
    #[serde(rename = "model", skip_serializing_if = "Option::is_none")]
    pub model: Option<models::CreateAssistantDtoModel>,
    #[serde(rename = "voice", skip_serializing_if = "Option::is_none")]
    pub voice: Option<models::CreateAssistantDtoVoice>,
    /// This is the first message that the assistant will say. This can also be a URL to a containerized audio file (mp3, wav, etc.).  If unspecified, the assistant will wait for the user to speak and use the model to respond once they do.
    #[serde(rename = "firstMessage", skip_serializing_if = "Option::is_none")]
    pub first_message: Option<String>,
    /// This is the mode for the first message. Default is 'assistant-speaks-first'.  Use: - 'assistant-speaks-first' to have the assistant speak first. - 'assistant-waits-for-user' to have the assistant wait for the user to speak first. - 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. (`assistant.model.messages` at call start, `call.messages` at squad transfer points).  @default 'assistant-speaks-first'
    #[serde(rename = "firstMessageMode", skip_serializing_if = "Option::is_none")]
    pub first_message_mode: Option<FirstMessageMode>,
    /// These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using `model.tools=[VoicemailTool]`. This setting uses Twilio's built-in detection, while the VoicemailTool relies on the model to detect if a voicemail was reached. You can use neither of them, one of them, or both of them. By default, Twilio built-in detection is enabled while VoicemailTool is not.
    #[serde(rename = "voicemailDetection", skip_serializing_if = "Option::is_none")]
    pub voicemail_detection: Option<serde_json::Value>,
    /// These are the messages that will be sent to your Client SDKs. Default is conversation-update,function-call,hang,model-output,speech-update,status-update,transfer-update,transcript,tool-calls,user-interrupted,voice-input. You can check the shape of the messages in ClientMessage schema.
    #[serde(rename = "clientMessages", skip_serializing_if = "Option::is_none")]
    pub client_messages: Option<Vec<ClientMessages>>,
    /// These are the messages that will be sent to your Server URL. Default is conversation-update,end-of-call-report,function-call,hang,speech-update,status-update,tool-calls,transfer-destination-request,user-interrupted. You can check the shape of the messages in ServerMessage schema.
    #[serde(rename = "serverMessages", skip_serializing_if = "Option::is_none")]
    pub server_messages: Option<Vec<ServerMessages>>,
    /// How many seconds of silence to wait before ending the call. Defaults to 30.  @default 30
    #[serde(
        rename = "silenceTimeoutSeconds",
        skip_serializing_if = "Option::is_none"
    )]
    pub silence_timeout_seconds: Option<f64>,
    /// This is the maximum number of seconds that the call will last. When the call reaches this duration, it will be ended.  @default 600 (10 minutes)
    #[serde(rename = "maxDurationSeconds", skip_serializing_if = "Option::is_none")]
    pub max_duration_seconds: Option<f64>,
    /// This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.
    #[serde(rename = "backgroundSound", skip_serializing_if = "Option::is_none")]
    pub background_sound: Option<BackgroundSound>,
    /// This enables filtering of noise and background speech while the user is talking.  Default `false` while in beta.  @default false
    #[serde(
        rename = "backgroundDenoisingEnabled",
        skip_serializing_if = "Option::is_none"
    )]
    pub background_denoising_enabled: Option<bool>,
    /// This determines whether the model's output is used in conversation history rather than the transcription of assistant's speech.  Default `false` while in beta.  @default false
    #[serde(
        rename = "modelOutputInMessagesEnabled",
        skip_serializing_if = "Option::is_none"
    )]
    pub model_output_in_messages_enabled: Option<bool>,
    /// These are the configurations to be passed to the transport providers of assistant's calls, like Twilio. You can store multiple configurations for different transport providers. For a call, only the configuration matching the call transport provider is used.
    #[serde(
        rename = "transportConfigurations",
        skip_serializing_if = "Option::is_none"
    )]
    pub transport_configurations:
        Option<Vec<models::CreateAssistantDtoTransportConfigurationsInner>>,
    /// These are dynamic credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call, but you can supplement them with additional credentials using this field. Dynamic credentials override existing credentials.
    #[serde(rename = "credentials", skip_serializing_if = "Option::is_none")]
    pub credentials: Option<Vec<models::CreateAssistantDtoCredentialsInner>>,
    /// These are values that will be used to replace the template variables in the assistant messages and other text-based fields. This uses LiquidJS syntax. https://liquidjs.com/tutorials/intro-to-liquid.html  So for example, `{{ name }}` will be replaced with the value of `name` in `variableValues`. `{{"now" | date: "%b %d, %Y, %I:%M %p", "America/New_York"}}` will be replaced with the current date and time in New York.  Some Vapi reserved defaults:  - *customer* - the customer object
    #[serde(rename = "variableValues", skip_serializing_if = "Option::is_none")]
    pub variable_values: Option<serde_json::Value>,
    /// This is the name of the assistant.  This is required when you want to transfer between assistants in a call.
    #[serde(rename = "name", skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    /// This is the message that the assistant will say if the call is forwarded to voicemail.  If unspecified, it will hang up.
    #[serde(rename = "voicemailMessage", skip_serializing_if = "Option::is_none")]
    pub voicemail_message: Option<String>,
    /// This is the message that the assistant will say if it ends the call.  If unspecified, it will hang up without saying anything.
    #[serde(rename = "endCallMessage", skip_serializing_if = "Option::is_none")]
    pub end_call_message: Option<String>,
    /// This list contains phrases that, if spoken by the assistant, will trigger the call to be hung up. Case insensitive.
    #[serde(rename = "endCallPhrases", skip_serializing_if = "Option::is_none")]
    pub end_call_phrases: Option<Vec<String>>,
    #[serde(rename = "compliancePlan", skip_serializing_if = "Option::is_none")]
    pub compliance_plan: Option<models::CompliancePlan>,
    /// This is for metadata you want to store on the assistant.
    #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// This is the plan for analysis of assistant's calls. Stored in `call.analysis`.
    #[serde(rename = "analysisPlan", skip_serializing_if = "Option::is_none")]
    pub analysis_plan: Option<models::AnalysisPlan>,
    /// This is the plan for artifacts generated during assistant's calls. Stored in `call.artifact`.  Note: `recordingEnabled` is currently at the root level. It will be moved to `artifactPlan` in the future, but will remain backwards compatible.
    #[serde(rename = "artifactPlan", skip_serializing_if = "Option::is_none")]
    pub artifact_plan: Option<models::ArtifactPlan>,
    /// This is the plan for static predefined messages that can be spoken by the assistant during the call, like `idleMessages`.  Note: `firstMessage`, `voicemailMessage`, and `endCallMessage` are currently at the root level. They will be moved to `messagePlan` in the future, but will remain backwards compatible.
    #[serde(rename = "messagePlan", skip_serializing_if = "Option::is_none")]
    pub message_plan: Option<models::MessagePlan>,
    /// This is the plan for when the assistant should start talking.  You should configure this if you're running into these issues: - The assistant is too slow to start talking after the customer is done speaking. - The assistant is too fast to start talking after the customer is done speaking. - The assistant is so fast that it's actually interrupting the customer.
    #[serde(rename = "startSpeakingPlan", skip_serializing_if = "Option::is_none")]
    pub start_speaking_plan: Option<models::StartSpeakingPlan>,
    /// This is the plan for when assistant should stop talking on customer interruption.  You should configure this if you're running into these issues: - The assistant is too slow to recognize customer's interruption. - The assistant is too fast to recognize customer's interruption. - The assistant is getting interrupted by phrases that are just acknowledgments. - The assistant is getting interrupted by background noises. - The assistant is not properly stopping -- it starts talking right after getting interrupted.
    #[serde(rename = "stopSpeakingPlan", skip_serializing_if = "Option::is_none")]
    pub stop_speaking_plan: Option<models::StopSpeakingPlan>,
    /// This is the plan for real-time monitoring of the assistant's calls.  Usage: - To enable live listening of the assistant's calls, set `monitorPlan.listenEnabled` to `true`. - To enable live control of the assistant's calls, set `monitorPlan.controlEnabled` to `true`.  Note: `serverMessages`, `clientMessages`, `serverUrl`, and `serverUrlSecret` are currently at the root level but will be moved to `monitorPlan` in the future. They will remain backwards compatible.
    #[serde(rename = "monitorPlan", skip_serializing_if = "Option::is_none")]
    pub monitor_plan: Option<models::MonitorPlan>,
    /// These are the credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can provide a subset using this.
    #[serde(rename = "credentialIds", skip_serializing_if = "Option::is_none")]
    pub credential_ids: Option<Vec<String>>,
    /// This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.  The order of precedence is:  1. assistant.server.url 2. phoneNumber.serverUrl 3. org.serverUrl
    #[serde(rename = "server", skip_serializing_if = "Option::is_none")]
    pub server: Option<models::Server>,
    /// This is a set of actions that will be performed on certain events.
    #[serde(rename = "hooks", skip_serializing_if = "Option::is_none")]
    pub hooks: Option<Vec<models::AssistantHooks>>,
    #[serde(rename = "keypadInputPlan", skip_serializing_if = "Option::is_none")]
    pub keypad_input_plan: Option<models::KeypadInputPlan>,
}

impl AssistantOverrides {
    pub fn new() -> AssistantOverrides {
        AssistantOverrides {
            transcriber: None,
            model: None,
            voice: None,
            first_message: None,
            first_message_mode: None,
            voicemail_detection: None,
            client_messages: None,
            server_messages: None,
            silence_timeout_seconds: None,
            max_duration_seconds: None,
            background_sound: None,
            background_denoising_enabled: None,
            model_output_in_messages_enabled: None,
            transport_configurations: None,
            credentials: None,
            variable_values: None,
            name: None,
            voicemail_message: None,
            end_call_message: None,
            end_call_phrases: None,
            compliance_plan: None,
            metadata: None,
            analysis_plan: None,
            artifact_plan: None,
            message_plan: None,
            start_speaking_plan: None,
            stop_speaking_plan: None,
            monitor_plan: None,
            credential_ids: None,
            server: None,
            hooks: None,
            keypad_input_plan: None,
        }
    }
}
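// Example (sketch, not generator output): overrides are partial by design, so
// callers usually set only the fields they care about and fall back to the
// derived `Default` for the rest. The values below are illustrative only.
//
//     let overrides = AssistantOverrides {
//         first_message: Some("Hi, thanks for calling!".to_string()),
//         first_message_mode: Some(FirstMessageMode::AssistantSpeaksFirst),
//         ..AssistantOverrides::default()
//     };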
/// This is the mode for the first message. Default is 'assistant-speaks-first'.  Use: - 'assistant-speaks-first' to have the assistant speak first. - 'assistant-waits-for-user' to have the assistant wait for the user to speak first. - 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. (`assistant.model.messages` at call start, `call.messages` at squad transfer points).  @default 'assistant-speaks-first'
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, ToSchema)]
pub enum FirstMessageMode {
    #[serde(rename = "assistant-speaks-first")]
    AssistantSpeaksFirst,
    #[serde(rename = "assistant-speaks-first-with-model-generated-message")]
    AssistantSpeaksFirstWithModelGeneratedMessage,
    #[serde(rename = "assistant-waits-for-user")]
    AssistantWaitsForUser,
}

impl Default for FirstMessageMode {
    fn default() -> FirstMessageMode {
        Self::AssistantSpeaksFirst
    }
}
/// These are the messages that will be sent to your Client SDKs. Default is conversation-update,function-call,hang,model-output,speech-update,status-update,transfer-update,transcript,tool-calls,user-interrupted,voice-input. You can check the shape of the messages in ClientMessage schema.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, ToSchema)]
pub enum ClientMessages {
    #[serde(rename = "conversation-update")]
    ConversationUpdate,
    #[serde(rename = "function-call")]
    FunctionCall,
    #[serde(rename = "function-call-result")]
    FunctionCallResult,
    #[serde(rename = "hang")]
    Hang,
    #[serde(rename = "language-changed")]
    LanguageChanged,
    #[serde(rename = "metadata")]
    Metadata,
    #[serde(rename = "model-output")]
    ModelOutput,
    #[serde(rename = "speech-update")]
    SpeechUpdate,
    #[serde(rename = "status-update")]
    StatusUpdate,
    #[serde(rename = "transcript")]
    Transcript,
    #[serde(rename = "tool-calls")]
    ToolCalls,
    #[serde(rename = "tool-calls-result")]
    ToolCallsResult,
    #[serde(rename = "transfer-update")]
    TransferUpdate,
    #[serde(rename = "user-interrupted")]
    UserInterrupted,
    #[serde(rename = "voice-input")]
    VoiceInput,
}

impl Default for ClientMessages {
    fn default() -> ClientMessages {
        Self::ConversationUpdate
    }
}
/// These are the messages that will be sent to your Server URL. Default is conversation-update,end-of-call-report,function-call,hang,speech-update,status-update,tool-calls,transfer-destination-request,user-interrupted. You can check the shape of the messages in ServerMessage schema.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, ToSchema)]
pub enum ServerMessages {
    #[serde(rename = "conversation-update")]
    ConversationUpdate,
    #[serde(rename = "end-of-call-report")]
    EndOfCallReport,
    #[serde(rename = "function-call")]
    FunctionCall,
    #[serde(rename = "hang")]
    Hang,
    #[serde(rename = "language-changed")]
    LanguageChanged,
    #[serde(rename = "language-change-detected")]
    LanguageChangeDetected,
    #[serde(rename = "model-output")]
    ModelOutput,
    #[serde(rename = "phone-call-control")]
    PhoneCallControl,
    #[serde(rename = "speech-update")]
    SpeechUpdate,
    #[serde(rename = "status-update")]
    StatusUpdate,
    #[serde(rename = "transcript")]
    Transcript,
    #[serde(rename = "transcript[transcriptType=\"final\"]")]
    TranscriptLeftSquareBracketTranscriptTypeEqualDoubleQuoteFinalDoubleQuoteRightSquareBracket,
    #[serde(rename = "tool-calls")]
    ToolCalls,
    #[serde(rename = "transfer-destination-request")]
    TransferDestinationRequest,
    #[serde(rename = "transfer-update")]
    TransferUpdate,
    #[serde(rename = "user-interrupted")]
    UserInterrupted,
    #[serde(rename = "voice-input")]
    VoiceInput,
}

impl Default for ServerMessages {
    fn default() -> ServerMessages {
        Self::ConversationUpdate
    }
}
/// This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, ToSchema)]
pub enum BackgroundSound {
    #[serde(rename = "off")]
    Off,
    #[serde(rename = "office")]
    Office,
}

impl Default for BackgroundSound {
    fn default() -> BackgroundSound {
        Self::Off
    }
}
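
// The tests below are a hand-written sketch, not generator output. They only
// exercise items defined in this file plus `serde_json`, which the crate
// already depends on for `serde_json::Value`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn empty_overrides_serialize_to_an_empty_object() {
        // Every field is an `Option` marked `skip_serializing_if = "Option::is_none"`,
        // so a fresh instance should serialize to `{}`.
        let overrides = AssistantOverrides::new();
        assert_eq!(serde_json::to_string(&overrides).unwrap(), "{}");
    }

    #[test]
    fn fields_and_enums_use_their_wire_names() {
        // Struct fields serialize under their camelCase `rename`s and enum
        // variants under their kebab-case wire values.
        let overrides = AssistantOverrides {
            first_message_mode: Some(FirstMessageMode::AssistantWaitsForUser),
            client_messages: Some(vec![ClientMessages::Transcript]),
            ..AssistantOverrides::new()
        };
        let value = serde_json::to_value(&overrides).unwrap();
        assert_eq!(value["firstMessageMode"], "assistant-waits-for-user");
        assert_eq!(value["clientMessages"][0], "transcript");
    }
}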