vapi_client/models/assistant.rs

/*
 * Vapi API
 *
 * Voice AI for developers.
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */

use crate::models;
use serde::{Deserialize, Serialize};

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct Assistant {
    #[serde(rename = "transcriber", skip_serializing_if = "Option::is_none")]
    pub transcriber: Option<models::CreateAssistantDtoTranscriber>,
    #[serde(rename = "model", skip_serializing_if = "Option::is_none")]
    pub model: Option<models::CreateAssistantDtoModel>,
    #[serde(rename = "voice", skip_serializing_if = "Option::is_none")]
    pub voice: Option<models::CreateAssistantDtoVoice>,
    /// This is the first message that the assistant will say. This can also be a URL to a containerized audio file (mp3, wav, etc.).  If unspecified, the assistant will wait for the user to speak and use the model to respond once they do.
    #[serde(rename = "firstMessage", skip_serializing_if = "Option::is_none")]
    pub first_message: Option<String>,
    #[serde(
        rename = "firstMessageInterruptionsEnabled",
        skip_serializing_if = "Option::is_none"
    )]
    pub first_message_interruptions_enabled: Option<bool>,
    /// This is the mode for the first message. Default is 'assistant-speaks-first'.  Use: - 'assistant-speaks-first' to have the assistant speak first. - 'assistant-waits-for-user' to have the assistant wait for the user to speak first. - 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. (`assistant.model.messages` at call start, `call.messages` at squad transfer points).  @default 'assistant-speaks-first'
    #[serde(rename = "firstMessageMode", skip_serializing_if = "Option::is_none")]
    pub first_message_mode: Option<FirstMessageModeTrue>,
    #[serde(rename = "voicemailDetection", skip_serializing_if = "Option::is_none")]
    pub voicemail_detection: Option<models::CreateAssistantDtoVoicemailDetection>,
    /// These are the messages that will be sent to your Client SDKs. Default is conversation-update, function-call, hang, model-output, speech-update, status-update, transfer-update, transcript, tool-calls, user-interrupted, voice-input, workflow.node.started. You can check the shape of the messages in the ClientMessage schema.
    #[serde(rename = "clientMessages", skip_serializing_if = "Option::is_none")]
    pub client_messages: Option<Vec<ClientMessagesTrue>>,
    /// These are the messages that will be sent to your Server URL. Default is conversation-update, end-of-call-report, function-call, hang, speech-update, status-update, tool-calls, transfer-destination-request, user-interrupted. You can check the shape of the messages in the ServerMessage schema.
    #[serde(rename = "serverMessages", skip_serializing_if = "Option::is_none")]
    pub server_messages: Option<Vec<ServerMessagesTrue>>,
    /// How many seconds of silence to wait before ending the call. Defaults to 30.  @default 30
    #[serde(
        rename = "silenceTimeoutSeconds",
        skip_serializing_if = "Option::is_none"
    )]
    pub silence_timeout_seconds: Option<f64>,
    /// This is the maximum number of seconds that the call will last. When the call reaches this duration, it will be ended.  @default 600 (10 minutes)
    #[serde(rename = "maxDurationSeconds", skip_serializing_if = "Option::is_none")]
    pub max_duration_seconds: Option<f64>,
    #[serde(rename = "backgroundSound", skip_serializing_if = "Option::is_none")]
    pub background_sound: Option<models::CreateAssistantDtoBackgroundSound>,
    /// This enables filtering of noise and background speech while the user is talking.  Default `false` while in beta.  @default false
    #[serde(
        rename = "backgroundDenoisingEnabled",
        skip_serializing_if = "Option::is_none"
    )]
    pub background_denoising_enabled: Option<bool>,
    /// This determines whether the model's output is used in conversation history rather than the transcription of the assistant's speech.  Default `false` while in beta.  @default false
    #[serde(
        rename = "modelOutputInMessagesEnabled",
        skip_serializing_if = "Option::is_none"
    )]
    pub model_output_in_messages_enabled: Option<bool>,
    /// These are the configurations to be passed to the transport providers of the assistant's calls, like Twilio. You can store multiple configurations for different transport providers. For a call, only the configuration matching the call's transport provider is used.
    #[serde(
        rename = "transportConfigurations",
        skip_serializing_if = "Option::is_none"
    )]
    pub transport_configurations:
        Option<Vec<models::CreateAssistantDtoTransportConfigurationsInner>>,
    /// This is the plan for observability of the assistant's calls.  Currently, only Langfuse is supported.
    #[serde(rename = "observabilityPlan", skip_serializing_if = "Option::is_none")]
    pub observability_plan: Option<models::LangfuseObservabilityPlan>,
    /// These are dynamic credentials that will be used for the assistant's calls. By default, all the credentials are available for use in the call, but you can supplement them with additional credentials using this. Dynamic credentials override existing credentials.
    #[serde(rename = "credentials", skip_serializing_if = "Option::is_none")]
    pub credentials: Option<Vec<models::WorkflowUserEditableCredentialsInner>>,
    /// This is a set of actions that will be performed on certain events.
    #[serde(rename = "hooks", skip_serializing_if = "Option::is_none")]
    pub hooks: Option<Vec<models::CreateAssistantDtoHooksInner>>,
    /// This is the name of the assistant.  This is required when you want to transfer between assistants in a call.
    #[serde(rename = "name", skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    /// This is the message that the assistant will say if the call is forwarded to voicemail.  If unspecified, it will hang up.
    #[serde(rename = "voicemailMessage", skip_serializing_if = "Option::is_none")]
    pub voicemail_message: Option<String>,
    /// This is the message that the assistant will say if it ends the call.  If unspecified, it will hang up without saying anything.
    #[serde(rename = "endCallMessage", skip_serializing_if = "Option::is_none")]
    pub end_call_message: Option<String>,
    /// This list contains phrases that, if spoken by the assistant, will trigger the call to be hung up. Case insensitive.
    #[serde(rename = "endCallPhrases", skip_serializing_if = "Option::is_none")]
    pub end_call_phrases: Option<Vec<String>>,
    #[serde(rename = "compliancePlan", skip_serializing_if = "Option::is_none")]
    pub compliance_plan: Option<models::CompliancePlan>,
    /// This is for metadata you want to store on the assistant.
    #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
    /// This enables filtering of noise and background speech while the user is talking.  Features: - Smart denoising using Krisp - Fourier denoising  Smart denoising can be combined with or used independently of Fourier denoising.  Order of precedence: - Smart denoising - Fourier denoising
    #[serde(
        rename = "backgroundSpeechDenoisingPlan",
        skip_serializing_if = "Option::is_none"
    )]
    pub background_speech_denoising_plan: Option<models::BackgroundSpeechDenoisingPlan>,
    /// This is the plan for analysis of the assistant's calls. Stored in `call.analysis`.
    #[serde(rename = "analysisPlan", skip_serializing_if = "Option::is_none")]
    pub analysis_plan: Option<models::AnalysisPlan>,
    /// This is the plan for artifacts generated during the assistant's calls. Stored in `call.artifact`.
    #[serde(rename = "artifactPlan", skip_serializing_if = "Option::is_none")]
    pub artifact_plan: Option<models::ArtifactPlan>,
    /// This is the plan for static predefined messages that can be spoken by the assistant during the call, like `idleMessages`.  Note: `firstMessage`, `voicemailMessage`, and `endCallMessage` are currently at the root level. They will be moved to `messagePlan` in the future, but will remain backwards compatible.
    #[serde(rename = "messagePlan", skip_serializing_if = "Option::is_none")]
    pub message_plan: Option<models::MessagePlan>,
    /// This is the plan for when the assistant should start talking.  You should configure this if you're running into these issues: - The assistant is too slow to start talking after the customer is done speaking. - The assistant is too fast to start talking after the customer is done speaking. - The assistant is so fast that it's actually interrupting the customer.
    #[serde(rename = "startSpeakingPlan", skip_serializing_if = "Option::is_none")]
    pub start_speaking_plan: Option<models::StartSpeakingPlan>,
    /// This is the plan for when the assistant should stop talking on customer interruption.  You should configure this if you're running into these issues: - The assistant is too slow to recognize the customer's interruption. - The assistant is too fast to recognize the customer's interruption. - The assistant is getting interrupted by phrases that are just acknowledgments. - The assistant is getting interrupted by background noises. - The assistant is not properly stopping -- it starts talking right after getting interrupted.
    #[serde(rename = "stopSpeakingPlan", skip_serializing_if = "Option::is_none")]
    pub stop_speaking_plan: Option<models::StopSpeakingPlan>,
    /// This is the plan for real-time monitoring of the assistant's calls.  Usage: - To enable live listening of the assistant's calls, set `monitorPlan.listenEnabled` to `true`. - To enable live control of the assistant's calls, set `monitorPlan.controlEnabled` to `true`.
    #[serde(rename = "monitorPlan", skip_serializing_if = "Option::is_none")]
    pub monitor_plan: Option<models::MonitorPlan>,
    /// These are the credentials that will be used for the assistant's calls. By default, all the credentials are available for use in the call, but you can provide a subset using this.
    #[serde(rename = "credentialIds", skip_serializing_if = "Option::is_none")]
    pub credential_ids: Option<Vec<String>>,
    /// This is where Vapi will send webhooks. You can find all available webhooks along with their shape in the ServerMessage schema.  The order of precedence is:  1. assistant.server.url 2. phoneNumber.serverUrl 3. org.serverUrl
    #[serde(rename = "server", skip_serializing_if = "Option::is_none")]
    pub server: Option<models::Server>,
    #[serde(rename = "keypadInputPlan", skip_serializing_if = "Option::is_none")]
    pub keypad_input_plan: Option<models::KeypadInputPlan>,
    /// This is the unique identifier for the assistant.
    #[serde(rename = "id")]
    pub id: String,
    /// This is the unique identifier for the org that this assistant belongs to.
    #[serde(rename = "orgId")]
    pub org_id: String,
    /// This is the ISO 8601 date-time string of when the assistant was created.
    #[serde(rename = "createdAt")]
    pub created_at: String,
    /// This is the ISO 8601 date-time string of when the assistant was last updated.
    #[serde(rename = "updatedAt")]
    pub updated_at: String,
}

impl Assistant {
    pub fn new(id: String, org_id: String, created_at: String, updated_at: String) -> Assistant {
        Assistant {
            transcriber: None,
            model: None,
            voice: None,
            first_message: None,
            first_message_interruptions_enabled: None,
            first_message_mode: None,
            voicemail_detection: None,
            client_messages: None,
            server_messages: None,
            silence_timeout_seconds: None,
            max_duration_seconds: None,
            background_sound: None,
            background_denoising_enabled: None,
            model_output_in_messages_enabled: None,
            transport_configurations: None,
            observability_plan: None,
            credentials: None,
            hooks: None,
            name: None,
            voicemail_message: None,
            end_call_message: None,
            end_call_phrases: None,
            compliance_plan: None,
            metadata: None,
            background_speech_denoising_plan: None,
            analysis_plan: None,
            artifact_plan: None,
            message_plan: None,
            start_speaking_plan: None,
            stop_speaking_plan: None,
            monitor_plan: None,
            credential_ids: None,
            server: None,
            keypad_input_plan: None,
            id,
            org_id,
            created_at,
            updated_at,
        }
    }
}
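
// A minimal usage sketch added for illustration; it is not part of the generated
// output. It shows how `Assistant::new` plus the `skip_serializing_if` attributes
// above interact when serializing. The module name, test name, and the id/org/date
// strings are made up for the example; `serde_json` is assumed to be available,
// since this file already uses `serde_json::Value`.
#[cfg(test)]
mod assistant_serialization_sketch {
    use super::*;

    #[test]
    fn none_fields_are_omitted_from_json() {
        // `new` only takes the required fields; every optional field starts as `None`.
        let mut assistant = Assistant::new(
            "assistant-id".to_string(),
            "org-id".to_string(),
            "2024-01-01T00:00:00Z".to_string(),
            "2024-01-01T00:00:00Z".to_string(),
        );
        assistant.name = Some("Support Agent".to_string());
        assistant.first_message_mode = Some(FirstMessageModeTrue::AssistantSpeaksFirst);

        let json = serde_json::to_value(&assistant).expect("Assistant should serialize");
        // Required fields are emitted under their camelCase renames.
        assert_eq!(json["orgId"], "org-id");
        // Enum variants serialize to their renamed wire values.
        assert_eq!(json["firstMessageMode"], "assistant-speaks-first");
        // Optional fields left as `None` are skipped entirely.
        assert!(json.get("voicemailMessage").is_none());
    }
}
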
/// This is the mode for the first message. Default is 'assistant-speaks-first'.  Use: - 'assistant-speaks-first' to have the assistant speak first. - 'assistant-waits-for-user' to have the assistant wait for the user to speak first. - 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. (`assistant.model.messages` at call start, `call.messages` at squad transfer points).  @default 'assistant-speaks-first'
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum FirstMessageModeTrue {
    #[serde(rename = "assistant-speaks-first")]
    AssistantSpeaksFirst,
    #[serde(rename = "assistant-speaks-first-with-model-generated-message")]
    AssistantSpeaksFirstWithModelGeneratedMessage,
    #[serde(rename = "assistant-waits-for-user")]
    AssistantWaitsForUser,
}

impl Default for FirstMessageModeTrue {
    fn default() -> FirstMessageModeTrue {
        Self::AssistantSpeaksFirst
    }
}
/// These are the messages that will be sent to your Client SDKs. Default is conversation-update, function-call, hang, model-output, speech-update, status-update, transfer-update, transcript, tool-calls, user-interrupted, voice-input, workflow.node.started. You can check the shape of the messages in the ClientMessage schema.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ClientMessagesTrue {
    #[serde(rename = "conversation-update")]
    ConversationUpdate,
    #[serde(rename = "function-call")]
    FunctionCall,
    #[serde(rename = "function-call-result")]
    FunctionCallResult,
    #[serde(rename = "hang")]
    Hang,
    #[serde(rename = "language-changed")]
    LanguageChanged,
    #[serde(rename = "metadata")]
    Metadata,
    #[serde(rename = "model-output")]
    ModelOutput,
    #[serde(rename = "speech-update")]
    SpeechUpdate,
    #[serde(rename = "status-update")]
    StatusUpdate,
    #[serde(rename = "transcript")]
    Transcript,
    #[serde(rename = "tool-calls")]
    ToolCalls,
    #[serde(rename = "tool-calls-result")]
    ToolCallsResult,
    #[serde(rename = "tool.completed")]
    ToolPeriodCompleted,
    #[serde(rename = "transfer-update")]
    TransferUpdate,
    #[serde(rename = "user-interrupted")]
    UserInterrupted,
    #[serde(rename = "voice-input")]
    VoiceInput,
    #[serde(rename = "workflow.node.started")]
    WorkflowPeriodNodePeriodStarted,
}

impl Default for ClientMessagesTrue {
    fn default() -> ClientMessagesTrue {
        Self::ConversationUpdate
    }
}
/// These are the messages that will be sent to your Server URL. Default is conversation-update, end-of-call-report, function-call, hang, speech-update, status-update, tool-calls, transfer-destination-request, user-interrupted. You can check the shape of the messages in the ServerMessage schema.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ServerMessagesTrue {
    #[serde(rename = "conversation-update")]
    ConversationUpdate,
    #[serde(rename = "end-of-call-report")]
    EndOfCallReport,
    #[serde(rename = "function-call")]
    FunctionCall,
    #[serde(rename = "hang")]
    Hang,
    #[serde(rename = "language-changed")]
    LanguageChanged,
    #[serde(rename = "language-change-detected")]
    LanguageChangeDetected,
    #[serde(rename = "model-output")]
    ModelOutput,
    #[serde(rename = "phone-call-control")]
    PhoneCallControl,
    #[serde(rename = "speech-update")]
    SpeechUpdate,
    #[serde(rename = "status-update")]
    StatusUpdate,
    #[serde(rename = "transcript")]
    Transcript,
    #[serde(rename = "transcript[transcriptType=\"final\"]")]
    TranscriptLeftSquareBracketTranscriptTypeEqualDoubleQuoteFinalDoubleQuoteRightSquareBracket,
    #[serde(rename = "tool-calls")]
    ToolCalls,
    #[serde(rename = "transfer-destination-request")]
    TransferDestinationRequest,
    #[serde(rename = "transfer-update")]
    TransferUpdate,
    #[serde(rename = "user-interrupted")]
    UserInterrupted,
    #[serde(rename = "voice-input")]
    VoiceInput,
}

impl Default for ServerMessagesTrue {
    fn default() -> ServerMessagesTrue {
        Self::ConversationUpdate
    }
}
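
// Another illustrative sketch, not part of the generated code: the message enums
// above map to their exact wire strings via `#[serde(rename = ...)]`, including the
// bracketed `transcript[transcriptType="final"]` server message. The module and
// test names are hypothetical; `serde_json` is assumed to be available as above.
#[cfg(test)]
mod message_enum_wire_format_sketch {
    use super::*;

    #[test]
    fn enum_variants_round_trip_through_their_wire_strings() {
        // The bracketed server message value is emitted with its inner quotes escaped.
        let wire = serde_json::to_string(
            &ServerMessagesTrue::TranscriptLeftSquareBracketTranscriptTypeEqualDoubleQuoteFinalDoubleQuoteRightSquareBracket,
        )
        .expect("ServerMessagesTrue should serialize");
        assert_eq!(wire, r#""transcript[transcriptType=\"final\"]""#);

        // Client messages deserialize from their kebab-case / dotted wire values.
        let parsed: ClientMessagesTrue = serde_json::from_str(r#""workflow.node.started""#)
            .expect("ClientMessagesTrue should deserialize");
        assert_eq!(parsed, ClientMessagesTrue::WorkflowPeriodNodePeriodStarted);
    }
}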