// vapi_client/models/start_speaking_plan.rs
/*
 * Vapi API
 *
 * Voice AI for developers.
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */
10
11use crate::models;
12use serde::{Deserialize, Serialize};
13
/// Configures when and how the assistant begins speaking after customer speech
/// (endpointing). All fields are optional; `None` fields are omitted from the
/// serialized JSON (`skip_serializing_if`), letting the API apply its own defaults.
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct StartSpeakingPlan {
    /// This is how long assistant waits before speaking. Defaults to 0.4.
    ///
    /// This is the minimum it will wait but if there is latency in the pipeline,
    /// this minimum will be exceeded. This is intended as a stopgap in case the
    /// pipeline is moving too fast.
    ///
    /// Example:
    /// - If model generates tokens and voice generates bytes within 100ms, the
    ///   pipeline still waits 300ms before outputting speech.
    ///
    /// Usage:
    /// - If the customer is taking long pauses, set this to a higher value.
    /// - If the assistant is accidentally jumping in too much, set this to a
    ///   higher value.
    ///
    /// @default 0.4
    #[serde(rename = "waitSeconds", skip_serializing_if = "Option::is_none")]
    pub wait_seconds: Option<f64>,
    /// This determines if a customer speech is considered done (endpointing)
    /// using a Vapi custom-trained model on customer's speech. This is good for
    /// middle-of-thought detection. Alternatively, you can use LiveKit's smart
    /// endpointing model (it only supports English, though).
    ///
    /// Once an endpoint is triggered, the request is sent to `assistant.model`.
    ///
    /// Usage:
    /// - If your conversations are long-form and you want assistant to wait
    ///   smartly even if customer pauses for a bit to think, you can use this
    ///   instead.
    ///
    /// This overrides `transcriptionEndpointingPlan`.
    ///
    /// @default false
    #[serde(rename = "smartEndpointingEnabled", skip_serializing_if = "Option::is_none")]
    pub smart_endpointing_enabled: Option<SmartEndpointingEnabled>,
    /// These are the custom endpointing rules to set an endpointing timeout
    /// based on a regex on the customer's speech or the assistant's last message.
    ///
    /// Usage:
    /// - If you have yes/no questions like \"are you interested in a loan?\",
    ///   you can set a shorter timeout.
    /// - If you have questions where the customer may pause to look up
    ///   information like \"what's my account number?\", you can set a longer
    ///   timeout.
    /// - If you want to wait longer while customer is enumerating a list of
    ///   numbers, you can set a longer timeout.
    ///
    /// These override `transcriptionEndpointingPlan` and
    /// `smartEndpointingEnabled` when a rule is matched. The rules are evaluated
    /// in order and the first one that matches will be used.
    ///
    /// @default []
    #[serde(rename = "customEndpointingRules", skip_serializing_if = "Option::is_none")]
    pub custom_endpointing_rules: Option<Vec<models::StartSpeakingPlanCustomEndpointingRulesInner>>,
    /// This determines how a customer speech is considered done (endpointing)
    /// using the transcription of customer's speech. Once an endpoint is
    /// triggered, the request is sent to `assistant.model`.
    #[serde(rename = "transcriptionEndpointingPlan", skip_serializing_if = "Option::is_none")]
    pub transcription_endpointing_plan: Option<Box<models::TranscriptionEndpointingPlan>>,
}
29
30impl StartSpeakingPlan {
31 pub fn new() -> StartSpeakingPlan {
32 StartSpeakingPlan {
33 wait_seconds: None,
34 smart_endpointing_enabled: None,
35 custom_endpointing_rules: None,
36 transcription_endpointing_plan: None,
37 }
38 }
39}
/// This determines if a customer speech is considered done (endpointing) using
/// a Vapi custom-trained model on customer's speech. This is good for
/// middle-of-thought detection. Alternatively, you can use LiveKit's smart
/// endpointing model (it only supports English, though).
///
/// Once an endpoint is triggered, the request is sent to `assistant.model`.
///
/// Usage:
/// - If your conversations are long-form and you want assistant to wait smartly
///   even if customer pauses for a bit to think, you can use this instead.
///
/// This overrides `transcriptionEndpointingPlan`.
///
/// @default false
// NOTE: variant order matters for the derived `Ord`/`PartialOrd`; do not reorder.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum SmartEndpointingEnabled {
    // Serialized as the JSON string "true" (not a boolean).
    #[serde(rename = "true")]
    True,
    // Serialized as the JSON string "false" (not a boolean).
    #[serde(rename = "false")]
    False,
    // Use LiveKit's smart endpointing model (English only per the doc above).
    #[serde(rename = "livekit")]
    Livekit,
}
50
51impl Default for SmartEndpointingEnabled {
52 fn default() -> SmartEndpointingEnabled {
53 Self::True
54 }
55}
56