vapi_client/models/start_speaking_plan.rs

/*
 * Vapi API
 *
 * Voice AI for developers.
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */

use crate::models;
use serde::{Deserialize, Serialize};

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct StartSpeakingPlan {
    /// This is how long the assistant waits before speaking. Defaults to 0.4.  This is the minimum it will wait, but if there is latency in the pipeline, this minimum will be exceeded. This is intended as a stopgap in case the pipeline is moving too fast.  Example: - If the model generates tokens and the voice generates bytes within 100ms, the pipeline still waits 300ms before outputting speech.  Usage: - If the customer is taking long pauses, set this to a higher value. - If the assistant is accidentally jumping in too much, set this to a higher value.  @default 0.4
    #[serde(rename = "waitSeconds", skip_serializing_if = "Option::is_none")]
    pub wait_seconds: Option<f64>,
    #[serde(
        rename = "smartEndpointingEnabled",
        skip_serializing_if = "Option::is_none"
    )]
    pub smart_endpointing_enabled: Option<models::StartSpeakingPlanSmartEndpointingEnabled>,
    #[serde(
        rename = "smartEndpointingPlan",
        skip_serializing_if = "Option::is_none"
    )]
    pub smart_endpointing_plan: Option<models::StartSpeakingPlanSmartEndpointingPlan>,
    /// These are the custom endpointing rules to set an endpointing timeout based on a regex on the customer's speech or the assistant's last message.  Usage: - If you have yes/no questions like \"are you interested in a loan?\", you can set a shorter timeout. - If you have questions where the customer may pause to look up information like \"what's my account number?\", you can set a longer timeout. - If you want to wait longer while the customer is enumerating a list of numbers, you can set a longer timeout.  These rules have the highest precedence and will override both `smartEndpointingPlan` and `transcriptionEndpointingPlan` when a rule is matched.  The rules are evaluated in order and the first one that matches will be used.  Order of precedence for endpointing: 1. customEndpointingRules (if any match) 2. smartEndpointingPlan (if set) 3. transcriptionEndpointingPlan  @default []
    #[serde(
        rename = "customEndpointingRules",
        skip_serializing_if = "Option::is_none"
    )]
    pub custom_endpointing_rules: Option<Vec<models::StartSpeakingPlanCustomEndpointingRulesInner>>,
    /// This determines how a customer's speech is considered done (endpointing) using the transcription of the customer's speech.  Once an endpoint is triggered, the request is sent to `assistant.model`.  Note: This plan is only used if `smartEndpointingPlan` is not set. If both are provided, `smartEndpointingPlan` takes precedence. This plan will also be overridden by any matching `customEndpointingRules`.
    #[serde(
        rename = "transcriptionEndpointingPlan",
        skip_serializing_if = "Option::is_none"
    )]
    pub transcription_endpointing_plan: Option<models::TranscriptionEndpointingPlan>,
}

impl StartSpeakingPlan {
    pub fn new() -> StartSpeakingPlan {
        StartSpeakingPlan {
            wait_seconds: None,
            smart_endpointing_enabled: None,
            smart_endpointing_plan: None,
            custom_endpointing_rules: None,
            transcription_endpointing_plan: None,
        }
    }
}
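
// A minimal usage sketch, not part of the generated file. It shows how a
// StartSpeakingPlan can be built with a longer `waitSeconds` and how unset
// optional fields are omitted from the JSON payload via `skip_serializing_if`.
// Assumptions: the crate's usual serde_json dependency is available, and the
// 0.8s wait value is purely illustrative, not a recommended setting.
#[cfg(test)]
mod tests {
    use super::StartSpeakingPlan;

    #[test]
    fn serializes_only_set_fields() {
        // Start from the all-None plan returned by new() and override only
        // wait_seconds using struct update syntax.
        let plan = StartSpeakingPlan {
            wait_seconds: Some(0.8),
            ..StartSpeakingPlan::new()
        };

        // Only `waitSeconds` should appear in the output; the remaining
        // fields are None and are skipped during serialization.
        let json = serde_json::to_string(&plan).expect("plan serializes");
        assert_eq!(json, r#"{"waitSeconds":0.8}"#);
    }
}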