vapi_client/models/start_speaking_plan_custom_endpointing_rules_inner.rs

/*
 * Vapi API
 *
 * Voice AI for developers.
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */

use crate::models;
use serde::{Deserialize, Serialize};

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum StartSpeakingPlanCustomEndpointingRulesInner {
    AssistantCustomEndpointingRule(models::AssistantCustomEndpointingRule),
    CustomerCustomEndpointingRule(models::CustomerCustomEndpointingRule),
    BothCustomEndpointingRule(models::BothCustomEndpointingRule),
}

impl Default for StartSpeakingPlanCustomEndpointingRulesInner {
    fn default() -> Self {
        Self::AssistantCustomEndpointingRule(Default::default())
    }
}
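
// A minimal sketch of how a caller can branch on this `#[serde(untagged)]`
// union. Only the variant names defined above are relied on; the inner rule
// structs live in sibling model files, so none of their fields are assumed.
// `rule_kind` is a hypothetical helper, not part of the generated API.
#[allow(dead_code)]
fn rule_kind(rule: &StartSpeakingPlanCustomEndpointingRulesInner) -> &'static str {
    match rule {
        StartSpeakingPlanCustomEndpointingRulesInner::AssistantCustomEndpointingRule(_) => "assistant",
        StartSpeakingPlanCustomEndpointingRulesInner::CustomerCustomEndpointingRule(_) => "customer",
        StartSpeakingPlanCustomEndpointingRulesInner::BothCustomEndpointingRule(_) => "both",
    }
}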
/// This endpointing rule is based on the last assistant message before the customer started speaking.
///
/// Flow:
/// - Assistant speaks
/// - Customer starts speaking
/// - Customer transcription comes in
/// - This rule is evaluated on the last assistant message
/// - If a match is found based on `regex`, the endpointing timeout is set to `timeoutSeconds`
///
/// Usage:
/// - If you have yes/no questions in your use case like "are you interested in a loan?", you can set a shorter timeout.
/// - If you have questions where the customer may pause to look up information like "what's my account number?", you can set a longer timeout.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum TypeTrue {
    #[serde(rename = "assistant")]
    Assistant,
    #[serde(rename = "customer")]
    Customer,
    #[serde(rename = "both")]
    Both,
}

impl Default for TypeTrue {
    fn default() -> TypeTrue {
        Self::Assistant
    }
}
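
// Hedged example of the `TypeTrue` wire format, derived only from the
// `#[serde(rename = ...)]` attributes above. It assumes `serde_json` is
// available to the generated crate (a common dependency of openapi-generator
// Rust clients, but an assumption here).
#[cfg(test)]
mod type_true_tests {
    use super::TypeTrue;

    #[test]
    fn serializes_to_lowercase_discriminators() {
        // Each variant serializes to the lowercase string given in its rename attribute.
        assert_eq!(serde_json::to_string(&TypeTrue::Assistant).unwrap(), "\"assistant\"");
        assert_eq!(serde_json::to_string(&TypeTrue::Customer).unwrap(), "\"customer\"");
        assert_eq!(serde_json::to_string(&TypeTrue::Both).unwrap(), "\"both\"");
    }

    #[test]
    fn default_is_assistant() {
        // Mirrors the `Default` impl above.
        assert_eq!(TypeTrue::default(), TypeTrue::Assistant);
    }
}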