vapi_client/models/both_custom_endpointing_rule.rs

/*
 * Vapi API
 *
 * Voice AI for developers.
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */

use crate::models;
use serde::{Deserialize, Serialize};

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct BothCustomEndpointingRule {
    /// This endpointing rule is based on both the last assistant message and the current customer message as they are speaking.  Flow: - Assistant speaks - Customer starts speaking - Customer transcription comes in - This rule is evaluated on the last assistant message and the current customer transcription - If assistant message matches `assistantRegex` AND customer message matches `customerRegex`, the endpointing timeout is set to `timeoutSeconds`  Usage: - If you want to wait longer while customer is speaking numbers, you can set a longer timeout.
    #[serde(rename = "type")]
    pub r#type: TypeTrue,
    /// This is the regex pattern to match the assistant's message.  Note: - This works by using the `RegExp.test` method in Node.JS. Eg. `/hello/.test("hello there")` will return `true`.  Hot tip: - In JavaScript, escape `\` when sending the regex pattern. Eg. `"hello\sthere"` will be sent over the wire as `"hellosthere"`. Send `"hello\\sthere"` instead. - `RegExp.test` does substring matching, so `/cat/.test("I love cats")` will return `true`. To do full string matching, send "^cat$".
20    #[serde(rename = "assistantRegex")]
21    pub assistant_regex: String,
22    /// These are the options for the assistant's message regex match. Defaults to all disabled.  @default []
23    #[serde(
24        rename = "assistantRegexOptions",
25        skip_serializing_if = "Option::is_none"
26    )]
27    pub assistant_regex_options: Option<Vec<models::RegexOption>>,
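    /// This is the regex pattern to match the customer's message (documented here by analogy with `assistantRegex`; see that field's notes on `RegExp.test` semantics and escaping).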
28    #[serde(rename = "customerRegex")]
29    pub customer_regex: String,
30    /// These are the options for the customer's message regex match. Defaults to all disabled.  @default []
31    #[serde(
32        rename = "customerRegexOptions",
33        skip_serializing_if = "Option::is_none"
34    )]
35    pub customer_regex_options: Option<Vec<models::RegexOption>>,
36    /// This is the endpointing timeout in seconds, if the rule is matched.
37    #[serde(rename = "timeoutSeconds")]
38    pub timeout_seconds: f64,
39}
40
impl BothCustomEndpointingRule {
    pub fn new(
        r#type: TypeTrue,
        assistant_regex: String,
        customer_regex: String,
        timeout_seconds: f64,
    ) -> BothCustomEndpointingRule {
        BothCustomEndpointingRule {
            r#type,
            assistant_regex,
            assistant_regex_options: None,
            customer_regex,
            customer_regex_options: None,
            timeout_seconds,
        }
    }
}
/// This endpointing rule is based on both the last assistant message and the current customer message as they are speaking.  Flow: - Assistant speaks - Customer starts speaking - Customer transcription comes in - This rule is evaluated on the last assistant message and the current customer transcription - If assistant message matches `assistantRegex` AND customer message matches `customerRegex`, the endpointing timeout is set to `timeoutSeconds`  Usage: - If you want to wait longer while customer is speaking numbers, you can set a longer timeout.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum TypeTrue {
    #[serde(rename = "both")]
    Both,
}

impl Default for TypeTrue {
    fn default() -> TypeTrue {
        Self::Both
    }
}
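
// A minimal usage sketch (not part of the generated model): it exercises the
// constructor above and the serde renames on the struct fields. It assumes
// `serde_json` is available, as is typical for these generated clients; the
// regex patterns and the 3.0-second timeout are illustrative values only.
#[cfg(test)]
mod tests {
    use super::*;

    // Mirrors the usage note in the struct docs: wait longer while the
    // customer is speaking numbers.
    #[test]
    fn builds_rule_for_numeric_customer_replies() {
        let rule = BothCustomEndpointingRule::new(
            TypeTrue::Both,
            // Last assistant message asked for a number.
            "(phone|account).*number".to_string(),
            // Customer is reading digits back. Note the doubled backslash in
            // the Rust literal: the string holds `^\d+$`, and serde_json
            // re-escapes the backslash when writing JSON text.
            "^\\d+$".to_string(),
            3.0,
        );
        assert_eq!(rule.timeout_seconds, 3.0);
        assert!(rule.assistant_regex_options.is_none());
        assert!(rule.customer_regex_options.is_none());
    }

    #[test]
    fn serializes_with_renamed_fields() {
        let rule = BothCustomEndpointingRule::new(
            TypeTrue::Both,
            "hello".to_string(),
            "world".to_string(),
            1.5,
        );
        let value = serde_json::to_value(&rule).expect("serialization should succeed");
        assert_eq!(value["type"], "both");
        assert_eq!(value["assistantRegex"], "hello");
        assert_eq!(value["customerRegex"], "world");
        assert_eq!(value["timeoutSeconds"], 1.5);
        // Unset option vectors are skipped entirely rather than sent as null.
        assert!(value.get("assistantRegexOptions").is_none());
        assert!(value.get("customerRegexOptions").is_none());
    }
}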