vapi_client/models/rime_ai_voice.rs

/*
 * Vapi API
 *
 * Voice AI for developers.
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */

use crate::models;
use serde::{Deserialize, Serialize};

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct RimeAiVoice {
    /// This is the voice provider that will be used.
    #[serde(rename = "provider")]
    pub provider: Provider,
    #[serde(rename = "voiceId")]
    pub voice_id: models::RimeAiVoiceVoiceId,
    /// This is the model that will be used. Defaults to 'v1' when not specified.
    #[serde(rename = "model", skip_serializing_if = "Option::is_none")]
    pub model: Option<Model>,
    /// This is the speed multiplier that will be used.
    #[serde(rename = "speed", skip_serializing_if = "Option::is_none")]
    pub speed: Option<f64>,
    /// This is a flag that controls whether to add slight pauses using angle brackets. Example: “Hi. <200> I’d love to have a conversation with you.” adds a 200ms pause between the first and second sentences.
    #[serde(rename = "pauseBetweenBrackets", skip_serializing_if = "Option::is_none")]
    pub pause_between_brackets: Option<bool>,
    /// This is a flag that controls whether text inside brackets should be phonemized (converted to phonetic pronunciation) - Example: \"{h'El.o} World\" will pronounce \"Hello\" as expected.
    #[serde(rename = "phonemizeBetweenBrackets", skip_serializing_if = "Option::is_none")]
    pub phonemize_between_brackets: Option<bool>,
    /// This is a flag that controls whether to optimize for reduced latency in streaming. https://docs.rime.ai/api-reference/endpoint/websockets#param-reduce-latency
    #[serde(rename = "reduceLatency", skip_serializing_if = "Option::is_none")]
    pub reduce_latency: Option<bool>,
    /// This is a string that allows inline speed control using alpha notation. https://docs.rime.ai/api-reference/endpoint/websockets#param-inline-speed-alpha
    #[serde(rename = "inlineSpeedAlpha", skip_serializing_if = "Option::is_none")]
    pub inline_speed_alpha: Option<String>,
    /// This is the plan for chunking the model output before it is sent to the voice provider.
    #[serde(rename = "chunkPlan", skip_serializing_if = "Option::is_none")]
    pub chunk_plan: Option<models::ChunkPlan>,
    /// This is the plan for voice provider fallbacks in the event that the primary voice provider fails.
    #[serde(rename = "fallbackPlan", skip_serializing_if = "Option::is_none")]
    pub fallback_plan: Option<models::FallbackPlan>,
}

impl RimeAiVoice {
    pub fn new(provider: Provider, voice_id: models::RimeAiVoiceVoiceId) -> RimeAiVoice {
        RimeAiVoice {
            provider,
            voice_id,
            model: None,
            speed: None,
            pause_between_brackets: None,
            phonemize_between_brackets: None,
            reduce_latency: None,
            inline_speed_alpha: None,
            chunk_plan: None,
            fallback_plan: None,
        }
    }
}
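
// Usage sketch: construct a RimeAiVoice with `new`, set the optional fields you need,
// and serialize with serde_json. Keys follow the serde renames above (camelCase,
// provider as "rime-ai"). The `voice_id` value below is a placeholder, since how a
// models::RimeAiVoiceVoiceId is built depends on its own generated definition.
//
//     let mut voice = RimeAiVoice::new(Provider::RimeAi, voice_id);
//     voice.model = Some(Model::Mistv2); // serialized as "mistv2"
//     voice.speed = Some(1.2);
//     let body = serde_json::to_string(&voice).unwrap();
//     // => {"provider":"rime-ai","voiceId":...,"model":"mistv2","speed":1.2}
//
// Unset Option fields are skipped entirely (skip_serializing_if = "Option::is_none"),
// so the request body only carries what was explicitly configured.
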
/// This is the voice provider that will be used.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Provider {
    #[serde(rename = "rime-ai")]
    RimeAi,
}

impl Default for Provider {
    fn default() -> Provider {
        Self::RimeAi
    }
}
/// This is the model that will be used. Defaults to 'v1' when not specified.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Model {
    #[serde(rename = "v1")]
    V1,
    #[serde(rename = "mist")]
    Mist,
    #[serde(rename = "mistv2")]
    Mistv2,
}

impl Default for Model {
    fn default() -> Model {
        Self::V1
    }
}
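
// Wire-format note: these unit variants serialize to their renamed strings, so a
// round-trip through serde_json (assumed available, e.g. in a test) behaves like:
//
//     assert_eq!(serde_json::to_string(&Provider::RimeAi).unwrap(), "\"rime-ai\"");
//     assert_eq!(serde_json::from_str::<Model>("\"mist\"").unwrap(), Model::Mist);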