vapi_client/models/fallback_custom_voice.rs
/*
 * Vapi API
 *
 * API for building voice assistants
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */

use serde::{Deserialize, Serialize};
use utoipa::ToSchema;

use crate::models;

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct FallbackCustomVoice {
    /// This is the voice provider that will be used. Use `custom-voice` for providers that are not natively supported.
    #[serde(rename = "provider")]
    pub provider: Provider,
    /// This is where the voice request will be sent.
    ///
    /// Request Example:
    /// POST https://{server.url}
    /// Content-Type: application/json
    /// { "message": { "type": "voice-request", "text": "Hello, world!", "sampleRate": 24000, ...other metadata about the call... } }
    ///
    /// Response Expected: 1-channel 16-bit raw PCM audio at the sample rate specified in the request.
    ///
    /// Here is how the response will be piped to the transport:
    /// ```text
    /// response.on('data', (chunk: Buffer) => { outputStream.write(chunk); });
    /// ```
23 #[serde(rename = "server")]
24 pub server: models::Server,
25 /// This is the plan for chunking the model output before it is sent to the voice provider.
26 #[serde(rename = "chunkPlan", skip_serializing_if = "Option::is_none")]
27 pub chunk_plan: Option<models::ChunkPlan>,
28}

impl FallbackCustomVoice {
    pub fn new(provider: Provider, server: models::Server) -> FallbackCustomVoice {
        FallbackCustomVoice {
            provider,
            server,
            chunk_plan: None,
        }
    }
}
/// This is the voice provider that will be used. Use `custom-voice` for providers that are not natively supported.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, ToSchema)]
pub enum Provider {
    #[serde(rename = "custom-voice")]
    CustomVoice,
}

impl Default for Provider {
    fn default() -> Provider {
        Self::CustomVoice
    }
}
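
// A minimal usage sketch, not part of the generated model: it assumes `serde_json`
// is available to the crate (as is typical for generated clients) and relies only
// on the `Default` impls already required by this file's derives. Adjust or drop
// these tests if the generator is re-run.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn provider_serializes_to_custom_voice() {
        // The enum's single variant is renamed to the wire value `custom-voice`.
        let json = serde_json::to_string(&Provider::default()).unwrap();
        assert_eq!(json, "\"custom-voice\"");
    }

    #[test]
    fn new_leaves_chunk_plan_unset() {
        // Construct the fallback voice with a default `Server` (placeholder values;
        // `models::Server: Default` is implied by this struct's `Default` derive).
        let voice = FallbackCustomVoice::new(Provider::CustomVoice, crate::models::Server::default());
        // `chunkPlan` is optional and should be omitted from the serialized output when unset.
        let value = serde_json::to_value(&voice).unwrap();
        assert_eq!(value["provider"], "custom-voice");
        assert!(value.get("chunkPlan").is_none());
    }
}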