vapi_client/models/server_message_voice_request.rs

/*
 * Vapi API
 *
 * Voice AI for developers.
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */

use crate::models;
use serde::{Deserialize, Serialize};

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerMessageVoiceRequest {
    #[serde(rename = "phoneNumber", skip_serializing_if = "Option::is_none")]
    pub phone_number: Option<models::ClientMessageWorkflowNodeStartedPhoneNumber>,
    /// This is the type of the message. "voice-request" is sent when using
    /// `assistant.voice = { "type": "custom-voice" }`.
    ///
    /// Here is what the request will look like:
    ///
    /// ```text
    /// POST https://{assistant.voice.server.url}
    /// Content-Type: application/json
    ///
    /// {
    ///   "message": {
    ///     "type": "voice-request",
    ///     "text": "Hello, world!",
    ///     "sampleRate": 24000,
    ///     ...other metadata about the call...
    ///   }
    /// }
    /// ```
    ///
    /// The expected response is 1-channel 16-bit raw PCM audio at the sample
    /// rate specified in the request. Here is how the response will be piped
    /// to the transport:
    ///
    /// ```text
    /// response.on('data', (chunk: Buffer) => {
    ///   outputStream.write(chunk);
    /// });
    /// ```
19    #[serde(rename = "type")]
20    pub r#type: TypeTrue,
21    /// This is the timestamp of the message.
22    #[serde(rename = "timestamp", skip_serializing_if = "Option::is_none")]
23    pub timestamp: Option<f64>,
24    /// This is a live version of the `call.artifact`.  This matches what is stored on `call.artifact` after the call.
25    #[serde(rename = "artifact", skip_serializing_if = "Option::is_none")]
26    pub artifact: Option<models::Artifact>,
27    /// This is the assistant that the message is associated with.
28    #[serde(rename = "assistant", skip_serializing_if = "Option::is_none")]
29    pub assistant: Option<models::CreateAssistantDto>,
30    /// This is the customer that the message is associated with.
31    #[serde(rename = "customer", skip_serializing_if = "Option::is_none")]
32    pub customer: Option<models::CreateCustomerDto>,
33    /// This is the call that the message is associated with.
34    #[serde(rename = "call", skip_serializing_if = "Option::is_none")]
35    pub call: Option<models::Call>,
36    /// This is the chat object.
37    #[serde(rename = "chat", skip_serializing_if = "Option::is_none")]
38    pub chat: Option<models::Chat>,
39    /// This is the text to be synthesized.
40    #[serde(rename = "text")]
41    pub text: String,
    /// This is the sample rate of the audio to be synthesized.
    #[serde(rename = "sampleRate")]
    pub sample_rate: f64,
}

impl ServerMessageVoiceRequest {
    pub fn new(r#type: TypeTrue, text: String, sample_rate: f64) -> ServerMessageVoiceRequest {
        ServerMessageVoiceRequest {
            phone_number: None,
            r#type,
            timestamp: None,
            artifact: None,
            assistant: None,
            customer: None,
            call: None,
            chat: None,
            text,
            sample_rate,
        }
    }
}
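
// Illustrative sketch, not generator output: how a custom voice server might
// unwrap the documented `{ "message": { ... } }` envelope and answer with
// 1-channel 16-bit raw PCM at the requested sample rate. It leans only on
// `serde_json`, which generated clients already depend on; the silence buffer
// is a stand-in for a real synthesizer, and the function name is hypothetical.
#[allow(dead_code)]
fn example_handle_voice_request(body: &str) -> Result<Vec<u8>, serde_json::Error> {
    let envelope: serde_json::Value = serde_json::from_str(body)?;
    let request: ServerMessageVoiceRequest = serde_json::from_value(envelope["message"].clone())?;
    // One second of 16-bit mono silence: two bytes per sample.
    let samples = request.sample_rate as usize;
    Ok(vec![0u8; samples * 2])
}
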
/// This is the type of the message. "voice-request" is sent when using
/// `assistant.voice = { "type": "custom-voice" }`. The full request/response
/// contract is documented on the `type` field of [`ServerMessageVoiceRequest`].
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum TypeTrue {
    #[serde(rename = "voice-request")]
    VoiceRequest,
}

impl Default for TypeTrue {
    fn default() -> TypeTrue {
        Self::VoiceRequest
    }
}
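
// A minimal round-trip check, assuming `serde_json` is available (it is a
// standard dependency of openapi-generator Rust clients): verifies that the
// serde renames reproduce the wire format shown in the docs above, with
// camelCase keys, a "voice-request" type tag, and `None` fields omitted.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn voice_request_serializes_to_wire_format() {
        let message = ServerMessageVoiceRequest::new(
            TypeTrue::VoiceRequest,
            "Hello, world!".to_string(),
            24000.0,
        );
        let json = serde_json::to_value(&message).unwrap();
        assert_eq!(json["type"], "voice-request");
        assert_eq!(json["text"], "Hello, world!");
        assert_eq!(json["sampleRate"], 24000.0);
        // Optional fields marked `skip_serializing_if` never reach the wire.
        assert!(json.get("timestamp").is_none());
    }
}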