// vapi_client/models/server_message_voice_request.rs
1/*
2 * Vapi API
3 *
4 * API for building voice assistants
5 *
6 * The version of the OpenAPI document: 1.0
7 *
8 * Generated by: https://openapi-generator.tech
9 */
10
11use serde::{Deserialize, Serialize};
12
13use crate::models;
14
/// ServerMessageVoiceRequest : Payload POSTed to `assistant.voice.server.url` asking a custom
/// voice server to synthesize `text` into audio at `sample_rate`. Required fields are `type`,
/// `text`, and `sampleRate`; everything else is optional call metadata and is omitted from the
/// serialized JSON when `None` (`skip_serializing_if = "Option::is_none"`).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerMessageVoiceRequest {
    // Phone number associated with the call, if any. NOTE(review): reuses the
    // assistant-request phone-number model — presumably the same shape on the wire;
    // confirm against the OpenAPI spec.
    #[serde(rename = "phoneNumber", skip_serializing_if = "Option::is_none")]
    pub phone_number: Option<models::ServerMessageAssistantRequestPhoneNumber>,
    /// This is the type of the message. \"voice-request\" is sent when using `assistant.voice={ \"type\": \"custom-voice\" }`. Here is what the request will look like: POST https://{assistant.voice.server.url} Content-Type: application/json { \"messsage\": { \"type\": \"voice-request\", \"text\": \"Hello, world!\", \"sampleRate\": 24000, ...other metadata about the call... } } The expected response is 1-channel 16-bit raw PCM audio at the sample rate specified in the request. Here is how the response will be piped to the transport: ``` response.on('data', (chunk: Buffer) => { outputStream.write(chunk); }); ```
    #[serde(rename = "type")]
    pub r#type: Type,
    /// This is the ISO-8601 formatted timestamp of when the message was sent.
    #[serde(rename = "timestamp", skip_serializing_if = "Option::is_none")]
    pub timestamp: Option<f64>,
    /// This is a live version of the `call.artifact`. This matches what is stored on `call.artifact` after the call.
    #[serde(rename = "artifact", skip_serializing_if = "Option::is_none")]
    pub artifact: Option<models::Artifact>,
    /// This is the assistant that is currently active. This is provided for convenience. This matches one of the following: - `call.assistant`, - `call.assistantId`, - `call.squad[n].assistant`, - `call.squad[n].assistantId`, - `call.squadId->[n].assistant`, - `call.squadId->[n].assistantId`.
    #[serde(rename = "assistant", skip_serializing_if = "Option::is_none")]
    pub assistant: Option<models::CreateAssistantDto>,
    /// This is the customer associated with the call. This matches one of the following: - `call.customer`, - `call.customerId`.
    #[serde(rename = "customer", skip_serializing_if = "Option::is_none")]
    pub customer: Option<models::CreateCustomerDto>,
    /// This is the call object. This matches what was returned in POST /call. Note: This might get stale during the call. To get the latest call object, especially after the call is ended, use GET /call/:id.
    #[serde(rename = "call", skip_serializing_if = "Option::is_none")]
    pub call: Option<models::Call>,
    /// This is the text to be synthesized.
    #[serde(rename = "text")]
    pub text: String,
    /// This is the sample rate to be synthesized.
    #[serde(rename = "sampleRate")]
    pub sample_rate: f64,
}
44
45impl ServerMessageVoiceRequest {
46 pub fn new(r#type: Type, text: String, sample_rate: f64) -> ServerMessageVoiceRequest {
47 ServerMessageVoiceRequest {
48 phone_number: None,
49 r#type,
50 timestamp: None,
51 artifact: None,
52 assistant: None,
53 customer: None,
54 call: None,
55 text,
56 sample_rate,
57 }
58 }
59}
/// This is the type of the message. \"voice-request\" is sent when using `assistant.voice={ \"type\": \"custom-voice\" }`. Here is what the request will look like: POST https://{assistant.voice.server.url} Content-Type: application/json { \"messsage\": { \"type\": \"voice-request\", \"text\": \"Hello, world!\", \"sampleRate\": 24000, ...other metadata about the call... } } The expected response is 1-channel 16-bit raw PCM audio at the sample rate specified in the request. Here is how the response will be piped to the transport: ``` response.on('data', (chunk: Buffer) => { outputStream.write(chunk); }); ```
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Type {
    // Sole discriminator for this message; serialized on the wire as "voice-request".
    #[serde(rename = "voice-request")]
    VoiceRequest,
}
66
67impl Default for Type {
68 fn default() -> Type {
69 Self::VoiceRequest
70 }
71}