aws_sdk_polly/operation/start_speech_synthesis_task/builders.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::start_speech_synthesis_task::_start_speech_synthesis_task_output::StartSpeechSynthesisTaskOutputBuilder;

pub use crate::operation::start_speech_synthesis_task::_start_speech_synthesis_task_input::StartSpeechSynthesisTaskInputBuilder;

impl crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskInputBuilder {
    /// Sends a request with this input using the given client.
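    ///
    /// # Example
    /// A hand-written sketch (not produced by the code generator): it assumes an
    /// already-configured `Client`, and the bucket name and voice are placeholders.
    /// ```no_run
    /// # async fn example(client: &aws_sdk_polly::Client) -> Result<(), aws_sdk_polly::Error> {
    /// use aws_sdk_polly::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskInputBuilder;
    /// // Build the input separately, then send it with an existing client.
    /// let _output = StartSpeechSynthesisTaskInputBuilder::default()
    ///     .output_format(aws_sdk_polly::types::OutputFormat::Mp3)
    ///     .output_s3_bucket_name("amzn-s3-demo-bucket") // placeholder bucket name
    ///     .voice_id(aws_sdk_polly::types::VoiceId::Joanna) // placeholder voice
    ///     .text("Hello from Amazon Polly")
    ///     .send_with(client)
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```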
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::start_speech_synthesis_task::StartSpeechSynthesisTaskOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::start_speech_synthesis_task::StartSpeechSynthesisTaskError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.start_speech_synthesis_task();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `StartSpeechSynthesisTask`.
///
/// <p>Allows the creation of an asynchronous synthesis task, by starting a new <code>SpeechSynthesisTask</code>. This operation requires all the standard information needed for speech synthesis, plus the name of an Amazon S3 bucket for the service to store the output of the synthesis task and two optional parameters (<code>OutputS3KeyPrefix</code> and <code>SnsTopicArn</code>). Once the synthesis task is created, this operation will return a <code>SpeechSynthesisTask</code> object, which will include an identifier of this task as well as the current status. The <code>SpeechSynthesisTask</code> object is available for 72 hours after starting the asynchronous synthesis task.</p>
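///
/// # Example
/// A hand-written usage sketch (not produced by the code generator); the bucket
/// name and voice are placeholders you would replace with your own values.
/// ```no_run
/// # async fn example(client: &aws_sdk_polly::Client) -> Result<(), aws_sdk_polly::Error> {
/// let output = client
///     .start_speech_synthesis_task()
///     .output_format(aws_sdk_polly::types::OutputFormat::Mp3)
///     .output_s3_bucket_name("amzn-s3-demo-bucket") // placeholder bucket name
///     .voice_id(aws_sdk_polly::types::VoiceId::Joanna) // placeholder voice
///     .text("Hello from Amazon Polly")
///     .send()
///     .await?;
/// // The returned `SynthesisTask` carries the task identifier and current status.
/// println!("{:?}", output.synthesis_task());
/// # Ok(())
/// # }
/// ```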
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct StartSpeechSynthesisTaskFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::start_speech_synthesis_task::StartSpeechSynthesisTaskOutput,
        crate::operation::start_speech_synthesis_task::StartSpeechSynthesisTaskError,
    > for StartSpeechSynthesisTaskFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::start_speech_synthesis_task::StartSpeechSynthesisTaskOutput,
            crate::operation::start_speech_synthesis_task::StartSpeechSynthesisTaskError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl StartSpeechSynthesisTaskFluentBuilder {
    /// Creates a new `StartSpeechSynthesisTaskFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the StartSpeechSynthesisTask as a reference.
    pub fn as_input(&self) -> &crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
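    ///
    /// # Example
    /// A hand-written sketch (not produced by the code generator) of raising the
    /// retry limit when the client is constructed; the paths and values shown are
    /// illustrative assumptions, not requirements of this operation.
    /// ```no_run
    /// use aws_sdk_polly::config::retry::RetryConfig;
    /// let config = aws_sdk_polly::Config::builder()
    ///     .retry_config(RetryConfig::standard().with_max_attempts(5)) // illustrative value
    ///     .build();
    /// let client = aws_sdk_polly::Client::from_conf(config);
    /// ```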
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::start_speech_synthesis_task::StartSpeechSynthesisTaskOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::start_speech_synthesis_task::StartSpeechSynthesisTaskError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::start_speech_synthesis_task::StartSpeechSynthesisTask::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::start_speech_synthesis_task::StartSpeechSynthesisTask::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
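    ///
    /// # Example
    /// A hand-written sketch (not produced by the code generator) of one possible
    /// customization: overriding the client configuration for this single request.
    /// The region and bucket name are placeholders.
    /// ```no_run
    /// # async fn example(client: &aws_sdk_polly::Client) -> Result<(), aws_sdk_polly::Error> {
    /// use aws_sdk_polly::config::Region;
    /// let _output = client
    ///     .start_speech_synthesis_task()
    ///     .output_format(aws_sdk_polly::types::OutputFormat::Mp3)
    ///     .output_s3_bucket_name("amzn-s3-demo-bucket") // placeholder bucket name
    ///     .voice_id(aws_sdk_polly::types::VoiceId::Joanna)
    ///     .text("Hello")
    ///     .customize()
    ///     .config_override(aws_sdk_polly::Config::builder().region(Region::new("us-west-2"))) // placeholder region
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```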
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::start_speech_synthesis_task::StartSpeechSynthesisTaskOutput,
        crate::operation::start_speech_synthesis_task::StartSpeechSynthesisTaskError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>Specifies the engine (<code>standard</code>, <code>neural</code>, <code>long-form</code> or <code>generative</code>) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error.</p>
    pub fn engine(mut self, input: crate::types::Engine) -> Self {
        self.inner = self.inner.engine(input);
        self
    }
    /// <p>Specifies the engine (<code>standard</code>, <code>neural</code>, <code>long-form</code> or <code>generative</code>) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error.</p>
    pub fn set_engine(mut self, input: ::std::option::Option<crate::types::Engine>) -> Self {
        self.inner = self.inner.set_engine(input);
        self
    }
    /// <p>Specifies the engine (<code>standard</code>, <code>neural</code>, <code>long-form</code> or <code>generative</code>) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error.</p>
    pub fn get_engine(&self) -> &::std::option::Option<crate::types::Engine> {
        self.inner.get_engine()
    }
    /// <p>Optional language code for the Speech Synthesis request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).</p>
    /// <p>If a bilingual voice is used and no language code is specified, Amazon Polly uses the default language of the bilingual voice. The default language for any voice is the one returned by the <a href="https://docs.aws.amazon.com/polly/latest/dg/API_DescribeVoices.html">DescribeVoices</a> operation for the <code>LanguageCode</code> parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.</p>
    pub fn language_code(mut self, input: crate::types::LanguageCode) -> Self {
        self.inner = self.inner.language_code(input);
        self
    }
    /// <p>Optional language code for the Speech Synthesis request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).</p>
    /// <p>If a bilingual voice is used and no language code is specified, Amazon Polly uses the default language of the bilingual voice. The default language for any voice is the one returned by the <a href="https://docs.aws.amazon.com/polly/latest/dg/API_DescribeVoices.html">DescribeVoices</a> operation for the <code>LanguageCode</code> parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.</p>
    pub fn set_language_code(mut self, input: ::std::option::Option<crate::types::LanguageCode>) -> Self {
        self.inner = self.inner.set_language_code(input);
        self
    }
    /// <p>Optional language code for the Speech Synthesis request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).</p>
    /// <p>If a bilingual voice is used and no language code is specified, Amazon Polly uses the default language of the bilingual voice. The default language for any voice is the one returned by the <a href="https://docs.aws.amazon.com/polly/latest/dg/API_DescribeVoices.html">DescribeVoices</a> operation for the <code>LanguageCode</code> parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.</p>
    pub fn get_language_code(&self) -> &::std::option::Option<crate::types::LanguageCode> {
        self.inner.get_language_code()
    }
    ///
    /// Appends an item to `LexiconNames`.
    ///
    /// To override the contents of this collection use [`set_lexicon_names`](Self::set_lexicon_names).
    ///
    /// <p>List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice.</p>
    pub fn lexicon_names(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.lexicon_names(input.into());
        self
    }
    /// <p>List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice.</p>
    pub fn set_lexicon_names(mut self, input: ::std::option::Option<::std::vec::Vec<::std::string::String>>) -> Self {
        self.inner = self.inner.set_lexicon_names(input);
        self
    }
    /// <p>List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice.</p>
    pub fn get_lexicon_names(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> {
        self.inner.get_lexicon_names()
    }
    /// <p>The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.</p>
    pub fn output_format(mut self, input: crate::types::OutputFormat) -> Self {
        self.inner = self.inner.output_format(input);
        self
    }
    /// <p>The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.</p>
    pub fn set_output_format(mut self, input: ::std::option::Option<crate::types::OutputFormat>) -> Self {
        self.inner = self.inner.set_output_format(input);
        self
    }
    /// <p>The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.</p>
    pub fn get_output_format(&self) -> &::std::option::Option<crate::types::OutputFormat> {
        self.inner.get_output_format()
    }
    /// <p>Amazon S3 bucket name to which the output file will be saved.</p>
    pub fn output_s3_bucket_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.output_s3_bucket_name(input.into());
        self
    }
    /// <p>Amazon S3 bucket name to which the output file will be saved.</p>
    pub fn set_output_s3_bucket_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_output_s3_bucket_name(input);
        self
    }
    /// <p>Amazon S3 bucket name to which the output file will be saved.</p>
    pub fn get_output_s3_bucket_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_output_s3_bucket_name()
    }
    /// <p>The Amazon S3 key prefix for the output speech file.</p>
    pub fn output_s3_key_prefix(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.output_s3_key_prefix(input.into());
        self
    }
    /// <p>The Amazon S3 key prefix for the output speech file.</p>
    pub fn set_output_s3_key_prefix(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_output_s3_key_prefix(input);
        self
    }
    /// <p>The Amazon S3 key prefix for the output speech file.</p>
    pub fn get_output_s3_key_prefix(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_output_s3_key_prefix()
    }
    /// <p>The audio frequency specified in Hz.</p>
    /// <p>The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for standard voices is "22050". The default value for neural voices is "24000". The default value for long-form voices is "24000". The default value for generative voices is "24000".</p>
    /// <p>Valid values for pcm are "8000" and "16000". The default value is "16000".</p>
    pub fn sample_rate(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.sample_rate(input.into());
        self
    }
    /// <p>The audio frequency specified in Hz.</p>
    /// <p>The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for standard voices is "22050". The default value for neural voices is "24000". The default value for long-form voices is "24000". The default value for generative voices is "24000".</p>
    /// <p>Valid values for pcm are "8000" and "16000". The default value is "16000".</p>
    pub fn set_sample_rate(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_sample_rate(input);
        self
    }
    /// <p>The audio frequency specified in Hz.</p>
    /// <p>The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for standard voices is "22050". The default value for neural voices is "24000". The default value for long-form voices is "24000". The default value for generative voices is "24000".</p>
    /// <p>Valid values for pcm are "8000" and "16000". The default value is "16000".</p>
    pub fn get_sample_rate(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_sample_rate()
    }
    /// <p>ARN for the SNS topic optionally used for providing status notification for a speech synthesis task.</p>
    pub fn sns_topic_arn(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.sns_topic_arn(input.into());
        self
    }
    /// <p>ARN for the SNS topic optionally used for providing status notification for a speech synthesis task.</p>
    pub fn set_sns_topic_arn(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_sns_topic_arn(input);
        self
    }
    /// <p>ARN for the SNS topic optionally used for providing status notification for a speech synthesis task.</p>
    pub fn get_sns_topic_arn(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_sns_topic_arn()
    }
    ///
    /// Appends an item to `SpeechMarkTypes`.
    ///
    /// To override the contents of this collection use [`set_speech_mark_types`](Self::set_speech_mark_types).
    ///
    /// <p>The type of speech marks returned for the input text.</p>
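    ///
    /// # Example
    /// A hand-written sketch (not produced by the code generator): speech marks are
    /// delivered as JSON, so the output format is set to `json`; the bucket name is
    /// a placeholder.
    /// ```no_run
    /// # async fn example(client: &aws_sdk_polly::Client) -> Result<(), aws_sdk_polly::Error> {
    /// let _output = client
    ///     .start_speech_synthesis_task()
    ///     .output_format(aws_sdk_polly::types::OutputFormat::Json)
    ///     .output_s3_bucket_name("amzn-s3-demo-bucket") // placeholder bucket name
    ///     .voice_id(aws_sdk_polly::types::VoiceId::Joanna)
    ///     .speech_mark_types(aws_sdk_polly::types::SpeechMarkType::Word)
    ///     .speech_mark_types(aws_sdk_polly::types::SpeechMarkType::Sentence)
    ///     .text("Hello world")
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```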
    pub fn speech_mark_types(mut self, input: crate::types::SpeechMarkType) -> Self {
        self.inner = self.inner.speech_mark_types(input);
        self
    }
    /// <p>The type of speech marks returned for the input text.</p>
    pub fn set_speech_mark_types(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::SpeechMarkType>>) -> Self {
        self.inner = self.inner.set_speech_mark_types(input);
        self
    }
    /// <p>The type of speech marks returned for the input text.</p>
    pub fn get_speech_mark_types(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::SpeechMarkType>> {
        self.inner.get_speech_mark_types()
    }
    /// <p>The input text to synthesize. If you specify ssml as the TextType, follow the SSML format for the input text.</p>
    pub fn text(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.text(input.into());
        self
    }
    /// <p>The input text to synthesize. If you specify ssml as the TextType, follow the SSML format for the input text.</p>
    pub fn set_text(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_text(input);
        self
    }
    /// <p>The input text to synthesize. If you specify ssml as the TextType, follow the SSML format for the input text.</p>
    pub fn get_text(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_text()
    }
    /// <p>Specifies whether the input text is plain text or SSML. The default value is plain text.</p>
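    ///
    /// # Example
    /// A hand-written sketch (not produced by the code generator) of submitting SSML
    /// input; the markup and bucket name are placeholders.
    /// ```no_run
    /// # async fn example(client: &aws_sdk_polly::Client) -> Result<(), aws_sdk_polly::Error> {
    /// let _output = client
    ///     .start_speech_synthesis_task()
    ///     .output_format(aws_sdk_polly::types::OutputFormat::Mp3)
    ///     .output_s3_bucket_name("amzn-s3-demo-bucket") // placeholder bucket name
    ///     .voice_id(aws_sdk_polly::types::VoiceId::Joanna)
    ///     .text_type(aws_sdk_polly::types::TextType::Ssml)
    ///     .text("<speak>Hello <break time=\"300ms\"/> world.</speak>") // placeholder SSML
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```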
    pub fn text_type(mut self, input: crate::types::TextType) -> Self {
        self.inner = self.inner.text_type(input);
        self
    }
    /// <p>Specifies whether the input text is plain text or SSML. The default value is plain text.</p>
    pub fn set_text_type(mut self, input: ::std::option::Option<crate::types::TextType>) -> Self {
        self.inner = self.inner.set_text_type(input);
        self
    }
    /// <p>Specifies whether the input text is plain text or SSML. The default value is plain text.</p>
    pub fn get_text_type(&self) -> &::std::option::Option<crate::types::TextType> {
        self.inner.get_text_type()
    }
    /// <p>Voice ID to use for the synthesis.</p>
    pub fn voice_id(mut self, input: crate::types::VoiceId) -> Self {
        self.inner = self.inner.voice_id(input);
        self
    }
    /// <p>Voice ID to use for the synthesis.</p>
    pub fn set_voice_id(mut self, input: ::std::option::Option<crate::types::VoiceId>) -> Self {
        self.inner = self.inner.set_voice_id(input);
        self
    }
    /// <p>Voice ID to use for the synthesis.</p>
    pub fn get_voice_id(&self) -> &::std::option::Option<crate::types::VoiceId> {
        self.inner.get_voice_id()
    }
}