aws_sdk_polly/client/start_speech_synthesis_task.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
impl super::Client {
    /// Constructs a fluent builder for the [`StartSpeechSynthesisTask`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder) operation.
    ///
    /// - The fluent builder is configurable:
    ///   - [`engine(Engine)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::engine) / [`set_engine(Option<Engine>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::set_engine):<br>required: **false**<br><p>Specifies the engine (<code>standard</code>, <code>neural</code>, <code>long-form</code> or <code>generative</code>) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error.</p><br>
    ///   - [`language_code(LanguageCode)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::language_code) / [`set_language_code(Option<LanguageCode>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::set_language_code):<br>required: **false**<br><p>Optional language code for the Speech Synthesis request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).</p> <p>If a bilingual voice is used and no language code is specified, Amazon Polly uses the default language of the bilingual voice. The default language for any voice is the one returned by the <a href="https://docs.aws.amazon.com/polly/latest/dg/API_DescribeVoices.html">DescribeVoices</a> operation for the <code>LanguageCode</code> parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.</p><br>
    ///   - [`lexicon_names(impl Into<String>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::lexicon_names) / [`set_lexicon_names(Option<Vec::<String>>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::set_lexicon_names):<br>required: **false**<br><p>List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice.</p><br>
    ///   - [`output_format(OutputFormat)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::output_format) / [`set_output_format(Option<OutputFormat>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::set_output_format):<br>required: **true**<br><p>The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.</p><br>
    ///   - [`output_s3_bucket_name(impl Into<String>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::output_s3_bucket_name) / [`set_output_s3_bucket_name(Option<String>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::set_output_s3_bucket_name):<br>required: **true**<br><p>Amazon S3 bucket name to which the output file will be saved.</p><br>
    ///   - [`output_s3_key_prefix(impl Into<String>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::output_s3_key_prefix) / [`set_output_s3_key_prefix(Option<String>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::set_output_s3_key_prefix):<br>required: **false**<br><p>The Amazon S3 key prefix for the output speech file.</p><br>
    ///   - [`sample_rate(impl Into<String>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::sample_rate) / [`set_sample_rate(Option<String>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::set_sample_rate):<br>required: **false**<br><p>The audio frequency specified in Hz.</p> <p>The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for standard voices is "22050". The default value for neural voices is "24000". The default value for long-form voices is "24000". The default value for generative voices is "24000".</p> <p>Valid values for pcm are "8000" and "16000". The default value is "16000".</p><br>
    ///   - [`sns_topic_arn(impl Into<String>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::sns_topic_arn) / [`set_sns_topic_arn(Option<String>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::set_sns_topic_arn):<br>required: **false**<br><p>ARN for the SNS topic optionally used for providing status notification for a speech synthesis task.</p><br>
    ///   - [`speech_mark_types(SpeechMarkType)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::speech_mark_types) / [`set_speech_mark_types(Option<Vec::<SpeechMarkType>>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::set_speech_mark_types):<br>required: **false**<br><p>The type of speech marks returned for the input text.</p><br>
    ///   - [`text(impl Into<String>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::text) / [`set_text(Option<String>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::set_text):<br>required: **true**<br><p>The input text to synthesize. If you specify ssml as the TextType, follow the SSML format for the input text.</p><br>
    ///   - [`text_type(TextType)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::text_type) / [`set_text_type(Option<TextType>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::set_text_type):<br>required: **false**<br><p>Specifies whether the input text is plain text or SSML. The default value is plain text.</p><br>
    ///   - [`voice_id(VoiceId)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::voice_id) / [`set_voice_id(Option<VoiceId>)`](crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::set_voice_id):<br>required: **true**<br><p>Voice ID to use for the synthesis.</p><br>
    /// - On success, responds with [`StartSpeechSynthesisTaskOutput`](crate::operation::start_speech_synthesis_task::StartSpeechSynthesisTaskOutput) with field(s):
    ///   - [`synthesis_task(Option<SynthesisTask>)`](crate::operation::start_speech_synthesis_task::StartSpeechSynthesisTaskOutput::synthesis_task): <p>SynthesisTask object that provides information and attributes about a newly submitted speech synthesis task.</p>
    /// - On failure, responds with [`SdkError<StartSpeechSynthesisTaskError>`](crate::operation::start_speech_synthesis_task::StartSpeechSynthesisTaskError)
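    ///
    /// # Examples
    ///
    /// A minimal sketch of starting an asynchronous synthesis task with the four required
    /// fields and reading back the returned `SynthesisTask`; the bucket name, text, and
    /// voice below are illustrative placeholders, not values prescribed by the API.
    ///
    /// ```no_run
    /// async fn example(client: &aws_sdk_polly::Client) -> Result<(), aws_sdk_polly::Error> {
    ///     let output = client
    ///         .start_speech_synthesis_task()
    ///         // Required fields: output format, destination bucket, input text, and voice.
    ///         .output_format(aws_sdk_polly::types::OutputFormat::Mp3)
    ///         .output_s3_bucket_name("amzn-s3-demo-bucket") // placeholder bucket name
    ///         .text("Hello from Amazon Polly")
    ///         .voice_id(aws_sdk_polly::types::VoiceId::Joanna)
    ///         .send()
    ///         .await?;
    ///     // The task runs asynchronously; its ID can be used to poll for completion.
    ///     if let Some(task) = output.synthesis_task() {
    ///         println!("started task: {:?}", task.task_id());
    ///     }
    ///     Ok(())
    /// }
    /// ```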
    pub fn start_speech_synthesis_task(&self) -> crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder {
        crate::operation::start_speech_synthesis_task::builders::StartSpeechSynthesisTaskFluentBuilder::new(self.handle.clone())
    }
}