// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(::std::fmt::Debug)]
pub struct RecognizeUtteranceOutput {
    /// <p>Indicates whether the input mode to the operation was text or speech. </p>
    pub input_mode: ::std::option::Option<::std::string::String>,
    /// <p>Content type as specified in the <code>responseContentType</code> in the request.</p>
    pub content_type: ::std::option::Option<::std::string::String>,
    /// <p>A list of messages that were last sent to the user. The messages are ordered based on the order that you returned the messages from your Lambda function or the order that the messages are defined in the bot.</p>
    /// <p>The <code>messages</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub messages: ::std::option::Option<::std::string::String>,
    /// <p>A list of intents that Amazon Lex V2 determined might satisfy the user's utterance.</p>
    /// <p>Each interpretation includes the intent, a score that indicates how confident Amazon Lex V2 is that the interpretation is the correct one, and an optional sentiment response that indicates the sentiment expressed in the utterance.</p>
    /// <p>The <code>interpretations</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub interpretations: ::std::option::Option<::std::string::String>,
    /// <p>Represents the current state of the dialog between the user and the bot.</p>
    /// <p>Use this to determine the progress of the conversation and what the next action might be.</p>
    /// <p>The <code>sessionState</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub session_state: ::std::option::Option<::std::string::String>,
    /// <p>The attributes sent in the request.</p>
    /// <p>The <code>requestAttributes</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents.</p>
    pub request_attributes: ::std::option::Option<::std::string::String>,
    /// <p>The identifier of the session in use.</p>
    pub session_id: ::std::option::Option<::std::string::String>,
    /// <p>The text used to process the request.</p>
    /// <p>If the input was an audio stream, the <code>inputTranscript</code> field contains the text extracted from the audio stream. This is the text that is actually processed to recognize intents and slot values. You can use this information to determine if Amazon Lex V2 is correctly processing the audio that you send.</p>
    /// <p>The <code>inputTranscript</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub input_transcript: ::std::option::Option<::std::string::String>,
    /// <p>The prompt or statement to send to the user. This is based on the bot configuration and context. For example, if Amazon Lex V2 did not understand the user intent, it sends the <code>clarificationPrompt</code> configured for the bot. If the intent requires confirmation before taking the fulfillment action, it sends the <code>confirmationPrompt</code>. Another example: Suppose that the Lambda function successfully fulfilled the intent, and sent a message to convey to the user. Then Amazon Lex V2 sends that message in the response.</p>
    pub audio_stream: ::aws_smithy_http::byte_stream::ByteStream,
    /// <p>The bot member that recognized the utterance.</p>
    pub recognized_bot_member: ::std::option::Option<::std::string::String>,
    _request_id: Option<String>,
}
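// Several fields above (`messages`, `interpretations`, `session_state`,
// `request_attributes`, `input_transcript`) are returned gzip-compressed and then
// base64-encoded, as their documentation notes. A minimal decoding sketch, assuming
// the caller adds the `base64` (0.13-style `decode` API) and `flate2` crates; neither
// is a dependency of this generated file:
//
// fn decode_compressed_field(encoded: &str) -> Result<String, Box<dyn std::error::Error>> {
//     use std::io::Read;
//     // base64 text -> gzip-compressed bytes
//     let compressed = base64::decode(encoded)?;
//     // gzip-compressed bytes -> original JSON/text payload
//     let mut decoder = flate2::read::GzDecoder::new(&compressed[..]);
//     let mut decoded = String::new();
//     decoder.read_to_string(&mut decoded)?;
//     Ok(decoded)
// }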
impl RecognizeUtteranceOutput {
    /// <p>Indicates whether the input mode to the operation was text or speech. </p>
    pub fn input_mode(&self) -> ::std::option::Option<&str> {
        self.input_mode.as_deref()
    }
    /// <p>Content type as specified in the <code>responseContentType</code> in the request.</p>
    pub fn content_type(&self) -> ::std::option::Option<&str> {
        self.content_type.as_deref()
    }
    /// <p>A list of messages that were last sent to the user. The messages are ordered based on the order that you returned the messages from your Lambda function or the order that the messages are defined in the bot.</p>
    /// <p>The <code>messages</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub fn messages(&self) -> ::std::option::Option<&str> {
        self.messages.as_deref()
    }
    /// <p>A list of intents that Amazon Lex V2 determined might satisfy the user's utterance.</p>
    /// <p>Each interpretation includes the intent, a score that indicates how confident Amazon Lex V2 is that the interpretation is the correct one, and an optional sentiment response that indicates the sentiment expressed in the utterance.</p>
    /// <p>The <code>interpretations</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub fn interpretations(&self) -> ::std::option::Option<&str> {
        self.interpretations.as_deref()
    }
    /// <p>Represents the current state of the dialog between the user and the bot.</p>
    /// <p>Use this to determine the progress of the conversation and what the next action might be.</p>
    /// <p>The <code>sessionState</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub fn session_state(&self) -> ::std::option::Option<&str> {
        self.session_state.as_deref()
    }
    /// <p>The attributes sent in the request.</p>
    /// <p>The <code>requestAttributes</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents.</p>
    pub fn request_attributes(&self) -> ::std::option::Option<&str> {
        self.request_attributes.as_deref()
    }
    /// <p>The identifier of the session in use.</p>
    pub fn session_id(&self) -> ::std::option::Option<&str> {
        self.session_id.as_deref()
    }
    /// <p>The text used to process the request.</p>
    /// <p>If the input was an audio stream, the <code>inputTranscript</code> field contains the text extracted from the audio stream. This is the text that is actually processed to recognize intents and slot values. You can use this information to determine if Amazon Lex V2 is correctly processing the audio that you send.</p>
    /// <p>The <code>inputTranscript</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub fn input_transcript(&self) -> ::std::option::Option<&str> {
        self.input_transcript.as_deref()
    }
    /// <p>The prompt or statement to send to the user. This is based on the bot configuration and context. For example, if Amazon Lex V2 did not understand the user intent, it sends the <code>clarificationPrompt</code> configured for the bot. If the intent requires confirmation before taking the fulfillment action, it sends the <code>confirmationPrompt</code>. Another example: Suppose that the Lambda function successfully fulfilled the intent, and sent a message to convey to the user. Then Amazon Lex V2 sends that message in the response.</p>
    pub fn audio_stream(&self) -> &::aws_smithy_http::byte_stream::ByteStream {
        &self.audio_stream
    }
    /// <p>The bot member that recognized the utterance.</p>
    pub fn recognized_bot_member(&self) -> ::std::option::Option<&str> {
        self.recognized_bot_member.as_deref()
    }
}
impl ::aws_http::request_id::RequestId for RecognizeUtteranceOutput {
    fn request_id(&self) -> Option<&str> {
        self._request_id.as_deref()
    }
}
impl RecognizeUtteranceOutput {
    /// Creates a new builder-style object to manufacture [`RecognizeUtteranceOutput`](crate::operation::recognize_utterance::RecognizeUtteranceOutput).
    pub fn builder() -> crate::operation::recognize_utterance::builders::RecognizeUtteranceOutputBuilder {
        crate::operation::recognize_utterance::builders::RecognizeUtteranceOutputBuilder::default()
    }
}
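// A minimal builder sketch; the field values are illustrative assumptions only.
// In normal SDK use the builder is populated by the response deserializer, but it
// can be handy for constructing outputs in tests:
//
// let output = crate::operation::recognize_utterance::RecognizeUtteranceOutput::builder()
//     .session_id("test-session")
//     .input_mode("Speech")
//     .build();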

/// A builder for [`RecognizeUtteranceOutput`](crate::operation::recognize_utterance::RecognizeUtteranceOutput).
#[non_exhaustive]
#[derive(::std::default::Default, ::std::fmt::Debug)]
pub struct RecognizeUtteranceOutputBuilder {
    pub(crate) input_mode: ::std::option::Option<::std::string::String>,
    pub(crate) content_type: ::std::option::Option<::std::string::String>,
    pub(crate) messages: ::std::option::Option<::std::string::String>,
    pub(crate) interpretations: ::std::option::Option<::std::string::String>,
    pub(crate) session_state: ::std::option::Option<::std::string::String>,
    pub(crate) request_attributes: ::std::option::Option<::std::string::String>,
    pub(crate) session_id: ::std::option::Option<::std::string::String>,
    pub(crate) input_transcript: ::std::option::Option<::std::string::String>,
    pub(crate) audio_stream: ::std::option::Option<::aws_smithy_http::byte_stream::ByteStream>,
    pub(crate) recognized_bot_member: ::std::option::Option<::std::string::String>,
    _request_id: Option<String>,
}
impl RecognizeUtteranceOutputBuilder {
    /// <p>Indicates whether the input mode to the operation was text or speech. </p>
    pub fn input_mode(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.input_mode = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>Indicates whether the input mode to the operation was text or speech. </p>
    pub fn set_input_mode(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.input_mode = input;
        self
    }
    /// <p>Indicates whether the input mode to the operation was text or speech. </p>
    pub fn get_input_mode(&self) -> &::std::option::Option<::std::string::String> {
        &self.input_mode
    }
    /// <p>Content type as specified in the <code>responseContentType</code> in the request.</p>
    pub fn content_type(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.content_type = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>Content type as specified in the <code>responseContentType</code> in the request.</p>
    pub fn set_content_type(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.content_type = input;
        self
    }
    /// <p>Content type as specified in the <code>responseContentType</code> in the request.</p>
    pub fn get_content_type(&self) -> &::std::option::Option<::std::string::String> {
        &self.content_type
    }
    /// <p>A list of messages that were last sent to the user. The messages are ordered based on the order that you returned the messages from your Lambda function or the order that the messages are defined in the bot.</p>
    /// <p>The <code>messages</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub fn messages(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.messages = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>A list of messages that were last sent to the user. The messages are ordered based on the order that you returned the messages from your Lambda function or the order that the messages are defined in the bot.</p>
    /// <p>The <code>messages</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub fn set_messages(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.messages = input;
        self
    }
    /// <p>A list of messages that were last sent to the user. The messages are ordered based on the order that you returned the messages from your Lambda function or the order that the messages are defined in the bot.</p>
    /// <p>The <code>messages</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub fn get_messages(&self) -> &::std::option::Option<::std::string::String> {
        &self.messages
    }
    /// <p>A list of intents that Amazon Lex V2 determined might satisfy the user's utterance.</p>
    /// <p>Each interpretation includes the intent, a score that indicates how confident Amazon Lex V2 is that the interpretation is the correct one, and an optional sentiment response that indicates the sentiment expressed in the utterance.</p>
    /// <p>The <code>interpretations</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub fn interpretations(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.interpretations = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>A list of intents that Amazon Lex V2 determined might satisfy the user's utterance.</p>
    /// <p>Each interpretation includes the intent, a score that indicates how confident Amazon Lex V2 is that the interpretation is the correct one, and an optional sentiment response that indicates the sentiment expressed in the utterance.</p>
    /// <p>The <code>interpretations</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub fn set_interpretations(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.interpretations = input;
        self
    }
    /// <p>A list of intents that Amazon Lex V2 determined might satisfy the user's utterance.</p>
    /// <p>Each interpretation includes the intent, a score that indicates how confident Amazon Lex V2 is that the interpretation is the correct one, and an optional sentiment response that indicates the sentiment expressed in the utterance.</p>
    /// <p>The <code>interpretations</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub fn get_interpretations(&self) -> &::std::option::Option<::std::string::String> {
        &self.interpretations
    }
    /// <p>Represents the current state of the dialog between the user and the bot.</p>
    /// <p>Use this to determine the progress of the conversation and what the next action might be.</p>
    /// <p>The <code>sessionState</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub fn session_state(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.session_state = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>Represents the current state of the dialog between the user and the bot.</p>
    /// <p>Use this to determine the progress of the conversation and what the next action might be.</p>
    /// <p>The <code>sessionState</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub fn set_session_state(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.session_state = input;
        self
    }
    /// <p>Represents the current state of the dialog between the user and the bot.</p>
    /// <p>Use this to determine the progress of the conversation and what the next action might be.</p>
    /// <p>The <code>sessionState</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub fn get_session_state(&self) -> &::std::option::Option<::std::string::String> {
        &self.session_state
    }
    /// <p>The attributes sent in the request.</p>
    /// <p>The <code>requestAttributes</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents.</p>
    pub fn request_attributes(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.request_attributes = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>The attributes sent in the request.</p>
    /// <p>The <code>requestAttributes</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents.</p>
    pub fn set_request_attributes(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.request_attributes = input;
        self
    }
    /// <p>The attributes sent in the request.</p>
    /// <p>The <code>requestAttributes</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents.</p>
    pub fn get_request_attributes(&self) -> &::std::option::Option<::std::string::String> {
        &self.request_attributes
    }
    /// <p>The identifier of the session in use.</p>
    pub fn session_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.session_id = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>The identifier of the session in use.</p>
    pub fn set_session_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.session_id = input;
        self
    }
    /// <p>The identifier of the session in use.</p>
    pub fn get_session_id(&self) -> &::std::option::Option<::std::string::String> {
        &self.session_id
    }
    /// <p>The text used to process the request.</p>
    /// <p>If the input was an audio stream, the <code>inputTranscript</code> field contains the text extracted from the audio stream. This is the text that is actually processed to recognize intents and slot values. You can use this information to determine if Amazon Lex V2 is correctly processing the audio that you send.</p>
    /// <p>The <code>inputTranscript</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub fn input_transcript(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.input_transcript = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>The text used to process the request.</p>
    /// <p>If the input was an audio stream, the <code>inputTranscript</code> field contains the text extracted from the audio stream. This is the text that is actually processed to recognize intents and slot values. You can use this information to determine if Amazon Lex V2 is correctly processing the audio that you send.</p>
    /// <p>The <code>inputTranscript</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub fn set_input_transcript(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.input_transcript = input;
        self
    }
    /// <p>The text used to process the request.</p>
    /// <p>If the input was an audio stream, the <code>inputTranscript</code> field contains the text extracted from the audio stream. This is the text that is actually processed to recognize intents and slot values. You can use this information to determine if Amazon Lex V2 is correctly processing the audio that you send.</p>
    /// <p>The <code>inputTranscript</code> field is compressed with gzip and then base64 encoded. Before you can use the contents of the field, you must decode and decompress the contents. See the example for a simple function to decode and decompress the contents.</p>
    pub fn get_input_transcript(&self) -> &::std::option::Option<::std::string::String> {
        &self.input_transcript
    }
    /// <p>The prompt or statement to send to the user. This is based on the bot configuration and context. For example, if Amazon Lex V2 did not understand the user intent, it sends the <code>clarificationPrompt</code> configured for the bot. If the intent requires confirmation before taking the fulfillment action, it sends the <code>confirmationPrompt</code>. Another example: Suppose that the Lambda function successfully fulfilled the intent, and sent a message to convey to the user. Then Amazon Lex V2 sends that message in the response.</p>
    pub fn audio_stream(mut self, input: ::aws_smithy_http::byte_stream::ByteStream) -> Self {
        self.audio_stream = ::std::option::Option::Some(input);
        self
    }
    /// <p>The prompt or statement to send to the user. This is based on the bot configuration and context. For example, if Amazon Lex V2 did not understand the user intent, it sends the <code>clarificationPrompt</code> configured for the bot. If the intent requires confirmation before taking the fulfillment action, it sends the <code>confirmationPrompt</code>. Another example: Suppose that the Lambda function successfully fulfilled the intent, and sent a message to convey to the user. Then Amazon Lex V2 sends that message in the response.</p>
    pub fn set_audio_stream(mut self, input: ::std::option::Option<::aws_smithy_http::byte_stream::ByteStream>) -> Self {
        self.audio_stream = input;
        self
    }
    /// <p>The prompt or statement to send to the user. This is based on the bot configuration and context. For example, if Amazon Lex V2 did not understand the user intent, it sends the <code>clarificationPrompt</code> configured for the bot. If the intent requires confirmation before taking the fulfillment action, it sends the <code>confirmationPrompt</code>. Another example: Suppose that the Lambda function successfully fulfilled the intent, and sent a message to convey to the user. Then Amazon Lex V2 sends that message in the response.</p>
    pub fn get_audio_stream(&self) -> &::std::option::Option<::aws_smithy_http::byte_stream::ByteStream> {
        &self.audio_stream
    }
    /// <p>The bot member that recognized the utterance.</p>
    pub fn recognized_bot_member(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.recognized_bot_member = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>The bot member that recognized the utterance.</p>
    pub fn set_recognized_bot_member(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.recognized_bot_member = input;
        self
    }
    /// <p>The bot member that recognized the utterance.</p>
    pub fn get_recognized_bot_member(&self) -> &::std::option::Option<::std::string::String> {
        &self.recognized_bot_member
    }
    pub(crate) fn _request_id(mut self, request_id: impl Into<String>) -> Self {
        self._request_id = Some(request_id.into());
        self
    }

    pub(crate) fn _set_request_id(&mut self, request_id: Option<String>) -> &mut Self {
        self._request_id = request_id;
        self
    }
    /// Consumes the builder and constructs a [`RecognizeUtteranceOutput`](crate::operation::recognize_utterance::RecognizeUtteranceOutput).
    pub fn build(self) -> crate::operation::recognize_utterance::RecognizeUtteranceOutput {
        crate::operation::recognize_utterance::RecognizeUtteranceOutput {
            input_mode: self.input_mode,
            content_type: self.content_type,
            messages: self.messages,
            interpretations: self.interpretations,
            session_state: self.session_state,
            request_attributes: self.request_attributes,
            session_id: self.session_id,
            input_transcript: self.input_transcript,
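            // `audio_stream` is not optional on the output; an unset stream falls back to an empty default ByteStream.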
            audio_stream: self.audio_stream.unwrap_or_default(),
            recognized_bot_member: self.recognized_bot_member,
            _request_id: self._request_id,
        }
    }
}
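
// A hedged usage sketch (not part of the generated API): after calling
// `recognize_utterance` on the Lex Runtime V2 client, the response fields can be read
// through the getters above, and the audio prompt can be drained from the ByteStream.
// The surrounding error handling and the availability of `ByteStream::collect` in this
// smithy-rs version are assumptions.
//
// async fn handle_response(
//     output: crate::operation::recognize_utterance::RecognizeUtteranceOutput,
// ) -> Result<(), Box<dyn std::error::Error>> {
//     // Compressed, base64-encoded fields need decoding before use (see the sketch above).
//     if let Some(transcript) = output.input_transcript() {
//         println!("encoded transcript: {}", transcript);
//     }
//     // Collect the streaming audio prompt into memory.
//     let audio = output.audio_stream.collect().await?.into_bytes();
//     println!("received {} bytes of audio", audio.len());
//     Ok(())
// }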