1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
pub struct DetectToxicContentOutput {
    /// <p>Results of the content moderation analysis. Each entry in the results list contains a list of toxic content types identified in the text, along with a confidence score for each content type. The results list also includes a toxicity score for each entry in the results list.</p>
    pub result_list: ::std::option::Option<::std::vec::Vec<crate::types::ToxicLabels>>,
    // Service response request ID; private by design — callers read it through the
    // `::aws_types::request_id::RequestId` trait implemented below, not as a field.
    _request_id: Option<String>,
}
impl DetectToxicContentOutput {
    /// <p>Results of the content moderation analysis. Each entry in the results list contains a list of toxic content types identified in the text, along with a confidence score for each content type. The results list also includes a toxicity score for each entry in the results list.</p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.result_list.is_none()`.
    pub fn result_list(&self) -> &[crate::types::ToxicLabels] {
        // Borrow the inner vec as a slice when present; an absent field reads as empty.
        match &self.result_list {
            ::std::option::Option::Some(list) => list.as_slice(),
            ::std::option::Option::None => &[],
        }
    }
}
impl ::aws_types::request_id::RequestId for DetectToxicContentOutput {
    /// Returns the request ID recorded from the service response, if one was captured.
    fn request_id(&self) -> Option<&str> {
        self._request_id.as_ref().map(String::as_str)
    }
}
impl DetectToxicContentOutput {
    /// Creates a new builder-style object to manufacture [`DetectToxicContentOutput`](crate::operation::detect_toxic_content::DetectToxicContentOutput).
    pub fn builder() -> crate::operation::detect_toxic_content::builders::DetectToxicContentOutputBuilder {
        // The annotated return type drives inference; `Default` yields an empty builder.
        ::std::default::Default::default()
    }
}
/// A builder for [`DetectToxicContentOutput`](crate::operation::detect_toxic_content::DetectToxicContentOutput).
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
#[non_exhaustive]
pub struct DetectToxicContentOutputBuilder {
    // Accumulated analysis results; `None` until the first append or an explicit set.
    pub(crate) result_list: ::std::option::Option<::std::vec::Vec<crate::types::ToxicLabels>>,
    // Request ID carried through to the built output; set via the crate-private
    // `_request_id`/`_set_request_id` methods during response deserialization.
    _request_id: Option<String>,
}
impl DetectToxicContentOutputBuilder {
    /// Appends an item to `result_list`.
    ///
    /// To override the contents of this collection use [`set_result_list`](Self::set_result_list).
    ///
    /// <p>Results of the content moderation analysis. Each entry in the results list contains a list of toxic content types identified in the text, along with a confidence score for each content type. The results list also includes a toxicity score for each entry in the results list.</p>
    pub fn result_list(mut self, input: crate::types::ToxicLabels) -> Self {
        // Lazily materialize the vec on first append, then push in place.
        self.result_list.get_or_insert_with(::std::vec::Vec::new).push(input);
        self
    }
    /// <p>Results of the content moderation analysis. Each entry in the results list contains a list of toxic content types identified in the text, along with a confidence score for each content type. The results list also includes a toxicity score for each entry in the results list.</p>
    pub fn set_result_list(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::ToxicLabels>>) -> Self {
        // Wholesale replacement of the collection, including clearing it with `None`.
        self.result_list = input;
        self
    }
    /// <p>Results of the content moderation analysis. Each entry in the results list contains a list of toxic content types identified in the text, along with a confidence score for each content type. The results list also includes a toxicity score for each entry in the results list.</p>
    pub fn get_result_list(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::ToxicLabels>> {
        &self.result_list
    }
    // Crate-internal: record the request ID while building the response (consuming form).
    pub(crate) fn _request_id(mut self, request_id: impl Into<String>) -> Self {
        self._request_id = Some(request_id.into());
        self
    }
    // Crate-internal: record or clear the request ID on a mutable builder reference.
    pub(crate) fn _set_request_id(&mut self, request_id: Option<String>) -> &mut Self {
        self._request_id = request_id;
        self
    }
    /// Consumes the builder and constructs a [`DetectToxicContentOutput`](crate::operation::detect_toxic_content::DetectToxicContentOutput).
    pub fn build(self) -> crate::operation::detect_toxic_content::DetectToxicContentOutput {
        let Self { result_list, _request_id } = self;
        crate::operation::detect_toxic_content::DetectToxicContentOutput { result_list, _request_id }
    }
}