1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
pub struct DetectLabelsOutput {
    /// <p>An array of labels for the real-world objects detected. </p>
    pub labels: ::std::option::Option<::std::vec::Vec<crate::types::Label>>,
    /// <p>The value of <code>OrientationCorrection</code> is always null.</p>
    /// <p>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's orientation. Amazon Rekognition uses this orientation information to perform image correction. The bounding box coordinates are translated to represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata.</p>
    /// <p>Amazon Rekognition doesn’t perform image correction for images in .png format and .jpeg images without orientation information in the image Exif metadata. The bounding box coordinates aren't translated and represent the object locations before the image is rotated. </p>
    pub orientation_correction: ::std::option::Option<crate::types::OrientationCorrection>,
    /// <p>Version number of the label detection model that was used to detect labels.</p>
    pub label_model_version: ::std::option::Option<::std::string::String>,
    /// <p>Information about the properties of the input image, such as brightness, sharpness, contrast, and dominant colors.</p>
    pub image_properties: ::std::option::Option<crate::types::DetectLabelsImageProperties>,
    // Request ID captured from the service response; intentionally private and
    // exposed only through the `::aws_types::request_id::RequestId` impl below.
    _request_id: Option<String>,
}
impl DetectLabelsOutput {
    /// <p>An array of labels for the real-world objects detected. </p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.labels.is_none()`.
    pub fn labels(&self) -> &[crate::types::Label] {
        self.labels.as_deref().unwrap_or_default()
    }
    /// <p>The value of <code>OrientationCorrection</code> is always null.</p>
    /// <p>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's orientation. Amazon Rekognition uses this orientation information to perform image correction. The bounding box coordinates are translated to represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata.</p>
    /// <p>Amazon Rekognition doesn’t perform image correction for images in .png format and .jpeg images without orientation information in the image Exif metadata. The bounding box coordinates aren't translated and represent the object locations before the image is rotated. </p>
    pub fn orientation_correction(&self) -> ::std::option::Option<&crate::types::OrientationCorrection> {
        self.orientation_correction.as_ref()
    }
    /// <p>Version number of the label detection model that was used to detect labels.</p>
    pub fn label_model_version(&self) -> ::std::option::Option<&str> {
        self.label_model_version.as_deref()
    }
    /// <p>Information about the properties of the input image, such as brightness, sharpness, contrast, and dominant colors.</p>
    pub fn image_properties(&self) -> ::std::option::Option<&crate::types::DetectLabelsImageProperties> {
        self.image_properties.as_ref()
    }
}
impl ::aws_types::request_id::RequestId for DetectLabelsOutput {
    /// Returns the request ID recorded for this response, when one was captured.
    fn request_id(&self) -> Option<&str> {
        match self._request_id {
            Some(ref id) => Some(id.as_str()),
            None => None,
        }
    }
}
impl DetectLabelsOutput {
    /// Creates a new builder-style object to manufacture [`DetectLabelsOutput`](crate::operation::detect_labels::DetectLabelsOutput).
    pub fn builder() -> crate::operation::detect_labels::builders::DetectLabelsOutputBuilder {
        // The return type drives inference; this is the builder's derived Default.
        ::std::default::Default::default()
    }
}

/// A builder for [`DetectLabelsOutput`](crate::operation::detect_labels::DetectLabelsOutput).
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
pub struct DetectLabelsOutputBuilder {
    // Each field mirrors the corresponding optional field on `DetectLabelsOutput`
    // and is moved into it unchanged by `build()`.
    pub(crate) labels: ::std::option::Option<::std::vec::Vec<crate::types::Label>>,
    pub(crate) orientation_correction: ::std::option::Option<crate::types::OrientationCorrection>,
    pub(crate) label_model_version: ::std::option::Option<::std::string::String>,
    pub(crate) image_properties: ::std::option::Option<crate::types::DetectLabelsImageProperties>,
    // Request ID threaded through from the HTTP response; set via the crate-private
    // `_request_id`/`_set_request_id` methods.
    _request_id: Option<String>,
}
impl DetectLabelsOutputBuilder {
    /// Appends an item to `labels`.
    ///
    /// To override the contents of this collection use [`set_labels`](Self::set_labels).
    ///
    /// <p>An array of labels for the real-world objects detected. </p>
    pub fn labels(mut self, input: crate::types::Label) -> Self {
        // Lazily create the Vec on first append, then push in place.
        self.labels.get_or_insert_with(::std::vec::Vec::new).push(input);
        self
    }
    /// <p>An array of labels for the real-world objects detected. </p>
    pub fn set_labels(self, input: ::std::option::Option<::std::vec::Vec<crate::types::Label>>) -> Self {
        Self { labels: input, ..self }
    }
    /// <p>An array of labels for the real-world objects detected. </p>
    pub fn get_labels(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Label>> {
        &self.labels
    }
    /// <p>The value of <code>OrientationCorrection</code> is always null.</p>
    /// <p>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's orientation. Amazon Rekognition uses this orientation information to perform image correction. The bounding box coordinates are translated to represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata.</p>
    /// <p>Amazon Rekognition doesn’t perform image correction for images in .png format and .jpeg images without orientation information in the image Exif metadata. The bounding box coordinates aren't translated and represent the object locations before the image is rotated. </p>
    pub fn orientation_correction(self, input: crate::types::OrientationCorrection) -> Self {
        Self {
            orientation_correction: ::std::option::Option::Some(input),
            ..self
        }
    }
    /// <p>The value of <code>OrientationCorrection</code> is always null.</p>
    /// <p>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's orientation. Amazon Rekognition uses this orientation information to perform image correction. The bounding box coordinates are translated to represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata.</p>
    /// <p>Amazon Rekognition doesn’t perform image correction for images in .png format and .jpeg images without orientation information in the image Exif metadata. The bounding box coordinates aren't translated and represent the object locations before the image is rotated. </p>
    pub fn set_orientation_correction(self, input: ::std::option::Option<crate::types::OrientationCorrection>) -> Self {
        Self {
            orientation_correction: input,
            ..self
        }
    }
    /// <p>The value of <code>OrientationCorrection</code> is always null.</p>
    /// <p>If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's orientation. Amazon Rekognition uses this orientation information to perform image correction. The bounding box coordinates are translated to represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata.</p>
    /// <p>Amazon Rekognition doesn’t perform image correction for images in .png format and .jpeg images without orientation information in the image Exif metadata. The bounding box coordinates aren't translated and represent the object locations before the image is rotated. </p>
    pub fn get_orientation_correction(&self) -> &::std::option::Option<crate::types::OrientationCorrection> {
        &self.orientation_correction
    }
    /// <p>Version number of the label detection model that was used to detect labels.</p>
    pub fn label_model_version(self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        Self {
            label_model_version: ::std::option::Option::Some(input.into()),
            ..self
        }
    }
    /// <p>Version number of the label detection model that was used to detect labels.</p>
    pub fn set_label_model_version(self, input: ::std::option::Option<::std::string::String>) -> Self {
        Self {
            label_model_version: input,
            ..self
        }
    }
    /// <p>Version number of the label detection model that was used to detect labels.</p>
    pub fn get_label_model_version(&self) -> &::std::option::Option<::std::string::String> {
        &self.label_model_version
    }
    /// <p>Information about the properties of the input image, such as brightness, sharpness, contrast, and dominant colors.</p>
    pub fn image_properties(self, input: crate::types::DetectLabelsImageProperties) -> Self {
        Self {
            image_properties: ::std::option::Option::Some(input),
            ..self
        }
    }
    /// <p>Information about the properties of the input image, such as brightness, sharpness, contrast, and dominant colors.</p>
    pub fn set_image_properties(self, input: ::std::option::Option<crate::types::DetectLabelsImageProperties>) -> Self {
        Self {
            image_properties: input,
            ..self
        }
    }
    /// <p>Information about the properties of the input image, such as brightness, sharpness, contrast, and dominant colors.</p>
    pub fn get_image_properties(&self) -> &::std::option::Option<crate::types::DetectLabelsImageProperties> {
        &self.image_properties
    }
    // Crate-private: stores the request ID captured from the HTTP response.
    pub(crate) fn _request_id(self, request_id: impl Into<String>) -> Self {
        Self {
            _request_id: Some(request_id.into()),
            ..self
        }
    }

    // Crate-private: overwrites (or clears) the stored request ID in place.
    pub(crate) fn _set_request_id(&mut self, request_id: Option<String>) -> &mut Self {
        self._request_id = request_id;
        self
    }
    /// Consumes the builder and constructs a [`DetectLabelsOutput`](crate::operation::detect_labels::DetectLabelsOutput).
    pub fn build(self) -> crate::operation::detect_labels::DetectLabelsOutput {
        // Destructure so every field is moved out exactly once.
        let Self {
            labels,
            orientation_correction,
            label_model_version,
            image_properties,
            _request_id,
        } = self;
        crate::operation::detect_labels::DetectLabelsOutput {
            labels,
            orientation_correction,
            label_model_version,
            image_properties,
            _request_id,
        }
    }
}