rusoto_elastic_inference/generated.rs

// =================================================================
//
//                           * WARNING *
//
//                    This file is generated!
//
//  Changes made to this file will be overwritten. If changes are
//  required to the generated code, the service_crategen project
//  must be updated to generate the changes.
//
// =================================================================

use std::error::Error;
use std::fmt;

use async_trait::async_trait;
use rusoto_core::credential::ProvideAwsCredentials;
use rusoto_core::region;
use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
use rusoto_core::{Client, RusotoError};

use rusoto_core::param::{Params, ServiceParams};
use rusoto_core::proto;
use rusoto_core::signature::SignedRequest;
#[allow(unused_imports)]
use serde::{Deserialize, Serialize};
use serde_json;
/// <p> The details of an Elastic Inference Accelerator type. </p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct AcceleratorType {
    /// <p> The name of the Elastic Inference Accelerator type. </p>
    #[serde(rename = "acceleratorTypeName")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub accelerator_type_name: Option<String>,
    /// <p> The memory information of the Elastic Inference Accelerator type. </p>
    #[serde(rename = "memoryInfo")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub memory_info: Option<MemoryInfo>,
    /// <p> The throughput information of the Elastic Inference Accelerator type. </p>
    #[serde(rename = "throughputInfo")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub throughput_info: Option<Vec<KeyValuePair>>,
}

/// <p> The offering for an Elastic Inference Accelerator type. </p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct AcceleratorTypeOffering {
    /// <p> The name of the Elastic Inference Accelerator type. </p>
    #[serde(rename = "acceleratorType")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub accelerator_type: Option<String>,
    /// <p> The location for the offering. It will return either the region, availability zone or availability zone id for the offering depending on the locationType value. </p>
    #[serde(rename = "location")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    /// <p> The location type for the offering. It can assume the following values: region: defines that the offering is at the regional level. availability-zone: defines that the offering is at the availability zone level. availability-zone-id: defines that the offering is at the availability zone level, defined by the availability zone id. </p>
    #[serde(rename = "locationType")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub location_type: Option<String>,
}

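/// A minimal construction sketch, added here for illustration rather than taken
/// from the generated AWS documentation. The accelerator type name is an assumed
/// example value, and the doc test assumes the crate re-exports the generated
/// types at its root, as rusoto service crates normally do.
///
/// ```rust,no_run
/// use rusoto_elastic_inference::DescribeAcceleratorOfferingsRequest;
///
/// let request = DescribeAcceleratorOfferingsRequest {
///     // Restrict the query to one accelerator type; `None` means all types.
///     accelerator_types: Some(vec!["eia2.medium".to_string()]),
///     // One of "region", "availability-zone", or "availability-zone-id".
///     location_type: "region".to_string(),
/// };
/// ```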
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DescribeAcceleratorOfferingsRequest {
    /// <p> The list of accelerator types to describe. </p>
    #[serde(rename = "acceleratorTypes")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub accelerator_types: Option<Vec<String>>,
    /// <p> The location type that you want to describe accelerator type offerings for. It can assume the following values: region: will return the accelerator type offering at the regional level. availability-zone: will return the accelerator type offering at the availability zone level. availability-zone-id: will return the accelerator type offering at the availability zone level returning the availability zone id. </p>
    #[serde(rename = "locationType")]
    pub location_type: String,
}

#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeAcceleratorOfferingsResponse {
    /// <p> The list of accelerator type offerings for a specific location. </p>
    #[serde(rename = "acceleratorTypeOfferings")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub accelerator_type_offerings: Option<Vec<AcceleratorTypeOffering>>,
}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DescribeAcceleratorTypesRequest {}

#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeAcceleratorTypesResponse {
    /// <p> The available accelerator types. </p>
    #[serde(rename = "acceleratorTypes")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub accelerator_types: Option<Vec<AcceleratorType>>,
}

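/// A minimal construction sketch, added for illustration and not part of the
/// generated AWS documentation. It filters by instance ID as described on the
/// `filters` field below; the instance ID shown is a placeholder, and the doc
/// test assumes the crate root re-exports these generated types.
///
/// ```rust,no_run
/// use rusoto_elastic_inference::{DescribeAcceleratorsRequest, Filter};
///
/// let request = DescribeAcceleratorsRequest {
///     accelerator_ids: None,
///     // Valid filter names are "accelerator-types" and "instance-id".
///     filters: Some(vec![Filter {
///         name: Some("instance-id".to_string()),
///         values: Some(vec!["i-0123456789abcdef0".to_string()]),
///     }]),
///     // Page size; a NextToken is returned when more results are available.
///     max_results: Some(100),
///     next_token: None,
/// };
/// ```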
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DescribeAcceleratorsRequest {
    /// <p> The IDs of the accelerators to describe. </p>
    #[serde(rename = "acceleratorIds")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub accelerator_ids: Option<Vec<String>>,
    /// <p> One or more filters. Filter names and values are case-sensitive. Valid filter names are: accelerator-types: can provide a list of accelerator type names to filter for. instance-id: can provide a list of EC2 instance ids to filter for. </p>
    #[serde(rename = "filters")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filters: Option<Vec<Filter>>,
    /// <p> The total number of items to return in the command's output. If the total number of items available is more than the value specified, a NextToken is provided in the command's output. To resume pagination, provide the NextToken value in the starting-token argument of a subsequent command. Do not use the NextToken response element directly outside of the AWS CLI. </p>
    #[serde(rename = "maxResults")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_results: Option<i64>,
    /// <p> A token to specify where to start paginating. This is the NextToken from a previously truncated response. </p>
    #[serde(rename = "nextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
}

#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeAcceleratorsResponse {
    /// <p> The details of the Elastic Inference Accelerators. </p>
    #[serde(rename = "acceleratorSet")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub accelerator_set: Option<Vec<ElasticInferenceAccelerator>>,
    /// <p> A token to specify where to start paginating. This is the NextToken from a previously truncated response. </p>
    #[serde(rename = "nextToken")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_token: Option<String>,
}

/// <p> The details of an Elastic Inference Accelerator. </p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ElasticInferenceAccelerator {
    /// <p> The health of the Elastic Inference Accelerator. </p>
    #[serde(rename = "acceleratorHealth")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub accelerator_health: Option<ElasticInferenceAcceleratorHealth>,
    /// <p> The ID of the Elastic Inference Accelerator. </p>
    #[serde(rename = "acceleratorId")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub accelerator_id: Option<String>,
    /// <p> The type of the Elastic Inference Accelerator. </p>
    #[serde(rename = "acceleratorType")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub accelerator_type: Option<String>,
    /// <p> The ARN of the resource that the Elastic Inference Accelerator is attached to. </p>
    #[serde(rename = "attachedResource")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub attached_resource: Option<String>,
    /// <p> The availability zone where the Elastic Inference Accelerator is present. </p>
    #[serde(rename = "availabilityZone")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub availability_zone: Option<String>,
}

/// <p> The health details of an Elastic Inference Accelerator. </p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ElasticInferenceAcceleratorHealth {
    /// <p> The health status of the Elastic Inference Accelerator. </p>
    #[serde(rename = "status")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
}

/// <p> A filter expression for the Elastic Inference Accelerator list. </p>
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct Filter {
    /// <p> The filter name for the Elastic Inference Accelerator list. It can assume the following values: accelerator-type: the type of Elastic Inference Accelerator to filter for. instance-id: an EC2 instance id to filter for. </p>
    #[serde(rename = "name")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    /// <p> The values for the filter of the Elastic Inference Accelerator list. </p>
    #[serde(rename = "values")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub values: Option<Vec<String>>,
}

/// <p> A throughput entry for an Elastic Inference Accelerator type. </p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct KeyValuePair {
    /// <p> The throughput value of the Elastic Inference Accelerator type. It can assume the following values: TFLOPS16bit: the throughput expressed in 16bit TeraFLOPS. TFLOPS32bit: the throughput expressed in 32bit TeraFLOPS. </p>
    #[serde(rename = "key")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub key: Option<String>,
    /// <p> The throughput value of the Elastic Inference Accelerator type. </p>
    #[serde(rename = "value")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub value: Option<i64>,
}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct ListTagsForResourceRequest {
    /// <p> The ARN of the Elastic Inference Accelerator to list the tags for. </p>
    #[serde(rename = "resourceArn")]
    pub resource_arn: String,
}

#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListTagsForResourceResult {
    /// <p> The tags of the Elastic Inference Accelerator. </p>
    #[serde(rename = "tags")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tags: Option<::std::collections::HashMap<String, String>>,
}

/// <p> The memory information of an Elastic Inference Accelerator type. </p>
#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct MemoryInfo {
    /// <p> The size in mebibytes of the Elastic Inference Accelerator type. </p>
    #[serde(rename = "sizeInMiB")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size_in_mi_b: Option<i64>,
}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct TagResourceRequest {
    /// <p> The ARN of the Elastic Inference Accelerator to tag. </p>
    #[serde(rename = "resourceArn")]
    pub resource_arn: String,
    /// <p> The tags to add to the Elastic Inference Accelerator. </p>
    #[serde(rename = "tags")]
    pub tags: ::std::collections::HashMap<String, String>,
}

#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct TagResourceResult {}

#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct UntagResourceRequest {
    /// <p> The ARN of the Elastic Inference Accelerator to untag. </p>
    #[serde(rename = "resourceArn")]
    pub resource_arn: String,
    /// <p> The list of tags to remove from the Elastic Inference Accelerator. </p>
    #[serde(rename = "tagKeys")]
    pub tag_keys: Vec<String>,
}

#[derive(Clone, Debug, Default, Deserialize, PartialEq)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UntagResourceResult {}

/// Errors returned by DescribeAcceleratorOfferings
#[derive(Debug, PartialEq)]
pub enum DescribeAcceleratorOfferingsError {
    /// <p> Raised when a malformed input has been provided to the API. </p>
    BadRequest(String),
    /// <p> Raised when an unexpected error occurred during request processing. </p>
    InternalServer(String),
    /// <p> Raised when the requested resource cannot be found. </p>
    ResourceNotFound(String),
}

impl DescribeAcceleratorOfferingsError {
    pub fn from_response(
        res: BufferedHttpResponse,
    ) -> RusotoError<DescribeAcceleratorOfferingsError> {
        if let Some(err) = proto::json::Error::parse_rest(&res) {
            match err.typ.as_str() {
                "BadRequestException" => {
                    return RusotoError::Service(DescribeAcceleratorOfferingsError::BadRequest(
                        err.msg,
                    ))
                }
                "InternalServerException" => {
                    return RusotoError::Service(DescribeAcceleratorOfferingsError::InternalServer(
                        err.msg,
                    ))
                }
                "ResourceNotFoundException" => {
                    return RusotoError::Service(
                        DescribeAcceleratorOfferingsError::ResourceNotFound(err.msg),
                    )
                }
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for DescribeAcceleratorOfferingsError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            DescribeAcceleratorOfferingsError::BadRequest(ref cause) => write!(f, "{}", cause),
            DescribeAcceleratorOfferingsError::InternalServer(ref cause) => write!(f, "{}", cause),
            DescribeAcceleratorOfferingsError::ResourceNotFound(ref cause) => {
                write!(f, "{}", cause)
            }
        }
    }
}
impl Error for DescribeAcceleratorOfferingsError {}
/// Errors returned by DescribeAcceleratorTypes
#[derive(Debug, PartialEq)]
pub enum DescribeAcceleratorTypesError {
    /// <p> Raised when an unexpected error occurred during request processing. </p>
    InternalServer(String),
}

impl DescribeAcceleratorTypesError {
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeAcceleratorTypesError> {
        if let Some(err) = proto::json::Error::parse_rest(&res) {
            match err.typ.as_str() {
                "InternalServerException" => {
                    return RusotoError::Service(DescribeAcceleratorTypesError::InternalServer(
                        err.msg,
                    ))
                }
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for DescribeAcceleratorTypesError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            DescribeAcceleratorTypesError::InternalServer(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for DescribeAcceleratorTypesError {}
/// Errors returned by DescribeAccelerators
#[derive(Debug, PartialEq)]
pub enum DescribeAcceleratorsError {
    /// <p> Raised when a malformed input has been provided to the API. </p>
    BadRequest(String),
    /// <p> Raised when an unexpected error occurred during request processing. </p>
    InternalServer(String),
    /// <p> Raised when the requested resource cannot be found. </p>
    ResourceNotFound(String),
}

impl DescribeAcceleratorsError {
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeAcceleratorsError> {
        if let Some(err) = proto::json::Error::parse_rest(&res) {
            match err.typ.as_str() {
                "BadRequestException" => {
                    return RusotoError::Service(DescribeAcceleratorsError::BadRequest(err.msg))
                }
                "InternalServerException" => {
                    return RusotoError::Service(DescribeAcceleratorsError::InternalServer(err.msg))
                }
                "ResourceNotFoundException" => {
                    return RusotoError::Service(DescribeAcceleratorsError::ResourceNotFound(
                        err.msg,
                    ))
                }
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for DescribeAcceleratorsError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            DescribeAcceleratorsError::BadRequest(ref cause) => write!(f, "{}", cause),
            DescribeAcceleratorsError::InternalServer(ref cause) => write!(f, "{}", cause),
            DescribeAcceleratorsError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for DescribeAcceleratorsError {}
/// Errors returned by ListTagsForResource
#[derive(Debug, PartialEq)]
pub enum ListTagsForResourceError {
    /// <p> Raised when a malformed input has been provided to the API. </p>
    BadRequest(String),
    /// <p> Raised when an unexpected error occurred during request processing. </p>
    InternalServer(String),
    /// <p> Raised when the requested resource cannot be found. </p>
    ResourceNotFound(String),
}

impl ListTagsForResourceError {
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListTagsForResourceError> {
        if let Some(err) = proto::json::Error::parse_rest(&res) {
            match err.typ.as_str() {
                "BadRequestException" => {
                    return RusotoError::Service(ListTagsForResourceError::BadRequest(err.msg))
                }
                "InternalServerException" => {
                    return RusotoError::Service(ListTagsForResourceError::InternalServer(err.msg))
                }
                "ResourceNotFoundException" => {
                    return RusotoError::Service(ListTagsForResourceError::ResourceNotFound(
                        err.msg,
                    ))
                }
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for ListTagsForResourceError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            ListTagsForResourceError::BadRequest(ref cause) => write!(f, "{}", cause),
            ListTagsForResourceError::InternalServer(ref cause) => write!(f, "{}", cause),
            ListTagsForResourceError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for ListTagsForResourceError {}
/// Errors returned by TagResource
#[derive(Debug, PartialEq)]
pub enum TagResourceError {
    /// <p> Raised when a malformed input has been provided to the API. </p>
    BadRequest(String),
    /// <p> Raised when an unexpected error occurred during request processing. </p>
    InternalServer(String),
    /// <p> Raised when the requested resource cannot be found. </p>
    ResourceNotFound(String),
}

impl TagResourceError {
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<TagResourceError> {
        if let Some(err) = proto::json::Error::parse_rest(&res) {
            match err.typ.as_str() {
                "BadRequestException" => {
                    return RusotoError::Service(TagResourceError::BadRequest(err.msg))
                }
                "InternalServerException" => {
                    return RusotoError::Service(TagResourceError::InternalServer(err.msg))
                }
                "ResourceNotFoundException" => {
                    return RusotoError::Service(TagResourceError::ResourceNotFound(err.msg))
                }
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for TagResourceError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            TagResourceError::BadRequest(ref cause) => write!(f, "{}", cause),
            TagResourceError::InternalServer(ref cause) => write!(f, "{}", cause),
            TagResourceError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for TagResourceError {}
/// Errors returned by UntagResource
#[derive(Debug, PartialEq)]
pub enum UntagResourceError {
    /// <p> Raised when a malformed input has been provided to the API. </p>
    BadRequest(String),
    /// <p> Raised when an unexpected error occurred during request processing. </p>
    InternalServer(String),
    /// <p> Raised when the requested resource cannot be found. </p>
    ResourceNotFound(String),
}

impl UntagResourceError {
    pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UntagResourceError> {
        if let Some(err) = proto::json::Error::parse_rest(&res) {
            match err.typ.as_str() {
                "BadRequestException" => {
                    return RusotoError::Service(UntagResourceError::BadRequest(err.msg))
                }
                "InternalServerException" => {
                    return RusotoError::Service(UntagResourceError::InternalServer(err.msg))
                }
                "ResourceNotFoundException" => {
                    return RusotoError::Service(UntagResourceError::ResourceNotFound(err.msg))
                }
                "ValidationException" => return RusotoError::Validation(err.msg),
                _ => {}
            }
        }
        RusotoError::Unknown(res)
    }
}
impl fmt::Display for UntagResourceError {
    #[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            UntagResourceError::BadRequest(ref cause) => write!(f, "{}", cause),
            UntagResourceError::InternalServer(ref cause) => write!(f, "{}", cause),
            UntagResourceError::ResourceNotFound(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for UntagResourceError {}
/// Trait representing the capabilities of the Amazon Elastic Inference API. Amazon Elastic Inference clients implement this trait.
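///
/// The trait makes it possible to write code that is generic over the real
/// client or a test double. A minimal sketch, not part of the generated AWS
/// documentation: `count_accelerators` is a hypothetical helper, and the doc
/// test assumes the crate root re-exports these generated items and that
/// `rusoto_core` is available as a dependency.
///
/// ```rust,no_run
/// use rusoto_core::RusotoError;
/// use rusoto_elastic_inference::{
///     DescribeAcceleratorsError, DescribeAcceleratorsRequest, ElasticInference,
/// };
///
/// // Works with ElasticInferenceClient or any mock implementing the trait.
/// async fn count_accelerators<C: ElasticInference>(
///     client: &C,
/// ) -> Result<usize, RusotoError<DescribeAcceleratorsError>> {
///     let response = client
///         .describe_accelerators(DescribeAcceleratorsRequest::default())
///         .await?;
///     Ok(response.accelerator_set.map_or(0, |set| set.len()))
/// }
/// ```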
#[async_trait]
pub trait ElasticInference {
    /// <p> Describes the locations in which a given accelerator type or set of types is present in a given region. </p>
    async fn describe_accelerator_offerings(
        &self,
        input: DescribeAcceleratorOfferingsRequest,
    ) -> Result<DescribeAcceleratorOfferingsResponse, RusotoError<DescribeAcceleratorOfferingsError>>;

    /// <p> Describes the accelerator types available in a given region, as well as their characteristics, such as memory and throughput. </p>
    async fn describe_accelerator_types(
        &self,
    ) -> Result<DescribeAcceleratorTypesResponse, RusotoError<DescribeAcceleratorTypesError>>;

    /// <p> Describes information over a provided set of accelerators belonging to an account. </p>
    async fn describe_accelerators(
        &self,
        input: DescribeAcceleratorsRequest,
    ) -> Result<DescribeAcceleratorsResponse, RusotoError<DescribeAcceleratorsError>>;

    /// <p> Returns all tags of an Elastic Inference Accelerator. </p>
    async fn list_tags_for_resource(
        &self,
        input: ListTagsForResourceRequest,
    ) -> Result<ListTagsForResourceResult, RusotoError<ListTagsForResourceError>>;

    /// <p> Adds the specified tags to an Elastic Inference Accelerator. </p>
    async fn tag_resource(
        &self,
        input: TagResourceRequest,
    ) -> Result<TagResourceResult, RusotoError<TagResourceError>>;

    /// <p> Removes the specified tags from an Elastic Inference Accelerator. </p>
    async fn untag_resource(
        &self,
        input: UntagResourceRequest,
    ) -> Result<UntagResourceResult, RusotoError<UntagResourceError>>;
}
/// A client for the Amazon Elastic Inference API.
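///
/// A usage sketch, added for illustration. `Region::UsEast1` is an assumed
/// example value, and the call must run inside an async context such as a
/// tokio runtime; the doc test assumes the crate root re-exports these items.
///
/// ```rust,no_run
/// use rusoto_core::Region;
/// use rusoto_elastic_inference::{ElasticInference, ElasticInferenceClient};
///
/// async fn list_accelerator_types() {
///     // Uses the default credentials provider chain and the shared HTTP client.
///     let client = ElasticInferenceClient::new(Region::UsEast1);
///     match client.describe_accelerator_types().await {
///         Ok(response) => println!("{:?}", response.accelerator_types),
///         Err(error) => eprintln!("DescribeAcceleratorTypes failed: {}", error),
///     }
/// }
/// ```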
#[derive(Clone)]
pub struct ElasticInferenceClient {
    client: Client,
    region: region::Region,
}

impl ElasticInferenceClient {
    /// Creates a client backed by the default tokio event loop.
    ///
    /// The client will use the default credentials provider and tls client.
    pub fn new(region: region::Region) -> ElasticInferenceClient {
        ElasticInferenceClient {
            client: Client::shared(),
            region,
        }
    }

    pub fn new_with<P, D>(
        request_dispatcher: D,
        credentials_provider: P,
        region: region::Region,
    ) -> ElasticInferenceClient
    where
        P: ProvideAwsCredentials + Send + Sync + 'static,
        D: DispatchSignedRequest + Send + Sync + 'static,
    {
        ElasticInferenceClient {
            client: Client::new_with(credentials_provider, request_dispatcher),
            region,
        }
    }

    pub fn new_with_client(client: Client, region: region::Region) -> ElasticInferenceClient {
        ElasticInferenceClient { client, region }
    }
}

#[async_trait]
impl ElasticInference for ElasticInferenceClient {
    /// <p> Describes the locations in which a given accelerator type or set of types is present in a given region. </p>
    #[allow(unused_mut)]
    async fn describe_accelerator_offerings(
        &self,
        input: DescribeAcceleratorOfferingsRequest,
    ) -> Result<DescribeAcceleratorOfferingsResponse, RusotoError<DescribeAcceleratorOfferingsError>>
    {
        let request_uri = "/describe-accelerator-offerings";

        let mut request =
            SignedRequest::new("POST", "elastic-inference", &self.region, &request_uri);
        request.set_content_type("application/x-amz-json-1.1".to_owned());

        request.set_endpoint_prefix("api.elastic-inference".to_string());
        let encoded = Some(serde_json::to_vec(&input).unwrap());
        request.set_payload(encoded);

        let mut response = self
            .client
            .sign_and_dispatch(request)
            .await
            .map_err(RusotoError::from)?;
        if response.status.is_success() {
            let mut response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
            let result = proto::json::ResponsePayload::new(&response)
                .deserialize::<DescribeAcceleratorOfferingsResponse, _>()?;

            Ok(result)
        } else {
            let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
            Err(DescribeAcceleratorOfferingsError::from_response(response))
        }
    }

    /// <p> Describes the accelerator types available in a given region, as well as their characteristics, such as memory and throughput. </p>
    #[allow(unused_mut)]
    async fn describe_accelerator_types(
        &self,
    ) -> Result<DescribeAcceleratorTypesResponse, RusotoError<DescribeAcceleratorTypesError>> {
        let request_uri = "/describe-accelerator-types";

        let mut request =
            SignedRequest::new("GET", "elastic-inference", &self.region, &request_uri);
        request.set_content_type("application/x-amz-json-1.1".to_owned());

        request.set_endpoint_prefix("api.elastic-inference".to_string());

        let mut response = self
            .client
            .sign_and_dispatch(request)
            .await
            .map_err(RusotoError::from)?;
        if response.status.is_success() {
            let mut response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
            let result = proto::json::ResponsePayload::new(&response)
                .deserialize::<DescribeAcceleratorTypesResponse, _>()?;

            Ok(result)
        } else {
            let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
            Err(DescribeAcceleratorTypesError::from_response(response))
        }
    }

    /// <p> Describes information over a provided set of accelerators belonging to an account. </p>
    #[allow(unused_mut)]
    async fn describe_accelerators(
        &self,
        input: DescribeAcceleratorsRequest,
    ) -> Result<DescribeAcceleratorsResponse, RusotoError<DescribeAcceleratorsError>> {
        let request_uri = "/describe-accelerators";

        let mut request =
            SignedRequest::new("POST", "elastic-inference", &self.region, &request_uri);
        request.set_content_type("application/x-amz-json-1.1".to_owned());

        request.set_endpoint_prefix("api.elastic-inference".to_string());
        let encoded = Some(serde_json::to_vec(&input).unwrap());
        request.set_payload(encoded);

        let mut response = self
            .client
            .sign_and_dispatch(request)
            .await
            .map_err(RusotoError::from)?;
        if response.status.is_success() {
            let mut response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
            let result = proto::json::ResponsePayload::new(&response)
                .deserialize::<DescribeAcceleratorsResponse, _>()?;

            Ok(result)
        } else {
            let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
            Err(DescribeAcceleratorsError::from_response(response))
        }
    }

    /// <p> Returns all tags of an Elastic Inference Accelerator. </p>
    #[allow(unused_mut)]
    async fn list_tags_for_resource(
        &self,
        input: ListTagsForResourceRequest,
    ) -> Result<ListTagsForResourceResult, RusotoError<ListTagsForResourceError>> {
        let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);

        let mut request =
            SignedRequest::new("GET", "elastic-inference", &self.region, &request_uri);
        request.set_content_type("application/x-amz-json-1.1".to_owned());

        request.set_endpoint_prefix("api.elastic-inference".to_string());

        let mut response = self
            .client
            .sign_and_dispatch(request)
            .await
            .map_err(RusotoError::from)?;
        if response.status.is_success() {
            let mut response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
            let result = proto::json::ResponsePayload::new(&response)
                .deserialize::<ListTagsForResourceResult, _>()?;

            Ok(result)
        } else {
            let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
            Err(ListTagsForResourceError::from_response(response))
        }
    }

    /// <p> Adds the specified tags to an Elastic Inference Accelerator. </p>
    #[allow(unused_mut)]
    async fn tag_resource(
        &self,
        input: TagResourceRequest,
    ) -> Result<TagResourceResult, RusotoError<TagResourceError>> {
        let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);

        let mut request =
            SignedRequest::new("POST", "elastic-inference", &self.region, &request_uri);
        request.set_content_type("application/x-amz-json-1.1".to_owned());

        request.set_endpoint_prefix("api.elastic-inference".to_string());
        let encoded = Some(serde_json::to_vec(&input).unwrap());
        request.set_payload(encoded);

        let mut response = self
            .client
            .sign_and_dispatch(request)
            .await
            .map_err(RusotoError::from)?;
        if response.status.is_success() {
            let mut response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
            let result = proto::json::ResponsePayload::new(&response)
                .deserialize::<TagResourceResult, _>()?;

            Ok(result)
        } else {
            let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
            Err(TagResourceError::from_response(response))
        }
    }

    /// <p> Removes the specified tags from an Elastic Inference Accelerator. </p>
    #[allow(unused_mut)]
    async fn untag_resource(
        &self,
        input: UntagResourceRequest,
    ) -> Result<UntagResourceResult, RusotoError<UntagResourceError>> {
        let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);

        let mut request =
            SignedRequest::new("DELETE", "elastic-inference", &self.region, &request_uri);
        request.set_content_type("application/x-amz-json-1.1".to_owned());

        request.set_endpoint_prefix("api.elastic-inference".to_string());

        let mut params = Params::new();
        for item in input.tag_keys.iter() {
            params.put("tagKeys", item);
        }
        request.set_params(params);

        let mut response = self
            .client
            .sign_and_dispatch(request)
            .await
            .map_err(RusotoError::from)?;
        if response.status.is_success() {
            let mut response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
            let result = proto::json::ResponsePayload::new(&response)
                .deserialize::<UntagResourceResult, _>()?;

            Ok(result)
        } else {
            let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
            Err(UntagResourceError::from_response(response))
        }
    }
}