aws_sdk_lookoutvision/operation/start_model/builders.rs

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::start_model::_start_model_output::StartModelOutputBuilder;

pub use crate::operation::start_model::_start_model_input::StartModelInputBuilder;

impl crate::operation::start_model::builders::StartModelInputBuilder {
    /// Sends a request with this input using the given client.
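    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming an already-configured client; the project
    /// name and model version below are placeholders:
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_lookoutvision::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_lookoutvision::operation::start_model::builders::StartModelInputBuilder;
    ///
    /// let output = StartModelInputBuilder::default()
    ///     .project_name("my-project") // placeholder project name
    ///     .model_version("1")         // placeholder model version
    ///     .min_inference_units(1)
    ///     .send_with(client)
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```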
    pub async fn send_with(
        self,
        client: &crate::Client,
    ) -> ::std::result::Result<
        crate::operation::start_model::StartModelOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::start_model::StartModelError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let mut fluent_builder = client.start_model();
        fluent_builder.inner = self;
        fluent_builder.send().await
    }
}
/// Fluent builder constructing a request to `StartModel`.
///
/// <p>Starts running the specified version of an Amazon Lookout for Vision model. Starting a model takes a while to complete. To check the current state of the model, use <code>DescribeModel</code>.</p>
/// <p>A model is ready to use when its status is <code>HOSTED</code>.</p>
/// <p>Once the model is running, you can detect anomalies in new images by calling <code>DetectAnomalies</code>.</p><note>
/// <p>You are charged for the amount of time that the model is running. To stop a running model, call <code>StopModel</code>.</p>
/// </note>
/// <p>This operation requires permissions to perform the <code>lookoutvision:StartModel</code> operation.</p>
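///
/// # Examples
///
/// A minimal sketch of starting a model with the fluent builder, assuming a
/// configured client; the project name and model version are placeholders:
///
/// ```no_run
/// # async fn example(client: &aws_sdk_lookoutvision::Client) -> Result<(), Box<dyn std::error::Error>> {
/// let output = client
///     .start_model()
///     .project_name("my-project") // placeholder
///     .model_version("1")         // placeholder
///     .min_inference_units(1)
///     .send()
///     .await?;
/// println!("hosting status: {:?}", output.status());
/// # Ok(())
/// # }
/// ```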
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct StartModelFluentBuilder {
    handle: ::std::sync::Arc<crate::client::Handle>,
    inner: crate::operation::start_model::builders::StartModelInputBuilder,
    config_override: ::std::option::Option<crate::config::Builder>,
}
impl
    crate::client::customize::internal::CustomizableSend<
        crate::operation::start_model::StartModelOutput,
        crate::operation::start_model::StartModelError,
    > for StartModelFluentBuilder
{
    fn send(
        self,
        config_override: crate::config::Builder,
    ) -> crate::client::customize::internal::BoxFuture<
        crate::client::customize::internal::SendResult<
            crate::operation::start_model::StartModelOutput,
            crate::operation::start_model::StartModelError,
        >,
    > {
        ::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
    }
}
impl StartModelFluentBuilder {
    /// Creates a new `StartModelFluentBuilder`.
    pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
        Self {
            handle,
            inner: ::std::default::Default::default(),
            config_override: ::std::option::Option::None,
        }
    }
    /// Access the StartModel input as a reference.
    pub fn as_input(&self) -> &crate::operation::start_model::builders::StartModelInputBuilder {
        &self.inner
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
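    ///
    /// # Examples
    ///
    /// A sketch of matching on the error after a failed call; it assumes a
    /// fluent builder whose required fields are already set:
    ///
    /// ```no_run
    /// # async fn example(builder: aws_sdk_lookoutvision::operation::start_model::builders::StartModelFluentBuilder) {
    /// match builder.send().await {
    ///     Ok(output) => println!("hosting status: {:?}", output.status()),
    ///     Err(err) => {
    ///         // `into_service_error` converts the `SdkError` into the modeled
    ///         // `StartModelError` so its variants can be inspected.
    ///         eprintln!("StartModel failed: {}", err.into_service_error());
    ///     }
    /// }
    /// # }
    /// ```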
    pub async fn send(
        self,
    ) -> ::std::result::Result<
        crate::operation::start_model::StartModelOutput,
        ::aws_smithy_runtime_api::client::result::SdkError<
            crate::operation::start_model::StartModelError,
            ::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
        >,
    > {
        let input = self
            .inner
            .build()
            .map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
        let runtime_plugins = crate::operation::start_model::StartModel::operation_runtime_plugins(
            self.handle.runtime_plugins.clone(),
            &self.handle.conf,
            self.config_override,
        );
        crate::operation::start_model::StartModel::orchestrate(&runtime_plugins, input).await
    }

    /// Consumes this builder, creating a customizable operation that can be modified before being sent.
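    ///
    /// # Examples
    ///
    /// A sketch of mutating the HTTP request before it is sent, assuming a
    /// builder whose required fields are already set; the header is illustrative:
    ///
    /// ```no_run
    /// # async fn example(builder: aws_sdk_lookoutvision::operation::start_model::builders::StartModelFluentBuilder) -> Result<(), Box<dyn std::error::Error>> {
    /// let output = builder
    ///     .customize()
    ///     .mutate_request(|req| {
    ///         // Attach a custom header to the outgoing request.
    ///         req.headers_mut().insert("x-example-header", "demo");
    ///     })
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```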
    pub fn customize(
        self,
    ) -> crate::client::customize::CustomizableOperation<
        crate::operation::start_model::StartModelOutput,
        crate::operation::start_model::StartModelError,
        Self,
    > {
        crate::client::customize::CustomizableOperation::new(self)
    }
    pub(crate) fn config_override(mut self, config_override: impl ::std::convert::Into<crate::config::Builder>) -> Self {
        self.set_config_override(::std::option::Option::Some(config_override.into()));
        self
    }

    pub(crate) fn set_config_override(&mut self, config_override: ::std::option::Option<crate::config::Builder>) -> &mut Self {
        self.config_override = config_override;
        self
    }
    /// <p>The name of the project that contains the model that you want to start.</p>
    pub fn project_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.project_name(input.into());
        self
    }
    /// <p>The name of the project that contains the model that you want to start.</p>
    pub fn set_project_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_project_name(input);
        self
    }
    /// <p>The name of the project that contains the model that you want to start.</p>
    pub fn get_project_name(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_project_name()
    }
    /// <p>The version of the model that you want to start.</p>
    pub fn model_version(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.model_version(input.into());
        self
    }
    /// <p>The version of the model that you want to start.</p>
    pub fn set_model_version(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_model_version(input);
        self
    }
    /// <p>The version of the model that you want to start.</p>
    pub fn get_model_version(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_model_version()
    }
    /// <p>The minimum number of inference units to use. A single inference unit represents 1 hour of processing. Use a higher number to increase the TPS throughput of your model. You are charged for the number of inference units that you use.</p>
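    ///
    /// # Examples
    ///
    /// A sketch of a capacity configuration: provision one inference unit and
    /// allow auto-scaling up to five (the values are illustrative):
    ///
    /// ```no_run
    /// # fn example(builder: aws_sdk_lookoutvision::operation::start_model::builders::StartModelFluentBuilder) {
    /// let builder = builder
    ///     .min_inference_units(1)  // baseline capacity, billed per unit-hour
    ///     .max_inference_units(5); // upper bound for auto-scaling
    /// # let _ = builder;
    /// # }
    /// ```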
    pub fn min_inference_units(mut self, input: i32) -> Self {
        self.inner = self.inner.min_inference_units(input);
        self
    }
    /// <p>The minimum number of inference units to use. A single inference unit represents 1 hour of processing. Use a higher number to increase the TPS throughput of your model. You are charged for the number of inference units that you use.</p>
    pub fn set_min_inference_units(mut self, input: ::std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_min_inference_units(input);
        self
    }
    /// <p>The minimum number of inference units to use. A single inference unit represents 1 hour of processing. Use a higher number to increase the TPS throughput of your model. You are charged for the number of inference units that you use.</p>
    pub fn get_min_inference_units(&self) -> &::std::option::Option<i32> {
        self.inner.get_min_inference_units()
    }
    /// <p>ClientToken is an idempotency token that ensures a call to <code>StartModel</code> completes only once. You choose the value to pass. For example, an issue might prevent you from getting a response from <code>StartModel</code>. In this case, safely retry your call to <code>StartModel</code> by using the same <code>ClientToken</code> parameter value.</p>
    /// <p>If you don't supply a value for <code>ClientToken</code>, the AWS SDK you are using inserts a value for you. This prevents retries after a network error from making multiple start requests. You'll need to provide your own value for other use cases.</p>
    /// <p>An error occurs if the other input parameters are not the same as in the first request. Using a different value for <code>ClientToken</code> is considered a new call to <code>StartModel</code>. An idempotency token is active for 8 hours.</p>
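    ///
    /// # Examples
    ///
    /// A sketch of supplying an explicit token so that an application-level
    /// retry reuses the same idempotency key (the token value is a placeholder):
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_lookoutvision::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// let output = client
    ///     .start_model()
    ///     .project_name("my-project") // placeholder
    ///     .model_version("1")         // placeholder
    ///     .min_inference_units(1)
    ///     .client_token("start-model-token-0001") // reuse this exact value on retry
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```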
    pub fn client_token(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.client_token(input.into());
        self
    }
    /// <p>ClientToken is an idempotency token that ensures a call to <code>StartModel</code> completes only once. You choose the value to pass. For example, an issue might prevent you from getting a response from <code>StartModel</code>. In this case, safely retry your call to <code>StartModel</code> by using the same <code>ClientToken</code> parameter value.</p>
    /// <p>If you don't supply a value for <code>ClientToken</code>, the AWS SDK you are using inserts a value for you. This prevents retries after a network error from making multiple start requests. You'll need to provide your own value for other use cases.</p>
    /// <p>An error occurs if the other input parameters are not the same as in the first request. Using a different value for <code>ClientToken</code> is considered a new call to <code>StartModel</code>. An idempotency token is active for 8 hours.</p>
    pub fn set_client_token(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_client_token(input);
        self
    }
    /// <p>ClientToken is an idempotency token that ensures a call to <code>StartModel</code> completes only once. You choose the value to pass. For example, an issue might prevent you from getting a response from <code>StartModel</code>. In this case, safely retry your call to <code>StartModel</code> by using the same <code>ClientToken</code> parameter value.</p>
    /// <p>If you don't supply a value for <code>ClientToken</code>, the AWS SDK you are using inserts a value for you. This prevents retries after a network error from making multiple start requests. You'll need to provide your own value for other use cases.</p>
    /// <p>An error occurs if the other input parameters are not the same as in the first request. Using a different value for <code>ClientToken</code> is considered a new call to <code>StartModel</code>. An idempotency token is active for 8 hours.</p>
    pub fn get_client_token(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_client_token()
    }
    /// <p>The maximum number of inference units to use for auto-scaling the model. If you don't specify a value, Amazon Lookout for Vision doesn't auto-scale the model.</p>
    pub fn max_inference_units(mut self, input: i32) -> Self {
        self.inner = self.inner.max_inference_units(input);
        self
    }
    /// <p>The maximum number of inference units to use for auto-scaling the model. If you don't specify a value, Amazon Lookout for Vision doesn't auto-scale the model.</p>
    pub fn set_max_inference_units(mut self, input: ::std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_max_inference_units(input);
        self
    }
    /// <p>The maximum number of inference units to use for auto-scaling the model. If you don't specify a value, Amazon Lookout for Vision doesn't auto-scale the model.</p>
    pub fn get_max_inference_units(&self) -> &::std::option::Option<i32> {
        self.inner.get_max_inference_units()
    }
}