// NOTE(review): removed a spurious line-number gutter ("1 2 3 … 150") left over
// from text extraction; it was not valid Rust source.
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Input for the `StartModel` operation: identifies the model version to start
/// and how many inference units to provision for it.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
pub struct StartModelInput {
/// <p>The name of the project that contains the model that you want to start.</p>
pub project_name: ::std::option::Option<::std::string::String>,
/// <p>The version of the model that you want to start.</p>
pub model_version: ::std::option::Option<::std::string::String>,
/// <p>The minimum number of inference units to use. A single inference unit represents 1 hour of processing. Use a higher number to increase the TPS throughput of your model. You are charged for the number of inference units that you use. </p>
pub min_inference_units: ::std::option::Option<i32>,
/// <p>ClientToken is an idempotency token that ensures a call to <code>StartModel</code> completes only once. You choose the value to pass. For example, An issue might prevent you from getting a response from <code>StartModel</code>. In this case, safely retry your call to <code>StartModel</code> by using the same <code>ClientToken</code> parameter value. </p>
/// <p>If you don't supply a value for <code>ClientToken</code>, the AWS SDK you are using inserts a value for you. This prevents retries after a network error from making multiple start requests. You'll need to provide your own value for other use cases. </p>
/// <p>An error occurs if the other input parameters are not the same as in the first request. Using a different value for <code>ClientToken</code> is considered a new call to <code>StartModel</code>. An idempotency token is active for 8 hours. </p>
pub client_token: ::std::option::Option<::std::string::String>,
/// <p>The maximum number of inference units to use for auto-scaling the model. If you don't specify a value, Amazon Lookout for Vision doesn't auto-scale the model.</p>
pub max_inference_units: ::std::option::Option<i32>,
}
impl StartModelInput {
    /// <p>The name of the project that contains the model that you want to start.</p>
    pub fn project_name(&self) -> ::std::option::Option<&str> {
        // Borrow the owned `String` (if any) and narrow it to `&str`.
        self.project_name.as_ref().map(::std::string::String::as_str)
    }

    /// <p>The version of the model that you want to start.</p>
    pub fn model_version(&self) -> ::std::option::Option<&str> {
        self.model_version.as_ref().map(::std::string::String::as_str)
    }

    /// <p>The minimum number of inference units to use. A single inference unit represents 1 hour of processing. Use a higher number to increase the TPS throughput of your model. You are charged for the number of inference units that you use. </p>
    pub fn min_inference_units(&self) -> ::std::option::Option<i32> {
        // `Option<i32>` is `Copy`, so the field is returned by value.
        self.min_inference_units
    }

    /// <p>ClientToken is an idempotency token that ensures a call to <code>StartModel</code> completes only once. You choose the value to pass. For example, An issue might prevent you from getting a response from <code>StartModel</code>. In this case, safely retry your call to <code>StartModel</code> by using the same <code>ClientToken</code> parameter value. </p>
    /// <p>If you don't supply a value for <code>ClientToken</code>, the AWS SDK you are using inserts a value for you. This prevents retries after a network error from making multiple start requests. You'll need to provide your own value for other use cases. </p>
    /// <p>An error occurs if the other input parameters are not the same as in the first request. Using a different value for <code>ClientToken</code> is considered a new call to <code>StartModel</code>. An idempotency token is active for 8 hours. </p>
    pub fn client_token(&self) -> ::std::option::Option<&str> {
        self.client_token.as_ref().map(::std::string::String::as_str)
    }

    /// <p>The maximum number of inference units to use for auto-scaling the model. If you don't specify a value, Amazon Lookout for Vision doesn't auto-scale the model.</p>
    pub fn max_inference_units(&self) -> ::std::option::Option<i32> {
        self.max_inference_units
    }
}
impl StartModelInput {
    /// Creates a new builder-style object to manufacture [`StartModelInput`](crate::operation::start_model::StartModelInput).
    pub fn builder() -> crate::operation::start_model::builders::StartModelInputBuilder {
        // Fully-qualified call: every field of the builder starts out as `None`.
        <crate::operation::start_model::builders::StartModelInputBuilder as ::std::default::Default>::default()
    }
}
/// A builder for [`StartModelInput`](crate::operation::start_model::StartModelInput).
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
pub struct StartModelInputBuilder {
// Each field mirrors the corresponding `StartModelInput` field and starts as `None`.
pub(crate) project_name: ::std::option::Option<::std::string::String>,
pub(crate) model_version: ::std::option::Option<::std::string::String>,
pub(crate) min_inference_units: ::std::option::Option<i32>,
pub(crate) client_token: ::std::option::Option<::std::string::String>,
pub(crate) max_inference_units: ::std::option::Option<i32>,
}
impl StartModelInputBuilder {
/// <p>The name of the project that contains the model that you want to start.</p>
/// This field is required.
pub fn project_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
self.project_name = ::std::option::Option::Some(input.into());
self
}
/// <p>The name of the project that contains the model that you want to start.</p>
pub fn set_project_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
self.project_name = input;
self
}
/// <p>The name of the project that contains the model that you want to start.</p>
pub fn get_project_name(&self) -> &::std::option::Option<::std::string::String> {
&self.project_name
}
/// <p>The version of the model that you want to start.</p>
/// This field is required.
pub fn model_version(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
self.model_version = ::std::option::Option::Some(input.into());
self
}
/// <p>The version of the model that you want to start.</p>
pub fn set_model_version(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
self.model_version = input;
self
}
/// <p>The version of the model that you want to start.</p>
pub fn get_model_version(&self) -> &::std::option::Option<::std::string::String> {
&self.model_version
}
/// <p>The minimum number of inference units to use. A single inference unit represents 1 hour of processing. Use a higher number to increase the TPS throughput of your model. You are charged for the number of inference units that you use. </p>
/// This field is required.
pub fn min_inference_units(mut self, input: i32) -> Self {
self.min_inference_units = ::std::option::Option::Some(input);
self
}
/// <p>The minimum number of inference units to use. A single inference unit represents 1 hour of processing. Use a higher number to increase the TPS throughput of your model. You are charged for the number of inference units that you use. </p>
pub fn set_min_inference_units(mut self, input: ::std::option::Option<i32>) -> Self {
self.min_inference_units = input;
self
}
/// <p>The minimum number of inference units to use. A single inference unit represents 1 hour of processing. Use a higher number to increase the TPS throughput of your model. You are charged for the number of inference units that you use. </p>
pub fn get_min_inference_units(&self) -> &::std::option::Option<i32> {
&self.min_inference_units
}
/// <p>ClientToken is an idempotency token that ensures a call to <code>StartModel</code> completes only once. You choose the value to pass. For example, An issue might prevent you from getting a response from <code>StartModel</code>. In this case, safely retry your call to <code>StartModel</code> by using the same <code>ClientToken</code> parameter value. </p>
/// <p>If you don't supply a value for <code>ClientToken</code>, the AWS SDK you are using inserts a value for you. This prevents retries after a network error from making multiple start requests. You'll need to provide your own value for other use cases. </p>
/// <p>An error occurs if the other input parameters are not the same as in the first request. Using a different value for <code>ClientToken</code> is considered a new call to <code>StartModel</code>. An idempotency token is active for 8 hours. </p>
pub fn client_token(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
self.client_token = ::std::option::Option::Some(input.into());
self
}
/// <p>ClientToken is an idempotency token that ensures a call to <code>StartModel</code> completes only once. You choose the value to pass. For example, An issue might prevent you from getting a response from <code>StartModel</code>. In this case, safely retry your call to <code>StartModel</code> by using the same <code>ClientToken</code> parameter value. </p>
/// <p>If you don't supply a value for <code>ClientToken</code>, the AWS SDK you are using inserts a value for you. This prevents retries after a network error from making multiple start requests. You'll need to provide your own value for other use cases. </p>
/// <p>An error occurs if the other input parameters are not the same as in the first request. Using a different value for <code>ClientToken</code> is considered a new call to <code>StartModel</code>. An idempotency token is active for 8 hours. </p>
pub fn set_client_token(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
self.client_token = input;
self
}
/// <p>ClientToken is an idempotency token that ensures a call to <code>StartModel</code> completes only once. You choose the value to pass. For example, An issue might prevent you from getting a response from <code>StartModel</code>. In this case, safely retry your call to <code>StartModel</code> by using the same <code>ClientToken</code> parameter value. </p>
/// <p>If you don't supply a value for <code>ClientToken</code>, the AWS SDK you are using inserts a value for you. This prevents retries after a network error from making multiple start requests. You'll need to provide your own value for other use cases. </p>
/// <p>An error occurs if the other input parameters are not the same as in the first request. Using a different value for <code>ClientToken</code> is considered a new call to <code>StartModel</code>. An idempotency token is active for 8 hours. </p>
pub fn get_client_token(&self) -> &::std::option::Option<::std::string::String> {
&self.client_token
}
/// <p>The maximum number of inference units to use for auto-scaling the model. If you don't specify a value, Amazon Lookout for Vision doesn't auto-scale the model.</p>
pub fn max_inference_units(mut self, input: i32) -> Self {
self.max_inference_units = ::std::option::Option::Some(input);
self
}
/// <p>The maximum number of inference units to use for auto-scaling the model. If you don't specify a value, Amazon Lookout for Vision doesn't auto-scale the model.</p>
pub fn set_max_inference_units(mut self, input: ::std::option::Option<i32>) -> Self {
self.max_inference_units = input;
self
}
/// <p>The maximum number of inference units to use for auto-scaling the model. If you don't specify a value, Amazon Lookout for Vision doesn't auto-scale the model.</p>
pub fn get_max_inference_units(&self) -> &::std::option::Option<i32> {
&self.max_inference_units
}
/// Consumes the builder and constructs a [`StartModelInput`](crate::operation::start_model::StartModelInput).
pub fn build(self) -> ::std::result::Result<crate::operation::start_model::StartModelInput, ::aws_smithy_types::error::operation::BuildError> {
::std::result::Result::Ok(crate::operation::start_model::StartModelInput {
project_name: self.project_name,
model_version: self.model_version,
min_inference_units: self.min_inference_units,
client_token: self.client_token,
max_inference_units: self.max_inference_units,
})
}
}