// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// <p>The fair share policy for a scheduling policy.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct FairsharePolicy {
    /// <p>The time period to use to calculate a fair share percentage for each fair share identifier in
    /// use, in seconds. A value of zero (0) indicates that only current usage should be measured; if there
    /// are four evenly weighted fair share identifiers then each can only use up to 25% of the available
    /// CPU resources, even if some of the fair share identifiers have no currently running jobs. The decay
    /// allows for more recently run jobs to have more weight than jobs that ran earlier. The maximum
    /// supported value is 604800 (1 week).</p>
    pub share_decay_seconds: i32,
    /// <p>A value used to reserve some of the available maximum vCPU for fair share identifiers that have
    /// not yet been used.</p>
    /// <p>The reserved ratio is <code>(<i>computeReservation</i>/100)^<i>ActiveFairShares</i></code> where
    /// <code><i>ActiveFairShares</i></code> is the number of active fair share identifiers.</p>
    /// <p>For example, a <code>computeReservation</code> value of 50 indicates that Batch should reserve
    /// 50% of the maximum available vCPU if there is only one fair share identifier, 25% if there are two
    /// fair share identifiers, and 12.5% if there are three fair share identifiers. A
    /// <code>computeReservation</code> value of 25 indicates that Batch should reserve 25% of the maximum
    /// available vCPU if there is only one fair share identifier, 6.25% if there are two fair share
    /// identifiers, and 1.56% if there are three fair share identifiers.</p>
    /// <p>The minimum value is 0 and the maximum value is 99.</p>
    pub compute_reservation: i32,
    /// <p>Array of <code>SharedIdentifier</code> objects that contain the weights for the fair share
    /// identifiers for the fair share policy. Fair share identifiers that are not included have a default
    /// weight of <code>1.0</code>.</p>
    pub share_distribution: std::option::Option<std::vec::Vec<crate::model::ShareAttributes>>,
}
impl FairsharePolicy {
    /// <p>The time period to use to calculate a fair share percentage for each fair share identifier in
    /// use, in seconds. A value of zero (0) indicates that only current usage should be measured. The
    /// decay allows more recently run jobs to have more weight than jobs that ran earlier. The maximum
    /// supported value is 604800 (1 week).</p>
    pub fn share_decay_seconds(&self) -> i32 {
        self.share_decay_seconds
    }
    /// <p>A value used to reserve some of the available maximum vCPU for fair share identifiers that have
    /// not yet been used. The reserved ratio is
    /// <code>(<i>computeReservation</i>/100)^<i>ActiveFairShares</i></code> where
    /// <code><i>ActiveFairShares</i></code> is the number of active fair share identifiers. The minimum
    /// value is 0 and the maximum value is 99.</p>
    pub fn compute_reservation(&self) -> i32 {
        self.compute_reservation
    }
    /// <p>Array of <code>SharedIdentifier</code> objects that contain the weights for the fair share
    /// identifiers for the fair share policy. Fair share identifiers that are not included have a default
    /// weight of <code>1.0</code>.</p>
    pub fn share_distribution(&self) -> std::option::Option<&[crate::model::ShareAttributes]> {
        // Borrow the backing `Vec` as a slice without cloning.
        self.share_distribution.as_ref().map(|v| v.as_slice())
    }
}
impl std::fmt::Debug for FairsharePolicy {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Emit every field in declaration order.
        f.debug_struct("FairsharePolicy")
            .field("share_decay_seconds", &self.share_decay_seconds)
            .field("compute_reservation", &self.compute_reservation)
            .field("share_distribution", &self.share_distribution)
            .finish()
    }
}
/// See [`FairsharePolicy`](crate::model::FairsharePolicy)
pub mod fairshare_policy {
    /// A builder for [`FairsharePolicy`](crate::model::FairsharePolicy)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) share_decay_seconds: std::option::Option<i32>,
        pub(crate) compute_reservation: std::option::Option<i32>,
        pub(crate) share_distribution:
            std::option::Option<std::vec::Vec<crate::model::ShareAttributes>>,
    }
    impl Builder {
        /// <p>The time period to use to calculate a fair share percentage for each fair share identifier
        /// in use, in seconds. A value of zero (0) indicates that only current usage should be measured.
        /// The maximum supported value is 604800 (1 week).</p>
        pub fn share_decay_seconds(self, input: i32) -> Self {
            Self {
                share_decay_seconds: Some(input),
                ..self
            }
        }
        /// Sets or clears the share decay period; see [`Self::share_decay_seconds`].
        pub fn set_share_decay_seconds(self, input: std::option::Option<i32>) -> Self {
            Self {
                share_decay_seconds: input,
                ..self
            }
        }
        /// <p>A value used to reserve some of the available maximum vCPU for fair share identifiers that
        /// have not yet been used. The reserved ratio is
        /// <code>(<i>computeReservation</i>/100)^<i>ActiveFairShares</i></code> where
        /// <code><i>ActiveFairShares</i></code> is the number of active fair share identifiers. The
        /// minimum value is 0 and the maximum value is 99.</p>
        pub fn compute_reservation(self, input: i32) -> Self {
            Self {
                compute_reservation: Some(input),
                ..self
            }
        }
        /// Sets or clears the compute reservation; see [`Self::compute_reservation`].
        pub fn set_compute_reservation(self, input: std::option::Option<i32>) -> Self {
            Self {
                compute_reservation: input,
                ..self
            }
        }
        /// Appends an item to `share_distribution`.
        ///
        /// To override the contents of this collection use [`set_share_distribution`](Self::set_share_distribution).
        ///
        /// <p>Array of <code>SharedIdentifier</code> objects that contain the weights for the fair share
        /// identifiers for the fair share policy. Fair share identifiers that are not included have a
        /// default weight of <code>1.0</code>.</p>
        pub fn share_distribution(
            mut self,
            input: impl Into<crate::model::ShareAttributes>,
        ) -> Self {
            // Lazily create the Vec on the first append.
            self.share_distribution
                .get_or_insert_with(Vec::new)
                .push(input.into());
            self
        }
        /// Replaces (or clears) the whole `share_distribution` collection; see
        /// [`Self::share_distribution`].
        pub fn set_share_distribution(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::ShareAttributes>>,
        ) -> Self {
            self.share_distribution = input;
            self
        }
        /// Consumes the builder and constructs a [`FairsharePolicy`](crate::model::FairsharePolicy).
        /// Unset numeric fields fall back to `0`.
        pub fn build(self) -> crate::model::FairsharePolicy {
            crate::model::FairsharePolicy {
                share_decay_seconds: self.share_decay_seconds.unwrap_or_default(),
                compute_reservation: self.compute_reservation.unwrap_or_default(),
                share_distribution: self.share_distribution,
            }
        }
    }
}
impl FairsharePolicy {
    /// Creates a new builder-style object to manufacture [`FairsharePolicy`](crate::model::FairsharePolicy)
    pub fn builder() -> crate::model::fairshare_policy::Builder {
        // The builder derives `Default`, so an empty one is just the default value.
        Default::default()
    }
}
/// <p>Specifies the weights for the fair share identifiers for the fair share policy. Fair share
/// identifiers that are not included have a default weight of <code>1.0</code>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ShareAttributes {
    /// <p>A fair share identifier or fair share identifier prefix. If the string ends with '*' then this
    /// entry specifies the weight factor to use for fair share identifiers that begin with that prefix.
    /// The list of fair share identifiers in a fair share policy cannot overlap. For example you cannot
    /// have one that specifies a <code>shareIdentifier</code> of <code>UserA*</code> and another that
    /// specifies a <code>shareIdentifier</code> of <code>UserA-1</code>.</p>
    /// <p>There can be no more than 500 fair share identifiers active in a job queue.</p>
    /// <p>The string is limited to 255 alphanumeric characters, optionally followed by '*'.</p>
    pub share_identifier: std::option::Option<std::string::String>,
    /// <p>The weight factor for the fair share identifier. The default value is 1.0. A lower value has a
    /// higher priority for compute resources. For example, jobs using a share identifier with a weight
    /// factor of 0.125 (1/8) will get 8 times the compute resources of jobs using a share identifier with
    /// a weight factor of 1.</p>
    /// <p>The smallest supported value is 0.0001 and the largest supported value is 999.9999.</p>
    pub weight_factor: f32,
}
impl ShareAttributes {
    /// <p>A fair share identifier or fair share identifier prefix. If the string ends with '*' then this
    /// entry specifies the weight factor to use for fair share identifiers that begin with that prefix.
    /// Identifiers in a fair share policy cannot overlap, at most 500 may be active in a job queue, and
    /// each is limited to 255 alphanumeric characters, optionally followed by '*'.</p>
    pub fn share_identifier(&self) -> std::option::Option<&str> {
        // Borrow the owned `String` as `&str` without cloning.
        self.share_identifier.as_ref().map(|s| s.as_str())
    }
    /// <p>The weight factor for the fair share identifier. The default value is 1.0; a lower value has a
    /// higher priority for compute resources. The smallest supported value is 0.0001 and the largest
    /// supported value is 999.9999.</p>
    pub fn weight_factor(&self) -> f32 {
        self.weight_factor
    }
}
impl std::fmt::Debug for ShareAttributes {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Emit every field in declaration order.
        f.debug_struct("ShareAttributes")
            .field("share_identifier", &self.share_identifier)
            .field("weight_factor", &self.weight_factor)
            .finish()
    }
}
/// See [`ShareAttributes`](crate::model::ShareAttributes)
pub mod share_attributes {
    /// A builder for [`ShareAttributes`](crate::model::ShareAttributes)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) share_identifier: std::option::Option<std::string::String>,
        pub(crate) weight_factor: std::option::Option<f32>,
    }
    impl Builder {
        /// <p>A fair share identifier or fair share identifier prefix. If the string ends with '*' then
        /// this entry specifies the weight factor to use for fair share identifiers that begin with that
        /// prefix. Identifiers in a fair share policy cannot overlap, at most 500 may be active in a job
        /// queue, and each is limited to 255 alphanumeric characters, optionally followed by '*'.</p>
        pub fn share_identifier(self, input: impl Into<std::string::String>) -> Self {
            Self {
                share_identifier: Some(input.into()),
                ..self
            }
        }
        /// Sets or clears the share identifier; see [`Self::share_identifier`].
        pub fn set_share_identifier(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                share_identifier: input,
                ..self
            }
        }
        /// <p>The weight factor for the fair share identifier. The default value is 1.0; a lower value
        /// has a higher priority for compute resources. The smallest supported value is 0.0001 and the
        /// largest supported value is 999.9999.</p>
        pub fn weight_factor(self, input: f32) -> Self {
            Self {
                weight_factor: Some(input),
                ..self
            }
        }
        /// Sets or clears the weight factor; see [`Self::weight_factor`].
        pub fn set_weight_factor(self, input: std::option::Option<f32>) -> Self {
            Self {
                weight_factor: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`ShareAttributes`](crate::model::ShareAttributes).
        /// An unset `weight_factor` falls back to `0.0`.
        pub fn build(self) -> crate::model::ShareAttributes {
            crate::model::ShareAttributes {
                share_identifier: self.share_identifier,
                weight_factor: self.weight_factor.unwrap_or_default(),
            }
        }
    }
}
impl ShareAttributes {
    /// Creates a new builder-style object to manufacture [`ShareAttributes`](crate::model::ShareAttributes)
    pub fn builder() -> crate::model::share_attributes::Builder {
        // The builder derives `Default`, so an empty one is just the default value.
        Default::default()
    }
}
/// <p>The order in which compute environments are tried for job placement within a queue. Compute
/// environments are tried in ascending order. For example, if two compute environments are associated
/// with a job queue, the compute environment with a lower order integer value is tried for job placement
/// first. Compute environments must be in the <code>VALID</code> state before you can associate them with
/// a job queue. All of the compute environments must be either EC2 (<code>EC2</code> or
/// <code>SPOT</code>) or Fargate (<code>FARGATE</code> or <code>FARGATE_SPOT</code>); EC2 and Fargate
/// compute environments can't be mixed.</p>
/// <note>
/// <p>All compute environments that are associated with a job queue must share the same architecture.
/// Batch doesn't support mixing compute environment architecture types in a single job queue.</p>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ComputeEnvironmentOrder {
    /// <p>The order of the compute environment. Compute environments are tried in ascending order. For
    /// example, if two compute environments are associated with a job queue, the compute environment with
    /// a lower <code>order</code> integer value is tried for job placement first.</p>
    pub order: i32,
    /// <p>The Amazon Resource Name (ARN) of the compute environment.</p>
    pub compute_environment: std::option::Option<std::string::String>,
}
impl ComputeEnvironmentOrder {
    /// <p>The order of the compute environment. Compute environments are tried in ascending order; the
    /// environment with the lowest <code>order</code> integer value is tried for job placement first.</p>
    pub fn order(&self) -> i32 {
        self.order
    }
    /// <p>The Amazon Resource Name (ARN) of the compute environment.</p>
    pub fn compute_environment(&self) -> std::option::Option<&str> {
        // Borrow the owned `String` as `&str` without cloning.
        self.compute_environment.as_ref().map(|s| s.as_str())
    }
}
impl std::fmt::Debug for ComputeEnvironmentOrder {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Emit every field in declaration order.
        f.debug_struct("ComputeEnvironmentOrder")
            .field("order", &self.order)
            .field("compute_environment", &self.compute_environment)
            .finish()
    }
}
/// See [`ComputeEnvironmentOrder`](crate::model::ComputeEnvironmentOrder)
pub mod compute_environment_order {
    /// A builder for [`ComputeEnvironmentOrder`](crate::model::ComputeEnvironmentOrder)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) order: std::option::Option<i32>,
        pub(crate) compute_environment: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The order of the compute environment. Compute environments are tried in ascending order;
        /// the environment with the lowest <code>order</code> integer value is tried for job placement
        /// first.</p>
        pub fn order(self, input: i32) -> Self {
            Self {
                order: Some(input),
                ..self
            }
        }
        /// Sets or clears the order; see [`Self::order`].
        pub fn set_order(self, input: std::option::Option<i32>) -> Self {
            Self {
                order: input,
                ..self
            }
        }
        /// <p>The Amazon Resource Name (ARN) of the compute environment.</p>
        pub fn compute_environment(self, input: impl Into<std::string::String>) -> Self {
            Self {
                compute_environment: Some(input.into()),
                ..self
            }
        }
        /// Sets or clears the compute environment ARN; see [`Self::compute_environment`].
        pub fn set_compute_environment(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                compute_environment: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a
        /// [`ComputeEnvironmentOrder`](crate::model::ComputeEnvironmentOrder). An unset `order` falls
        /// back to `0`.
        pub fn build(self) -> crate::model::ComputeEnvironmentOrder {
            crate::model::ComputeEnvironmentOrder {
                order: self.order.unwrap_or_default(),
                compute_environment: self.compute_environment,
            }
        }
    }
}
impl ComputeEnvironmentOrder {
    /// Creates a new builder-style object to manufacture [`ComputeEnvironmentOrder`](crate::model::ComputeEnvironmentOrder)
    pub fn builder() -> crate::model::compute_environment_order::Builder {
        // The builder derives `Default`, so an empty one is just the default value.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum JqState {
    #[allow(missing_docs)] // documentation missing in model
    Disabled,
    #[allow(missing_docs)] // documentation missing in model
    Enabled,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for JqState {
    fn from(s: &str) -> Self {
        // Unrecognized wire values are preserved verbatim in `Unknown` so that
        // newer service values round-trip instead of failing.
        match s {
            "DISABLED" => Self::Disabled,
            "ENABLED" => Self::Enabled,
            other => Self::Unknown(other.to_owned()),
        }
    }
}
impl std::str::FromStr for JqState {
    type Err = std::convert::Infallible;
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Parsing never fails: unrecognized values map to `Unknown`.
        Ok(s.into())
    }
}
impl JqState {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Disabled => "DISABLED",
            Self::Enabled => "ENABLED",
            Self::Unknown(s) => s.as_ref(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub fn values() -> &'static [&'static str] {
        &["DISABLED", "ENABLED"]
    }
}
impl AsRef<str> for JqState {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
/// <p>An object representing the attributes of a compute environment that can be updated. For more
/// information, see
/// <a href="https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html">Compute Environments</a>
/// in the <i>Batch User Guide</i>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ComputeResourceUpdate {
    /// <p>The minimum number of Amazon EC2 vCPUs that an environment should maintain.</p>
    /// <note>
    /// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't
    /// be specified.</p>
    /// </note>
    pub minv_cpus: i32,
    /// <p>The maximum number of Amazon EC2 vCPUs that an environment can reach.</p>
    /// <note>
    /// <p>With both <code>BEST_FIT_PROGRESSIVE</code> and <code>SPOT_CAPACITY_OPTIMIZED</code>
    /// allocation strategies, Batch might need to exceed <code>maxvCpus</code> to meet your capacity
    /// requirements. In this event, Batch never exceeds <code>maxvCpus</code> by more than a single
    /// instance. That is, no more than a single instance from among those specified in your compute
    /// environment.</p>
    /// </note>
    pub maxv_cpus: i32,
    /// <p>The desired number of Amazon EC2 vCPUS in the compute environment.</p>
    /// <note>
    /// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't
    /// be specified.</p>
    /// </note>
    pub desiredv_cpus: i32,
    /// <p>The VPC subnets where the compute resources are launched. Fargate compute resources can
    /// contain up to 16 subnets. Providing an empty list will be handled as if this parameter wasn't
    /// specified and no change is made. This can't be specified for EC2 compute resources. For more
    /// information, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html">VPCs
    /// and Subnets</a> in the <i>Amazon VPC User Guide</i>.</p>
    pub subnets: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>The Amazon EC2 security groups associated with instances launched in the compute environment.
    /// This parameter is required for Fargate compute resources, where it can contain up to 5 security
    /// groups. This can't be specified for EC2 compute resources. Providing an empty list is handled as
    /// if this parameter wasn't specified and no change is made.</p>
    pub security_group_ids: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl ComputeResourceUpdate {
    /// <p>The minimum number of Amazon EC2 vCPUs that an environment should maintain.</p>
    /// <note>
    /// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't
    /// be specified.</p>
    /// </note>
    pub fn minv_cpus(&self) -> i32 {
        self.minv_cpus
    }
    /// <p>The maximum number of Amazon EC2 vCPUs that an environment can reach.</p>
    /// <note>
    /// <p>With both <code>BEST_FIT_PROGRESSIVE</code> and <code>SPOT_CAPACITY_OPTIMIZED</code>
    /// allocation strategies, Batch might need to exceed <code>maxvCpus</code> to meet your capacity
    /// requirements, but never by more than a single instance.</p>
    /// </note>
    pub fn maxv_cpus(&self) -> i32 {
        self.maxv_cpus
    }
    /// <p>The desired number of Amazon EC2 vCPUS in the compute environment.</p>
    /// <note>
    /// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't
    /// be specified.</p>
    /// </note>
    pub fn desiredv_cpus(&self) -> i32 {
        self.desiredv_cpus
    }
    /// <p>The VPC subnets where the compute resources are launched. Fargate compute resources can
    /// contain up to 16 subnets; this can't be specified for EC2 compute resources. An empty list is
    /// handled as if this parameter wasn't specified and no change is made.</p>
    pub fn subnets(&self) -> std::option::Option<&[std::string::String]> {
        // Borrow the backing `Vec` as a slice without cloning.
        self.subnets.as_ref().map(|v| v.as_slice())
    }
    /// <p>The Amazon EC2 security groups associated with instances launched in the compute environment.
    /// Required for Fargate compute resources (up to 5 security groups); can't be specified for EC2
    /// compute resources. An empty list is handled as if this parameter wasn't specified and no change
    /// is made.</p>
    pub fn security_group_ids(&self) -> std::option::Option<&[std::string::String]> {
        self.security_group_ids.as_ref().map(|v| v.as_slice())
    }
}
impl std::fmt::Debug for ComputeResourceUpdate {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Emit every field in declaration order.
        f.debug_struct("ComputeResourceUpdate")
            .field("minv_cpus", &self.minv_cpus)
            .field("maxv_cpus", &self.maxv_cpus)
            .field("desiredv_cpus", &self.desiredv_cpus)
            .field("subnets", &self.subnets)
            .field("security_group_ids", &self.security_group_ids)
            .finish()
    }
}
/// See [`ComputeResourceUpdate`](crate::model::ComputeResourceUpdate)
pub mod compute_resource_update {
/// A builder for [`ComputeResourceUpdate`](crate::model::ComputeResourceUpdate)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) minv_cpus: std::option::Option<i32>,
pub(crate) maxv_cpus: std::option::Option<i32>,
pub(crate) desiredv_cpus: std::option::Option<i32>,
pub(crate) subnets: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) security_group_ids: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// <p>The minimum number of Amazon EC2 vCPUs that an environment should maintain.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn minv_cpus(mut self, input: i32) -> Self {
self.minv_cpus = Some(input);
self
}
/// <p>The minimum number of Amazon EC2 vCPUs that an environment should maintain.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn set_minv_cpus(mut self, input: std::option::Option<i32>) -> Self {
self.minv_cpus = input;
self
}
/// <p>The maximum number of Amazon EC2 vCPUs that an environment can reach.</p>
/// <note>
/// <p>With both <code>BEST_FIT_PROGRESSIVE</code> and <code>SPOT_CAPACITY_OPTIMIZED</code> allocation strategies,
/// Batch might need to exceed <code>maxvCpus</code> to meet your capacity requirements. In this event, Batch never
/// exceeds <code>maxvCpus</code> by more than a single instance. That is, no more than a single instance from among
/// those specified in your compute environment.</p>
/// </note>
pub fn maxv_cpus(mut self, input: i32) -> Self {
self.maxv_cpus = Some(input);
self
}
/// <p>The maximum number of Amazon EC2 vCPUs that an environment can reach.</p>
/// <note>
/// <p>With both <code>BEST_FIT_PROGRESSIVE</code> and <code>SPOT_CAPACITY_OPTIMIZED</code> allocation strategies,
/// Batch might need to exceed <code>maxvCpus</code> to meet your capacity requirements. In this event, Batch never
/// exceeds <code>maxvCpus</code> by more than a single instance. That is, no more than a single instance from among
/// those specified in your compute environment.</p>
/// </note>
pub fn set_maxv_cpus(mut self, input: std::option::Option<i32>) -> Self {
self.maxv_cpus = input;
self
}
/// <p>The desired number of Amazon EC2 vCPUS in the compute environment.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn desiredv_cpus(mut self, input: i32) -> Self {
self.desiredv_cpus = Some(input);
self
}
/// <p>The desired number of Amazon EC2 vCPUS in the compute environment.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn set_desiredv_cpus(mut self, input: std::option::Option<i32>) -> Self {
self.desiredv_cpus = input;
self
}
/// Appends an item to `subnets`.
///
/// To override the contents of this collection use [`set_subnets`](Self::set_subnets).
///
/// <p>The VPC subnets where the compute resources are launched. Fargate compute resources can contain up to 16
/// subnets. Providing an empty list will be handled as if this parameter wasn't specified and no change is made. This
/// can't be specified for EC2 compute resources. For more information, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html">VPCs and Subnets</a> in the <i>Amazon VPC User
/// Guide</i>.</p>
pub fn subnets(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.subnets.unwrap_or_default();
v.push(input.into());
self.subnets = Some(v);
self
}
/// <p>The VPC subnets where the compute resources are launched. Fargate compute resources can contain up to 16
/// subnets. Providing an empty list will be handled as if this parameter wasn't specified and no change is made. This
/// can't be specified for EC2 compute resources. For more information, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html">VPCs and Subnets</a> in the <i>Amazon VPC User
/// Guide</i>.</p>
pub fn set_subnets(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.subnets = input;
self
}
/// Appends an item to `security_group_ids`.
///
/// To override the contents of this collection use [`set_security_group_ids`](Self::set_security_group_ids).
///
/// <p>The Amazon EC2 security groups associated with instances launched in the compute environment. This parameter is
/// required for Fargate compute resources, where it can contain up to 5 security groups. This can't be specified for
/// EC2 compute resources. Providing an empty list is handled as if this parameter wasn't specified and no change is
/// made.</p>
pub fn security_group_ids(mut self, input: impl Into<std::string::String>) -> Self {
    // Lazily create the backing vector on first append.
    self.security_group_ids
        .get_or_insert_with(Vec::new)
        .push(input.into());
    self
}
/// <p>The Amazon EC2 security groups associated with instances launched in the compute environment. This parameter is
/// required for Fargate compute resources, where it can contain up to 5 security groups. This can't be specified for
/// EC2 compute resources. Providing an empty list is handled as if this parameter wasn't specified and no change is
/// made.</p>
pub fn set_security_group_ids(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.security_group_ids = input;
self
}
/// Consumes the builder and constructs a [`ComputeResourceUpdate`](crate::model::ComputeResourceUpdate)
pub fn build(self) -> crate::model::ComputeResourceUpdate {
    // Scalar fields fall back to the i32 default (0) when unset;
    // optional collections are passed through unchanged.
    let minv_cpus = self.minv_cpus.unwrap_or_default();
    let maxv_cpus = self.maxv_cpus.unwrap_or_default();
    let desiredv_cpus = self.desiredv_cpus.unwrap_or_default();
    crate::model::ComputeResourceUpdate {
        minv_cpus,
        maxv_cpus,
        desiredv_cpus,
        subnets: self.subnets,
        security_group_ids: self.security_group_ids,
    }
}
}
}
impl ComputeResourceUpdate {
    /// Creates a new builder-style object to manufacture [`ComputeResourceUpdate`](crate::model::ComputeResourceUpdate)
    pub fn builder() -> crate::model::compute_resource_update::Builder {
        // The builder derives `Default`, so an empty builder is just its default value.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum CeState {
    #[allow(missing_docs)] // documentation missing in model
    Disabled,
    #[allow(missing_docs)] // documentation missing in model
    Enabled,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for CeState {
    fn from(s: &str) -> Self {
        // Unrecognized wire values are preserved in `Unknown` rather than dropped,
        // so responses from a newer service version still round-trip.
        match s {
            "DISABLED" => Self::Disabled,
            "ENABLED" => Self::Enabled,
            other => Self::Unknown(other.to_owned()),
        }
    }
}
impl std::str::FromStr for CeState {
    type Err = std::convert::Infallible;
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Parsing cannot fail: every string maps to some variant via `From`.
        Ok(Self::from(s))
    }
}
impl CeState {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Disabled => "DISABLED",
            Self::Enabled => "ENABLED",
            Self::Unknown(s) => s.as_ref(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub fn values() -> &'static [&'static str] {
        &["DISABLED", "ENABLED"]
    }
}
impl AsRef<str> for CeState {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
/// <p>An object representing a job timeout configuration.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct JobTimeout {
/// <p>The time duration in seconds (measured from the job attempt's <code>startedAt</code> timestamp) after which
/// Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.</p>
// NOTE(review): `job_timeout::Builder::build` defaults this to 0 when unset; 0 presumably
// means "no timeout configured" — confirm against the Batch API model.
pub attempt_duration_seconds: i32,
}
impl JobTimeout {
/// <p>The time duration in seconds (measured from the job attempt's <code>startedAt</code> timestamp) after which
/// Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.</p>
// Returns the stored value directly; the builder defaults it to 0 when unset.
pub fn attempt_duration_seconds(&self) -> i32 {
self.attempt_duration_seconds
}
}
impl std::fmt::Debug for JobTimeout {
    /// Formats the struct with its single field via the standard debug builder.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("JobTimeout")
            .field("attempt_duration_seconds", &self.attempt_duration_seconds)
            .finish()
    }
}
/// See [`JobTimeout`](crate::model::JobTimeout)
pub mod job_timeout {
    /// A builder for [`JobTimeout`](crate::model::JobTimeout)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) attempt_duration_seconds: std::option::Option<i32>,
    }
    impl Builder {
        /// <p>The time duration in seconds (measured from the job attempt's <code>startedAt</code> timestamp) after which
        /// Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.</p>
        pub fn attempt_duration_seconds(self, input: i32) -> Self {
            Self {
                attempt_duration_seconds: Some(input),
            }
        }
        /// <p>The time duration in seconds (measured from the job attempt's <code>startedAt</code> timestamp) after which
        /// Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.</p>
        pub fn set_attempt_duration_seconds(self, input: std::option::Option<i32>) -> Self {
            Self {
                attempt_duration_seconds: input,
            }
        }
        /// Consumes the builder and constructs a [`JobTimeout`](crate::model::JobTimeout)
        pub fn build(self) -> crate::model::JobTimeout {
            // An unset duration falls back to the i32 default (0).
            let attempt_duration_seconds = self.attempt_duration_seconds.unwrap_or_default();
            crate::model::JobTimeout {
                attempt_duration_seconds,
            }
        }
    }
}
impl JobTimeout {
    /// Creates a new builder-style object to manufacture [`JobTimeout`](crate::model::JobTimeout)
    pub fn builder() -> crate::model::job_timeout::Builder {
        // The builder derives `Default`, so an empty builder is just its default value.
        Default::default()
    }
}
/// <p>The retry strategy associated with a job. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/job_retries.html">Automated job retries</a> in the <i>Batch User Guide</i>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct RetryStrategy {
/// <p>The number of times to move a job to the <code>RUNNABLE</code> status. You can specify between 1 and 10
/// attempts. If the value of <code>attempts</code> is greater than one, the job is retried on failure the same number of
/// attempts as the value.</p>
// NOTE(review): `retry_strategy::Builder::build` defaults this to 0 when unset, which is
// outside the documented 1–10 range — presumably 0 means "not specified"; confirm.
pub attempts: i32,
/// <p>Array of up to 5 objects that specify conditions under which the job should be retried or failed. If this
/// parameter is specified, then the <code>attempts</code> parameter must also be specified.</p>
pub evaluate_on_exit: std::option::Option<std::vec::Vec<crate::model::EvaluateOnExit>>,
}
impl RetryStrategy {
    /// <p>The number of times to move a job to the <code>RUNNABLE</code> status. You can specify between 1 and 10
    /// attempts. If the value of <code>attempts</code> is greater than one, the job is retried on failure the same number of
    /// attempts as the value.</p>
    pub fn attempts(&self) -> i32 {
        self.attempts
    }
    /// <p>Array of up to 5 objects that specify conditions under which the job should be retried or failed. If this
    /// parameter is specified, then the <code>attempts</code> parameter must also be specified.</p>
    pub fn evaluate_on_exit(&self) -> std::option::Option<&[crate::model::EvaluateOnExit]> {
        // Borrow the owned Vec (if any) as a slice.
        self.evaluate_on_exit.as_ref().map(|v| v.as_slice())
    }
}
impl std::fmt::Debug for RetryStrategy {
    /// Formats all fields via the standard debug builder.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("RetryStrategy")
            .field("attempts", &self.attempts)
            .field("evaluate_on_exit", &self.evaluate_on_exit)
            .finish()
    }
}
/// See [`RetryStrategy`](crate::model::RetryStrategy)
pub mod retry_strategy {
    /// A builder for [`RetryStrategy`](crate::model::RetryStrategy)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) attempts: std::option::Option<i32>,
        pub(crate) evaluate_on_exit:
            std::option::Option<std::vec::Vec<crate::model::EvaluateOnExit>>,
    }
    impl Builder {
        /// <p>The number of times to move a job to the <code>RUNNABLE</code> status. You can specify between 1 and 10
        /// attempts. If the value of <code>attempts</code> is greater than one, the job is retried on failure the same number of
        /// attempts as the value.</p>
        pub fn attempts(self, input: i32) -> Self {
            Self {
                attempts: Some(input),
                ..self
            }
        }
        /// <p>The number of times to move a job to the <code>RUNNABLE</code> status. You can specify between 1 and 10
        /// attempts. If the value of <code>attempts</code> is greater than one, the job is retried on failure the same number of
        /// attempts as the value.</p>
        pub fn set_attempts(self, input: std::option::Option<i32>) -> Self {
            Self {
                attempts: input,
                ..self
            }
        }
        /// Appends an item to `evaluate_on_exit`.
        ///
        /// To override the contents of this collection use [`set_evaluate_on_exit`](Self::set_evaluate_on_exit).
        ///
        /// <p>Array of up to 5 objects that specify conditions under which the job should be retried or failed. If this
        /// parameter is specified, then the <code>attempts</code> parameter must also be specified.</p>
        pub fn evaluate_on_exit(mut self, input: impl Into<crate::model::EvaluateOnExit>) -> Self {
            // Lazily create the backing vector on first append.
            self.evaluate_on_exit
                .get_or_insert_with(Vec::new)
                .push(input.into());
            self
        }
        /// <p>Array of up to 5 objects that specify conditions under which the job should be retried or failed. If this
        /// parameter is specified, then the <code>attempts</code> parameter must also be specified.</p>
        pub fn set_evaluate_on_exit(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::EvaluateOnExit>>,
        ) -> Self {
            Self {
                evaluate_on_exit: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`RetryStrategy`](crate::model::RetryStrategy)
        pub fn build(self) -> crate::model::RetryStrategy {
            let Self {
                attempts,
                evaluate_on_exit,
            } = self;
            crate::model::RetryStrategy {
                // An unset `attempts` falls back to the i32 default (0).
                attempts: attempts.unwrap_or_default(),
                evaluate_on_exit,
            }
        }
    }
}
impl RetryStrategy {
    /// Creates a new builder-style object to manufacture [`RetryStrategy`](crate::model::RetryStrategy)
    pub fn builder() -> crate::model::retry_strategy::Builder {
        // The builder derives `Default`, so an empty builder is just its default value.
        Default::default()
    }
}
/// <p>Specifies a set of conditions to be met, and an action to take (<code>RETRY</code> or <code>EXIT</code>) if all
/// conditions are met.</p>
// NOTE(review): all four fields are optional; the builder passes them through unchanged
// (no defaulting — see `evaluate_on_exit::Builder::build`).
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct EvaluateOnExit {
/// <p>Contains a glob pattern to match against the <code>StatusReason</code> returned for a job. The pattern can be up
/// to 512 characters in length. It can contain letters, numbers, periods (.), colons (:), and white space (including
/// spaces or tabs).
/// It can optionally end with an
/// asterisk (*) so that only the start of the string needs to be an exact match.</p>
///
/// <p>The string can be between 1 and 512 characters in length.</p>
pub on_status_reason: std::option::Option<std::string::String>,
/// <p>Contains a glob pattern to match against the <code>Reason</code> returned for a job. The pattern can be up to
/// 512 characters in length. It can contain letters, numbers, periods (.), colons (:), and white space (including spaces
/// and tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact
/// match.</p>
///
/// <p>The string can be between 1 and 512 characters in length.</p>
pub on_reason: std::option::Option<std::string::String>,
/// <p>Contains a glob pattern to match against the decimal representation of the <code>ExitCode</code> returned for a
/// job. The pattern can be up to 512 characters in length. It can contain only numbers, and can optionally end with an
/// asterisk (*) so that only the start of the string needs to be an exact match.</p>
///
/// <p>The string can be between 1 and 512 characters in length.</p>
pub on_exit_code: std::option::Option<std::string::String>,
/// <p>Specifies the action to take if all of the specified conditions (<code>onStatusReason</code>,
/// <code>onReason</code>, and <code>onExitCode</code>) are met. The values aren't case sensitive.</p>
pub action: std::option::Option<crate::model::RetryAction>,
}
impl EvaluateOnExit {
    /// <p>Contains a glob pattern to match against the <code>StatusReason</code> returned for a job. The pattern can be up
    /// to 512 characters in length. It can contain letters, numbers, periods (.), colons (:), and white space (including
    /// spaces or tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an
    /// exact match.</p>
    ///
    /// <p>The string can be between 1 and 512 characters in length.</p>
    pub fn on_status_reason(&self) -> std::option::Option<&str> {
        // Borrow the owned String (if any) as a &str.
        self.on_status_reason.as_ref().map(|s| s.as_str())
    }
    /// <p>Contains a glob pattern to match against the <code>Reason</code> returned for a job. The pattern can be up to
    /// 512 characters in length. It can contain letters, numbers, periods (.), colons (:), and white space (including spaces
    /// and tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact
    /// match.</p>
    ///
    /// <p>The string can be between 1 and 512 characters in length.</p>
    pub fn on_reason(&self) -> std::option::Option<&str> {
        self.on_reason.as_ref().map(|s| s.as_str())
    }
    /// <p>Contains a glob pattern to match against the decimal representation of the <code>ExitCode</code> returned for a
    /// job. The pattern can be up to 512 characters in length. It can contain only numbers, and can optionally end with an
    /// asterisk (*) so that only the start of the string needs to be an exact match.</p>
    ///
    /// <p>The string can be between 1 and 512 characters in length.</p>
    pub fn on_exit_code(&self) -> std::option::Option<&str> {
        self.on_exit_code.as_ref().map(|s| s.as_str())
    }
    /// <p>Specifies the action to take if all of the specified conditions (<code>onStatusReason</code>,
    /// <code>onReason</code>, and <code>onExitCode</code>) are met. The values aren't case sensitive.</p>
    pub fn action(&self) -> std::option::Option<&crate::model::RetryAction> {
        self.action.as_ref()
    }
}
impl std::fmt::Debug for EvaluateOnExit {
    /// Formats all fields via the standard debug builder.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("EvaluateOnExit")
            .field("on_status_reason", &self.on_status_reason)
            .field("on_reason", &self.on_reason)
            .field("on_exit_code", &self.on_exit_code)
            .field("action", &self.action)
            .finish()
    }
}
/// See [`EvaluateOnExit`](crate::model::EvaluateOnExit)
pub mod evaluate_on_exit {
    /// A builder for [`EvaluateOnExit`](crate::model::EvaluateOnExit)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) on_status_reason: std::option::Option<std::string::String>,
        pub(crate) on_reason: std::option::Option<std::string::String>,
        pub(crate) on_exit_code: std::option::Option<std::string::String>,
        pub(crate) action: std::option::Option<crate::model::RetryAction>,
    }
    impl Builder {
        /// <p>Contains a glob pattern to match against the <code>StatusReason</code> returned for a job. The pattern can be up
        /// to 512 characters in length. It can contain letters, numbers, periods (.), colons (:), and white space (including
        /// spaces or tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an
        /// exact match.</p>
        ///
        /// <p>The string can be between 1 and 512 characters in length.</p>
        pub fn on_status_reason(self, input: impl Into<std::string::String>) -> Self {
            Self {
                on_status_reason: Some(input.into()),
                ..self
            }
        }
        /// <p>Contains a glob pattern to match against the <code>StatusReason</code> returned for a job. The pattern can be up
        /// to 512 characters in length. It can contain letters, numbers, periods (.), colons (:), and white space (including
        /// spaces or tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an
        /// exact match.</p>
        ///
        /// <p>The string can be between 1 and 512 characters in length.</p>
        pub fn set_on_status_reason(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                on_status_reason: input,
                ..self
            }
        }
        /// <p>Contains a glob pattern to match against the <code>Reason</code> returned for a job. The pattern can be up to
        /// 512 characters in length. It can contain letters, numbers, periods (.), colons (:), and white space (including spaces
        /// and tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact
        /// match.</p>
        ///
        /// <p>The string can be between 1 and 512 characters in length.</p>
        pub fn on_reason(self, input: impl Into<std::string::String>) -> Self {
            Self {
                on_reason: Some(input.into()),
                ..self
            }
        }
        /// <p>Contains a glob pattern to match against the <code>Reason</code> returned for a job. The pattern can be up to
        /// 512 characters in length. It can contain letters, numbers, periods (.), colons (:), and white space (including spaces
        /// and tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact
        /// match.</p>
        ///
        /// <p>The string can be between 1 and 512 characters in length.</p>
        pub fn set_on_reason(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                on_reason: input,
                ..self
            }
        }
        /// <p>Contains a glob pattern to match against the decimal representation of the <code>ExitCode</code> returned for a
        /// job. The pattern can be up to 512 characters in length. It can contain only numbers, and can optionally end with an
        /// asterisk (*) so that only the start of the string needs to be an exact match.</p>
        ///
        /// <p>The string can be between 1 and 512 characters in length.</p>
        pub fn on_exit_code(self, input: impl Into<std::string::String>) -> Self {
            Self {
                on_exit_code: Some(input.into()),
                ..self
            }
        }
        /// <p>Contains a glob pattern to match against the decimal representation of the <code>ExitCode</code> returned for a
        /// job. The pattern can be up to 512 characters in length. It can contain only numbers, and can optionally end with an
        /// asterisk (*) so that only the start of the string needs to be an exact match.</p>
        ///
        /// <p>The string can be between 1 and 512 characters in length.</p>
        pub fn set_on_exit_code(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                on_exit_code: input,
                ..self
            }
        }
        /// <p>Specifies the action to take if all of the specified conditions (<code>onStatusReason</code>,
        /// <code>onReason</code>, and <code>onExitCode</code>) are met. The values aren't case sensitive.</p>
        pub fn action(self, input: crate::model::RetryAction) -> Self {
            Self {
                action: Some(input),
                ..self
            }
        }
        /// <p>Specifies the action to take if all of the specified conditions (<code>onStatusReason</code>,
        /// <code>onReason</code>, and <code>onExitCode</code>) are met. The values aren't case sensitive.</p>
        pub fn set_action(self, input: std::option::Option<crate::model::RetryAction>) -> Self {
            Self {
                action: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`EvaluateOnExit`](crate::model::EvaluateOnExit)
        pub fn build(self) -> crate::model::EvaluateOnExit {
            // All fields are optional and are moved across unchanged.
            let Self {
                on_status_reason,
                on_reason,
                on_exit_code,
                action,
            } = self;
            crate::model::EvaluateOnExit {
                on_status_reason,
                on_reason,
                on_exit_code,
                action,
            }
        }
    }
}
impl EvaluateOnExit {
    /// Creates a new builder-style object to manufacture [`EvaluateOnExit`](crate::model::EvaluateOnExit)
    pub fn builder() -> crate::model::evaluate_on_exit::Builder {
        // The builder derives `Default`, so an empty builder is just its default value.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum RetryAction {
    #[allow(missing_docs)] // documentation missing in model
    Exit,
    #[allow(missing_docs)] // documentation missing in model
    Retry,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for RetryAction {
    fn from(s: &str) -> Self {
        // Unrecognized wire values are preserved in `Unknown` rather than dropped,
        // so responses from a newer service version still round-trip.
        match s {
            "EXIT" => Self::Exit,
            "RETRY" => Self::Retry,
            other => Self::Unknown(other.to_owned()),
        }
    }
}
impl std::str::FromStr for RetryAction {
    type Err = std::convert::Infallible;
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Parsing cannot fail: every string maps to some variant via `From`.
        Ok(Self::from(s))
    }
}
impl RetryAction {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Exit => "EXIT",
            Self::Retry => "RETRY",
            Self::Unknown(s) => s.as_ref(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub fn values() -> &'static [&'static str] {
        &["EXIT", "RETRY"]
    }
}
impl AsRef<str> for RetryAction {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
/// <p>Object representing any node overrides to a job definition that's used in a <a>SubmitJob</a> API
/// operation.</p>
/// <note>
/// <p>This isn't applicable to jobs that are running on Fargate resources and shouldn't be provided; use
/// <code>containerOverrides</code> instead.</p>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct NodeOverrides {
/// <p>The number of nodes to use with a multi-node parallel job. This value overrides the number of nodes that are
/// specified in the job definition. To use this override:</p>
/// <ul>
/// <li>
/// <p>There must be at least one node range in your job definition that has an open upper boundary (such as
/// <code>:</code> or <code>n:</code>).</p>
/// </li>
/// <li>
/// <p>The lower boundary of the node range specified in the job definition must be fewer than the number of nodes
/// specified in the override.</p>
/// </li>
/// <li>
/// <p>The main node index specified in the job definition must be fewer than the number of nodes specified in the
/// override.</p>
/// </li>
/// </ul>
// NOTE(review): `node_overrides::Builder::build` defaults this to 0 when unset; 0 presumably
// means "no override" — confirm against the Batch API model.
pub num_nodes: i32,
/// <p>The node property overrides for the job.</p>
pub node_property_overrides:
std::option::Option<std::vec::Vec<crate::model::NodePropertyOverride>>,
}
impl NodeOverrides {
    /// <p>The number of nodes to use with a multi-node parallel job. This value overrides the number of nodes that are
    /// specified in the job definition. To use this override:</p>
    /// <ul>
    /// <li>
    /// <p>There must be at least one node range in your job definition that has an open upper boundary (such as
    /// <code>:</code> or <code>n:</code>).</p>
    /// </li>
    /// <li>
    /// <p>The lower boundary of the node range specified in the job definition must be fewer than the number of nodes
    /// specified in the override.</p>
    /// </li>
    /// <li>
    /// <p>The main node index specified in the job definition must be fewer than the number of nodes specified in the
    /// override.</p>
    /// </li>
    /// </ul>
    pub fn num_nodes(&self) -> i32 {
        self.num_nodes
    }
    /// <p>The node property overrides for the job.</p>
    pub fn node_property_overrides(
        &self,
    ) -> std::option::Option<&[crate::model::NodePropertyOverride]> {
        // Borrow the owned Vec (if any) as a slice.
        self.node_property_overrides.as_ref().map(|v| v.as_slice())
    }
}
impl std::fmt::Debug for NodeOverrides {
    /// Formats all fields via the standard debug builder.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("NodeOverrides")
            .field("num_nodes", &self.num_nodes)
            .field("node_property_overrides", &self.node_property_overrides)
            .finish()
    }
}
/// See [`NodeOverrides`](crate::model::NodeOverrides)
pub mod node_overrides {
    /// A builder for [`NodeOverrides`](crate::model::NodeOverrides)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) num_nodes: std::option::Option<i32>,
        pub(crate) node_property_overrides:
            std::option::Option<std::vec::Vec<crate::model::NodePropertyOverride>>,
    }
    impl Builder {
        /// <p>The number of nodes to use with a multi-node parallel job. This value overrides the number of nodes that are
        /// specified in the job definition. To use this override:</p>
        /// <ul>
        /// <li>
        /// <p>There must be at least one node range in your job definition that has an open upper boundary (such as
        /// <code>:</code> or <code>n:</code>).</p>
        /// </li>
        /// <li>
        /// <p>The lower boundary of the node range specified in the job definition must be fewer than the number of nodes
        /// specified in the override.</p>
        /// </li>
        /// <li>
        /// <p>The main node index specified in the job definition must be fewer than the number of nodes specified in the
        /// override.</p>
        /// </li>
        /// </ul>
        pub fn num_nodes(self, input: i32) -> Self {
            Self {
                num_nodes: Some(input),
                ..self
            }
        }
        /// <p>The number of nodes to use with a multi-node parallel job. This value overrides the number of nodes that are
        /// specified in the job definition. To use this override:</p>
        /// <ul>
        /// <li>
        /// <p>There must be at least one node range in your job definition that has an open upper boundary (such as
        /// <code>:</code> or <code>n:</code>).</p>
        /// </li>
        /// <li>
        /// <p>The lower boundary of the node range specified in the job definition must be fewer than the number of nodes
        /// specified in the override.</p>
        /// </li>
        /// <li>
        /// <p>The main node index specified in the job definition must be fewer than the number of nodes specified in the
        /// override.</p>
        /// </li>
        /// </ul>
        pub fn set_num_nodes(self, input: std::option::Option<i32>) -> Self {
            Self {
                num_nodes: input,
                ..self
            }
        }
        /// Appends an item to `node_property_overrides`.
        ///
        /// To override the contents of this collection use [`set_node_property_overrides`](Self::set_node_property_overrides).
        ///
        /// <p>The node property overrides for the job.</p>
        pub fn node_property_overrides(
            mut self,
            input: impl Into<crate::model::NodePropertyOverride>,
        ) -> Self {
            // Lazily create the backing vector on first append.
            self.node_property_overrides
                .get_or_insert_with(Vec::new)
                .push(input.into());
            self
        }
        /// <p>The node property overrides for the job.</p>
        pub fn set_node_property_overrides(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::NodePropertyOverride>>,
        ) -> Self {
            Self {
                node_property_overrides: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`NodeOverrides`](crate::model::NodeOverrides)
        pub fn build(self) -> crate::model::NodeOverrides {
            let Self {
                num_nodes,
                node_property_overrides,
            } = self;
            crate::model::NodeOverrides {
                // An unset `num_nodes` falls back to the i32 default (0).
                num_nodes: num_nodes.unwrap_or_default(),
                node_property_overrides,
            }
        }
    }
}
impl NodeOverrides {
    /// Creates a new builder-style object to manufacture [`NodeOverrides`](crate::model::NodeOverrides)
    pub fn builder() -> crate::model::node_overrides::Builder {
        // The builder derives `Default`, so an empty builder is just its default value.
        Default::default()
    }
}
/// <p>Object representing any node overrides to a job definition that's used in a <a>SubmitJob</a> API
/// operation.</p>
// NOTE(review): both fields are optional; the builder passes them through unchanged
// (no defaulting — see `node_property_override::Builder::build`).
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct NodePropertyOverride {
/// <p>The range of nodes, using node index values, that's used to override. A range of <code>0:3</code> indicates
/// nodes with index values of <code>0</code> through <code>3</code>. If the starting range value is omitted
/// (<code>:n</code>), then <code>0</code> is used to start the range. If the ending range value is omitted
/// (<code>n:</code>), then the highest possible node index is used to end the range.</p>
pub target_nodes: std::option::Option<std::string::String>,
/// <p>The overrides that should be sent to a node range.</p>
pub container_overrides: std::option::Option<crate::model::ContainerOverrides>,
}
impl NodePropertyOverride {
    /// <p>The range of nodes, using node index values, that's used to override. A range of <code>0:3</code> indicates
    /// nodes with index values of <code>0</code> through <code>3</code>. If the starting range value is omitted
    /// (<code>:n</code>), then <code>0</code> is used to start the range. If the ending range value is omitted
    /// (<code>n:</code>), then the highest possible node index is used to end the range.</p>
    pub fn target_nodes(&self) -> std::option::Option<&str> {
        // Borrow the owned String (if any) as a &str.
        self.target_nodes.as_ref().map(|s| s.as_str())
    }
    /// <p>The overrides that should be sent to a node range.</p>
    pub fn container_overrides(&self) -> std::option::Option<&crate::model::ContainerOverrides> {
        self.container_overrides.as_ref()
    }
}
impl std::fmt::Debug for NodePropertyOverride {
    /// Formats all fields via the standard debug builder.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("NodePropertyOverride")
            .field("target_nodes", &self.target_nodes)
            .field("container_overrides", &self.container_overrides)
            .finish()
    }
}
/// See [`NodePropertyOverride`](crate::model::NodePropertyOverride)
pub mod node_property_override {
    /// A builder for [`NodePropertyOverride`](crate::model::NodePropertyOverride)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) target_nodes: std::option::Option<std::string::String>,
        pub(crate) container_overrides: std::option::Option<crate::model::ContainerOverrides>,
    }
    impl Builder {
        /// <p>The range of nodes, using node index values, that's used to override. A range of <code>0:3</code> indicates
        /// nodes with index values of <code>0</code> through <code>3</code>. If the starting range value is omitted
        /// (<code>:n</code>), then <code>0</code> is used to start the range. If the ending range value is omitted
        /// (<code>n:</code>), then the highest possible node index is used to end the range.</p>
        pub fn target_nodes(self, input: impl Into<std::string::String>) -> Self {
            Self {
                target_nodes: Some(input.into()),
                ..self
            }
        }
        /// <p>The range of nodes, using node index values, that's used to override. A range of <code>0:3</code> indicates
        /// nodes with index values of <code>0</code> through <code>3</code>. If the starting range value is omitted
        /// (<code>:n</code>), then <code>0</code> is used to start the range. If the ending range value is omitted
        /// (<code>n:</code>), then the highest possible node index is used to end the range.</p>
        pub fn set_target_nodes(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                target_nodes: input,
                ..self
            }
        }
        /// <p>The overrides that should be sent to a node range.</p>
        pub fn container_overrides(self, input: crate::model::ContainerOverrides) -> Self {
            Self {
                container_overrides: Some(input),
                ..self
            }
        }
        /// <p>The overrides that should be sent to a node range.</p>
        pub fn set_container_overrides(
            self,
            input: std::option::Option<crate::model::ContainerOverrides>,
        ) -> Self {
            Self {
                container_overrides: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`NodePropertyOverride`](crate::model::NodePropertyOverride)
        pub fn build(self) -> crate::model::NodePropertyOverride {
            // Both fields are optional and are moved across unchanged.
            let Self {
                target_nodes,
                container_overrides,
            } = self;
            crate::model::NodePropertyOverride {
                target_nodes,
                container_overrides,
            }
        }
    }
}
impl NodePropertyOverride {
    /// Creates a new builder-style object to manufacture [`NodePropertyOverride`](crate::model::NodePropertyOverride)
    pub fn builder() -> crate::model::node_property_override::Builder {
        // The builder derives `Default`, so an empty builder is just its default value.
        Default::default()
    }
}
/// <p>The overrides that should be sent to a container.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ContainerOverrides {
    /// <p>Deprecated: use <code>resourceRequirements</code> with <code>type</code> set to
    /// <code>VCPU</code> instead. Not supported for jobs that run on Fargate resources. For jobs
    /// run on EC2 resources it overrides the <code>vcpus</code> parameter set in the job
    /// definition, but doesn't override a vCPU requirement specified in the job definition's
    /// <code>resourceRequirements</code> structure. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#override-resource-requirements">Can't override job definition
    /// resource requirements</a> in the <i>Batch User Guide</i>.</p>
    pub vcpus: i32,
    /// <p>Deprecated: use <code>resourceRequirements</code> with <code>type</code> set to
    /// <code>MEMORY</code> instead. Not supported for jobs that run on Fargate resources. For jobs
    /// run on EC2 resources it overrides the <code>memory</code> parameter set in the job
    /// definition, but doesn't override a memory requirement specified in the job definition's
    /// <code>resourceRequirements</code> structure. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#override-resource-requirements">Can't override job definition
    /// resource requirements</a> in the <i>Batch User Guide</i>.</p>
    pub memory: i32,
    /// <p>The command to send to the container, overriding the default command from the Docker
    /// image or the job definition.</p>
    pub command: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>The instance type to use for a multi-node parallel job.</p>
    /// <note>
    /// <p>This parameter isn't applicable to single-node container jobs or jobs that run on
    /// Fargate resources, and shouldn't be provided.</p>
    /// </note>
    pub instance_type: std::option::Option<std::string::String>,
    /// <p>The environment variables to send to the container. New variables are added at launch;
    /// variables that already exist in the Docker image or the job definition are overridden.</p>
    /// <note>
    /// <p>Environment variables must not start with <code>AWS_BATCH</code>; this naming
    /// convention is reserved for variables that are set by the Batch service.</p>
    /// </note>
    pub environment: std::option::Option<std::vec::Vec<crate::model::KeyValuePair>>,
    /// <p>The type and amount of resources to assign to a container, overriding the settings in
    /// the job definition. The supported resources include <code>GPU</code>, <code>MEMORY</code>,
    /// and <code>VCPU</code>.</p>
    pub resource_requirements:
        std::option::Option<std::vec::Vec<crate::model::ResourceRequirement>>,
}
impl ContainerOverrides {
    /// <p>Deprecated: use <code>resourceRequirements</code> with <code>type</code> set to
    /// <code>VCPU</code> instead. Not supported for jobs that run on Fargate resources. For jobs
    /// run on EC2 resources it overrides the <code>vcpus</code> parameter set in the job
    /// definition, but doesn't override a vCPU requirement specified in the job definition's
    /// <code>resourceRequirements</code> structure. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#override-resource-requirements">Can't override job definition
    /// resource requirements</a> in the <i>Batch User Guide</i>.</p>
    pub fn vcpus(&self) -> i32 {
        self.vcpus
    }
    /// <p>Deprecated: use <code>resourceRequirements</code> with <code>type</code> set to
    /// <code>MEMORY</code> instead. Not supported for jobs that run on Fargate resources. For jobs
    /// run on EC2 resources it overrides the <code>memory</code> parameter set in the job
    /// definition, but doesn't override a memory requirement specified in the job definition's
    /// <code>resourceRequirements</code> structure. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#override-resource-requirements">Can't override job definition
    /// resource requirements</a> in the <i>Batch User Guide</i>.</p>
    pub fn memory(&self) -> i32 {
        self.memory
    }
    /// <p>The command to send to the container, overriding the default command from the Docker
    /// image or the job definition.</p>
    pub fn command(&self) -> std::option::Option<&[std::string::String]> {
        self.command.as_ref().map(|v| v.as_slice())
    }
    /// <p>The instance type to use for a multi-node parallel job.</p>
    /// <note>
    /// <p>This parameter isn't applicable to single-node container jobs or jobs that run on
    /// Fargate resources, and shouldn't be provided.</p>
    /// </note>
    pub fn instance_type(&self) -> std::option::Option<&str> {
        self.instance_type.as_ref().map(|s| s.as_str())
    }
    /// <p>The environment variables to send to the container. New variables are added at launch;
    /// variables that already exist in the Docker image or the job definition are overridden.</p>
    /// <note>
    /// <p>Environment variables must not start with <code>AWS_BATCH</code>; this naming
    /// convention is reserved for variables that are set by the Batch service.</p>
    /// </note>
    pub fn environment(&self) -> std::option::Option<&[crate::model::KeyValuePair]> {
        self.environment.as_ref().map(|v| v.as_slice())
    }
    /// <p>The type and amount of resources to assign to a container, overriding the settings in
    /// the job definition. The supported resources include <code>GPU</code>, <code>MEMORY</code>,
    /// and <code>VCPU</code>.</p>
    pub fn resource_requirements(
        &self,
    ) -> std::option::Option<&[crate::model::ResourceRequirement]> {
        self.resource_requirements.as_ref().map(|v| v.as_slice())
    }
}
impl std::fmt::Debug for ContainerOverrides {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Field labels and order mirror the struct declaration exactly.
        f.debug_struct("ContainerOverrides")
            .field("vcpus", &self.vcpus)
            .field("memory", &self.memory)
            .field("command", &self.command)
            .field("instance_type", &self.instance_type)
            .field("environment", &self.environment)
            .field("resource_requirements", &self.resource_requirements)
            .finish()
    }
}
/// See [`ContainerOverrides`](crate::model::ContainerOverrides)
pub mod container_overrides {
    /// A builder for [`ContainerOverrides`](crate::model::ContainerOverrides)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) vcpus: std::option::Option<i32>,
        pub(crate) memory: std::option::Option<i32>,
        pub(crate) command: std::option::Option<std::vec::Vec<std::string::String>>,
        pub(crate) instance_type: std::option::Option<std::string::String>,
        pub(crate) environment: std::option::Option<std::vec::Vec<crate::model::KeyValuePair>>,
        pub(crate) resource_requirements:
            std::option::Option<std::vec::Vec<crate::model::ResourceRequirement>>,
    }
    impl Builder {
        /// <p>Deprecated: use <code>resourceRequirements</code> with <code>type</code> set to
        /// <code>VCPU</code> instead. Not supported for jobs that run on Fargate resources; for
        /// jobs run on EC2 resources it overrides the <code>vcpus</code> parameter set in the job
        /// definition. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#override-resource-requirements">Can't override job definition
        /// resource requirements</a> in the <i>Batch User Guide</i>.</p>
        pub fn vcpus(self, input: i32) -> Self {
            Self {
                vcpus: Some(input),
                ..self
            }
        }
        /// Like [`vcpus`](Self::vcpus), but takes an `Option` so the value can also be cleared.
        ///
        /// <p>Deprecated: use <code>resourceRequirements</code> with <code>type</code> set to
        /// <code>VCPU</code> instead. Not supported for jobs that run on Fargate resources. For
        /// more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#override-resource-requirements">Can't override job definition
        /// resource requirements</a> in the <i>Batch User Guide</i>.</p>
        pub fn set_vcpus(self, input: std::option::Option<i32>) -> Self {
            Self {
                vcpus: input,
                ..self
            }
        }
        /// <p>Deprecated: use <code>resourceRequirements</code> with <code>type</code> set to
        /// <code>MEMORY</code> instead. Not supported for jobs that run on Fargate resources; for
        /// jobs run on EC2 resources it overrides the <code>memory</code> parameter set in the job
        /// definition. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#override-resource-requirements">Can't override job definition
        /// resource requirements</a> in the <i>Batch User Guide</i>.</p>
        pub fn memory(self, input: i32) -> Self {
            Self {
                memory: Some(input),
                ..self
            }
        }
        /// Like [`memory`](Self::memory), but takes an `Option` so the value can also be cleared.
        ///
        /// <p>Deprecated: use <code>resourceRequirements</code> with <code>type</code> set to
        /// <code>MEMORY</code> instead. Not supported for jobs that run on Fargate resources. For
        /// more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#override-resource-requirements">Can't override job definition
        /// resource requirements</a> in the <i>Batch User Guide</i>.</p>
        pub fn set_memory(self, input: std::option::Option<i32>) -> Self {
            Self {
                memory: input,
                ..self
            }
        }
        /// Appends a single item to `command`.
        ///
        /// To override the contents of this collection use [`set_command`](Self::set_command).
        ///
        /// <p>The command to send to the container, overriding the default command from the Docker
        /// image or the job definition.</p>
        pub fn command(mut self, input: impl Into<std::string::String>) -> Self {
            // Lazily create the Vec on first append, then push in place.
            self.command.get_or_insert_with(Vec::new).push(input.into());
            self
        }
        /// <p>The command to send to the container, overriding the default command from the Docker
        /// image or the job definition.</p>
        pub fn set_command(
            self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            Self {
                command: input,
                ..self
            }
        }
        /// <p>The instance type to use for a multi-node parallel job.</p>
        /// <note>
        /// <p>This parameter isn't applicable to single-node container jobs or jobs that run on
        /// Fargate resources, and shouldn't be provided.</p>
        /// </note>
        pub fn instance_type(self, input: impl Into<std::string::String>) -> Self {
            Self {
                instance_type: Some(input.into()),
                ..self
            }
        }
        /// <p>The instance type to use for a multi-node parallel job.</p>
        /// <note>
        /// <p>This parameter isn't applicable to single-node container jobs or jobs that run on
        /// Fargate resources, and shouldn't be provided.</p>
        /// </note>
        pub fn set_instance_type(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                instance_type: input,
                ..self
            }
        }
        /// Appends a single item to `environment`.
        ///
        /// To override the contents of this collection use [`set_environment`](Self::set_environment).
        ///
        /// <p>The environment variables to send to the container. New variables are added at
        /// launch; variables that already exist in the Docker image or the job definition are
        /// overridden.</p>
        /// <note>
        /// <p>Environment variables must not start with <code>AWS_BATCH</code>; this naming
        /// convention is reserved for variables that are set by the Batch service.</p>
        /// </note>
        pub fn environment(mut self, input: impl Into<crate::model::KeyValuePair>) -> Self {
            // Lazily create the Vec on first append, then push in place.
            self.environment
                .get_or_insert_with(Vec::new)
                .push(input.into());
            self
        }
        /// <p>The environment variables to send to the container. New variables are added at
        /// launch; variables that already exist in the Docker image or the job definition are
        /// overridden.</p>
        /// <note>
        /// <p>Environment variables must not start with <code>AWS_BATCH</code>; this naming
        /// convention is reserved for variables that are set by the Batch service.</p>
        /// </note>
        pub fn set_environment(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::KeyValuePair>>,
        ) -> Self {
            Self {
                environment: input,
                ..self
            }
        }
        /// Appends a single item to `resource_requirements`.
        ///
        /// To override the contents of this collection use [`set_resource_requirements`](Self::set_resource_requirements).
        ///
        /// <p>The type and amount of resources to assign to a container, overriding the settings
        /// in the job definition. The supported resources include <code>GPU</code>,
        /// <code>MEMORY</code>, and <code>VCPU</code>.</p>
        pub fn resource_requirements(
            mut self,
            input: impl Into<crate::model::ResourceRequirement>,
        ) -> Self {
            // Lazily create the Vec on first append, then push in place.
            self.resource_requirements
                .get_or_insert_with(Vec::new)
                .push(input.into());
            self
        }
        /// <p>The type and amount of resources to assign to a container, overriding the settings
        /// in the job definition. The supported resources include <code>GPU</code>,
        /// <code>MEMORY</code>, and <code>VCPU</code>.</p>
        pub fn set_resource_requirements(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::ResourceRequirement>>,
        ) -> Self {
            Self {
                resource_requirements: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`ContainerOverrides`](crate::model::ContainerOverrides)
        pub fn build(self) -> crate::model::ContainerOverrides {
            // Move every accumulated field out of the builder in one destructuring step.
            let Self {
                vcpus,
                memory,
                command,
                instance_type,
                environment,
                resource_requirements,
            } = self;
            crate::model::ContainerOverrides {
                // The numeric fields are plain `i32` on the model, so unset values become 0.
                vcpus: vcpus.unwrap_or_default(),
                memory: memory.unwrap_or_default(),
                command,
                instance_type,
                environment,
                resource_requirements,
            }
        }
    }
}
impl ContainerOverrides {
    /// Creates a new builder-style object to manufacture [`ContainerOverrides`](crate::model::ContainerOverrides)
    pub fn builder() -> crate::model::container_overrides::Builder {
        // The builder derives `Default`, so every field starts out unset.
        std::default::Default::default()
    }
}
/// <p>The type and amount of a resource to assign to a container. The supported resources include
/// <code>GPU</code>, <code>MEMORY</code>, and <code>VCPU</code>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ResourceRequirement {
    /// <p>The quantity of the specified resource to reserve for the container. Its meaning and
    /// allowed values depend on the <code>type</code> specified.</p>
    /// <dl>
    /// <dt>type="GPU"</dt>
    /// <dd>
    /// <p>The number of physical GPUs to reserve for the container. The number of GPUs reserved
    /// for all containers in a job shouldn't exceed the number of available GPUs on the compute
    /// resource that the job is launched on.</p>
    /// <note>
    /// <p>GPUs are not available for jobs that are running on Fargate resources.</p>
    /// </note>
    /// </dd>
    /// <dt>type="MEMORY"</dt>
    /// <dd>
    /// <p>The memory hard limit (in MiB) presented to the container; a container that attempts to
    /// exceed it is terminated. Supported for jobs running on EC2 resources. Maps to
    /// <code>Memory</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the
    /// <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--memory</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.
    /// You must specify at least 4 MiB of memory for a job. This is required but can be specified
    /// in several places for multi-node parallel (MNP) jobs; it must be specified for each node at
    /// least once.</p>
    /// <note>
    /// <p>If you're trying to maximize your resource utilization by providing your jobs as much
    /// memory as possible for a particular instance type, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html">Memory
    /// Management</a> in the <i>Batch User Guide</i>.</p>
    /// </note>
    /// <p>For jobs running on Fargate resources, <code>value</code> is the hard limit (in MiB)
    /// and must be one of the supported values, paired with a matching <code>VCPU</code> value:
    /// 512 (VCPU 0.25); 1024 (0.25 or 0.5); 2048 (0.25, 0.5, or 1); 3072 (0.5 or 1);
    /// 4096 (0.5, 1, or 2); 5120, 6144, or 7168 (1 or 2); 8192 (1, 2, or 4);
    /// 9216 through 16384 in 1024 increments (2 or 4); and
    /// 17408 through 30720 in 1024 increments (4).</p>
    /// </dd>
    /// <dt>type="VCPU"</dt>
    /// <dd>
    /// <p>The number of vCPUs reserved for the container. Maps to <code>CpuShares</code> in the
    /// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the
    /// <code>--cpu-shares</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>; each vCPU is equivalent to
    /// 1,024 CPU shares. For EC2 resources you must specify at least one vCPU. This is required
    /// but can be specified in several places; it must be specified for each node at least
    /// once.</p>
    /// <p>For jobs running on Fargate resources, the supported values are 0.25, 0.5, 1, 2, and 4,
    /// and the <code>MEMORY</code> value must be one supported for that vCPU count:
    /// 0.25 (MEMORY 512, 1024, or 2048); 0.5 (1024 through 4096); 1 (2048 through 8192);
    /// 2 (4096 through 16384); and 4 (8192 through 30720), each memory range in
    /// 1024 MiB increments.</p>
    /// </dd>
    /// </dl>
    pub value: std::option::Option<std::string::String>,
    /// <p>The type of resource to assign to a container: <code>GPU</code>, <code>MEMORY</code>,
    /// or <code>VCPU</code>.</p>
    pub r#type: std::option::Option<crate::model::ResourceType>,
}
impl ResourceRequirement {
    /// <p>The quantity of the specified resource to reserve for the container. Its meaning and
    /// allowed values depend on the <code>type</code> specified: a GPU count for
    /// <code>GPU</code>, a memory hard limit in MiB for <code>MEMORY</code> (minimum 4 MiB), or a
    /// vCPU count for <code>VCPU</code> (each vCPU equals 1,024 Docker CPU shares). For jobs
    /// running on Fargate resources the value must come from the supported memory/vCPU pairings;
    /// see the field documentation on
    /// [`ResourceRequirement`](crate::model::ResourceRequirement) for the full tables.</p>
    pub fn value(&self) -> std::option::Option<&str> {
        self.value.as_ref().map(|v| v.as_str())
    }
    /// <p>The type of resource to assign to a container: <code>GPU</code>, <code>MEMORY</code>,
    /// or <code>VCPU</code>.</p>
    pub fn r#type(&self) -> std::option::Option<&crate::model::ResourceType> {
        self.r#type.as_ref()
    }
}
impl std::fmt::Debug for ResourceRequirement {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Field labels (including the raw-identifier "r#type") match the struct declaration.
        f.debug_struct("ResourceRequirement")
            .field("value", &self.value)
            .field("r#type", &self.r#type)
            .finish()
    }
}
/// See [`ResourceRequirement`](crate::model::ResourceRequirement)
pub mod resource_requirement {
/// A builder for [`ResourceRequirement`](crate::model::ResourceRequirement)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
    // In-progress `value` string; `None` until set by `value`/`set_value`.
    pub(crate) value: std::option::Option<std::string::String>,
    // In-progress resource `type`; `None` until set.
    pub(crate) r#type: std::option::Option<crate::model::ResourceType>,
}
impl Builder {
/// <p>The quantity of the specified resource to reserve for the container. The values vary based on the
/// <code>type</code> specified.</p>
/// <dl>
/// <dt>type="GPU"</dt>
/// <dd>
/// <p>The number of physical GPUs to reserve for the container. The number of GPUs reserved for all containers in a
/// job shouldn't exceed the number of available GPUs on the compute resource that the job is launched on.</p>
/// <note>
/// <p>GPUs are not available for jobs that are running on Fargate resources.</p>
/// </note>
/// </dd>
/// <dt>type="MEMORY"</dt>
/// <dd>
/// <p>The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are
/// running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated.
/// This parameter maps to <code>Memory</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the
/// <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--memory</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.
/// You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for
/// multi-node parallel (MNP) jobs. It must be specified for each node at least once. This parameter maps to
/// <code>Memory</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the
/// <code>--memory</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
/// <note>
/// <p>If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for
/// a particular instance type, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html">Memory
/// Management</a> in the <i>Batch User Guide</i>.</p>
/// </note>
/// <p>For jobs that are running on Fargate resources, then <code>value</code> is the hard limit (in MiB), and
/// must match one of the supported values and the <code>VCPU</code> values must be one of the values supported for
/// that memory value.</p>
/// <dl>
/// <dt>value = 512</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 0.25</p>
/// </dd>
/// <dt>value = 1024</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 0.25 or 0.5</p>
/// </dd>
/// <dt>value = 2048</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 0.25, 0.5, or 1</p>
/// </dd>
/// <dt>value = 3072</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 0.5, or 1</p>
/// </dd>
/// <dt>value = 4096</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 0.5, 1, or 2</p>
/// </dd>
/// <dt>value = 5120, 6144, or 7168</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 1 or 2</p>
/// </dd>
/// <dt>value = 8192</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 1, 2, or 4</p>
/// </dd>
/// <dt>value = 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 2 or 4</p>
/// </dd>
/// <dt>value = 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 4</p>
/// </dd>
/// </dl>
/// </dd>
/// <dt>type="VCPU"</dt>
/// <dd>
/// <p>The number of vCPUs reserved for the container. This parameter maps to <code>CpuShares</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--cpu-shares</code> option to
/// <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. Each vCPU is equivalent to 1,024 CPU shares. For EC2
/// resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be
/// specified for each node at least once.</p>
/// <p>For jobs that are running on Fargate resources, then <code>value</code> must match one of the supported
/// values and the <code>MEMORY</code> values must be one of the values supported for that
/// <code>VCPU</code>
/// value. The supported values are 0.25, 0.5, 1, 2, and 4</p>
/// <dl>
/// <dt>value = 0.25</dt>
/// <dd>
/// <p>
/// <code>MEMORY</code> = 512, 1024, or 2048</p>
/// </dd>
/// <dt>value = 0.5</dt>
/// <dd>
/// <p>
/// <code>MEMORY</code> = 1024, 2048, 3072, or 4096</p>
/// </dd>
/// <dt>value = 1</dt>
/// <dd>
/// <p>
/// <code>MEMORY</code> = 2048, 3072, 4096, 5120, 6144, 7168, or 8192</p>
/// </dd>
/// <dt>value = 2</dt>
/// <dd>
/// <p>
/// <code>MEMORY</code> = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384</p>
/// </dd>
/// <dt>value = 4</dt>
/// <dd>
/// <p>
/// <code>MEMORY</code> = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456,
/// 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720</p>
/// </dd>
/// </dl>
/// </dd>
/// </dl>
pub fn value(mut self, input: impl Into<std::string::String>) -> Self {
self.value = Some(input.into());
self
}
/// <p>The quantity of the specified resource to reserve for the container. The values vary based on the
/// <code>type</code> specified.</p>
/// <dl>
/// <dt>type="GPU"</dt>
/// <dd>
/// <p>The number of physical GPUs to reserve for the container. The number of GPUs reserved for all containers in a
/// job shouldn't exceed the number of available GPUs on the compute resource that the job is launched on.</p>
/// <note>
/// <p>GPUs are not available for jobs that are running on Fargate resources.</p>
/// </note>
/// </dd>
/// <dt>type="MEMORY"</dt>
/// <dd>
/// <p>The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are
/// running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated.
/// This parameter maps to <code>Memory</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the
/// <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--memory</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.
/// You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for
/// multi-node parallel (MNP) jobs. It must be specified for each node at least once. This parameter maps to
/// <code>Memory</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the
/// <code>--memory</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
/// <note>
/// <p>If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for
/// a particular instance type, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html">Memory
/// Management</a> in the <i>Batch User Guide</i>.</p>
/// </note>
/// <p>For jobs that are running on Fargate resources, then <code>value</code> is the hard limit (in MiB), and
/// must match one of the supported values and the <code>VCPU</code> values must be one of the values supported for
/// that memory value.</p>
/// <dl>
/// <dt>value = 512</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 0.25</p>
/// </dd>
/// <dt>value = 1024</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 0.25 or 0.5</p>
/// </dd>
/// <dt>value = 2048</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 0.25, 0.5, or 1</p>
/// </dd>
/// <dt>value = 3072</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 0.5, or 1</p>
/// </dd>
/// <dt>value = 4096</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 0.5, 1, or 2</p>
/// </dd>
/// <dt>value = 5120, 6144, or 7168</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 1 or 2</p>
/// </dd>
/// <dt>value = 8192</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 1, 2, or 4</p>
/// </dd>
/// <dt>value = 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 2 or 4</p>
/// </dd>
/// <dt>value = 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720</dt>
/// <dd>
/// <p>
/// <code>VCPU</code> = 4</p>
/// </dd>
/// </dl>
/// </dd>
/// <dt>type="VCPU"</dt>
/// <dd>
/// <p>The number of vCPUs reserved for the container. This parameter maps to <code>CpuShares</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--cpu-shares</code> option to
/// <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. Each vCPU is equivalent to 1,024 CPU shares. For EC2
/// resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be
/// specified for each node at least once.</p>
/// <p>For jobs that are running on Fargate resources, then <code>value</code> must match one of the supported
/// values and the <code>MEMORY</code> values must be one of the values supported for that
/// <code>VCPU</code>
/// value. The supported values are 0.25, 0.5, 1, 2, and 4</p>
/// <dl>
/// <dt>value = 0.25</dt>
/// <dd>
/// <p>
/// <code>MEMORY</code> = 512, 1024, or 2048</p>
/// </dd>
/// <dt>value = 0.5</dt>
/// <dd>
/// <p>
/// <code>MEMORY</code> = 1024, 2048, 3072, or 4096</p>
/// </dd>
/// <dt>value = 1</dt>
/// <dd>
/// <p>
/// <code>MEMORY</code> = 2048, 3072, 4096, 5120, 6144, 7168, or 8192</p>
/// </dd>
/// <dt>value = 2</dt>
/// <dd>
/// <p>
/// <code>MEMORY</code> = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384</p>
/// </dd>
/// <dt>value = 4</dt>
/// <dd>
/// <p>
/// <code>MEMORY</code> = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456,
/// 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720</p>
/// </dd>
/// </dl>
/// </dd>
/// </dl>
pub fn set_value(mut self, input: std::option::Option<std::string::String>) -> Self {
self.value = input;
self
}
/// <p>The type of resource to assign to a container. The supported resources include <code>GPU</code>,
/// <code>MEMORY</code>, and <code>VCPU</code>.</p>
pub fn r#type(mut self, input: crate::model::ResourceType) -> Self {
self.r#type = Some(input);
self
}
/// <p>The type of resource to assign to a container. The supported resources include <code>GPU</code>,
/// <code>MEMORY</code>, and <code>VCPU</code>.</p>
pub fn set_type(mut self, input: std::option::Option<crate::model::ResourceType>) -> Self {
self.r#type = input;
self
}
/// Consumes the builder and constructs a [`ResourceRequirement`](crate::model::ResourceRequirement)
pub fn build(self) -> crate::model::ResourceRequirement {
    // Move each field out of the consumed builder individually.
    let value = self.value;
    let r#type = self.r#type;
    crate::model::ResourceRequirement { value, r#type }
}
}
}
impl ResourceRequirement {
    /// Creates a new builder-style object to manufacture [`ResourceRequirement`](crate::model::ResourceRequirement)
    pub fn builder() -> crate::model::resource_requirement::Builder {
        // The builder derives `Default`, so an empty builder is just its default value.
        std::default::Default::default()
    }
}
/// The type of resource to reserve for a container: `GPU`, `MEMORY`, or `VCPU`.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash)]
pub enum ResourceType {
    /// Corresponds to the wire value `GPU`.
    Gpu,
    /// Corresponds to the wire value `MEMORY`.
    Memory,
    /// Corresponds to the wire value `VCPU`.
    Vcpu,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for ResourceType {
fn from(s: &str) -> Self {
match s {
"GPU" => ResourceType::Gpu,
"MEMORY" => ResourceType::Memory,
"VCPU" => ResourceType::Vcpu,
other => ResourceType::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for ResourceType {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(ResourceType::from(s))
}
}
impl ResourceType {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
ResourceType::Gpu => "GPU",
ResourceType::Memory => "MEMORY",
ResourceType::Vcpu => "VCPU",
ResourceType::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["GPU", "MEMORY", "VCPU"]
}
}
impl AsRef<str> for ResourceType {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>A key-value pair object.</p>
/// <p>This struct is <code>#[non_exhaustive]</code>; construct instances with
/// [`KeyValuePair::builder`](crate::model::KeyValuePair::builder).</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct KeyValuePair {
/// <p>The name of the key-value pair. For environment variables, this is the name of the environment variable.</p>
// `None` when the service did not return a name.
pub name: std::option::Option<std::string::String>,
/// <p>The value of the key-value pair. For environment variables, this is the value of the environment
/// variable.</p>
// `None` when the service did not return a value.
pub value: std::option::Option<std::string::String>,
}
impl KeyValuePair {
    /// <p>The name of the key-value pair. For environment variables, this is the name of the environment variable.</p>
    pub fn name(&self) -> std::option::Option<&str> {
        // Borrow the owned string, if present, as a `&str`.
        self.name.as_ref().map(String::as_str)
    }
    /// <p>The value of the key-value pair. For environment variables, this is the value of the environment
    /// variable.</p>
    pub fn value(&self) -> std::option::Option<&str> {
        self.value.as_ref().map(String::as_str)
    }
}
impl std::fmt::Debug for KeyValuePair {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chain the debug-struct builder instead of holding it in a local.
        f.debug_struct("KeyValuePair")
            .field("name", &self.name)
            .field("value", &self.value)
            .finish()
    }
}
/// See [`KeyValuePair`](crate::model::KeyValuePair)
pub mod key_value_pair {
    /// A builder for [`KeyValuePair`](crate::model::KeyValuePair)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) value: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The name of the key-value pair. For environment variables, this is the name of the environment variable.</p>
        pub fn name(self, input: impl Into<std::string::String>) -> Self {
            Builder {
                name: Some(input.into()),
                ..self
            }
        }
        /// <p>The name of the key-value pair. For environment variables, this is the name of the environment variable.</p>
        pub fn set_name(self, input: std::option::Option<std::string::String>) -> Self {
            Builder { name: input, ..self }
        }
        /// <p>The value of the key-value pair. For environment variables, this is the value of the environment
        /// variable.</p>
        pub fn value(self, input: impl Into<std::string::String>) -> Self {
            Builder {
                value: Some(input.into()),
                ..self
            }
        }
        /// <p>The value of the key-value pair. For environment variables, this is the value of the environment
        /// variable.</p>
        pub fn set_value(self, input: std::option::Option<std::string::String>) -> Self {
            Builder { value: input, ..self }
        }
        /// Consumes the builder and constructs a [`KeyValuePair`](crate::model::KeyValuePair)
        pub fn build(self) -> crate::model::KeyValuePair {
            let Builder { name, value } = self;
            crate::model::KeyValuePair { name, value }
        }
    }
}
impl KeyValuePair {
    /// Creates a new builder-style object to manufacture [`KeyValuePair`](crate::model::KeyValuePair)
    pub fn builder() -> crate::model::key_value_pair::Builder {
        // The builder derives `Default`, so an empty builder is its default value.
        std::default::Default::default()
    }
}
/// <p>An object representing an Batch job dependency.</p>
/// <p>This struct is <code>#[non_exhaustive]</code>; construct instances with
/// [`JobDependency::builder`](crate::model::JobDependency::builder).</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct JobDependency {
/// <p>The job ID of the Batch job associated with this dependency.</p>
pub job_id: std::option::Option<std::string::String>,
/// <p>The type of the job dependency.</p>
// `r#` prefix is required because `type` is a Rust keyword.
pub r#type: std::option::Option<crate::model::ArrayJobDependency>,
}
impl JobDependency {
    /// <p>The job ID of the Batch job associated with this dependency.</p>
    pub fn job_id(&self) -> std::option::Option<&str> {
        // Borrow the owned string, if present, as a `&str`.
        self.job_id.as_ref().map(String::as_str)
    }
    /// <p>The type of the job dependency.</p>
    pub fn r#type(&self) -> std::option::Option<&crate::model::ArrayJobDependency> {
        self.r#type.as_ref()
    }
}
impl std::fmt::Debug for JobDependency {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("JobDependency")
            .field("job_id", &self.job_id)
            .field("r#type", &self.r#type)
            .finish()
    }
}
/// See [`JobDependency`](crate::model::JobDependency)
pub mod job_dependency {
    /// A builder for [`JobDependency`](crate::model::JobDependency)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) job_id: std::option::Option<std::string::String>,
        pub(crate) r#type: std::option::Option<crate::model::ArrayJobDependency>,
    }
    impl Builder {
        /// <p>The job ID of the Batch job associated with this dependency.</p>
        pub fn job_id(self, input: impl Into<std::string::String>) -> Self {
            Builder {
                job_id: Some(input.into()),
                ..self
            }
        }
        /// <p>The job ID of the Batch job associated with this dependency.</p>
        pub fn set_job_id(self, input: std::option::Option<std::string::String>) -> Self {
            Builder { job_id: input, ..self }
        }
        /// <p>The type of the job dependency.</p>
        pub fn r#type(self, input: crate::model::ArrayJobDependency) -> Self {
            Builder {
                r#type: Some(input),
                ..self
            }
        }
        /// <p>The type of the job dependency.</p>
        pub fn set_type(
            self,
            input: std::option::Option<crate::model::ArrayJobDependency>,
        ) -> Self {
            Builder { r#type: input, ..self }
        }
        /// Consumes the builder and constructs a [`JobDependency`](crate::model::JobDependency)
        pub fn build(self) -> crate::model::JobDependency {
            let Builder { job_id, r#type } = self;
            crate::model::JobDependency { job_id, r#type }
        }
    }
}
impl JobDependency {
    /// Creates a new builder-style object to manufacture [`JobDependency`](crate::model::JobDependency)
    pub fn builder() -> crate::model::job_dependency::Builder {
        std::default::Default::default()
    }
}
/// The type of an array job dependency: `N_TO_N` or `SEQUENTIAL`.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash)]
pub enum ArrayJobDependency {
    /// Corresponds to the wire value `N_TO_N`.
    NToN,
    /// Corresponds to the wire value `SEQUENTIAL`.
    Sequential,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for ArrayJobDependency {
fn from(s: &str) -> Self {
match s {
"N_TO_N" => ArrayJobDependency::NToN,
"SEQUENTIAL" => ArrayJobDependency::Sequential,
other => ArrayJobDependency::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for ArrayJobDependency {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(ArrayJobDependency::from(s))
}
}
impl ArrayJobDependency {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
ArrayJobDependency::NToN => "N_TO_N",
ArrayJobDependency::Sequential => "SEQUENTIAL",
ArrayJobDependency::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["N_TO_N", "SEQUENTIAL"]
}
}
impl AsRef<str> for ArrayJobDependency {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>An object representing an Batch array job.</p>
/// <p>This struct is <code>#[non_exhaustive]</code>; construct instances with
/// [`ArrayProperties::builder`](crate::model::ArrayProperties::builder).</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ArrayProperties {
/// <p>The size of the array job.</p>
// Defaults to 0 when built without an explicit value (see `array_properties::Builder::build`).
pub size: i32,
}
impl ArrayProperties {
/// <p>The size of the array job.</p>
// `i32` is `Copy`, so the getter returns the value directly rather than a reference.
pub fn size(&self) -> i32 {
self.size
}
}
impl std::fmt::Debug for ArrayProperties {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ArrayProperties")
            .field("size", &self.size)
            .finish()
    }
}
/// See [`ArrayProperties`](crate::model::ArrayProperties)
pub mod array_properties {
    /// A builder for [`ArrayProperties`](crate::model::ArrayProperties)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) size: std::option::Option<i32>,
    }
    impl Builder {
        /// <p>The size of the array job.</p>
        pub fn size(self, input: i32) -> Self {
            Builder { size: Some(input) }
        }
        /// <p>The size of the array job.</p>
        pub fn set_size(self, input: std::option::Option<i32>) -> Self {
            Builder { size: input }
        }
        /// Consumes the builder and constructs a [`ArrayProperties`](crate::model::ArrayProperties)
        pub fn build(self) -> crate::model::ArrayProperties {
            let Builder { size } = self;
            crate::model::ArrayProperties {
                // An unset size falls back to `i32::default()`, i.e. 0.
                size: size.unwrap_or_default(),
            }
        }
    }
}
impl ArrayProperties {
    /// Creates a new builder-style object to manufacture [`ArrayProperties`](crate::model::ArrayProperties)
    pub fn builder() -> crate::model::array_properties::Builder {
        std::default::Default::default()
    }
}
/// The platform capability required by a job: `EC2` or `FARGATE`.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash)]
pub enum PlatformCapability {
    /// Corresponds to the wire value `EC2`.
    Ec2,
    /// Corresponds to the wire value `FARGATE`.
    Fargate,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for PlatformCapability {
fn from(s: &str) -> Self {
match s {
"EC2" => PlatformCapability::Ec2,
"FARGATE" => PlatformCapability::Fargate,
other => PlatformCapability::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for PlatformCapability {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(PlatformCapability::from(s))
}
}
impl PlatformCapability {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
PlatformCapability::Ec2 => "EC2",
PlatformCapability::Fargate => "FARGATE",
PlatformCapability::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["EC2", "FARGATE"]
}
}
impl AsRef<str> for PlatformCapability {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>An object representing the node properties of a multi-node parallel job.</p>
/// <p>This struct is <code>#[non_exhaustive]</code>; construct instances with
/// [`NodeProperties::builder`](crate::model::NodeProperties::builder).</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct NodeProperties {
/// <p>The number of nodes associated with a multi-node parallel job.</p>
// Defaults to 0 when built without an explicit value (see `node_properties::Builder::build`).
pub num_nodes: i32,
/// <p>Specifies the node index for the main node of a multi-node parallel job. This node index value must be fewer
/// than the number of nodes.</p>
// Defaults to 0 when built without an explicit value (see `node_properties::Builder::build`).
pub main_node: i32,
/// <p>A list of node ranges and their properties associated with a multi-node parallel job.</p>
pub node_range_properties: std::option::Option<std::vec::Vec<crate::model::NodeRangeProperty>>,
}
impl NodeProperties {
    /// <p>The number of nodes associated with a multi-node parallel job.</p>
    pub fn num_nodes(&self) -> i32 {
        self.num_nodes
    }
    /// <p>Specifies the node index for the main node of a multi-node parallel job. This node index value must be fewer
    /// than the number of nodes.</p>
    pub fn main_node(&self) -> i32 {
        self.main_node
    }
    /// <p>A list of node ranges and their properties associated with a multi-node parallel job.</p>
    pub fn node_range_properties(&self) -> std::option::Option<&[crate::model::NodeRangeProperty]> {
        // Borrow the owned vector, if present, as a slice.
        self.node_range_properties.as_ref().map(Vec::as_slice)
    }
}
impl std::fmt::Debug for NodeProperties {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("NodeProperties")
            .field("num_nodes", &self.num_nodes)
            .field("main_node", &self.main_node)
            .field("node_range_properties", &self.node_range_properties)
            .finish()
    }
}
/// See [`NodeProperties`](crate::model::NodeProperties)
pub mod node_properties {
    /// A builder for [`NodeProperties`](crate::model::NodeProperties)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) num_nodes: std::option::Option<i32>,
        pub(crate) main_node: std::option::Option<i32>,
        pub(crate) node_range_properties:
            std::option::Option<std::vec::Vec<crate::model::NodeRangeProperty>>,
    }
    impl Builder {
        /// <p>The number of nodes associated with a multi-node parallel job.</p>
        pub fn num_nodes(mut self, input: i32) -> Self {
            self.num_nodes.replace(input);
            self
        }
        /// <p>The number of nodes associated with a multi-node parallel job.</p>
        pub fn set_num_nodes(mut self, input: std::option::Option<i32>) -> Self {
            self.num_nodes = input;
            self
        }
        /// <p>Specifies the node index for the main node of a multi-node parallel job. This node index value must be fewer
        /// than the number of nodes.</p>
        pub fn main_node(mut self, input: i32) -> Self {
            self.main_node.replace(input);
            self
        }
        /// <p>Specifies the node index for the main node of a multi-node parallel job. This node index value must be fewer
        /// than the number of nodes.</p>
        pub fn set_main_node(mut self, input: std::option::Option<i32>) -> Self {
            self.main_node = input;
            self
        }
        /// Appends an item to `node_range_properties`.
        ///
        /// To override the contents of this collection use [`set_node_range_properties`](Self::set_node_range_properties).
        ///
        /// <p>A list of node ranges and their properties associated with a multi-node parallel job.</p>
        pub fn node_range_properties(
            mut self,
            input: impl Into<crate::model::NodeRangeProperty>,
        ) -> Self {
            // Lazily create the vector on first append, then push in place.
            self.node_range_properties
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// <p>A list of node ranges and their properties associated with a multi-node parallel job.</p>
        pub fn set_node_range_properties(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::NodeRangeProperty>>,
        ) -> Self {
            self.node_range_properties = input;
            self
        }
        /// Consumes the builder and constructs a [`NodeProperties`](crate::model::NodeProperties)
        pub fn build(self) -> crate::model::NodeProperties {
            let Builder {
                num_nodes,
                main_node,
                node_range_properties,
            } = self;
            crate::model::NodeProperties {
                // Unset integer fields fall back to `i32::default()`, i.e. 0.
                num_nodes: num_nodes.unwrap_or_default(),
                main_node: main_node.unwrap_or_default(),
                node_range_properties,
            }
        }
    }
}
impl NodeProperties {
    /// Creates a new builder-style object to manufacture [`NodeProperties`](crate::model::NodeProperties)
    pub fn builder() -> crate::model::node_properties::Builder {
        std::default::Default::default()
    }
}
/// <p>An object representing the properties of the node range for a multi-node parallel job.</p>
/// <p>This struct is <code>#[non_exhaustive]</code>; construct instances with
/// [`NodeRangeProperty::builder`](crate::model::NodeRangeProperty::builder).</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct NodeRangeProperty {
/// <p>The range of nodes, using node index values. A range of <code>0:3</code> indicates nodes with index values of
/// <code>0</code> through <code>3</code>. If the starting range value is omitted (<code>:n</code>), then <code>0</code>
/// is used to start the range. If the ending range value is omitted (<code>n:</code>), then the highest possible node
/// index is used to end the range. Your accumulative node ranges must account for all nodes (<code>0:n</code>). You can
/// nest node ranges, for example <code>0:10</code> and <code>4:5</code>, in which case the <code>4:5</code> range
/// properties override the <code>0:10</code> properties.</p>
pub target_nodes: std::option::Option<std::string::String>,
/// <p>The container details for the node range.</p>
pub container: std::option::Option<crate::model::ContainerProperties>,
}
impl NodeRangeProperty {
    /// <p>The range of nodes, using node index values. A range of <code>0:3</code> indicates nodes with index values of
    /// <code>0</code> through <code>3</code>. If the starting range value is omitted (<code>:n</code>), then <code>0</code>
    /// is used to start the range. If the ending range value is omitted (<code>n:</code>), then the highest possible node
    /// index is used to end the range. Your accumulative node ranges must account for all nodes (<code>0:n</code>). You can
    /// nest node ranges, for example <code>0:10</code> and <code>4:5</code>, in which case the <code>4:5</code> range
    /// properties override the <code>0:10</code> properties.</p>
    pub fn target_nodes(&self) -> std::option::Option<&str> {
        // Borrow the owned string, if present, as a `&str`.
        self.target_nodes.as_ref().map(String::as_str)
    }
    /// <p>The container details for the node range.</p>
    pub fn container(&self) -> std::option::Option<&crate::model::ContainerProperties> {
        self.container.as_ref()
    }
}
impl std::fmt::Debug for NodeRangeProperty {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("NodeRangeProperty")
            .field("target_nodes", &self.target_nodes)
            .field("container", &self.container)
            .finish()
    }
}
/// See [`NodeRangeProperty`](crate::model::NodeRangeProperty)
pub mod node_range_property {
    /// A builder for [`NodeRangeProperty`](crate::model::NodeRangeProperty)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) target_nodes: std::option::Option<std::string::String>,
        pub(crate) container: std::option::Option<crate::model::ContainerProperties>,
    }
    impl Builder {
        /// <p>The range of nodes, using node index values. A range of <code>0:3</code> indicates nodes with index values of
        /// <code>0</code> through <code>3</code>. If the starting range value is omitted (<code>:n</code>), then <code>0</code>
        /// is used to start the range. If the ending range value is omitted (<code>n:</code>), then the highest possible node
        /// index is used to end the range. Your accumulative node ranges must account for all nodes (<code>0:n</code>). You can
        /// nest node ranges, for example <code>0:10</code> and <code>4:5</code>, in which case the <code>4:5</code> range
        /// properties override the <code>0:10</code> properties.</p>
        pub fn target_nodes(self, input: impl Into<std::string::String>) -> Self {
            Builder {
                target_nodes: Some(input.into()),
                ..self
            }
        }
        /// <p>The range of nodes, using node index values. A range of <code>0:3</code> indicates nodes with index values of
        /// <code>0</code> through <code>3</code>. If the starting range value is omitted (<code>:n</code>), then <code>0</code>
        /// is used to start the range. If the ending range value is omitted (<code>n:</code>), then the highest possible node
        /// index is used to end the range. Your accumulative node ranges must account for all nodes (<code>0:n</code>). You can
        /// nest node ranges, for example <code>0:10</code> and <code>4:5</code>, in which case the <code>4:5</code> range
        /// properties override the <code>0:10</code> properties.</p>
        pub fn set_target_nodes(self, input: std::option::Option<std::string::String>) -> Self {
            Builder {
                target_nodes: input,
                ..self
            }
        }
        /// <p>The container details for the node range.</p>
        pub fn container(self, input: crate::model::ContainerProperties) -> Self {
            Builder {
                container: Some(input),
                ..self
            }
        }
        /// <p>The container details for the node range.</p>
        pub fn set_container(
            self,
            input: std::option::Option<crate::model::ContainerProperties>,
        ) -> Self {
            Builder {
                container: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`NodeRangeProperty`](crate::model::NodeRangeProperty)
        pub fn build(self) -> crate::model::NodeRangeProperty {
            let Builder {
                target_nodes,
                container,
            } = self;
            crate::model::NodeRangeProperty {
                target_nodes,
                container,
            }
        }
    }
}
impl NodeRangeProperty {
    /// Creates a new builder-style object to manufacture [`NodeRangeProperty`](crate::model::NodeRangeProperty)
    pub fn builder() -> crate::model::node_range_property::Builder {
        std::default::Default::default()
    }
}
/// <p>Container properties are used in job definitions to describe the container that's launched as part of a
/// job.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ContainerProperties {
/// <p>The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker
/// Hub registry are available by default. Other repositories are specified with
/// <code>
/// <i>repository-url</i>/<i>image</i>:<i>tag</i>
/// </code>.
/// Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons,
/// periods, forward slashes, and number signs are allowed. This parameter maps to <code>Image</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of
/// the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>IMAGE</code> parameter of <a href="https://docs.docker.com/engine/reference/run/">docker
/// run</a>.</p>
/// <note>
/// <p>Docker image architecture must match the processor architecture of the compute resources that they're scheduled
/// on. For example, ARM-based Docker images can only run on ARM-based compute resources.</p>
/// </note>
/// <ul>
/// <li>
/// <p>Images in Amazon ECR repositories use the full registry and repository URI (for example,
/// <code>012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name></code>).</p>
/// </li>
/// <li>
/// <p>Images in official repositories on Docker Hub use a single name (for example, <code>ubuntu</code> or
/// <code>mongo</code>).</p>
/// </li>
/// <li>
/// <p>Images in other repositories on Docker Hub are qualified with an organization name (for example,
/// <code>amazon/amazon-ecs-agent</code>).</p>
/// </li>
/// <li>
/// <p>Images in other online repositories are qualified further by a domain name (for example,
/// <code>quay.io/assemblyline/ubuntu</code>).</p>
/// </li>
/// </ul>
pub image: std::option::Option<std::string::String>,
/// <p>This parameter is deprecated, use <code>resourceRequirements</code> to specify the vCPU requirements for the job
/// definition. It's not supported for jobs that run on Fargate resources. For jobs run on EC2 resources, it specifies
/// the number of vCPUs reserved for the job.</p>
///
/// <p>Each vCPU is equivalent to 1,024 CPU shares. This parameter maps to <code>CpuShares</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--cpu-shares</code> option to
/// <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. The number of vCPUs must be specified but can be specified
/// in several places. You must specify it at least once for each node.</p>
pub vcpus: i32,
/// <p>This parameter is deprecated, use
/// <code>resourceRequirements</code> to specify the memory requirements for the job definition. It's not supported for
/// jobs that run on Fargate resources. For jobs run on EC2 resources, it specifies the memory hard
/// limit (in MiB) for a container. If your container attempts to exceed the specified number, it's terminated. You must
/// specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several
/// places. It must be specified for each node at least once.</p>
pub memory: i32,
/// <p>The command that's passed to the container. This parameter maps to <code>Cmd</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>COMMAND</code> parameter to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. For more information, see <a href="https://docs.docker.com/engine/reference/builder/#cmd">https://docs.docker.com/engine/reference/builder/#cmd</a>.</p>
pub command: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The Amazon Resource Name (ARN) of the IAM role that the container can assume for Amazon Web Services permissions. For more information, see
/// <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html">IAM Roles for Tasks</a>
/// in the <i>Amazon Elastic Container Service Developer Guide</i>.</p>
pub job_role_arn: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (ARN) of the execution role that Batch can assume. For jobs that run on Fargate resources, you must
/// provide an execution role. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html">Batch execution IAM role</a> in the
/// <i>Batch User Guide</i>.</p>
pub execution_role_arn: std::option::Option<std::string::String>,
/// <p>A list of data volumes used in a job.</p>
pub volumes: std::option::Option<std::vec::Vec<crate::model::Volume>>,
/// <p>The environment variables to pass to a container. This parameter maps to <code>Env</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--env</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
/// <important>
/// <p>We don't recommend using plaintext environment variables for sensitive information, such as credential
/// data.</p>
/// </important>
/// <note>
/// <p>Environment variables must not start with <code>AWS_BATCH</code>; this naming
/// convention is reserved for variables that are set by the Batch service.</p>
/// </note>
pub environment: std::option::Option<std::vec::Vec<crate::model::KeyValuePair>>,
/// <p>The mount points for data volumes in your container. This parameter maps to <code>Volumes</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--volume</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
pub mount_points: std::option::Option<std::vec::Vec<crate::model::MountPoint>>,
/// <p>When this parameter is true, the container is given read-only access to its root file system. This parameter
/// maps to <code>ReadonlyRootfs</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and
/// the <code>--read-only</code> option to <code>docker run</code>.</p>
pub readonly_root_filesystem: bool,
/// <p>When this parameter is true, the container is given elevated permissions on the host container instance (similar
/// to the <code>root</code> user). This parameter maps to <code>Privileged</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--privileged</code> option to
/// <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. The default value is false.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided, or
/// specified as false.</p>
/// </note>
pub privileged: bool,
/// <p>A list of <code>ulimits</code> to set in the container. This parameter maps to <code>Ulimits</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--ulimit</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be
/// provided.</p>
/// </note>
pub ulimits: std::option::Option<std::vec::Vec<crate::model::Ulimit>>,
/// <p>The user name to use inside the container. This parameter maps to <code>User</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--user</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
pub user: std::option::Option<std::string::String>,
/// <p>The instance type to use for a multi-node parallel job. All node groups in a multi-node parallel job must use
/// the same instance type.</p>
/// <note>
/// <p>This parameter isn't applicable to single-node container jobs or jobs that run on Fargate resources, and
/// shouldn't be provided.</p>
/// </note>
pub instance_type: std::option::Option<std::string::String>,
/// <p>The type and amount of resources to assign to a container. The supported resources include <code>GPU</code>,
/// <code>MEMORY</code>, and <code>VCPU</code>.</p>
pub resource_requirements:
std::option::Option<std::vec::Vec<crate::model::ResourceRequirement>>,
/// <p>Linux-specific modifications that are applied to the container, such as details for device mappings.</p>
pub linux_parameters: std::option::Option<crate::model::LinuxParameters>,
/// <p>The log configuration specification for the container.</p>
/// <p>This parameter maps to <code>LogConfig</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the
/// <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--log-driver</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.
/// By default, containers use the same logging driver that the Docker daemon uses. However the container might use a
/// different logging driver than the Docker daemon by specifying a log driver with this parameter in the container
/// definition. To use a different logging driver for a container, the log system must be configured properly on the
/// container instance (or on a different log server for remote logging options). For more information on the options for
/// different supported log drivers, see <a href="https://docs.docker.com/engine/admin/logging/overview/">Configure
/// logging drivers</a> in the Docker documentation.</p>
/// <note>
/// <p>Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the <a>LogConfiguration</a> data type).</p>
/// </note>
/// <p>This parameter requires version 1.18 of the Docker Remote API or greater on your
/// container instance. To check the Docker Remote API version on your container instance, log into your
/// container instance and run the following command: <code>sudo docker version | grep "Server API version"</code>
/// </p>
/// <note>
/// <p>The Amazon ECS container agent running on a container instance must register the logging drivers available on that
/// instance with the <code>ECS_AVAILABLE_LOGGING_DRIVERS</code> environment variable before containers placed on that
/// instance can use these log configuration options. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html">Amazon ECS Container Agent Configuration</a> in the
/// <i>Amazon Elastic Container Service Developer Guide</i>.</p>
/// </note>
pub log_configuration: std::option::Option<crate::model::LogConfiguration>,
/// <p>The secrets for the container. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html">Specifying sensitive data</a> in the
/// <i>Batch User Guide</i>.</p>
pub secrets: std::option::Option<std::vec::Vec<crate::model::Secret>>,
/// <p>The network configuration for jobs that are running on Fargate resources. Jobs that are running on EC2
/// resources must not specify this parameter.</p>
pub network_configuration: std::option::Option<crate::model::NetworkConfiguration>,
/// <p>The platform configuration for jobs that are running on Fargate resources. Jobs that are running on EC2
/// resources must not specify this parameter.</p>
pub fargate_platform_configuration:
std::option::Option<crate::model::FargatePlatformConfiguration>,
}
impl ContainerProperties {
    /// The image used to start the container, passed directly to the Docker
    /// daemon (for example `repository-url/image:tag`, an ECR URI, or a
    /// Docker Hub name). Maps to `Image` in the Docker Remote API "Create a
    /// container" section and the `IMAGE` argument of `docker run`. The image
    /// architecture must match the processor architecture of the compute
    /// resources it is scheduled on.
    pub fn image(&self) -> std::option::Option<&str> {
        self.image.as_ref().map(|s| s.as_str())
    }
    /// The number of vCPUs reserved for the job; each vCPU is equivalent to
    /// 1,024 CPU shares (`CpuShares` / `--cpu-shares` in Docker).
    ///
    /// Deprecated: use `resourceRequirements` instead. Not supported for jobs
    /// that run on Fargate resources.
    pub fn vcpus(&self) -> i32 {
        self.vcpus
    }
    /// The memory hard limit (in MiB) for the container; the container is
    /// terminated if it attempts to exceed it. At least 4 MiB must be
    /// specified.
    ///
    /// Deprecated: use `resourceRequirements` instead. Not supported for jobs
    /// that run on Fargate resources.
    pub fn memory(&self) -> i32 {
        self.memory
    }
    /// The command that's passed to the container (maps to `Cmd` in the
    /// Docker Remote API and the `COMMAND` argument of `docker run`).
    pub fn command(&self) -> std::option::Option<&[std::string::String]> {
        self.command.as_ref().map(|v| v.as_slice())
    }
    /// The Amazon Resource Name (ARN) of the IAM role that the container can
    /// assume for Amazon Web Services permissions.
    pub fn job_role_arn(&self) -> std::option::Option<&str> {
        self.job_role_arn.as_ref().map(|s| s.as_str())
    }
    /// The Amazon Resource Name (ARN) of the execution role that Batch can
    /// assume. Required for jobs that run on Fargate resources.
    pub fn execution_role_arn(&self) -> std::option::Option<&str> {
        self.execution_role_arn.as_ref().map(|s| s.as_str())
    }
    /// The data volumes used in the job.
    pub fn volumes(&self) -> std::option::Option<&[crate::model::Volume]> {
        self.volumes.as_ref().map(|v| v.as_slice())
    }
    /// The environment variables passed to the container (maps to `Env` /
    /// `--env` in Docker). Plaintext environment variables are not
    /// recommended for sensitive data, and names must not start with
    /// `AWS_BATCH` — that prefix is reserved by the Batch service.
    pub fn environment(&self) -> std::option::Option<&[crate::model::KeyValuePair]> {
        self.environment.as_ref().map(|v| v.as_slice())
    }
    /// The mount points for data volumes in the container (maps to `Volumes`
    /// / `--volume` in Docker).
    pub fn mount_points(&self) -> std::option::Option<&[crate::model::MountPoint]> {
        self.mount_points.as_ref().map(|v| v.as_slice())
    }
    /// Whether the container is given read-only access to its root file
    /// system (maps to `ReadonlyRootfs` / `--read-only` in Docker).
    pub fn readonly_root_filesystem(&self) -> bool {
        self.readonly_root_filesystem
    }
    /// Whether the container runs with elevated permissions on the host
    /// instance, similar to the `root` user (maps to `Privileged` /
    /// `--privileged` in Docker; default false). Not applicable to jobs
    /// running on Fargate resources.
    pub fn privileged(&self) -> bool {
        self.privileged
    }
    /// The `ulimits` to set in the container (maps to `Ulimits` / `--ulimit`
    /// in Docker). Not applicable to jobs running on Fargate resources.
    pub fn ulimits(&self) -> std::option::Option<&[crate::model::Ulimit]> {
        self.ulimits.as_ref().map(|v| v.as_slice())
    }
    /// The user name to use inside the container (maps to `User` / `--user`
    /// in Docker).
    pub fn user(&self) -> std::option::Option<&str> {
        self.user.as_ref().map(|s| s.as_str())
    }
    /// The instance type for a multi-node parallel job; all node groups must
    /// use the same instance type. Not applicable to single-node container
    /// jobs or jobs that run on Fargate resources.
    pub fn instance_type(&self) -> std::option::Option<&str> {
        self.instance_type.as_ref().map(|s| s.as_str())
    }
    /// The type and amount of resources to assign to the container; the
    /// supported resources are `GPU`, `MEMORY`, and `VCPU`.
    pub fn resource_requirements(
        &self,
    ) -> std::option::Option<&[crate::model::ResourceRequirement]> {
        self.resource_requirements.as_ref().map(|v| v.as_slice())
    }
    /// Linux-specific modifications applied to the container, such as details
    /// for device mappings.
    pub fn linux_parameters(&self) -> std::option::Option<&crate::model::LinuxParameters> {
        self.linux_parameters.as_ref()
    }
    /// The log configuration specification for the container (maps to
    /// `LogConfig` / `--log-driver` in Docker). By default containers use the
    /// same logging driver as the Docker daemon; Batch supports a subset of
    /// the drivers available to the daemon (see the `LogConfiguration` data
    /// type). Requires Docker Remote API version 1.18 or greater on the
    /// container instance, and the ECS container agent must register the
    /// driver via the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable
    /// before containers on that instance can use it.
    pub fn log_configuration(&self) -> std::option::Option<&crate::model::LogConfiguration> {
        self.log_configuration.as_ref()
    }
    /// The secrets for the container.
    pub fn secrets(&self) -> std::option::Option<&[crate::model::Secret]> {
        self.secrets.as_ref().map(|v| v.as_slice())
    }
    /// The network configuration for jobs running on Fargate resources; jobs
    /// running on EC2 resources must not specify this parameter.
    pub fn network_configuration(
        &self,
    ) -> std::option::Option<&crate::model::NetworkConfiguration> {
        self.network_configuration.as_ref()
    }
    /// The platform configuration for jobs running on Fargate resources; jobs
    /// running on EC2 resources must not specify this parameter.
    pub fn fargate_platform_configuration(
        &self,
    ) -> std::option::Option<&crate::model::FargatePlatformConfiguration> {
        self.fargate_platform_configuration.as_ref()
    }
}
impl std::fmt::Debug for ContainerProperties {
    /// Renders every field of the container properties using the standard
    /// `debug_struct` builder, in declaration order.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ContainerProperties")
            .field("image", &self.image)
            .field("vcpus", &self.vcpus)
            .field("memory", &self.memory)
            .field("command", &self.command)
            .field("job_role_arn", &self.job_role_arn)
            .field("execution_role_arn", &self.execution_role_arn)
            .field("volumes", &self.volumes)
            .field("environment", &self.environment)
            .field("mount_points", &self.mount_points)
            .field("readonly_root_filesystem", &self.readonly_root_filesystem)
            .field("privileged", &self.privileged)
            .field("ulimits", &self.ulimits)
            .field("user", &self.user)
            .field("instance_type", &self.instance_type)
            .field("resource_requirements", &self.resource_requirements)
            .field("linux_parameters", &self.linux_parameters)
            .field("log_configuration", &self.log_configuration)
            .field("secrets", &self.secrets)
            .field("network_configuration", &self.network_configuration)
            .field(
                "fargate_platform_configuration",
                &self.fargate_platform_configuration,
            )
            .finish()
    }
}
/// See [`ContainerProperties`](crate::model::ContainerProperties)
pub mod container_properties {
    /// A builder for [`ContainerProperties`](crate::model::ContainerProperties)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        // Each field mirrors the field of the same name on
        // `ContainerProperties`. Scalars (`vcpus`, `memory`, the two bools)
        // are additionally wrapped in `Option` — NOTE(review): presumably so
        // unset values can be distinguished from explicit zeros/false when the
        // struct is assembled; the `build` method is outside this view.
        pub(crate) image: std::option::Option<std::string::String>,
        pub(crate) vcpus: std::option::Option<i32>,
        pub(crate) memory: std::option::Option<i32>,
        pub(crate) command: std::option::Option<std::vec::Vec<std::string::String>>,
        pub(crate) job_role_arn: std::option::Option<std::string::String>,
        pub(crate) execution_role_arn: std::option::Option<std::string::String>,
        pub(crate) volumes: std::option::Option<std::vec::Vec<crate::model::Volume>>,
        pub(crate) environment: std::option::Option<std::vec::Vec<crate::model::KeyValuePair>>,
        pub(crate) mount_points: std::option::Option<std::vec::Vec<crate::model::MountPoint>>,
        pub(crate) readonly_root_filesystem: std::option::Option<bool>,
        pub(crate) privileged: std::option::Option<bool>,
        pub(crate) ulimits: std::option::Option<std::vec::Vec<crate::model::Ulimit>>,
        pub(crate) user: std::option::Option<std::string::String>,
        pub(crate) instance_type: std::option::Option<std::string::String>,
        pub(crate) resource_requirements:
            std::option::Option<std::vec::Vec<crate::model::ResourceRequirement>>,
        pub(crate) linux_parameters: std::option::Option<crate::model::LinuxParameters>,
        pub(crate) log_configuration: std::option::Option<crate::model::LogConfiguration>,
        pub(crate) secrets: std::option::Option<std::vec::Vec<crate::model::Secret>>,
        pub(crate) network_configuration: std::option::Option<crate::model::NetworkConfiguration>,
        pub(crate) fargate_platform_configuration:
            std::option::Option<crate::model::FargatePlatformConfiguration>,
    }
impl Builder {
/// <p>The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker
/// Hub registry are available by default. Other repositories are specified with
/// <code>
/// <i>repository-url</i>/<i>image</i>:<i>tag</i>
/// </code>.
/// Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons,
/// periods, forward slashes, and number signs are allowed. This parameter maps to <code>Image</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of
/// the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>IMAGE</code> parameter of <a href="https://docs.docker.com/engine/reference/run/">docker
/// run</a>.</p>
/// <note>
/// <p>Docker image architecture must match the processor architecture of the compute resources that they're scheduled
/// on. For example, ARM-based Docker images can only run on ARM-based compute resources.</p>
/// </note>
/// <ul>
/// <li>
/// <p>Images in Amazon ECR repositories use the full registry and repository URI (for example,
/// <code>012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name></code>).</p>
/// </li>
/// <li>
/// <p>Images in official repositories on Docker Hub use a single name (for example, <code>ubuntu</code> or
/// <code>mongo</code>).</p>
/// </li>
/// <li>
/// <p>Images in other repositories on Docker Hub are qualified with an organization name (for example,
/// <code>amazon/amazon-ecs-agent</code>).</p>
/// </li>
/// <li>
/// <p>Images in other online repositories are qualified further by a domain name (for example,
/// <code>quay.io/assemblyline/ubuntu</code>).</p>
/// </li>
/// </ul>
pub fn image(mut self, input: impl Into<std::string::String>) -> Self {
self.image = Some(input.into());
self
}
/// <p>The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker
/// Hub registry are available by default. Other repositories are specified with
/// <code>
/// <i>repository-url</i>/<i>image</i>:<i>tag</i>
/// </code>.
/// Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons,
/// periods, forward slashes, and number signs are allowed. This parameter maps to <code>Image</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of
/// the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>IMAGE</code> parameter of <a href="https://docs.docker.com/engine/reference/run/">docker
/// run</a>.</p>
/// <note>
/// <p>Docker image architecture must match the processor architecture of the compute resources that they're scheduled
/// on. For example, ARM-based Docker images can only run on ARM-based compute resources.</p>
/// </note>
/// <ul>
/// <li>
/// <p>Images in Amazon ECR repositories use the full registry and repository URI (for example,
/// <code>012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name></code>).</p>
/// </li>
/// <li>
/// <p>Images in official repositories on Docker Hub use a single name (for example, <code>ubuntu</code> or
/// <code>mongo</code>).</p>
/// </li>
/// <li>
/// <p>Images in other repositories on Docker Hub are qualified with an organization name (for example,
/// <code>amazon/amazon-ecs-agent</code>).</p>
/// </li>
/// <li>
/// <p>Images in other online repositories are qualified further by a domain name (for example,
/// <code>quay.io/assemblyline/ubuntu</code>).</p>
/// </li>
/// </ul>
pub fn set_image(mut self, input: std::option::Option<std::string::String>) -> Self {
self.image = input;
self
}
/// <p>This parameter is deprecated, use <code>resourceRequirements</code> to specify the vCPU requirements for the job
/// definition. It's not supported for jobs that run on Fargate resources. For jobs run on EC2 resources, it specifies
/// the number of vCPUs reserved for the job.</p>
///
/// <p>Each vCPU is equivalent to 1,024 CPU shares. This parameter maps to <code>CpuShares</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--cpu-shares</code> option to
/// <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. The number of vCPUs must be specified but can be specified
/// in several places. You must specify it at least once for each node.</p>
pub fn vcpus(mut self, input: i32) -> Self {
self.vcpus = Some(input);
self
}
/// <p>This parameter is deprecated, use <code>resourceRequirements</code> to specify the vCPU requirements for the job
/// definition. It's not supported for jobs that run on Fargate resources. For jobs run on EC2 resources, it specifies
/// the number of vCPUs reserved for the job.</p>
///
/// <p>Each vCPU is equivalent to 1,024 CPU shares. This parameter maps to <code>CpuShares</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--cpu-shares</code> option to
/// <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. The number of vCPUs must be specified but can be specified
/// in several places. You must specify it at least once for each node.</p>
pub fn set_vcpus(mut self, input: std::option::Option<i32>) -> Self {
self.vcpus = input;
self
}
/// <p>This parameter is deprecated, use
/// <code>resourceRequirements</code> to specify the memory requirements for the job definition. It's not supported for
/// jobs that run on Fargate resources. For jobs run on EC2 resources, it specifies the memory hard
/// limit (in MiB) for a container. If your container attempts to exceed the specified number, it's terminated. You must
/// specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several
/// places. It must be specified for each node at least once.</p>
pub fn memory(mut self, input: i32) -> Self {
self.memory = Some(input);
self
}
/// <p>This parameter is deprecated, use
/// <code>resourceRequirements</code> to specify the memory requirements for the job definition. It's not supported for
/// jobs that run on Fargate resources. For jobs run on EC2 resources, it specifies the memory hard
/// limit (in MiB) for a container. If your container attempts to exceed the specified number, it's terminated. You must
/// specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several
/// places. It must be specified for each node at least once.</p>
pub fn set_memory(mut self, input: std::option::Option<i32>) -> Self {
self.memory = input;
self
}
/// Appends an item to `command`.
///
/// To override the contents of this collection use [`set_command`](Self::set_command).
///
/// <p>The command that's passed to the container. This parameter maps to <code>Cmd</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>COMMAND</code> parameter to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. For more information, see <a href="https://docs.docker.com/engine/reference/builder/#cmd">https://docs.docker.com/engine/reference/builder/#cmd</a>.</p>
pub fn command(mut self, input: impl Into<std::string::String>) -> Self {
    // Lazily create the backing vector, then append in place.
    self.command
        .get_or_insert_with(std::vec::Vec::new)
        .push(input.into());
    self
}
/// <p>The command that's passed to the container. This parameter maps to <code>Cmd</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>COMMAND</code> parameter to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. For more information, see <a href="https://docs.docker.com/engine/reference/builder/#cmd">https://docs.docker.com/engine/reference/builder/#cmd</a>.</p>
pub fn set_command(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.command = input;
self
}
/// <p>The Amazon Resource Name (ARN) of the IAM role that the container can assume for Amazon Web Services permissions. For more information, see
/// <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html">IAM Roles for Tasks</a>
/// in the <i>Amazon Elastic Container Service Developer Guide</i>.</p>
pub fn job_role_arn(mut self, input: impl Into<std::string::String>) -> Self {
    self.job_role_arn.replace(input.into());
    self
}
/// <p>The Amazon Resource Name (ARN) of the IAM role that the container can assume for Amazon Web Services permissions. For more information, see
/// <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html">IAM Roles for Tasks</a>
/// in the <i>Amazon Elastic Container Service Developer Guide</i>.</p>
pub fn set_job_role_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.job_role_arn = input;
self
}
/// <p>The Amazon Resource Name (ARN) of the execution role that Batch can assume. For jobs that run on Fargate resources, you must
/// provide an execution role. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html">Batch execution IAM role</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn execution_role_arn(mut self, input: impl Into<std::string::String>) -> Self {
    self.execution_role_arn.replace(input.into());
    self
}
/// <p>The Amazon Resource Name (ARN) of the execution role that Batch can assume. For jobs that run on Fargate resources, you must
/// provide an execution role. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html">Batch execution IAM role</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn set_execution_role_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.execution_role_arn = input;
self
}
/// Appends an item to `volumes`.
///
/// To override the contents of this collection use [`set_volumes`](Self::set_volumes).
///
/// <p>A list of data volumes used in a job.</p>
pub fn volumes(mut self, input: impl Into<crate::model::Volume>) -> Self {
    // Lazily create the backing vector, then append in place.
    self.volumes
        .get_or_insert_with(std::vec::Vec::new)
        .push(input.into());
    self
}
/// <p>A list of data volumes used in a job.</p>
pub fn set_volumes(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Volume>>,
) -> Self {
self.volumes = input;
self
}
/// Appends an item to `environment`.
///
/// To override the contents of this collection use [`set_environment`](Self::set_environment).
///
/// <p>The environment variables to pass to a container. This parameter maps to <code>Env</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--env</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
/// <important>
/// <p>We don't recommend using plaintext environment variables for sensitive information, such as credential
/// data.</p>
/// </important>
/// <note>
/// <p>Environment variables must not start with <code>AWS_BATCH</code>; this naming
/// convention is reserved for variables that are set by the Batch service.</p>
/// </note>
pub fn environment(mut self, input: impl Into<crate::model::KeyValuePair>) -> Self {
    // Lazily create the backing vector, then append in place.
    self.environment
        .get_or_insert_with(std::vec::Vec::new)
        .push(input.into());
    self
}
/// <p>The environment variables to pass to a container. This parameter maps to <code>Env</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--env</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
/// <important>
/// <p>We don't recommend using plaintext environment variables for sensitive information, such as credential
/// data.</p>
/// </important>
/// <note>
/// <p>Environment variables must not start with <code>AWS_BATCH</code>; this naming
/// convention is reserved for variables that are set by the Batch service.</p>
/// </note>
pub fn set_environment(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::KeyValuePair>>,
) -> Self {
self.environment = input;
self
}
/// Appends an item to `mount_points`.
///
/// To override the contents of this collection use [`set_mount_points`](Self::set_mount_points).
///
/// <p>The mount points for data volumes in your container. This parameter maps to <code>Volumes</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--volume</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
pub fn mount_points(mut self, input: impl Into<crate::model::MountPoint>) -> Self {
    // Lazily create the backing vector, then append in place.
    self.mount_points
        .get_or_insert_with(std::vec::Vec::new)
        .push(input.into());
    self
}
/// <p>The mount points for data volumes in your container. This parameter maps to <code>Volumes</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--volume</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
pub fn set_mount_points(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::MountPoint>>,
) -> Self {
self.mount_points = input;
self
}
/// <p>When this parameter is true, the container is given read-only access to its root file system. This parameter
/// maps to <code>ReadonlyRootfs</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and
/// the <code>--read-only</code> option to <code>docker run</code>.</p>
pub fn readonly_root_filesystem(mut self, input: bool) -> Self {
    self.readonly_root_filesystem.replace(input);
    self
}
/// <p>When this parameter is true, the container is given read-only access to its root file system. This parameter
/// maps to <code>ReadonlyRootfs</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and
/// the <code>--read-only</code> option to <code>docker run</code>.</p>
pub fn set_readonly_root_filesystem(mut self, input: std::option::Option<bool>) -> Self {
self.readonly_root_filesystem = input;
self
}
/// <p>When this parameter is true, the container is given elevated permissions on the host container instance (similar
/// to the <code>root</code> user). This parameter maps to <code>Privileged</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--privileged</code> option to
/// <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. The default value is false.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided, or
/// specified as false.</p>
/// </note>
pub fn privileged(mut self, input: bool) -> Self {
    self.privileged.replace(input);
    self
}
/// <p>When this parameter is true, the container is given elevated permissions on the host container instance (similar
/// to the <code>root</code> user). This parameter maps to <code>Privileged</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--privileged</code> option to
/// <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. The default value is false.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided, or
/// specified as false.</p>
/// </note>
pub fn set_privileged(mut self, input: std::option::Option<bool>) -> Self {
self.privileged = input;
self
}
/// Appends an item to `ulimits`.
///
/// To override the contents of this collection use [`set_ulimits`](Self::set_ulimits).
///
/// <p>A list of <code>ulimits</code> to set in the container. This parameter maps to <code>Ulimits</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--ulimit</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be
/// provided.</p>
/// </note>
pub fn ulimits(mut self, input: impl Into<crate::model::Ulimit>) -> Self {
    // Lazily create the backing vector, then append in place.
    self.ulimits
        .get_or_insert_with(std::vec::Vec::new)
        .push(input.into());
    self
}
/// <p>A list of <code>ulimits</code> to set in the container. This parameter maps to <code>Ulimits</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--ulimit</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be
/// provided.</p>
/// </note>
pub fn set_ulimits(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Ulimit>>,
) -> Self {
self.ulimits = input;
self
}
/// <p>The user name to use inside the container. This parameter maps to <code>User</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--user</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
pub fn user(mut self, input: impl Into<std::string::String>) -> Self {
    self.user.replace(input.into());
    self
}
/// <p>The user name to use inside the container. This parameter maps to <code>User</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--user</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
pub fn set_user(mut self, input: std::option::Option<std::string::String>) -> Self {
self.user = input;
self
}
/// <p>The instance type to use for a multi-node parallel job. All node groups in a multi-node parallel job must use
/// the same instance type.</p>
/// <note>
/// <p>This parameter isn't applicable to single-node container jobs or jobs that run on Fargate resources, and
/// shouldn't be provided.</p>
/// </note>
pub fn instance_type(mut self, input: impl Into<std::string::String>) -> Self {
    self.instance_type.replace(input.into());
    self
}
/// <p>The instance type to use for a multi-node parallel job. All node groups in a multi-node parallel job must use
/// the same instance type.</p>
/// <note>
/// <p>This parameter isn't applicable to single-node container jobs or jobs that run on Fargate resources, and
/// shouldn't be provided.</p>
/// </note>
pub fn set_instance_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.instance_type = input;
self
}
/// Appends an item to `resource_requirements`.
///
/// To override the contents of this collection use [`set_resource_requirements`](Self::set_resource_requirements).
///
/// <p>The type and amount of resources to assign to a container. The supported resources include <code>GPU</code>,
/// <code>MEMORY</code>, and <code>VCPU</code>.</p>
pub fn resource_requirements(
    mut self,
    input: impl Into<crate::model::ResourceRequirement>,
) -> Self {
    // Lazily create the backing vector, then append in place.
    self.resource_requirements
        .get_or_insert_with(std::vec::Vec::new)
        .push(input.into());
    self
}
/// <p>The type and amount of resources to assign to a container. The supported resources include <code>GPU</code>,
/// <code>MEMORY</code>, and <code>VCPU</code>.</p>
pub fn set_resource_requirements(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ResourceRequirement>>,
) -> Self {
self.resource_requirements = input;
self
}
/// <p>Linux-specific modifications that are applied to the container, such as details for device mappings.</p>
pub fn linux_parameters(mut self, input: crate::model::LinuxParameters) -> Self {
    self.linux_parameters.replace(input);
    self
}
/// <p>Linux-specific modifications that are applied to the container, such as details for device mappings.</p>
pub fn set_linux_parameters(
mut self,
input: std::option::Option<crate::model::LinuxParameters>,
) -> Self {
self.linux_parameters = input;
self
}
/// <p>The log configuration specification for the container.</p>
/// <p>This parameter maps to <code>LogConfig</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the
/// <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--log-driver</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.
/// By default, containers use the same logging driver that the Docker daemon uses. However the container might use a
/// different logging driver than the Docker daemon by specifying a log driver with this parameter in the container
/// definition. To use a different logging driver for a container, the log system must be configured properly on the
/// container instance (or on a different log server for remote logging options). For more information on the options for
/// different supported log drivers, see <a href="https://docs.docker.com/engine/admin/logging/overview/">Configure
/// logging drivers</a> in the Docker documentation.</p>
/// <note>
/// <p>Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the <a>LogConfiguration</a> data type).</p>
/// </note>
/// <p>This parameter requires version 1.18 of the Docker Remote API or greater on your
/// container instance. To check the Docker Remote API version on your container instance, log into your
/// container instance and run the following command: <code>sudo docker version | grep "Server API version"</code>
/// </p>
/// <note>
/// <p>The Amazon ECS container agent running on a container instance must register the logging drivers available on that
/// instance with the <code>ECS_AVAILABLE_LOGGING_DRIVERS</code> environment variable before containers placed on that
/// instance can use these log configuration options. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html">Amazon ECS Container Agent Configuration</a> in the
/// <i>Amazon Elastic Container Service Developer Guide</i>.</p>
/// </note>
pub fn log_configuration(mut self, input: crate::model::LogConfiguration) -> Self {
    self.log_configuration.replace(input);
    self
}
/// <p>The log configuration specification for the container.</p>
/// <p>This parameter maps to <code>LogConfig</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the
/// <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--log-driver</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.
/// By default, containers use the same logging driver that the Docker daemon uses. However the container might use a
/// different logging driver than the Docker daemon by specifying a log driver with this parameter in the container
/// definition. To use a different logging driver for a container, the log system must be configured properly on the
/// container instance (or on a different log server for remote logging options). For more information on the options for
/// different supported log drivers, see <a href="https://docs.docker.com/engine/admin/logging/overview/">Configure
/// logging drivers</a> in the Docker documentation.</p>
/// <note>
/// <p>Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the <a>LogConfiguration</a> data type).</p>
/// </note>
/// <p>This parameter requires version 1.18 of the Docker Remote API or greater on your
/// container instance. To check the Docker Remote API version on your container instance, log into your
/// container instance and run the following command: <code>sudo docker version | grep "Server API version"</code>
/// </p>
/// <note>
/// <p>The Amazon ECS container agent running on a container instance must register the logging drivers available on that
/// instance with the <code>ECS_AVAILABLE_LOGGING_DRIVERS</code> environment variable before containers placed on that
/// instance can use these log configuration options. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html">Amazon ECS Container Agent Configuration</a> in the
/// <i>Amazon Elastic Container Service Developer Guide</i>.</p>
/// </note>
pub fn set_log_configuration(
mut self,
input: std::option::Option<crate::model::LogConfiguration>,
) -> Self {
self.log_configuration = input;
self
}
/// Appends an item to `secrets`.
///
/// To override the contents of this collection use [`set_secrets`](Self::set_secrets).
///
/// <p>The secrets for the container. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html">Specifying sensitive data</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn secrets(mut self, input: impl Into<crate::model::Secret>) -> Self {
    // Lazily create the backing vector, then append in place.
    self.secrets
        .get_or_insert_with(std::vec::Vec::new)
        .push(input.into());
    self
}
/// <p>The secrets for the container. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html">Specifying sensitive data</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn set_secrets(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Secret>>,
) -> Self {
self.secrets = input;
self
}
/// <p>The network configuration for jobs that are running on Fargate resources. Jobs that are running on EC2
/// resources must not specify this parameter.</p>
pub fn network_configuration(mut self, input: crate::model::NetworkConfiguration) -> Self {
    self.network_configuration.replace(input);
    self
}
/// <p>The network configuration for jobs that are running on Fargate resources. Jobs that are running on EC2
/// resources must not specify this parameter.</p>
pub fn set_network_configuration(
mut self,
input: std::option::Option<crate::model::NetworkConfiguration>,
) -> Self {
self.network_configuration = input;
self
}
/// <p>The platform configuration for jobs that are running on Fargate resources. Jobs that are running on EC2
/// resources must not specify this parameter.</p>
pub fn fargate_platform_configuration(
    mut self,
    input: crate::model::FargatePlatformConfiguration,
) -> Self {
    self.fargate_platform_configuration.replace(input);
    self
}
/// <p>The platform configuration for jobs that are running on Fargate resources. Jobs that are running on EC2
/// resources must not specify this parameter.</p>
pub fn set_fargate_platform_configuration(
mut self,
input: std::option::Option<crate::model::FargatePlatformConfiguration>,
) -> Self {
self.fargate_platform_configuration = input;
self
}
/// Consumes the builder and constructs a [`ContainerProperties`](crate::model::ContainerProperties)
pub fn build(self) -> crate::model::ContainerProperties {
crate::model::ContainerProperties {
image: self.image,
vcpus: self.vcpus.unwrap_or_default(),
memory: self.memory.unwrap_or_default(),
command: self.command,
job_role_arn: self.job_role_arn,
execution_role_arn: self.execution_role_arn,
volumes: self.volumes,
environment: self.environment,
mount_points: self.mount_points,
readonly_root_filesystem: self.readonly_root_filesystem.unwrap_or_default(),
privileged: self.privileged.unwrap_or_default(),
ulimits: self.ulimits,
user: self.user,
instance_type: self.instance_type,
resource_requirements: self.resource_requirements,
linux_parameters: self.linux_parameters,
log_configuration: self.log_configuration,
secrets: self.secrets,
network_configuration: self.network_configuration,
fargate_platform_configuration: self.fargate_platform_configuration,
}
}
}
}
impl ContainerProperties {
    /// Returns a fresh, empty [`Builder`](crate::model::container_properties::Builder)
    /// for assembling a [`ContainerProperties`](crate::model::ContainerProperties).
    pub fn builder() -> crate::model::container_properties::Builder {
        Default::default()
    }
}
/// <p>The platform configuration for jobs that are running on Fargate resources. Jobs that run on EC2 resources must
/// not specify this parameter.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct FargatePlatformConfiguration {
/// <p>The Fargate platform version where the jobs are running. A platform version is specified only for jobs
/// that are running on Fargate resources. If one isn't specified, the <code>LATEST</code> platform version is used by
/// default. This uses a recent, approved version of the Fargate platform for compute resources. For more
/// information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html">Fargate platform versions</a> in the <i>Amazon Elastic Container Service Developer Guide</i>.</p>
/// <p>This member is optional; <code>None</code> means no platform version was specified.</p>
pub platform_version: std::option::Option<std::string::String>,
}
impl FargatePlatformConfiguration {
    /// Returns the configured Fargate platform version, if any, as a string slice.
    /// When unset, the service applies the <code>LATEST</code> platform version; see
    /// <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html">Fargate platform versions</a>
    /// in the <i>Amazon Elastic Container Service Developer Guide</i>.
    pub fn platform_version(&self) -> std::option::Option<&str> {
        self.platform_version.as_ref().map(|v| v.as_str())
    }
}
impl std::fmt::Debug for FargatePlatformConfiguration {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chain the debug-struct builder instead of using a named intermediate.
        f.debug_struct("FargatePlatformConfiguration")
            .field("platform_version", &self.platform_version)
            .finish()
    }
}
/// See [`FargatePlatformConfiguration`](crate::model::FargatePlatformConfiguration)
pub mod fargate_platform_configuration {
    /// A builder for [`FargatePlatformConfiguration`](crate::model::FargatePlatformConfiguration)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) platform_version: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Sets the Fargate platform version on which the jobs run. When no version is
        /// supplied, the service falls back to the <code>LATEST</code> platform version; see
        /// <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html">Fargate platform versions</a>
        /// in the <i>Amazon Elastic Container Service Developer Guide</i>.
        pub fn platform_version(mut self, input: impl Into<std::string::String>) -> Self {
            self.platform_version.replace(input.into());
            self
        }
        /// Sets or clears the Fargate platform version with an explicit `Option`;
        /// passing `None` removes any previously configured version.
        pub fn set_platform_version(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                platform_version: input,
            }
        }
        /// Consumes the builder and constructs a [`FargatePlatformConfiguration`](crate::model::FargatePlatformConfiguration)
        pub fn build(self) -> crate::model::FargatePlatformConfiguration {
            let Self { platform_version } = self;
            crate::model::FargatePlatformConfiguration { platform_version }
        }
    }
}
impl FargatePlatformConfiguration {
    /// Returns a fresh, empty [`Builder`](crate::model::fargate_platform_configuration::Builder)
    /// for assembling a [`FargatePlatformConfiguration`](crate::model::FargatePlatformConfiguration).
    pub fn builder() -> crate::model::fargate_platform_configuration::Builder {
        Default::default()
    }
}
/// <p>The network configuration for jobs that are running on Fargate resources. Jobs that are running on EC2
/// resources must not specify this parameter.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct NetworkConfiguration {
/// <p>Indicates whether the job should have a public IP address. For a job that is running on Fargate resources in a
/// private subnet to send outbound traffic to the internet (for example, to pull container images), the private subnet
/// requires a NAT gateway be attached to route requests to the internet. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html">Amazon ECS task networking</a>. The
/// default value is "DISABLED".</p>
/// <p>Optional; <code>None</code> means the member wasn't set, in which case the documented "DISABLED" default applies.</p>
pub assign_public_ip: std::option::Option<crate::model::AssignPublicIp>,
}
impl NetworkConfiguration {
    /// Returns the configured public-IP preference, if any. When `None`, the service
    /// default of "DISABLED" applies; see
    /// <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html">Amazon ECS task networking</a>.
    pub fn assign_public_ip(&self) -> std::option::Option<&crate::model::AssignPublicIp> {
        match &self.assign_public_ip {
            Some(value) => Some(value),
            None => None,
        }
    }
}
impl std::fmt::Debug for NetworkConfiguration {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chain the debug-struct builder instead of using a named intermediate.
        f.debug_struct("NetworkConfiguration")
            .field("assign_public_ip", &self.assign_public_ip)
            .finish()
    }
}
/// See [`NetworkConfiguration`](crate::model::NetworkConfiguration)
pub mod network_configuration {
    /// A builder for [`NetworkConfiguration`](crate::model::NetworkConfiguration)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) assign_public_ip: std::option::Option<crate::model::AssignPublicIp>,
    }
    impl Builder {
        /// Chooses whether the job receives a public IP address. A Fargate job in a
        /// private subnet needs a NAT gateway for outbound internet access (for example
        /// to pull container images); see
        /// <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html">Amazon ECS task networking</a>.
        /// The service default is "DISABLED".
        pub fn assign_public_ip(mut self, input: crate::model::AssignPublicIp) -> Self {
            self.assign_public_ip.replace(input);
            self
        }
        /// Sets or clears the public-IP preference with an explicit `Option`;
        /// passing `None` restores the unset state so the service default applies.
        pub fn set_assign_public_ip(
            self,
            input: std::option::Option<crate::model::AssignPublicIp>,
        ) -> Self {
            Self {
                assign_public_ip: input,
            }
        }
        /// Consumes the builder and constructs a [`NetworkConfiguration`](crate::model::NetworkConfiguration)
        pub fn build(self) -> crate::model::NetworkConfiguration {
            let Self { assign_public_ip } = self;
            crate::model::NetworkConfiguration { assign_public_ip }
        }
    }
}
impl NetworkConfiguration {
    /// Returns a fresh, empty [`Builder`](crate::model::network_configuration::Builder)
    /// for assembling a [`NetworkConfiguration`](crate::model::NetworkConfiguration).
    pub fn builder() -> crate::model::network_configuration::Builder {
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum AssignPublicIp {
#[allow(missing_docs)] // documentation missing in model
/// Maps to and from the wire string `DISABLED`.
Disabled,
#[allow(missing_docs)] // documentation missing in model
/// Maps to and from the wire string `ENABLED`.
Enabled,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for AssignPublicIp {
fn from(s: &str) -> Self {
match s {
"DISABLED" => AssignPublicIp::Disabled,
"ENABLED" => AssignPublicIp::Enabled,
other => AssignPublicIp::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for AssignPublicIp {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(AssignPublicIp::from(s))
}
}
impl AssignPublicIp {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
AssignPublicIp::Disabled => "DISABLED",
AssignPublicIp::Enabled => "ENABLED",
AssignPublicIp::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["DISABLED", "ENABLED"]
}
}
impl AsRef<str> for AssignPublicIp {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>An object representing the secret to expose to your container. Secrets can be exposed to a container in the
/// following ways:</p>
/// <ul>
/// <li>
/// <p>To inject sensitive data into your containers as environment variables, use the <code>secrets</code> container
/// definition parameter.</p>
/// </li>
/// <li>
/// <p>To reference sensitive information in the log configuration of a container, use the <code>secretOptions</code>
/// container definition parameter.</p>
/// </li>
/// </ul>
/// <p>For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html">Specifying
/// sensitive data</a> in the <i>Batch User Guide</i>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Secret {
/// <p>The name of the secret.</p>
/// <p>Optional at the model level; <code>None</code> means the name wasn't set.</p>
pub name: std::option::Option<std::string::String>,
/// <p>The secret to expose to the container. The supported values are either the full ARN of the Secrets Manager secret or the
/// full ARN of the parameter in the Amazon Web Services Systems Manager Parameter Store.</p>
/// <note>
/// <p>If the Amazon Web Services Systems Manager Parameter Store parameter exists in the same Region as the job you're launching, then you can use
/// either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full ARN must
/// be specified.</p>
/// </note>
pub value_from: std::option::Option<std::string::String>,
}
impl Secret {
    /// The name of the secret.
    pub fn name(&self) -> std::option::Option<&str> {
        self.name.as_ref().map(|s| s.as_str())
    }
    /// The secret to expose to the container: either the full ARN of the Secrets
    /// Manager secret or the full ARN of the Systems Manager Parameter Store
    /// parameter (the parameter name alone is accepted only when the parameter is
    /// in the same Region as the job).
    pub fn value_from(&self) -> std::option::Option<&str> {
        self.value_from.as_ref().map(|s| s.as_str())
    }
}
impl std::fmt::Debug for Secret {
    /// Renders the struct with its field values via the standard debug builder.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Secret")
            .field("name", &self.name)
            .field("value_from", &self.value_from)
            .finish()
    }
}
/// See [`Secret`](crate::model::Secret)
pub mod secret {
    /// A builder for [`Secret`](crate::model::Secret)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) value_from: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Sets the name of the secret.
        pub fn name(self, input: impl Into<std::string::String>) -> Self {
            self.set_name(Some(input.into()))
        }
        /// Sets or clears the name of the secret.
        pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.name = input;
            self
        }
        /// Sets the secret to expose to the container: either the full ARN of the
        /// Secrets Manager secret or the full ARN of the Systems Manager Parameter
        /// Store parameter. When the Parameter Store parameter is in the same Region
        /// as the job, the parameter name may be used instead of the full ARN.
        pub fn value_from(self, input: impl Into<std::string::String>) -> Self {
            self.set_value_from(Some(input.into()))
        }
        /// Sets or clears the secret to expose to the container; see [`Self::value_from`].
        pub fn set_value_from(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.value_from = input;
            self
        }
        /// Consumes the builder and constructs a [`Secret`](crate::model::Secret)
        pub fn build(self) -> crate::model::Secret {
            let Builder { name, value_from } = self;
            crate::model::Secret { name, value_from }
        }
    }
}
impl Secret {
    /// Creates a new builder-style object to manufacture [`Secret`](crate::model::Secret)
    pub fn builder() -> crate::model::secret::Builder {
        std::default::Default::default()
    }
}
/// Log configuration options to send to a custom log driver for the container.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct LogConfiguration {
    /// The log driver to use for the container. The valid values are log drivers
    /// that the Amazon ECS container agent can communicate with by default:
    /// `awslogs` (Amazon CloudWatch Logs), `fluentd`, `gelf` (Graylog Extended
    /// Format), `json-file`, `journald`, `logentries`, `syslog`, and `splunk`.
    /// For usage and per-driver options, see the
    /// [Docker logging-driver documentation](https://docs.docker.com/config/containers/logging/)
    /// and, for `awslogs`,
    /// [Using the awslogs log driver](https://docs.aws.amazon.com/batch/latest/userguide/using_awslogs.html)
    /// in the *Batch User Guide*.
    ///
    /// Jobs that are running on Fargate resources are restricted to the
    /// `awslogs` and `splunk` log drivers.
    ///
    /// A custom driver not listed above can be used by forking the Amazon ECS
    /// container agent project
    /// ([available on GitHub](https://github.com/aws/amazon-ecs-agent)), although
    /// Amazon Web Services doesn't currently support running modified copies of
    /// this software.
    ///
    /// This parameter requires version 1.18 of the Docker Remote API or greater
    /// on your container instance; check it with
    /// `sudo docker version | grep "Server API version"`.
    pub log_driver: std::option::Option<crate::model::LogDriver>,
    /// The configuration options to send to the log driver. This parameter
    /// requires version 1.19 of the Docker Remote API or greater on your
    /// container instance; check it with
    /// `sudo docker version | grep "Server API version"`.
    pub options:
        std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
    /// The secrets to pass to the log configuration. For more information, see
    /// [Specifying Sensitive Data](https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html)
    /// in the *Batch User Guide*.
    pub secret_options: std::option::Option<std::vec::Vec<crate::model::Secret>>,
}
impl LogConfiguration {
    /// The log driver to use for the container: `awslogs`, `fluentd`, `gelf`,
    /// `json-file`, `journald`, `logentries`, `syslog`, or `splunk`.
    ///
    /// Jobs that are running on Fargate resources are restricted to the
    /// `awslogs` and `splunk` log drivers. Requires version 1.18 of the Docker
    /// Remote API or greater on the container instance
    /// (`sudo docker version | grep "Server API version"`).
    pub fn log_driver(&self) -> std::option::Option<&crate::model::LogDriver> {
        self.log_driver.as_ref()
    }
    /// The configuration options to send to the log driver. Requires version
    /// 1.19 of the Docker Remote API or greater on the container instance
    /// (`sudo docker version | grep "Server API version"`).
    pub fn options(
        &self,
    ) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
    {
        self.options.as_ref()
    }
    /// The secrets to pass to the log configuration. For more information, see
    /// [Specifying Sensitive Data](https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html)
    /// in the *Batch User Guide*.
    pub fn secret_options(&self) -> std::option::Option<&[crate::model::Secret]> {
        self.secret_options.as_ref().map(|v| v.as_slice())
    }
}
impl std::fmt::Debug for LogConfiguration {
    /// Renders the struct with its field values via the standard debug builder.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("LogConfiguration")
            .field("log_driver", &self.log_driver)
            .field("options", &self.options)
            .field("secret_options", &self.secret_options)
            .finish()
    }
}
/// See [`LogConfiguration`](crate::model::LogConfiguration)
pub mod log_configuration {
    /// A builder for [`LogConfiguration`](crate::model::LogConfiguration)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) log_driver: std::option::Option<crate::model::LogDriver>,
        pub(crate) options: std::option::Option<
            std::collections::HashMap<std::string::String, std::string::String>,
        >,
        pub(crate) secret_options: std::option::Option<std::vec::Vec<crate::model::Secret>>,
    }
    impl Builder {
        /// Sets the log driver to use for the container: `awslogs`, `fluentd`,
        /// `gelf`, `json-file`, `journald`, `logentries`, `syslog`, or `splunk`.
        ///
        /// Jobs that are running on Fargate resources are restricted to the
        /// `awslogs` and `splunk` log drivers. Requires version 1.18 of the Docker
        /// Remote API or greater on the container instance
        /// (`sudo docker version | grep "Server API version"`).
        pub fn log_driver(self, input: crate::model::LogDriver) -> Self {
            self.set_log_driver(Some(input))
        }
        /// Sets or clears the log driver to use for the container; see
        /// [`Self::log_driver`].
        pub fn set_log_driver(
            mut self,
            input: std::option::Option<crate::model::LogDriver>,
        ) -> Self {
            self.log_driver = input;
            self
        }
        /// Adds a key-value pair to `options`.
        ///
        /// To override the contents of this collection use [`set_options`](Self::set_options).
        ///
        /// The configuration options to send to the log driver. Requires version
        /// 1.19 of the Docker Remote API or greater on the container instance
        /// (`sudo docker version | grep "Server API version"`).
        pub fn options(
            mut self,
            k: impl Into<std::string::String>,
            v: impl Into<std::string::String>,
        ) -> Self {
            self.options
                .get_or_insert_with(std::collections::HashMap::new)
                .insert(k.into(), v.into());
            self
        }
        /// Sets or clears the configuration options to send to the log driver; see
        /// [`Self::options`].
        pub fn set_options(
            mut self,
            input: std::option::Option<
                std::collections::HashMap<std::string::String, std::string::String>,
            >,
        ) -> Self {
            self.options = input;
            self
        }
        /// Appends an item to `secret_options`.
        ///
        /// To override the contents of this collection use [`set_secret_options`](Self::set_secret_options).
        ///
        /// The secrets to pass to the log configuration. For more information, see
        /// [Specifying Sensitive Data](https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html)
        /// in the *Batch User Guide*.
        pub fn secret_options(mut self, input: impl Into<crate::model::Secret>) -> Self {
            self.secret_options
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// Sets or clears the secrets to pass to the log configuration; see
        /// [`Self::secret_options`].
        pub fn set_secret_options(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Secret>>,
        ) -> Self {
            self.secret_options = input;
            self
        }
        /// Consumes the builder and constructs a [`LogConfiguration`](crate::model::LogConfiguration)
        pub fn build(self) -> crate::model::LogConfiguration {
            let Builder {
                log_driver,
                options,
                secret_options,
            } = self;
            crate::model::LogConfiguration {
                log_driver,
                options,
                secret_options,
            }
        }
    }
}
impl LogConfiguration {
    /// Creates a new builder-style object to manufacture [`LogConfiguration`](crate::model::LogConfiguration)
    pub fn builder() -> crate::model::log_configuration::Builder {
        std::default::Default::default()
    }
}
/// The log driver used for the container.
///
/// This enum is `#[non_exhaustive]`; strings not recognized at codegen time are
/// carried in the `Unknown` variant rather than rejected.
#[non_exhaustive]
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum LogDriver {
    /// The `awslogs` log driver.
    Awslogs,
    /// The `fluentd` log driver.
    Fluentd,
    /// The `gelf` log driver.
    Gelf,
    /// The `journald` log driver.
    Journald,
    /// The `json-file` log driver.
    JsonFile,
    /// The `splunk` log driver.
    Splunk,
    /// The `syslog` log driver.
    Syslog,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for LogDriver {
fn from(s: &str) -> Self {
match s {
"awslogs" => LogDriver::Awslogs,
"fluentd" => LogDriver::Fluentd,
"gelf" => LogDriver::Gelf,
"journald" => LogDriver::Journald,
"json-file" => LogDriver::JsonFile,
"splunk" => LogDriver::Splunk,
"syslog" => LogDriver::Syslog,
other => LogDriver::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for LogDriver {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(LogDriver::from(s))
}
}
impl LogDriver {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
LogDriver::Awslogs => "awslogs",
LogDriver::Fluentd => "fluentd",
LogDriver::Gelf => "gelf",
LogDriver::Journald => "journald",
LogDriver::JsonFile => "json-file",
LogDriver::Splunk => "splunk",
LogDriver::Syslog => "syslog",
LogDriver::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"awslogs",
"fluentd",
"gelf",
"journald",
"json-file",
"splunk",
"syslog",
]
}
}
impl AsRef<str> for LogDriver {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// Linux-specific modifications that are applied to the container, such as
/// details for device mappings.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct LinuxParameters {
    /// Any host devices to expose to the container. Maps to `Devices` in the
    /// [Create a container](https://docs.docker.com/engine/api/v1.23/#create-a-container)
    /// section of the [Docker Remote API](https://docs.docker.com/engine/api/v1.23/)
    /// and the `--device` option to
    /// [docker run](https://docs.docker.com/engine/reference/run/).
    /// Not applicable to jobs running on Fargate resources and shouldn't be provided.
    pub devices: std::option::Option<std::vec::Vec<crate::model::Device>>,
    /// If true, run an `init` process inside the container that forwards signals
    /// and reaps processes. Maps to the `--init` option to
    /// [docker run](https://docs.docker.com/engine/reference/run/). Requires
    /// version 1.25 of the Docker Remote API or greater on the container instance
    /// (`sudo docker version | grep "Server API version"`).
    pub init_process_enabled: bool,
    /// The size (in MiB) of the `/dev/shm` volume. Maps to the `--shm-size` option
    /// to [docker run](https://docs.docker.com/engine/reference/run/).
    /// Not applicable to jobs running on Fargate resources and shouldn't be provided.
    pub shared_memory_size: i32,
    /// The container path, mount options, and size (in MiB) of the tmpfs mount.
    /// Maps to the `--tmpfs` option to
    /// [docker run](https://docs.docker.com/engine/reference/run/).
    /// Not applicable to jobs running on Fargate resources and shouldn't be provided.
    pub tmpfs: std::option::Option<std::vec::Vec<crate::model::Tmpfs>>,
    /// The total amount of swap memory (in MiB) a container can use. Translated to
    /// the `--memory-swap` option to
    /// [docker run](https://docs.docker.com/engine/reference/run/), where the value
    /// is the sum of the container memory plus this `maxSwap` value. A value of `0`
    /// means the container doesn't use swap; accepted values are `0` or any positive
    /// integer. When omitted, the container doesn't use the swap configuration of
    /// the container instance it runs on. A `maxSwap` value must be set for the
    /// `swappiness` parameter to be used.
    /// Not applicable to jobs running on Fargate resources and shouldn't be provided.
    pub max_swap: i32,
    /// Tunes the container's memory swappiness behavior; maps to the
    /// `--memory-swappiness` option to
    /// [docker run](https://docs.docker.com/engine/reference/run/). Accepted values
    /// are whole numbers between `0` (swap only when absolutely necessary) and `100`
    /// (swap very aggressively); when unspecified, a default of `60` is used. Ignored
    /// unless `maxSwap` is set, and a `maxSwap` of `0` disables swap entirely. Swap
    /// space must be enabled and allocated on the container instance, and the swap
    /// parameters are only supported for job definitions using EC2 resources; when
    /// both `maxSwap` and `swappiness` are omitted, total swap usage is limited to
    /// two times the memory reservation of the container.
    /// Not applicable to jobs running on Fargate resources and shouldn't be provided.
    pub swappiness: i32,
}
impl LinuxParameters {
    /// <p>Any host devices to expose to the container; maps to <code>Devices</code> in the
    /// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the
    /// <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--device</code> option to
    /// <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
    /// <note>
    /// <p>This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.</p>
    /// </note>
    pub fn devices(&self) -> std::option::Option<&[crate::model::Device]> {
        self.devices.as_ref().map(|devices| devices.as_slice())
    }
    /// <p>Whether to run an <code>init</code> process inside the container that forwards signals and reaps
    /// processes; maps to the <code>--init</code> option to
    /// <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. Requires version 1.25 or greater of
    /// the Docker Remote API on the container instance; check with
    /// <code>sudo docker version | grep "Server API version"</code>.</p>
    pub fn init_process_enabled(&self) -> bool {
        self.init_process_enabled
    }
    /// <p>The size (in MiB) of the <code>/dev/shm</code> volume; maps to the <code>--shm-size</code> option to
    /// <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
    /// <note>
    /// <p>This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.</p>
    /// </note>
    pub fn shared_memory_size(&self) -> i32 {
        self.shared_memory_size
    }
    /// <p>The container path, mount options, and size (in MiB) of each tmpfs mount; maps to the
    /// <code>--tmpfs</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
    /// <note>
    /// <p>This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.</p>
    /// </note>
    pub fn tmpfs(&self) -> std::option::Option<&[crate::model::Tmpfs]> {
        self.tmpfs.as_ref().map(|tmpfs| tmpfs.as_slice())
    }
    /// <p>The total amount of swap memory (in MiB) the container can use; translated to the
    /// <code>--memory-swap</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>,
    /// where the value passed is the container memory plus this <code>maxSwap</code> value. For more information, see
    /// <a href="https://docs.docker.com/config/containers/resource_constraints/#--memory-swap-details"><code>--memory-swap</code> details</a>
    /// in the Docker documentation.</p>
    /// <p>A value of <code>0</code> disables swap for the container; accepted values are <code>0</code> or any
    /// positive integer. If omitted, the container uses the swap configuration of the container instance it runs
    /// on. A <code>maxSwap</code> value must be set for <code>swappiness</code> to take effect.</p>
    /// <note>
    /// <p>This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.</p>
    /// </note>
    pub fn max_swap(&self) -> i32 {
        self.max_swap
    }
    /// <p>Tunes the container's memory swappiness; maps to the <code>--memory-swappiness</code> option to
    /// <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. Accepted values are whole numbers
    /// between <code>0</code> (swap only when absolutely necessary) and <code>100</code> (swap pages very
    /// aggressively); the default is <code>60</code>. This value is ignored unless <code>maxSwap</code> is
    /// specified, and unused when <code>maxSwap</code> is <code>0</code>.</p>
    /// <p>Swap space must be enabled and allocated on the container instance (the Amazon ECS optimized AMIs don't
    /// enable swap by default — see
    /// <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-store-swap-volumes.html">Instance Store Swap Volumes</a>
    /// in the <i>Amazon EC2 User Guide for Linux Instances</i>), and the swap parameters are only supported for
    /// job definitions using EC2 resources. If both <code>maxSwap</code> and <code>swappiness</code> are omitted,
    /// each container uses a default swappiness of 60 with total swap limited to twice the container's memory
    /// reservation.</p>
    /// <note>
    /// <p>This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.</p>
    /// </note>
    pub fn swappiness(&self) -> i32 {
        self.swappiness
    }
}
/// Manual `Debug` implementation that lists every field of `LinuxParameters`.
impl std::fmt::Debug for LinuxParameters {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("LinuxParameters")
            .field("devices", &self.devices)
            .field("init_process_enabled", &self.init_process_enabled)
            .field("shared_memory_size", &self.shared_memory_size)
            .field("tmpfs", &self.tmpfs)
            .field("max_swap", &self.max_swap)
            .field("swappiness", &self.swappiness)
            .finish()
    }
}
/// See [`LinuxParameters`](crate::model::LinuxParameters)
pub mod linux_parameters {
    /// A builder for [`LinuxParameters`](crate::model::LinuxParameters)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) devices: std::option::Option<std::vec::Vec<crate::model::Device>>,
        pub(crate) init_process_enabled: std::option::Option<bool>,
        pub(crate) shared_memory_size: std::option::Option<i32>,
        pub(crate) tmpfs: std::option::Option<std::vec::Vec<crate::model::Tmpfs>>,
        pub(crate) max_swap: std::option::Option<i32>,
        pub(crate) swappiness: std::option::Option<i32>,
    }
    impl Builder {
        /// Appends an item to `devices`.
        ///
        /// To override the contents of this collection use [`set_devices`](Self::set_devices).
        ///
        /// <p>A host device to expose to the container; maps to the <code>--device</code> option to
        /// <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
        /// <note>
        /// <p>This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.</p>
        /// </note>
        pub fn devices(mut self, input: impl Into<crate::model::Device>) -> Self {
            // Lazily create the Vec on first append so an untouched builder keeps `None`.
            self.devices.get_or_insert_with(Vec::new).push(input.into());
            self
        }
        /// <p>Sets or clears the whole list of host devices to expose to the container.</p>
        pub fn set_devices(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Device>>,
        ) -> Self {
            self.devices = input;
            self
        }
        /// <p>Whether to run an <code>init</code> process inside the container that forwards signals and reaps
        /// processes; maps to the <code>--init</code> option to
        /// <a href="https://docs.docker.com/engine/reference/run/">docker run</a> and requires version 1.25 or
        /// greater of the Docker Remote API on the container instance.</p>
        pub fn init_process_enabled(mut self, input: bool) -> Self {
            self.init_process_enabled = Some(input);
            self
        }
        /// <p>Sets or clears the <code>init</code>-process flag.</p>
        pub fn set_init_process_enabled(mut self, input: std::option::Option<bool>) -> Self {
            self.init_process_enabled = input;
            self
        }
        /// <p>The size (in MiB) of the <code>/dev/shm</code> volume; maps to the <code>--shm-size</code> option to
        /// <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. Not applicable to jobs running
        /// on Fargate resources.</p>
        pub fn shared_memory_size(mut self, input: i32) -> Self {
            self.shared_memory_size = Some(input);
            self
        }
        /// <p>Sets or clears the <code>/dev/shm</code> volume size.</p>
        pub fn set_shared_memory_size(mut self, input: std::option::Option<i32>) -> Self {
            self.shared_memory_size = input;
            self
        }
        /// Appends an item to `tmpfs`.
        ///
        /// To override the contents of this collection use [`set_tmpfs`](Self::set_tmpfs).
        ///
        /// <p>A tmpfs mount (container path, mount options, and size in MiB); maps to the <code>--tmpfs</code>
        /// option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. Not applicable to
        /// jobs running on Fargate resources.</p>
        pub fn tmpfs(mut self, input: impl Into<crate::model::Tmpfs>) -> Self {
            self.tmpfs.get_or_insert_with(Vec::new).push(input.into());
            self
        }
        /// <p>Sets or clears the whole list of tmpfs mounts.</p>
        pub fn set_tmpfs(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Tmpfs>>,
        ) -> Self {
            self.tmpfs = input;
            self
        }
        /// <p>The total amount of swap memory (in MiB) the container can use; translated to the
        /// <code>--memory-swap</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>
        /// as container memory plus this value. <code>0</code> disables swap; when omitted, the container
        /// instance's own swap configuration is used. Must be set for <code>swappiness</code> to take effect.
        /// Not applicable to jobs running on Fargate resources.</p>
        pub fn max_swap(mut self, input: i32) -> Self {
            self.max_swap = Some(input);
            self
        }
        /// <p>Sets or clears the maximum swap value.</p>
        pub fn set_max_swap(mut self, input: std::option::Option<i32>) -> Self {
            self.max_swap = input;
            self
        }
        /// <p>The container's memory swappiness, a whole number between <code>0</code> and <code>100</code>
        /// (default <code>60</code>); maps to the <code>--memory-swappiness</code> option to
        /// <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. Ignored unless
        /// <code>maxSwap</code> is set to a positive value; swap must also be enabled on the container instance,
        /// and the swap parameters are only supported for job definitions using EC2 resources. Not applicable to
        /// jobs running on Fargate resources.</p>
        pub fn swappiness(mut self, input: i32) -> Self {
            self.swappiness = Some(input);
            self
        }
        /// <p>Sets or clears the swappiness value.</p>
        pub fn set_swappiness(mut self, input: std::option::Option<i32>) -> Self {
            self.swappiness = input;
            self
        }
        /// Consumes the builder and constructs a [`LinuxParameters`](crate::model::LinuxParameters); scalar
        /// fields left unset fall back to their type defaults (`false` / `0`).
        pub fn build(self) -> crate::model::LinuxParameters {
            crate::model::LinuxParameters {
                devices: self.devices,
                init_process_enabled: self.init_process_enabled.unwrap_or_default(),
                shared_memory_size: self.shared_memory_size.unwrap_or_default(),
                tmpfs: self.tmpfs,
                max_swap: self.max_swap.unwrap_or_default(),
                swappiness: self.swappiness.unwrap_or_default(),
            }
        }
    }
}
impl LinuxParameters {
    /// Creates a new builder-style object to manufacture [`LinuxParameters`](crate::model::LinuxParameters)
    pub fn builder() -> crate::model::linux_parameters::Builder {
        // The return type pins the concrete `Builder`; `Default` yields an all-`None` builder.
        std::default::Default::default()
    }
}
/// <p>The container path, mount options, and size of the tmpfs mount.</p>
/// <note>
/// <p>This object isn't applicable to jobs that are running on Fargate resources.</p>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Tmpfs {
/// <p>The absolute file path in the container where the tmpfs volume is mounted.</p>
pub container_path: std::option::Option<std::string::String>,
/// <p>The size (in MiB) of the tmpfs volume.</p>
pub size: i32,
/// <p>The list of tmpfs volume mount options.</p>
/// <p>Valid values: "<code>defaults</code>" | "<code>ro</code>" | "<code>rw</code>" | "<code>suid</code>" |
/// "<code>nosuid</code>" | "<code>dev</code>" | "<code>nodev</code>" | "<code>exec</code>" | "<code>noexec</code>" |
/// "<code>sync</code>" | "<code>async</code>" | "<code>dirsync</code>" | "<code>remount</code>" | "<code>mand</code>" |
/// "<code>nomand</code>" | "<code>atime</code>" | "<code>noatime</code>" | "<code>diratime</code>" |
/// "<code>nodiratime</code>" | "<code>bind</code>" | "<code>rbind</code>" | "<code>unbindable</code>" |
/// "<code>runbindable</code>" | "<code>private</code>" | "<code>rprivate</code>" | "<code>shared</code>" |
/// "<code>rshared</code>" | "<code>slave</code>" | "<code>rslave</code>" | "<code>relatime</code>" |
/// "<code>norelatime</code>" | "<code>strictatime</code>" | "<code>nostrictatime</code>" | "<code>mode</code>" |
/// "<code>uid</code>" | "<code>gid</code>" | "<code>nr_inodes</code>" | "<code>nr_blocks</code>" | "<code>mpol</code>"</p>
pub mount_options: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Tmpfs {
    /// <p>The absolute file path in the container where the tmpfs volume is mounted.</p>
    pub fn container_path(&self) -> std::option::Option<&str> {
        self.container_path.as_ref().map(|path| path.as_str())
    }
    /// <p>The size (in MiB) of the tmpfs volume.</p>
    pub fn size(&self) -> i32 {
        self.size
    }
    /// <p>The list of tmpfs volume mount options.</p>
    /// <p>Valid values: "<code>defaults</code>" | "<code>ro</code>" | "<code>rw</code>" | "<code>suid</code>" |
    /// "<code>nosuid</code>" | "<code>dev</code>" | "<code>nodev</code>" | "<code>exec</code>" |
    /// "<code>noexec</code>" | "<code>sync</code>" | "<code>async</code>" | "<code>dirsync</code>" |
    /// "<code>remount</code>" | "<code>mand</code>" | "<code>nomand</code>" | "<code>atime</code>" |
    /// "<code>noatime</code>" | "<code>diratime</code>" | "<code>nodiratime</code>" | "<code>bind</code>" |
    /// "<code>rbind</code>" | "<code>unbindable</code>" | "<code>runbindable</code>" | "<code>private</code>" |
    /// "<code>rprivate</code>" | "<code>shared</code>" | "<code>rshared</code>" | "<code>slave</code>" |
    /// "<code>rslave</code>" | "<code>relatime</code>" | "<code>norelatime</code>" | "<code>strictatime</code>" |
    /// "<code>nostrictatime</code>" | "<code>mode</code>" | "<code>uid</code>" | "<code>gid</code>" |
    /// "<code>nr_inodes</code>" | "<code>nr_blocks</code>" | "<code>mpol</code>"</p>
    pub fn mount_options(&self) -> std::option::Option<&[std::string::String]> {
        self.mount_options.as_ref().map(|options| options.as_slice())
    }
}
/// Manual `Debug` implementation that lists every field of `Tmpfs`.
impl std::fmt::Debug for Tmpfs {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Tmpfs")
            .field("container_path", &self.container_path)
            .field("size", &self.size)
            .field("mount_options", &self.mount_options)
            .finish()
    }
}
/// See [`Tmpfs`](crate::model::Tmpfs)
pub mod tmpfs {
    /// A builder for [`Tmpfs`](crate::model::Tmpfs)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) container_path: std::option::Option<std::string::String>,
        pub(crate) size: std::option::Option<i32>,
        pub(crate) mount_options: std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// <p>The absolute file path in the container where the tmpfs volume is mounted.</p>
        pub fn container_path(mut self, input: impl Into<std::string::String>) -> Self {
            self.container_path = Some(input.into());
            self
        }
        /// <p>Sets or clears the container mount path.</p>
        pub fn set_container_path(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.container_path = input;
            self
        }
        /// <p>The size (in MiB) of the tmpfs volume.</p>
        pub fn size(mut self, input: i32) -> Self {
            self.size = Some(input);
            self
        }
        /// <p>Sets or clears the tmpfs volume size.</p>
        pub fn set_size(mut self, input: std::option::Option<i32>) -> Self {
            self.size = input;
            self
        }
        /// Appends an item to `mount_options`.
        ///
        /// To override the contents of this collection use [`set_mount_options`](Self::set_mount_options).
        ///
        /// <p>A tmpfs volume mount option; see [`Tmpfs`](crate::model::Tmpfs) for the full list of valid values
        /// (standard mount flags such as "<code>defaults</code>", "<code>ro</code>", "<code>rw</code>",
        /// "<code>noexec</code>", "<code>bind</code>", "<code>mode</code>", "<code>uid</code>",
        /// "<code>gid</code>", and so on).</p>
        pub fn mount_options(mut self, input: impl Into<std::string::String>) -> Self {
            // Lazily create the Vec on first append so an untouched builder keeps `None`.
            self.mount_options.get_or_insert_with(Vec::new).push(input.into());
            self
        }
        /// <p>Sets or clears the whole list of tmpfs volume mount options.</p>
        pub fn set_mount_options(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.mount_options = input;
            self
        }
        /// Consumes the builder and constructs a [`Tmpfs`](crate::model::Tmpfs); an unset `size` falls back to `0`.
        pub fn build(self) -> crate::model::Tmpfs {
            crate::model::Tmpfs {
                container_path: self.container_path,
                size: self.size.unwrap_or_default(),
                mount_options: self.mount_options,
            }
        }
    }
}
impl Tmpfs {
    /// Creates a new builder-style object to manufacture [`Tmpfs`](crate::model::Tmpfs)
    pub fn builder() -> crate::model::tmpfs::Builder {
        // The return type pins the concrete `Builder`; `Default` yields an all-`None` builder.
        std::default::Default::default()
    }
}
/// <p>An object representing a container instance host device.</p>
/// <p>Devices are supplied through the <code>devices</code> list of <code>LinuxParameters</code>, which maps to
/// the <code>--device</code> option to <code>docker run</code>.</p>
/// <note>
/// <p>This object isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.</p>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Device {
/// <p>The path for the device on the host container instance.</p>
pub host_path: std::option::Option<std::string::String>,
/// <p>The path inside the container that's used to expose the host device. By default, the <code>hostPath</code> value
/// is used.</p>
pub container_path: std::option::Option<std::string::String>,
/// <p>The explicit permissions to provide to the container for the device. By default, the container has permissions
/// for <code>read</code>, <code>write</code>, and <code>mknod</code> for the device.</p>
pub permissions: std::option::Option<std::vec::Vec<crate::model::DeviceCgroupPermission>>,
}
impl Device {
    /// <p>The path for the device on the host container instance.</p>
    pub fn host_path(&self) -> std::option::Option<&str> {
        self.host_path.as_ref().map(|path| path.as_str())
    }
    /// <p>The path inside the container that's used to expose the host device; defaults to the
    /// <code>hostPath</code> value when not set.</p>
    pub fn container_path(&self) -> std::option::Option<&str> {
        self.container_path.as_ref().map(|path| path.as_str())
    }
    /// <p>The explicit permissions to provide to the container for the device; when absent, the container has
    /// <code>read</code>, <code>write</code>, and <code>mknod</code> permissions for the device by default.</p>
    pub fn permissions(&self) -> std::option::Option<&[crate::model::DeviceCgroupPermission]> {
        self.permissions.as_ref().map(|perms| perms.as_slice())
    }
}
/// Manual `Debug` implementation that lists every field of `Device`.
impl std::fmt::Debug for Device {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Device")
            .field("host_path", &self.host_path)
            .field("container_path", &self.container_path)
            .field("permissions", &self.permissions)
            .finish()
    }
}
/// See [`Device`](crate::model::Device)
pub mod device {
    /// A builder for [`Device`](crate::model::Device)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) host_path: std::option::Option<std::string::String>,
        pub(crate) container_path: std::option::Option<std::string::String>,
        pub(crate) permissions:
            std::option::Option<std::vec::Vec<crate::model::DeviceCgroupPermission>>,
    }
    impl Builder {
        /// <p>The path for the device on the host container instance.</p>
        pub fn host_path(mut self, input: impl Into<std::string::String>) -> Self {
            self.host_path = Some(input.into());
            self
        }
        /// <p>Sets or clears the host device path.</p>
        pub fn set_host_path(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.host_path = input;
            self
        }
        /// <p>The path inside the container that's used to expose the host device; defaults to the
        /// <code>hostPath</code> value when not set.</p>
        pub fn container_path(mut self, input: impl Into<std::string::String>) -> Self {
            self.container_path = Some(input.into());
            self
        }
        /// <p>Sets or clears the in-container device path.</p>
        pub fn set_container_path(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.container_path = input;
            self
        }
        /// Appends an item to `permissions`.
        ///
        /// To override the contents of this collection use [`set_permissions`](Self::set_permissions).
        ///
        /// <p>An explicit permission to provide to the container for the device. By default, the container has
        /// <code>read</code>, <code>write</code>, and <code>mknod</code> permissions for the device.</p>
        pub fn permissions(
            mut self,
            input: impl Into<crate::model::DeviceCgroupPermission>,
        ) -> Self {
            // Lazily create the Vec on first append so an untouched builder keeps `None`.
            self.permissions.get_or_insert_with(Vec::new).push(input.into());
            self
        }
        /// <p>Sets or clears the whole list of explicit device permissions.</p>
        pub fn set_permissions(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::DeviceCgroupPermission>>,
        ) -> Self {
            self.permissions = input;
            self
        }
        /// Consumes the builder and constructs a [`Device`](crate::model::Device).
        pub fn build(self) -> crate::model::Device {
            crate::model::Device {
                host_path: self.host_path,
                container_path: self.container_path,
                permissions: self.permissions,
            }
        }
    }
}
impl Device {
    /// Creates a new builder-style object to manufacture [`Device`](crate::model::Device)
    pub fn builder() -> crate::model::device::Builder {
        // The return type pins the concrete `Builder`; `Default` yields an all-`None` builder.
        std::default::Default::default()
    }
}
/// A device cgroup permission granted to a container for a host [`Device`]; serialized on the wire
/// as the strings `"READ"`, `"WRITE"`, and `"MKNOD"`.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum DeviceCgroupPermission {
/// `mknod` permission for the device (wire value `"MKNOD"`).
#[allow(missing_docs)] // documentation missing in model
Mknod,
/// Read permission for the device (wire value `"READ"`).
#[allow(missing_docs)] // documentation missing in model
Read,
/// Write permission for the device (wire value `"WRITE"`).
#[allow(missing_docs)] // documentation missing in model
Write,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for DeviceCgroupPermission {
fn from(s: &str) -> Self {
match s {
"MKNOD" => DeviceCgroupPermission::Mknod,
"READ" => DeviceCgroupPermission::Read,
"WRITE" => DeviceCgroupPermission::Write,
other => DeviceCgroupPermission::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for DeviceCgroupPermission {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(DeviceCgroupPermission::from(s))
}
}
impl DeviceCgroupPermission {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
DeviceCgroupPermission::Mknod => "MKNOD",
DeviceCgroupPermission::Read => "READ",
DeviceCgroupPermission::Write => "WRITE",
DeviceCgroupPermission::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["MKNOD", "READ", "WRITE"]
}
}
impl AsRef<str> for DeviceCgroupPermission {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// The `ulimit` settings to pass to the container.
///
/// This object isn't applicable to jobs that are running on Fargate resources.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Ulimit {
    /// The hard limit for the `ulimit` type.
    pub hard_limit: i32,
    /// The `type` of the `ulimit`.
    pub name: std::option::Option<std::string::String>,
    /// The soft limit for the `ulimit` type.
    pub soft_limit: i32,
}
impl Ulimit {
    /// The hard limit for the `ulimit` type.
    pub fn hard_limit(&self) -> i32 {
        self.hard_limit
    }
    /// The `type` of the `ulimit`.
    pub fn name(&self) -> std::option::Option<&str> {
        self.name.as_ref().map(|n| n.as_str())
    }
    /// The soft limit for the `ulimit` type.
    pub fn soft_limit(&self) -> i32 {
        self.soft_limit
    }
}
impl std::fmt::Debug for Ulimit {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Ulimit")
            .field("hard_limit", &self.hard_limit)
            .field("name", &self.name)
            .field("soft_limit", &self.soft_limit)
            .finish()
    }
}
/// See [`Ulimit`](crate::model::Ulimit)
pub mod ulimit {
    /// A builder for [`Ulimit`](crate::model::Ulimit)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) hard_limit: std::option::Option<i32>,
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) soft_limit: std::option::Option<i32>,
    }
    impl Builder {
        /// Sets the hard limit for the `ulimit` type.
        pub fn hard_limit(mut self, value: i32) -> Self {
            self.hard_limit = Some(value);
            self
        }
        /// Sets or clears the hard limit for the `ulimit` type.
        pub fn set_hard_limit(mut self, value: std::option::Option<i32>) -> Self {
            self.hard_limit = value;
            self
        }
        /// Sets the `type` of the `ulimit`.
        pub fn name(mut self, value: impl Into<std::string::String>) -> Self {
            self.name = Some(value.into());
            self
        }
        /// Sets or clears the `type` of the `ulimit`.
        pub fn set_name(mut self, value: std::option::Option<std::string::String>) -> Self {
            self.name = value;
            self
        }
        /// Sets the soft limit for the `ulimit` type.
        pub fn soft_limit(mut self, value: i32) -> Self {
            self.soft_limit = Some(value);
            self
        }
        /// Sets or clears the soft limit for the `ulimit` type.
        pub fn set_soft_limit(mut self, value: std::option::Option<i32>) -> Self {
            self.soft_limit = value;
            self
        }
        /// Consumes the builder and constructs a [`Ulimit`](crate::model::Ulimit);
        /// unset numeric limits become `0`.
        pub fn build(self) -> crate::model::Ulimit {
            crate::model::Ulimit {
                hard_limit: self.hard_limit.unwrap_or(0),
                name: self.name,
                soft_limit: self.soft_limit.unwrap_or(0),
            }
        }
    }
}
impl Ulimit {
    /// Returns a fresh builder-style object to manufacture a
    /// [`Ulimit`](crate::model::Ulimit).
    pub fn builder() -> crate::model::ulimit::Builder {
        std::default::Default::default()
    }
}
/// Details on a Docker volume mount point used in a job's container
/// properties. Maps to `Volumes` in the "Create a container" section of the
/// Docker Remote API and the `--volume` option to `docker run`.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct MountPoint {
    /// The path on the container where the host volume is mounted.
    pub container_path: std::option::Option<std::string::String>,
    /// When `true`, the container has read-only access to the volume;
    /// otherwise it can write to the volume. Defaults to `false`.
    pub read_only: bool,
    /// The name of the volume to mount.
    pub source_volume: std::option::Option<std::string::String>,
}
impl MountPoint {
    /// The path on the container where the host volume is mounted.
    pub fn container_path(&self) -> std::option::Option<&str> {
        self.container_path.as_ref().map(|p| p.as_str())
    }
    /// Whether the container has read-only access to the volume; `false`
    /// (the default) means the container can write to it.
    pub fn read_only(&self) -> bool {
        self.read_only
    }
    /// The name of the volume to mount.
    pub fn source_volume(&self) -> std::option::Option<&str> {
        self.source_volume.as_ref().map(|v| v.as_str())
    }
}
impl std::fmt::Debug for MountPoint {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("MountPoint")
            .field("container_path", &self.container_path)
            .field("read_only", &self.read_only)
            .field("source_volume", &self.source_volume)
            .finish()
    }
}
/// See [`MountPoint`](crate::model::MountPoint)
pub mod mount_point {
    /// A builder for [`MountPoint`](crate::model::MountPoint)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) container_path: std::option::Option<std::string::String>,
        pub(crate) read_only: std::option::Option<bool>,
        pub(crate) source_volume: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Sets the path on the container where the host volume is mounted.
        pub fn container_path(mut self, value: impl Into<std::string::String>) -> Self {
            self.container_path = Some(value.into());
            self
        }
        /// Sets or clears the container mount path.
        pub fn set_container_path(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.container_path = value;
            self
        }
        /// Sets whether the container has read-only access to the volume
        /// (defaults to `false`, i.e. writable).
        pub fn read_only(mut self, value: bool) -> Self {
            self.read_only = Some(value);
            self
        }
        /// Sets or clears the read-only flag.
        pub fn set_read_only(mut self, value: std::option::Option<bool>) -> Self {
            self.read_only = value;
            self
        }
        /// Sets the name of the volume to mount.
        pub fn source_volume(mut self, value: impl Into<std::string::String>) -> Self {
            self.source_volume = Some(value.into());
            self
        }
        /// Sets or clears the source volume name.
        pub fn set_source_volume(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.source_volume = value;
            self
        }
        /// Consumes the builder and constructs a
        /// [`MountPoint`](crate::model::MountPoint); an unset `read_only`
        /// becomes `false`.
        pub fn build(self) -> crate::model::MountPoint {
            crate::model::MountPoint {
                container_path: self.container_path,
                read_only: self.read_only.unwrap_or(false),
                source_volume: self.source_volume,
            }
        }
    }
}
impl MountPoint {
    /// Returns a fresh builder-style object to manufacture a
    /// [`MountPoint`](crate::model::MountPoint).
    pub fn builder() -> crate::model::mount_point::Builder {
        std::default::Default::default()
    }
}
/// A data volume used in a job's container properties.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Volume {
    /// Controls whether the data volume persists on the host container
    /// instance and where it is stored. When empty, the Docker daemon assigns
    /// a host path and the data isn't guaranteed to persist after the
    /// associated containers stop. Not applicable to jobs running on Fargate
    /// resources and shouldn't be provided there.
    pub host: std::option::Option<crate::model::Host>,
    /// The volume name (up to 255 letters — upper and lower case — numbers,
    /// hyphens, and underscores), referenced by the `sourceVolume` parameter
    /// of container definition `mountPoints`.
    pub name: std::option::Option<std::string::String>,
    /// Specified when an Amazon Elastic File System file system is used for
    /// job storage; Fargate jobs must specify a `platformVersion` of at least
    /// `1.4.0`.
    pub efs_volume_configuration: std::option::Option<crate::model::EfsVolumeConfiguration>,
}
impl Volume {
    /// Host-volume details; when empty, the Docker daemon assigns a host path
    /// and persistence after the containers stop isn't guaranteed. Not
    /// applicable to jobs running on Fargate resources.
    pub fn host(&self) -> std::option::Option<&crate::model::Host> {
        self.host.as_ref()
    }
    /// The volume name referenced by the `sourceVolume` parameter of container
    /// definition `mountPoints`.
    pub fn name(&self) -> std::option::Option<&str> {
        self.name.as_ref().map(|n| n.as_str())
    }
    /// Amazon EFS configuration; Fargate jobs must specify a
    /// `platformVersion` of at least `1.4.0`.
    pub fn efs_volume_configuration(
        &self,
    ) -> std::option::Option<&crate::model::EfsVolumeConfiguration> {
        self.efs_volume_configuration.as_ref()
    }
}
impl std::fmt::Debug for Volume {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Volume")
            .field("host", &self.host)
            .field("name", &self.name)
            .field("efs_volume_configuration", &self.efs_volume_configuration)
            .finish()
    }
}
/// See [`Volume`](crate::model::Volume)
pub mod volume {
    /// A builder for [`Volume`](crate::model::Volume)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) host: std::option::Option<crate::model::Host>,
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) efs_volume_configuration:
            std::option::Option<crate::model::EfsVolumeConfiguration>,
    }
    impl Builder {
        /// Sets the host-volume details; not applicable to jobs running on
        /// Fargate resources and shouldn't be provided there.
        pub fn host(mut self, value: crate::model::Host) -> Self {
            self.host = Some(value);
            self
        }
        /// Sets or clears the host-volume details.
        pub fn set_host(mut self, value: std::option::Option<crate::model::Host>) -> Self {
            self.host = value;
            self
        }
        /// Sets the volume name (up to 255 letters, numbers, hyphens, and
        /// underscores) referenced by `sourceVolume` in container definition
        /// `mountPoints`.
        pub fn name(mut self, value: impl Into<std::string::String>) -> Self {
            self.name = Some(value.into());
            self
        }
        /// Sets or clears the volume name.
        pub fn set_name(mut self, value: std::option::Option<std::string::String>) -> Self {
            self.name = value;
            self
        }
        /// Sets the Amazon EFS configuration; Fargate jobs must specify a
        /// `platformVersion` of at least `1.4.0`.
        pub fn efs_volume_configuration(
            mut self,
            value: crate::model::EfsVolumeConfiguration,
        ) -> Self {
            self.efs_volume_configuration = Some(value);
            self
        }
        /// Sets or clears the Amazon EFS configuration.
        pub fn set_efs_volume_configuration(
            mut self,
            value: std::option::Option<crate::model::EfsVolumeConfiguration>,
        ) -> Self {
            self.efs_volume_configuration = value;
            self
        }
        /// Consumes the builder and constructs a [`Volume`](crate::model::Volume)
        pub fn build(self) -> crate::model::Volume {
            crate::model::Volume {
                host: self.host,
                name: self.name,
                efs_volume_configuration: self.efs_volume_configuration,
            }
        }
    }
}
impl Volume {
    /// Returns a fresh builder-style object to manufacture a
    /// [`Volume`](crate::model::Volume).
    pub fn builder() -> crate::model::volume::Builder {
        std::default::Default::default()
    }
}
/// Configuration used when an Amazon Elastic File System file system provides
/// job storage. For more information, see
/// [Amazon EFS Volumes](https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html)
/// in the *Batch User Guide*.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct EfsVolumeConfiguration {
    /// The Amazon EFS file system ID to use.
    pub file_system_id: std::option::Option<std::string::String>,
    /// The directory within the Amazon EFS file system mounted as the root
    /// directory inside the host (max 4,096 characters). Omitting it or
    /// specifying `/` uses the root of the volume; when an EFS access point
    /// is specified in `authorizationConfig`, this must be omitted or `/`.
    pub root_directory: std::option::Option<std::string::String>,
    /// Whether to encrypt Amazon EFS data in transit between the Amazon ECS
    /// host and the Amazon EFS server. Defaults to `DISABLED` when omitted;
    /// must be enabled when Amazon EFS IAM authorization is used.
    pub transit_encryption: std::option::Option<crate::model::EfsTransitEncryption>,
    /// The port (0-65,535) used for encrypted data between the Amazon ECS
    /// host and the Amazon EFS server; when unspecified, the Amazon EFS mount
    /// helper's port selection strategy is used.
    pub transit_encryption_port: i32,
    /// The authorization configuration details for the Amazon EFS file system.
    pub authorization_config: std::option::Option<crate::model::EfsAuthorizationConfig>,
}
impl EfsVolumeConfiguration {
    /// The Amazon EFS file system ID to use.
    pub fn file_system_id(&self) -> std::option::Option<&str> {
        self.file_system_id.as_ref().map(|id| id.as_str())
    }
    /// The directory mounted as the root directory inside the host; `None`
    /// means the root of the Amazon EFS volume is used.
    pub fn root_directory(&self) -> std::option::Option<&str> {
        self.root_directory.as_ref().map(|dir| dir.as_str())
    }
    /// Whether Amazon EFS data is encrypted in transit between the Amazon ECS
    /// host and the Amazon EFS server; defaults to `DISABLED` when unset.
    pub fn transit_encryption(&self) -> std::option::Option<&crate::model::EfsTransitEncryption> {
        self.transit_encryption.as_ref()
    }
    /// The port (0-65,535) used for encrypted data in transit; when unset,
    /// the Amazon EFS mount helper's port selection strategy applies.
    pub fn transit_encryption_port(&self) -> i32 {
        self.transit_encryption_port
    }
    /// The authorization configuration details for the Amazon EFS file system.
    pub fn authorization_config(
        &self,
    ) -> std::option::Option<&crate::model::EfsAuthorizationConfig> {
        self.authorization_config.as_ref()
    }
}
impl std::fmt::Debug for EfsVolumeConfiguration {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("EfsVolumeConfiguration")
            .field("file_system_id", &self.file_system_id)
            .field("root_directory", &self.root_directory)
            .field("transit_encryption", &self.transit_encryption)
            .field("transit_encryption_port", &self.transit_encryption_port)
            .field("authorization_config", &self.authorization_config)
            .finish()
    }
}
/// See [`EfsVolumeConfiguration`](crate::model::EfsVolumeConfiguration)
pub mod efs_volume_configuration {
    /// A builder for [`EfsVolumeConfiguration`](crate::model::EfsVolumeConfiguration)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) file_system_id: std::option::Option<std::string::String>,
        pub(crate) root_directory: std::option::Option<std::string::String>,
        pub(crate) transit_encryption: std::option::Option<crate::model::EfsTransitEncryption>,
        pub(crate) transit_encryption_port: std::option::Option<i32>,
        pub(crate) authorization_config: std::option::Option<crate::model::EfsAuthorizationConfig>,
    }
    impl Builder {
        /// Sets the Amazon EFS file system ID to use.
        pub fn file_system_id(mut self, value: impl Into<std::string::String>) -> Self {
            self.file_system_id = Some(value.into());
            self
        }
        /// Sets or clears the Amazon EFS file system ID.
        pub fn set_file_system_id(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.file_system_id = value;
            self
        }
        /// Sets the directory mounted as the root directory inside the host
        /// (max 4,096 characters); must be omitted or `/` when an EFS access
        /// point is specified in `authorizationConfig`.
        pub fn root_directory(mut self, value: impl Into<std::string::String>) -> Self {
            self.root_directory = Some(value.into());
            self
        }
        /// Sets or clears the root directory.
        pub fn set_root_directory(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.root_directory = value;
            self
        }
        /// Sets whether data in transit is encrypted between the Amazon ECS
        /// host and the Amazon EFS server (defaults to `DISABLED` when unset).
        pub fn transit_encryption(mut self, value: crate::model::EfsTransitEncryption) -> Self {
            self.transit_encryption = Some(value);
            self
        }
        /// Sets or clears the transit-encryption setting.
        pub fn set_transit_encryption(
            mut self,
            value: std::option::Option<crate::model::EfsTransitEncryption>,
        ) -> Self {
            self.transit_encryption = value;
            self
        }
        /// Sets the port (0-65,535) used for encrypted transit; when unset,
        /// the Amazon EFS mount helper's port selection strategy applies.
        pub fn transit_encryption_port(mut self, value: i32) -> Self {
            self.transit_encryption_port = Some(value);
            self
        }
        /// Sets or clears the transit-encryption port.
        pub fn set_transit_encryption_port(mut self, value: std::option::Option<i32>) -> Self {
            self.transit_encryption_port = value;
            self
        }
        /// Sets the authorization configuration for the file system.
        pub fn authorization_config(
            mut self,
            value: crate::model::EfsAuthorizationConfig,
        ) -> Self {
            self.authorization_config = Some(value);
            self
        }
        /// Sets or clears the authorization configuration.
        pub fn set_authorization_config(
            mut self,
            value: std::option::Option<crate::model::EfsAuthorizationConfig>,
        ) -> Self {
            self.authorization_config = value;
            self
        }
        /// Consumes the builder and constructs an
        /// [`EfsVolumeConfiguration`](crate::model::EfsVolumeConfiguration);
        /// an unset port becomes `0`.
        pub fn build(self) -> crate::model::EfsVolumeConfiguration {
            crate::model::EfsVolumeConfiguration {
                file_system_id: self.file_system_id,
                root_directory: self.root_directory,
                transit_encryption: self.transit_encryption,
                transit_encryption_port: self.transit_encryption_port.unwrap_or(0),
                authorization_config: self.authorization_config,
            }
        }
    }
}
impl EfsVolumeConfiguration {
    /// Returns a fresh builder-style object to manufacture an
    /// [`EfsVolumeConfiguration`](crate::model::EfsVolumeConfiguration).
    pub fn builder() -> crate::model::efs_volume_configuration::Builder {
        std::default::Default::default()
    }
}
/// The authorization configuration details for the Amazon EFS file system.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct EfsAuthorizationConfig {
    /// The Amazon EFS access point ID to use. When specified, the root
    /// directory in the `EFSVolumeConfiguration` must be omitted or `/`
    /// (enforcing the path set on the access point), and transit encryption
    /// must be enabled in the `EFSVolumeConfiguration`.
    pub access_point_id: std::option::Option<std::string::String>,
    /// Whether to use the Batch job IAM role defined in a job definition when
    /// mounting the Amazon EFS file system (defaults to `DISABLED` when
    /// omitted). EFS IAM authorization requires `TransitEncryption` to be
    /// `ENABLED` and a `JobRoleArn` to be specified.
    pub iam: std::option::Option<crate::model::EfsAuthorizationConfigIam>,
}
impl EfsAuthorizationConfig {
    /// The Amazon EFS access point ID to use; when set, the root directory in
    /// the `EFSVolumeConfiguration` must be omitted or `/`, and transit
    /// encryption must be enabled.
    pub fn access_point_id(&self) -> std::option::Option<&str> {
        self.access_point_id.as_ref().map(|id| id.as_str())
    }
    /// Whether the Batch job IAM role defined in a job definition is used
    /// when mounting the Amazon EFS file system; defaults to `DISABLED` when
    /// unset.
    pub fn iam(&self) -> std::option::Option<&crate::model::EfsAuthorizationConfigIam> {
        self.iam.as_ref()
    }
}
impl std::fmt::Debug for EfsAuthorizationConfig {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("EfsAuthorizationConfig")
            .field("access_point_id", &self.access_point_id)
            .field("iam", &self.iam)
            .finish()
    }
}
/// See [`EfsAuthorizationConfig`](crate::model::EfsAuthorizationConfig)
pub mod efs_authorization_config {
    /// A builder for [`EfsAuthorizationConfig`](crate::model::EfsAuthorizationConfig)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) access_point_id: std::option::Option<std::string::String>,
        pub(crate) iam: std::option::Option<crate::model::EfsAuthorizationConfigIam>,
    }
    impl Builder {
        /// Sets the Amazon EFS access point ID; when used, transit encryption
        /// must be enabled and the root directory must be omitted or `/`.
        pub fn access_point_id(mut self, value: impl Into<std::string::String>) -> Self {
            self.access_point_id = Some(value.into());
            self
        }
        /// Sets or clears the Amazon EFS access point ID.
        pub fn set_access_point_id(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.access_point_id = value;
            self
        }
        /// Sets whether the Batch job IAM role is used when mounting the
        /// Amazon EFS file system.
        pub fn iam(mut self, value: crate::model::EfsAuthorizationConfigIam) -> Self {
            self.iam = Some(value);
            self
        }
        /// Sets or clears the IAM authorization setting.
        pub fn set_iam(
            mut self,
            value: std::option::Option<crate::model::EfsAuthorizationConfigIam>,
        ) -> Self {
            self.iam = value;
            self
        }
        /// Consumes the builder and constructs an
        /// [`EfsAuthorizationConfig`](crate::model::EfsAuthorizationConfig)
        pub fn build(self) -> crate::model::EfsAuthorizationConfig {
            crate::model::EfsAuthorizationConfig {
                access_point_id: self.access_point_id,
                iam: self.iam,
            }
        }
    }
}
impl EfsAuthorizationConfig {
    /// Creates a new builder-style object to manufacture [`EfsAuthorizationConfig`](crate::model::EfsAuthorizationConfig)
    pub fn builder() -> crate::model::efs_authorization_config::Builder {
        // The builder derives `Default`, so an empty builder is just its default value.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum EfsAuthorizationConfigIam {
    #[allow(missing_docs)] // documentation missing in model
    Disabled,
    #[allow(missing_docs)] // documentation missing in model
    Enabled,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for EfsAuthorizationConfigIam {
    fn from(s: &str) -> Self {
        // Unrecognized values are preserved verbatim in `Unknown` so newer
        // service responses still round-trip through this client.
        match s {
            "DISABLED" => Self::Disabled,
            "ENABLED" => Self::Enabled,
            other => Self::Unknown(other.to_owned()),
        }
    }
}
impl std::str::FromStr for EfsAuthorizationConfigIam {
    type Err = std::convert::Infallible;
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Parsing never fails: unmatched strings map to `Unknown`.
        Ok(s.into())
    }
}
impl EfsAuthorizationConfigIam {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Disabled => "DISABLED",
            Self::Enabled => "ENABLED",
            Self::Unknown(value) => value.as_ref(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub fn values() -> &'static [&'static str] {
        &["DISABLED", "ENABLED"]
    }
}
impl AsRef<str> for EfsAuthorizationConfigIam {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum EfsTransitEncryption {
    #[allow(missing_docs)] // documentation missing in model
    Disabled,
    #[allow(missing_docs)] // documentation missing in model
    Enabled,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for EfsTransitEncryption {
    fn from(s: &str) -> Self {
        // Unrecognized values are preserved verbatim in `Unknown` so newer
        // service responses still round-trip through this client.
        match s {
            "DISABLED" => Self::Disabled,
            "ENABLED" => Self::Enabled,
            other => Self::Unknown(other.to_owned()),
        }
    }
}
impl std::str::FromStr for EfsTransitEncryption {
    type Err = std::convert::Infallible;
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Parsing never fails: unmatched strings map to `Unknown`.
        Ok(s.into())
    }
}
impl EfsTransitEncryption {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Disabled => "DISABLED",
            Self::Enabled => "ENABLED",
            Self::Unknown(value) => value.as_ref(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub fn values() -> &'static [&'static str] {
        &["DISABLED", "ENABLED"]
    }
}
impl AsRef<str> for EfsTransitEncryption {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
/// <p>Determine whether your data volume persists on the host container instance and where it is stored. If this
/// parameter is empty, then the Docker daemon assigns a host path for your data volume, but the data isn't guaranteed to
/// persist after the containers associated with it stop running.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Host {
    /// <p>The path on the host container instance that's presented to the container. If this parameter is empty, then the
    /// Docker daemon has assigned a host path for you. If this parameter contains a file location, then the data volume
    /// persists at the specified location on the host container instance until you delete it manually. If the source path
    /// location doesn't exist on the host container instance, the Docker daemon creates it. If the location does exist, the
    /// contents of the source path folder are exported.</p>
    /// <note>
    /// <p>This parameter isn't applicable to jobs that run on Fargate resources and shouldn't be provided.</p>
    /// </note>
    pub source_path: std::option::Option<std::string::String>,
}
impl Host {
    /// <p>The host source path presented to the container, if one was configured; see the
    /// field documentation on [`Host::source_path`] for the full semantics.</p>
    pub fn source_path(&self) -> std::option::Option<&str> {
        self.source_path.as_ref().map(|path| path.as_str())
    }
}
impl std::fmt::Debug for Host {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Host")
            .field("source_path", &self.source_path)
            .finish()
    }
}
/// See [`Host`](crate::model::Host)
pub mod host {
    /// A builder for [`Host`](crate::model::Host)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) source_path: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The path on the host container instance that's presented to the container. When empty, the Docker
        /// daemon assigns a host path; when set, the data volume persists at that location until you delete it
        /// manually (the daemon creates the path if it doesn't exist, and exports its contents if it does).</p>
        /// <note>
        /// <p>This parameter isn't applicable to jobs that run on Fargate resources and shouldn't be provided.</p>
        /// </note>
        pub fn source_path(self, input: impl Into<std::string::String>) -> Self {
            Self {
                source_path: Some(input.into()),
            }
        }
        /// <p>Optional-valued form of [`Self::source_path`]: passing <code>None</code> clears the path.</p>
        pub fn set_source_path(self, input: std::option::Option<std::string::String>) -> Self {
            Self { source_path: input }
        }
        /// Consumes the builder and constructs a [`Host`](crate::model::Host)
        pub fn build(self) -> crate::model::Host {
            let Self { source_path } = self;
            crate::model::Host { source_path }
        }
    }
}
impl Host {
    /// Creates a new builder-style object to manufacture [`Host`](crate::model::Host)
    pub fn builder() -> crate::model::host::Builder {
        // The builder derives `Default`, so an empty builder is just its default value.
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum JobDefinitionType {
    #[allow(missing_docs)] // documentation missing in model
    Container,
    #[allow(missing_docs)] // documentation missing in model
    Multinode,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for JobDefinitionType {
    fn from(s: &str) -> Self {
        // Unrecognized values are preserved verbatim in `Unknown` so newer
        // service responses still round-trip through this client.
        match s {
            "container" => Self::Container,
            "multinode" => Self::Multinode,
            other => Self::Unknown(other.to_owned()),
        }
    }
}
impl std::str::FromStr for JobDefinitionType {
    type Err = std::convert::Infallible;
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Parsing never fails: unmatched strings map to `Unknown`.
        Ok(s.into())
    }
}
impl JobDefinitionType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Container => "container",
            Self::Multinode => "multinode",
            Self::Unknown(value) => value.as_ref(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub fn values() -> &'static [&'static str] {
        &["container", "multinode"]
    }
}
impl AsRef<str> for JobDefinitionType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
/// <p>An object containing the details of a scheduling policy returned in a <code>ListSchedulingPolicy</code>
/// action.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct SchedulingPolicyListingDetail {
    /// <p>Amazon Resource Name (ARN) of the scheduling policy.</p>
    pub arn: std::option::Option<std::string::String>,
}
impl SchedulingPolicyListingDetail {
    /// <p>Amazon Resource Name (ARN) of the scheduling policy, if set.</p>
    pub fn arn(&self) -> std::option::Option<&str> {
        self.arn.as_ref().map(|arn| arn.as_str())
    }
}
impl std::fmt::Debug for SchedulingPolicyListingDetail {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("SchedulingPolicyListingDetail")
            .field("arn", &self.arn)
            .finish()
    }
}
/// See [`SchedulingPolicyListingDetail`](crate::model::SchedulingPolicyListingDetail)
pub mod scheduling_policy_listing_detail {
    /// A builder for [`SchedulingPolicyListingDetail`](crate::model::SchedulingPolicyListingDetail)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) arn: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>Amazon Resource Name (ARN) of the scheduling policy.</p>
        pub fn arn(self, input: impl Into<std::string::String>) -> Self {
            Self {
                arn: Some(input.into()),
            }
        }
        /// <p>Optional-valued form of [`Self::arn`]: passing <code>None</code> clears the ARN.</p>
        pub fn set_arn(self, input: std::option::Option<std::string::String>) -> Self {
            Self { arn: input }
        }
        /// Consumes the builder and constructs a [`SchedulingPolicyListingDetail`](crate::model::SchedulingPolicyListingDetail)
        pub fn build(self) -> crate::model::SchedulingPolicyListingDetail {
            let Self { arn } = self;
            crate::model::SchedulingPolicyListingDetail { arn }
        }
    }
}
impl SchedulingPolicyListingDetail {
    /// Creates a new builder-style object to manufacture [`SchedulingPolicyListingDetail`](crate::model::SchedulingPolicyListingDetail)
    pub fn builder() -> crate::model::scheduling_policy_listing_detail::Builder {
        // The builder derives `Default`, so an empty builder is just its default value.
        Default::default()
    }
}
/// <p>An object representing summary details of a job.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct JobSummary {
    /// <p>The Amazon Resource Name (ARN) of the job.</p>
    pub job_arn: std::option::Option<std::string::String>,
    /// <p>The ID of the job.</p>
    pub job_id: std::option::Option<std::string::String>,
    /// <p>The name of the job.</p>
    pub job_name: std::option::Option<std::string::String>,
    /// <p>The Unix timestamp for when the job was created. For non-array jobs and parent array jobs, this is when the job
    /// entered the <code>SUBMITTED</code> state (at the time <a>SubmitJob</a> was called). For array child jobs,
    /// this is when the child job was spawned by its parent and entered the <code>PENDING</code> state.</p>
    // NOTE: the three i64 timestamps in this struct are plain (non-Option) fields;
    // the builder substitutes 0 via unwrap_or_default() when the value was never set,
    // so 0 here can mean "absent in the service response".
    pub created_at: i64,
    /// <p>The current status for the job.</p>
    pub status: std::option::Option<crate::model::JobStatus>,
    /// <p>A short, human-readable string to provide additional details about the current status of the job.</p>
    pub status_reason: std::option::Option<std::string::String>,
    /// <p>The Unix timestamp for when the job was started (when the job transitioned from the <code>STARTING</code> state
    /// to the <code>RUNNING</code> state).</p>
    pub started_at: i64,
    /// <p>The Unix timestamp for when the job was stopped (when the job transitioned from the <code>RUNNING</code> state
    /// to a terminal state, such as <code>SUCCEEDED</code> or <code>FAILED</code>).</p>
    pub stopped_at: i64,
    /// <p>An object representing the details of the container that's associated with the job.</p>
    pub container: std::option::Option<crate::model::ContainerSummary>,
    /// <p>The array properties of the job, if it is an array job.</p>
    pub array_properties: std::option::Option<crate::model::ArrayPropertiesSummary>,
    /// <p>The node properties for a single node in a job summary list.</p>
    /// <note>
    /// <p>This isn't applicable to jobs that are running on Fargate resources.</p>
    /// </note>
    pub node_properties: std::option::Option<crate::model::NodePropertiesSummary>,
    /// <p>The Amazon Resource Name (ARN) of the job definition.</p>
    pub job_definition: std::option::Option<std::string::String>,
}
impl JobSummary {
    /// <p>The Amazon Resource Name (ARN) of the job, if set.</p>
    pub fn job_arn(&self) -> std::option::Option<&str> {
        self.job_arn.as_ref().map(String::as_str)
    }
    /// <p>The ID of the job, if set.</p>
    pub fn job_id(&self) -> std::option::Option<&str> {
        self.job_id.as_ref().map(String::as_str)
    }
    /// <p>The name of the job, if set.</p>
    pub fn job_name(&self) -> std::option::Option<&str> {
        self.job_name.as_ref().map(String::as_str)
    }
    /// <p>The Unix timestamp for when the job was created: when the job entered the
    /// <code>SUBMITTED</code> state for non-array and parent array jobs, or when the child job was spawned by its
    /// parent (entering <code>PENDING</code>) for array child jobs.</p>
    pub fn created_at(&self) -> i64 {
        self.created_at
    }
    /// <p>The current status for the job.</p>
    pub fn status(&self) -> std::option::Option<&crate::model::JobStatus> {
        self.status.as_ref()
    }
    /// <p>A short, human-readable string to provide additional details about the current status of the job.</p>
    pub fn status_reason(&self) -> std::option::Option<&str> {
        self.status_reason.as_ref().map(String::as_str)
    }
    /// <p>The Unix timestamp for when the job was started (transition from <code>STARTING</code> to
    /// <code>RUNNING</code>).</p>
    pub fn started_at(&self) -> i64 {
        self.started_at
    }
    /// <p>The Unix timestamp for when the job was stopped (transition from <code>RUNNING</code> to a terminal state
    /// such as <code>SUCCEEDED</code> or <code>FAILED</code>).</p>
    pub fn stopped_at(&self) -> i64 {
        self.stopped_at
    }
    /// <p>An object representing the details of the container that's associated with the job.</p>
    pub fn container(&self) -> std::option::Option<&crate::model::ContainerSummary> {
        self.container.as_ref()
    }
    /// <p>The array properties of the job, if it is an array job.</p>
    pub fn array_properties(&self) -> std::option::Option<&crate::model::ArrayPropertiesSummary> {
        self.array_properties.as_ref()
    }
    /// <p>The node properties for a single node in a job summary list.</p>
    /// <note>
    /// <p>This isn't applicable to jobs that are running on Fargate resources.</p>
    /// </note>
    pub fn node_properties(&self) -> std::option::Option<&crate::model::NodePropertiesSummary> {
        self.node_properties.as_ref()
    }
    /// <p>The Amazon Resource Name (ARN) of the job definition, if set.</p>
    pub fn job_definition(&self) -> std::option::Option<&str> {
        self.job_definition.as_ref().map(String::as_str)
    }
}
impl std::fmt::Debug for JobSummary {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chain the debug-struct builder instead of binding it to a local.
        f.debug_struct("JobSummary")
            .field("job_arn", &self.job_arn)
            .field("job_id", &self.job_id)
            .field("job_name", &self.job_name)
            .field("created_at", &self.created_at)
            .field("status", &self.status)
            .field("status_reason", &self.status_reason)
            .field("started_at", &self.started_at)
            .field("stopped_at", &self.stopped_at)
            .field("container", &self.container)
            .field("array_properties", &self.array_properties)
            .field("node_properties", &self.node_properties)
            .field("job_definition", &self.job_definition)
            .finish()
    }
}
/// See [`JobSummary`](crate::model::JobSummary)
pub mod job_summary {
    /// A builder for [`JobSummary`](crate::model::JobSummary)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) job_arn: std::option::Option<std::string::String>,
        pub(crate) job_id: std::option::Option<std::string::String>,
        pub(crate) job_name: std::option::Option<std::string::String>,
        pub(crate) created_at: std::option::Option<i64>,
        pub(crate) status: std::option::Option<crate::model::JobStatus>,
        pub(crate) status_reason: std::option::Option<std::string::String>,
        pub(crate) started_at: std::option::Option<i64>,
        pub(crate) stopped_at: std::option::Option<i64>,
        pub(crate) container: std::option::Option<crate::model::ContainerSummary>,
        pub(crate) array_properties: std::option::Option<crate::model::ArrayPropertiesSummary>,
        pub(crate) node_properties: std::option::Option<crate::model::NodePropertiesSummary>,
        pub(crate) job_definition: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The Amazon Resource Name (ARN) of the job.</p>
        pub fn job_arn(self, input: impl Into<std::string::String>) -> Self {
            Self {
                job_arn: Some(input.into()),
                ..self
            }
        }
        /// <p>Optional-valued form of [`Self::job_arn`].</p>
        pub fn set_job_arn(self, input: std::option::Option<std::string::String>) -> Self {
            Self { job_arn: input, ..self }
        }
        /// <p>The ID of the job.</p>
        pub fn job_id(self, input: impl Into<std::string::String>) -> Self {
            Self {
                job_id: Some(input.into()),
                ..self
            }
        }
        /// <p>Optional-valued form of [`Self::job_id`].</p>
        pub fn set_job_id(self, input: std::option::Option<std::string::String>) -> Self {
            Self { job_id: input, ..self }
        }
        /// <p>The name of the job.</p>
        pub fn job_name(self, input: impl Into<std::string::String>) -> Self {
            Self {
                job_name: Some(input.into()),
                ..self
            }
        }
        /// <p>Optional-valued form of [`Self::job_name`].</p>
        pub fn set_job_name(self, input: std::option::Option<std::string::String>) -> Self {
            Self { job_name: input, ..self }
        }
        /// <p>The Unix timestamp for when the job was created: when the job entered the
        /// <code>SUBMITTED</code> state for non-array and parent array jobs (at the time <a>SubmitJob</a> was
        /// called), or when the child job was spawned by its parent (entering <code>PENDING</code>) for array
        /// child jobs.</p>
        pub fn created_at(self, input: i64) -> Self {
            Self {
                created_at: Some(input),
                ..self
            }
        }
        /// <p>Optional-valued form of [`Self::created_at`].</p>
        pub fn set_created_at(self, input: std::option::Option<i64>) -> Self {
            Self { created_at: input, ..self }
        }
        /// <p>The current status for the job.</p>
        pub fn status(self, input: crate::model::JobStatus) -> Self {
            Self {
                status: Some(input),
                ..self
            }
        }
        /// <p>Optional-valued form of [`Self::status`].</p>
        pub fn set_status(self, input: std::option::Option<crate::model::JobStatus>) -> Self {
            Self { status: input, ..self }
        }
        /// <p>A short, human-readable string to provide additional details about the current status of the job.</p>
        pub fn status_reason(self, input: impl Into<std::string::String>) -> Self {
            Self {
                status_reason: Some(input.into()),
                ..self
            }
        }
        /// <p>Optional-valued form of [`Self::status_reason`].</p>
        pub fn set_status_reason(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                status_reason: input,
                ..self
            }
        }
        /// <p>The Unix timestamp for when the job was started (transition from <code>STARTING</code> to
        /// <code>RUNNING</code>).</p>
        pub fn started_at(self, input: i64) -> Self {
            Self {
                started_at: Some(input),
                ..self
            }
        }
        /// <p>Optional-valued form of [`Self::started_at`].</p>
        pub fn set_started_at(self, input: std::option::Option<i64>) -> Self {
            Self { started_at: input, ..self }
        }
        /// <p>The Unix timestamp for when the job was stopped (transition from <code>RUNNING</code> to a terminal
        /// state such as <code>SUCCEEDED</code> or <code>FAILED</code>).</p>
        pub fn stopped_at(self, input: i64) -> Self {
            Self {
                stopped_at: Some(input),
                ..self
            }
        }
        /// <p>Optional-valued form of [`Self::stopped_at`].</p>
        pub fn set_stopped_at(self, input: std::option::Option<i64>) -> Self {
            Self { stopped_at: input, ..self }
        }
        /// <p>An object representing the details of the container that's associated with the job.</p>
        pub fn container(self, input: crate::model::ContainerSummary) -> Self {
            Self {
                container: Some(input),
                ..self
            }
        }
        /// <p>Optional-valued form of [`Self::container`].</p>
        pub fn set_container(
            self,
            input: std::option::Option<crate::model::ContainerSummary>,
        ) -> Self {
            Self {
                container: input,
                ..self
            }
        }
        /// <p>The array properties of the job, if it is an array job.</p>
        pub fn array_properties(self, input: crate::model::ArrayPropertiesSummary) -> Self {
            Self {
                array_properties: Some(input),
                ..self
            }
        }
        /// <p>Optional-valued form of [`Self::array_properties`].</p>
        pub fn set_array_properties(
            self,
            input: std::option::Option<crate::model::ArrayPropertiesSummary>,
        ) -> Self {
            Self {
                array_properties: input,
                ..self
            }
        }
        /// <p>The node properties for a single node in a job summary list.</p>
        /// <note>
        /// <p>This isn't applicable to jobs that are running on Fargate resources.</p>
        /// </note>
        pub fn node_properties(self, input: crate::model::NodePropertiesSummary) -> Self {
            Self {
                node_properties: Some(input),
                ..self
            }
        }
        /// <p>Optional-valued form of [`Self::node_properties`].</p>
        pub fn set_node_properties(
            self,
            input: std::option::Option<crate::model::NodePropertiesSummary>,
        ) -> Self {
            Self {
                node_properties: input,
                ..self
            }
        }
        /// <p>The Amazon Resource Name (ARN) of the job definition.</p>
        pub fn job_definition(self, input: impl Into<std::string::String>) -> Self {
            Self {
                job_definition: Some(input.into()),
                ..self
            }
        }
        /// <p>Optional-valued form of [`Self::job_definition`].</p>
        pub fn set_job_definition(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                job_definition: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`JobSummary`](crate::model::JobSummary)
        pub fn build(self) -> crate::model::JobSummary {
            let Self {
                job_arn,
                job_id,
                job_name,
                created_at,
                status,
                status_reason,
                started_at,
                stopped_at,
                container,
                array_properties,
                node_properties,
                job_definition,
            } = self;
            crate::model::JobSummary {
                job_arn,
                job_id,
                job_name,
                // Unset scalar timestamps fall back to 0.
                created_at: created_at.unwrap_or_default(),
                status,
                status_reason,
                started_at: started_at.unwrap_or_default(),
                stopped_at: stopped_at.unwrap_or_default(),
                container,
                array_properties,
                node_properties,
                job_definition,
            }
        }
    }
}
impl JobSummary {
    /// Creates a new builder-style object to manufacture [`JobSummary`](crate::model::JobSummary)
    pub fn builder() -> crate::model::job_summary::Builder {
        // The builder derives `Default`, so an empty builder is just its default value.
        Default::default()
    }
}
/// <p>An object representing the properties of a node that's associated with a multi-node parallel job.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct NodePropertiesSummary {
    /// <p>Specifies whether the current node is the main node for a multi-node parallel job.</p>
    pub is_main_node: bool,
    /// <p>The number of nodes associated with a multi-node parallel job.</p>
    pub num_nodes: i32,
    /// <p>The node index for the node. Node index numbering begins at zero. This index is also available on the node with
    /// the <code>AWS_BATCH_JOB_NODE_INDEX</code> environment variable.</p>
    pub node_index: i32,
}
impl NodePropertiesSummary {
    /// <p>Specifies whether the current node is the main node for a multi-node parallel job.</p>
    pub fn is_main_node(&self) -> bool {
        self.is_main_node
    }
    /// <p>The number of nodes associated with a multi-node parallel job.</p>
    pub fn num_nodes(&self) -> i32 {
        self.num_nodes
    }
    /// <p>The zero-based node index, also exposed on the node via the
    /// <code>AWS_BATCH_JOB_NODE_INDEX</code> environment variable.</p>
    pub fn node_index(&self) -> i32 {
        self.node_index
    }
}
impl std::fmt::Debug for NodePropertiesSummary {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("NodePropertiesSummary")
            .field("is_main_node", &self.is_main_node)
            .field("num_nodes", &self.num_nodes)
            .field("node_index", &self.node_index)
            .finish()
    }
}
/// See [`NodePropertiesSummary`](crate::model::NodePropertiesSummary)
pub mod node_properties_summary {
    /// A builder for [`NodePropertiesSummary`](crate::model::NodePropertiesSummary)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) is_main_node: std::option::Option<bool>,
        pub(crate) num_nodes: std::option::Option<i32>,
        pub(crate) node_index: std::option::Option<i32>,
    }
    impl Builder {
        /// <p>Specifies whether the current node is the main node for a multi-node parallel job.</p>
        pub fn is_main_node(self, input: bool) -> Self {
            Self {
                is_main_node: Some(input),
                ..self
            }
        }
        /// <p>Optional-valued form of [`Self::is_main_node`].</p>
        pub fn set_is_main_node(self, input: std::option::Option<bool>) -> Self {
            Self {
                is_main_node: input,
                ..self
            }
        }
        /// <p>The number of nodes associated with a multi-node parallel job.</p>
        pub fn num_nodes(self, input: i32) -> Self {
            Self {
                num_nodes: Some(input),
                ..self
            }
        }
        /// <p>Optional-valued form of [`Self::num_nodes`].</p>
        pub fn set_num_nodes(self, input: std::option::Option<i32>) -> Self {
            Self { num_nodes: input, ..self }
        }
        /// <p>The zero-based node index, also exposed on the node via the
        /// <code>AWS_BATCH_JOB_NODE_INDEX</code> environment variable.</p>
        pub fn node_index(self, input: i32) -> Self {
            Self {
                node_index: Some(input),
                ..self
            }
        }
        /// <p>Optional-valued form of [`Self::node_index`].</p>
        pub fn set_node_index(self, input: std::option::Option<i32>) -> Self {
            Self { node_index: input, ..self }
        }
        /// Consumes the builder and constructs a [`NodePropertiesSummary`](crate::model::NodePropertiesSummary)
        pub fn build(self) -> crate::model::NodePropertiesSummary {
            let Self {
                is_main_node,
                num_nodes,
                node_index,
            } = self;
            crate::model::NodePropertiesSummary {
                // Unset scalars fall back to their zero defaults.
                is_main_node: is_main_node.unwrap_or_default(),
                num_nodes: num_nodes.unwrap_or_default(),
                node_index: node_index.unwrap_or_default(),
            }
        }
    }
}
impl NodePropertiesSummary {
    /// Creates a new builder-style object to manufacture [`NodePropertiesSummary`](crate::model::NodePropertiesSummary)
    pub fn builder() -> crate::model::node_properties_summary::Builder {
        // The builder derives `Default`, so an empty builder is just its default value.
        Default::default()
    }
}
/// <p>An object representing the array properties of a job.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ArrayPropertiesSummary {
    /// <p>The size of the array job. This parameter is returned for parent array jobs.</p>
    pub size: i32,
    /// <p>The job index within the array that's associated with this job. This parameter is returned for children of array
    /// jobs.</p>
    pub index: i32,
}
impl ArrayPropertiesSummary {
    /// <p>The size of the array job; returned for parent array jobs.</p>
    pub fn size(&self) -> i32 {
        self.size
    }
    /// <p>The job index within the array associated with this job; returned for children of array jobs.</p>
    pub fn index(&self) -> i32 {
        self.index
    }
}
impl std::fmt::Debug for ArrayPropertiesSummary {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ArrayPropertiesSummary")
            .field("size", &self.size)
            .field("index", &self.index)
            .finish()
    }
}
/// See [`ArrayPropertiesSummary`](crate::model::ArrayPropertiesSummary)
pub mod array_properties_summary {
    /// A builder for [`ArrayPropertiesSummary`](crate::model::ArrayPropertiesSummary)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) size: std::option::Option<i32>,
        pub(crate) index: std::option::Option<i32>,
    }
    impl Builder {
        /// <p>The size of the array job; returned for parent array jobs.</p>
        pub fn size(self, input: i32) -> Self {
            Self {
                size: Some(input),
                ..self
            }
        }
        /// <p>Optional-valued form of [`Self::size`].</p>
        pub fn set_size(self, input: std::option::Option<i32>) -> Self {
            Self { size: input, ..self }
        }
        /// <p>The job index within the array associated with this job; returned for children of array jobs.</p>
        pub fn index(self, input: i32) -> Self {
            Self {
                index: Some(input),
                ..self
            }
        }
        /// <p>Optional-valued form of [`Self::index`].</p>
        pub fn set_index(self, input: std::option::Option<i32>) -> Self {
            Self { index: input, ..self }
        }
        /// Consumes the builder and constructs a [`ArrayPropertiesSummary`](crate::model::ArrayPropertiesSummary)
        pub fn build(self) -> crate::model::ArrayPropertiesSummary {
            let Self { size, index } = self;
            crate::model::ArrayPropertiesSummary {
                // Unset scalars fall back to their zero defaults.
                size: size.unwrap_or_default(),
                index: index.unwrap_or_default(),
            }
        }
    }
}
impl ArrayPropertiesSummary {
    /// Creates a new builder-style object to manufacture [`ArrayPropertiesSummary`](crate::model::ArrayPropertiesSummary)
    pub fn builder() -> crate::model::array_properties_summary::Builder {
        // The builder derives `Default`, so an empty builder is just its default value.
        Default::default()
    }
}
/// <p>An object representing summary details of a container within a job.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ContainerSummary {
    /// <p>The exit code to return upon completion.</p>
    pub exit_code: i32,
    /// <p>A short (255 max characters) human-readable string to provide additional details about a running or stopped
    /// container.</p>
    pub reason: std::option::Option<std::string::String>,
}
impl ContainerSummary {
    /// <p>The exit code to return upon completion.</p>
    pub fn exit_code(&self) -> i32 {
        self.exit_code
    }
    /// <p>A short (255 max characters) human-readable string with additional details about a running or stopped
    /// container, if set.</p>
    pub fn reason(&self) -> std::option::Option<&str> {
        self.reason.as_ref().map(String::as_str)
    }
}
impl std::fmt::Debug for ContainerSummary {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ContainerSummary")
            .field("exit_code", &self.exit_code)
            .field("reason", &self.reason)
            .finish()
    }
}
/// See [`ContainerSummary`](crate::model::ContainerSummary)
pub mod container_summary {
    /// A builder for [`ContainerSummary`](crate::model::ContainerSummary)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) exit_code: std::option::Option<i32>,
        pub(crate) reason: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Sets the exit code the container returned upon completion.
        pub fn exit_code(self, input: i32) -> Self {
            Self { exit_code: Some(input), ..self }
        }
        /// Replaces the exit code with an optional value.
        pub fn set_exit_code(self, input: std::option::Option<i32>) -> Self {
            Self { exit_code: input, ..self }
        }
        /// Sets a short (255 max characters) human-readable string with additional
        /// details about a running or stopped container.
        pub fn reason(self, input: impl Into<std::string::String>) -> Self {
            Self { reason: Some(input.into()), ..self }
        }
        /// Replaces the status detail with an optional value.
        pub fn set_reason(self, input: std::option::Option<std::string::String>) -> Self {
            Self { reason: input, ..self }
        }
        /// Consumes the builder and constructs a [`ContainerSummary`](crate::model::ContainerSummary);
        /// a missing exit code defaults to 0.
        pub fn build(self) -> crate::model::ContainerSummary {
            let Builder { exit_code, reason } = self;
            crate::model::ContainerSummary {
                exit_code: exit_code.unwrap_or(0),
                reason,
            }
        }
    }
}
impl ContainerSummary {
    /// Returns a fresh, all-unset builder for assembling a
    /// [`ContainerSummary`](crate::model::ContainerSummary).
    pub fn builder() -> crate::model::container_summary::Builder {
        Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
// Variants mirror the service's wire strings (see the `From<&str>` impl below):
// FAILED, PENDING, RUNNABLE, RUNNING, STARTING, SUBMITTED, SUCCEEDED.
pub enum JobStatus {
    #[allow(missing_docs)] // documentation missing in model
    Failed,
    #[allow(missing_docs)] // documentation missing in model
    Pending,
    #[allow(missing_docs)] // documentation missing in model
    Runnable,
    #[allow(missing_docs)] // documentation missing in model
    Running,
    #[allow(missing_docs)] // documentation missing in model
    Starting,
    #[allow(missing_docs)] // documentation missing in model
    Submitted,
    #[allow(missing_docs)] // documentation missing in model
    Succeeded,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for JobStatus {
fn from(s: &str) -> Self {
match s {
"FAILED" => JobStatus::Failed,
"PENDING" => JobStatus::Pending,
"RUNNABLE" => JobStatus::Runnable,
"RUNNING" => JobStatus::Running,
"STARTING" => JobStatus::Starting,
"SUBMITTED" => JobStatus::Submitted,
"SUCCEEDED" => JobStatus::Succeeded,
other => JobStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for JobStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(JobStatus::from(s))
}
}
impl JobStatus {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
JobStatus::Failed => "FAILED",
JobStatus::Pending => "PENDING",
JobStatus::Runnable => "RUNNABLE",
JobStatus::Running => "RUNNING",
JobStatus::Starting => "STARTING",
JobStatus::Submitted => "SUBMITTED",
JobStatus::Succeeded => "SUCCEEDED",
JobStatus::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"FAILED",
"PENDING",
"RUNNABLE",
"RUNNING",
"STARTING",
"SUBMITTED",
"SUCCEEDED",
]
}
}
impl AsRef<str> for JobStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>A filter name and value pair that's used to return a more specific list of results from a <code>ListJobs</code>
/// API operation.</p>
// Both fields are optional; build instances via `KeyValuesPair::builder()` (below).
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct KeyValuesPair {
    /// <p>The name of the filter. Filter names are case sensitive.</p>
    pub name: std::option::Option<std::string::String>,
    /// <p>The filter values.</p>
    pub values: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl KeyValuesPair {
    /// Returns the filter name, if set. Filter names are case sensitive.
    pub fn name(&self) -> std::option::Option<&str> {
        self.name.as_ref().map(std::string::String::as_str)
    }
    /// Returns the filter values as a borrowed slice, if set.
    pub fn values(&self) -> std::option::Option<&[std::string::String]> {
        self.values.as_ref().map(std::vec::Vec::as_slice)
    }
}
impl std::fmt::Debug for KeyValuesPair {
    /// Formats the struct with its field names and values.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("KeyValuesPair")
            .field("name", &self.name)
            .field("values", &self.values)
            .finish()
    }
}
/// See [`KeyValuesPair`](crate::model::KeyValuesPair)
pub mod key_values_pair {
    /// A builder for [`KeyValuesPair`](crate::model::KeyValuesPair)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) values: std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// Sets the filter name. Filter names are case sensitive.
        pub fn name(self, input: impl Into<std::string::String>) -> Self {
            Self { name: Some(input.into()), ..self }
        }
        /// Replaces the filter name with an optional value.
        pub fn set_name(self, input: std::option::Option<std::string::String>) -> Self {
            Self { name: input, ..self }
        }
        /// Appends a single filter value to `values`.
        ///
        /// To override the contents of this collection use [`set_values`](Self::set_values).
        pub fn values(mut self, input: impl Into<std::string::String>) -> Self {
            self.values
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// Replaces the filter values with an optional collection.
        pub fn set_values(
            self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            Self { values: input, ..self }
        }
        /// Consumes the builder and constructs a [`KeyValuesPair`](crate::model::KeyValuesPair)
        pub fn build(self) -> crate::model::KeyValuesPair {
            let Builder { name, values } = self;
            crate::model::KeyValuesPair { name, values }
        }
    }
}
impl KeyValuesPair {
    /// Returns a fresh, all-unset builder for assembling a
    /// [`KeyValuesPair`](crate::model::KeyValuesPair).
    pub fn builder() -> crate::model::key_values_pair::Builder {
        Default::default()
    }
}
/// <p>An object representing a scheduling
/// policy.</p>
// Pure data carrier: all fields are optional and public; read them through the
// accessor impl below, construct via `SchedulingPolicyDetail::builder()`.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct SchedulingPolicyDetail {
    /// <p>The name of the scheduling
    /// policy.</p>
    pub name: std::option::Option<std::string::String>,
    /// <p>Amazon Resource Name (ARN) of the scheduling policy. An example would be
    /// <code>arn:<i>aws</i>:batch:<i>us-east-1</i>:<i>123456789012</i>:scheduling-policy/<i>HighPriority</i>
    /// </code>
    /// </p>
    pub arn: std::option::Option<std::string::String>,
    /// <p>The fair share policy for the scheduling
    /// policy.</p>
    pub fairshare_policy: std::option::Option<crate::model::FairsharePolicy>,
    /// <p>The tags that you apply to the scheduling policy to help you categorize and organize your resources. Each tag
    /// consists of a key and an optional value. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services Resources</a> in <i>Amazon Web Services General
    /// Reference</i>.</p>
    pub tags:
        std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
}
impl SchedulingPolicyDetail {
    /// Returns the name of the scheduling policy, if set.
    pub fn name(&self) -> std::option::Option<&str> {
        self.name.as_ref().map(std::string::String::as_str)
    }
    /// Returns the Amazon Resource Name (ARN) of the scheduling policy, if set,
    /// e.g. `arn:aws:batch:us-east-1:123456789012:scheduling-policy/HighPriority`.
    pub fn arn(&self) -> std::option::Option<&str> {
        self.arn.as_ref().map(std::string::String::as_str)
    }
    /// Returns the fair share policy attached to this scheduling policy, if set.
    pub fn fairshare_policy(&self) -> std::option::Option<&crate::model::FairsharePolicy> {
        match &self.fairshare_policy {
            Some(policy) => Some(policy),
            None => None,
        }
    }
    /// Returns the resource tags applied to the scheduling policy, if set. See
    /// <https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html> for details.
    pub fn tags(
        &self,
    ) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
    {
        self.tags.as_ref()
    }
}
impl std::fmt::Debug for SchedulingPolicyDetail {
    /// Formats the struct with its field names and values.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("SchedulingPolicyDetail")
            .field("name", &self.name)
            .field("arn", &self.arn)
            .field("fairshare_policy", &self.fairshare_policy)
            .field("tags", &self.tags)
            .finish()
    }
}
/// See [`SchedulingPolicyDetail`](crate::model::SchedulingPolicyDetail)
pub mod scheduling_policy_detail {
    /// A builder for [`SchedulingPolicyDetail`](crate::model::SchedulingPolicyDetail)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) arn: std::option::Option<std::string::String>,
        pub(crate) fairshare_policy: std::option::Option<crate::model::FairsharePolicy>,
        pub(crate) tags: std::option::Option<
            std::collections::HashMap<std::string::String, std::string::String>,
        >,
    }
    impl Builder {
        /// Sets the name of the scheduling policy.
        pub fn name(self, input: impl Into<std::string::String>) -> Self {
            Self { name: Some(input.into()), ..self }
        }
        /// Replaces the scheduling policy name with an optional value.
        pub fn set_name(self, input: std::option::Option<std::string::String>) -> Self {
            Self { name: input, ..self }
        }
        /// Sets the Amazon Resource Name (ARN) of the scheduling policy, e.g.
        /// `arn:aws:batch:us-east-1:123456789012:scheduling-policy/HighPriority`.
        pub fn arn(self, input: impl Into<std::string::String>) -> Self {
            Self { arn: Some(input.into()), ..self }
        }
        /// Replaces the ARN with an optional value.
        pub fn set_arn(self, input: std::option::Option<std::string::String>) -> Self {
            Self { arn: input, ..self }
        }
        /// Sets the fair share policy for the scheduling policy.
        pub fn fairshare_policy(self, input: crate::model::FairsharePolicy) -> Self {
            Self { fairshare_policy: Some(input), ..self }
        }
        /// Replaces the fair share policy with an optional value.
        pub fn set_fairshare_policy(
            self,
            input: std::option::Option<crate::model::FairsharePolicy>,
        ) -> Self {
            Self { fairshare_policy: input, ..self }
        }
        /// Adds one key-value pair to `tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).
        /// See <https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html> for
        /// how resource tags are used to categorize and organize resources.
        pub fn tags(
            mut self,
            k: impl Into<std::string::String>,
            v: impl Into<std::string::String>,
        ) -> Self {
            self.tags
                .get_or_insert_with(std::collections::HashMap::new)
                .insert(k.into(), v.into());
            self
        }
        /// Replaces the tags with an optional map.
        pub fn set_tags(
            self,
            input: std::option::Option<
                std::collections::HashMap<std::string::String, std::string::String>,
            >,
        ) -> Self {
            Self { tags: input, ..self }
        }
        /// Consumes the builder and constructs a [`SchedulingPolicyDetail`](crate::model::SchedulingPolicyDetail)
        pub fn build(self) -> crate::model::SchedulingPolicyDetail {
            let Builder { name, arn, fairshare_policy, tags } = self;
            crate::model::SchedulingPolicyDetail {
                name,
                arn,
                fairshare_policy,
                tags,
            }
        }
    }
}
impl SchedulingPolicyDetail {
    /// Returns a fresh, all-unset builder for assembling a
    /// [`SchedulingPolicyDetail`](crate::model::SchedulingPolicyDetail).
    pub fn builder() -> crate::model::scheduling_policy_detail::Builder {
        Default::default()
    }
}
/// <p>An object representing an Batch job.</p>
// Pure data carrier: all fields are public and read via the accessor impl below;
// `#[non_exhaustive]` allows new fields without a breaking change.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct JobDetail {
    /// <p>The Amazon Resource Name (ARN) of the job.</p>
    pub job_arn: std::option::Option<std::string::String>,
    /// <p>The name of the job.</p>
    pub job_name: std::option::Option<std::string::String>,
    /// <p>The ID for the job.</p>
    pub job_id: std::option::Option<std::string::String>,
    /// <p>The Amazon Resource Name (ARN) of the job queue that the job is associated with.</p>
    pub job_queue: std::option::Option<std::string::String>,
    /// <p>The current status for the job.</p>
    /// <note>
    /// <p>If your jobs don't progress to <code>STARTING</code>, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#job_stuck_in_runnable">Jobs Stuck in RUNNABLE Status</a> in the
    /// troubleshooting section of the <i>Batch User Guide</i>.</p>
    /// </note>
    pub status: std::option::Option<crate::model::JobStatus>,
    /// <p>The share identifier for the job.</p>
    pub share_identifier: std::option::Option<std::string::String>,
    /// <p>The scheduling policy of the job definition. This will only affect jobs in job queues with a fair share policy.
    /// Jobs with a higher scheduling priority will be scheduled before jobs with a lower scheduling priority.</p>
    pub scheduling_priority: i32,
    /// <p>A list of job attempts associated with this job.</p>
    pub attempts: std::option::Option<std::vec::Vec<crate::model::AttemptDetail>>,
    /// <p>A short, human-readable string to provide additional details about the current status of the job.</p>
    pub status_reason: std::option::Option<std::string::String>,
    /// <p>The Unix timestamp (in milliseconds) for when the job was created. For non-array jobs and parent array jobs,
    /// this is when the job entered the <code>SUBMITTED</code> state (at the time <a>SubmitJob</a> was called).
    /// For array child jobs, this is when the child job was spawned by its parent and entered the <code>PENDING</code>
    /// state.</p>
    pub created_at: i64,
    /// <p>The retry strategy to use for this job if an attempt fails.</p>
    pub retry_strategy: std::option::Option<crate::model::RetryStrategy>,
    /// <p>The Unix timestamp (in milliseconds) for when the job was started (when the job transitioned from the
    /// <code>STARTING</code> state to the <code>RUNNING</code> state). This parameter isn't provided for child jobs of
    /// array jobs or multi-node parallel jobs.</p>
    pub started_at: i64,
    /// <p>The Unix timestamp (in milliseconds) for when the job was stopped (when the job transitioned from the
    /// <code>RUNNING</code> state to a terminal state, such as <code>SUCCEEDED</code> or <code>FAILED</code>).</p>
    pub stopped_at: i64,
    /// <p>A list of job IDs that this job depends on.</p>
    pub depends_on: std::option::Option<std::vec::Vec<crate::model::JobDependency>>,
    /// <p>The job definition that's used by this job.</p>
    pub job_definition: std::option::Option<std::string::String>,
    /// <p>Additional parameters passed to the job that replace parameter substitution placeholders or override any
    /// corresponding parameter defaults from the job definition.</p>
    pub parameters:
        std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
    /// <p>An object representing the details of the container that's associated with the job.</p>
    pub container: std::option::Option<crate::model::ContainerDetail>,
    /// <p>An object representing the details of a node that's associated with a multi-node parallel job.</p>
    pub node_details: std::option::Option<crate::model::NodeDetails>,
    /// <p>An object representing the node properties of a multi-node parallel job.</p>
    /// <note>
    /// <p>This isn't applicable to jobs that are running on Fargate resources.</p>
    /// </note>
    pub node_properties: std::option::Option<crate::model::NodeProperties>,
    /// <p>The array properties of the job, if it is an array job.</p>
    pub array_properties: std::option::Option<crate::model::ArrayPropertiesDetail>,
    /// <p>The timeout configuration for the job.</p>
    pub timeout: std::option::Option<crate::model::JobTimeout>,
    /// <p>The tags applied to the job.</p>
    pub tags:
        std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
    /// <p>Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no
    /// value is specified, the tags aren't propagated. Tags can only be propagated to the tasks during task creation. For
    /// tags with the same name, job tags are given priority over job definitions tags. If the total number of combined tags
    /// from the job and job definition is over 50, the job is moved to the <code>FAILED</code> state.</p>
    pub propagate_tags: bool,
    /// <p>The platform capabilities required by the job definition. If no value is specified, it defaults to
    /// <code>EC2</code>. Jobs run on Fargate resources specify <code>FARGATE</code>.</p>
    pub platform_capabilities: std::option::Option<std::vec::Vec<crate::model::PlatformCapability>>,
}
impl JobDetail {
    /// Returns the Amazon Resource Name (ARN) of the job, if set.
    pub fn job_arn(&self) -> std::option::Option<&str> {
        self.job_arn.as_ref().map(std::string::String::as_str)
    }
    /// Returns the name of the job, if set.
    pub fn job_name(&self) -> std::option::Option<&str> {
        self.job_name.as_ref().map(std::string::String::as_str)
    }
    /// Returns the ID for the job, if set.
    pub fn job_id(&self) -> std::option::Option<&str> {
        self.job_id.as_ref().map(std::string::String::as_str)
    }
    /// Returns the ARN of the job queue that the job is associated with, if set.
    pub fn job_queue(&self) -> std::option::Option<&str> {
        self.job_queue.as_ref().map(std::string::String::as_str)
    }
    /// Returns the current status for the job, if set. If jobs don't progress to
    /// `STARTING`, see "Jobs Stuck in RUNNABLE Status" in the troubleshooting
    /// section of the *Batch User Guide*:
    /// <https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#job_stuck_in_runnable>
    pub fn status(&self) -> std::option::Option<&crate::model::JobStatus> {
        self.status.as_ref()
    }
    /// Returns the share identifier for the job, if set.
    pub fn share_identifier(&self) -> std::option::Option<&str> {
        self.share_identifier.as_ref().map(std::string::String::as_str)
    }
    /// Returns the scheduling priority; only affects jobs in job queues with a fair
    /// share policy, where higher priorities are scheduled first.
    pub fn scheduling_priority(&self) -> i32 {
        self.scheduling_priority
    }
    /// Returns the list of job attempts associated with this job, if set.
    pub fn attempts(&self) -> std::option::Option<&[crate::model::AttemptDetail]> {
        self.attempts.as_ref().map(std::vec::Vec::as_slice)
    }
    /// Returns a short, human-readable detail about the current status, if set.
    pub fn status_reason(&self) -> std::option::Option<&str> {
        self.status_reason.as_ref().map(std::string::String::as_str)
    }
    /// Returns the Unix timestamp (ms) when the job was created: entry into the
    /// `SUBMITTED` state for non-array and parent array jobs (when `SubmitJob` was
    /// called), entry into `PENDING` for array child jobs.
    pub fn created_at(&self) -> i64 {
        self.created_at
    }
    /// Returns the retry strategy used when an attempt fails, if set.
    pub fn retry_strategy(&self) -> std::option::Option<&crate::model::RetryStrategy> {
        self.retry_strategy.as_ref()
    }
    /// Returns the Unix timestamp (ms) when the job moved from `STARTING` to
    /// `RUNNING`; not provided for child jobs of array or multi-node parallel jobs.
    pub fn started_at(&self) -> i64 {
        self.started_at
    }
    /// Returns the Unix timestamp (ms) when the job left `RUNNING` for a terminal
    /// state such as `SUCCEEDED` or `FAILED`.
    pub fn stopped_at(&self) -> i64 {
        self.stopped_at
    }
    /// Returns the list of job IDs that this job depends on, if set.
    pub fn depends_on(&self) -> std::option::Option<&[crate::model::JobDependency]> {
        self.depends_on.as_ref().map(std::vec::Vec::as_slice)
    }
    /// Returns the job definition used by this job, if set.
    pub fn job_definition(&self) -> std::option::Option<&str> {
        self.job_definition.as_ref().map(std::string::String::as_str)
    }
    /// Returns parameters that replace substitution placeholders or override
    /// job-definition defaults, if set.
    pub fn parameters(
        &self,
    ) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
    {
        self.parameters.as_ref()
    }
    /// Returns the details of the container associated with the job, if set.
    pub fn container(&self) -> std::option::Option<&crate::model::ContainerDetail> {
        self.container.as_ref()
    }
    /// Returns the details of a node associated with a multi-node parallel job, if set.
    pub fn node_details(&self) -> std::option::Option<&crate::model::NodeDetails> {
        self.node_details.as_ref()
    }
    /// Returns the node properties of a multi-node parallel job, if set; not
    /// applicable to jobs running on Fargate resources.
    pub fn node_properties(&self) -> std::option::Option<&crate::model::NodeProperties> {
        self.node_properties.as_ref()
    }
    /// Returns the array properties, if this is an array job.
    pub fn array_properties(&self) -> std::option::Option<&crate::model::ArrayPropertiesDetail> {
        self.array_properties.as_ref()
    }
    /// Returns the timeout configuration for the job, if set.
    pub fn timeout(&self) -> std::option::Option<&crate::model::JobTimeout> {
        self.timeout.as_ref()
    }
    /// Returns the tags applied to the job, if set.
    pub fn tags(
        &self,
    ) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
    {
        self.tags.as_ref()
    }
    /// Returns whether tags propagate from the job or job definition to the
    /// corresponding Amazon ECS task (tags aren't propagated when unspecified).
    pub fn propagate_tags(&self) -> bool {
        self.propagate_tags
    }
    /// Returns the required platform capabilities, if set (`EC2` by default;
    /// Fargate jobs specify `FARGATE`).
    pub fn platform_capabilities(
        &self,
    ) -> std::option::Option<&[crate::model::PlatformCapability]> {
        self.platform_capabilities.as_ref().map(std::vec::Vec::as_slice)
    }
}
impl std::fmt::Debug for JobDetail {
    /// Formats every field of the job using the standard struct debug helper.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("JobDetail")
            .field("job_arn", &self.job_arn)
            .field("job_name", &self.job_name)
            .field("job_id", &self.job_id)
            .field("job_queue", &self.job_queue)
            .field("status", &self.status)
            .field("share_identifier", &self.share_identifier)
            .field("scheduling_priority", &self.scheduling_priority)
            .field("attempts", &self.attempts)
            .field("status_reason", &self.status_reason)
            .field("created_at", &self.created_at)
            .field("retry_strategy", &self.retry_strategy)
            .field("started_at", &self.started_at)
            .field("stopped_at", &self.stopped_at)
            .field("depends_on", &self.depends_on)
            .field("job_definition", &self.job_definition)
            .field("parameters", &self.parameters)
            .field("container", &self.container)
            .field("node_details", &self.node_details)
            .field("node_properties", &self.node_properties)
            .field("array_properties", &self.array_properties)
            .field("timeout", &self.timeout)
            .field("tags", &self.tags)
            .field("propagate_tags", &self.propagate_tags)
            .field("platform_capabilities", &self.platform_capabilities)
            .finish()
    }
}
/// See [`JobDetail`](crate::model::JobDetail)
pub mod job_detail {
/// A builder for [`JobDetail`](crate::model::JobDetail)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
// Every field is the `Option`-wrapped counterpart of the matching `JobDetail`
// field, so the derived `Default` yields an all-`None` (fully unset) builder.
pub struct Builder {
    pub(crate) job_arn: std::option::Option<std::string::String>,
    pub(crate) job_name: std::option::Option<std::string::String>,
    pub(crate) job_id: std::option::Option<std::string::String>,
    pub(crate) job_queue: std::option::Option<std::string::String>,
    pub(crate) status: std::option::Option<crate::model::JobStatus>,
    pub(crate) share_identifier: std::option::Option<std::string::String>,
    pub(crate) scheduling_priority: std::option::Option<i32>,
    pub(crate) attempts: std::option::Option<std::vec::Vec<crate::model::AttemptDetail>>,
    pub(crate) status_reason: std::option::Option<std::string::String>,
    pub(crate) created_at: std::option::Option<i64>,
    pub(crate) retry_strategy: std::option::Option<crate::model::RetryStrategy>,
    pub(crate) started_at: std::option::Option<i64>,
    pub(crate) stopped_at: std::option::Option<i64>,
    pub(crate) depends_on: std::option::Option<std::vec::Vec<crate::model::JobDependency>>,
    pub(crate) job_definition: std::option::Option<std::string::String>,
    pub(crate) parameters: std::option::Option<
        std::collections::HashMap<std::string::String, std::string::String>,
    >,
    pub(crate) container: std::option::Option<crate::model::ContainerDetail>,
    pub(crate) node_details: std::option::Option<crate::model::NodeDetails>,
    pub(crate) node_properties: std::option::Option<crate::model::NodeProperties>,
    pub(crate) array_properties: std::option::Option<crate::model::ArrayPropertiesDetail>,
    pub(crate) timeout: std::option::Option<crate::model::JobTimeout>,
    pub(crate) tags: std::option::Option<
        std::collections::HashMap<std::string::String, std::string::String>,
    >,
    pub(crate) propagate_tags: std::option::Option<bool>,
    pub(crate) platform_capabilities:
        std::option::Option<std::vec::Vec<crate::model::PlatformCapability>>,
}
impl Builder {
/// <p>The Amazon Resource Name (ARN) of the job.</p>
pub fn job_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.job_arn = Some(input.into());
self
}
/// <p>The Amazon Resource Name (ARN) of the job.</p>
pub fn set_job_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.job_arn = input;
self
}
/// <p>The name of the job.</p>
pub fn job_name(mut self, input: impl Into<std::string::String>) -> Self {
self.job_name = Some(input.into());
self
}
/// <p>The name of the job.</p>
pub fn set_job_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.job_name = input;
self
}
/// <p>The ID for the job.</p>
pub fn job_id(mut self, input: impl Into<std::string::String>) -> Self {
self.job_id = Some(input.into());
self
}
/// <p>The ID for the job.</p>
pub fn set_job_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.job_id = input;
self
}
/// <p>The Amazon Resource Name (ARN) of the job queue that the job is associated with.</p>
pub fn job_queue(mut self, input: impl Into<std::string::String>) -> Self {
self.job_queue = Some(input.into());
self
}
/// <p>The Amazon Resource Name (ARN) of the job queue that the job is associated with.</p>
pub fn set_job_queue(mut self, input: std::option::Option<std::string::String>) -> Self {
self.job_queue = input;
self
}
/// <p>The current status for the job.</p>
/// <note>
/// <p>If your jobs don't progress to <code>STARTING</code>, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#job_stuck_in_runnable">Jobs Stuck in RUNNABLE Status</a> in the
/// troubleshooting section of the <i>Batch User Guide</i>.</p>
/// </note>
pub fn status(mut self, input: crate::model::JobStatus) -> Self {
self.status = Some(input);
self
}
/// <p>The current status for the job.</p>
/// <note>
/// <p>If your jobs don't progress to <code>STARTING</code>, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#job_stuck_in_runnable">Jobs Stuck in RUNNABLE Status</a> in the
/// troubleshooting section of the <i>Batch User Guide</i>.</p>
/// </note>
pub fn set_status(mut self, input: std::option::Option<crate::model::JobStatus>) -> Self {
self.status = input;
self
}
/// <p>The share identifier for the job.</p>
pub fn share_identifier(mut self, input: impl Into<std::string::String>) -> Self {
self.share_identifier = Some(input.into());
self
}
/// <p>The share identifier for the job.</p>
pub fn set_share_identifier(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.share_identifier = input;
self
}
/// <p>The scheduling policy of the job definition. This will only affect jobs in job queues with a fair share policy.
/// Jobs with a higher scheduling priority will be scheduled before jobs with a lower scheduling priority.</p>
pub fn scheduling_priority(mut self, input: i32) -> Self {
self.scheduling_priority = Some(input);
self
}
/// <p>The scheduling policy of the job definition. This will only affect jobs in job queues with a fair share policy.
/// Jobs with a higher scheduling priority will be scheduled before jobs with a lower scheduling priority.</p>
pub fn set_scheduling_priority(mut self, input: std::option::Option<i32>) -> Self {
self.scheduling_priority = input;
self
}
/// Appends one job attempt to `attempts`.
///
/// To override the contents of this collection use [`set_attempts`](Self::set_attempts).
pub fn attempts(mut self, input: impl Into<crate::model::AttemptDetail>) -> Self {
    self.attempts
        .get_or_insert_with(std::vec::Vec::new)
        .push(input.into());
    self
}
/// <p>A list of job attempts associated with this job.</p>
pub fn set_attempts(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::AttemptDetail>>,
) -> Self {
self.attempts = input;
self
}
/// <p>A short, human-readable string to provide additional details about the current status of the job.</p>
pub fn status_reason(mut self, input: impl Into<std::string::String>) -> Self {
self.status_reason = Some(input.into());
self
}
/// <p>A short, human-readable string to provide additional details about the current status of the job.</p>
pub fn set_status_reason(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.status_reason = input;
self
}
/// <p>The Unix timestamp (in milliseconds) for when the job was created. For non-array jobs and parent array jobs,
/// this is when the job entered the <code>SUBMITTED</code> state (at the time <a>SubmitJob</a> was called).
/// For array child jobs, this is when the child job was spawned by its parent and entered the <code>PENDING</code>
/// state.</p>
pub fn created_at(mut self, input: i64) -> Self {
self.created_at = Some(input);
self
}
/// <p>The Unix timestamp (in milliseconds) for when the job was created. For non-array jobs and parent array jobs,
/// this is when the job entered the <code>SUBMITTED</code> state (at the time <a>SubmitJob</a> was called).
/// For array child jobs, this is when the child job was spawned by its parent and entered the <code>PENDING</code>
/// state.</p>
pub fn set_created_at(mut self, input: std::option::Option<i64>) -> Self {
self.created_at = input;
self
}
/// <p>The retry strategy to use for this job if an attempt fails.</p>
pub fn retry_strategy(mut self, input: crate::model::RetryStrategy) -> Self {
self.retry_strategy = Some(input);
self
}
/// <p>The retry strategy to use for this job if an attempt fails.</p>
pub fn set_retry_strategy(
mut self,
input: std::option::Option<crate::model::RetryStrategy>,
) -> Self {
self.retry_strategy = input;
self
}
/// <p>The Unix timestamp (in milliseconds) for when the job was started (when the job transitioned from the
/// <code>STARTING</code> state to the <code>RUNNING</code> state). This parameter isn't provided for child jobs of
/// array jobs or multi-node parallel jobs.</p>
pub fn started_at(mut self, input: i64) -> Self {
self.started_at = Some(input);
self
}
/// <p>The Unix timestamp (in milliseconds) for when the job was started (when the job transitioned from the
/// <code>STARTING</code> state to the <code>RUNNING</code> state). This parameter isn't provided for child jobs of
/// array jobs or multi-node parallel jobs.</p>
pub fn set_started_at(mut self, input: std::option::Option<i64>) -> Self {
self.started_at = input;
self
}
/// <p>The Unix timestamp (in milliseconds) for when the job was stopped (when the job transitioned from the
/// <code>RUNNING</code> state to a terminal state, such as <code>SUCCEEDED</code> or <code>FAILED</code>).</p>
pub fn stopped_at(mut self, input: i64) -> Self {
self.stopped_at = Some(input);
self
}
/// <p>The Unix timestamp (in milliseconds) for when the job was stopped (when the job transitioned from the
/// <code>RUNNING</code> state to a terminal state, such as <code>SUCCEEDED</code> or <code>FAILED</code>).</p>
pub fn set_stopped_at(mut self, input: std::option::Option<i64>) -> Self {
self.stopped_at = input;
self
}
/// Appends an item to `depends_on`.
///
/// To override the contents of this collection use [`set_depends_on`](Self::set_depends_on).
///
/// <p>A list of job IDs that this job depends on.</p>
pub fn depends_on(mut self, input: impl Into<crate::model::JobDependency>) -> Self {
    // Lazily create the Vec on first append, then push in place.
    self.depends_on
        .get_or_insert_with(std::vec::Vec::new)
        .push(input.into());
    self
}
/// <p>A list of job IDs that this job depends on.</p>
pub fn set_depends_on(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::JobDependency>>,
) -> Self {
self.depends_on = input;
self
}
/// <p>The job definition that's used by this job.</p>
pub fn job_definition(mut self, input: impl Into<std::string::String>) -> Self {
self.job_definition = Some(input.into());
self
}
/// <p>The job definition that's used by this job.</p>
pub fn set_job_definition(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.job_definition = input;
self
}
/// Adds a key-value pair to `parameters`.
///
/// To override the contents of this collection use [`set_parameters`](Self::set_parameters).
///
/// <p>Additional parameters passed to the job that replace parameter substitution placeholders
/// or override corresponding parameter defaults from the job definition.</p>
pub fn parameters(
    mut self,
    k: impl Into<std::string::String>,
    v: impl Into<std::string::String>,
) -> Self {
    // Lazily create the map on first insert, then insert in place.
    self.parameters
        .get_or_insert_with(std::collections::HashMap::new)
        .insert(k.into(), v.into());
    self
}
/// <p>Additional parameters passed to the job that replace parameter substitution placeholders or override any
/// corresponding parameter defaults from the job definition.</p>
pub fn set_parameters(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.parameters = input;
self
}
/// <p>An object representing the details of the container that's associated with the job.</p>
pub fn container(mut self, input: crate::model::ContainerDetail) -> Self {
self.container = Some(input);
self
}
/// <p>An object representing the details of the container that's associated with the job.</p>
pub fn set_container(
mut self,
input: std::option::Option<crate::model::ContainerDetail>,
) -> Self {
self.container = input;
self
}
/// <p>An object representing the details of a node that's associated with a multi-node parallel job.</p>
pub fn node_details(mut self, input: crate::model::NodeDetails) -> Self {
self.node_details = Some(input);
self
}
/// <p>An object representing the details of a node that's associated with a multi-node parallel job.</p>
pub fn set_node_details(
mut self,
input: std::option::Option<crate::model::NodeDetails>,
) -> Self {
self.node_details = input;
self
}
/// <p>An object representing the node properties of a multi-node parallel job.</p>
/// <note>
/// <p>This isn't applicable to jobs that are running on Fargate resources.</p>
/// </note>
pub fn node_properties(mut self, input: crate::model::NodeProperties) -> Self {
self.node_properties = Some(input);
self
}
/// <p>An object representing the node properties of a multi-node parallel job.</p>
/// <note>
/// <p>This isn't applicable to jobs that are running on Fargate resources.</p>
/// </note>
pub fn set_node_properties(
mut self,
input: std::option::Option<crate::model::NodeProperties>,
) -> Self {
self.node_properties = input;
self
}
/// <p>The array properties of the job, if it is an array job.</p>
pub fn array_properties(mut self, input: crate::model::ArrayPropertiesDetail) -> Self {
self.array_properties = Some(input);
self
}
/// <p>The array properties of the job, if it is an array job.</p>
pub fn set_array_properties(
mut self,
input: std::option::Option<crate::model::ArrayPropertiesDetail>,
) -> Self {
self.array_properties = input;
self
}
/// <p>The timeout configuration for the job.</p>
pub fn timeout(mut self, input: crate::model::JobTimeout) -> Self {
self.timeout = Some(input);
self
}
/// <p>The timeout configuration for the job.</p>
pub fn set_timeout(mut self, input: std::option::Option<crate::model::JobTimeout>) -> Self {
self.timeout = input;
self
}
/// Adds a key-value pair to `tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>The tags applied to the job.</p>
pub fn tags(
    mut self,
    k: impl Into<std::string::String>,
    v: impl Into<std::string::String>,
) -> Self {
    // Lazily create the map on first insert, then insert in place.
    self.tags
        .get_or_insert_with(std::collections::HashMap::new)
        .insert(k.into(), v.into());
    self
}
/// <p>The tags applied to the job.</p>
pub fn set_tags(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.tags = input;
self
}
/// <p>Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no
/// value is specified, the tags aren't propagated. Tags can only be propagated to the tasks during task creation. For
/// tags with the same name, job tags are given priority over job definitions tags. If the total number of combined tags
/// from the job and job definition is over 50, the job is moved to the <code>FAILED</code> state.</p>
pub fn propagate_tags(mut self, input: bool) -> Self {
self.propagate_tags = Some(input);
self
}
/// <p>Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no
/// value is specified, the tags aren't propagated. Tags can only be propagated to the tasks during task creation. For
/// tags with the same name, job tags are given priority over job definitions tags. If the total number of combined tags
/// from the job and job definition is over 50, the job is moved to the <code>FAILED</code> state.</p>
pub fn set_propagate_tags(mut self, input: std::option::Option<bool>) -> Self {
self.propagate_tags = input;
self
}
/// Appends an item to `platform_capabilities`.
///
/// To override the contents of this collection use [`set_platform_capabilities`](Self::set_platform_capabilities).
///
/// <p>The platform capabilities required by the job definition. Defaults to <code>EC2</code>;
/// Fargate jobs specify <code>FARGATE</code>.</p>
pub fn platform_capabilities(
    mut self,
    input: impl Into<crate::model::PlatformCapability>,
) -> Self {
    // Lazily create the Vec on first append, then push in place.
    self.platform_capabilities
        .get_or_insert_with(std::vec::Vec::new)
        .push(input.into());
    self
}
/// <p>The platform capabilities required by the job definition. If no value is specified, it defaults to
/// <code>EC2</code>. Jobs run on Fargate resources specify <code>FARGATE</code>.</p>
pub fn set_platform_capabilities(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::PlatformCapability>>,
) -> Self {
self.platform_capabilities = input;
self
}
/// Consumes the builder and constructs a [`JobDetail`](crate::model::JobDetail)
pub fn build(self) -> crate::model::JobDetail {
    crate::model::JobDetail {
        job_arn: self.job_arn,
        job_name: self.job_name,
        job_id: self.job_id,
        job_queue: self.job_queue,
        status: self.status,
        share_identifier: self.share_identifier,
        // Scalar (`i32`/`i64`/`bool`) fields fall back to the type default
        // (0 / false) when the builder was never given a value; `Option`
        // fields are passed through as-is.
        scheduling_priority: self.scheduling_priority.unwrap_or_default(),
        attempts: self.attempts,
        status_reason: self.status_reason,
        created_at: self.created_at.unwrap_or_default(),
        retry_strategy: self.retry_strategy,
        started_at: self.started_at.unwrap_or_default(),
        stopped_at: self.stopped_at.unwrap_or_default(),
        depends_on: self.depends_on,
        job_definition: self.job_definition,
        parameters: self.parameters,
        container: self.container,
        node_details: self.node_details,
        node_properties: self.node_properties,
        array_properties: self.array_properties,
        timeout: self.timeout,
        tags: self.tags,
        propagate_tags: self.propagate_tags.unwrap_or_default(),
        platform_capabilities: self.platform_capabilities,
    }
}
}
}
impl JobDetail {
    /// Creates a new builder-style object to manufacture [`JobDetail`](crate::model::JobDetail)
    pub fn builder() -> crate::model::job_detail::Builder {
        std::default::Default::default()
    }
}
/// <p>An object representing the array properties of a job.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ArrayPropertiesDetail {
    /// <p>A summary of the number of array job children in each available job status. This parameter is returned for
    /// parent array jobs.</p>
    pub status_summary: std::option::Option<std::collections::HashMap<std::string::String, i32>>,
    /// <p>The size of the array job. This parameter is returned for parent array jobs.</p>
    // Plain i32, not Option: the builder substitutes the type default (0) when unset,
    // so an absent value is indistinguishable from a real 0.
    pub size: i32,
    /// <p>The job index within the array that's associated with this job. This parameter is returned for array job
    /// children.</p>
    // Plain i32, not Option: 0 when unset (same caveat as `size`).
    pub index: i32,
}
impl ArrayPropertiesDetail {
    /// <p>A summary of the number of array job children in each available job status. This parameter is returned for
    /// parent array jobs.</p>
    // Borrowing accessor: returns a reference into the struct, never clones the map.
    pub fn status_summary(
        &self,
    ) -> std::option::Option<&std::collections::HashMap<std::string::String, i32>> {
        self.status_summary.as_ref()
    }
    /// <p>The size of the array job. This parameter is returned for parent array jobs.</p>
    // Copies the scalar; 0 may mean "not returned" (see struct field docs).
    pub fn size(&self) -> i32 {
        self.size
    }
    /// <p>The job index within the array that's associated with this job. This parameter is returned for array job
    /// children.</p>
    // Copies the scalar; 0 may mean "not returned" (see struct field docs).
    pub fn index(&self) -> i32 {
        self.index
    }
}
impl std::fmt::Debug for ArrayPropertiesDetail {
    // Renders every field via the standard `debug_struct` helper, one entry per field.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ArrayPropertiesDetail")
            .field("status_summary", &self.status_summary)
            .field("size", &self.size)
            .field("index", &self.index)
            .finish()
    }
}
/// See [`ArrayPropertiesDetail`](crate::model::ArrayPropertiesDetail)
pub mod array_properties_detail {
    /// A builder for [`ArrayPropertiesDetail`](crate::model::ArrayPropertiesDetail)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) status_summary:
            std::option::Option<std::collections::HashMap<std::string::String, i32>>,
        pub(crate) size: std::option::Option<i32>,
        pub(crate) index: std::option::Option<i32>,
    }
    impl Builder {
        /// Adds a key-value pair to `status_summary`.
        ///
        /// To override the contents of this collection use [`set_status_summary`](Self::set_status_summary).
        ///
        /// <p>A summary of the number of array job children in each available job status.</p>
        pub fn status_summary(
            mut self,
            k: impl Into<std::string::String>,
            v: impl Into<i32>,
        ) -> Self {
            // Lazily create the map on first insert, then insert in place.
            self.status_summary
                .get_or_insert_with(std::collections::HashMap::new)
                .insert(k.into(), v.into());
            self
        }
        /// <p>Replaces the whole `status_summary` map.</p>
        pub fn set_status_summary(
            self,
            input: std::option::Option<std::collections::HashMap<std::string::String, i32>>,
        ) -> Self {
            Self {
                status_summary: input,
                ..self
            }
        }
        /// <p>The size of the array job. This parameter is returned for parent array jobs.</p>
        pub fn size(self, input: i32) -> Self {
            Self {
                size: Some(input),
                ..self
            }
        }
        /// <p>Sets or clears the size of the array job.</p>
        pub fn set_size(self, input: std::option::Option<i32>) -> Self {
            Self {
                size: input,
                ..self
            }
        }
        /// <p>The job index within the array that's associated with this job. Returned for array job children.</p>
        pub fn index(self, input: i32) -> Self {
            Self {
                index: Some(input),
                ..self
            }
        }
        /// <p>Sets or clears the job index within the array.</p>
        pub fn set_index(self, input: std::option::Option<i32>) -> Self {
            Self {
                index: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`ArrayPropertiesDetail`](crate::model::ArrayPropertiesDetail);
        /// unset scalar fields fall back to the type default (0).
        pub fn build(self) -> crate::model::ArrayPropertiesDetail {
            crate::model::ArrayPropertiesDetail {
                status_summary: self.status_summary,
                size: self.size.unwrap_or_default(),
                index: self.index.unwrap_or_default(),
            }
        }
    }
}
impl ArrayPropertiesDetail {
    /// Creates a new builder-style object to manufacture [`ArrayPropertiesDetail`](crate::model::ArrayPropertiesDetail)
    pub fn builder() -> crate::model::array_properties_detail::Builder {
        std::default::Default::default()
    }
}
/// <p>An object representing the details of a multi-node parallel job node.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct NodeDetails {
    /// <p>The node index for the node. Node index numbering begins at zero. This index is also available on the node with
    /// the <code>AWS_BATCH_JOB_NODE_INDEX</code> environment variable.</p>
    // Plain i32, not Option: the builder substitutes 0 when unset, which collides
    // with the legitimate first node index 0.
    pub node_index: i32,
    /// <p>Specifies whether the current node is the main node for a multi-node parallel job.</p>
    // Plain bool, not Option: false when unset.
    pub is_main_node: bool,
}
impl NodeDetails {
    /// <p>The node index for the node. Node index numbering begins at zero. This index is also available on the node with
    /// the <code>AWS_BATCH_JOB_NODE_INDEX</code> environment variable.</p>
    // Copies the scalar field; see the struct field docs for the unset-vs-zero caveat.
    pub fn node_index(&self) -> i32 {
        self.node_index
    }
    /// <p>Specifies whether the current node is the main node for a multi-node parallel job.</p>
    // Copies the scalar field; false when unset.
    pub fn is_main_node(&self) -> bool {
        self.is_main_node
    }
}
impl std::fmt::Debug for NodeDetails {
    // Renders every field via the standard `debug_struct` helper, one entry per field.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("NodeDetails")
            .field("node_index", &self.node_index)
            .field("is_main_node", &self.is_main_node)
            .finish()
    }
}
/// See [`NodeDetails`](crate::model::NodeDetails)
pub mod node_details {
    /// A builder for [`NodeDetails`](crate::model::NodeDetails)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) node_index: std::option::Option<i32>,
        pub(crate) is_main_node: std::option::Option<bool>,
    }
    impl Builder {
        /// <p>The node index for the node. Node index numbering begins at zero. This index is also
        /// available on the node with the <code>AWS_BATCH_JOB_NODE_INDEX</code> environment variable.</p>
        pub fn node_index(self, input: i32) -> Self {
            Self {
                node_index: Some(input),
                ..self
            }
        }
        /// <p>Sets or clears the node index for the node.</p>
        pub fn set_node_index(self, input: std::option::Option<i32>) -> Self {
            Self {
                node_index: input,
                ..self
            }
        }
        /// <p>Specifies whether the current node is the main node for a multi-node parallel job.</p>
        pub fn is_main_node(self, input: bool) -> Self {
            Self {
                is_main_node: Some(input),
                ..self
            }
        }
        /// <p>Sets or clears whether the current node is the main node.</p>
        pub fn set_is_main_node(self, input: std::option::Option<bool>) -> Self {
            Self {
                is_main_node: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`NodeDetails`](crate::model::NodeDetails);
        /// unset fields fall back to the type default (0 / false).
        pub fn build(self) -> crate::model::NodeDetails {
            crate::model::NodeDetails {
                node_index: self.node_index.unwrap_or_default(),
                is_main_node: self.is_main_node.unwrap_or_default(),
            }
        }
    }
}
impl NodeDetails {
    /// Creates a new builder-style object to manufacture [`NodeDetails`](crate::model::NodeDetails)
    pub fn builder() -> crate::model::node_details::Builder {
        std::default::Default::default()
    }
}
/// <p>An object representing the details of a container that's part of a job.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ContainerDetail {
    /// <p>The image used to start the container.</p>
    pub image: std::option::Option<std::string::String>,
    /// <p>The number of vCPUs reserved for the container. On EC2 resources this maps to Docker
    /// <code>CpuShares</code> (each vCPU is 1,024 shares); at least one vCPU is required. Jobs on
    /// Fargate resources must specify vCPU via <code>resourceRequirements</code> instead.</p>
    pub vcpus: i32,
    /// <p>For EC2 jobs that didn't use <code>resourceRequirements</code>, the number of MiB of
    /// memory reserved for the job; all other jobs (including Fargate) use
    /// <code>resourceRequirements</code>.</p>
    pub memory: i32,
    /// <p>The command that's passed to the container.</p>
    pub command: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>The Amazon Resource Name (ARN) associated with the job upon execution.</p>
    pub job_role_arn: std::option::Option<std::string::String>,
    /// <p>The ARN of the execution role that Batch can assume. See <a href="https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html">Batch execution IAM role</a> in the
    /// <i>Batch User Guide</i>.</p>
    pub execution_role_arn: std::option::Option<std::string::String>,
    /// <p>A list of volumes associated with the job.</p>
    pub volumes: std::option::Option<std::vec::Vec<crate::model::Volume>>,
    /// <p>The environment variables to pass to a container. Names must not start with
    /// <code>AWS_BATCH</code>; that prefix is reserved for variables set by the Batch service.</p>
    pub environment: std::option::Option<std::vec::Vec<crate::model::KeyValuePair>>,
    /// <p>The mount points for data volumes in your container.</p>
    pub mount_points: std::option::Option<std::vec::Vec<crate::model::MountPoint>>,
    /// <p>When true, the container is given read-only access to its root file system (Docker
    /// <code>ReadonlyRootfs</code> / <code>--read-only</code>).</p>
    pub readonly_root_filesystem: bool,
    /// <p>A list of <code>ulimit</code> values to set in the container (Docker <code>Ulimits</code> /
    /// <code>--ulimit</code>). Not applicable to jobs running on Fargate resources.</p>
    pub ulimits: std::option::Option<std::vec::Vec<crate::model::Ulimit>>,
    /// <p>When true, the container is given elevated permissions on the host container instance
    /// (similar to the <code>root</code> user). Defaults to false; not applicable to Fargate jobs.</p>
    pub privileged: bool,
    /// <p>The user name to use inside the container (Docker <code>User</code> / <code>--user</code>).</p>
    pub user: std::option::Option<std::string::String>,
    /// <p>The exit code to return upon completion.</p>
    pub exit_code: i32,
    /// <p>A short (255 max characters) human-readable string to provide additional details about a
    /// running or stopped container.</p>
    pub reason: std::option::Option<std::string::String>,
    /// <p>The Amazon Resource Name (ARN) of the container instance that the container is running on.</p>
    pub container_instance_arn: std::option::Option<std::string::String>,
    /// <p>The ARN of the Amazon ECS task associated with the container job. Each container attempt
    /// receives a task ARN when it reaches the <code>STARTING</code> status.</p>
    pub task_arn: std::option::Option<std::string::String>,
    /// <p>The name of the CloudWatch Logs log stream associated with the container (log group
    /// <code>/aws/batch/job</code>). Assigned when the attempt reaches <code>RUNNING</code>.</p>
    pub log_stream_name: std::option::Option<std::string::String>,
    /// <p>The instance type of the underlying host infrastructure of a multi-node parallel job.
    /// Not applicable to jobs running on Fargate resources.</p>
    pub instance_type: std::option::Option<std::string::String>,
    /// <p>The network interfaces associated with the job.</p>
    pub network_interfaces: std::option::Option<std::vec::Vec<crate::model::NetworkInterface>>,
    /// <p>The type and amount of resources to assign to a container. Supported resources include
    /// <code>GPU</code>, <code>MEMORY</code>, and <code>VCPU</code>.</p>
    pub resource_requirements:
        std::option::Option<std::vec::Vec<crate::model::ResourceRequirement>>,
    /// <p>Linux-specific modifications that are applied to the container, such as details for device mappings.</p>
    pub linux_parameters: std::option::Option<crate::model::LinuxParameters>,
    /// <p>The log configuration specification for the container (Docker <code>LogConfig</code> /
    /// <code>--log-driver</code>). Batch supports a subset of the Docker logging drivers (see the
    /// <a>LogConfiguration</a> data type); the log system must be configured on the container
    /// instance, and the ECS agent must register available drivers via
    /// <code>ECS_AVAILABLE_LOGGING_DRIVERS</code>. Requires Docker Remote API 1.18 or greater.</p>
    pub log_configuration: std::option::Option<crate::model::LogConfiguration>,
    /// <p>The secrets to pass to the container. See <a href="https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html">Specifying sensitive data</a> in the
    /// <i>Batch User Guide</i>.</p>
    pub secrets: std::option::Option<std::vec::Vec<crate::model::Secret>>,
    /// <p>The network configuration for jobs running on Fargate resources; EC2 jobs must not
    /// specify this parameter.</p>
    pub network_configuration: std::option::Option<crate::model::NetworkConfiguration>,
    /// <p>The platform configuration for jobs running on Fargate resources; EC2 jobs must not
    /// specify this parameter.</p>
    pub fargate_platform_configuration:
        std::option::Option<crate::model::FargatePlatformConfiguration>,
}
impl ContainerDetail {
/// <p>The image used to start the container.</p>
pub fn image(&self) -> std::option::Option<&str> {
    self.image.as_ref().map(|s| s.as_str())
}
/// <p>The number of vCPUs reserved for the container. On EC2 resources this maps to Docker
/// <code>CpuShares</code> (each vCPU is 1,024 shares); jobs on Fargate resources must specify
/// vCPU via <code>resourceRequirements</code> instead.</p>
pub fn vcpus(&self) -> i32 {
    self.vcpus
}
/// <p>For EC2 jobs that didn't use <code>resourceRequirements</code>, the number of MiB of
/// memory reserved for the job; all other jobs (including Fargate) use
/// <code>resourceRequirements</code>.</p>
pub fn memory(&self) -> i32 {
    self.memory
}
/// <p>The command that's passed to the container.</p>
pub fn command(&self) -> std::option::Option<&[std::string::String]> {
    self.command.as_ref().map(|v| v.as_slice())
}
/// <p>The Amazon Resource Name (ARN) associated with the job upon execution.</p>
pub fn job_role_arn(&self) -> std::option::Option<&str> {
    self.job_role_arn.as_ref().map(|s| s.as_str())
}
/// <p>The ARN of the execution role that Batch can assume. See <a href="https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html">Batch execution IAM role</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn execution_role_arn(&self) -> std::option::Option<&str> {
    self.execution_role_arn.as_ref().map(|s| s.as_str())
}
/// <p>A list of volumes associated with the job.</p>
pub fn volumes(&self) -> std::option::Option<&[crate::model::Volume]> {
    self.volumes.as_ref().map(|v| v.as_slice())
}
/// <p>The environment variables to pass to a container. Names must not start with
/// <code>AWS_BATCH</code>; that prefix is reserved for variables set by the Batch service.</p>
pub fn environment(&self) -> std::option::Option<&[crate::model::KeyValuePair]> {
    self.environment.as_ref().map(|v| v.as_slice())
}
/// <p>The mount points for data volumes in your container.</p>
pub fn mount_points(&self) -> std::option::Option<&[crate::model::MountPoint]> {
    self.mount_points.as_ref().map(|v| v.as_slice())
}
/// <p>When true, the container is given read-only access to its root file system (Docker
/// <code>ReadonlyRootfs</code> / <code>--read-only</code>).</p>
pub fn readonly_root_filesystem(&self) -> bool {
    self.readonly_root_filesystem
}
/// <p>A list of <code>ulimit</code> values to set in the container (Docker <code>Ulimits</code> /
/// <code>--ulimit</code>). Not applicable to jobs running on Fargate resources.</p>
pub fn ulimits(&self) -> std::option::Option<&[crate::model::Ulimit]> {
    self.ulimits.as_ref().map(|v| v.as_slice())
}
/// <p>When true, the container is given elevated permissions on the host container instance
/// (similar to the <code>root</code> user). Defaults to false; not applicable to Fargate jobs.</p>
pub fn privileged(&self) -> bool {
    self.privileged
}
/// <p>The user name to use inside the container. This parameter maps to <code>User</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--user</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
pub fn user(&self) -> std::option::Option<&str> {
self.user.as_deref()
}
/// <p>The exit code to return upon completion.</p>
pub fn exit_code(&self) -> i32 {
self.exit_code
}
/// <p>A short (255 max characters) human-readable string to provide additional details about a running or stopped
/// container.</p>
pub fn reason(&self) -> std::option::Option<&str> {
self.reason.as_deref()
}
/// <p>The Amazon Resource Name (ARN) of the container instance that the container is running on.</p>
pub fn container_instance_arn(&self) -> std::option::Option<&str> {
self.container_instance_arn.as_deref()
}
/// <p>The Amazon Resource Name (ARN) of the Amazon ECS task that's associated with the container job. Each container attempt receives a task
/// ARN when they reach the <code>STARTING</code> status.</p>
pub fn task_arn(&self) -> std::option::Option<&str> {
self.task_arn.as_deref()
}
/// <p>The name of the CloudWatch Logs log stream associated with the container. The log group for Batch jobs is
/// <code>/aws/batch/job</code>. Each container attempt receives a log stream name when they reach the
/// <code>RUNNING</code> status.</p>
pub fn log_stream_name(&self) -> std::option::Option<&str> {
self.log_stream_name.as_deref()
}
/// <p>The instance type of the underlying host infrastructure of a multi-node parallel job.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources.</p>
/// </note>
pub fn instance_type(&self) -> std::option::Option<&str> {
self.instance_type.as_deref()
}
/// <p>The network interfaces associated with the job.</p>
pub fn network_interfaces(&self) -> std::option::Option<&[crate::model::NetworkInterface]> {
self.network_interfaces.as_deref()
}
/// <p>The type and amount of resources to assign to a container. The supported resources include <code>GPU</code>,
/// <code>MEMORY</code>, and <code>VCPU</code>.</p>
pub fn resource_requirements(
&self,
) -> std::option::Option<&[crate::model::ResourceRequirement]> {
self.resource_requirements.as_deref()
}
/// <p>Linux-specific modifications that are applied to the container, such as details for device mappings.</p>
pub fn linux_parameters(&self) -> std::option::Option<&crate::model::LinuxParameters> {
self.linux_parameters.as_ref()
}
/// <p>The log configuration specification for the container.</p>
/// <p>This parameter maps to <code>LogConfig</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the
/// <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--log-driver</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.
/// By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a
/// different logging driver than the Docker daemon by specifying a log driver with this parameter in the container
/// definition. To use a different logging driver for a container, the log system must be configured properly on the
/// container instance. Or, alternatively, it must be configured on a different log server for remote logging options.
/// For more information on the options for different supported log drivers, see <a href="https://docs.docker.com/engine/admin/logging/overview/">Configure logging drivers</a> in the Docker
/// documentation.</p>
/// <note>
/// <p>Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the <a>LogConfiguration</a> data type). Additional log drivers might be available in future releases of the Amazon ECS
/// container agent.</p>
/// </note>
/// <p>This parameter requires version 1.18 of the Docker Remote API or greater on your
/// container instance. To check the Docker Remote API version on your container instance, log into your
/// container instance and run the following command: <code>sudo docker version | grep "Server API version"</code>
/// </p>
/// <note>
/// <p>The Amazon ECS container agent running on a container instance must register the logging drivers available on that
/// instance with the <code>ECS_AVAILABLE_LOGGING_DRIVERS</code> environment variable before containers placed on that
/// instance can use these log configuration options. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html">Amazon ECS Container Agent Configuration</a> in the
/// <i>Amazon Elastic Container Service Developer Guide</i>.</p>
/// </note>
pub fn log_configuration(&self) -> std::option::Option<&crate::model::LogConfiguration> {
self.log_configuration.as_ref()
}
/// <p>The secrets to pass to the container. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html">Specifying sensitive data</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn secrets(&self) -> std::option::Option<&[crate::model::Secret]> {
self.secrets.as_deref()
}
/// <p>The network configuration for jobs that are running on Fargate resources. Jobs that are running on EC2
/// resources must not specify this parameter.</p>
pub fn network_configuration(
&self,
) -> std::option::Option<&crate::model::NetworkConfiguration> {
self.network_configuration.as_ref()
}
/// <p>The platform configuration for jobs that are running on Fargate resources. Jobs that are running on EC2
/// resources must not specify this parameter.</p>
pub fn fargate_platform_configuration(
&self,
) -> std::option::Option<&crate::model::FargatePlatformConfiguration> {
self.fargate_platform_configuration.as_ref()
}
}
impl std::fmt::Debug for ContainerDetail {
    /// Emits every field in declaration order; the builder-chain form produces
    /// output identical to the field-by-field version.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ContainerDetail")
            .field("image", &self.image)
            .field("vcpus", &self.vcpus)
            .field("memory", &self.memory)
            .field("command", &self.command)
            .field("job_role_arn", &self.job_role_arn)
            .field("execution_role_arn", &self.execution_role_arn)
            .field("volumes", &self.volumes)
            .field("environment", &self.environment)
            .field("mount_points", &self.mount_points)
            .field("readonly_root_filesystem", &self.readonly_root_filesystem)
            .field("ulimits", &self.ulimits)
            .field("privileged", &self.privileged)
            .field("user", &self.user)
            .field("exit_code", &self.exit_code)
            .field("reason", &self.reason)
            .field("container_instance_arn", &self.container_instance_arn)
            .field("task_arn", &self.task_arn)
            .field("log_stream_name", &self.log_stream_name)
            .field("instance_type", &self.instance_type)
            .field("network_interfaces", &self.network_interfaces)
            .field("resource_requirements", &self.resource_requirements)
            .field("linux_parameters", &self.linux_parameters)
            .field("log_configuration", &self.log_configuration)
            .field("secrets", &self.secrets)
            .field("network_configuration", &self.network_configuration)
            .field(
                "fargate_platform_configuration",
                &self.fargate_platform_configuration,
            )
            .finish()
    }
}
/// See [`ContainerDetail`](crate::model::ContainerDetail)
pub mod container_detail {
    /// A builder for [`ContainerDetail`](crate::model::ContainerDetail)
    ///
    /// Every field mirrors the field of the same name on `ContainerDetail` and
    /// starts out as `None` via the derived `Default`. Note that even the
    /// scalar fields (`readonly_root_filesystem`, `privileged`, `exit_code`)
    /// are `Option` here, although the corresponding `ContainerDetail`
    /// accessors return plain `bool`/`i32` values.
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        // Container image and per-container resource settings.
        pub(crate) image: std::option::Option<std::string::String>,
        pub(crate) vcpus: std::option::Option<i32>,
        pub(crate) memory: std::option::Option<i32>,
        pub(crate) command: std::option::Option<std::vec::Vec<std::string::String>>,
        // IAM roles used by the job / its execution.
        pub(crate) job_role_arn: std::option::Option<std::string::String>,
        pub(crate) execution_role_arn: std::option::Option<std::string::String>,
        // Storage and environment configuration.
        pub(crate) volumes: std::option::Option<std::vec::Vec<crate::model::Volume>>,
        pub(crate) environment: std::option::Option<std::vec::Vec<crate::model::KeyValuePair>>,
        pub(crate) mount_points: std::option::Option<std::vec::Vec<crate::model::MountPoint>>,
        pub(crate) readonly_root_filesystem: std::option::Option<bool>,
        pub(crate) ulimits: std::option::Option<std::vec::Vec<crate::model::Ulimit>>,
        pub(crate) privileged: std::option::Option<bool>,
        pub(crate) user: std::option::Option<std::string::String>,
        // Runtime status details reported for the container.
        pub(crate) exit_code: std::option::Option<i32>,
        pub(crate) reason: std::option::Option<std::string::String>,
        pub(crate) container_instance_arn: std::option::Option<std::string::String>,
        pub(crate) task_arn: std::option::Option<std::string::String>,
        pub(crate) log_stream_name: std::option::Option<std::string::String>,
        pub(crate) instance_type: std::option::Option<std::string::String>,
        pub(crate) network_interfaces:
            std::option::Option<std::vec::Vec<crate::model::NetworkInterface>>,
        pub(crate) resource_requirements:
            std::option::Option<std::vec::Vec<crate::model::ResourceRequirement>>,
        // Nested configuration structures.
        pub(crate) linux_parameters: std::option::Option<crate::model::LinuxParameters>,
        pub(crate) log_configuration: std::option::Option<crate::model::LogConfiguration>,
        pub(crate) secrets: std::option::Option<std::vec::Vec<crate::model::Secret>>,
        pub(crate) network_configuration: std::option::Option<crate::model::NetworkConfiguration>,
        pub(crate) fargate_platform_configuration:
            std::option::Option<crate::model::FargatePlatformConfiguration>,
    }
impl Builder {
/// <p>The image used to start the container.</p>
pub fn image(mut self, input: impl Into<std::string::String>) -> Self {
self.image = Some(input.into());
self
}
/// <p>The image used to start the container.</p>
pub fn set_image(mut self, input: std::option::Option<std::string::String>) -> Self {
self.image = input;
self
}
/// <p>The number of vCPUs reserved for the container. For jobs that run on EC2 resources, you can specify the vCPU
/// requirement for the job using <code>resourceRequirements</code>, but you can't specify the vCPU requirements in both
/// the <code>vcpus</code> and <code>resourceRequirements</code> object. This parameter maps to <code>CpuShares</code> in
/// the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--cpu-shares</code> option to
/// <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. Each vCPU is equivalent to 1,024 CPU shares. You must
/// specify at least one vCPU. This is required but can be specified in several places. It must be specified for each
/// node at least once.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that run on Fargate resources. For jobs that run on Fargate
/// resources, you must specify the vCPU requirement for the job using <code>resourceRequirements</code>.</p>
/// </note>
pub fn vcpus(mut self, input: i32) -> Self {
self.vcpus = Some(input);
self
}
/// <p>The number of vCPUs reserved for the container. For jobs that run on EC2 resources, you can specify the vCPU
/// requirement for the job using <code>resourceRequirements</code>, but you can't specify the vCPU requirements in both
/// the <code>vcpus</code> and <code>resourceRequirements</code> object. This parameter maps to <code>CpuShares</code> in
/// the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--cpu-shares</code> option to
/// <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. Each vCPU is equivalent to 1,024 CPU shares. You must
/// specify at least one vCPU. This is required but can be specified in several places. It must be specified for each
/// node at least once.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that run on Fargate resources. For jobs that run on Fargate
/// resources, you must specify the vCPU requirement for the job using <code>resourceRequirements</code>.</p>
/// </note>
pub fn set_vcpus(mut self, input: std::option::Option<i32>) -> Self {
self.vcpus = input;
self
}
/// <p>For jobs run on EC2 resources that didn't specify memory requirements using <code>resourceRequirements</code>,
/// the number of MiB of memory reserved for the job. For other jobs, including all run on Fargate resources, see
/// <code>resourceRequirements</code>.</p>
pub fn memory(mut self, input: i32) -> Self {
self.memory = Some(input);
self
}
/// <p>For jobs run on EC2 resources that didn't specify memory requirements using <code>resourceRequirements</code>,
/// the number of MiB of memory reserved for the job. For other jobs, including all run on Fargate resources, see
/// <code>resourceRequirements</code>.</p>
pub fn set_memory(mut self, input: std::option::Option<i32>) -> Self {
self.memory = input;
self
}
/// Appends an item to `command`.
///
/// To override the contents of this collection use [`set_command`](Self::set_command).
///
/// <p>The command that's passed to the container.</p>
pub fn command(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.command.unwrap_or_default();
v.push(input.into());
self.command = Some(v);
self
}
/// <p>The command that's passed to the container.</p>
pub fn set_command(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.command = input;
self
}
/// <p>The Amazon Resource Name (ARN) associated with the job upon execution.</p>
pub fn job_role_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.job_role_arn = Some(input.into());
self
}
/// <p>The Amazon Resource Name (ARN) associated with the job upon execution.</p>
pub fn set_job_role_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.job_role_arn = input;
self
}
/// <p>The Amazon Resource Name (ARN) of the
/// execution
/// role that Batch can assume. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html">Batch execution IAM role</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn execution_role_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.execution_role_arn = Some(input.into());
self
}
/// <p>The Amazon Resource Name (ARN) of the
/// execution
/// role that Batch can assume. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html">Batch execution IAM role</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn set_execution_role_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.execution_role_arn = input;
self
}
/// Appends an item to `volumes`.
///
/// To override the contents of this collection use [`set_volumes`](Self::set_volumes).
///
/// <p>A list of volumes associated with the job.</p>
pub fn volumes(mut self, input: impl Into<crate::model::Volume>) -> Self {
let mut v = self.volumes.unwrap_or_default();
v.push(input.into());
self.volumes = Some(v);
self
}
/// <p>A list of volumes associated with the job.</p>
pub fn set_volumes(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Volume>>,
) -> Self {
self.volumes = input;
self
}
/// Appends an item to `environment`.
///
/// To override the contents of this collection use [`set_environment`](Self::set_environment).
///
/// <p>The environment variables to pass to a container.</p>
/// <note>
/// <p>Environment variables must not start with <code>AWS_BATCH</code>; this naming
/// convention is reserved for variables that are set by the Batch service.</p>
/// </note>
pub fn environment(mut self, input: impl Into<crate::model::KeyValuePair>) -> Self {
let mut v = self.environment.unwrap_or_default();
v.push(input.into());
self.environment = Some(v);
self
}
/// <p>The environment variables to pass to a container.</p>
/// <note>
/// <p>Environment variables must not start with <code>AWS_BATCH</code>; this naming
/// convention is reserved for variables that are set by the Batch service.</p>
/// </note>
pub fn set_environment(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::KeyValuePair>>,
) -> Self {
self.environment = input;
self
}
/// Appends an item to `mount_points`.
///
/// To override the contents of this collection use [`set_mount_points`](Self::set_mount_points).
///
/// <p>The mount points for data volumes in your container.</p>
pub fn mount_points(mut self, input: impl Into<crate::model::MountPoint>) -> Self {
let mut v = self.mount_points.unwrap_or_default();
v.push(input.into());
self.mount_points = Some(v);
self
}
/// <p>The mount points for data volumes in your container.</p>
pub fn set_mount_points(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::MountPoint>>,
) -> Self {
self.mount_points = input;
self
}
/// <p>When this parameter is true, the container is given read-only access to its root file system. This parameter
/// maps to <code>ReadonlyRootfs</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and
/// the <code>--read-only</code> option to <a href="https://docs.docker.com/engine/reference/commandline/run/">
/// <code>docker run</code>
/// </a>.</p>
pub fn readonly_root_filesystem(mut self, input: bool) -> Self {
self.readonly_root_filesystem = Some(input);
self
}
/// <p>When this parameter is true, the container is given read-only access to its root file system. This parameter
/// maps to <code>ReadonlyRootfs</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and
/// the <code>--read-only</code> option to <a href="https://docs.docker.com/engine/reference/commandline/run/">
/// <code>docker run</code>
/// </a>.</p>
pub fn set_readonly_root_filesystem(mut self, input: std::option::Option<bool>) -> Self {
self.readonly_root_filesystem = input;
self
}
/// Appends an item to `ulimits`.
///
/// To override the contents of this collection use [`set_ulimits`](Self::set_ulimits).
///
/// <p>A list of <code>ulimit</code> values to set in the container. This parameter maps to <code>Ulimits</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--ulimit</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources.</p>
/// </note>
pub fn ulimits(mut self, input: impl Into<crate::model::Ulimit>) -> Self {
let mut v = self.ulimits.unwrap_or_default();
v.push(input.into());
self.ulimits = Some(v);
self
}
/// <p>A list of <code>ulimit</code> values to set in the container. This parameter maps to <code>Ulimits</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--ulimit</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources.</p>
/// </note>
pub fn set_ulimits(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Ulimit>>,
) -> Self {
self.ulimits = input;
self
}
/// <p>When this parameter is true, the container is given elevated permissions on the host container instance (similar
/// to the <code>root</code> user). The default value is false.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided, or
/// specified as false.</p>
/// </note>
pub fn privileged(mut self, input: bool) -> Self {
self.privileged = Some(input);
self
}
/// <p>When this parameter is true, the container is given elevated permissions on the host container instance (similar
/// to the <code>root</code> user). The default value is false.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided, or
/// specified as false.</p>
/// </note>
pub fn set_privileged(mut self, input: std::option::Option<bool>) -> Self {
self.privileged = input;
self
}
/// <p>The user name to use inside the container. This parameter maps to <code>User</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--user</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
pub fn user(mut self, input: impl Into<std::string::String>) -> Self {
self.user = Some(input.into());
self
}
/// <p>The user name to use inside the container. This parameter maps to <code>User</code> in the
/// <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--user</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
pub fn set_user(mut self, input: std::option::Option<std::string::String>) -> Self {
self.user = input;
self
}
/// <p>The exit code to return upon completion.</p>
pub fn exit_code(mut self, input: i32) -> Self {
self.exit_code = Some(input);
self
}
/// <p>The exit code to return upon completion.</p>
pub fn set_exit_code(mut self, input: std::option::Option<i32>) -> Self {
self.exit_code = input;
self
}
/// <p>A short (255 max characters) human-readable string to provide additional details about a running or stopped
/// container.</p>
pub fn reason(mut self, input: impl Into<std::string::String>) -> Self {
self.reason = Some(input.into());
self
}
/// <p>A short (255 max characters) human-readable string to provide additional details about a running or stopped
/// container.</p>
pub fn set_reason(mut self, input: std::option::Option<std::string::String>) -> Self {
self.reason = input;
self
}
/// <p>The Amazon Resource Name (ARN) of the container instance that the container is running on.</p>
pub fn container_instance_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.container_instance_arn = Some(input.into());
self
}
/// <p>The Amazon Resource Name (ARN) of the container instance that the container is running on.</p>
pub fn set_container_instance_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.container_instance_arn = input;
self
}
/// <p>The Amazon Resource Name (ARN) of the Amazon ECS task that's associated with the container job. Each container attempt receives a task
/// ARN when they reach the <code>STARTING</code> status.</p>
pub fn task_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.task_arn = Some(input.into());
self
}
/// <p>The Amazon Resource Name (ARN) of the Amazon ECS task that's associated with the container job. Each container attempt receives a task
/// ARN when they reach the <code>STARTING</code> status.</p>
pub fn set_task_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.task_arn = input;
self
}
/// <p>The name of the CloudWatch Logs log stream associated with the container. The log group for Batch jobs is
/// <code>/aws/batch/job</code>. Each container attempt receives a log stream name when they reach the
/// <code>RUNNING</code> status.</p>
pub fn log_stream_name(mut self, input: impl Into<std::string::String>) -> Self {
self.log_stream_name = Some(input.into());
self
}
/// <p>The name of the CloudWatch Logs log stream associated with the container. The log group for Batch jobs is
/// <code>/aws/batch/job</code>. Each container attempt receives a log stream name when they reach the
/// <code>RUNNING</code> status.</p>
pub fn set_log_stream_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.log_stream_name = input;
self
}
/// <p>The instance type of the underlying host infrastructure of a multi-node parallel job.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources.</p>
/// </note>
pub fn instance_type(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_type = Some(input.into());
self
}
/// <p>The instance type of the underlying host infrastructure of a multi-node parallel job.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources.</p>
/// </note>
pub fn set_instance_type(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.instance_type = input;
self
}
/// Appends an item to `network_interfaces`.
///
/// To override the contents of this collection use [`set_network_interfaces`](Self::set_network_interfaces).
///
/// <p>The network interfaces associated with the job.</p>
pub fn network_interfaces(
mut self,
input: impl Into<crate::model::NetworkInterface>,
) -> Self {
let mut v = self.network_interfaces.unwrap_or_default();
v.push(input.into());
self.network_interfaces = Some(v);
self
}
/// <p>The network interfaces associated with the job.</p>
pub fn set_network_interfaces(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::NetworkInterface>>,
) -> Self {
self.network_interfaces = input;
self
}
/// Appends an item to `resource_requirements`.
///
/// To override the contents of this collection use [`set_resource_requirements`](Self::set_resource_requirements).
///
/// <p>The type and amount of resources to assign to a container. The supported resources include <code>GPU</code>,
/// <code>MEMORY</code>, and <code>VCPU</code>.</p>
pub fn resource_requirements(
mut self,
input: impl Into<crate::model::ResourceRequirement>,
) -> Self {
let mut v = self.resource_requirements.unwrap_or_default();
v.push(input.into());
self.resource_requirements = Some(v);
self
}
/// <p>The type and amount of resources to assign to a container. The supported resources include <code>GPU</code>,
/// <code>MEMORY</code>, and <code>VCPU</code>.</p>
pub fn set_resource_requirements(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ResourceRequirement>>,
) -> Self {
self.resource_requirements = input;
self
}
/// <p>Linux-specific modifications that are applied to the container, such as details for device mappings.</p>
pub fn linux_parameters(mut self, input: crate::model::LinuxParameters) -> Self {
self.linux_parameters = Some(input);
self
}
/// <p>Linux-specific modifications that are applied to the container, such as details for device mappings.</p>
pub fn set_linux_parameters(
mut self,
input: std::option::Option<crate::model::LinuxParameters>,
) -> Self {
self.linux_parameters = input;
self
}
/// <p>The log configuration specification for the container.</p>
/// <p>This parameter maps to <code>LogConfig</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the
/// <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--log-driver</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.
/// By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a
/// different logging driver than the Docker daemon by specifying a log driver with this parameter in the container
/// definition. To use a different logging driver for a container, the log system must be configured properly on the
/// container instance. Or, alternatively, it must be configured on a different log server for remote logging options.
/// For more information on the options for different supported log drivers, see <a href="https://docs.docker.com/engine/admin/logging/overview/">Configure logging drivers</a> in the Docker
/// documentation.</p>
/// <note>
/// <p>Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the <a>LogConfiguration</a> data type). Additional log drivers might be available in future releases of the Amazon ECS
/// container agent.</p>
/// </note>
/// <p>This parameter requires version 1.18 of the Docker Remote API or greater on your
/// container instance. To check the Docker Remote API version on your container instance, log into your
/// container instance and run the following command: <code>sudo docker version | grep "Server API version"</code>
/// </p>
/// <note>
/// <p>The Amazon ECS container agent running on a container instance must register the logging drivers available on that
/// instance with the <code>ECS_AVAILABLE_LOGGING_DRIVERS</code> environment variable before containers placed on that
/// instance can use these log configuration options. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html">Amazon ECS Container Agent Configuration</a> in the
/// <i>Amazon Elastic Container Service Developer Guide</i>.</p>
/// </note>
pub fn log_configuration(mut self, input: crate::model::LogConfiguration) -> Self {
self.log_configuration = Some(input);
self
}
/// <p>The log configuration specification for the container.</p>
/// <p>This parameter maps to <code>LogConfig</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">Create a container</a> section of the
/// <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the <code>--log-driver</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.
/// By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a
/// different logging driver than the Docker daemon by specifying a log driver with this parameter in the container
/// definition. To use a different logging driver for a container, the log system must be configured properly on the
/// container instance. Or, alternatively, it must be configured on a different log server for remote logging options.
/// For more information on the options for different supported log drivers, see <a href="https://docs.docker.com/engine/admin/logging/overview/">Configure logging drivers</a> in the Docker
/// documentation.</p>
/// <note>
/// <p>Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the <a>LogConfiguration</a> data type). Additional log drivers might be available in future releases of the Amazon ECS
/// container agent.</p>
/// </note>
/// <p>This parameter requires version 1.18 of the Docker Remote API or greater on your
/// container instance. To check the Docker Remote API version on your container instance, log into your
/// container instance and run the following command: <code>sudo docker version | grep "Server API version"</code>
/// </p>
/// <note>
/// <p>The Amazon ECS container agent running on a container instance must register the logging drivers available on that
/// instance with the <code>ECS_AVAILABLE_LOGGING_DRIVERS</code> environment variable before containers placed on that
/// instance can use these log configuration options. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html">Amazon ECS Container Agent Configuration</a> in the
/// <i>Amazon Elastic Container Service Developer Guide</i>.</p>
/// </note>
pub fn set_log_configuration(
    mut self,
    input: std::option::Option<crate::model::LogConfiguration>,
) -> Self {
    // Stores the value as-is: overwrites anything set earlier, and `None` clears the field.
    self.log_configuration = input;
    self
}
/// Appends an item to `secrets`.
///
/// To override the contents of this collection use [`set_secrets`](Self::set_secrets).
///
/// <p>The secrets to pass to the container. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html">Specifying sensitive data</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn secrets(mut self, input: impl Into<crate::model::Secret>) -> Self {
    // Start from the existing list (or a fresh empty one) so repeated calls accumulate.
    let mut v = self.secrets.unwrap_or_default();
    v.push(input.into());
    self.secrets = Some(v);
    self
}
/// <p>The secrets to pass to the container. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html">Specifying sensitive data</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn set_secrets(
    mut self,
    input: std::option::Option<std::vec::Vec<crate::model::Secret>>,
) -> Self {
    // Replaces the whole collection built up by `secrets()`; `None` clears it.
    self.secrets = input;
    self
}
/// <p>The network configuration for jobs that are running on Fargate resources. Jobs that are running on EC2
/// resources must not specify this parameter.</p>
pub fn network_configuration(mut self, input: crate::model::NetworkConfiguration) -> Self {
    // Overwrites any previously stored configuration.
    self.network_configuration = Some(input);
    self
}
/// <p>The network configuration for jobs that are running on Fargate resources. Jobs that are running on EC2
/// resources must not specify this parameter.</p>
pub fn set_network_configuration(
    mut self,
    input: std::option::Option<crate::model::NetworkConfiguration>,
) -> Self {
    // Stores the value as-is; `None` clears a previously set configuration.
    self.network_configuration = input;
    self
}
/// <p>The platform configuration for jobs that are running on Fargate resources. Jobs that are running on EC2
/// resources must not specify this parameter.</p>
pub fn fargate_platform_configuration(
    mut self,
    input: crate::model::FargatePlatformConfiguration,
) -> Self {
    // Overwrites any previously stored configuration.
    self.fargate_platform_configuration = Some(input);
    self
}
/// <p>The platform configuration for jobs that are running on Fargate resources. Jobs that are running on EC2
/// resources must not specify this parameter.</p>
pub fn set_fargate_platform_configuration(
    mut self,
    input: std::option::Option<crate::model::FargatePlatformConfiguration>,
) -> Self {
    // Stores the value as-is; `None` clears a previously set configuration.
    self.fargate_platform_configuration = input;
    self
}
/// Consumes the builder and constructs a [`ContainerDetail`](crate::model::ContainerDetail)
///
/// Optional fields never set on the builder carry over as `None`; the fields
/// below that call `unwrap_or_default()` instead fall back to their type's
/// `Default` value when unset.
pub fn build(self) -> crate::model::ContainerDetail {
    crate::model::ContainerDetail {
        image: self.image,
        vcpus: self.vcpus.unwrap_or_default(),
        memory: self.memory.unwrap_or_default(),
        command: self.command,
        job_role_arn: self.job_role_arn,
        execution_role_arn: self.execution_role_arn,
        volumes: self.volumes,
        environment: self.environment,
        mount_points: self.mount_points,
        readonly_root_filesystem: self.readonly_root_filesystem.unwrap_or_default(),
        ulimits: self.ulimits,
        privileged: self.privileged.unwrap_or_default(),
        user: self.user,
        exit_code: self.exit_code.unwrap_or_default(),
        reason: self.reason,
        container_instance_arn: self.container_instance_arn,
        task_arn: self.task_arn,
        log_stream_name: self.log_stream_name,
        instance_type: self.instance_type,
        network_interfaces: self.network_interfaces,
        resource_requirements: self.resource_requirements,
        linux_parameters: self.linux_parameters,
        log_configuration: self.log_configuration,
        secrets: self.secrets,
        network_configuration: self.network_configuration,
        fargate_platform_configuration: self.fargate_platform_configuration,
    }
}
}
}
impl ContainerDetail {
    /// Returns a new, defaulted builder used to assemble a
    /// [`ContainerDetail`](crate::model::ContainerDetail) value.
    pub fn builder() -> crate::model::container_detail::Builder {
        // The concrete builder type is fixed by the return type, so plain
        // `Default::default()` resolves to `container_detail::Builder`.
        Default::default()
    }
}
/// <p>An object representing the elastic network interface for a multi-node parallel job node.</p>
///
/// Values of this type are normally assembled via
/// [`NetworkInterface::builder`](crate::model::NetworkInterface::builder).
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct NetworkInterface {
    /// <p>The attachment ID for the network interface.</p>
    pub attachment_id: std::option::Option<std::string::String>,
    /// <p>The private IPv6 address for the network interface.</p>
    pub ipv6_address: std::option::Option<std::string::String>,
    /// <p>The private IPv4 address for the network interface.</p>
    pub private_ipv4_address: std::option::Option<std::string::String>,
}
impl NetworkInterface {
    /// The attachment ID for the network interface, if one was recorded.
    pub fn attachment_id(&self) -> std::option::Option<&str> {
        self.attachment_id.as_ref().map(|s| s.as_str())
    }
    /// The private IPv6 address for the network interface, if one was recorded.
    pub fn ipv6_address(&self) -> std::option::Option<&str> {
        self.ipv6_address.as_ref().map(|s| s.as_str())
    }
    /// The private IPv4 address for the network interface, if one was recorded.
    pub fn private_ipv4_address(&self) -> std::option::Option<&str> {
        self.private_ipv4_address.as_ref().map(|s| s.as_str())
    }
}
impl std::fmt::Debug for NetworkInterface {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chain the `DebugStruct` builder calls rather than binding a mutable local.
        f.debug_struct("NetworkInterface")
            .field("attachment_id", &self.attachment_id)
            .field("ipv6_address", &self.ipv6_address)
            .field("private_ipv4_address", &self.private_ipv4_address)
            .finish()
    }
}
/// See [`NetworkInterface`](crate::model::NetworkInterface)
pub mod network_interface {
    /// A builder for [`NetworkInterface`](crate::model::NetworkInterface)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) attachment_id: std::option::Option<std::string::String>,
        pub(crate) ipv6_address: std::option::Option<std::string::String>,
        pub(crate) private_ipv4_address: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Sets the attachment ID for the network interface.
        pub fn attachment_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.attachment_id.replace(input.into());
            self
        }
        /// Replaces the attachment ID wholesale; `None` clears it.
        pub fn set_attachment_id(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                attachment_id: input,
                ..self
            }
        }
        /// Sets the private IPv6 address for the network interface.
        pub fn ipv6_address(mut self, input: impl Into<std::string::String>) -> Self {
            self.ipv6_address.replace(input.into());
            self
        }
        /// Replaces the private IPv6 address wholesale; `None` clears it.
        pub fn set_ipv6_address(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                ipv6_address: input,
                ..self
            }
        }
        /// Sets the private IPv4 address for the network interface.
        pub fn private_ipv4_address(mut self, input: impl Into<std::string::String>) -> Self {
            self.private_ipv4_address.replace(input.into());
            self
        }
        /// Replaces the private IPv4 address wholesale; `None` clears it.
        pub fn set_private_ipv4_address(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                private_ipv4_address: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`NetworkInterface`](crate::model::NetworkInterface)
        pub fn build(self) -> crate::model::NetworkInterface {
            // Destructure once so every field is moved exactly as the builder held it.
            let Self {
                attachment_id,
                ipv6_address,
                private_ipv4_address,
            } = self;
            crate::model::NetworkInterface {
                attachment_id,
                ipv6_address,
                private_ipv4_address,
            }
        }
    }
}
impl NetworkInterface {
    /// Returns a new, defaulted builder used to assemble a
    /// [`NetworkInterface`](crate::model::NetworkInterface) value.
    pub fn builder() -> crate::model::network_interface::Builder {
        // Type inference picks the builder's `Default` impl from the return type.
        Default::default()
    }
}
/// <p>An object representing a job attempt.</p>
///
/// Values of this type are normally assembled via
/// [`AttemptDetail::builder`](crate::model::AttemptDetail::builder).
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AttemptDetail {
    /// <p>Details about the container in this job attempt.</p>
    pub container: std::option::Option<crate::model::AttemptContainerDetail>,
    /// <p>The Unix timestamp (in milliseconds) for when the attempt was started (when the attempt transitioned from the
    /// <code>STARTING</code> state to the <code>RUNNING</code> state).</p>
    pub started_at: i64,
    /// <p>The Unix timestamp (in milliseconds) for when the attempt was stopped (when the attempt transitioned from the
    /// <code>RUNNING</code> state to a terminal state, such as <code>SUCCEEDED</code> or <code>FAILED</code>).</p>
    pub stopped_at: i64,
    /// <p>A short, human-readable string to provide additional details about the current status of the job attempt.</p>
    pub status_reason: std::option::Option<std::string::String>,
}
// Read-only accessors mirroring the public fields of `AttemptDetail`.
impl AttemptDetail {
    /// <p>Details about the container in this job attempt.</p>
    pub fn container(&self) -> std::option::Option<&crate::model::AttemptContainerDetail> {
        self.container.as_ref()
    }
    /// <p>The Unix timestamp (in milliseconds) for when the attempt was started (when the attempt transitioned from the
    /// <code>STARTING</code> state to the <code>RUNNING</code> state).</p>
    pub fn started_at(&self) -> i64 {
        self.started_at
    }
    /// <p>The Unix timestamp (in milliseconds) for when the attempt was stopped (when the attempt transitioned from the
    /// <code>RUNNING</code> state to a terminal state, such as <code>SUCCEEDED</code> or <code>FAILED</code>).</p>
    pub fn stopped_at(&self) -> i64 {
        self.stopped_at
    }
    /// <p>A short, human-readable string to provide additional details about the current status of the job attempt.</p>
    pub fn status_reason(&self) -> std::option::Option<&str> {
        self.status_reason.as_deref()
    }
}
impl std::fmt::Debug for AttemptDetail {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chain the `DebugStruct` builder calls rather than binding a mutable local.
        f.debug_struct("AttemptDetail")
            .field("container", &self.container)
            .field("started_at", &self.started_at)
            .field("stopped_at", &self.stopped_at)
            .field("status_reason", &self.status_reason)
            .finish()
    }
}
/// See [`AttemptDetail`](crate::model::AttemptDetail)
pub mod attempt_detail {
    /// A builder for [`AttemptDetail`](crate::model::AttemptDetail)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) container: std::option::Option<crate::model::AttemptContainerDetail>,
        pub(crate) started_at: std::option::Option<i64>,
        pub(crate) stopped_at: std::option::Option<i64>,
        pub(crate) status_reason: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Sets the details about the container in this job attempt.
        pub fn container(mut self, input: crate::model::AttemptContainerDetail) -> Self {
            self.container.replace(input);
            self
        }
        /// Replaces the container details wholesale; `None` clears them.
        pub fn set_container(
            self,
            input: std::option::Option<crate::model::AttemptContainerDetail>,
        ) -> Self {
            Self {
                container: input,
                ..self
            }
        }
        /// Sets the Unix timestamp (in milliseconds) at which the attempt moved from the
        /// <code>STARTING</code> state to the <code>RUNNING</code> state.
        pub fn started_at(mut self, input: i64) -> Self {
            self.started_at.replace(input);
            self
        }
        /// Replaces the start timestamp wholesale; `None` clears it.
        pub fn set_started_at(self, input: std::option::Option<i64>) -> Self {
            Self {
                started_at: input,
                ..self
            }
        }
        /// Sets the Unix timestamp (in milliseconds) at which the attempt left the
        /// <code>RUNNING</code> state for a terminal state such as
        /// <code>SUCCEEDED</code> or <code>FAILED</code>.
        pub fn stopped_at(mut self, input: i64) -> Self {
            self.stopped_at.replace(input);
            self
        }
        /// Replaces the stop timestamp wholesale; `None` clears it.
        pub fn set_stopped_at(self, input: std::option::Option<i64>) -> Self {
            Self {
                stopped_at: input,
                ..self
            }
        }
        /// Sets a short, human-readable explanation of the attempt's current status.
        pub fn status_reason(mut self, input: impl Into<std::string::String>) -> Self {
            self.status_reason.replace(input.into());
            self
        }
        /// Replaces the status reason wholesale; `None` clears it.
        pub fn set_status_reason(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                status_reason: input,
                ..self
            }
        }
        /// Consumes the builder and constructs an [`AttemptDetail`](crate::model::AttemptDetail);
        /// timestamps that were never set default to 0.
        pub fn build(self) -> crate::model::AttemptDetail {
            crate::model::AttemptDetail {
                container: self.container,
                started_at: self.started_at.unwrap_or(0),
                stopped_at: self.stopped_at.unwrap_or(0),
                status_reason: self.status_reason,
            }
        }
    }
}
impl AttemptDetail {
    /// Returns a new, defaulted builder used to assemble an
    /// [`AttemptDetail`](crate::model::AttemptDetail) value.
    pub fn builder() -> crate::model::attempt_detail::Builder {
        // Type inference picks the builder's `Default` impl from the return type.
        Default::default()
    }
}
/// <p>An object representing the details of a container that's part of a job attempt.</p>
///
/// Values of this type are normally assembled via
/// [`AttemptContainerDetail::builder`](crate::model::AttemptContainerDetail::builder).
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AttemptContainerDetail {
    /// <p>The Amazon Resource Name (ARN) of the Amazon ECS container instance that hosts the job attempt.</p>
    pub container_instance_arn: std::option::Option<std::string::String>,
    /// <p>The Amazon Resource Name (ARN) of the Amazon ECS task that's associated with the job attempt. Each container attempt receives a task
    /// ARN when they reach the <code>STARTING</code> status.</p>
    pub task_arn: std::option::Option<std::string::String>,
    /// <p>The exit code for the job attempt. A non-zero exit code is considered a failure.</p>
    pub exit_code: i32,
    /// <p>A short (255 max characters) human-readable string to provide additional details about a running or stopped
    /// container.</p>
    pub reason: std::option::Option<std::string::String>,
    /// <p>The name of the CloudWatch Logs log stream associated with the container. The log group for Batch jobs is
    /// <code>/aws/batch/job</code>. Each container attempt receives a log stream name when they reach the
    /// <code>RUNNING</code> status.</p>
    pub log_stream_name: std::option::Option<std::string::String>,
    /// <p>The network interfaces associated with the job attempt.</p>
    pub network_interfaces: std::option::Option<std::vec::Vec<crate::model::NetworkInterface>>,
}
impl AttemptContainerDetail {
    /// The ARN of the Amazon ECS container instance that hosts the job attempt, if recorded.
    pub fn container_instance_arn(&self) -> std::option::Option<&str> {
        self.container_instance_arn.as_ref().map(|s| s.as_str())
    }
    /// The ARN of the Amazon ECS task associated with the job attempt; assigned
    /// once the attempt reaches the <code>STARTING</code> status.
    pub fn task_arn(&self) -> std::option::Option<&str> {
        self.task_arn.as_ref().map(|s| s.as_str())
    }
    /// The exit code for the job attempt; a non-zero exit code is considered a failure.
    pub fn exit_code(&self) -> i32 {
        self.exit_code
    }
    /// A short (255 characters max) human-readable note about a running or
    /// stopped container, if any.
    pub fn reason(&self) -> std::option::Option<&str> {
        self.reason.as_ref().map(|s| s.as_str())
    }
    /// The CloudWatch Logs log stream name for the container (the log group for
    /// Batch jobs is <code>/aws/batch/job</code>); assigned once the attempt
    /// reaches the <code>RUNNING</code> status.
    pub fn log_stream_name(&self) -> std::option::Option<&str> {
        self.log_stream_name.as_ref().map(|s| s.as_str())
    }
    /// The network interfaces associated with the job attempt, if any.
    pub fn network_interfaces(&self) -> std::option::Option<&[crate::model::NetworkInterface]> {
        self.network_interfaces.as_ref().map(|v| v.as_slice())
    }
}
impl std::fmt::Debug for AttemptContainerDetail {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chain the `DebugStruct` builder calls rather than binding a mutable local.
        f.debug_struct("AttemptContainerDetail")
            .field("container_instance_arn", &self.container_instance_arn)
            .field("task_arn", &self.task_arn)
            .field("exit_code", &self.exit_code)
            .field("reason", &self.reason)
            .field("log_stream_name", &self.log_stream_name)
            .field("network_interfaces", &self.network_interfaces)
            .finish()
    }
}
/// See [`AttemptContainerDetail`](crate::model::AttemptContainerDetail)
pub mod attempt_container_detail {
    /// A builder for [`AttemptContainerDetail`](crate::model::AttemptContainerDetail)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) container_instance_arn: std::option::Option<std::string::String>,
        pub(crate) task_arn: std::option::Option<std::string::String>,
        pub(crate) exit_code: std::option::Option<i32>,
        pub(crate) reason: std::option::Option<std::string::String>,
        pub(crate) log_stream_name: std::option::Option<std::string::String>,
        pub(crate) network_interfaces:
            std::option::Option<std::vec::Vec<crate::model::NetworkInterface>>,
    }
    impl Builder {
        /// Sets the ARN of the Amazon ECS container instance that hosts the job attempt.
        pub fn container_instance_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.container_instance_arn.replace(input.into());
            self
        }
        /// Replaces the container-instance ARN wholesale; `None` clears it.
        pub fn set_container_instance_arn(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                container_instance_arn: input,
                ..self
            }
        }
        /// Sets the ARN of the Amazon ECS task associated with the job attempt
        /// (assigned once the attempt reaches the <code>STARTING</code> status).
        pub fn task_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.task_arn.replace(input.into());
            self
        }
        /// Replaces the task ARN wholesale; `None` clears it.
        pub fn set_task_arn(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                task_arn: input,
                ..self
            }
        }
        /// Sets the exit code for the job attempt; non-zero is considered a failure.
        pub fn exit_code(mut self, input: i32) -> Self {
            self.exit_code.replace(input);
            self
        }
        /// Replaces the exit code wholesale; `None` clears it.
        pub fn set_exit_code(self, input: std::option::Option<i32>) -> Self {
            Self {
                exit_code: input,
                ..self
            }
        }
        /// Sets a short (255 characters max) human-readable note about a running
        /// or stopped container.
        pub fn reason(mut self, input: impl Into<std::string::String>) -> Self {
            self.reason.replace(input.into());
            self
        }
        /// Replaces the reason wholesale; `None` clears it.
        pub fn set_reason(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                reason: input,
                ..self
            }
        }
        /// Sets the CloudWatch Logs log stream name for the container (the log
        /// group for Batch jobs is <code>/aws/batch/job</code>; assigned once the
        /// attempt reaches the <code>RUNNING</code> status).
        pub fn log_stream_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.log_stream_name.replace(input.into());
            self
        }
        /// Replaces the log stream name wholesale; `None` clears it.
        pub fn set_log_stream_name(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                log_stream_name: input,
                ..self
            }
        }
        /// Appends one network interface to `network_interfaces`; use
        /// [`set_network_interfaces`](Self::set_network_interfaces) to replace the
        /// whole collection instead.
        pub fn network_interfaces(
            mut self,
            input: impl Into<crate::model::NetworkInterface>,
        ) -> Self {
            // Create the backing vector on first use, then push in place.
            self.network_interfaces
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// Replaces the network-interface list wholesale; `None` clears it.
        pub fn set_network_interfaces(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::NetworkInterface>>,
        ) -> Self {
            Self {
                network_interfaces: input,
                ..self
            }
        }
        /// Consumes the builder and constructs an
        /// [`AttemptContainerDetail`](crate::model::AttemptContainerDetail);
        /// an unset exit code defaults to 0.
        pub fn build(self) -> crate::model::AttemptContainerDetail {
            crate::model::AttemptContainerDetail {
                container_instance_arn: self.container_instance_arn,
                task_arn: self.task_arn,
                exit_code: self.exit_code.unwrap_or(0),
                reason: self.reason,
                log_stream_name: self.log_stream_name,
                network_interfaces: self.network_interfaces,
            }
        }
    }
}
impl AttemptContainerDetail {
    /// Returns a new, defaulted builder used to assemble an
    /// [`AttemptContainerDetail`](crate::model::AttemptContainerDetail) value.
    pub fn builder() -> crate::model::attempt_container_detail::Builder {
        // Type inference picks the builder's `Default` impl from the return type.
        Default::default()
    }
}
/// <p>An object representing the details of a Batch job queue.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct JobQueueDetail {
    /// <p>The name of the job queue.</p>
    pub job_queue_name: std::option::Option<std::string::String>,
    /// <p>The Amazon Resource Name (ARN) of the job queue.</p>
    pub job_queue_arn: std::option::Option<std::string::String>,
    /// <p>Describes the ability of the queue to accept new jobs. If the job queue state is <code>ENABLED</code>, it's able
    /// to accept jobs. If the job queue state is <code>DISABLED</code>, new jobs can't be added to the queue, but jobs
    /// already in the queue can finish.</p>
    pub state: std::option::Option<crate::model::JqState>,
    /// <p>Amazon Resource Name (ARN) of the scheduling policy. The format is
    /// <code>arn:<i>Partition</i>:batch:<i>Region</i>:<i>Account</i>:scheduling-policy/<i>Name</i>
    /// </code>.
    /// For example,
    /// <code>arn:aws:batch:us-west-2:012345678910:scheduling-policy/MySchedulingPolicy</code>.</p>
    pub scheduling_policy_arn: std::option::Option<std::string::String>,
    /// <p>The status of the job queue (for example, <code>CREATING</code> or <code>VALID</code>).</p>
    pub status: std::option::Option<crate::model::JqStatus>,
    /// <p>A short, human-readable string to provide additional details about the current status of the job queue.</p>
    pub status_reason: std::option::Option<std::string::String>,
    /// <p>The priority of the job queue. Job queues with a higher priority (or a higher integer value for the
    /// <code>priority</code> parameter) are evaluated first when associated with the same compute environment. Priority is
    /// determined in descending order, for example, a job queue with a priority value of <code>10</code> is given scheduling
    /// preference over a job queue with a priority value of <code>1</code>. All of the compute environments must be either
    /// EC2 (<code>EC2</code> or <code>SPOT</code>) or Fargate (<code>FARGATE</code> or <code>FARGATE_SPOT</code>); EC2 and
    /// Fargate compute environments can't be mixed.</p>
    pub priority: i32,
    /// <p>The compute environments that are attached to the job queue and the order that job placement is preferred.
    /// Compute environments are selected for job placement in ascending order.</p>
    pub compute_environment_order:
        std::option::Option<std::vec::Vec<crate::model::ComputeEnvironmentOrder>>,
    /// <p>The tags applied to the job queue. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/using-tags.html">Tagging your Batch resources</a> in
    /// <i>Batch User Guide</i>.</p>
    pub tags:
        std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
}
impl JobQueueDetail {
    /// The name of the job queue, if set.
    pub fn job_queue_name(&self) -> std::option::Option<&str> {
        self.job_queue_name.as_ref().map(|s| s.as_str())
    }
    /// The Amazon Resource Name (ARN) of the job queue, if set.
    pub fn job_queue_arn(&self) -> std::option::Option<&str> {
        self.job_queue_arn.as_ref().map(|s| s.as_str())
    }
    /// Whether the queue can accept new jobs: an <code>ENABLED</code> queue
    /// accepts submissions, while a <code>DISABLED</code> queue rejects new
    /// jobs but lets already-queued jobs finish.
    pub fn state(&self) -> std::option::Option<&crate::model::JqState> {
        self.state.as_ref()
    }
    /// The ARN of the scheduling policy attached to the queue, in the form
    /// <code>arn:<i>Partition</i>:batch:<i>Region</i>:<i>Account</i>:scheduling-policy/<i>Name</i></code>,
    /// for example <code>arn:aws:batch:us-west-2:012345678910:scheduling-policy/MySchedulingPolicy</code>.
    pub fn scheduling_policy_arn(&self) -> std::option::Option<&str> {
        self.scheduling_policy_arn.as_ref().map(|s| s.as_str())
    }
    /// The status of the job queue (for example, <code>CREATING</code> or <code>VALID</code>).
    pub fn status(&self) -> std::option::Option<&crate::model::JqStatus> {
        self.status.as_ref()
    }
    /// A short, human-readable string with additional detail about the queue's current status.
    pub fn status_reason(&self) -> std::option::Option<&str> {
        self.status_reason.as_ref().map(|s| s.as_str())
    }
    /// The queue's priority: queues with higher values are evaluated first among
    /// queues sharing a compute environment. All attached compute environments
    /// must be either EC2-based (<code>EC2</code>/<code>SPOT</code>) or
    /// Fargate-based (<code>FARGATE</code>/<code>FARGATE_SPOT</code>); the two
    /// families can't be mixed.
    pub fn priority(&self) -> i32 {
        self.priority
    }
    /// The compute environments attached to the queue, selected for job
    /// placement in ascending order.
    pub fn compute_environment_order(
        &self,
    ) -> std::option::Option<&[crate::model::ComputeEnvironmentOrder]> {
        self.compute_environment_order.as_ref().map(|v| v.as_slice())
    }
    /// The tags applied to the job queue. For more information, see
    /// <a href="https://docs.aws.amazon.com/batch/latest/userguide/using-tags.html">Tagging your Batch resources</a>
    /// in the <i>Batch User Guide</i>.
    pub fn tags(
        &self,
    ) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
    {
        self.tags.as_ref()
    }
}
impl std::fmt::Debug for JobQueueDetail {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Chain the `DebugStruct` builder calls rather than binding a mutable local.
        f.debug_struct("JobQueueDetail")
            .field("job_queue_name", &self.job_queue_name)
            .field("job_queue_arn", &self.job_queue_arn)
            .field("state", &self.state)
            .field("scheduling_policy_arn", &self.scheduling_policy_arn)
            .field("status", &self.status)
            .field("status_reason", &self.status_reason)
            .field("priority", &self.priority)
            .field("compute_environment_order", &self.compute_environment_order)
            .field("tags", &self.tags)
            .finish()
    }
}
/// See [`JobQueueDetail`](crate::model::JobQueueDetail)
pub mod job_queue_detail {
/// A builder for [`JobQueueDetail`](crate::model::JobQueueDetail)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
    // Each field mirrors the corresponding field on `JobQueueDetail` and
    // remains `None` until its setter is called.
    pub(crate) job_queue_name: std::option::Option<std::string::String>,
    pub(crate) job_queue_arn: std::option::Option<std::string::String>,
    pub(crate) state: std::option::Option<crate::model::JqState>,
    pub(crate) scheduling_policy_arn: std::option::Option<std::string::String>,
    pub(crate) status: std::option::Option<crate::model::JqStatus>,
    pub(crate) status_reason: std::option::Option<std::string::String>,
    pub(crate) priority: std::option::Option<i32>,
    pub(crate) compute_environment_order:
        std::option::Option<std::vec::Vec<crate::model::ComputeEnvironmentOrder>>,
    pub(crate) tags: std::option::Option<
        std::collections::HashMap<std::string::String, std::string::String>,
    >,
}
impl Builder {
/// Sets the name of the job queue.
pub fn job_queue_name(mut self, input: impl Into<std::string::String>) -> Self {
    self.job_queue_name.replace(input.into());
    self
}
/// <p>The name of the job queue.</p>
pub fn set_job_queue_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.job_queue_name = input;
self
}
/// Sets the Amazon Resource Name (ARN) of the job queue.
pub fn job_queue_arn(mut self, input: impl Into<std::string::String>) -> Self {
    self.job_queue_arn.replace(input.into());
    self
}
/// <p>The Amazon Resource Name (ARN) of the job queue.</p>
pub fn set_job_queue_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.job_queue_arn = input;
self
}
/// Sets whether the queue accepts new jobs: an <code>ENABLED</code> queue accepts
/// submissions, while a <code>DISABLED</code> queue only lets queued jobs finish.
pub fn state(mut self, input: crate::model::JqState) -> Self {
    self.state.replace(input);
    self
}
/// <p>Describes the ability of the queue to accept new jobs. If the job queue state is <code>ENABLED</code>, it's able
/// to accept jobs. If the job queue state is <code>DISABLED</code>, new jobs can't be added to the queue, but jobs
/// already in the queue can finish.</p>
pub fn set_state(mut self, input: std::option::Option<crate::model::JqState>) -> Self {
self.state = input;
self
}
/// Sets the ARN of the scheduling policy, e.g.
/// <code>arn:aws:batch:us-west-2:012345678910:scheduling-policy/MySchedulingPolicy</code>.
pub fn scheduling_policy_arn(mut self, input: impl Into<std::string::String>) -> Self {
    self.scheduling_policy_arn.replace(input.into());
    self
}
/// <p>Amazon Resource Name (ARN) of the scheduling policy. The format is
/// <code>aws:<i>Partition</i>:batch:<i>Region</i>:<i>Account</i>:scheduling-policy/<i>Name</i>
/// </code>.
/// For example,
/// <code>aws:aws:batch:us-west-2:012345678910:scheduling-policy/MySchedulingPolicy</code>.</p>
pub fn set_scheduling_policy_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.scheduling_policy_arn = input;
self
}
/// Sets the status of the job queue (for example, <code>CREATING</code> or <code>VALID</code>).
pub fn status(mut self, input: crate::model::JqStatus) -> Self {
    self.status.replace(input);
    self
}
/// <p>The status of the job queue (for example, <code>CREATING</code> or <code>VALID</code>).</p>
pub fn set_status(mut self, input: std::option::Option<crate::model::JqStatus>) -> Self {
self.status = input;
self
}
/// Sets a short, human-readable explanation of the queue's current status.
pub fn status_reason(mut self, input: impl Into<std::string::String>) -> Self {
    self.status_reason.replace(input.into());
    self
}
/// <p>A short, human-readable string to provide additional details about the current status of the job queue.</p>
pub fn set_status_reason(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.status_reason = input;
self
}
/// Sets the queue priority; higher values are evaluated first among queues
/// sharing a compute environment. All attached compute environments must be
/// either EC2-based (<code>EC2</code>/<code>SPOT</code>) or Fargate-based
/// (<code>FARGATE</code>/<code>FARGATE_SPOT</code>); the two can't be mixed.
pub fn priority(mut self, input: i32) -> Self {
    self.priority.replace(input);
    self
}
/// <p>The priority of the job queue. Job queues with a higher priority (or a higher integer value for the
/// <code>priority</code> parameter) are evaluated first when associated with the same compute environment. Priority is
/// determined in descending order, for example, a job queue with a priority value of <code>10</code> is given scheduling
/// preference over a job queue with a priority value of <code>1</code>. All of the compute environments must be either
/// EC2 (<code>EC2</code> or <code>SPOT</code>) or Fargate (<code>FARGATE</code> or <code>FARGATE_SPOT</code>); EC2 and
/// Fargate compute environments can't be mixed.</p>
// Passing `None` unsets the priority; `build` then falls back to the default (0).
pub fn set_priority(mut self, input: std::option::Option<i32>) -> Self {
self.priority = input;
self
}
/// Appends an item to `compute_environment_order`.
///
/// To override the contents of this collection use [`set_compute_environment_order`](Self::set_compute_environment_order).
///
/// <p>The compute environments that are attached to the job queue and the order that job placement is preferred.
/// Compute environments are selected for job placement in ascending order.</p>
pub fn compute_environment_order(
    mut self,
    input: impl Into<crate::model::ComputeEnvironmentOrder>,
) -> Self {
    // Lazily create the backing Vec on first append, then push in place.
    self.compute_environment_order
        .get_or_insert_with(std::vec::Vec::new)
        .push(input.into());
    self
}
/// <p>The compute environments that are attached to the job queue and the order that job placement is preferred.
/// Compute environments are selected for job placement in ascending order.</p>
// Replaces the whole collection; passing `None` clears it.
pub fn set_compute_environment_order(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ComputeEnvironmentOrder>>,
) -> Self {
self.compute_environment_order = input;
self
}
/// Adds a key-value pair to `tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>The tags applied to the job queue. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/using-tags.html">Tagging your Batch resources</a> in
/// <i>Batch User Guide</i>.</p>
pub fn tags(
    mut self,
    k: impl Into<std::string::String>,
    v: impl Into<std::string::String>,
) -> Self {
    // Lazily create the backing map on first insert, then insert in place.
    self.tags
        .get_or_insert_with(std::collections::HashMap::new)
        .insert(k.into(), v.into());
    self
}
/// <p>The tags applied to the job queue. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/using-tags.html">Tagging your Batch resources</a> in
/// <i>Batch User Guide</i>.</p>
// Replaces the whole tag map; passing `None` clears it.
pub fn set_tags(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.tags = input;
self
}
/// Consumes the builder and constructs a [`JobQueueDetail`](crate::model::JobQueueDetail)
pub fn build(self) -> crate::model::JobQueueDetail {
crate::model::JobQueueDetail {
job_queue_name: self.job_queue_name,
job_queue_arn: self.job_queue_arn,
state: self.state,
scheduling_policy_arn: self.scheduling_policy_arn,
status: self.status,
status_reason: self.status_reason,
// `priority` is non-optional on the model; an unset builder value becomes 0.
priority: self.priority.unwrap_or_default(),
compute_environment_order: self.compute_environment_order,
tags: self.tags,
}
}
}
}
impl JobQueueDetail {
    /// Creates a new builder-style object to manufacture [`JobQueueDetail`](crate::model::JobQueueDetail)
    pub fn builder() -> crate::model::job_queue_detail::Builder {
        // The builder derives `Default`; start from an empty one.
        std::default::Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
/// Status of a job queue; each variant mirrors one of the service's string values
/// (see `as_str`/`From<&str>` for the exact mapping).
pub enum JqStatus {
/// Wire value `CREATING`.
#[allow(missing_docs)] // documentation missing in model
Creating,
/// Wire value `DELETED`.
#[allow(missing_docs)] // documentation missing in model
Deleted,
/// Wire value `DELETING`.
#[allow(missing_docs)] // documentation missing in model
Deleting,
/// Wire value `INVALID`.
#[allow(missing_docs)] // documentation missing in model
Invalid,
/// Wire value `UPDATING`.
#[allow(missing_docs)] // documentation missing in model
Updating,
/// Wire value `VALID`.
#[allow(missing_docs)] // documentation missing in model
Valid,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for JqStatus {
fn from(s: &str) -> Self {
match s {
"CREATING" => JqStatus::Creating,
"DELETED" => JqStatus::Deleted,
"DELETING" => JqStatus::Deleting,
"INVALID" => JqStatus::Invalid,
"UPDATING" => JqStatus::Updating,
"VALID" => JqStatus::Valid,
other => JqStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for JqStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(JqStatus::from(s))
}
}
impl JqStatus {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
JqStatus::Creating => "CREATING",
JqStatus::Deleted => "DELETED",
JqStatus::Deleting => "DELETING",
JqStatus::Invalid => "INVALID",
JqStatus::Updating => "UPDATING",
JqStatus::Valid => "VALID",
JqStatus::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&[
"CREATING", "DELETED", "DELETING", "INVALID", "UPDATING", "VALID",
]
}
}
// Lets a `JqStatus` be passed anywhere a `&str` is accepted; delegates to `as_str`.
impl AsRef<str> for JqStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>An object representing an Batch job definition.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct JobDefinition {
/// <p>The name of the job definition.</p>
pub job_definition_name: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (ARN) for the job definition.</p>
pub job_definition_arn: std::option::Option<std::string::String>,
/// <p>The revision of the job definition.</p>
// Non-optional: the builder substitutes 0 when unset.
pub revision: i32,
/// <p>The status of the job definition.</p>
pub status: std::option::Option<std::string::String>,
/// <p>The type of job definition, either
/// <code>container</code> or <code>multinode</code>. If the job is run on Fargate resources, then
/// <code>multinode</code> isn't supported. For more information about multi-node parallel jobs, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/multi-node-job-def.html">Creating a multi-node parallel job definition</a>
/// in the <i>Batch User Guide</i>.</p>
pub r#type: std::option::Option<std::string::String>,
/// <p>The scheduling priority of the job
/// definition. This will only affect jobs in job queues with a fair share policy. Jobs with a higher scheduling priority
/// will be scheduled before jobs with a lower scheduling priority.</p>
// Non-optional: the builder substitutes 0 when unset.
pub scheduling_priority: i32,
/// <p>Default parameters or parameter substitution placeholders that are set in the job definition. Parameters are
/// specified as a key-value pair mapping. Parameters in a <code>SubmitJob</code> request override any corresponding
/// parameter defaults from the job definition. For more information about specifying parameters, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/job_definition_parameters.html">Job Definition Parameters</a> in the
/// <i>Batch User Guide</i>.</p>
pub parameters:
std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
/// <p>The retry strategy to use for failed jobs that are submitted with this job definition.</p>
pub retry_strategy: std::option::Option<crate::model::RetryStrategy>,
/// <p>An object with various properties specific to container-based jobs.</p>
pub container_properties: std::option::Option<crate::model::ContainerProperties>,
/// <p>The timeout configuration for jobs that are submitted with this job definition. You can specify a timeout
/// duration after which Batch terminates your jobs if they haven't finished.</p>
pub timeout: std::option::Option<crate::model::JobTimeout>,
/// <p>An object with various properties specific to multi-node parallel jobs.</p>
/// <note>
/// <p>If the job runs on Fargate resources, then you must not specify <code>nodeProperties</code>; use
/// <code>containerProperties</code> instead.</p>
/// </note>
pub node_properties: std::option::Option<crate::model::NodeProperties>,
/// <p>The tags applied to the job definition.</p>
pub tags:
std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
/// <p>Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no
/// value is specified, the tags aren't propagated. Tags can only be propagated to the tasks during task creation. For
/// tags with the same name, job tags are given priority over job definitions tags. If the total number of combined tags
/// from the job and job definition is over 50, the job is moved to the <code>FAILED</code> state.</p>
// Non-optional: the builder substitutes `false` when unset.
pub propagate_tags: bool,
/// <p>The platform capabilities required by the job definition. If no value is specified, it defaults to
/// <code>EC2</code>. Jobs run on Fargate resources specify <code>FARGATE</code>.</p>
pub platform_capabilities: std::option::Option<std::vec::Vec<crate::model::PlatformCapability>>,
}
// Read-only accessors returning borrows (or copies, for Copy types) of the
// corresponding `JobDefinition` fields; optional strings/collections are
// exposed as `Option<&...>` via `as_deref`/`as_ref`.
impl JobDefinition {
/// <p>The name of the job definition.</p>
pub fn job_definition_name(&self) -> std::option::Option<&str> {
self.job_definition_name.as_deref()
}
/// <p>The Amazon Resource Name (ARN) for the job definition.</p>
pub fn job_definition_arn(&self) -> std::option::Option<&str> {
self.job_definition_arn.as_deref()
}
/// <p>The revision of the job definition.</p>
pub fn revision(&self) -> i32 {
self.revision
}
/// <p>The status of the job definition.</p>
pub fn status(&self) -> std::option::Option<&str> {
self.status.as_deref()
}
/// <p>The type of job definition, either
/// <code>container</code> or <code>multinode</code>. If the job is run on Fargate resources, then
/// <code>multinode</code> isn't supported. For more information about multi-node parallel jobs, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/multi-node-job-def.html">Creating a multi-node parallel job definition</a>
/// in the <i>Batch User Guide</i>.</p>
pub fn r#type(&self) -> std::option::Option<&str> {
self.r#type.as_deref()
}
/// <p>The scheduling priority of the job
/// definition. This will only affect jobs in job queues with a fair share policy. Jobs with a higher scheduling priority
/// will be scheduled before jobs with a lower scheduling priority.</p>
pub fn scheduling_priority(&self) -> i32 {
self.scheduling_priority
}
/// <p>Default parameters or parameter substitution placeholders that are set in the job definition. Parameters are
/// specified as a key-value pair mapping. Parameters in a <code>SubmitJob</code> request override any corresponding
/// parameter defaults from the job definition. For more information about specifying parameters, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/job_definition_parameters.html">Job Definition Parameters</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn parameters(
&self,
) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
{
self.parameters.as_ref()
}
/// <p>The retry strategy to use for failed jobs that are submitted with this job definition.</p>
pub fn retry_strategy(&self) -> std::option::Option<&crate::model::RetryStrategy> {
self.retry_strategy.as_ref()
}
/// <p>An object with various properties specific to container-based jobs.</p>
pub fn container_properties(&self) -> std::option::Option<&crate::model::ContainerProperties> {
self.container_properties.as_ref()
}
/// <p>The timeout configuration for jobs that are submitted with this job definition. You can specify a timeout
/// duration after which Batch terminates your jobs if they haven't finished.</p>
pub fn timeout(&self) -> std::option::Option<&crate::model::JobTimeout> {
self.timeout.as_ref()
}
/// <p>An object with various properties specific to multi-node parallel jobs.</p>
/// <note>
/// <p>If the job runs on Fargate resources, then you must not specify <code>nodeProperties</code>; use
/// <code>containerProperties</code> instead.</p>
/// </note>
pub fn node_properties(&self) -> std::option::Option<&crate::model::NodeProperties> {
self.node_properties.as_ref()
}
/// <p>The tags applied to the job definition.</p>
pub fn tags(
&self,
) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
{
self.tags.as_ref()
}
/// <p>Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no
/// value is specified, the tags aren't propagated. Tags can only be propagated to the tasks during task creation. For
/// tags with the same name, job tags are given priority over job definitions tags. If the total number of combined tags
/// from the job and job definition is over 50, the job is moved to the <code>FAILED</code> state.</p>
pub fn propagate_tags(&self) -> bool {
self.propagate_tags
}
/// <p>The platform capabilities required by the job definition. If no value is specified, it defaults to
/// <code>EC2</code>. Jobs run on Fargate resources specify <code>FARGATE</code>.</p>
pub fn platform_capabilities(
&self,
) -> std::option::Option<&[crate::model::PlatformCapability]> {
self.platform_capabilities.as_deref()
}
}
impl std::fmt::Debug for JobDefinition {
    /// Formats every field in declaration order using the standard
    /// `debug_struct` builder.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("JobDefinition")
            .field("job_definition_name", &self.job_definition_name)
            .field("job_definition_arn", &self.job_definition_arn)
            .field("revision", &self.revision)
            .field("status", &self.status)
            .field("r#type", &self.r#type)
            .field("scheduling_priority", &self.scheduling_priority)
            .field("parameters", &self.parameters)
            .field("retry_strategy", &self.retry_strategy)
            .field("container_properties", &self.container_properties)
            .field("timeout", &self.timeout)
            .field("node_properties", &self.node_properties)
            .field("tags", &self.tags)
            .field("propagate_tags", &self.propagate_tags)
            .field("platform_capabilities", &self.platform_capabilities)
            .finish()
    }
}
/// See [`JobDefinition`](crate::model::JobDefinition)
pub mod job_definition {
/// A builder for [`JobDefinition`](crate::model::JobDefinition)
// Every field starts unset (`None`); `build` applies type defaults for the
// model's non-optional fields (revision, scheduling_priority, propagate_tags).
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) job_definition_name: std::option::Option<std::string::String>,
pub(crate) job_definition_arn: std::option::Option<std::string::String>,
pub(crate) revision: std::option::Option<i32>,
pub(crate) status: std::option::Option<std::string::String>,
pub(crate) r#type: std::option::Option<std::string::String>,
pub(crate) scheduling_priority: std::option::Option<i32>,
pub(crate) parameters: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
pub(crate) retry_strategy: std::option::Option<crate::model::RetryStrategy>,
pub(crate) container_properties: std::option::Option<crate::model::ContainerProperties>,
pub(crate) timeout: std::option::Option<crate::model::JobTimeout>,
pub(crate) node_properties: std::option::Option<crate::model::NodeProperties>,
pub(crate) tags: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
pub(crate) propagate_tags: std::option::Option<bool>,
pub(crate) platform_capabilities:
std::option::Option<std::vec::Vec<crate::model::PlatformCapability>>,
}
impl Builder {
/// <p>The name of the job definition.</p>
pub fn job_definition_name(mut self, input: impl Into<std::string::String>) -> Self {
self.job_definition_name = Some(input.into());
self
}
/// <p>The name of the job definition.</p>
// Passing `None` clears any name previously set on this builder.
pub fn set_job_definition_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.job_definition_name = input;
self
}
/// <p>The Amazon Resource Name (ARN) for the job definition.</p>
pub fn job_definition_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.job_definition_arn = Some(input.into());
self
}
/// <p>The Amazon Resource Name (ARN) for the job definition.</p>
pub fn set_job_definition_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.job_definition_arn = input;
self
}
/// <p>The revision of the job definition.</p>
pub fn revision(mut self, input: i32) -> Self {
self.revision = Some(input);
self
}
/// <p>The revision of the job definition.</p>
pub fn set_revision(mut self, input: std::option::Option<i32>) -> Self {
self.revision = input;
self
}
/// <p>The status of the job definition.</p>
pub fn status(mut self, input: impl Into<std::string::String>) -> Self {
self.status = Some(input.into());
self
}
/// <p>The status of the job definition.</p>
pub fn set_status(mut self, input: std::option::Option<std::string::String>) -> Self {
self.status = input;
self
}
/// <p>The type of job definition, either
/// <code>container</code> or <code>multinode</code>. If the job is run on Fargate resources, then
/// <code>multinode</code> isn't supported. For more information about multi-node parallel jobs, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/multi-node-job-def.html">Creating a multi-node parallel job definition</a>
/// in the <i>Batch User Guide</i>.</p>
pub fn r#type(mut self, input: impl Into<std::string::String>) -> Self {
self.r#type = Some(input.into());
self
}
/// <p>The type of job definition, either
/// <code>container</code> or <code>multinode</code>. If the job is run on Fargate resources, then
/// <code>multinode</code> isn't supported. For more information about multi-node parallel jobs, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/multi-node-job-def.html">Creating a multi-node parallel job definition</a>
/// in the <i>Batch User Guide</i>.</p>
pub fn set_type(mut self, input: std::option::Option<std::string::String>) -> Self {
self.r#type = input;
self
}
/// <p>The scheduling priority of the job
/// definition. This will only affect jobs in job queues with a fair share policy. Jobs with a higher scheduling priority
/// will be scheduled before jobs with a lower scheduling priority.</p>
pub fn scheduling_priority(mut self, input: i32) -> Self {
self.scheduling_priority = Some(input);
self
}
/// <p>The scheduling priority of the job
/// definition. This will only affect jobs in job queues with a fair share policy. Jobs with a higher scheduling priority
/// will be scheduled before jobs with a lower scheduling priority.</p>
pub fn set_scheduling_priority(mut self, input: std::option::Option<i32>) -> Self {
self.scheduling_priority = input;
self
}
/// Adds a key-value pair to `parameters`.
///
/// To override the contents of this collection use [`set_parameters`](Self::set_parameters).
///
/// <p>Default parameters or parameter substitution placeholders that are set in the job definition. Parameters are
/// specified as a key-value pair mapping. Parameters in a <code>SubmitJob</code> request override any corresponding
/// parameter defaults from the job definition. For more information about specifying parameters, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/job_definition_parameters.html">Job Definition Parameters</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn parameters(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
// The map is created lazily on the first insertion.
let mut hash_map = self.parameters.unwrap_or_default();
hash_map.insert(k.into(), v.into());
self.parameters = Some(hash_map);
self
}
/// <p>Default parameters or parameter substitution placeholders that are set in the job definition. Parameters are
/// specified as a key-value pair mapping. Parameters in a <code>SubmitJob</code> request override any corresponding
/// parameter defaults from the job definition. For more information about specifying parameters, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/job_definition_parameters.html">Job Definition Parameters</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn set_parameters(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.parameters = input;
self
}
/// <p>The retry strategy to use for failed jobs that are submitted with this job definition.</p>
pub fn retry_strategy(mut self, input: crate::model::RetryStrategy) -> Self {
self.retry_strategy = Some(input);
self
}
/// <p>The retry strategy to use for failed jobs that are submitted with this job definition.</p>
pub fn set_retry_strategy(
mut self,
input: std::option::Option<crate::model::RetryStrategy>,
) -> Self {
self.retry_strategy = input;
self
}
/// <p>An object with various properties specific to container-based jobs.</p>
pub fn container_properties(mut self, input: crate::model::ContainerProperties) -> Self {
self.container_properties = Some(input);
self
}
/// <p>An object with various properties specific to container-based jobs.</p>
pub fn set_container_properties(
mut self,
input: std::option::Option<crate::model::ContainerProperties>,
) -> Self {
self.container_properties = input;
self
}
/// <p>The timeout configuration for jobs that are submitted with this job definition. You can specify a timeout
/// duration after which Batch terminates your jobs if they haven't finished.</p>
pub fn timeout(mut self, input: crate::model::JobTimeout) -> Self {
self.timeout = Some(input);
self
}
/// <p>The timeout configuration for jobs that are submitted with this job definition. You can specify a timeout
/// duration after which Batch terminates your jobs if they haven't finished.</p>
pub fn set_timeout(mut self, input: std::option::Option<crate::model::JobTimeout>) -> Self {
self.timeout = input;
self
}
/// <p>An object with various properties specific to multi-node parallel jobs.</p>
/// <note>
/// <p>If the job runs on Fargate resources, then you must not specify <code>nodeProperties</code>; use
/// <code>containerProperties</code> instead.</p>
/// </note>
pub fn node_properties(mut self, input: crate::model::NodeProperties) -> Self {
self.node_properties = Some(input);
self
}
/// <p>An object with various properties specific to multi-node parallel jobs.</p>
/// <note>
/// <p>If the job runs on Fargate resources, then you must not specify <code>nodeProperties</code>; use
/// <code>containerProperties</code> instead.</p>
/// </note>
pub fn set_node_properties(
mut self,
input: std::option::Option<crate::model::NodeProperties>,
) -> Self {
self.node_properties = input;
self
}
/// Adds a key-value pair to `tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>The tags applied to the job definition.</p>
pub fn tags(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
// The map is created lazily on the first insertion.
let mut hash_map = self.tags.unwrap_or_default();
hash_map.insert(k.into(), v.into());
self.tags = Some(hash_map);
self
}
/// <p>The tags applied to the job definition.</p>
pub fn set_tags(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.tags = input;
self
}
/// <p>Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no
/// value is specified, the tags aren't propagated. Tags can only be propagated to the tasks during task creation. For
/// tags with the same name, job tags are given priority over job definitions tags. If the total number of combined tags
/// from the job and job definition is over 50, the job is moved to the <code>FAILED</code> state.</p>
pub fn propagate_tags(mut self, input: bool) -> Self {
self.propagate_tags = Some(input);
self
}
/// <p>Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no
/// value is specified, the tags aren't propagated. Tags can only be propagated to the tasks during task creation. For
/// tags with the same name, job tags are given priority over job definitions tags. If the total number of combined tags
/// from the job and job definition is over 50, the job is moved to the <code>FAILED</code> state.</p>
pub fn set_propagate_tags(mut self, input: std::option::Option<bool>) -> Self {
self.propagate_tags = input;
self
}
/// Appends an item to `platform_capabilities`.
///
/// To override the contents of this collection use [`set_platform_capabilities`](Self::set_platform_capabilities).
///
/// <p>The platform capabilities required by the job definition. If no value is specified, it defaults to
/// <code>EC2</code>. Jobs run on Fargate resources specify <code>FARGATE</code>.</p>
pub fn platform_capabilities(
mut self,
input: impl Into<crate::model::PlatformCapability>,
) -> Self {
// The vector is created lazily on the first append.
let mut v = self.platform_capabilities.unwrap_or_default();
v.push(input.into());
self.platform_capabilities = Some(v);
self
}
/// <p>The platform capabilities required by the job definition. If no value is specified, it defaults to
/// <code>EC2</code>. Jobs run on Fargate resources specify <code>FARGATE</code>.</p>
pub fn set_platform_capabilities(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::PlatformCapability>>,
) -> Self {
self.platform_capabilities = input;
self
}
/// Consumes the builder and constructs a [`JobDefinition`](crate::model::JobDefinition)
pub fn build(self) -> crate::model::JobDefinition {
crate::model::JobDefinition {
job_definition_name: self.job_definition_name,
job_definition_arn: self.job_definition_arn,
// Non-optional model fields fall back to their type defaults (0 / false).
revision: self.revision.unwrap_or_default(),
status: self.status,
r#type: self.r#type,
scheduling_priority: self.scheduling_priority.unwrap_or_default(),
parameters: self.parameters,
retry_strategy: self.retry_strategy,
container_properties: self.container_properties,
timeout: self.timeout,
node_properties: self.node_properties,
tags: self.tags,
propagate_tags: self.propagate_tags.unwrap_or_default(),
platform_capabilities: self.platform_capabilities,
}
}
}
}
impl JobDefinition {
    /// Creates a new builder-style object to manufacture [`JobDefinition`](crate::model::JobDefinition)
    pub fn builder() -> crate::model::job_definition::Builder {
        // The builder derives `Default`; start from an empty one.
        std::default::Default::default()
    }
}
/// <p>An object representing an Batch compute environment.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ComputeEnvironmentDetail {
/// <p>The name of the compute environment. Up to 128 letters (uppercase and lowercase), numbers, hyphens, and
/// underscores are allowed.</p>
pub compute_environment_name: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (ARN) of the compute environment.</p>
pub compute_environment_arn: std::option::Option<std::string::String>,
/// <p>The maximum number of VCPUs expected to be used for an unmanaged compute environment.</p>
// NOTE(review): odd snake_case split ("unmanagedv_cpus") comes from the code
// generator; presumably derived from the model member `unmanagedvCpus` — verify.
pub unmanagedv_cpus: i32,
/// <p>The Amazon Resource Name (ARN) of the underlying Amazon ECS cluster used by the compute environment.</p>
pub ecs_cluster_arn: std::option::Option<std::string::String>,
/// <p>The tags applied to the compute environment.</p>
pub tags:
std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
/// <p>The type of the compute environment: <code>MANAGED</code> or <code>UNMANAGED</code>. For more information, see
/// <a href="https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html">Compute Environments</a> in the
/// <i>Batch User Guide</i>.</p>
pub r#type: std::option::Option<crate::model::CeType>,
/// <p>The state of the compute environment. The valid values are <code>ENABLED</code> or <code>DISABLED</code>.</p>
/// <p>If the state is <code>ENABLED</code>, then the Batch scheduler can attempt to place jobs from an associated
/// job queue on the compute resources within the environment. If the compute environment is managed, then it can scale
/// its instances out or in automatically, based on the job queue demand.</p>
/// <p>If the state is <code>DISABLED</code>, then the Batch scheduler doesn't attempt to place jobs within the
/// environment. Jobs in a <code>STARTING</code> or <code>RUNNING</code> state continue to progress normally. Managed
/// compute environments in the <code>DISABLED</code> state don't scale out. However, they scale in to
/// <code>minvCpus</code> value after instances become idle.</p>
pub state: std::option::Option<crate::model::CeState>,
/// <p>The current status of the compute environment (for example, <code>CREATING</code> or <code>VALID</code>).</p>
pub status: std::option::Option<crate::model::CeStatus>,
/// <p>A short, human-readable string to provide additional details about the current status of the compute
/// environment.</p>
pub status_reason: std::option::Option<std::string::String>,
/// <p>The compute resources defined for the compute environment. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html">Compute Environments</a> in the
/// <i>Batch User Guide</i>.</p>
pub compute_resources: std::option::Option<crate::model::ComputeResource>,
/// <p>The service role associated with the compute environment that allows Batch to make calls to Amazon Web Services API
/// operations on your behalf. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/service_IAM_role.html">Batch service IAM role</a> in the
/// <i>Batch User Guide</i>.</p>
pub service_role: std::option::Option<std::string::String>,
}
impl ComputeEnvironmentDetail {
/// <p>The name of the compute environment. Up to 128 letters (uppercase and lowercase), numbers, hyphens, and
/// underscores are allowed.</p>
pub fn compute_environment_name(&self) -> std::option::Option<&str> {
self.compute_environment_name.as_deref()
}
/// <p>The Amazon Resource Name (ARN) of the compute environment.</p>
pub fn compute_environment_arn(&self) -> std::option::Option<&str> {
self.compute_environment_arn.as_deref()
}
/// <p>The maximum number of VCPUs expected to be used for an unmanaged compute environment.</p>
pub fn unmanagedv_cpus(&self) -> i32 {
self.unmanagedv_cpus
}
/// <p>The Amazon Resource Name (ARN) of the underlying Amazon ECS cluster used by the compute environment.</p>
pub fn ecs_cluster_arn(&self) -> std::option::Option<&str> {
self.ecs_cluster_arn.as_deref()
}
/// <p>The tags applied to the compute environment.</p>
pub fn tags(
&self,
) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
{
self.tags.as_ref()
}
/// <p>The type of the compute environment: <code>MANAGED</code> or <code>UNMANAGED</code>. For more information, see
/// <a href="https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html">Compute Environments</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn r#type(&self) -> std::option::Option<&crate::model::CeType> {
self.r#type.as_ref()
}
/// <p>The state of the compute environment. The valid values are <code>ENABLED</code> or <code>DISABLED</code>.</p>
/// <p>If the state is <code>ENABLED</code>, then the Batch scheduler can attempt to place jobs from an associated
/// job queue on the compute resources within the environment. If the compute environment is managed, then it can scale
/// its instances out or in automatically, based on the job queue demand.</p>
/// <p>If the state is <code>DISABLED</code>, then the Batch scheduler doesn't attempt to place jobs within the
/// environment. Jobs in a <code>STARTING</code> or <code>RUNNING</code> state continue to progress normally. Managed
/// compute environments in the <code>DISABLED</code> state don't scale out. However, they scale in to
/// <code>minvCpus</code> value after instances become idle.</p>
pub fn state(&self) -> std::option::Option<&crate::model::CeState> {
    match self.state {
        Some(ref state) => Some(state),
        None => None,
    }
}
/// <p>The current status of the compute environment (for example, <code>CREATING</code> or <code>VALID</code>).</p>
pub fn status(&self) -> std::option::Option<&crate::model::CeStatus> {
    match self.status {
        Some(ref status) => Some(status),
        None => None,
    }
}
/// <p>A short, human-readable string to provide additional details about the current status of the compute
/// environment.</p>
pub fn status_reason(&self) -> std::option::Option<&str> {
    self.status_reason.as_ref().map(String::as_str)
}
/// <p>The compute resources defined for the compute environment. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html">Compute Environments</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn compute_resources(&self) -> std::option::Option<&crate::model::ComputeResource> {
    match self.compute_resources {
        Some(ref resources) => Some(resources),
        None => None,
    }
}
/// <p>The service role associated with the compute environment that allows Batch to make calls to Amazon Web Services API
/// operations on your behalf. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/service_IAM_role.html">Batch service IAM role</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn service_role(&self) -> std::option::Option<&str> {
self.service_role.as_deref()
}
}
impl std::fmt::Debug for ComputeEnvironmentDetail {
    // Renders every field through the standard `debug_struct` builder, in
    // declaration order, using the same field-name labels as the struct.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ComputeEnvironmentDetail")
            .field("compute_environment_name", &self.compute_environment_name)
            .field("compute_environment_arn", &self.compute_environment_arn)
            .field("unmanagedv_cpus", &self.unmanagedv_cpus)
            .field("ecs_cluster_arn", &self.ecs_cluster_arn)
            .field("tags", &self.tags)
            .field("r#type", &self.r#type)
            .field("state", &self.state)
            .field("status", &self.status)
            .field("status_reason", &self.status_reason)
            .field("compute_resources", &self.compute_resources)
            .field("service_role", &self.service_role)
            .finish()
    }
}
/// See [`ComputeEnvironmentDetail`](crate::model::ComputeEnvironmentDetail)
pub mod compute_environment_detail {
    /// A builder for [`ComputeEnvironmentDetail`](crate::model::ComputeEnvironmentDetail).
    ///
    /// Every field starts out as `None` (via `Default`); call the fluent setters
    /// and finish with [`build`](Builder::build).
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) compute_environment_name: std::option::Option<std::string::String>,
        pub(crate) compute_environment_arn: std::option::Option<std::string::String>,
        pub(crate) unmanagedv_cpus: std::option::Option<i32>,
        pub(crate) ecs_cluster_arn: std::option::Option<std::string::String>,
        pub(crate) tags: std::option::Option<
            std::collections::HashMap<std::string::String, std::string::String>,
        >,
        pub(crate) r#type: std::option::Option<crate::model::CeType>,
        pub(crate) state: std::option::Option<crate::model::CeState>,
        pub(crate) status: std::option::Option<crate::model::CeStatus>,
        pub(crate) status_reason: std::option::Option<std::string::String>,
        pub(crate) compute_resources: std::option::Option<crate::model::ComputeResource>,
        pub(crate) service_role: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The name of the compute environment. Up to 128 letters (uppercase and lowercase), numbers, hyphens, and
        /// underscores are allowed.</p>
        pub fn compute_environment_name(self, input: impl Into<std::string::String>) -> Self {
            Self {
                compute_environment_name: Some(input.into()),
                ..self
            }
        }
        /// Sets or clears the compute environment name.
        pub fn set_compute_environment_name(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                compute_environment_name: input,
                ..self
            }
        }
        /// <p>The Amazon Resource Name (ARN) of the compute environment.</p>
        pub fn compute_environment_arn(self, input: impl Into<std::string::String>) -> Self {
            Self {
                compute_environment_arn: Some(input.into()),
                ..self
            }
        }
        /// Sets or clears the compute environment ARN.
        pub fn set_compute_environment_arn(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                compute_environment_arn: input,
                ..self
            }
        }
        /// <p>The maximum number of VCPUs expected to be used for an unmanaged compute environment.</p>
        pub fn unmanagedv_cpus(self, input: i32) -> Self {
            Self {
                unmanagedv_cpus: Some(input),
                ..self
            }
        }
        /// Sets or clears the unmanaged vCPU count.
        pub fn set_unmanagedv_cpus(self, input: std::option::Option<i32>) -> Self {
            Self {
                unmanagedv_cpus: input,
                ..self
            }
        }
        /// <p>The Amazon Resource Name (ARN) of the underlying Amazon ECS cluster used by the compute environment.</p>
        pub fn ecs_cluster_arn(self, input: impl Into<std::string::String>) -> Self {
            Self {
                ecs_cluster_arn: Some(input.into()),
                ..self
            }
        }
        /// Sets or clears the ECS cluster ARN.
        pub fn set_ecs_cluster_arn(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                ecs_cluster_arn: input,
                ..self
            }
        }
        /// Adds a key-value pair to `tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).
        ///
        /// <p>The tags applied to the compute environment.</p>
        pub fn tags(
            mut self,
            k: impl Into<std::string::String>,
            v: impl Into<std::string::String>,
        ) -> Self {
            // Create the map lazily on first insertion so a builder that never
            // adds a tag keeps `tags == None`.
            self.tags
                .get_or_insert_with(std::collections::HashMap::new)
                .insert(k.into(), v.into());
            self
        }
        /// Replaces (or clears) the entire tag map.
        pub fn set_tags(
            self,
            input: std::option::Option<
                std::collections::HashMap<std::string::String, std::string::String>,
            >,
        ) -> Self {
            Self { tags: input, ..self }
        }
        /// <p>The type of the compute environment: <code>MANAGED</code> or <code>UNMANAGED</code>.</p>
        pub fn r#type(self, input: crate::model::CeType) -> Self {
            Self {
                r#type: Some(input),
                ..self
            }
        }
        /// Sets or clears the compute environment type.
        pub fn set_type(self, input: std::option::Option<crate::model::CeType>) -> Self {
            Self {
                r#type: input,
                ..self
            }
        }
        /// <p>The state of the compute environment: <code>ENABLED</code> or <code>DISABLED</code>.</p>
        pub fn state(self, input: crate::model::CeState) -> Self {
            Self {
                state: Some(input),
                ..self
            }
        }
        /// Sets or clears the compute environment state.
        pub fn set_state(self, input: std::option::Option<crate::model::CeState>) -> Self {
            Self {
                state: input,
                ..self
            }
        }
        /// <p>The current status of the compute environment (for example, <code>CREATING</code> or <code>VALID</code>).</p>
        pub fn status(self, input: crate::model::CeStatus) -> Self {
            Self {
                status: Some(input),
                ..self
            }
        }
        /// Sets or clears the compute environment status.
        pub fn set_status(self, input: std::option::Option<crate::model::CeStatus>) -> Self {
            Self {
                status: input,
                ..self
            }
        }
        /// <p>A short, human-readable string to provide additional details about the current status of the compute
        /// environment.</p>
        pub fn status_reason(self, input: impl Into<std::string::String>) -> Self {
            Self {
                status_reason: Some(input.into()),
                ..self
            }
        }
        /// Sets or clears the status reason.
        pub fn set_status_reason(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                status_reason: input,
                ..self
            }
        }
        /// <p>The compute resources defined for the compute environment.</p>
        pub fn compute_resources(self, input: crate::model::ComputeResource) -> Self {
            Self {
                compute_resources: Some(input),
                ..self
            }
        }
        /// Sets or clears the compute resources.
        pub fn set_compute_resources(
            self,
            input: std::option::Option<crate::model::ComputeResource>,
        ) -> Self {
            Self {
                compute_resources: input,
                ..self
            }
        }
        /// <p>The service role associated with the compute environment that allows Batch to make calls to
        /// Amazon Web Services API operations on your behalf.</p>
        pub fn service_role(self, input: impl Into<std::string::String>) -> Self {
            Self {
                service_role: Some(input.into()),
                ..self
            }
        }
        /// Sets or clears the service role.
        pub fn set_service_role(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                service_role: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`ComputeEnvironmentDetail`](crate::model::ComputeEnvironmentDetail).
        pub fn build(self) -> crate::model::ComputeEnvironmentDetail {
            crate::model::ComputeEnvironmentDetail {
                compute_environment_name: self.compute_environment_name,
                compute_environment_arn: self.compute_environment_arn,
                // A never-set vCPU count falls back to the `i32` default (0).
                unmanagedv_cpus: self.unmanagedv_cpus.unwrap_or_default(),
                ecs_cluster_arn: self.ecs_cluster_arn,
                tags: self.tags,
                r#type: self.r#type,
                state: self.state,
                status: self.status,
                status_reason: self.status_reason,
                compute_resources: self.compute_resources,
                service_role: self.service_role,
            }
        }
    }
}
impl ComputeEnvironmentDetail {
    /// Creates a new builder-style object to manufacture [`ComputeEnvironmentDetail`](crate::model::ComputeEnvironmentDetail)
    pub fn builder() -> crate::model::compute_environment_detail::Builder {
        // `Builder` derives `Default`, so an empty builder is simply the default value.
        Default::default()
    }
}
/// <p>An object representing an Batch compute resource. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html">Compute Environments</a> in the
/// <i>Batch User Guide</i>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ComputeResource {
/// <p>The type of compute environment: <code>EC2</code>, <code>SPOT</code>, <code>FARGATE</code>, or
/// <code>FARGATE_SPOT</code>. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html">Compute Environments</a> in the
/// <i>Batch User Guide</i>.</p>
/// <p> If you choose <code>SPOT</code>, you must also specify an Amazon EC2 Spot Fleet role with the
/// <code>spotIamFleetRole</code> parameter. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html">Amazon EC2 Spot Fleet role</a> in the
/// <i>Batch User Guide</i>.</p>
pub r#type: std::option::Option<crate::model::CrType>,
/// <p>The allocation strategy to use for the compute resource if not enough instances of the best fitting instance
/// type can be allocated. This might be because of availability of the instance type in the Region or <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html">Amazon EC2 service limits</a>. For more
/// information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/allocation-strategies.html">Allocation Strategies</a>
/// in the <i>Batch User Guide</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
/// <dl>
/// <dt>BEST_FIT (default)</dt>
/// <dd>
/// <p>Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost
/// instance type. If additional instances of the selected instance type aren't available, Batch waits for the
/// additional instances to be available. If there aren't enough instances available, or if the user is reaching
/// <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html">Amazon EC2 service limits</a>
/// then additional jobs aren't run until the currently running jobs have completed. This allocation strategy keeps
/// costs lower but can limit scaling. If you are using Spot Fleets with <code>BEST_FIT</code> then the Spot Fleet IAM
/// Role must be specified.</p>
/// </dd>
/// <dt>BEST_FIT_PROGRESSIVE</dt>
/// <dd>
/// <p>Batch will select additional instance types that are large enough to meet the requirements of the jobs in
/// the queue, with a preference for instance types with a lower cost per unit vCPU. If additional instances of the
/// previously selected instance types aren't available, Batch will select new instance types.</p>
/// </dd>
/// <dt>SPOT_CAPACITY_OPTIMIZED</dt>
/// <dd>
/// <p>Batch will select one or more instance types that are large enough to meet the requirements of the jobs in
/// the queue, with a preference for instance types that are less likely to be interrupted. This allocation strategy
/// is only available for Spot Instance compute resources.</p>
/// </dd>
/// </dl>
/// <p>With both <code>BEST_FIT_PROGRESSIVE</code> and <code>SPOT_CAPACITY_OPTIMIZED</code> strategies, Batch might
/// need to go above <code>maxvCpus</code> to meet your capacity requirements. In this event, Batch never exceeds
/// <code>maxvCpus</code> by more than a single instance.</p>
pub allocation_strategy: std::option::Option<crate::model::CrAllocationStrategy>,
/// <p>The minimum number of Amazon EC2 vCPUs that an environment should maintain (even if the compute environment is
/// <code>DISABLED</code>).</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub minv_cpus: i32,
/// <p>The maximum number of Amazon EC2 vCPUs that a compute environment can reach.</p>
/// <note>
/// <p>With both <code>BEST_FIT_PROGRESSIVE</code> and <code>SPOT_CAPACITY_OPTIMIZED</code> allocation strategies,
/// Batch might need to exceed <code>maxvCpus</code> to meet your capacity requirements. In this event, Batch never
/// exceeds <code>maxvCpus</code> by more than a single instance. For example, no more than a single instance from among
/// those specified in your compute environment is allocated.</p>
/// </note>
pub maxv_cpus: i32,
/// <p>The desired number of Amazon EC2 vCPUs in the compute environment. Batch modifies this value between the minimum
/// and maximum values, based on job queue demand.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub desiredv_cpus: i32,
/// <p>The instances types that can be launched. You can specify instance families to launch any instance type within
/// those families (for example, <code>c5</code> or <code>p3</code>), or you can specify specific sizes within a family
/// (such as <code>c5.8xlarge</code>). You can also choose <code>optimal</code> to select instance types (from the C4,
/// M4, and R4 instance families) that match the demand of your job queues.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
/// <note>
/// <p>When you create a compute environment, the instance types that you select for the compute environment must
/// share the same architecture. For example, you can't mix x86 and ARM instances in the same compute
/// environment.</p>
/// </note>
/// <note>
/// <p>Currently, <code>optimal</code> uses instance types from the C4, M4, and R4 instance families. In Regions that
/// don't have instance types from those instance families, instance types from the C5, M5, and R5 instance families are
/// used.</p>
/// </note>
pub instance_types: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The Amazon Machine Image (AMI) ID used for instances launched in the compute environment. This parameter is
/// overridden by the <code>imageIdOverride</code> member of the <code>Ec2Configuration</code> structure.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
/// <note>
/// <p>The AMI that you choose for a compute environment must match the architecture of the instance types that
/// you intend to use for that compute environment. For example, if your compute environment uses A1 instance types,
/// the compute resource AMI that you choose must support ARM instances. Amazon ECS vends both x86 and ARM versions of the
/// Amazon ECS-optimized Amazon Linux 2 AMI. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#ecs-optimized-ami-linux-variants">Amazon ECS-optimized
/// Amazon Linux 2 AMI</a>
/// in the <i>Amazon Elastic Container Service Developer Guide</i>.</p>
/// </note>
pub image_id: std::option::Option<std::string::String>,
/// <p>The VPC subnets where the compute resources are launched. These subnets must be within the same VPC. Fargate
/// compute resources can contain up to 16 subnets. For more information, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html">VPCs and Subnets</a> in the <i>Amazon VPC User
/// Guide</i>.</p>
pub subnets: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The Amazon EC2 security groups associated with instances launched in the compute environment. One or more security
/// groups must be specified, either in <code>securityGroupIds</code> or using a launch template referenced in
/// <code>launchTemplate</code>. This parameter is required for jobs that are running on Fargate resources and must
/// contain at least one security group. Fargate doesn't support launch templates. If security groups are specified
/// using both <code>securityGroupIds</code> and <code>launchTemplate</code>, the values in <code>securityGroupIds</code>
/// are used.</p>
pub security_group_ids: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The Amazon EC2 key pair that's used for instances launched in the compute environment. You can use this key pair to
/// log in to your instances with SSH.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub ec2_key_pair: std::option::Option<std::string::String>,
/// <p>The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can specify the short name
/// or full Amazon Resource Name (ARN) of an instance profile. For example,
/// <code>
/// <i>ecsInstanceRole</i>
/// </code> or
/// <code>arn:aws:iam::<i><aws_account_id></i>:instance-profile/<i>ecsInstanceRole</i>
/// </code>.
/// For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/instance_IAM_role.html">Amazon ECS Instance
/// Role</a> in the <i>Batch User Guide</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub instance_role: std::option::Option<std::string::String>,
/// <p>Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For Batch,
/// these take the form of "String1": "String2", where String1 is the tag key and String2 is the tag value—for
/// example, <code>{ "Name": "Batch Instance - C4OnDemand" }</code>. This is helpful for recognizing your Batch
/// instances in the Amazon EC2 console. These tags can't be updated or removed after the compute environment is created. Any
/// changes to these tags require that you create a new compute environment and remove the old compute environment. These
/// tags aren't seen when using the Batch <code>ListTagsForResource</code> API operation.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub tags:
std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
/// <p>The Amazon EC2 placement group to associate with your compute resources. If you intend to submit multi-node parallel
/// jobs to your compute environment, you should consider creating a cluster placement group and associate it with your
/// compute resources. This keeps your multi-node parallel job on a logical grouping of instances within a single
/// Availability Zone with high network flow potential. For more information, see <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html">Placement Groups</a> in the <i>Amazon EC2 User Guide for
/// Linux Instances</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub placement_group: std::option::Option<std::string::String>,
/// <p>The maximum percentage that a Spot Instance price can be when compared with the On-Demand price for that
/// instance type before instances are launched. For example, if your maximum percentage is 20%, then the Spot price must
/// be less than 20% of the current On-Demand price for that Amazon EC2 instance. You always pay the lowest (market) price and
/// never more than your maximum percentage. If you leave this field empty, the default value is 100% of the On-Demand
/// price.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub bid_percentage: i32,
/// <p>The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a <code>SPOT</code> compute environment. This role is
/// required if the allocation strategy is set to <code>BEST_FIT</code> or if the allocation strategy isn't specified. For
/// more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html">Amazon EC2 Spot Fleet
/// Role</a> in the <i>Batch User Guide</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
/// <important>
/// <p>To tag your Spot Instances on creation, the Spot Fleet IAM role specified here must use the newer <b>AmazonEC2SpotFleetTaggingRole</b> managed policy. The previously recommended <b>AmazonEC2SpotFleetRole</b> managed policy doesn't have the required permissions to tag Spot
/// Instances. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#spot-instance-no-tag">Spot Instances not tagged on creation</a> in the
/// <i>Batch User Guide</i>.</p>
/// </important>
pub spot_iam_fleet_role: std::option::Option<std::string::String>,
/// <p>The launch template to use for your compute resources. Any other compute resource parameters that you specify in
/// a <a>CreateComputeEnvironment</a> API operation override the same parameters in the launch template. You
/// must specify either the launch template ID or launch template name in the request, but not both. For more
/// information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/launch-templates.html">Launch Template Support</a> in
/// the <i>Batch User Guide</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub launch_template: std::option::Option<crate::model::LaunchTemplateSpecification>,
/// <p>Provides information used to select Amazon Machine Images (AMIs) for EC2 instances in the compute environment.
/// If <code>Ec2Configuration</code> isn't specified, the default is <code>ECS_AL2</code>.</p>
///
/// <p>One or two values can be provided.</p>
///
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub ec2_configuration: std::option::Option<std::vec::Vec<crate::model::Ec2Configuration>>,
}
impl ComputeResource {
/// <p>The type of compute environment: <code>EC2</code>, <code>SPOT</code>, <code>FARGATE</code>, or
/// <code>FARGATE_SPOT</code>. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html">Compute Environments</a> in the
/// <i>Batch User Guide</i>.</p>
/// <p> If you choose <code>SPOT</code>, you must also specify an Amazon EC2 Spot Fleet role with the
/// <code>spotIamFleetRole</code> parameter. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html">Amazon EC2 Spot Fleet role</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn r#type(&self) -> std::option::Option<&crate::model::CrType> {
    match self.r#type {
        Some(ref cr_type) => Some(cr_type),
        None => None,
    }
}
/// <p>The allocation strategy to use for the compute resource if not enough instances of the best fitting instance
/// type can be allocated. This might be because of availability of the instance type in the Region or <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html">Amazon EC2 service limits</a>. For more
/// information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/allocation-strategies.html">Allocation Strategies</a>
/// in the <i>Batch User Guide</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
/// <dl>
/// <dt>BEST_FIT (default)</dt>
/// <dd>
/// <p>Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost
/// instance type. If additional instances of the selected instance type aren't available, Batch waits for the
/// additional instances to be available. If there aren't enough instances available, or if the user is reaching
/// <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html">Amazon EC2 service limits</a>
/// then additional jobs aren't run until the currently running jobs have completed. This allocation strategy keeps
/// costs lower but can limit scaling. If you are using Spot Fleets with <code>BEST_FIT</code> then the Spot Fleet IAM
/// Role must be specified.</p>
/// </dd>
/// <dt>BEST_FIT_PROGRESSIVE</dt>
/// <dd>
/// <p>Batch will select additional instance types that are large enough to meet the requirements of the jobs in
/// the queue, with a preference for instance types with a lower cost per unit vCPU. If additional instances of the
/// previously selected instance types aren't available, Batch will select new instance types.</p>
/// </dd>
/// <dt>SPOT_CAPACITY_OPTIMIZED</dt>
/// <dd>
/// <p>Batch will select one or more instance types that are large enough to meet the requirements of the jobs in
/// the queue, with a preference for instance types that are less likely to be interrupted. This allocation strategy
/// is only available for Spot Instance compute resources.</p>
/// </dd>
/// </dl>
/// <p>With both <code>BEST_FIT_PROGRESSIVE</code> and <code>SPOT_CAPACITY_OPTIMIZED</code> strategies, Batch might
/// need to go above <code>maxvCpus</code> to meet your capacity requirements. In this event, Batch never exceeds
/// <code>maxvCpus</code> by more than a single instance.</p>
pub fn allocation_strategy(&self) -> std::option::Option<&crate::model::CrAllocationStrategy> {
    match self.allocation_strategy {
        Some(ref strategy) => Some(strategy),
        None => None,
    }
}
/// <p>The minimum number of Amazon EC2 vCPUs that an environment should maintain (even if the compute environment is
/// <code>DISABLED</code>).</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn minv_cpus(&self) -> i32 {
// `i32` is `Copy`, so the value is returned directly rather than by reference.
self.minv_cpus
}
/// <p>The maximum number of Amazon EC2 vCPUs that a compute environment can reach.</p>
/// <note>
/// <p>With both <code>BEST_FIT_PROGRESSIVE</code> and <code>SPOT_CAPACITY_OPTIMIZED</code> allocation strategies,
/// Batch might need to exceed <code>maxvCpus</code> to meet your capacity requirements. In this event, Batch never
/// exceeds <code>maxvCpus</code> by more than a single instance. For example, no more than a single instance from among
/// those specified in your compute environment is allocated.</p>
/// </note>
pub fn maxv_cpus(&self) -> i32 {
// `i32` is `Copy`, so the value is returned directly rather than by reference.
self.maxv_cpus
}
/// <p>The desired number of Amazon EC2 vCPUs in the compute environment. Batch modifies this value between the minimum
/// and maximum values, based on job queue demand.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn desiredv_cpus(&self) -> i32 {
// `i32` is `Copy`, so the value is returned directly rather than by reference.
self.desiredv_cpus
}
/// <p>The instance types that can be launched. Accepts instance families to launch any instance type within those
/// families (for example, <code>c5</code> or <code>p3</code>), specific sizes within a family (such as
/// <code>c5.8xlarge</code>), or <code>optimal</code> to select instance types (from the C4, M4, and R4 instance
/// families) that match the demand of your job queues.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.</p>
/// </note>
/// <note>
/// <p>All instance types selected for a compute environment must share the same architecture; x86 and ARM
/// instances can't be mixed in the same environment.</p>
/// </note>
/// <note>
/// <p>In Regions that don't have instance types from the C4, M4, and R4 instance families, <code>optimal</code>
/// uses instance types from the C5, M5, and R5 instance families instead.</p>
/// </note>
pub fn instance_types(&self) -> std::option::Option<&[std::string::String]> {
self.instance_types.as_deref()
}
/// <p>The Amazon Machine Image (AMI) ID used for instances launched in the compute environment. This parameter is
/// overridden by the <code>imageIdOverride</code> member of the <code>Ec2Configuration</code> structure.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.</p>
/// </note>
/// <note>
/// <p>The AMI must match the architecture of the instance types used in the compute environment (for example, an
/// ARM-capable AMI for A1 instance types). Amazon ECS vends both x86 and ARM versions of the Amazon ECS-optimized Amazon
/// Linux 2 AMI. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#ecs-optimized-ami-linux-variants">Amazon ECS-optimized
/// Amazon Linux 2 AMI</a> in the <i>Amazon Elastic Container Service Developer Guide</i>.</p>
/// </note>
pub fn image_id(&self) -> std::option::Option<&str> {
self.image_id.as_deref()
}
/// <p>The VPC subnets where the compute resources are launched. All subnets must be within the same VPC. Fargate
/// compute resources can contain up to 16 subnets. For more information, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html">VPCs and Subnets</a> in the
/// <i>Amazon VPC User Guide</i>.</p>
pub fn subnets(&self) -> std::option::Option<&[std::string::String]> {
self.subnets.as_deref()
}
/// <p>The Amazon EC2 security groups associated with instances launched in the compute environment. One or more
/// security groups must be specified, either in <code>securityGroupIds</code> or using a launch template referenced
/// in <code>launchTemplate</code>. This parameter is required for jobs running on Fargate resources, which must have
/// at least one security group and don't support launch templates. If security groups are specified in both places,
/// the values in <code>securityGroupIds</code> are used.</p>
pub fn security_group_ids(&self) -> std::option::Option<&[std::string::String]> {
self.security_group_ids.as_deref()
}
/// <p>The Amazon EC2 key pair used for instances launched in the compute environment. You can use this key pair to
/// log in to your instances with SSH.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.</p>
/// </note>
pub fn ec2_key_pair(&self) -> std::option::Option<&str> {
self.ec2_key_pair.as_deref()
}
/// <p>The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. Accepts either the short
/// name (for example, <code>ecsInstanceRole</code>) or the full Amazon Resource Name (ARN) of an instance profile
/// (for example,
/// <code>arn:aws:iam::<i><aws_account_id></i>:instance-profile/<i>ecsInstanceRole</i></code>).
/// For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/instance_IAM_role.html">Amazon ECS Instance
/// Role</a> in the <i>Batch User Guide</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.</p>
/// </note>
pub fn instance_role(&self) -> std::option::Option<&str> {
self.instance_role.as_deref()
}
/// <p>Key-value pair tags applied to EC2 resources launched in the compute environment. For Batch, these take the
/// form of "String1": "String2", where String1 is the tag key and String2 is the tag value — for example,
/// <code>{ "Name": "Batch Instance - C4OnDemand" }</code>. This is helpful for recognizing your Batch instances in
/// the Amazon EC2 console. These tags can't be updated or removed after the compute environment is created; any change
/// requires creating a new compute environment and removing the old one. These tags aren't seen when using the Batch
/// <code>ListTagsForResource</code> API operation.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.</p>
/// </note>
pub fn tags(
&self,
) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
{
self.tags.as_ref()
}
/// <p>The Amazon EC2 placement group to associate with your compute resources. If you intend to submit multi-node
/// parallel jobs, consider creating a cluster placement group and associating it with your compute resources; this
/// keeps a multi-node parallel job on a logical grouping of instances within a single Availability Zone with high
/// network flow potential. For more information, see <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html">Placement Groups</a> in the
/// <i>Amazon EC2 User Guide for Linux Instances</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.</p>
/// </note>
pub fn placement_group(&self) -> std::option::Option<&str> {
self.placement_group.as_deref()
}
/// <p>The maximum percentage of the On-Demand price that a Spot Instance price can reach before instances are
/// launched. For example, with a maximum percentage of 20%, the Spot price must be below 20% of the current On-Demand
/// price for that Amazon EC2 instance. You always pay the lowest (market) price, and never more than your maximum
/// percentage. If this field is left empty, the default value is 100% of the On-Demand price.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.</p>
/// </note>
pub fn bid_percentage(&self) -> i32 {
self.bid_percentage
}
/// <p>The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a <code>SPOT</code> compute
/// environment. This role is required if the allocation strategy is set to <code>BEST_FIT</code> or isn't specified.
/// For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html">Amazon EC2 Spot Fleet
/// Role</a> in the <i>Batch User Guide</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.</p>
/// </note>
/// <important>
/// <p>To tag your Spot Instances on creation, the Spot Fleet IAM role specified here must use the newer
/// <b>AmazonEC2SpotFleetTaggingRole</b> managed policy; the previously recommended <b>AmazonEC2SpotFleetRole</b>
/// managed policy doesn't have the required permissions to tag Spot Instances. For more information, see
/// <a href="https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#spot-instance-no-tag">Spot Instances not tagged on creation</a> in the
/// <i>Batch User Guide</i>.</p>
/// </important>
pub fn spot_iam_fleet_role(&self) -> std::option::Option<&str> {
self.spot_iam_fleet_role.as_deref()
}
/// <p>The launch template to use for your compute resources. Any other compute resource parameters specified in a
/// <a>CreateComputeEnvironment</a> API operation override the same parameters in the launch template. You must
/// specify either the launch template ID or the launch template name, but not both. For more information, see
/// <a href="https://docs.aws.amazon.com/batch/latest/userguide/launch-templates.html">Launch Template Support</a> in
/// the <i>Batch User Guide</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.</p>
/// </note>
pub fn launch_template(
&self,
) -> std::option::Option<&crate::model::LaunchTemplateSpecification> {
self.launch_template.as_ref()
}
/// <p>Provides information used to select Amazon Machine Images (AMIs) for EC2 instances in the compute environment.
/// If <code>Ec2Configuration</code> isn't specified, the default is <code>ECS_AL2</code>. One or two values can be
/// provided.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.</p>
/// </note>
pub fn ec2_configuration(&self) -> std::option::Option<&[crate::model::Ec2Configuration]> {
self.ec2_configuration.as_deref()
}
}
impl std::fmt::Debug for ComputeResource {
    /// Renders every member under the struct's type name, in declaration order.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ComputeResource")
            .field("r#type", &self.r#type)
            .field("allocation_strategy", &self.allocation_strategy)
            .field("minv_cpus", &self.minv_cpus)
            .field("maxv_cpus", &self.maxv_cpus)
            .field("desiredv_cpus", &self.desiredv_cpus)
            .field("instance_types", &self.instance_types)
            .field("image_id", &self.image_id)
            .field("subnets", &self.subnets)
            .field("security_group_ids", &self.security_group_ids)
            .field("ec2_key_pair", &self.ec2_key_pair)
            .field("instance_role", &self.instance_role)
            .field("tags", &self.tags)
            .field("placement_group", &self.placement_group)
            .field("bid_percentage", &self.bid_percentage)
            .field("spot_iam_fleet_role", &self.spot_iam_fleet_role)
            .field("launch_template", &self.launch_template)
            .field("ec2_configuration", &self.ec2_configuration)
            .finish()
    }
}
/// See [`ComputeResource`](crate::model::ComputeResource)
pub mod compute_resource {
/// A builder for [`ComputeResource`](crate::model::ComputeResource)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
// Every member is Option-wrapped so the builder can distinguish "never set" from an
// explicitly provided value; the corresponding set_* methods accept the Option directly.
pub(crate) r#type: std::option::Option<crate::model::CrType>,
pub(crate) allocation_strategy: std::option::Option<crate::model::CrAllocationStrategy>,
pub(crate) minv_cpus: std::option::Option<i32>,
pub(crate) maxv_cpus: std::option::Option<i32>,
pub(crate) desiredv_cpus: std::option::Option<i32>,
pub(crate) instance_types: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) image_id: std::option::Option<std::string::String>,
pub(crate) subnets: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) security_group_ids: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) ec2_key_pair: std::option::Option<std::string::String>,
pub(crate) instance_role: std::option::Option<std::string::String>,
pub(crate) tags: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
pub(crate) placement_group: std::option::Option<std::string::String>,
pub(crate) bid_percentage: std::option::Option<i32>,
pub(crate) spot_iam_fleet_role: std::option::Option<std::string::String>,
pub(crate) launch_template: std::option::Option<crate::model::LaunchTemplateSpecification>,
pub(crate) ec2_configuration:
std::option::Option<std::vec::Vec<crate::model::Ec2Configuration>>,
}
impl Builder {
/// <p>The type of compute environment: <code>EC2</code>, <code>SPOT</code>, <code>FARGATE</code>, or
/// <code>FARGATE_SPOT</code>. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html">Compute Environments</a> in the
/// <i>Batch User Guide</i>.</p>
/// <p> If you choose <code>SPOT</code>, you must also specify an Amazon EC2 Spot Fleet role with the
/// <code>spotIamFleetRole</code> parameter. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html">Amazon EC2 Spot Fleet role</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn r#type(mut self, input: crate::model::CrType) -> Self {
self.r#type = Some(input);
self
}
/// <p>The type of compute environment: <code>EC2</code>, <code>SPOT</code>, <code>FARGATE</code>, or
/// <code>FARGATE_SPOT</code>. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html">Compute Environments</a> in the
/// <i>Batch User Guide</i>.</p>
/// <p> If you choose <code>SPOT</code>, you must also specify an Amazon EC2 Spot Fleet role with the
/// <code>spotIamFleetRole</code> parameter. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html">Amazon EC2 Spot Fleet role</a> in the
/// <i>Batch User Guide</i>.</p>
pub fn set_type(mut self, input: std::option::Option<crate::model::CrType>) -> Self {
self.r#type = input;
self
}
/// <p>The allocation strategy to use for the compute resource if not enough instances of the best fitting instance
/// type can be allocated. This might be because of availability of the instance type in the Region or <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html">Amazon EC2 service limits</a>. For more
/// information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/allocation-strategies.html">Allocation Strategies</a>
/// in the <i>Batch User Guide</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
/// <dl>
/// <dt>BEST_FIT (default)</dt>
/// <dd>
/// <p>Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost
/// instance type. If additional instances of the selected instance type aren't available, Batch waits for the
/// additional instances to be available. If there aren't enough instances available, or if the user is reaching
/// <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html">Amazon EC2 service limits</a>
/// then additional jobs aren't run until the currently running jobs have completed. This allocation strategy keeps
/// costs lower but can limit scaling. If you are using Spot Fleets with <code>BEST_FIT</code> then the Spot Fleet IAM
/// Role must be specified.</p>
/// </dd>
/// <dt>BEST_FIT_PROGRESSIVE</dt>
/// <dd>
/// <p>Batch will select additional instance types that are large enough to meet the requirements of the jobs in
/// the queue, with a preference for instance types with a lower cost per unit vCPU. If additional instances of the
/// previously selected instance types aren't available, Batch will select new instance types.</p>
/// </dd>
/// <dt>SPOT_CAPACITY_OPTIMIZED</dt>
/// <dd>
/// <p>Batch will select one or more instance types that are large enough to meet the requirements of the jobs in
/// the queue, with a preference for instance types that are less likely to be interrupted. This allocation strategy
/// is only available for Spot Instance compute resources.</p>
/// </dd>
/// </dl>
/// <p>With both <code>BEST_FIT_PROGRESSIVE</code> and <code>SPOT_CAPACITY_OPTIMIZED</code> strategies, Batch might
/// need to go above <code>maxvCpus</code> to meet your capacity requirements. In this event, Batch never exceeds
/// <code>maxvCpus</code> by more than a single instance.</p>
pub fn allocation_strategy(mut self, input: crate::model::CrAllocationStrategy) -> Self {
self.allocation_strategy = Some(input);
self
}
/// <p>The allocation strategy to use for the compute resource if not enough instances of the best fitting instance
/// type can be allocated. This might be because of availability of the instance type in the Region or <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html">Amazon EC2 service limits</a>. For more
/// information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/allocation-strategies.html">Allocation Strategies</a>
/// in the <i>Batch User Guide</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
/// <dl>
/// <dt>BEST_FIT (default)</dt>
/// <dd>
/// <p>Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost
/// instance type. If additional instances of the selected instance type aren't available, Batch waits for the
/// additional instances to be available. If there aren't enough instances available, or if the user is reaching
/// <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html">Amazon EC2 service limits</a>
/// then additional jobs aren't run until the currently running jobs have completed. This allocation strategy keeps
/// costs lower but can limit scaling. If you are using Spot Fleets with <code>BEST_FIT</code> then the Spot Fleet IAM
/// Role must be specified.</p>
/// </dd>
/// <dt>BEST_FIT_PROGRESSIVE</dt>
/// <dd>
/// <p>Batch will select additional instance types that are large enough to meet the requirements of the jobs in
/// the queue, with a preference for instance types with a lower cost per unit vCPU. If additional instances of the
/// previously selected instance types aren't available, Batch will select new instance types.</p>
/// </dd>
/// <dt>SPOT_CAPACITY_OPTIMIZED</dt>
/// <dd>
/// <p>Batch will select one or more instance types that are large enough to meet the requirements of the jobs in
/// the queue, with a preference for instance types that are less likely to be interrupted. This allocation strategy
/// is only available for Spot Instance compute resources.</p>
/// </dd>
/// </dl>
/// <p>With both <code>BEST_FIT_PROGRESSIVE</code> and <code>SPOT_CAPACITY_OPTIMIZED</code> strategies, Batch might
/// need to go above <code>maxvCpus</code> to meet your capacity requirements. In this event, Batch never exceeds
/// <code>maxvCpus</code> by more than a single instance.</p>
pub fn set_allocation_strategy(
mut self,
input: std::option::Option<crate::model::CrAllocationStrategy>,
) -> Self {
self.allocation_strategy = input;
self
}
/// <p>The minimum number of Amazon EC2 vCPUs that an environment should maintain (even if the compute environment is
/// <code>DISABLED</code>).</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn minv_cpus(mut self, input: i32) -> Self {
self.minv_cpus = Some(input);
self
}
/// <p>The minimum number of Amazon EC2 vCPUs that an environment should maintain (even if the compute environment is
/// <code>DISABLED</code>).</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn set_minv_cpus(mut self, input: std::option::Option<i32>) -> Self {
self.minv_cpus = input;
self
}
/// <p>The maximum number of Amazon EC2 vCPUs that a compute environment can reach.</p>
/// <note>
/// <p>With both <code>BEST_FIT_PROGRESSIVE</code> and <code>SPOT_CAPACITY_OPTIMIZED</code> allocation strategies,
/// Batch might need to exceed <code>maxvCpus</code> to meet your capacity requirements. In this event, Batch never
/// exceeds <code>maxvCpus</code> by more than a single instance. For example, no more than a single instance from among
/// those specified in your compute environment is allocated.</p>
/// </note>
pub fn maxv_cpus(mut self, input: i32) -> Self {
self.maxv_cpus = Some(input);
self
}
/// <p>The maximum number of Amazon EC2 vCPUs that a compute environment can reach.</p>
/// <note>
/// <p>With both <code>BEST_FIT_PROGRESSIVE</code> and <code>SPOT_CAPACITY_OPTIMIZED</code> allocation strategies,
/// Batch might need to exceed <code>maxvCpus</code> to meet your capacity requirements. In this event, Batch never
/// exceeds <code>maxvCpus</code> by more than a single instance. For example, no more than a single instance from among
/// those specified in your compute environment is allocated.</p>
/// </note>
pub fn set_maxv_cpus(mut self, input: std::option::Option<i32>) -> Self {
self.maxv_cpus = input;
self
}
/// <p>The desired number of Amazon EC2 vCPUS in the compute environment. Batch modifies this value between the minimum
/// and maximum values, based on job queue demand.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn desiredv_cpus(mut self, input: i32) -> Self {
self.desiredv_cpus = Some(input);
self
}
/// <p>The desired number of Amazon EC2 vCPUS in the compute environment. Batch modifies this value between the minimum
/// and maximum values, based on job queue demand.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn set_desiredv_cpus(mut self, input: std::option::Option<i32>) -> Self {
self.desiredv_cpus = input;
self
}
/// Appends an item to `instance_types`.
///
/// To override the contents of this collection use [`set_instance_types`](Self::set_instance_types).
///
/// <p>The instance types that can be launched. Accepts instance families to launch any instance type within those
/// families (for example, <code>c5</code> or <code>p3</code>), specific sizes within a family (such as
/// <code>c5.8xlarge</code>), or <code>optimal</code> to select instance types (from the C4, M4, and R4 instance
/// families) that match the demand of your job queues.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.</p>
/// </note>
/// <note>
/// <p>All instance types selected for a compute environment must share the same architecture; x86 and ARM
/// instances can't be mixed in the same environment.</p>
/// </note>
/// <note>
/// <p>In Regions that don't have instance types from the C4, M4, and R4 instance families, <code>optimal</code>
/// uses instance types from the C5, M5, and R5 instance families instead.</p>
/// </note>
pub fn instance_types(mut self, input: impl Into<std::string::String>) -> Self {
    // Lazily create the backing Vec on first append, then push the new entry.
    self.instance_types
        .get_or_insert_with(Vec::new)
        .push(input.into());
    self
}
/// <p>The instances types that can be launched. You can specify instance families to launch any instance type within
/// those families (for example, <code>c5</code> or <code>p3</code>), or you can specify specific sizes within a family
/// (such as <code>c5.8xlarge</code>). You can also choose <code>optimal</code> to select instance types (from the C4,
/// M4, and R4 instance families) that match the demand of your job queues.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
/// <note>
/// <p>When you create a compute environment, the instance types that you select for the compute environment must
/// share the same architecture. For example, you can't mix x86 and ARM instances in the same compute
/// environment.</p>
/// </note>
/// <note>
/// <p>Currently, <code>optimal</code> uses instance types from the C4, M4, and R4 instance families. In Regions that
/// don't have instance types from those instance families, instance types from the C5, M5. and R5 instance families are
/// used.</p>
/// </note>
pub fn set_instance_types(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.instance_types = input;
self
}
/// <p>The Amazon Machine Image (AMI) ID used for instances launched in the compute environment. This parameter is
/// overridden by the <code>imageIdOverride</code> member of the <code>Ec2Configuration</code> structure.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
/// <note>
/// <p>The AMI that you choose for a compute environment must match the architecture of the instance types that
/// you intend to use for that compute environment. For example, if your compute environment uses A1 instance types,
/// the compute resource AMI that you choose must support ARM instances. Amazon ECS vends both x86 and ARM versions of the
/// Amazon ECS-optimized Amazon Linux 2 AMI. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#ecs-optimized-ami-linux-variants.html">Amazon ECS-optimized
/// Amazon Linux 2 AMI</a>
/// in the <i>Amazon Elastic Container Service Developer Guide</i>.</p>
/// </note>
pub fn image_id(mut self, input: impl Into<std::string::String>) -> Self {
self.image_id = Some(input.into());
self
}
/// <p>The Amazon Machine Image (AMI) ID used for instances launched in the compute environment. This parameter is
/// overridden by the <code>imageIdOverride</code> member of the <code>Ec2Configuration</code> structure.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
/// <note>
/// <p>The AMI that you choose for a compute environment must match the architecture of the instance types that
/// you intend to use for that compute environment. For example, if your compute environment uses A1 instance types,
/// the compute resource AMI that you choose must support ARM instances. Amazon ECS vends both x86 and ARM versions of the
/// Amazon ECS-optimized Amazon Linux 2 AMI. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#ecs-optimized-ami-linux-variants.html">Amazon ECS-optimized
/// Amazon Linux 2 AMI</a>
/// in the <i>Amazon Elastic Container Service Developer Guide</i>.</p>
/// </note>
pub fn set_image_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.image_id = input;
self
}
/// Appends an item to `subnets`.
///
/// To override the contents of this collection use [`set_subnets`](Self::set_subnets).
///
/// <p>The VPC subnets where the compute resources are launched. All subnets must be within the same VPC. Fargate
/// compute resources can contain up to 16 subnets. For more information, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html">VPCs and Subnets</a> in the
/// <i>Amazon VPC User Guide</i>.</p>
pub fn subnets(mut self, input: impl Into<std::string::String>) -> Self {
    // Lazily create the backing Vec on first append, then push the new entry.
    self.subnets.get_or_insert_with(Vec::new).push(input.into());
    self
}
/// <p>The VPC subnets where the compute resources are launched. These subnets must be within the same VPC. Fargate
/// compute resources can contain up to 16 subnets. For more information, see <a href="https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html">VPCs and Subnets</a> in the <i>Amazon VPC User
/// Guide</i>.</p>
pub fn set_subnets(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.subnets = input;
self
}
/// Appends an item to `security_group_ids`.
///
/// To override the contents of this collection use [`set_security_group_ids`](Self::set_security_group_ids).
///
/// <p>The Amazon EC2 security groups associated with instances launched in the compute environment. One or more
/// security groups must be specified, either in <code>securityGroupIds</code> or using a launch template referenced
/// in <code>launchTemplate</code>. This parameter is required for jobs running on Fargate resources, which must have
/// at least one security group and don't support launch templates. If security groups are specified in both places,
/// the values in <code>securityGroupIds</code> are used.</p>
pub fn security_group_ids(mut self, input: impl Into<std::string::String>) -> Self {
    // Lazily create the backing Vec on first append, then push the new entry.
    self.security_group_ids
        .get_or_insert_with(Vec::new)
        .push(input.into());
    self
}
/// <p>The Amazon EC2 security groups associated with instances launched in the compute environment. One or more security
/// groups must be specified, either in <code>securityGroupIds</code> or using a launch template referenced in
/// <code>launchTemplate</code>. This parameter is required for jobs that are running on Fargate resources and must
/// contain at least one security group. Fargate doesn't support launch templates. If security groups are specified
/// using both <code>securityGroupIds</code> and <code>launchTemplate</code>, the values in <code>securityGroupIds</code>
/// are used.</p>
pub fn set_security_group_ids(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.security_group_ids = input;
self
}
/// <p>The Amazon EC2 key pair that's used for instances launched in the compute environment. You can use this key pair to
/// log in to your instances with SSH.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn ec2_key_pair(mut self, input: impl Into<std::string::String>) -> Self {
self.ec2_key_pair = Some(input.into());
self
}
/// <p>The Amazon EC2 key pair that's used for instances launched in the compute environment. You can use this key pair to
/// log in to your instances with SSH.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn set_ec2_key_pair(mut self, input: std::option::Option<std::string::String>) -> Self {
self.ec2_key_pair = input;
self
}
/// <p>The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can specify the short name
/// or full Amazon Resource Name (ARN) of an instance profile. For example,
/// <code>
/// <i>ecsInstanceRole</i>
/// </code> or
/// <code>arn:aws:iam::<i><aws_account_id></i>:instance-profile/<i>ecsInstanceRole</i>
/// </code>.
/// For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/instance_IAM_role.html">Amazon ECS Instance
/// Role</a> in the <i>Batch User Guide</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn instance_role(mut self, input: impl Into<std::string::String>) -> Self {
self.instance_role = Some(input.into());
self
}
/// <p>The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can specify the short name
/// or full Amazon Resource Name (ARN) of an instance profile. For example,
/// <code>
/// <i>ecsInstanceRole</i>
/// </code> or
/// <code>arn:aws:iam::<i><aws_account_id></i>:instance-profile/<i>ecsInstanceRole</i>
/// </code>.
/// For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/instance_IAM_role.html">Amazon ECS Instance
/// Role</a> in the <i>Batch User Guide</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn set_instance_role(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.instance_role = input;
self
}
/// Adds a key-value pair to `tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>Key-value pair tags applied to EC2 resources launched in the compute environment. For Batch, these take the
/// form of "String1": "String2", where String1 is the tag key and String2 is the tag value — for example,
/// <code>{ "Name": "Batch Instance - C4OnDemand" }</code>. This is helpful for recognizing your Batch instances in
/// the Amazon EC2 console. These tags can't be updated or removed after the compute environment is created; any change
/// requires creating a new compute environment and removing the old one. These tags aren't seen when using the Batch
/// <code>ListTagsForResource</code> API operation.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.</p>
/// </note>
pub fn tags(
    mut self,
    k: impl Into<std::string::String>,
    v: impl Into<std::string::String>,
) -> Self {
    // Lazily create the map on first insert; a repeated key overwrites the earlier value.
    self.tags
        .get_or_insert_with(std::collections::HashMap::new)
        .insert(k.into(), v.into());
    self
}
/// <p>Key-value pair tags applied to EC2 resources launched in the compute environment, of the form
/// "String1": "String2" where String1 is the tag key and String2 is the tag value — for example,
/// <code>{ "Name": "Batch Instance - C4OnDemand" }</code>. This is helpful for recognizing your
/// Batch instances in the Amazon EC2 console. These tags can't be updated or removed after the
/// compute environment is created; any change requires creating a new compute environment and
/// removing the old one. These tags aren't seen when using the Batch
/// <code>ListTagsForResource</code> API operation.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn set_tags(
    mut self,
    input: std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
) -> Self {
    // Replaces the whole collection; `None` clears it.
    self.tags = input;
    self
}
/// <p>The Amazon EC2 placement group to associate with your compute resources. If you intend to
/// submit multi-node parallel jobs to your compute environment, consider creating a cluster
/// placement group and associating it with your compute resources: this keeps your multi-node
/// parallel job on a logical grouping of instances within a single Availability Zone with high
/// network flow potential. For more information, see
/// <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html">Placement Groups</a>
/// in the <i>Amazon EC2 User Guide for Linux Instances</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn placement_group(mut self, input: impl Into<std::string::String>) -> Self {
    // Convert eagerly so the stored value is always an owned String.
    let group = input.into();
    self.placement_group = Some(group);
    self
}
/// <p>The Amazon EC2 placement group to associate with your compute resources. If you intend to
/// submit multi-node parallel jobs to your compute environment, consider creating a cluster
/// placement group and associating it with your compute resources: this keeps your multi-node
/// parallel job on a logical grouping of instances within a single Availability Zone with high
/// network flow potential. For more information, see
/// <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html">Placement Groups</a>
/// in the <i>Amazon EC2 User Guide for Linux Instances</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn set_placement_group(mut self, input: std::option::Option<std::string::String>) -> Self {
    // Stores the value as given; `None` clears any previously set group.
    self.placement_group = input;
    self
}
/// <p>The maximum percentage that a Spot Instance price can be when compared with the On-Demand
/// price for that instance type before instances are launched. For example, if your maximum
/// percentage is 20%, the Spot price must be less than 20% of the current On-Demand price for that
/// Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum
/// percentage. If this field is left empty, the default value is 100% of the On-Demand price.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn bid_percentage(mut self, input: i32) -> Self {
    // Equivalent to assigning `Some(input)`; any previous value is dropped.
    self.bid_percentage.replace(input);
    self
}
/// <p>The maximum percentage that a Spot Instance price can be when compared with the On-Demand
/// price for that instance type before instances are launched. For example, if your maximum
/// percentage is 20%, the Spot price must be less than 20% of the current On-Demand price for that
/// Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum
/// percentage. If this field is left empty, the default value is 100% of the On-Demand price.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn set_bid_percentage(mut self, input: std::option::Option<i32>) -> Self {
    // Stores the value as given; `None` clears any previously set percentage.
    self.bid_percentage = input;
    self
}
/// <p>The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a
/// <code>SPOT</code> compute environment. This role is required if the allocation strategy is set
/// to <code>BEST_FIT</code> or isn't specified. For more information, see
/// <a href="https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html">Amazon EC2 Spot Fleet
/// Role</a> in the <i>Batch User Guide</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
/// <important>
/// <p>To tag your Spot Instances on creation, the Spot Fleet IAM role specified here must use the
/// newer <b>AmazonEC2SpotFleetTaggingRole</b> managed policy. The previously recommended
/// <b>AmazonEC2SpotFleetRole</b> managed policy doesn't have the required permissions to tag Spot
/// Instances. For more information, see
/// <a href="https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#spot-instance-no-tag">Spot Instances not tagged on creation</a>
/// in the <i>Batch User Guide</i>.</p>
/// </important>
pub fn spot_iam_fleet_role(mut self, input: impl Into<std::string::String>) -> Self {
    // Convert eagerly so the stored value is always an owned String.
    let role_arn = input.into();
    self.spot_iam_fleet_role = Some(role_arn);
    self
}
/// <p>The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a
/// <code>SPOT</code> compute environment. This role is required if the allocation strategy is set
/// to <code>BEST_FIT</code> or isn't specified. For more information, see
/// <a href="https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html">Amazon EC2 Spot Fleet
/// Role</a> in the <i>Batch User Guide</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
/// <important>
/// <p>To tag your Spot Instances on creation, the Spot Fleet IAM role specified here must use the
/// newer <b>AmazonEC2SpotFleetTaggingRole</b> managed policy. The previously recommended
/// <b>AmazonEC2SpotFleetRole</b> managed policy doesn't have the required permissions to tag Spot
/// Instances. For more information, see
/// <a href="https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#spot-instance-no-tag">Spot Instances not tagged on creation</a>
/// in the <i>Batch User Guide</i>.</p>
/// </important>
pub fn set_spot_iam_fleet_role(mut self, input: std::option::Option<std::string::String>) -> Self {
    // Stores the value as given; `None` clears any previously set role ARN.
    self.spot_iam_fleet_role = input;
    self
}
/// <p>The launch template to use for your compute resources. Any other compute resource parameters
/// that you specify in a <a>CreateComputeEnvironment</a> API operation override the same parameters
/// in the launch template. You must specify either the launch template ID or launch template name
/// in the request, but not both. For more information, see
/// <a href="https://docs.aws.amazon.com/batch/latest/userguide/launch-templates.html">Launch Template Support</a>
/// in the <i>Batch User Guide</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn launch_template(mut self, input: crate::model::LaunchTemplateSpecification) -> Self {
    // Equivalent to assigning `Some(input)`; any previous value is dropped.
    self.launch_template.replace(input);
    self
}
/// <p>The launch template to use for your compute resources. Any other compute resource parameters
/// that you specify in a <a>CreateComputeEnvironment</a> API operation override the same parameters
/// in the launch template. You must specify either the launch template ID or launch template name
/// in the request, but not both. For more information, see
/// <a href="https://docs.aws.amazon.com/batch/latest/userguide/launch-templates.html">Launch Template Support</a>
/// in the <i>Batch User Guide</i>.</p>
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn set_launch_template(
    mut self,
    input: std::option::Option<crate::model::LaunchTemplateSpecification>,
) -> Self {
    // Stores the value as given; `None` clears any previously set template.
    self.launch_template = input;
    self
}
/// Appends an item to `ec2_configuration`.
///
/// To override the contents of this collection use [`set_ec2_configuration`](Self::set_ec2_configuration).
///
/// <p>Provides information used to select Amazon Machine Images (AMIs) for EC2 instances in the
/// compute environment. If <code>Ec2Configuration</code> isn't specified, the default is
/// <code>ECS_AL2</code>.</p>
///
/// <p>One or two values can be provided.</p>
///
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn ec2_configuration(mut self, input: impl Into<crate::model::Ec2Configuration>) -> Self {
    // Lazily create the vector on first append, then push in place.
    self.ec2_configuration
        .get_or_insert_with(std::vec::Vec::new)
        .push(input.into());
    self
}
/// <p>Provides information used to select Amazon Machine Images (AMIs) for EC2 instances in the
/// compute environment. If <code>Ec2Configuration</code> isn't specified, the default is
/// <code>ECS_AL2</code>.</p>
///
/// <p>One or two values can be provided.</p>
///
/// <note>
/// <p>This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be
/// specified.</p>
/// </note>
pub fn set_ec2_configuration(
    mut self,
    input: std::option::Option<std::vec::Vec<crate::model::Ec2Configuration>>,
) -> Self {
    // Replaces the whole collection; `None` clears it.
    self.ec2_configuration = input;
    self
}
/// Consumes the builder and constructs a [`ComputeResource`](crate::model::ComputeResource)
pub fn build(self) -> crate::model::ComputeResource {
    crate::model::ComputeResource {
        r#type: self.r#type,
        allocation_strategy: self.allocation_strategy,
        // The numeric vCPU fields fall back to 0 (the i32 default) when unset.
        minv_cpus: self.minv_cpus.unwrap_or(0),
        maxv_cpus: self.maxv_cpus.unwrap_or(0),
        desiredv_cpus: self.desiredv_cpus.unwrap_or(0),
        instance_types: self.instance_types,
        image_id: self.image_id,
        subnets: self.subnets,
        security_group_ids: self.security_group_ids,
        ec2_key_pair: self.ec2_key_pair,
        instance_role: self.instance_role,
        tags: self.tags,
        placement_group: self.placement_group,
        // Falls back to 0 (the i32 default) when unset.
        bid_percentage: self.bid_percentage.unwrap_or(0),
        spot_iam_fleet_role: self.spot_iam_fleet_role,
        launch_template: self.launch_template,
        ec2_configuration: self.ec2_configuration,
    }
}
}
}
impl ComputeResource {
    /// Creates a new builder-style object to manufacture [`ComputeResource`](crate::model::ComputeResource)
    pub fn builder() -> crate::model::compute_resource::Builder {
        // The return type pins the concrete builder, so `Default::default()` suffices.
        std::default::Default::default()
    }
}
/// <p>Provides information used to select Amazon Machine Images (AMIs) for instances in the compute
/// environment. If <code>Ec2Configuration</code> isn't specified, the default is
/// <code>ECS_AL2</code>
/// (<a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami">Amazon Linux 2</a>).</p>
/// <note>
/// <p>This object isn't applicable to jobs that are running on Fargate resources.</p>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Ec2Configuration {
    /// <p>The image type to match with the instance type to select an AMI. If the
    /// <code>imageIdOverride</code> parameter isn't specified, a recent
    /// <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami">Amazon ECS-optimized Amazon Linux 2 AMI</a>
    /// (<code>ECS_AL2</code>) is used.</p>
    /// <dl>
    /// <dt>ECS_AL2</dt>
    /// <dd>
    /// <p><a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami">Amazon Linux 2</a>:
    /// default for all non-GPU instance families.</p>
    /// </dd>
    /// <dt>ECS_AL2_NVIDIA</dt>
    /// <dd>
    /// <p><a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#gpuami">Amazon Linux 2 (GPU)</a>:
    /// default for all GPU instance families (for example <code>P4</code> and <code>G4</code>), and
    /// can be used for all non Amazon Web Services Graviton-based instance types.</p>
    /// </dd>
    /// <dt>ECS_AL1</dt>
    /// <dd>
    /// <p><a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#alami">Amazon Linux</a>:
    /// Amazon Linux is reaching the end-of-life of standard support. For more information, see
    /// <a href="http://aws.amazon.com/amazon-linux-ami/">Amazon Linux AMI</a>.</p>
    /// </dd>
    /// </dl>
    pub image_type: std::option::Option<std::string::String>,
    /// <p>The AMI ID used for instances launched in the compute environment that match the image
    /// type. This setting overrides the <code>imageId</code> set in the
    /// <code>computeResource</code> object.</p>
    pub image_id_override: std::option::Option<std::string::String>,
}
impl Ec2Configuration {
    /// <p>The image type to match with the instance type to select an AMI. If the
    /// <code>imageIdOverride</code> parameter isn't specified, a recent
    /// <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami">Amazon ECS-optimized Amazon Linux 2 AMI</a>
    /// (<code>ECS_AL2</code>) is used. See the field documentation for the supported values
    /// (<code>ECS_AL2</code>, <code>ECS_AL2_NVIDIA</code>, <code>ECS_AL1</code>).</p>
    pub fn image_type(&self) -> std::option::Option<&str> {
        // Borrowing accessor: equivalent to `as_deref()`.
        self.image_type.as_ref().map(|s| s.as_str())
    }
    /// <p>The AMI ID used for instances launched in the compute environment that match the image
    /// type. This setting overrides the <code>imageId</code> set in the
    /// <code>computeResource</code> object.</p>
    pub fn image_id_override(&self) -> std::option::Option<&str> {
        // Borrowing accessor: equivalent to `as_deref()`.
        self.image_id_override.as_ref().map(|s| s.as_str())
    }
}
impl std::fmt::Debug for Ec2Configuration {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Ec2Configuration")
            .field("image_type", &self.image_type)
            .field("image_id_override", &self.image_id_override)
            .finish()
    }
}
/// See [`Ec2Configuration`](crate::model::Ec2Configuration)
pub mod ec2_configuration {
    /// A builder for [`Ec2Configuration`](crate::model::Ec2Configuration)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) image_type: std::option::Option<std::string::String>,
        pub(crate) image_id_override: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The image type to match with the instance type to select an AMI. If the
        /// <code>imageIdOverride</code> parameter isn't specified, a recent
        /// <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami">Amazon ECS-optimized Amazon Linux 2 AMI</a>
        /// (<code>ECS_AL2</code>) is used. Supported values are <code>ECS_AL2</code> (default for
        /// all non-GPU instance families), <code>ECS_AL2_NVIDIA</code> (default for all GPU
        /// instance families, for example <code>P4</code> and <code>G4</code>, and usable for all
        /// non Amazon Web Services Graviton-based instance types), and <code>ECS_AL1</code>
        /// (Amazon Linux, which is reaching the end-of-life of standard support — see
        /// <a href="http://aws.amazon.com/amazon-linux-ami/">Amazon Linux AMI</a>).</p>
        pub fn image_type(mut self, input: impl Into<std::string::String>) -> Self {
            // Convert eagerly so the stored value is always an owned String.
            let image_type = input.into();
            self.image_type = Some(image_type);
            self
        }
        /// <p>The image type to match with the instance type to select an AMI. If the
        /// <code>imageIdOverride</code> parameter isn't specified, a recent
        /// <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami">Amazon ECS-optimized Amazon Linux 2 AMI</a>
        /// (<code>ECS_AL2</code>) is used. See [`image_type`](Self::image_type) for the supported
        /// values.</p>
        pub fn set_image_type(mut self, input: std::option::Option<std::string::String>) -> Self {
            // Stores the value as given; `None` clears it.
            self.image_type = input;
            self
        }
        /// <p>The AMI ID used for instances launched in the compute environment that match the
        /// image type. This setting overrides the <code>imageId</code> set in the
        /// <code>computeResource</code> object.</p>
        pub fn image_id_override(mut self, input: impl Into<std::string::String>) -> Self {
            // Convert eagerly so the stored value is always an owned String.
            let ami_id = input.into();
            self.image_id_override = Some(ami_id);
            self
        }
        /// <p>The AMI ID used for instances launched in the compute environment that match the
        /// image type. This setting overrides the <code>imageId</code> set in the
        /// <code>computeResource</code> object.</p>
        pub fn set_image_id_override(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            // Stores the value as given; `None` clears it.
            self.image_id_override = input;
            self
        }
        /// Consumes the builder and constructs a [`Ec2Configuration`](crate::model::Ec2Configuration)
        pub fn build(self) -> crate::model::Ec2Configuration {
            crate::model::Ec2Configuration {
                image_type: self.image_type,
                image_id_override: self.image_id_override,
            }
        }
    }
}
impl Ec2Configuration {
    /// Creates a new builder-style object to manufacture [`Ec2Configuration`](crate::model::Ec2Configuration)
    pub fn builder() -> crate::model::ec2_configuration::Builder {
        // The return type pins the concrete builder, so `Default::default()` suffices.
        std::default::Default::default()
    }
}
/// <p>An object representing a launch template associated with a compute resource. You must specify
/// either the launch template ID or launch template name in the request, but not both.</p>
/// <p>If security groups are specified using both the <code>securityGroupIds</code> parameter of
/// <code>CreateComputeEnvironment</code> and the launch template, the values in the
/// <code>securityGroupIds</code> parameter of <code>CreateComputeEnvironment</code> will be used.</p>
/// <note>
/// <p>This object isn't applicable to jobs that are running on Fargate resources.</p>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct LaunchTemplateSpecification {
    /// <p>The ID of the launch template.</p>
    pub launch_template_id: std::option::Option<std::string::String>,
    /// <p>The name of the launch template.</p>
    pub launch_template_name: std::option::Option<std::string::String>,
    /// <p>The version number of the launch template, <code>$Latest</code>, or
    /// <code>$Default</code>. <code>$Latest</code> selects the latest version of the launch
    /// template; <code>$Default</code> selects its default version.</p>
    /// <important>
    /// <p>After the compute environment is created, the launch template version that's used isn't
    /// changed, even if the <code>$Default</code> or <code>$Latest</code> version for the launch
    /// template is updated. To use a new launch template version, create a new compute environment,
    /// add the new compute environment to the existing job queue, remove the old compute
    /// environment from the job queue, and delete the old compute environment.</p>
    /// </important>
    /// <p>Default: <code>$Default</code>.</p>
    pub version: std::option::Option<std::string::String>,
}
impl LaunchTemplateSpecification {
    /// <p>The ID of the launch template.</p>
    pub fn launch_template_id(&self) -> std::option::Option<&str> {
        // Borrowing accessor: equivalent to `as_deref()`.
        self.launch_template_id.as_ref().map(|s| s.as_str())
    }
    /// <p>The name of the launch template.</p>
    pub fn launch_template_name(&self) -> std::option::Option<&str> {
        // Borrowing accessor: equivalent to `as_deref()`.
        self.launch_template_name.as_ref().map(|s| s.as_str())
    }
    /// <p>The version number of the launch template, <code>$Latest</code>, or
    /// <code>$Default</code>. See the field documentation for how the version is resolved and why
    /// it's pinned after the compute environment is created.</p>
    /// <p>Default: <code>$Default</code>.</p>
    pub fn version(&self) -> std::option::Option<&str> {
        // Borrowing accessor: equivalent to `as_deref()`.
        self.version.as_ref().map(|s| s.as_str())
    }
}
impl std::fmt::Debug for LaunchTemplateSpecification {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("LaunchTemplateSpecification")
            .field("launch_template_id", &self.launch_template_id)
            .field("launch_template_name", &self.launch_template_name)
            .field("version", &self.version)
            .finish()
    }
}
/// See [`LaunchTemplateSpecification`](crate::model::LaunchTemplateSpecification)
pub mod launch_template_specification {
    /// A builder for [`LaunchTemplateSpecification`](crate::model::LaunchTemplateSpecification)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) launch_template_id: std::option::Option<std::string::String>,
        pub(crate) launch_template_name: std::option::Option<std::string::String>,
        pub(crate) version: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The ID of the launch template.</p>
        pub fn launch_template_id(mut self, input: impl Into<std::string::String>) -> Self {
            // Convert eagerly so the stored value is always an owned String.
            let id = input.into();
            self.launch_template_id = Some(id);
            self
        }
        /// <p>The ID of the launch template.</p>
        pub fn set_launch_template_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            // Stores the value as given; `None` clears it.
            self.launch_template_id = input;
            self
        }
        /// <p>The name of the launch template.</p>
        pub fn launch_template_name(mut self, input: impl Into<std::string::String>) -> Self {
            // Convert eagerly so the stored value is always an owned String.
            let name = input.into();
            self.launch_template_name = Some(name);
            self
        }
        /// <p>The name of the launch template.</p>
        pub fn set_launch_template_name(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            // Stores the value as given; `None` clears it.
            self.launch_template_name = input;
            self
        }
        /// <p>The version number of the launch template, <code>$Latest</code>, or
        /// <code>$Default</code>. <code>$Latest</code> selects the latest version of the launch
        /// template; <code>$Default</code> selects its default version.</p>
        /// <important>
        /// <p>After the compute environment is created, the launch template version that's used
        /// isn't changed, even if the <code>$Default</code> or <code>$Latest</code> version for the
        /// launch template is updated. To use a new launch template version, create a new compute
        /// environment, add the new compute environment to the existing job queue, remove the old
        /// compute environment from the job queue, and delete the old compute environment.</p>
        /// </important>
        /// <p>Default: <code>$Default</code>.</p>
        pub fn version(mut self, input: impl Into<std::string::String>) -> Self {
            // Convert eagerly so the stored value is always an owned String.
            let version = input.into();
            self.version = Some(version);
            self
        }
        /// <p>The version number of the launch template, <code>$Latest</code>, or
        /// <code>$Default</code>. See [`version`](Self::version) for resolution details.</p>
        /// <p>Default: <code>$Default</code>.</p>
        pub fn set_version(mut self, input: std::option::Option<std::string::String>) -> Self {
            // Stores the value as given; `None` clears it.
            self.version = input;
            self
        }
        /// Consumes the builder and constructs a [`LaunchTemplateSpecification`](crate::model::LaunchTemplateSpecification)
        pub fn build(self) -> crate::model::LaunchTemplateSpecification {
            crate::model::LaunchTemplateSpecification {
                launch_template_id: self.launch_template_id,
                launch_template_name: self.launch_template_name,
                version: self.version,
            }
        }
    }
}
impl LaunchTemplateSpecification {
    /// Creates a new builder-style object to manufacture [`LaunchTemplateSpecification`](crate::model::LaunchTemplateSpecification)
    pub fn builder() -> crate::model::launch_template_specification::Builder {
        // The return type pins the concrete builder, so `Default::default()` suffices.
        std::default::Default::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum CrAllocationStrategy {
    #[allow(missing_docs)] // documentation missing in model
    BestFit,
    #[allow(missing_docs)] // documentation missing in model
    BestFitProgressive,
    #[allow(missing_docs)] // documentation missing in model
    SpotCapacityOptimized,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for CrAllocationStrategy {
    // Unrecognized wire values land in `Unknown` so newer service variants don't fail to parse.
    fn from(s: &str) -> Self {
        match s {
            "BEST_FIT" => Self::BestFit,
            "BEST_FIT_PROGRESSIVE" => Self::BestFitProgressive,
            "SPOT_CAPACITY_OPTIMIZED" => Self::SpotCapacityOptimized,
            other => Self::Unknown(other.to_owned()),
        }
    }
}
impl std::str::FromStr for CrAllocationStrategy {
    // Parsing never fails: every string maps to a variant (possibly `Unknown`).
    type Err = std::convert::Infallible;
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(Self::from(s))
    }
}
impl CrAllocationStrategy {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::BestFit => "BEST_FIT",
            Self::BestFitProgressive => "BEST_FIT_PROGRESSIVE",
            Self::SpotCapacityOptimized => "SPOT_CAPACITY_OPTIMIZED",
            Self::Unknown(s) => s.as_ref(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub fn values() -> &'static [&'static str] {
        &["BEST_FIT", "BEST_FIT_PROGRESSIVE", "SPOT_CAPACITY_OPTIMIZED"]
    }
}
impl AsRef<str> for CrAllocationStrategy {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum CrType {
    #[allow(missing_docs)] // documentation missing in model
    Ec2,
    #[allow(missing_docs)] // documentation missing in model
    Fargate,
    #[allow(missing_docs)] // documentation missing in model
    FargateSpot,
    #[allow(missing_docs)] // documentation missing in model
    Spot,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for CrType {
    // Unrecognized wire values land in `Unknown` so newer service variants don't fail to parse.
    fn from(s: &str) -> Self {
        match s {
            "EC2" => Self::Ec2,
            "FARGATE" => Self::Fargate,
            "FARGATE_SPOT" => Self::FargateSpot,
            "SPOT" => Self::Spot,
            other => Self::Unknown(other.to_owned()),
        }
    }
}
impl std::str::FromStr for CrType {
    // Parsing never fails: every string maps to a variant (possibly `Unknown`).
    type Err = std::convert::Infallible;
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(Self::from(s))
    }
}
impl CrType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Ec2 => "EC2",
            Self::Fargate => "FARGATE",
            Self::FargateSpot => "FARGATE_SPOT",
            Self::Spot => "SPOT",
            Self::Unknown(s) => s.as_ref(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub fn values() -> &'static [&'static str] {
        &["EC2", "FARGATE", "FARGATE_SPOT", "SPOT"]
    }
}
impl AsRef<str> for CrType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum CeStatus {
    #[allow(missing_docs)] // documentation missing in model
    Creating,
    #[allow(missing_docs)] // documentation missing in model
    Deleted,
    #[allow(missing_docs)] // documentation missing in model
    Deleting,
    #[allow(missing_docs)] // documentation missing in model
    Invalid,
    #[allow(missing_docs)] // documentation missing in model
    Updating,
    #[allow(missing_docs)] // documentation missing in model
    Valid,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for CeStatus {
    // Unrecognized wire values land in `Unknown` so newer service variants don't fail to parse.
    fn from(s: &str) -> Self {
        match s {
            "CREATING" => Self::Creating,
            "DELETED" => Self::Deleted,
            "DELETING" => Self::Deleting,
            "INVALID" => Self::Invalid,
            "UPDATING" => Self::Updating,
            "VALID" => Self::Valid,
            other => Self::Unknown(other.to_owned()),
        }
    }
}
impl std::str::FromStr for CeStatus {
    // Parsing never fails: every string maps to a variant (possibly `Unknown`).
    type Err = std::convert::Infallible;
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(Self::from(s))
    }
}
impl CeStatus {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Creating => "CREATING",
            Self::Deleted => "DELETED",
            Self::Deleting => "DELETING",
            Self::Invalid => "INVALID",
            Self::Updating => "UPDATING",
            Self::Valid => "VALID",
            Self::Unknown(s) => s.as_ref(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub fn values() -> &'static [&'static str] {
        &[
            "CREATING", "DELETED", "DELETING", "INVALID", "UPDATING", "VALID",
        ]
    }
}
impl AsRef<str> for CeStatus {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum CeType {
    #[allow(missing_docs)] // documentation missing in model
    Managed,
    #[allow(missing_docs)] // documentation missing in model
    Unmanaged,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for CeType {
    // Unrecognized wire values land in `Unknown` so newer service variants don't fail to parse.
    fn from(s: &str) -> Self {
        match s {
            "MANAGED" => Self::Managed,
            "UNMANAGED" => Self::Unmanaged,
            other => Self::Unknown(other.to_owned()),
        }
    }
}
impl std::str::FromStr for CeType {
    // Parsing never fails: every string maps to a variant (possibly `Unknown`).
    type Err = std::convert::Infallible;
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(Self::from(s))
    }
}
impl CeType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Managed => "MANAGED",
            Self::Unmanaged => "UNMANAGED",
            Self::Unknown(s) => s.as_ref(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub fn values() -> &'static [&'static str] {
        &["MANAGED", "UNMANAGED"]
    }
}
impl AsRef<str> for CeType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}