Struct aws_sdk_batch::model::compute_resource::Builder

pub struct Builder { /* private fields */ }

A builder for ComputeResource.

Implementations

impl Builder
pub fn r#type(self, input: CrType) -> Self

The type of compute environment: EC2, SPOT, FARGATE, or FARGATE_SPOT. For more information, see Compute environments in the Batch User Guide.

If you choose SPOT, you must also specify an Amazon EC2 Spot Fleet role with the spotIamFleetRole parameter. For more information, see Amazon EC2 spot fleet role in the Batch User Guide.
pub fn set_type(self, input: Option<CrType>) -> Self

The type of compute environment: EC2, SPOT, FARGATE, or FARGATE_SPOT. For more information, see Compute environments in the Batch User Guide.

If you choose SPOT, you must also specify an Amazon EC2 Spot Fleet role with the spotIamFleetRole parameter. For more information, see Amazon EC2 spot fleet role in the Batch User Guide.
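For illustration only (this snippet is not part of the generated documentation), a SPOT compute environment could be assembled with this builder roughly as follows; the role ARN, the subnet ID, and the CrType variant names are assumptions based on the SDK's usual naming:

use aws_sdk_batch::model::{ComputeResource, CrType};

// Sketch: a SPOT compute environment must also name a Spot Fleet role.
let spot_resource = ComputeResource::builder()
    .r#type(CrType::Spot)
    // Hypothetical ARN, shown only to illustrate the spotIamFleetRole parameter.
    .spot_iam_fleet_role("arn:aws:iam::123456789012:role/my-spot-fleet-role")
    .maxv_cpus(64)
    .subnets("subnet-0a1b2c3d")
    .build();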
Examples found in repository (this generated deserializer calls each of the set_* methods shown on this page):
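// Generated JSON deserializer for ComputeResource: it walks the JSON token
// stream, maps each object key to the matching set_* method on the builder,
// and finally calls build() to produce the model value.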
pub(crate) fn deser_structure_crate_model_compute_resource<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<
Option<crate::model::ComputeResource>,
aws_smithy_json::deserialize::error::DeserializeError,
>
where
I: Iterator<
Item = Result<
aws_smithy_json::deserialize::Token<'a>,
aws_smithy_json::deserialize::error::DeserializeError,
>,
>,
{
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => {
#[allow(unused_mut)]
let mut builder = crate::model::compute_resource::Builder::default();
loop {
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"type" => {
builder = builder.set_type(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| {
s.to_unescaped()
.map(|u| crate::model::CrType::from(u.as_ref()))
})
.transpose()?,
);
}
"allocationStrategy" => {
builder = builder.set_allocation_strategy(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| {
s.to_unescaped().map(|u| {
crate::model::CrAllocationStrategy::from(u.as_ref())
})
})
.transpose()?,
);
}
"minvCpus" => {
builder = builder.set_minv_cpus(
aws_smithy_json::deserialize::token::expect_number_or_null(
tokens.next(),
)?
.map(i32::try_from)
.transpose()?,
);
}
"maxvCpus" => {
builder = builder.set_maxv_cpus(
aws_smithy_json::deserialize::token::expect_number_or_null(
tokens.next(),
)?
.map(i32::try_from)
.transpose()?,
);
}
"desiredvCpus" => {
builder = builder.set_desiredv_cpus(
aws_smithy_json::deserialize::token::expect_number_or_null(
tokens.next(),
)?
.map(i32::try_from)
.transpose()?,
);
}
"instanceTypes" => {
builder = builder.set_instance_types(
crate::json_deser::deser_list_com_amazonaws_batch_string_list(
tokens,
)?,
);
}
"imageId" => {
builder = builder.set_image_id(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"subnets" => {
builder = builder.set_subnets(
crate::json_deser::deser_list_com_amazonaws_batch_string_list(
tokens,
)?,
);
}
"securityGroupIds" => {
builder = builder.set_security_group_ids(
crate::json_deser::deser_list_com_amazonaws_batch_string_list(
tokens,
)?,
);
}
"ec2KeyPair" => {
builder = builder.set_ec2_key_pair(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"instanceRole" => {
builder = builder.set_instance_role(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"tags" => {
builder = builder.set_tags(
crate::json_deser::deser_map_com_amazonaws_batch_tags_map(
tokens,
)?,
);
}
"placementGroup" => {
builder = builder.set_placement_group(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"bidPercentage" => {
builder = builder.set_bid_percentage(
aws_smithy_json::deserialize::token::expect_number_or_null(
tokens.next(),
)?
.map(i32::try_from)
.transpose()?,
);
}
"spotIamFleetRole" => {
builder = builder.set_spot_iam_fleet_role(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"launchTemplate" => {
builder = builder.set_launch_template(
crate::json_deser::deser_structure_crate_model_launch_template_specification(tokens)?
);
}
"ec2Configuration" => {
builder = builder.set_ec2_configuration(
crate::json_deser::deser_list_com_amazonaws_batch_ec2_configuration_list(tokens)?
);
}
_ => aws_smithy_json::deserialize::token::skip_value(tokens)?,
}
}
other => {
return Err(
aws_smithy_json::deserialize::error::DeserializeError::custom(format!(
"expected object key or end object, found: {:?}",
other
)),
)
}
}
}
Ok(Some(builder.build()))
}
_ => Err(
aws_smithy_json::deserialize::error::DeserializeError::custom(
"expected start object or null",
),
),
}
}
pub fn allocation_strategy(self, input: CrAllocationStrategy) -> Self

The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

- BEST_FIT (default): Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, Batch waits for the additional instances to be available. If there aren't enough instances available, or if the user is reaching Amazon EC2 service limits, then additional jobs aren't run until the currently running jobs have completed. This allocation strategy keeps costs lower but can limit scaling. If you are using Spot Fleets with BEST_FIT, then the Spot Fleet IAM Role must be specified. Compute resources that use a BEST_FIT allocation strategy don't support infrastructure updates and can't update some parameters. For more information, see Updating compute environments in the Batch User Guide.
- BEST_FIT_PROGRESSIVE: Batch will select additional instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types with a lower cost per unit vCPU. If additional instances of the previously selected instance types aren't available, Batch will select new instance types.
- SPOT_CAPACITY_OPTIMIZED: Batch will select one or more instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED strategies, Batch might need to go above maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.
pub fn set_allocation_strategy(self, input: Option<CrAllocationStrategy>) -> Self

The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

- BEST_FIT (default): Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, Batch waits for the additional instances to be available. If there aren't enough instances available, or if the user is reaching Amazon EC2 service limits, then additional jobs aren't run until the currently running jobs have completed. This allocation strategy keeps costs lower but can limit scaling. If you are using Spot Fleets with BEST_FIT, then the Spot Fleet IAM Role must be specified. Compute resources that use a BEST_FIT allocation strategy don't support infrastructure updates and can't update some parameters. For more information, see Updating compute environments in the Batch User Guide.
- BEST_FIT_PROGRESSIVE: Batch will select additional instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types with a lower cost per unit vCPU. If additional instances of the previously selected instance types aren't available, Batch will select new instance types.
- SPOT_CAPACITY_OPTIMIZED: Batch will select one or more instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED strategies, Batch might need to go above maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.
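A minimal sketch (not from the original page) contrasting the fluent setter with its set_ counterpart; CrAllocationStrategy::BestFitProgressive is an assumed variant name for the BEST_FIT_PROGRESSIVE value:

use aws_sdk_batch::model::{ComputeResource, CrAllocationStrategy};

// Sketch: the fluent setter takes the value directly, while set_allocation_strategy
// takes an Option, which is handy when the value is only conditionally present.
let maybe_strategy: Option<CrAllocationStrategy> =
    Some(CrAllocationStrategy::BestFitProgressive);
let resource = ComputeResource::builder()
    .set_allocation_strategy(maybe_strategy)
    .maxv_cpus(128)
    .build();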
pub fn minv_cpus(self, input: i32) -> Self

The minimum number of Amazon EC2 vCPUs that an environment should maintain (even if the compute environment is DISABLED).

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

pub fn set_minv_cpus(self, input: Option<i32>) -> Self

The minimum number of Amazon EC2 vCPUs that an environment should maintain (even if the compute environment is DISABLED).

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
pub fn maxv_cpus(self, input: i32) -> Self

The maximum number of Amazon EC2 vCPUs that a compute environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. For example, no more than a single instance from among those specified in your compute environment is allocated.

pub fn set_maxv_cpus(self, input: Option<i32>) -> Self

The maximum number of Amazon EC2 vCPUs that a compute environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. For example, no more than a single instance from among those specified in your compute environment is allocated.
pub fn desiredv_cpus(self, input: i32) -> Self

The desired number of Amazon EC2 vCPUs in the compute environment. Batch modifies this value between the minimum and maximum values, based on job queue demand.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

pub fn set_desiredv_cpus(self, input: Option<i32>) -> Self

The desired number of Amazon EC2 vCPUs in the compute environment. Batch modifies this value between the minimum and maximum values, based on job queue demand.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
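A brief sketch (the numbers are arbitrary and CrType::Ec2 is an assumed variant name) showing how the three vCPU settings are typically set together on the builder:

use aws_sdk_batch::model::{ComputeResource, CrType};

// Sketch: Batch keeps capacity between minvCpus and maxvCpus and adjusts
// toward job-queue demand starting from desiredvCpus.
let resource = ComputeResource::builder()
    .r#type(CrType::Ec2)
    .minv_cpus(0)      // allow scaling all the way down when idle
    .maxv_cpus(256)    // ceiling; may be exceeded by at most one instance
    .desiredv_cpus(4)  // starting capacity
    .build();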
pub fn instance_types(self, input: impl Into<String>) -> Self

Appends an item to instance_types.

To override the contents of this collection use set_instance_types.

The instance types that can be launched. You can specify instance families to launch any instance type within those families (for example, c5 or p3), or you can specify specific sizes within a family (such as c5.8xlarge). You can also choose optimal to select instance types (from the C4, M4, and R4 instance families) that match the demand of your job queues.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

When you create a compute environment, the instance types that you select for the compute environment must share the same architecture. For example, you can't mix x86 and ARM instances in the same compute environment.

Currently, optimal uses instance types from the C4, M4, and R4 instance families. In Regions that don't have instance types from those instance families, instance types from the C5, M5, and R5 instance families are used.

pub fn set_instance_types(self, input: Option<Vec<String>>) -> Self

The instance types that can be launched. You can specify instance families to launch any instance type within those families (for example, c5 or p3), or you can specify specific sizes within a family (such as c5.8xlarge). You can also choose optimal to select instance types (from the C4, M4, and R4 instance families) that match the demand of your job queues.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

When you create a compute environment, the instance types that you select for the compute environment must share the same architecture. For example, you can't mix x86 and ARM instances in the same compute environment.

Currently, optimal uses instance types from the C4, M4, and R4 instance families. In Regions that don't have instance types from those instance families, instance types from the C5, M5, and R5 instance families are used.
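As a sketch only, instance_types appends one entry per call while set_instance_types replaces the whole collection:

use aws_sdk_batch::model::ComputeResource;

// Appending entries one at a time...
let appended = ComputeResource::builder()
    .instance_types("c5")         // a whole family
    .instance_types("m5.large");  // or a specific size

// ...or overriding the entire list in one call.
let overridden = ComputeResource::builder()
    .set_instance_types(Some(vec!["optimal".to_string()]));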
Examples found in repository?
2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434
pub(crate) fn deser_structure_crate_model_compute_resource<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<
Option<crate::model::ComputeResource>,
aws_smithy_json::deserialize::error::DeserializeError,
>
where
I: Iterator<
Item = Result<
aws_smithy_json::deserialize::Token<'a>,
aws_smithy_json::deserialize::error::DeserializeError,
>,
>,
{
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => {
#[allow(unused_mut)]
let mut builder = crate::model::compute_resource::Builder::default();
loop {
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"type" => {
builder = builder.set_type(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| {
s.to_unescaped()
.map(|u| crate::model::CrType::from(u.as_ref()))
})
.transpose()?,
);
}
"allocationStrategy" => {
builder = builder.set_allocation_strategy(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| {
s.to_unescaped().map(|u| {
crate::model::CrAllocationStrategy::from(u.as_ref())
})
})
.transpose()?,
);
}
"minvCpus" => {
builder = builder.set_minv_cpus(
aws_smithy_json::deserialize::token::expect_number_or_null(
tokens.next(),
)?
.map(i32::try_from)
.transpose()?,
);
}
"maxvCpus" => {
builder = builder.set_maxv_cpus(
aws_smithy_json::deserialize::token::expect_number_or_null(
tokens.next(),
)?
.map(i32::try_from)
.transpose()?,
);
}
"desiredvCpus" => {
builder = builder.set_desiredv_cpus(
aws_smithy_json::deserialize::token::expect_number_or_null(
tokens.next(),
)?
.map(i32::try_from)
.transpose()?,
);
}
"instanceTypes" => {
builder = builder.set_instance_types(
crate::json_deser::deser_list_com_amazonaws_batch_string_list(
tokens,
)?,
);
}
"imageId" => {
builder = builder.set_image_id(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"subnets" => {
builder = builder.set_subnets(
crate::json_deser::deser_list_com_amazonaws_batch_string_list(
tokens,
)?,
);
}
"securityGroupIds" => {
builder = builder.set_security_group_ids(
crate::json_deser::deser_list_com_amazonaws_batch_string_list(
tokens,
)?,
);
}
"ec2KeyPair" => {
builder = builder.set_ec2_key_pair(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"instanceRole" => {
builder = builder.set_instance_role(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"tags" => {
builder = builder.set_tags(
crate::json_deser::deser_map_com_amazonaws_batch_tags_map(
tokens,
)?,
);
}
"placementGroup" => {
builder = builder.set_placement_group(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"bidPercentage" => {
builder = builder.set_bid_percentage(
aws_smithy_json::deserialize::token::expect_number_or_null(
tokens.next(),
)?
.map(i32::try_from)
.transpose()?,
);
}
"spotIamFleetRole" => {
builder = builder.set_spot_iam_fleet_role(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"launchTemplate" => {
builder = builder.set_launch_template(
crate::json_deser::deser_structure_crate_model_launch_template_specification(tokens)?
);
}
"ec2Configuration" => {
builder = builder.set_ec2_configuration(
crate::json_deser::deser_list_com_amazonaws_batch_ec2_configuration_list(tokens)?
);
}
_ => aws_smithy_json::deserialize::token::skip_value(tokens)?,
}
}
other => {
return Err(
aws_smithy_json::deserialize::error::DeserializeError::custom(format!(
"expected object key or end object, found: {:?}",
other
)),
)
}
}
}
Ok(Some(builder.build()))
}
_ => Err(
aws_smithy_json::deserialize::error::DeserializeError::custom(
"expected start object or null",
),
),
}
}
pub fn image_id(self, input: impl Into<String>) -> Self

Deprecated: This field is deprecated, use ec2Configuration[].imageIdOverride instead.

The Amazon Machine Image (AMI) ID used for instances launched in the compute environment. This parameter is overridden by the imageIdOverride member of the Ec2Configuration structure.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

The AMI that you choose for a compute environment must match the architecture of the instance types that you intend to use for that compute environment. For example, if your compute environment uses A1 instance types, the compute resource AMI that you choose must support ARM instances. Amazon ECS vends both x86 and ARM versions of the Amazon ECS-optimized Amazon Linux 2 AMI. For more information, see Amazon ECS-optimized Amazon Linux 2 AMI in the Amazon Elastic Container Service Developer Guide.

pub fn set_image_id(self, input: Option<String>) -> Self

Deprecated: This field is deprecated, use ec2Configuration[].imageIdOverride instead.

The Amazon Machine Image (AMI) ID used for instances launched in the compute environment. This parameter is overridden by the imageIdOverride member of the Ec2Configuration structure.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

The AMI that you choose for a compute environment must match the architecture of the instance types that you intend to use for that compute environment. For example, if your compute environment uses A1 instance types, the compute resource AMI that you choose must support ARM instances. Amazon ECS vends both x86 and ARM versions of the Amazon ECS-optimized Amazon Linux 2 AMI. For more information, see Amazon ECS-optimized Amazon Linux 2 AMI in the Amazon Elastic Container Service Developer Guide.
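Because image_id is deprecated in favor of ec2Configuration[].imageIdOverride, a hedged sketch of the replacement follows; it assumes the Ec2Configuration builder exposes image_type and image_id_override setters, and the AMI ID is a placeholder:

use aws_sdk_batch::model::{ComputeResource, Ec2Configuration};

// Sketch: set the AMI through Ec2Configuration rather than the deprecated imageId.
let ec2_config = Ec2Configuration::builder()
    .image_type("ECS_AL2")                       // Amazon Linux 2 ECS-optimized AMI family
    .image_id_override("ami-0123456789abcdef0")  // placeholder AMI ID
    .build();

let resource = ComputeResource::builder()
    .ec2_configuration(ec2_config) // appends one Ec2Configuration entry
    .build();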
pub fn subnets(self, input: impl Into<String>) -> Self

Appends an item to subnets.

To override the contents of this collection use set_subnets.

The VPC subnets where the compute resources are launched. These subnets must be within the same VPC. Fargate compute resources can contain up to 16 subnets. For more information, see VPCs and subnets in the Amazon VPC User Guide.

pub fn set_subnets(self, input: Option<Vec<String>>) -> Self

The VPC subnets where the compute resources are launched. These subnets must be within the same VPC. Fargate compute resources can contain up to 16 subnets. For more information, see VPCs and subnets in the Amazon VPC User Guide.
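A minimal sketch of appending subnets (the IDs are placeholders); all subnets must belong to the same VPC:

use aws_sdk_batch::model::ComputeResource;

// Each call appends one subnet; Fargate resources accept up to 16.
let builder = ComputeResource::builder()
    .subnets("subnet-0a1b2c3d")
    .subnets("subnet-4e5f6a7b");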
pub fn security_group_ids(self, input: impl Into<String>) -> Self

Appends an item to security_group_ids.

To override the contents of this collection use set_security_group_ids.

The Amazon EC2 security groups associated with instances launched in the compute environment. One or more security groups must be specified, either in securityGroupIds or using a launch template referenced in launchTemplate. This parameter is required for jobs that are running on Fargate resources and must contain at least one security group. Fargate doesn't support launch templates. If security groups are specified using both securityGroupIds and launchTemplate, the values in securityGroupIds are used.

pub fn set_security_group_ids(self, input: Option<Vec<String>>) -> Self

The Amazon EC2 security groups associated with instances launched in the compute environment. One or more security groups must be specified, either in securityGroupIds or using a launch template referenced in launchTemplate. This parameter is required for jobs that are running on Fargate resources and must contain at least one security group. Fargate doesn't support launch templates. If security groups are specified using both securityGroupIds and launchTemplate, the values in securityGroupIds are used.
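Finally, a sketch of a Fargate compute resource with its networking settings (the IDs and the CrType::Fargate variant name are assumptions); at least one security group is required here because Fargate doesn't support launch templates:

use aws_sdk_batch::model::{ComputeResource, CrType};

// Sketch: if security groups are given both here and in a launch template,
// the values set through securityGroupIds take precedence.
let fargate_resource = ComputeResource::builder()
    .r#type(CrType::Fargate)
    .maxv_cpus(16)
    .subnets("subnet-0a1b2c3d")
    .security_group_ids("sg-0123456789abcdef0")
    .build();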
sourcepub fn ec2_key_pair(self, input: impl Into<String>) -> Self
pub fn ec2_key_pair(self, input: impl Into<String>) -> Self
The Amazon EC2 key pair that's used for instances launched in the compute environment. You can use this key pair to log in to your instances with SSH.
This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
sourcepub fn set_ec2_key_pair(self, input: Option<String>) -> Self
pub fn set_ec2_key_pair(self, input: Option<String>) -> Self
The Amazon EC2 key pair that's used for instances launched in the compute environment. You can use this key pair to log in to your instances with SSH.
This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
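A short sketch with a placeholder key pair name; for a Fargate environment this field would simply be left unset.

use aws_sdk_batch::model::{compute_resource::Builder, ComputeResource};

fn ec2_compute_resource_with_key_pair() -> ComputeResource {
    Builder::default()
        // Placeholder key pair name; only meaningful for EC2/SPOT environments.
        .ec2_key_pair("my-ec2-keypair")
        .build()
}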
sourcepub fn instance_role(self, input: impl Into<String>) -> Self
pub fn instance_role(self, input: impl Into<String>) -> Self
The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, ecsInstanceRole or arn:aws:iam::<aws_account_id>:instance-profile/ecsInstanceRole. For more information, see Amazon ECS instance role in the Batch User Guide.
This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
sourcepub fn set_instance_role(self, input: Option<String>) -> Self
pub fn set_instance_role(self, input: Option<String>) -> Self
The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, ecsInstanceRole or arn:aws:iam::<aws_account_id>:instance-profile/ecsInstanceRole. For more information, see Amazon ECS instance role in the Batch User Guide.
This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
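A sketch showing that either the short profile name or the full instance-profile ARN is accepted; the account ID and role name are placeholders.

use aws_sdk_batch::model::{compute_resource::Builder, ComputeResource};

fn with_instance_role(use_full_arn: bool) -> ComputeResource {
    // Short name or full instance-profile ARN (placeholder values).
    let role = if use_full_arn {
        "arn:aws:iam::123456789012:instance-profile/ecsInstanceRole"
    } else {
        "ecsInstanceRole"
    };
    Builder::default().instance_role(role).build()
}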
Adds a key-value pair to tags. To override the contents of this collection use set_tags.
Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For Batch, these take the form of "String1": "String2", where String1 is the tag key and String2 is the tag value. For example, { "Name": "Batch Instance - C4OnDemand" }. This is helpful for recognizing your Batch instances in the Amazon EC2 console. Updating these tags requires an infrastructure update to the compute environment. For more information, see Updating compute environments in the Batch User Guide. These tags aren't seen when using the Batch ListTagsForResource API operation.
This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For Batch, these take the form of "String1": "String2", where String1 is the tag key and String2 is the tag value. For example, { "Name": "Batch Instance - C4OnDemand" }. This is helpful for recognizing your Batch instances in the Amazon EC2 console. Updating these tags requires an infrastructure update to the compute environment. For more information, see Updating compute environments in the Batch User Guide. These tags aren't seen when using the Batch ListTagsForResource API operation.
This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
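A sketch of replacing the whole tag map at once. It assumes set_tags accepts Option<HashMap<String, String>>, which matches the map the JSON deserializer above passes to it; the tag key and value are placeholders.

use std::collections::HashMap;
use aws_sdk_batch::model::{compute_resource::Builder, ComputeResource};

fn with_instance_tags() -> ComputeResource {
    let mut tags = HashMap::new();
    tags.insert("Name".to_string(), "Batch Instance - C4OnDemand".to_string());
    // Assumed signature: set_tags(Option<HashMap<String, String>>).
    Builder::default().set_tags(Some(tags)).build()
}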
sourcepub fn placement_group(self, input: impl Into<String>) -> Self
pub fn placement_group(self, input: impl Into<String>) -> Self
The Amazon EC2 placement group to associate with your compute resources. If you intend to submit multi-node parallel jobs to your compute environment, you should consider creating a cluster placement group and associate it with your compute resources. This keeps your multi-node parallel job on a logical grouping of instances within a single Availability Zone with high network flow potential. For more information, see Placement groups in the Amazon EC2 User Guide for Linux Instances.
This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
sourcepub fn set_placement_group(self, input: Option<String>) -> Self
pub fn set_placement_group(self, input: Option<String>) -> Self
The Amazon EC2 placement group to associate with your compute resources. If you intend to submit multi-node parallel jobs to your compute environment, you should consider creating a cluster placement group and associate it with your compute resources. This keeps your multi-node parallel job on a logical grouping of instances within a single Availability Zone with high network flow potential. For more information, see Placement groups in the Amazon EC2 User Guide for Linux Instances.
This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
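A minimal sketch with a placeholder placement group name, for the multi-node parallel case described above.

use aws_sdk_batch::model::{compute_resource::Builder, ComputeResource};

fn with_cluster_placement_group() -> ComputeResource {
    Builder::default()
        // Placeholder cluster placement group name.
        .placement_group("my-cluster-placement-group")
        .build()
}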
sourcepub fn bid_percentage(self, input: i32) -> Self
pub fn bid_percentage(self, input: i32) -> Self
The maximum percentage that a Spot Instance price can be when compared with the On-Demand price for that instance type before instances are launched. For example, if your maximum percentage is 20%, then the Spot price must be less than 20% of the current On-Demand price for that Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum percentage. If you leave this field empty, the default value is 100% of the On-Demand price.
This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
sourcepub fn set_bid_percentage(self, input: Option<i32>) -> Self
pub fn set_bid_percentage(self, input: Option<i32>) -> Self
The maximum percentage that a Spot Instance price can be when compared with the On-Demand price for that instance type before instances are launched. For example, if your maximum percentage is 20%, then the Spot price must be less than 20% of the current On-Demand price for that Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum percentage. If you leave this field empty, the default value is 100% of the On-Demand price.
This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
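A sketch of the Option-taking setter; passing None leaves the field unset, which defaults to 100% of the On-Demand price.

use aws_sdk_batch::model::{compute_resource::Builder, ComputeResource};

fn with_bid_percentage(max_percent_of_on_demand: Option<i32>) -> ComputeResource {
    // `bid_percentage(i32)` sets the cap directly; `set_bid_percentage(None)` leaves it unset.
    Builder::default()
        .set_bid_percentage(max_percent_of_on_demand)
        .build()
}

For example, with_bid_percentage(Some(20)) keeps Spot launches at or below 20% of the On-Demand price.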
sourcepub fn spot_iam_fleet_role(self, input: impl Into<String>) -> Self
pub fn spot_iam_fleet_role(self, input: impl Into<String>) -> Self
The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment. This role is required if the allocation strategy is set to BEST_FIT or if the allocation strategy isn't specified. For more information, see Amazon EC2 spot fleet role in the Batch User Guide.
This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
To tag your Spot Instances on creation, the Spot Fleet IAM role specified here must use the newer AmazonEC2SpotFleetTaggingRole managed policy. The previously recommended AmazonEC2SpotFleetRole managed policy doesn't have the required permissions to tag Spot Instances. For more information, see Spot instances not tagged on creation in the Batch User Guide.
sourcepub fn set_spot_iam_fleet_role(self, input: Option<String>) -> Self
pub fn set_spot_iam_fleet_role(self, input: Option<String>) -> Self
The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment. This role is required if the allocation strategy is set to BEST_FIT or if the allocation strategy isn't specified. For more information, see Amazon EC2 spot fleet role in the Batch User Guide.
This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
To tag your Spot Instances on creation, the Spot Fleet IAM role specified here must use the newer AmazonEC2SpotFleetTaggingRole managed policy. The previously recommended AmazonEC2SpotFleetRole managed policy doesn't have the required permissions to tag Spot Instances. For more information, see Spot instances not tagged on creation in the Batch User Guide.
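A sketch with a placeholder account ID and role name for a SPOT compute environment.

use aws_sdk_batch::model::{compute_resource::Builder, ComputeResource};

fn spot_compute_resource_with_fleet_role() -> ComputeResource {
    Builder::default()
        // Placeholder Spot Fleet role ARN; required for SPOT with the BEST_FIT strategy.
        .spot_iam_fleet_role("arn:aws:iam::123456789012:role/my-spot-fleet-role")
        .build()
}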
sourcepub fn launch_template(self, input: LaunchTemplateSpecification) -> Self
pub fn launch_template(self, input: LaunchTemplateSpecification) -> Self
The launch template to use for your compute resources. Any other compute resource parameters that you specify in a CreateComputeEnvironment API operation override the same parameters in the launch template. You must specify either the launch template ID or launch template name in the request, but not both. For more information, see Launch template support in the Batch User Guide.
This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
sourcepub fn set_launch_template(
self,
input: Option<LaunchTemplateSpecification>
) -> Self
pub fn set_launch_template(
self,
input: Option<LaunchTemplateSpecification>
) -> Self
The launch template to use for your compute resources. Any other compute resource parameters that you specify in a CreateComputeEnvironment API operation override the same parameters in the launch template. You must specify either the launch template ID or launch template name in the request, but not both. For more information, see Launch template support in the Batch User Guide.
This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
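A sketch that accepts an already-built LaunchTemplateSpecification rather than constructing one here; the import assumes the type is re-exported from the same model module as ComputeResource.

use aws_sdk_batch::model::{compute_resource::Builder, ComputeResource, LaunchTemplateSpecification};

fn with_launch_template(template: LaunchTemplateSpecification) -> ComputeResource {
    Builder::default()
        // Parameters set explicitly on this builder override the same
        // parameters carried by the launch template.
        .launch_template(template)
        .build()
}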
sourcepub fn ec2_configuration(self, input: Ec2Configuration) -> Self
pub fn ec2_configuration(self, input: Ec2Configuration) -> Self
Appends an item to ec2_configuration. To override the contents of this collection use set_ec2_configuration.
Provides information used to select Amazon Machine Images (AMIs) for EC2 instances in the compute environment. If Ec2Configuration isn't specified, the default is ECS_AL2.
One or two values can be provided.
This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
sourcepub fn set_ec2_configuration(self, input: Option<Vec<Ec2Configuration>>) -> Self
pub fn set_ec2_configuration(self, input: Option<Vec<Ec2Configuration>>) -> Self
Provides information used to select Amazon Machine Images (AMIs) for EC2 instances in the compute environment. If Ec2Configuration isn't specified, the default is ECS_AL2.
One or two values can be provided.
This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.
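A sketch of appending one entry per call from a caller-supplied list; Ec2Configuration is assumed to be re-exported from the same model module, and how its values are built is out of scope here.

use aws_sdk_batch::model::{compute_resource::Builder, ComputeResource, Ec2Configuration};

fn with_ec2_configuration(configs: Vec<Ec2Configuration>) -> ComputeResource {
    let mut builder = Builder::default();
    for cfg in configs {
        // `ec2_configuration` appends a single item per call (one or two values are allowed).
        builder = builder.ec2_configuration(cfg);
    }
    builder.build()
}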
sourcepub fn build(self) -> ComputeResource
pub fn build(self) -> ComputeResource
Consumes the builder and constructs a ComputeResource.
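A sketch tying a few of the setters from this page together; every literal value is a placeholder, and the environment type and vCPU fields documented elsewhere on the page are omitted for brevity.

use aws_sdk_batch::model::{compute_resource::Builder, ComputeResource};

fn example_compute_resource() -> ComputeResource {
    Builder::default()
        .security_group_ids("sg-0123456789abcdef0")          // placeholder ID
        .instance_role("ecsInstanceRole")
        .placement_group("my-cluster-placement-group")        // placeholder name
        .build() // consumes the builder and returns the ComputeResource
}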
Examples found in repository?
pub(crate) fn deser_structure_crate_model_compute_resource<'a, I>(
tokens: &mut std::iter::Peekable<I>,
) -> Result<
Option<crate::model::ComputeResource>,
aws_smithy_json::deserialize::error::DeserializeError,
>
where
I: Iterator<
Item = Result<
aws_smithy_json::deserialize::Token<'a>,
aws_smithy_json::deserialize::error::DeserializeError,
>,
>,
{
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None),
Some(aws_smithy_json::deserialize::Token::StartObject { .. }) => {
#[allow(unused_mut)]
let mut builder = crate::model::compute_resource::Builder::default();
loop {
match tokens.next().transpose()? {
Some(aws_smithy_json::deserialize::Token::EndObject { .. }) => break,
Some(aws_smithy_json::deserialize::Token::ObjectKey { key, .. }) => {
match key.to_unescaped()?.as_ref() {
"type" => {
builder = builder.set_type(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| {
s.to_unescaped()
.map(|u| crate::model::CrType::from(u.as_ref()))
})
.transpose()?,
);
}
"allocationStrategy" => {
builder = builder.set_allocation_strategy(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| {
s.to_unescaped().map(|u| {
crate::model::CrAllocationStrategy::from(u.as_ref())
})
})
.transpose()?,
);
}
"minvCpus" => {
builder = builder.set_minv_cpus(
aws_smithy_json::deserialize::token::expect_number_or_null(
tokens.next(),
)?
.map(i32::try_from)
.transpose()?,
);
}
"maxvCpus" => {
builder = builder.set_maxv_cpus(
aws_smithy_json::deserialize::token::expect_number_or_null(
tokens.next(),
)?
.map(i32::try_from)
.transpose()?,
);
}
"desiredvCpus" => {
builder = builder.set_desiredv_cpus(
aws_smithy_json::deserialize::token::expect_number_or_null(
tokens.next(),
)?
.map(i32::try_from)
.transpose()?,
);
}
"instanceTypes" => {
builder = builder.set_instance_types(
crate::json_deser::deser_list_com_amazonaws_batch_string_list(
tokens,
)?,
);
}
"imageId" => {
builder = builder.set_image_id(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"subnets" => {
builder = builder.set_subnets(
crate::json_deser::deser_list_com_amazonaws_batch_string_list(
tokens,
)?,
);
}
"securityGroupIds" => {
builder = builder.set_security_group_ids(
crate::json_deser::deser_list_com_amazonaws_batch_string_list(
tokens,
)?,
);
}
"ec2KeyPair" => {
builder = builder.set_ec2_key_pair(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"instanceRole" => {
builder = builder.set_instance_role(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"tags" => {
builder = builder.set_tags(
crate::json_deser::deser_map_com_amazonaws_batch_tags_map(
tokens,
)?,
);
}
"placementGroup" => {
builder = builder.set_placement_group(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"bidPercentage" => {
builder = builder.set_bid_percentage(
aws_smithy_json::deserialize::token::expect_number_or_null(
tokens.next(),
)?
.map(i32::try_from)
.transpose()?,
);
}
"spotIamFleetRole" => {
builder = builder.set_spot_iam_fleet_role(
aws_smithy_json::deserialize::token::expect_string_or_null(
tokens.next(),
)?
.map(|s| s.to_unescaped().map(|u| u.into_owned()))
.transpose()?,
);
}
"launchTemplate" => {
builder = builder.set_launch_template(
crate::json_deser::deser_structure_crate_model_launch_template_specification(tokens)?
);
}
"ec2Configuration" => {
builder = builder.set_ec2_configuration(
crate::json_deser::deser_list_com_amazonaws_batch_ec2_configuration_list(tokens)?
);
}
_ => aws_smithy_json::deserialize::token::skip_value(tokens)?,
}
}
other => {
return Err(
aws_smithy_json::deserialize::error::DeserializeError::custom(format!(
"expected object key or end object, found: {:?}",
other
)),
)
}
}
}
Ok(Some(builder.build()))
}
_ => Err(
aws_smithy_json::deserialize::error::DeserializeError::custom(
"expected start object or null",
),
),
}
}