// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use crate::operation::modify_cache_cluster::_modify_cache_cluster_output::ModifyCacheClusterOutputBuilder;
pub use crate::operation::modify_cache_cluster::_modify_cache_cluster_input::ModifyCacheClusterInputBuilder;
impl ModifyCacheClusterInputBuilder {
/// Sends a request with this input using the given client.
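    ///
    /// A minimal sketch, assuming the `aws-sdk-elasticache` crate; the cluster identifier is a
    /// placeholder, and the import path follows the re-exports at the top of this module.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_elasticache::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_elasticache::operation::modify_cache_cluster::builders::ModifyCacheClusterInputBuilder;
    ///
    /// // Build the input on its own, then send it with an existing client.
    /// let _output = ModifyCacheClusterInputBuilder::default()
    ///     .cache_cluster_id("my-cluster") // placeholder identifier
    ///     .apply_immediately(true)
    ///     .send_with(client)
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```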
pub async fn send_with(
self,
client: &crate::Client,
) -> ::std::result::Result<
crate::operation::modify_cache_cluster::ModifyCacheClusterOutput,
::aws_smithy_runtime_api::client::result::SdkError<
crate::operation::modify_cache_cluster::ModifyCacheClusterError,
::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
>,
> {
let mut fluent_builder = client.modify_cache_cluster();
fluent_builder.inner = self;
fluent_builder.send().await
}
}
/// Fluent builder constructing a request to `ModifyCacheCluster`.
///
/// <p>Modifies the settings for a cluster. You can use this operation to change one or more cluster configuration parameters by specifying the parameters and the new values.</p>
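///
/// # Example
///
/// A minimal sketch of a typical call through the fluent builder, assuming the
/// `aws-sdk-elasticache` crate; the cluster identifier and node count below are placeholders.
///
/// ```no_run
/// # async fn example(client: &aws_sdk_elasticache::Client) -> Result<(), Box<dyn std::error::Error>> {
/// let _output = client
///     .modify_cache_cluster()
///     .cache_cluster_id("my-memcached-cluster") // placeholder identifier
///     .num_cache_nodes(5)                       // desired total node count
///     .apply_immediately(true)                  // apply now rather than at the maintenance window
///     .send()
///     .await?;
/// # Ok(())
/// # }
/// ```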
#[derive(::std::clone::Clone, ::std::fmt::Debug)]
pub struct ModifyCacheClusterFluentBuilder {
handle: ::std::sync::Arc<crate::client::Handle>,
inner: crate::operation::modify_cache_cluster::builders::ModifyCacheClusterInputBuilder,
config_override: ::std::option::Option<crate::config::Builder>,
}
impl
crate::client::customize::internal::CustomizableSend<
crate::operation::modify_cache_cluster::ModifyCacheClusterOutput,
crate::operation::modify_cache_cluster::ModifyCacheClusterError,
> for ModifyCacheClusterFluentBuilder
{
fn send(
self,
config_override: crate::config::Builder,
) -> crate::client::customize::internal::BoxFuture<
crate::client::customize::internal::SendResult<
crate::operation::modify_cache_cluster::ModifyCacheClusterOutput,
crate::operation::modify_cache_cluster::ModifyCacheClusterError,
>,
> {
::std::boxed::Box::pin(async move { self.config_override(config_override).send().await })
}
}
impl ModifyCacheClusterFluentBuilder {
/// Creates a new `ModifyCacheCluster`.
pub(crate) fn new(handle: ::std::sync::Arc<crate::client::Handle>) -> Self {
Self {
handle,
inner: ::std::default::Default::default(),
config_override: ::std::option::Option::None,
}
}
    /// Access the ModifyCacheCluster input as a reference.
pub fn as_input(&self) -> &crate::operation::modify_cache_cluster::builders::ModifyCacheClusterInputBuilder {
&self.inner
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
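    ///
    /// A minimal sketch of matching the returned error against the service-level error, assuming
    /// the `aws-sdk-elasticache` crate; the cluster identifier is a placeholder.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_elasticache::Client) {
    /// match client.modify_cache_cluster().cache_cluster_id("my-cluster").send().await {
    ///     Ok(_) => println!("modification accepted"),
    ///     // `into_service_error` extracts the operation error from the transport-level wrapper.
    ///     Err(err) => eprintln!("ModifyCacheCluster failed: {:?}", err.into_service_error()),
    /// }
    /// # }
    /// ```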
pub async fn send(
self,
) -> ::std::result::Result<
crate::operation::modify_cache_cluster::ModifyCacheClusterOutput,
::aws_smithy_runtime_api::client::result::SdkError<
crate::operation::modify_cache_cluster::ModifyCacheClusterError,
::aws_smithy_runtime_api::client::orchestrator::HttpResponse,
>,
> {
let input = self
.inner
.build()
.map_err(::aws_smithy_runtime_api::client::result::SdkError::construction_failure)?;
let runtime_plugins = crate::operation::modify_cache_cluster::ModifyCacheCluster::operation_runtime_plugins(
self.handle.runtime_plugins.clone(),
&self.handle.conf,
self.config_override,
);
crate::operation::modify_cache_cluster::ModifyCacheCluster::orchestrate(&runtime_plugins, input).await
}
/// Consumes this builder, creating a customizable operation that can be modified before being sent.
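    ///
    /// A minimal sketch of the customize-then-send flow, assuming the `aws-sdk-elasticache`
    /// crate; the cluster identifier is a placeholder.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_elasticache::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// let _output = client
    ///     .modify_cache_cluster()
    ///     .cache_cluster_id("my-cluster") // placeholder identifier
    ///     .customize()
    ///     // interceptors, request mutation, or per-operation config overrides can be attached here
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```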
pub fn customize(
self,
) -> crate::client::customize::CustomizableOperation<
crate::operation::modify_cache_cluster::ModifyCacheClusterOutput,
crate::operation::modify_cache_cluster::ModifyCacheClusterError,
Self,
> {
crate::client::customize::CustomizableOperation::new(self)
}
pub(crate) fn config_override(mut self, config_override: impl Into<crate::config::Builder>) -> Self {
self.set_config_override(Some(config_override.into()));
self
}
pub(crate) fn set_config_override(&mut self, config_override: Option<crate::config::Builder>) -> &mut Self {
self.config_override = config_override;
self
}
/// <p>The cluster identifier. This value is stored as a lowercase string.</p>
pub fn cache_cluster_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
self.inner = self.inner.cache_cluster_id(input.into());
self
}
/// <p>The cluster identifier. This value is stored as a lowercase string.</p>
pub fn set_cache_cluster_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
self.inner = self.inner.set_cache_cluster_id(input);
self
}
/// <p>The cluster identifier. This value is stored as a lowercase string.</p>
pub fn get_cache_cluster_id(&self) -> &::std::option::Option<::std::string::String> {
self.inner.get_cache_cluster_id()
}
/// <p>The number of cache nodes that the cluster should have. If the value for <code>NumCacheNodes</code> is greater than the sum of the number of current cache nodes and the number of cache nodes pending creation (which may be zero), more nodes are added. If the value is less than the number of existing cache nodes, nodes are removed. If the value is equal to the number of current cache nodes, any pending add or remove requests are canceled.</p>
/// <p>If you are removing cache nodes, you must use the <code>CacheNodeIdsToRemove</code> parameter to provide the IDs of the specific cache nodes to remove.</p>
/// <p>For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.</p> <note>
/// <p>Adding or removing Memcached cache nodes can be applied immediately or as a pending operation (see <code>ApplyImmediately</code>).</p>
/// <p>A pending operation to modify the number of cache nodes in a cluster during its maintenance window, whether by adding or removing nodes in accordance with the scale out architecture, is not queued. The customer's latest request to add or remove nodes to the cluster overrides any previous pending operations to modify the number of cache nodes in the cluster. For example, a request to remove 2 nodes would override a previous pending operation to remove 3 nodes. Similarly, a request to add 2 nodes would override a previous pending operation to remove 3 nodes and vice versa. As Memcached cache nodes may now be provisioned in different Availability Zones with flexible cache node placement, a request to add nodes does not automatically override a previous pending operation to add nodes. The customer can modify the previous pending operation to add more nodes or explicitly cancel the pending request and retry the new request. To cancel pending operations to modify the number of cache nodes in a cluster, use the <code>ModifyCacheCluster</code> request and set <code>NumCacheNodes</code> equal to the number of cache nodes currently in the cluster.</p>
/// </note>
pub fn num_cache_nodes(mut self, input: i32) -> Self {
self.inner = self.inner.num_cache_nodes(input);
self
}
/// <p>The number of cache nodes that the cluster should have. If the value for <code>NumCacheNodes</code> is greater than the sum of the number of current cache nodes and the number of cache nodes pending creation (which may be zero), more nodes are added. If the value is less than the number of existing cache nodes, nodes are removed. If the value is equal to the number of current cache nodes, any pending add or remove requests are canceled.</p>
/// <p>If you are removing cache nodes, you must use the <code>CacheNodeIdsToRemove</code> parameter to provide the IDs of the specific cache nodes to remove.</p>
/// <p>For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.</p> <note>
/// <p>Adding or removing Memcached cache nodes can be applied immediately or as a pending operation (see <code>ApplyImmediately</code>).</p>
/// <p>A pending operation to modify the number of cache nodes in a cluster during its maintenance window, whether by adding or removing nodes in accordance with the scale out architecture, is not queued. The customer's latest request to add or remove nodes to the cluster overrides any previous pending operations to modify the number of cache nodes in the cluster. For example, a request to remove 2 nodes would override a previous pending operation to remove 3 nodes. Similarly, a request to add 2 nodes would override a previous pending operation to remove 3 nodes and vice versa. As Memcached cache nodes may now be provisioned in different Availability Zones with flexible cache node placement, a request to add nodes does not automatically override a previous pending operation to add nodes. The customer can modify the previous pending operation to add more nodes or explicitly cancel the pending request and retry the new request. To cancel pending operations to modify the number of cache nodes in a cluster, use the <code>ModifyCacheCluster</code> request and set <code>NumCacheNodes</code> equal to the number of cache nodes currently in the cluster.</p>
/// </note>
pub fn set_num_cache_nodes(mut self, input: ::std::option::Option<i32>) -> Self {
self.inner = self.inner.set_num_cache_nodes(input);
self
}
/// <p>The number of cache nodes that the cluster should have. If the value for <code>NumCacheNodes</code> is greater than the sum of the number of current cache nodes and the number of cache nodes pending creation (which may be zero), more nodes are added. If the value is less than the number of existing cache nodes, nodes are removed. If the value is equal to the number of current cache nodes, any pending add or remove requests are canceled.</p>
/// <p>If you are removing cache nodes, you must use the <code>CacheNodeIdsToRemove</code> parameter to provide the IDs of the specific cache nodes to remove.</p>
/// <p>For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.</p> <note>
/// <p>Adding or removing Memcached cache nodes can be applied immediately or as a pending operation (see <code>ApplyImmediately</code>).</p>
/// <p>A pending operation to modify the number of cache nodes in a cluster during its maintenance window, whether by adding or removing nodes in accordance with the scale out architecture, is not queued. The customer's latest request to add or remove nodes to the cluster overrides any previous pending operations to modify the number of cache nodes in the cluster. For example, a request to remove 2 nodes would override a previous pending operation to remove 3 nodes. Similarly, a request to add 2 nodes would override a previous pending operation to remove 3 nodes and vice versa. As Memcached cache nodes may now be provisioned in different Availability Zones with flexible cache node placement, a request to add nodes does not automatically override a previous pending operation to add nodes. The customer can modify the previous pending operation to add more nodes or explicitly cancel the pending request and retry the new request. To cancel pending operations to modify the number of cache nodes in a cluster, use the <code>ModifyCacheCluster</code> request and set <code>NumCacheNodes</code> equal to the number of cache nodes currently in the cluster.</p>
/// </note>
pub fn get_num_cache_nodes(&self) -> &::std::option::Option<i32> {
self.inner.get_num_cache_nodes()
}
/// Appends an item to `CacheNodeIdsToRemove`.
///
/// To override the contents of this collection use [`set_cache_node_ids_to_remove`](Self::set_cache_node_ids_to_remove).
///
/// <p>A list of cache node IDs to be removed. A node ID is a numeric identifier (0001, 0002, etc.). This parameter is only valid when <code>NumCacheNodes</code> is less than the existing number of cache nodes. The number of cache node IDs supplied in this parameter must match the difference between the existing number of cache nodes in the cluster or pending cache nodes, whichever is greater, and the value of <code>NumCacheNodes</code> in the request.</p>
/// <p>For example: If you have 3 active cache nodes, 7 pending cache nodes, and the number of cache nodes in this <code>ModifyCacheCluster</code> call is 5, you must list 2 (7 - 5) cache node IDs to remove.</p>
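    ///
    /// A minimal sketch matching the example above (a target of 5 nodes, so 2 node IDs to
    /// remove), assuming the `aws-sdk-elasticache` crate; the identifier and node IDs are
    /// placeholders. Each call to this method appends one ID.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_elasticache::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// let _output = client
    ///     .modify_cache_cluster()
    ///     .cache_cluster_id("my-memcached-cluster") // placeholder identifier
    ///     .num_cache_nodes(5)                       // target node count
    ///     .cache_node_ids_to_remove("0006")         // one node ID per call
    ///     .cache_node_ids_to_remove("0007")
    ///     .apply_immediately(true)
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```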
pub fn cache_node_ids_to_remove(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
self.inner = self.inner.cache_node_ids_to_remove(input.into());
self
}
/// <p>A list of cache node IDs to be removed. A node ID is a numeric identifier (0001, 0002, etc.). This parameter is only valid when <code>NumCacheNodes</code> is less than the existing number of cache nodes. The number of cache node IDs supplied in this parameter must match the difference between the existing number of cache nodes in the cluster or pending cache nodes, whichever is greater, and the value of <code>NumCacheNodes</code> in the request.</p>
/// <p>For example: If you have 3 active cache nodes, 7 pending cache nodes, and the number of cache nodes in this <code>ModifyCacheCluster</code> call is 5, you must list 2 (7 - 5) cache node IDs to remove.</p>
pub fn set_cache_node_ids_to_remove(mut self, input: ::std::option::Option<::std::vec::Vec<::std::string::String>>) -> Self {
self.inner = self.inner.set_cache_node_ids_to_remove(input);
self
}
/// <p>A list of cache node IDs to be removed. A node ID is a numeric identifier (0001, 0002, etc.). This parameter is only valid when <code>NumCacheNodes</code> is less than the existing number of cache nodes. The number of cache node IDs supplied in this parameter must match the difference between the existing number of cache nodes in the cluster or pending cache nodes, whichever is greater, and the value of <code>NumCacheNodes</code> in the request.</p>
/// <p>For example: If you have 3 active cache nodes, 7 pending cache nodes, and the number of cache nodes in this <code>ModifyCacheCluster</code> call is 5, you must list 2 (7 - 5) cache node IDs to remove.</p>
pub fn get_cache_node_ids_to_remove(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> {
self.inner.get_cache_node_ids_to_remove()
}
/// <p>Specifies whether the new nodes in this Memcached cluster are all created in a single Availability Zone or created across multiple Availability Zones.</p>
/// <p>Valid values: <code>single-az</code> | <code>cross-az</code>.</p>
/// <p>This option is only supported for Memcached clusters.</p> <note>
/// <p>You cannot specify <code>single-az</code> if the Memcached cluster already has cache nodes in different Availability Zones. If <code>cross-az</code> is specified, existing Memcached nodes remain in their current Availability Zone.</p>
/// <p>Only newly created nodes are located in different Availability Zones. </p>
/// </note>
pub fn az_mode(mut self, input: crate::types::AzMode) -> Self {
self.inner = self.inner.az_mode(input);
self
}
/// <p>Specifies whether the new nodes in this Memcached cluster are all created in a single Availability Zone or created across multiple Availability Zones.</p>
/// <p>Valid values: <code>single-az</code> | <code>cross-az</code>.</p>
/// <p>This option is only supported for Memcached clusters.</p> <note>
/// <p>You cannot specify <code>single-az</code> if the Memcached cluster already has cache nodes in different Availability Zones. If <code>cross-az</code> is specified, existing Memcached nodes remain in their current Availability Zone.</p>
/// <p>Only newly created nodes are located in different Availability Zones. </p>
/// </note>
pub fn set_az_mode(mut self, input: ::std::option::Option<crate::types::AzMode>) -> Self {
self.inner = self.inner.set_az_mode(input);
self
}
/// <p>Specifies whether the new nodes in this Memcached cluster are all created in a single Availability Zone or created across multiple Availability Zones.</p>
/// <p>Valid values: <code>single-az</code> | <code>cross-az</code>.</p>
/// <p>This option is only supported for Memcached clusters.</p> <note>
/// <p>You cannot specify <code>single-az</code> if the Memcached cluster already has cache nodes in different Availability Zones. If <code>cross-az</code> is specified, existing Memcached nodes remain in their current Availability Zone.</p>
/// <p>Only newly created nodes are located in different Availability Zones. </p>
/// </note>
pub fn get_az_mode(&self) -> &::std::option::Option<crate::types::AzMode> {
self.inner.get_az_mode()
}
/// Appends an item to `NewAvailabilityZones`.
///
/// To override the contents of this collection use [`set_new_availability_zones`](Self::set_new_availability_zones).
///
/// <note>
/// <p>This option is only supported on Memcached clusters.</p>
/// </note>
/// <p>The list of Availability Zones where the new Memcached cache nodes are created.</p>
/// <p>This parameter is only valid when <code>NumCacheNodes</code> in the request is greater than the sum of the number of active cache nodes and the number of cache nodes pending creation (which may be zero). The number of Availability Zones supplied in this list must match the cache nodes being added in this request.</p>
/// <p>Scenarios:</p>
/// <ul>
/// <li> <p> <b>Scenario 1:</b> You have 3 active nodes and wish to add 2 nodes. Specify <code>NumCacheNodes=5</code> (3 + 2) and optionally specify two Availability Zones for the two new nodes.</p> </li>
/// <li> <p> <b>Scenario 2:</b> You have 3 active nodes and 2 nodes pending creation (from the scenario 1 call) and want to add 1 more node. Specify <code>NumCacheNodes=6</code> ((3 + 2) + 1) and optionally specify an Availability Zone for the new node.</p> </li>
/// <li> <p> <b>Scenario 3:</b> You want to cancel all pending operations. Specify <code>NumCacheNodes=3</code> to cancel all pending operations.</p> </li>
/// </ul>
/// <p>The Availability Zone placement of nodes pending creation cannot be modified. If you wish to cancel any nodes pending creation, add 0 nodes by setting <code>NumCacheNodes</code> to the number of current nodes.</p>
/// <p>If <code>cross-az</code> is specified, existing Memcached nodes remain in their current Availability Zone. Only newly created nodes can be located in different Availability Zones. For guidance on how to move existing Memcached nodes to different Availability Zones, see the <b>Availability Zone Considerations</b> section of <a href="https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/CacheNodes.SupportedTypes.html">Cache Node Considerations for Memcached</a>.</p>
/// <p> <b>Impact of new add/remove requests upon pending requests</b> </p>
/// <ul>
/// <li> <p>Scenario-1</p>
/// <ul>
/// <li> <p>Pending Action: Delete</p> </li>
/// <li> <p>New Request: Delete</p> </li>
/// <li> <p>Result: The new delete, pending or immediate, replaces the pending delete.</p> </li>
/// </ul> </li>
/// <li> <p>Scenario-2</p>
/// <ul>
/// <li> <p>Pending Action: Delete</p> </li>
/// <li> <p>New Request: Create</p> </li>
/// <li> <p>Result: The new create, pending or immediate, replaces the pending delete.</p> </li>
/// </ul> </li>
/// <li> <p>Scenario-3</p>
/// <ul>
/// <li> <p>Pending Action: Create</p> </li>
/// <li> <p>New Request: Delete</p> </li>
/// <li> <p>Result: The new delete, pending or immediate, replaces the pending create.</p> </li>
/// </ul> </li>
/// <li> <p>Scenario-4</p>
/// <ul>
/// <li> <p>Pending Action: Create</p> </li>
/// <li> <p>New Request: Create</p> </li>
/// <li> <p>Result: The new create is added to the pending create.</p> <important>
/// <p> <b>Important:</b> If the new create request is <b>Apply Immediately - Yes</b>, all creates are performed immediately. If the new create request is <b>Apply Immediately - No</b>, all creates are pending.</p>
/// </important> </li>
/// </ul> </li>
/// </ul>
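    ///
    /// A minimal sketch of Scenario 1 above (3 active nodes, adding 2 new nodes into specific
    /// Availability Zones), assuming the `aws-sdk-elasticache` crate; the identifier and zone
    /// names are placeholders, and the enum variant name assumes the SDK's usual PascalCase
    /// mapping of the model values.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_elasticache::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_elasticache::types::AzMode;
    ///
    /// let _output = client
    ///     .modify_cache_cluster()
    ///     .cache_cluster_id("my-memcached-cluster") // placeholder identifier
    ///     .num_cache_nodes(5)                       // 3 existing + 2 new
    ///     .az_mode(AzMode::CrossAz)
    ///     .new_availability_zones("us-east-1a")     // one zone per new node, one per call
    ///     .new_availability_zones("us-east-1b")
    ///     .apply_immediately(true)
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```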
pub fn new_availability_zones(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
self.inner = self.inner.new_availability_zones(input.into());
self
}
/// <note>
/// <p>This option is only supported on Memcached clusters.</p>
/// </note>
/// <p>The list of Availability Zones where the new Memcached cache nodes are created.</p>
/// <p>This parameter is only valid when <code>NumCacheNodes</code> in the request is greater than the sum of the number of active cache nodes and the number of cache nodes pending creation (which may be zero). The number of Availability Zones supplied in this list must match the cache nodes being added in this request.</p>
/// <p>Scenarios:</p>
/// <ul>
/// <li> <p> <b>Scenario 1:</b> You have 3 active nodes and wish to add 2 nodes. Specify <code>NumCacheNodes=5</code> (3 + 2) and optionally specify two Availability Zones for the two new nodes.</p> </li>
/// <li> <p> <b>Scenario 2:</b> You have 3 active nodes and 2 nodes pending creation (from the scenario 1 call) and want to add 1 more node. Specify <code>NumCacheNodes=6</code> ((3 + 2) + 1) and optionally specify an Availability Zone for the new node.</p> </li>
/// <li> <p> <b>Scenario 3:</b> You want to cancel all pending operations. Specify <code>NumCacheNodes=3</code> to cancel all pending operations.</p> </li>
/// </ul>
/// <p>The Availability Zone placement of nodes pending creation cannot be modified. If you wish to cancel any nodes pending creation, add 0 nodes by setting <code>NumCacheNodes</code> to the number of current nodes.</p>
/// <p>If <code>cross-az</code> is specified, existing Memcached nodes remain in their current Availability Zone. Only newly created nodes can be located in different Availability Zones. For guidance on how to move existing Memcached nodes to different Availability Zones, see the <b>Availability Zone Considerations</b> section of <a href="https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/CacheNodes.SupportedTypes.html">Cache Node Considerations for Memcached</a>.</p>
/// <p> <b>Impact of new add/remove requests upon pending requests</b> </p>
/// <ul>
/// <li> <p>Scenario-1</p>
/// <ul>
/// <li> <p>Pending Action: Delete</p> </li>
/// <li> <p>New Request: Delete</p> </li>
/// <li> <p>Result: The new delete, pending or immediate, replaces the pending delete.</p> </li>
/// </ul> </li>
/// <li> <p>Scenario-2</p>
/// <ul>
/// <li> <p>Pending Action: Delete</p> </li>
/// <li> <p>New Request: Create</p> </li>
/// <li> <p>Result: The new create, pending or immediate, replaces the pending delete.</p> </li>
/// </ul> </li>
/// <li> <p>Scenario-3</p>
/// <ul>
/// <li> <p>Pending Action: Create</p> </li>
/// <li> <p>New Request: Delete</p> </li>
/// <li> <p>Result: The new delete, pending or immediate, replaces the pending create.</p> </li>
/// </ul> </li>
/// <li> <p>Scenario-4</p>
/// <ul>
/// <li> <p>Pending Action: Create</p> </li>
/// <li> <p>New Request: Create</p> </li>
/// <li> <p>Result: The new create is added to the pending create.</p> <important>
/// <p> <b>Important:</b> If the new create request is <b>Apply Immediately - Yes</b>, all creates are performed immediately. If the new create request is <b>Apply Immediately - No</b>, all creates are pending.</p>
/// </important> </li>
/// </ul> </li>
/// </ul>
pub fn set_new_availability_zones(mut self, input: ::std::option::Option<::std::vec::Vec<::std::string::String>>) -> Self {
self.inner = self.inner.set_new_availability_zones(input);
self
}
/// <note>
/// <p>This option is only supported on Memcached clusters.</p>
/// </note>
/// <p>The list of Availability Zones where the new Memcached cache nodes are created.</p>
/// <p>This parameter is only valid when <code>NumCacheNodes</code> in the request is greater than the sum of the number of active cache nodes and the number of cache nodes pending creation (which may be zero). The number of Availability Zones supplied in this list must match the cache nodes being added in this request.</p>
/// <p>Scenarios:</p>
/// <ul>
/// <li> <p> <b>Scenario 1:</b> You have 3 active nodes and wish to add 2 nodes. Specify <code>NumCacheNodes=5</code> (3 + 2) and optionally specify two Availability Zones for the two new nodes.</p> </li>
/// <li> <p> <b>Scenario 2:</b> You have 3 active nodes and 2 nodes pending creation (from the scenario 1 call) and want to add 1 more node. Specify <code>NumCacheNodes=6</code> ((3 + 2) + 1) and optionally specify an Availability Zone for the new node.</p> </li>
/// <li> <p> <b>Scenario 3:</b> You want to cancel all pending operations. Specify <code>NumCacheNodes=3</code> to cancel all pending operations.</p> </li>
/// </ul>
/// <p>The Availability Zone placement of nodes pending creation cannot be modified. If you wish to cancel any nodes pending creation, add 0 nodes by setting <code>NumCacheNodes</code> to the number of current nodes.</p>
/// <p>If <code>cross-az</code> is specified, existing Memcached nodes remain in their current Availability Zone. Only newly created nodes can be located in different Availability Zones. For guidance on how to move existing Memcached nodes to different Availability Zones, see the <b>Availability Zone Considerations</b> section of <a href="https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/CacheNodes.SupportedTypes.html">Cache Node Considerations for Memcached</a>.</p>
/// <p> <b>Impact of new add/remove requests upon pending requests</b> </p>
/// <ul>
/// <li> <p>Scenario-1</p>
/// <ul>
/// <li> <p>Pending Action: Delete</p> </li>
/// <li> <p>New Request: Delete</p> </li>
/// <li> <p>Result: The new delete, pending or immediate, replaces the pending delete.</p> </li>
/// </ul> </li>
/// <li> <p>Scenario-2</p>
/// <ul>
/// <li> <p>Pending Action: Delete</p> </li>
/// <li> <p>New Request: Create</p> </li>
/// <li> <p>Result: The new create, pending or immediate, replaces the pending delete.</p> </li>
/// </ul> </li>
/// <li> <p>Scenario-3</p>
/// <ul>
/// <li> <p>Pending Action: Create</p> </li>
/// <li> <p>New Request: Delete</p> </li>
/// <li> <p>Result: The new delete, pending or immediate, replaces the pending create.</p> </li>
/// </ul> </li>
/// <li> <p>Scenario-4</p>
/// <ul>
/// <li> <p>Pending Action: Create</p> </li>
/// <li> <p>New Request: Create</p> </li>
/// <li> <p>Result: The new create is added to the pending create.</p> <important>
/// <p> <b>Important:</b> If the new create request is <b>Apply Immediately - Yes</b>, all creates are performed immediately. If the new create request is <b>Apply Immediately - No</b>, all creates are pending.</p>
/// </important> </li>
/// </ul> </li>
/// </ul>
pub fn get_new_availability_zones(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> {
self.inner.get_new_availability_zones()
}
/// Appends an item to `CacheSecurityGroupNames`.
///
/// To override the contents of this collection use [`set_cache_security_group_names`](Self::set_cache_security_group_names).
///
/// <p>A list of cache security group names to authorize on this cluster. This change is asynchronously applied as soon as possible.</p>
/// <p>You can use this parameter only with clusters that are created outside of an Amazon Virtual Private Cloud (Amazon VPC).</p>
/// <p>Constraints: Must contain no more than 255 alphanumeric characters. Must not be "Default".</p>
pub fn cache_security_group_names(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
self.inner = self.inner.cache_security_group_names(input.into());
self
}
/// <p>A list of cache security group names to authorize on this cluster. This change is asynchronously applied as soon as possible.</p>
/// <p>You can use this parameter only with clusters that are created outside of an Amazon Virtual Private Cloud (Amazon VPC).</p>
/// <p>Constraints: Must contain no more than 255 alphanumeric characters. Must not be "Default".</p>
pub fn set_cache_security_group_names(mut self, input: ::std::option::Option<::std::vec::Vec<::std::string::String>>) -> Self {
self.inner = self.inner.set_cache_security_group_names(input);
self
}
/// <p>A list of cache security group names to authorize on this cluster. This change is asynchronously applied as soon as possible.</p>
/// <p>You can use this parameter only with clusters that are created outside of an Amazon Virtual Private Cloud (Amazon VPC).</p>
/// <p>Constraints: Must contain no more than 255 alphanumeric characters. Must not be "Default".</p>
pub fn get_cache_security_group_names(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> {
self.inner.get_cache_security_group_names()
}
/// Appends an item to `SecurityGroupIds`.
///
/// To override the contents of this collection use [`set_security_group_ids`](Self::set_security_group_ids).
///
/// <p>Specifies the VPC Security Groups associated with the cluster.</p>
/// <p>This parameter can be used only with clusters that are created in an Amazon Virtual Private Cloud (Amazon VPC).</p>
pub fn security_group_ids(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
self.inner = self.inner.security_group_ids(input.into());
self
}
/// <p>Specifies the VPC Security Groups associated with the cluster.</p>
/// <p>This parameter can be used only with clusters that are created in an Amazon Virtual Private Cloud (Amazon VPC).</p>
pub fn set_security_group_ids(mut self, input: ::std::option::Option<::std::vec::Vec<::std::string::String>>) -> Self {
self.inner = self.inner.set_security_group_ids(input);
self
}
/// <p>Specifies the VPC Security Groups associated with the cluster.</p>
/// <p>This parameter can be used only with clusters that are created in an Amazon Virtual Private Cloud (Amazon VPC).</p>
pub fn get_security_group_ids(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> {
self.inner.get_security_group_ids()
}
/// <p>Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.</p>
/// <p>Valid values for <code>ddd</code> are:</p>
/// <ul>
/// <li> <p> <code>sun</code> </p> </li>
/// <li> <p> <code>mon</code> </p> </li>
/// <li> <p> <code>tue</code> </p> </li>
/// <li> <p> <code>wed</code> </p> </li>
/// <li> <p> <code>thu</code> </p> </li>
/// <li> <p> <code>fri</code> </p> </li>
/// <li> <p> <code>sat</code> </p> </li>
/// </ul>
/// <p>Example: <code>sun:23:00-mon:01:30</code> </p>
pub fn preferred_maintenance_window(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
self.inner = self.inner.preferred_maintenance_window(input.into());
self
}
/// <p>Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.</p>
/// <p>Valid values for <code>ddd</code> are:</p>
/// <ul>
/// <li> <p> <code>sun</code> </p> </li>
/// <li> <p> <code>mon</code> </p> </li>
/// <li> <p> <code>tue</code> </p> </li>
/// <li> <p> <code>wed</code> </p> </li>
/// <li> <p> <code>thu</code> </p> </li>
/// <li> <p> <code>fri</code> </p> </li>
/// <li> <p> <code>sat</code> </p> </li>
/// </ul>
/// <p>Example: <code>sun:23:00-mon:01:30</code> </p>
pub fn set_preferred_maintenance_window(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
self.inner = self.inner.set_preferred_maintenance_window(input);
self
}
/// <p>Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.</p>
/// <p>Valid values for <code>ddd</code> are:</p>
/// <ul>
/// <li> <p> <code>sun</code> </p> </li>
/// <li> <p> <code>mon</code> </p> </li>
/// <li> <p> <code>tue</code> </p> </li>
/// <li> <p> <code>wed</code> </p> </li>
/// <li> <p> <code>thu</code> </p> </li>
/// <li> <p> <code>fri</code> </p> </li>
/// <li> <p> <code>sat</code> </p> </li>
/// </ul>
/// <p>Example: <code>sun:23:00-mon:01:30</code> </p>
pub fn get_preferred_maintenance_window(&self) -> &::std::option::Option<::std::string::String> {
self.inner.get_preferred_maintenance_window()
}
/// <p>The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications are sent.</p> <note>
    /// <p>The Amazon SNS topic owner must be the same as the cluster owner.</p>
/// </note>
pub fn notification_topic_arn(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
self.inner = self.inner.notification_topic_arn(input.into());
self
}
/// <p>The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications are sent.</p> <note>
    /// <p>The Amazon SNS topic owner must be the same as the cluster owner.</p>
/// </note>
pub fn set_notification_topic_arn(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
self.inner = self.inner.set_notification_topic_arn(input);
self
}
/// <p>The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications are sent.</p> <note>
    /// <p>The Amazon SNS topic owner must be the same as the cluster owner.</p>
/// </note>
pub fn get_notification_topic_arn(&self) -> &::std::option::Option<::std::string::String> {
self.inner.get_notification_topic_arn()
}
/// <p>The name of the cache parameter group to apply to this cluster. This change is asynchronously applied as soon as possible for parameters when the <code>ApplyImmediately</code> parameter is specified as <code>true</code> for this request.</p>
pub fn cache_parameter_group_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
self.inner = self.inner.cache_parameter_group_name(input.into());
self
}
/// <p>The name of the cache parameter group to apply to this cluster. This change is asynchronously applied as soon as possible for parameters when the <code>ApplyImmediately</code> parameter is specified as <code>true</code> for this request.</p>
pub fn set_cache_parameter_group_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
self.inner = self.inner.set_cache_parameter_group_name(input);
self
}
/// <p>The name of the cache parameter group to apply to this cluster. This change is asynchronously applied as soon as possible for parameters when the <code>ApplyImmediately</code> parameter is specified as <code>true</code> for this request.</p>
pub fn get_cache_parameter_group_name(&self) -> &::std::option::Option<::std::string::String> {
self.inner.get_cache_parameter_group_name()
}
/// <p>The status of the Amazon SNS notification topic. Notifications are sent only if the status is <code>active</code>.</p>
/// <p>Valid values: <code>active</code> | <code>inactive</code> </p>
pub fn notification_topic_status(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
self.inner = self.inner.notification_topic_status(input.into());
self
}
/// <p>The status of the Amazon SNS notification topic. Notifications are sent only if the status is <code>active</code>.</p>
/// <p>Valid values: <code>active</code> | <code>inactive</code> </p>
pub fn set_notification_topic_status(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
self.inner = self.inner.set_notification_topic_status(input);
self
}
/// <p>The status of the Amazon SNS notification topic. Notifications are sent only if the status is <code>active</code>.</p>
/// <p>Valid values: <code>active</code> | <code>inactive</code> </p>
pub fn get_notification_topic_status(&self) -> &::std::option::Option<::std::string::String> {
self.inner.get_notification_topic_status()
}
/// <p>If <code>true</code>, this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the <code>PreferredMaintenanceWindow</code> setting for the cluster.</p>
/// <p>If <code>false</code>, changes to the cluster are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first.</p> <important>
/// <p>If you perform a <code>ModifyCacheCluster</code> before a pending modification is applied, the pending modification is replaced by the newer modification.</p>
/// </important>
/// <p>Valid values: <code>true</code> | <code>false</code> </p>
/// <p>Default: <code>false</code> </p>
pub fn apply_immediately(mut self, input: bool) -> Self {
self.inner = self.inner.apply_immediately(input);
self
}
/// <p>If <code>true</code>, this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the <code>PreferredMaintenanceWindow</code> setting for the cluster.</p>
/// <p>If <code>false</code>, changes to the cluster are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first.</p> <important>
/// <p>If you perform a <code>ModifyCacheCluster</code> before a pending modification is applied, the pending modification is replaced by the newer modification.</p>
/// </important>
/// <p>Valid values: <code>true</code> | <code>false</code> </p>
/// <p>Default: <code>false</code> </p>
pub fn set_apply_immediately(mut self, input: ::std::option::Option<bool>) -> Self {
self.inner = self.inner.set_apply_immediately(input);
self
}
/// <p>If <code>true</code>, this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the <code>PreferredMaintenanceWindow</code> setting for the cluster.</p>
/// <p>If <code>false</code>, changes to the cluster are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first.</p> <important>
/// <p>If you perform a <code>ModifyCacheCluster</code> before a pending modification is applied, the pending modification is replaced by the newer modification.</p>
/// </important>
/// <p>Valid values: <code>true</code> | <code>false</code> </p>
/// <p>Default: <code>false</code> </p>
pub fn get_apply_immediately(&self) -> &::std::option::Option<bool> {
self.inner.get_apply_immediately()
}
/// <p>The upgraded version of the cache engine to be run on the cache nodes.</p>
/// <p> <b>Important:</b> You can upgrade to a newer engine version (see <a href="https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement">Selecting a Cache Engine and Version</a>), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version. </p>
pub fn engine_version(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
self.inner = self.inner.engine_version(input.into());
self
}
/// <p>The upgraded version of the cache engine to be run on the cache nodes.</p>
/// <p> <b>Important:</b> You can upgrade to a newer engine version (see <a href="https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement">Selecting a Cache Engine and Version</a>), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version. </p>
pub fn set_engine_version(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
self.inner = self.inner.set_engine_version(input);
self
}
/// <p>The upgraded version of the cache engine to be run on the cache nodes.</p>
/// <p> <b>Important:</b> You can upgrade to a newer engine version (see <a href="https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement">Selecting a Cache Engine and Version</a>), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version. </p>
pub fn get_engine_version(&self) -> &::std::option::Option<::std::string::String> {
self.inner.get_engine_version()
}
    /// <p>If you are running Redis engine version 6.0 or later, set this parameter to <code>true</code> if you want to opt in to the next auto minor version upgrade campaign. This parameter is disabled for earlier engine versions.</p>
pub fn auto_minor_version_upgrade(mut self, input: bool) -> Self {
self.inner = self.inner.auto_minor_version_upgrade(input);
self
}
    /// <p>If you are running Redis engine version 6.0 or later, set this parameter to <code>true</code> if you want to opt in to the next auto minor version upgrade campaign. This parameter is disabled for earlier engine versions.</p>
pub fn set_auto_minor_version_upgrade(mut self, input: ::std::option::Option<bool>) -> Self {
self.inner = self.inner.set_auto_minor_version_upgrade(input);
self
}
    /// <p>If you are running Redis engine version 6.0 or later, set this parameter to <code>true</code> if you want to opt in to the next auto minor version upgrade campaign. This parameter is disabled for earlier engine versions.</p>
pub fn get_auto_minor_version_upgrade(&self) -> &::std::option::Option<bool> {
self.inner.get_auto_minor_version_upgrade()
}
/// <p>The number of days for which ElastiCache retains automatic cluster snapshots before deleting them. For example, if you set <code>SnapshotRetentionLimit</code> to 5, a snapshot that was taken today is retained for 5 days before being deleted.</p> <note>
/// <p>If the value of <code>SnapshotRetentionLimit</code> is set to zero (0), backups are turned off.</p>
/// </note>
pub fn snapshot_retention_limit(mut self, input: i32) -> Self {
self.inner = self.inner.snapshot_retention_limit(input);
self
}
/// <p>The number of days for which ElastiCache retains automatic cluster snapshots before deleting them. For example, if you set <code>SnapshotRetentionLimit</code> to 5, a snapshot that was taken today is retained for 5 days before being deleted.</p> <note>
/// <p>If the value of <code>SnapshotRetentionLimit</code> is set to zero (0), backups are turned off.</p>
/// </note>
pub fn set_snapshot_retention_limit(mut self, input: ::std::option::Option<i32>) -> Self {
self.inner = self.inner.set_snapshot_retention_limit(input);
self
}
/// <p>The number of days for which ElastiCache retains automatic cluster snapshots before deleting them. For example, if you set <code>SnapshotRetentionLimit</code> to 5, a snapshot that was taken today is retained for 5 days before being deleted.</p> <note>
/// <p>If the value of <code>SnapshotRetentionLimit</code> is set to zero (0), backups are turned off.</p>
/// </note>
pub fn get_snapshot_retention_limit(&self) -> &::std::option::Option<i32> {
self.inner.get_snapshot_retention_limit()
}
/// <p>The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your cluster. </p>
pub fn snapshot_window(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
self.inner = self.inner.snapshot_window(input.into());
self
}
/// <p>The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your cluster. </p>
pub fn set_snapshot_window(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
self.inner = self.inner.set_snapshot_window(input);
self
}
/// <p>The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your cluster. </p>
pub fn get_snapshot_window(&self) -> &::std::option::Option<::std::string::String> {
self.inner.get_snapshot_window()
}
/// <p>A valid cache node type that you want to scale this cluster up to.</p>
pub fn cache_node_type(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
self.inner = self.inner.cache_node_type(input.into());
self
}
/// <p>A valid cache node type that you want to scale this cluster up to.</p>
pub fn set_cache_node_type(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
self.inner = self.inner.set_cache_node_type(input);
self
}
/// <p>A valid cache node type that you want to scale this cluster up to.</p>
pub fn get_cache_node_type(&self) -> &::std::option::Option<::std::string::String> {
self.inner.get_cache_node_type()
}
/// <p>Reserved parameter. The password used to access a password protected server. This parameter must be specified with the <code>auth-token-update</code> parameter. Password constraints:</p>
/// <ul>
/// <li> <p>Must be only printable ASCII characters</p> </li>
/// <li> <p>Must be at least 16 characters and no more than 128 characters in length</p> </li>
    /// <li> <p>Cannot contain any of the following characters: '/', '"', '@', or '%'</p> </li>
    /// </ul>
    /// <p>For more information, see the Redis <a href="http://redis.io/commands/AUTH">AUTH</a> command.</p>
pub fn auth_token(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
self.inner = self.inner.auth_token(input.into());
self
}
/// <p>Reserved parameter. The password used to access a password protected server. This parameter must be specified with the <code>auth-token-update</code> parameter. Password constraints:</p>
/// <ul>
/// <li> <p>Must be only printable ASCII characters</p> </li>
/// <li> <p>Must be at least 16 characters and no more than 128 characters in length</p> </li>
    /// <li> <p>Cannot contain any of the following characters: '/', '"', '@', or '%'</p> </li>
    /// </ul>
    /// <p>For more information, see the Redis <a href="http://redis.io/commands/AUTH">AUTH</a> command.</p>
pub fn set_auth_token(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
self.inner = self.inner.set_auth_token(input);
self
}
/// <p>Reserved parameter. The password used to access a password protected server. This parameter must be specified with the <code>auth-token-update</code> parameter. Password constraints:</p>
/// <ul>
/// <li> <p>Must be only printable ASCII characters</p> </li>
/// <li> <p>Must be at least 16 characters and no more than 128 characters in length</p> </li>
    /// <li> <p>Cannot contain any of the following characters: '/', '"', '@', or '%'</p> </li>
    /// </ul>
    /// <p>For more information, see the Redis <a href="http://redis.io/commands/AUTH">AUTH</a> command.</p>
pub fn get_auth_token(&self) -> &::std::option::Option<::std::string::String> {
self.inner.get_auth_token()
}
/// <p>Specifies the strategy to use to update the AUTH token. This parameter must be specified with the <code>auth-token</code> parameter. Possible values:</p>
/// <ul>
/// <li> <p>Rotate</p> </li>
/// <li> <p>Set</p> </li>
/// </ul>
/// <p> For more information, see <a href="http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html">Authenticating Users with Redis AUTH</a> </p>
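    ///
    /// A minimal sketch of rotating the AUTH token together with `auth_token`, assuming the
    /// `aws-sdk-elasticache` crate; the identifier and token value are placeholders (the token
    /// must satisfy the constraints listed on `auth_token`), and the enum variant name assumes
    /// the SDK's usual PascalCase mapping of the model values.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_elasticache::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_elasticache::types::AuthTokenUpdateStrategyType;
    ///
    /// let _output = client
    ///     .modify_cache_cluster()
    ///     .cache_cluster_id("my-redis-cluster")                // placeholder identifier
    ///     .auth_token("an-example-token-of-sufficient-length") // placeholder, 16-128 printable ASCII chars
    ///     .auth_token_update_strategy(AuthTokenUpdateStrategyType::Rotate)
    ///     .apply_immediately(true)
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```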
pub fn auth_token_update_strategy(mut self, input: crate::types::AuthTokenUpdateStrategyType) -> Self {
self.inner = self.inner.auth_token_update_strategy(input);
self
}
/// <p>Specifies the strategy to use to update the AUTH token. This parameter must be specified with the <code>auth-token</code> parameter. Possible values:</p>
/// <ul>
/// <li> <p>Rotate</p> </li>
/// <li> <p>Set</p> </li>
/// </ul>
/// <p> For more information, see <a href="http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html">Authenticating Users with Redis AUTH</a> </p>
pub fn set_auth_token_update_strategy(mut self, input: ::std::option::Option<crate::types::AuthTokenUpdateStrategyType>) -> Self {
self.inner = self.inner.set_auth_token_update_strategy(input);
self
}
/// <p>Specifies the strategy to use to update the AUTH token. This parameter must be specified with the <code>auth-token</code> parameter. Possible values:</p>
/// <ul>
/// <li> <p>Rotate</p> </li>
/// <li> <p>Set</p> </li>
/// </ul>
/// <p> For more information, see <a href="http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html">Authenticating Users with Redis AUTH</a> </p>
pub fn get_auth_token_update_strategy(&self) -> &::std::option::Option<crate::types::AuthTokenUpdateStrategyType> {
self.inner.get_auth_token_update_strategy()
}
/// Appends an item to `LogDeliveryConfigurations`.
///
/// To override the contents of this collection use [`set_log_delivery_configurations`](Self::set_log_delivery_configurations).
///
/// <p>Specifies the destination, format and type of the logs.</p>
pub fn log_delivery_configurations(mut self, input: crate::types::LogDeliveryConfigurationRequest) -> Self {
self.inner = self.inner.log_delivery_configurations(input);
self
}
/// <p>Specifies the destination, format and type of the logs.</p>
pub fn set_log_delivery_configurations(
mut self,
input: ::std::option::Option<::std::vec::Vec<crate::types::LogDeliveryConfigurationRequest>>,
) -> Self {
self.inner = self.inner.set_log_delivery_configurations(input);
self
}
/// <p>Specifies the destination, format and type of the logs.</p>
pub fn get_log_delivery_configurations(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::LogDeliveryConfigurationRequest>> {
self.inner.get_log_delivery_configurations()
}
/// <p>The network type you choose when modifying a cluster, either <code>ipv4</code> | <code>ipv6</code>. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the <a href="http://aws.amazon.com/ec2/nitro/">Nitro system</a>.</p>
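    ///
    /// A minimal sketch of switching IP discovery to IPv6, assuming the `aws-sdk-elasticache`
    /// crate; the cluster identifier is a placeholder, and the enum variant name assumes the
    /// SDK's usual PascalCase mapping of the model values.
    ///
    /// ```no_run
    /// # async fn example(client: &aws_sdk_elasticache::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// use aws_sdk_elasticache::types::IpDiscovery;
    ///
    /// let _output = client
    ///     .modify_cache_cluster()
    ///     .cache_cluster_id("my-cluster") // placeholder identifier
    ///     .ip_discovery(IpDiscovery::Ipv6)
    ///     .send()
    ///     .await?;
    /// # Ok(())
    /// # }
    /// ```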
pub fn ip_discovery(mut self, input: crate::types::IpDiscovery) -> Self {
self.inner = self.inner.ip_discovery(input);
self
}
/// <p>The network type you choose when modifying a cluster, either <code>ipv4</code> | <code>ipv6</code>. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the <a href="http://aws.amazon.com/ec2/nitro/">Nitro system</a>.</p>
pub fn set_ip_discovery(mut self, input: ::std::option::Option<crate::types::IpDiscovery>) -> Self {
self.inner = self.inner.set_ip_discovery(input);
self
}
/// <p>The network type you choose when modifying a cluster, either <code>ipv4</code> | <code>ipv6</code>. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the <a href="http://aws.amazon.com/ec2/nitro/">Nitro system</a>.</p>
pub fn get_ip_discovery(&self) -> &::std::option::Option<crate::types::IpDiscovery> {
self.inner.get_ip_discovery()
}
}