aws-sdk-fsx 0.24.0

AWS SDK for Amazon FSx
Documentation: https://docs.rs/aws-sdk-fsx/0.24.0
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.

/// <p>Describes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.</p>
#[non_exhaustive]
#[derive(Clone, PartialEq, Debug)]
pub struct Volume {
    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
    #[doc(hidden)]
    pub creation_time: Option<aws_smithy_types::DateTime>,
    /// <p>The globally unique ID of the file system, assigned by Amazon FSx.</p>
    #[doc(hidden)]
    pub file_system_id: Option<String>,
    /// <p>The lifecycle status of the volume.</p>
    /// <ul>
    /// <li> <p> <code>AVAILABLE</code> - The volume is fully available for use.</p> </li>
    /// <li> <p> <code>CREATED</code> - The volume has been created.</p> </li>
    /// <li> <p> <code>CREATING</code> - Amazon FSx is creating the new volume.</p> </li>
    /// <li> <p> <code>DELETING</code> - Amazon FSx is deleting an existing volume.</p> </li>
    /// <li> <p> <code>FAILED</code> - Amazon FSx was unable to create the volume.</p> </li>
    /// <li> <p> <code>MISCONFIGURED</code> - The volume is in a failed but recoverable state.</p> </li>
    /// <li> <p> <code>PENDING</code> - Amazon FSx hasn't started creating the volume.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub lifecycle: Option<crate::model::VolumeLifecycle>,
    /// <p>The name of the volume.</p>
    #[doc(hidden)]
    pub name: Option<String>,
    /// <p>The configuration of an Amazon FSx for NetApp ONTAP volume.</p>
    #[doc(hidden)]
    pub ontap_configuration: Option<crate::model::OntapVolumeConfiguration>,
    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    #[doc(hidden)]
    pub resource_arn: Option<String>,
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    #[doc(hidden)]
    pub tags: Option<Vec<crate::model::Tag>>,
    /// <p>The system-generated, unique ID of the volume.</p>
    #[doc(hidden)]
    pub volume_id: Option<String>,
    /// <p>The type of the volume.</p>
    #[doc(hidden)]
    pub volume_type: Option<crate::model::VolumeType>,
    /// <p>The reason why the volume lifecycle status changed.</p>
    #[doc(hidden)]
    pub lifecycle_transition_reason: Option<crate::model::LifecycleTransitionReason>,
    /// <p>A list of administrative actions for the volume that are in process or waiting to be processed. Administrative actions describe changes to the volume that you have initiated using the <code>UpdateVolume</code> action.</p>
    #[doc(hidden)]
    pub administrative_actions: Option<Vec<crate::model::AdministrativeAction>>,
    /// <p>The configuration of an Amazon FSx for OpenZFS volume.</p>
    #[doc(hidden)]
    pub open_zfs_configuration: Option<crate::model::OpenZfsVolumeConfiguration>,
}
impl Volume {
    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
    pub fn creation_time(&self) -> Option<&aws_smithy_types::DateTime> {
        self.creation_time.as_ref()
    }
    /// <p>The globally unique ID of the file system, assigned by Amazon FSx.</p>
    pub fn file_system_id(&self) -> Option<&str> {
        self.file_system_id.as_ref().map(String::as_str)
    }
    /// <p>The lifecycle status of the volume.</p>
    /// <ul>
    /// <li> <p> <code>AVAILABLE</code> - The volume is fully available for use.</p> </li>
    /// <li> <p> <code>CREATED</code> - The volume has been created.</p> </li>
    /// <li> <p> <code>CREATING</code> - Amazon FSx is creating the new volume.</p> </li>
    /// <li> <p> <code>DELETING</code> - Amazon FSx is deleting an existing volume.</p> </li>
    /// <li> <p> <code>FAILED</code> - Amazon FSx was unable to create the volume.</p> </li>
    /// <li> <p> <code>MISCONFIGURED</code> - The volume is in a failed but recoverable state.</p> </li>
    /// <li> <p> <code>PENDING</code> - Amazon FSx hasn't started creating the volume.</p> </li>
    /// </ul>
    pub fn lifecycle(&self) -> Option<&crate::model::VolumeLifecycle> {
        self.lifecycle.as_ref()
    }
    /// <p>The name of the volume.</p>
    pub fn name(&self) -> Option<&str> {
        self.name.as_ref().map(String::as_str)
    }
    /// <p>The configuration of an Amazon FSx for NetApp ONTAP volume.</p>
    pub fn ontap_configuration(&self) -> Option<&crate::model::OntapVolumeConfiguration> {
        self.ontap_configuration.as_ref()
    }
    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    pub fn resource_arn(&self) -> Option<&str> {
        self.resource_arn.as_ref().map(String::as_str)
    }
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    pub fn tags(&self) -> Option<&[crate::model::Tag]> {
        self.tags.as_ref().map(Vec::as_slice)
    }
    /// <p>The system-generated, unique ID of the volume.</p>
    pub fn volume_id(&self) -> Option<&str> {
        self.volume_id.as_ref().map(String::as_str)
    }
    /// <p>The type of the volume.</p>
    pub fn volume_type(&self) -> Option<&crate::model::VolumeType> {
        self.volume_type.as_ref()
    }
    /// <p>The reason why the volume lifecycle status changed.</p>
    pub fn lifecycle_transition_reason(&self) -> Option<&crate::model::LifecycleTransitionReason> {
        self.lifecycle_transition_reason.as_ref()
    }
    /// <p>A list of administrative actions for the volume that are in process or waiting to be processed. Administrative actions describe changes to the volume that you have initiated using the <code>UpdateVolume</code> action.</p>
    pub fn administrative_actions(&self) -> Option<&[crate::model::AdministrativeAction]> {
        self.administrative_actions.as_ref().map(Vec::as_slice)
    }
    /// <p>The configuration of an Amazon FSx for OpenZFS volume.</p>
    pub fn open_zfs_configuration(&self) -> Option<&crate::model::OpenZfsVolumeConfiguration> {
        self.open_zfs_configuration.as_ref()
    }
}
/// See [`Volume`](crate::model::Volume).
pub mod volume {

    /// A builder for [`Volume`](crate::model::Volume).
    ///
    /// Every field is optional; unset fields stay `None` in the built value.
    #[derive(Clone, PartialEq, Default, Debug)]
    pub struct Builder {
        pub(crate) creation_time: Option<aws_smithy_types::DateTime>,
        pub(crate) file_system_id: Option<String>,
        pub(crate) lifecycle: Option<crate::model::VolumeLifecycle>,
        pub(crate) name: Option<String>,
        pub(crate) ontap_configuration: Option<crate::model::OntapVolumeConfiguration>,
        pub(crate) resource_arn: Option<String>,
        pub(crate) tags: Option<Vec<crate::model::Tag>>,
        pub(crate) volume_id: Option<String>,
        pub(crate) volume_type: Option<crate::model::VolumeType>,
        pub(crate) lifecycle_transition_reason: Option<crate::model::LifecycleTransitionReason>,
        pub(crate) administrative_actions: Option<Vec<crate::model::AdministrativeAction>>,
        pub(crate) open_zfs_configuration: Option<crate::model::OpenZfsVolumeConfiguration>,
    }
    impl Builder {
        /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
        pub fn creation_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.creation_time = Some(input);
            self
        }
        /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
        pub fn set_creation_time(mut self, input: Option<aws_smithy_types::DateTime>) -> Self {
            self.creation_time = input;
            self
        }
        /// <p>The globally unique ID of the file system, assigned by Amazon FSx.</p>
        pub fn file_system_id(mut self, input: impl Into<String>) -> Self {
            self.file_system_id = Some(input.into());
            self
        }
        /// <p>The globally unique ID of the file system, assigned by Amazon FSx.</p>
        pub fn set_file_system_id(mut self, input: Option<String>) -> Self {
            self.file_system_id = input;
            self
        }
        /// <p>The lifecycle status of the volume.</p>
        /// <ul>
        /// <li> <p> <code>AVAILABLE</code> - The volume is fully available for use.</p> </li>
        /// <li> <p> <code>CREATED</code> - The volume has been created.</p> </li>
        /// <li> <p> <code>CREATING</code> - Amazon FSx is creating the new volume.</p> </li>
        /// <li> <p> <code>DELETING</code> - Amazon FSx is deleting an existing volume.</p> </li>
        /// <li> <p> <code>FAILED</code> - Amazon FSx was unable to create the volume.</p> </li>
        /// <li> <p> <code>MISCONFIGURED</code> - The volume is in a failed but recoverable state.</p> </li>
        /// <li> <p> <code>PENDING</code> - Amazon FSx hasn't started creating the volume.</p> </li>
        /// </ul>
        pub fn lifecycle(mut self, input: crate::model::VolumeLifecycle) -> Self {
            self.lifecycle = Some(input);
            self
        }
        /// <p>The lifecycle status of the volume.</p>
        /// <ul>
        /// <li> <p> <code>AVAILABLE</code> - The volume is fully available for use.</p> </li>
        /// <li> <p> <code>CREATED</code> - The volume has been created.</p> </li>
        /// <li> <p> <code>CREATING</code> - Amazon FSx is creating the new volume.</p> </li>
        /// <li> <p> <code>DELETING</code> - Amazon FSx is deleting an existing volume.</p> </li>
        /// <li> <p> <code>FAILED</code> - Amazon FSx was unable to create the volume.</p> </li>
        /// <li> <p> <code>MISCONFIGURED</code> - The volume is in a failed but recoverable state.</p> </li>
        /// <li> <p> <code>PENDING</code> - Amazon FSx hasn't started creating the volume.</p> </li>
        /// </ul>
        pub fn set_lifecycle(mut self, input: Option<crate::model::VolumeLifecycle>) -> Self {
            self.lifecycle = input;
            self
        }
        /// <p>The name of the volume.</p>
        pub fn name(mut self, input: impl Into<String>) -> Self {
            self.name = Some(input.into());
            self
        }
        /// <p>The name of the volume.</p>
        pub fn set_name(mut self, input: Option<String>) -> Self {
            self.name = input;
            self
        }
        /// <p>The configuration of an Amazon FSx for NetApp ONTAP volume.</p>
        pub fn ontap_configuration(mut self, input: crate::model::OntapVolumeConfiguration) -> Self {
            self.ontap_configuration = Some(input);
            self
        }
        /// <p>The configuration of an Amazon FSx for NetApp ONTAP volume.</p>
        pub fn set_ontap_configuration(
            mut self,
            input: Option<crate::model::OntapVolumeConfiguration>,
        ) -> Self {
            self.ontap_configuration = input;
            self
        }
        /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
        pub fn resource_arn(mut self, input: impl Into<String>) -> Self {
            self.resource_arn = Some(input.into());
            self
        }
        /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
        pub fn set_resource_arn(mut self, input: Option<String>) -> Self {
            self.resource_arn = input;
            self
        }
        /// Appends an item to `tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).
        ///
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn tags(mut self, input: crate::model::Tag) -> Self {
            // Lazily create the backing Vec on first append.
            self.tags.get_or_insert_with(Vec::new).push(input);
            self
        }
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn set_tags(mut self, input: Option<Vec<crate::model::Tag>>) -> Self {
            self.tags = input;
            self
        }
        /// <p>The system-generated, unique ID of the volume.</p>
        pub fn volume_id(mut self, input: impl Into<String>) -> Self {
            self.volume_id = Some(input.into());
            self
        }
        /// <p>The system-generated, unique ID of the volume.</p>
        pub fn set_volume_id(mut self, input: Option<String>) -> Self {
            self.volume_id = input;
            self
        }
        /// <p>The type of the volume.</p>
        pub fn volume_type(mut self, input: crate::model::VolumeType) -> Self {
            self.volume_type = Some(input);
            self
        }
        /// <p>The type of the volume.</p>
        pub fn set_volume_type(mut self, input: Option<crate::model::VolumeType>) -> Self {
            self.volume_type = input;
            self
        }
        /// <p>The reason why the volume lifecycle status changed.</p>
        pub fn lifecycle_transition_reason(
            mut self,
            input: crate::model::LifecycleTransitionReason,
        ) -> Self {
            self.lifecycle_transition_reason = Some(input);
            self
        }
        /// <p>The reason why the volume lifecycle status changed.</p>
        pub fn set_lifecycle_transition_reason(
            mut self,
            input: Option<crate::model::LifecycleTransitionReason>,
        ) -> Self {
            self.lifecycle_transition_reason = input;
            self
        }
        /// Appends an item to `administrative_actions`.
        ///
        /// To override the contents of this collection use [`set_administrative_actions`](Self::set_administrative_actions).
        ///
        /// <p>A list of administrative actions for the volume that are in process or waiting to be processed. Administrative actions describe changes to the volume that you have initiated using the <code>UpdateVolume</code> action.</p>
        pub fn administrative_actions(mut self, input: crate::model::AdministrativeAction) -> Self {
            // Lazily create the backing Vec on first append.
            self.administrative_actions
                .get_or_insert_with(Vec::new)
                .push(input);
            self
        }
        /// <p>A list of administrative actions for the volume that are in process or waiting to be processed. Administrative actions describe changes to the volume that you have initiated using the <code>UpdateVolume</code> action.</p>
        pub fn set_administrative_actions(
            mut self,
            input: Option<Vec<crate::model::AdministrativeAction>>,
        ) -> Self {
            self.administrative_actions = input;
            self
        }
        /// <p>The configuration of an Amazon FSx for OpenZFS volume.</p>
        pub fn open_zfs_configuration(
            mut self,
            input: crate::model::OpenZfsVolumeConfiguration,
        ) -> Self {
            self.open_zfs_configuration = Some(input);
            self
        }
        /// <p>The configuration of an Amazon FSx for OpenZFS volume.</p>
        pub fn set_open_zfs_configuration(
            mut self,
            input: Option<crate::model::OpenZfsVolumeConfiguration>,
        ) -> Self {
            self.open_zfs_configuration = input;
            self
        }
        /// Consumes the builder and constructs a [`Volume`](crate::model::Volume).
        pub fn build(self) -> crate::model::Volume {
            crate::model::Volume {
                creation_time: self.creation_time,
                file_system_id: self.file_system_id,
                lifecycle: self.lifecycle,
                name: self.name,
                ontap_configuration: self.ontap_configuration,
                resource_arn: self.resource_arn,
                tags: self.tags,
                volume_id: self.volume_id,
                volume_type: self.volume_type,
                lifecycle_transition_reason: self.lifecycle_transition_reason,
                administrative_actions: self.administrative_actions,
                open_zfs_configuration: self.open_zfs_configuration,
            }
        }
    }
}
impl Volume {
    /// Creates a new builder-style object to manufacture [`Volume`](crate::model::Volume).
    pub fn builder() -> crate::model::volume::Builder {
        // Builder derives `Default` with every field `None`.
        Default::default()
    }
}

/// <p>The configuration of an Amazon FSx for OpenZFS volume.</p>
#[non_exhaustive]
#[derive(Clone, PartialEq, Debug)]
pub struct OpenZfsVolumeConfiguration {
    /// <p>The ID of the parent volume.</p>
    #[doc(hidden)]
    pub parent_volume_id: Option<String>,
    /// <p>The path to the volume from the root volume. For example, <code>fsx/parentVolume/volume1</code>.</p>
    #[doc(hidden)]
    pub volume_path: Option<String>,
    /// <p>The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't reserve more storage than the parent volume has reserved.</p>
    #[doc(hidden)]
    pub storage_capacity_reservation_gi_b: Option<i32>,
    /// <p>The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. You can specify a quota larger than the storage on the parent volume.</p>
    #[doc(hidden)]
    pub storage_capacity_quota_gi_b: Option<i32>,
    /// <p>The record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the default record size. For guidance on when to set a custom record size, see the <i>Amazon FSx for OpenZFS User Guide</i>.</p>
    #[doc(hidden)]
    pub record_size_ki_b: Option<i32>,
    /// <p>Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code> by default.</p>
    /// <ul>
    /// <li> <p> <code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</p> </li>
    /// <li> <p> <code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage utilization.</p> </li>
    /// <li> <p> <code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub data_compression_type: Option<crate::model::OpenZfsDataCompressionType>,
    /// <p>A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value.</p>
    #[doc(hidden)]
    pub copy_tags_to_snapshots: Option<bool>,
    /// <p>The configuration object that specifies the snapshot to use as the origin of the data for the volume.</p>
    #[doc(hidden)]
    pub origin_snapshot: Option<crate::model::OpenZfsOriginSnapshotConfiguration>,
    /// <p>A Boolean value indicating whether the volume is read-only.</p>
    #[doc(hidden)]
    pub read_only: Option<bool>,
    /// <p>The configuration object for mounting a Network File System (NFS) file system.</p>
    #[doc(hidden)]
    pub nfs_exports: Option<Vec<crate::model::OpenZfsNfsExport>>,
    /// <p>An object specifying how much storage users or groups can use on the volume.</p>
    #[doc(hidden)]
    pub user_and_group_quotas: Option<Vec<crate::model::OpenZfsUserOrGroupQuota>>,
    /// <p>Specifies the ID of the snapshot to which the volume was restored.</p>
    #[doc(hidden)]
    pub restore_to_snapshot: Option<String>,
    /// <p>A Boolean value indicating whether snapshots between the current state and the specified snapshot should be deleted when a volume is restored from snapshot.</p>
    // NOTE(review): the misspelling "snaphots" comes from the AWS service model itself and
    // must be preserved for wire/API compatibility — do not rename.
    #[doc(hidden)]
    pub delete_intermediate_snaphots: Option<bool>,
    /// <p>A Boolean value indicating whether dependent clone volumes created from intermediate snapshots should be deleted when a volume is restored from snapshot.</p>
    #[doc(hidden)]
    pub delete_cloned_volumes: Option<bool>,
}
impl OpenZfsVolumeConfiguration {
    /// <p>The ID of the parent volume.</p>
    pub fn parent_volume_id(&self) -> Option<&str> {
        self.parent_volume_id.as_ref().map(String::as_str)
    }
    /// <p>The path to the volume from the root volume. For example, <code>fsx/parentVolume/volume1</code>.</p>
    pub fn volume_path(&self) -> Option<&str> {
        self.volume_path.as_ref().map(String::as_str)
    }
    /// <p>The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't reserve more storage than the parent volume has reserved.</p>
    pub fn storage_capacity_reservation_gi_b(&self) -> Option<i32> {
        self.storage_capacity_reservation_gi_b
    }
    /// <p>The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. You can specify a quota larger than the storage on the parent volume.</p>
    pub fn storage_capacity_quota_gi_b(&self) -> Option<i32> {
        self.storage_capacity_quota_gi_b
    }
    /// <p>The record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the default record size. For guidance on when to set a custom record size, see the <i>Amazon FSx for OpenZFS User Guide</i>.</p>
    pub fn record_size_ki_b(&self) -> Option<i32> {
        self.record_size_ki_b
    }
    /// <p>Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code> by default.</p>
    /// <ul>
    /// <li> <p> <code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</p> </li>
    /// <li> <p> <code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage utilization.</p> </li>
    /// <li> <p> <code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</p> </li>
    /// </ul>
    pub fn data_compression_type(&self) -> Option<&crate::model::OpenZfsDataCompressionType> {
        self.data_compression_type.as_ref()
    }
    /// <p>A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value.</p>
    pub fn copy_tags_to_snapshots(&self) -> Option<bool> {
        self.copy_tags_to_snapshots
    }
    /// <p>The configuration object that specifies the snapshot to use as the origin of the data for the volume.</p>
    pub fn origin_snapshot(&self) -> Option<&crate::model::OpenZfsOriginSnapshotConfiguration> {
        self.origin_snapshot.as_ref()
    }
    /// <p>A Boolean value indicating whether the volume is read-only.</p>
    pub fn read_only(&self) -> Option<bool> {
        self.read_only
    }
    /// <p>The configuration object for mounting a Network File System (NFS) file system.</p>
    pub fn nfs_exports(&self) -> Option<&[crate::model::OpenZfsNfsExport]> {
        self.nfs_exports.as_ref().map(Vec::as_slice)
    }
    /// <p>An object specifying how much storage users or groups can use on the volume.</p>
    pub fn user_and_group_quotas(&self) -> Option<&[crate::model::OpenZfsUserOrGroupQuota]> {
        self.user_and_group_quotas.as_ref().map(Vec::as_slice)
    }
    /// <p>Specifies the ID of the snapshot to which the volume was restored.</p>
    pub fn restore_to_snapshot(&self) -> Option<&str> {
        self.restore_to_snapshot.as_ref().map(String::as_str)
    }
    /// <p>A Boolean value indicating whether snapshots between the current state and the specified snapshot should be deleted when a volume is restored from snapshot.</p>
    // Accessor name preserves the service model's "snaphots" misspelling for API compatibility.
    pub fn delete_intermediate_snaphots(&self) -> Option<bool> {
        self.delete_intermediate_snaphots
    }
    /// <p>A Boolean value indicating whether dependent clone volumes created from intermediate snapshots should be deleted when a volume is restored from snapshot.</p>
    pub fn delete_cloned_volumes(&self) -> Option<bool> {
        self.delete_cloned_volumes
    }
}
/// See [`OpenZfsVolumeConfiguration`](crate::model::OpenZfsVolumeConfiguration).
pub mod open_zfs_volume_configuration {

    /// A builder for [`OpenZfsVolumeConfiguration`](crate::model::OpenZfsVolumeConfiguration).
    ///
    /// Every field is optional; unset fields are carried through to the built
    /// value as `None`. Call [`Builder::build`] to produce the configuration.
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) parent_volume_id: std::option::Option<std::string::String>,
        pub(crate) volume_path: std::option::Option<std::string::String>,
        pub(crate) storage_capacity_reservation_gi_b: std::option::Option<i32>,
        pub(crate) storage_capacity_quota_gi_b: std::option::Option<i32>,
        pub(crate) record_size_ki_b: std::option::Option<i32>,
        pub(crate) data_compression_type:
            std::option::Option<crate::model::OpenZfsDataCompressionType>,
        pub(crate) copy_tags_to_snapshots: std::option::Option<bool>,
        pub(crate) origin_snapshot:
            std::option::Option<crate::model::OpenZfsOriginSnapshotConfiguration>,
        pub(crate) read_only: std::option::Option<bool>,
        pub(crate) nfs_exports: std::option::Option<std::vec::Vec<crate::model::OpenZfsNfsExport>>,
        pub(crate) user_and_group_quotas:
            std::option::Option<std::vec::Vec<crate::model::OpenZfsUserOrGroupQuota>>,
        pub(crate) restore_to_snapshot: std::option::Option<std::string::String>,
        // NOTE(review): "snaphots" (sic) matches the generated service model.
        pub(crate) delete_intermediate_snaphots: std::option::Option<bool>,
        pub(crate) delete_cloned_volumes: std::option::Option<bool>,
    }
    impl Builder {
        /// Sets the ID of the parent volume.
        pub fn parent_volume_id(self, input: impl Into<std::string::String>) -> Self {
            Self { parent_volume_id: Some(input.into()), ..self }
        }
        /// Sets or clears the ID of the parent volume.
        pub fn set_parent_volume_id(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self { parent_volume_id: input, ..self }
        }
        /// Sets the path to the volume from the root volume, for example
        /// `fsx/parentVolume/volume1`.
        pub fn volume_path(self, input: impl Into<std::string::String>) -> Self {
            Self { volume_path: Some(input.into()), ..self }
        }
        /// Sets or clears the path to the volume from the root volume.
        pub fn set_volume_path(self, input: std::option::Option<std::string::String>) -> Self {
            Self { volume_path: input, ..self }
        }
        /// Sets the amount of storage, in gibibytes (GiB), to reserve from the
        /// parent volume. You can't reserve more storage than the parent volume
        /// has reserved.
        pub fn storage_capacity_reservation_gi_b(self, input: i32) -> Self {
            Self { storage_capacity_reservation_gi_b: Some(input), ..self }
        }
        /// Sets or clears the storage reservation in gibibytes (GiB).
        pub fn set_storage_capacity_reservation_gi_b(
            self,
            input: std::option::Option<i32>,
        ) -> Self {
            Self { storage_capacity_reservation_gi_b: input, ..self }
        }
        /// Sets the maximum storage, in gibibytes (GiB), that the volume can use
        /// from its parent. The quota may exceed the storage on the parent volume.
        pub fn storage_capacity_quota_gi_b(self, input: i32) -> Self {
            Self { storage_capacity_quota_gi_b: Some(input), ..self }
        }
        /// Sets or clears the storage quota in gibibytes (GiB).
        pub fn set_storage_capacity_quota_gi_b(self, input: std::option::Option<i32>) -> Self {
            Self { storage_capacity_quota_gi_b: input, ..self }
        }
        /// Sets the record size of the volume in kibibytes (KiB). Valid values
        /// are 4, 8, 16, 32, 64, 128, 256, 512, or 1024; the default is 128.
        /// Most workloads should use the default record size.
        pub fn record_size_ki_b(self, input: i32) -> Self {
            Self { record_size_ki_b: Some(input), ..self }
        }
        /// Sets or clears the record size in kibibytes (KiB).
        pub fn set_record_size_ki_b(self, input: std::option::Option<i32>) -> Self {
            Self { record_size_ki_b: input, ..self }
        }
        /// Sets the compression method for the volume's data: `NONE` (the
        /// service default), `ZSTD` (better compression ratio), or `LZ4`
        /// (less compute-intensive, higher write throughput).
        pub fn data_compression_type(
            self,
            input: crate::model::OpenZfsDataCompressionType,
        ) -> Self {
            Self { data_compression_type: Some(input), ..self }
        }
        /// Sets or clears the compression method for the volume's data.
        pub fn set_data_compression_type(
            self,
            input: std::option::Option<crate::model::OpenZfsDataCompressionType>,
        ) -> Self {
            Self { data_compression_type: input, ..self }
        }
        /// Sets whether tags for the volume are copied to snapshots (defaults to
        /// `false` on the service side). Tags specified directly on a snapshot
        /// take precedence over copied volume tags.
        pub fn copy_tags_to_snapshots(self, input: bool) -> Self {
            Self { copy_tags_to_snapshots: Some(input), ..self }
        }
        /// Sets or clears the copy-tags-to-snapshots flag.
        pub fn set_copy_tags_to_snapshots(self, input: std::option::Option<bool>) -> Self {
            Self { copy_tags_to_snapshots: input, ..self }
        }
        /// Sets the snapshot to use as the origin of the volume's data.
        pub fn origin_snapshot(
            self,
            input: crate::model::OpenZfsOriginSnapshotConfiguration,
        ) -> Self {
            Self { origin_snapshot: Some(input), ..self }
        }
        /// Sets or clears the origin snapshot configuration.
        pub fn set_origin_snapshot(
            self,
            input: std::option::Option<crate::model::OpenZfsOriginSnapshotConfiguration>,
        ) -> Self {
            Self { origin_snapshot: input, ..self }
        }
        /// Sets whether the volume is read-only.
        pub fn read_only(self, input: bool) -> Self {
            Self { read_only: Some(input), ..self }
        }
        /// Sets or clears the read-only flag.
        pub fn set_read_only(self, input: std::option::Option<bool>) -> Self {
            Self { read_only: input, ..self }
        }
        /// Appends one NFS export configuration to `nfs_exports`.
        ///
        /// To override the contents of this collection use
        /// [`set_nfs_exports`](Self::set_nfs_exports).
        pub fn nfs_exports(mut self, input: crate::model::OpenZfsNfsExport) -> Self {
            self.nfs_exports
                .get_or_insert_with(std::vec::Vec::new)
                .push(input);
            self
        }
        /// Replaces (or clears) the full list of NFS export configurations.
        pub fn set_nfs_exports(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::OpenZfsNfsExport>>,
        ) -> Self {
            Self { nfs_exports: input, ..self }
        }
        /// Appends one user-or-group storage quota entry to `user_and_group_quotas`.
        ///
        /// To override the contents of this collection use
        /// [`set_user_and_group_quotas`](Self::set_user_and_group_quotas).
        pub fn user_and_group_quotas(
            mut self,
            input: crate::model::OpenZfsUserOrGroupQuota,
        ) -> Self {
            self.user_and_group_quotas
                .get_or_insert_with(std::vec::Vec::new)
                .push(input);
            self
        }
        /// Replaces (or clears) the full list of user-or-group storage quotas.
        pub fn set_user_and_group_quotas(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::OpenZfsUserOrGroupQuota>>,
        ) -> Self {
            Self { user_and_group_quotas: input, ..self }
        }
        /// Sets the ID of the snapshot to which the volume was restored.
        pub fn restore_to_snapshot(self, input: impl Into<std::string::String>) -> Self {
            Self { restore_to_snapshot: Some(input.into()), ..self }
        }
        /// Sets or clears the restore-target snapshot ID.
        pub fn set_restore_to_snapshot(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self { restore_to_snapshot: input, ..self }
        }
        /// Sets whether snapshots between the current state and the restored
        /// snapshot are deleted on restore.
        ///
        /// NOTE(review): "snaphots" (sic) is the generated model's spelling and
        /// is part of the public API surface.
        pub fn delete_intermediate_snaphots(self, input: bool) -> Self {
            Self { delete_intermediate_snaphots: Some(input), ..self }
        }
        /// Sets or clears the delete-intermediate-snapshots flag.
        pub fn set_delete_intermediate_snaphots(
            self,
            input: std::option::Option<bool>,
        ) -> Self {
            Self { delete_intermediate_snaphots: input, ..self }
        }
        /// Sets whether dependent clone volumes created from intermediate
        /// snapshots are deleted on restore.
        pub fn delete_cloned_volumes(self, input: bool) -> Self {
            Self { delete_cloned_volumes: Some(input), ..self }
        }
        /// Sets or clears the delete-cloned-volumes flag.
        pub fn set_delete_cloned_volumes(self, input: std::option::Option<bool>) -> Self {
            Self { delete_cloned_volumes: input, ..self }
        }
        /// Consumes the builder and constructs a [`OpenZfsVolumeConfiguration`](crate::model::OpenZfsVolumeConfiguration).
        pub fn build(self) -> crate::model::OpenZfsVolumeConfiguration {
            // Destructure once so every field moves straight into the result.
            let Self {
                parent_volume_id,
                volume_path,
                storage_capacity_reservation_gi_b,
                storage_capacity_quota_gi_b,
                record_size_ki_b,
                data_compression_type,
                copy_tags_to_snapshots,
                origin_snapshot,
                read_only,
                nfs_exports,
                user_and_group_quotas,
                restore_to_snapshot,
                delete_intermediate_snaphots,
                delete_cloned_volumes,
            } = self;
            crate::model::OpenZfsVolumeConfiguration {
                parent_volume_id,
                volume_path,
                storage_capacity_reservation_gi_b,
                storage_capacity_quota_gi_b,
                record_size_ki_b,
                data_compression_type,
                copy_tags_to_snapshots,
                origin_snapshot,
                read_only,
                nfs_exports,
                user_and_group_quotas,
                restore_to_snapshot,
                delete_intermediate_snaphots,
                delete_cloned_volumes,
            }
        }
    }
}
impl OpenZfsVolumeConfiguration {
    /// Creates a new builder-style object to manufacture [`OpenZfsVolumeConfiguration`](crate::model::OpenZfsVolumeConfiguration).
    pub fn builder() -> crate::model::open_zfs_volume_configuration::Builder {
        // `Builder` derives `Default`, so an empty builder is its default value.
        std::default::Default::default()
    }
}

/// <p>The configuration for how much storage a user or group can use on the volume. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct OpenZfsUserOrGroupQuota {
    /// <p>A value that specifies whether the quota applies to a user or group.</p>
    // `r#type` is a raw identifier because `type` is a Rust keyword.
    #[doc(hidden)]
    pub r#type: std::option::Option<crate::model::OpenZfsQuotaType>,
    /// <p>The ID of the user or group.</p>
    // NOTE(review): presumably a numeric POSIX UID/GID — confirm against the service model.
    #[doc(hidden)]
    pub id: std::option::Option<i32>,
    /// <p>The amount of storage that the user or group can use in gibibytes (GiB).</p>
    #[doc(hidden)]
    pub storage_capacity_quota_gi_b: std::option::Option<i32>,
}
impl OpenZfsUserOrGroupQuota {
    /// <p>A value that specifies whether the quota applies to a user or group.</p>
    pub fn r#type(&self) -> std::option::Option<&crate::model::OpenZfsQuotaType> {
        // `r#type` is a raw identifier because `type` is a Rust keyword.
        self.r#type.as_ref()
    }
    /// <p>The ID of the user or group.</p>
    pub fn id(&self) -> std::option::Option<i32> {
        // NOTE(review): presumably a numeric POSIX UID/GID — confirm against the service model.
        self.id
    }
    /// <p>The amount of storage that the user or group can use in gibibytes (GiB).</p>
    pub fn storage_capacity_quota_gi_b(&self) -> std::option::Option<i32> {
        self.storage_capacity_quota_gi_b
    }
}
/// See [`OpenZfsUserOrGroupQuota`](crate::model::OpenZfsUserOrGroupQuota).
pub mod open_zfs_user_or_group_quota {

    /// A builder for [`OpenZfsUserOrGroupQuota`](crate::model::OpenZfsUserOrGroupQuota).
    ///
    /// Every field is optional; unset fields become `None` in the built value.
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) r#type: std::option::Option<crate::model::OpenZfsQuotaType>,
        pub(crate) id: std::option::Option<i32>,
        pub(crate) storage_capacity_quota_gi_b: std::option::Option<i32>,
    }
    impl Builder {
        /// Sets whether the quota applies to a user or a group.
        pub fn r#type(self, input: crate::model::OpenZfsQuotaType) -> Self {
            Self { r#type: Some(input), ..self }
        }
        /// Sets or clears whether the quota applies to a user or a group.
        pub fn set_type(
            self,
            input: std::option::Option<crate::model::OpenZfsQuotaType>,
        ) -> Self {
            Self { r#type: input, ..self }
        }
        /// Sets the ID of the user or group.
        pub fn id(self, input: i32) -> Self {
            Self { id: Some(input), ..self }
        }
        /// Sets or clears the ID of the user or group.
        pub fn set_id(self, input: std::option::Option<i32>) -> Self {
            Self { id: input, ..self }
        }
        /// Sets the storage, in gibibytes (GiB), that the user or group can use.
        pub fn storage_capacity_quota_gi_b(self, input: i32) -> Self {
            Self { storage_capacity_quota_gi_b: Some(input), ..self }
        }
        /// Sets or clears the storage quota in gibibytes (GiB).
        pub fn set_storage_capacity_quota_gi_b(self, input: std::option::Option<i32>) -> Self {
            Self { storage_capacity_quota_gi_b: input, ..self }
        }
        /// Consumes the builder and constructs a [`OpenZfsUserOrGroupQuota`](crate::model::OpenZfsUserOrGroupQuota).
        pub fn build(self) -> crate::model::OpenZfsUserOrGroupQuota {
            let Self { r#type, id, storage_capacity_quota_gi_b } = self;
            crate::model::OpenZfsUserOrGroupQuota { r#type, id, storage_capacity_quota_gi_b }
        }
    }
}
impl OpenZfsUserOrGroupQuota {
    /// Creates a new builder-style object to manufacture [`OpenZfsUserOrGroupQuota`](crate::model::OpenZfsUserOrGroupQuota).
    pub fn builder() -> crate::model::open_zfs_user_or_group_quota::Builder {
        // `Builder` derives `Default`, so an empty builder is its default value.
        std::default::Default::default()
    }
}

/// When writing a match expression against `OpenZfsQuotaType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let openzfsquotatype = unimplemented!();
/// match openzfsquotatype {
///     OpenZfsQuotaType::Group => { /* ... */ },
///     OpenZfsQuotaType::User => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `openzfsquotatype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `OpenZfsQuotaType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `OpenZfsQuotaType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `OpenZfsQuotaType::NewFeature` is defined.
/// Specifically, when `openzfsquotatype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `OpenZfsQuotaType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
///
/// Specifies whether an OpenZFS storage quota applies to a user or to a group.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum OpenZfsQuotaType {
    /// The quota applies to a group (string value `"GROUP"`).
    #[allow(missing_docs)] // documentation missing in model
    Group,
    /// The quota applies to a user (string value `"USER"`).
    #[allow(missing_docs)] // documentation missing in model
    User,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for OpenZfsQuotaType {
    /// Converts a raw service string into the matching variant. Any string that
    /// is not a modeled value becomes `Unknown`, preserving the original text.
    fn from(s: &str) -> Self {
        match s {
            "GROUP" => Self::Group,
            "USER" => Self::User,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for OpenZfsQuotaType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(OpenZfsQuotaType::from(s))
    }
}
impl OpenZfsQuotaType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            OpenZfsQuotaType::Group => "GROUP",
            OpenZfsQuotaType::User => "USER",
            OpenZfsQuotaType::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["GROUP", "USER"]
    }
}
impl AsRef<str> for OpenZfsQuotaType {
    // Delegates to `as_str`, so `Unknown` values also yield their raw string.
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>The Network File System (NFS) configurations for mounting an Amazon FSx for OpenZFS file system. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct OpenZfsNfsExport {
    /// <p>A list of configuration objects that contain the client and options for mounting the OpenZFS file system. </p>
    // NOTE(review): `None` likely means the field was absent from the service response,
    // as distinct from an explicitly empty list — confirm against the wire protocol.
    #[doc(hidden)]
    pub client_configurations:
        std::option::Option<std::vec::Vec<crate::model::OpenZfsClientConfiguration>>,
}
impl OpenZfsNfsExport {
    /// Returns the client configurations for mounting the OpenZFS file system,
    /// if present, as a borrowed slice.
    pub fn client_configurations(
        &self,
    ) -> std::option::Option<&[crate::model::OpenZfsClientConfiguration]> {
        self.client_configurations.as_ref().map(|configs| configs.as_slice())
    }
}
/// See [`OpenZfsNfsExport`](crate::model::OpenZfsNfsExport).
pub mod open_zfs_nfs_export {

    /// A builder for [`OpenZfsNfsExport`](crate::model::OpenZfsNfsExport).
    ///
    /// The single field is optional; if unset it becomes `None` in the built value.
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) client_configurations:
            std::option::Option<std::vec::Vec<crate::model::OpenZfsClientConfiguration>>,
    }
    impl Builder {
        /// Appends one client configuration to `client_configurations`.
        ///
        /// To override the contents of this collection use
        /// [`set_client_configurations`](Self::set_client_configurations).
        pub fn client_configurations(
            mut self,
            input: crate::model::OpenZfsClientConfiguration,
        ) -> Self {
            self.client_configurations
                .get_or_insert_with(std::vec::Vec::new)
                .push(input);
            self
        }
        /// Replaces (or clears) the full list of client configurations.
        pub fn set_client_configurations(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::OpenZfsClientConfiguration>>,
        ) -> Self {
            Self { client_configurations: input }
        }
        /// Consumes the builder and constructs a [`OpenZfsNfsExport`](crate::model::OpenZfsNfsExport).
        pub fn build(self) -> crate::model::OpenZfsNfsExport {
            let Self { client_configurations } = self;
            crate::model::OpenZfsNfsExport { client_configurations }
        }
    }
}
impl OpenZfsNfsExport {
    /// Creates a new builder-style object to manufacture [`OpenZfsNfsExport`](crate::model::OpenZfsNfsExport).
    pub fn builder() -> crate::model::open_zfs_nfs_export::Builder {
        // `Builder` derives `Default`, so an empty builder is its default value.
        std::default::Default::default()
    }
}

/// <p>Specifies who can mount an OpenZFS file system and the options available while mounting the file system.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct OpenZfsClientConfiguration {
    /// <p>A value that specifies who can mount the file system. You can provide a wildcard character (<code>*</code>), an IP address (<code>0.0.0.0</code>), or a CIDR address (<code>192.0.2.0/24</code>). By default, Amazon FSx uses the wildcard character when specifying the client. </p>
    #[doc(hidden)]
    pub clients: std::option::Option<std::string::String>,
    /// <p>The options to use when mounting the file system. For a list of options that you can use with Network File System (NFS), see the <a href="https://linux.die.net/man/5/exports">exports(5) - Linux man page</a>. When choosing your options, consider the following:</p>
    /// <ul>
    /// <li> <p> <code>crossmnt</code> is used by default. If you don't specify <code>crossmnt</code> when changing the client configuration, you won't be able to see or access snapshots in your file system's snapshot directory.</p> </li>
    /// <li> <p> <code>sync</code> is used by default. If you instead specify <code>async</code>, the system acknowledges writes before writing to disk. If the system crashes before the writes are finished, you lose the unwritten data. </p> </li>
    /// </ul>
    // Each entry is a single NFS export option string (see the exports(5) reference above).
    #[doc(hidden)]
    pub options: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl OpenZfsClientConfiguration {
    /// Returns the client specification — a wildcard (`*`), an IP address, or a
    /// CIDR address — if present.
    pub fn clients(&self) -> std::option::Option<&str> {
        self.clients.as_ref().map(|clients| clients.as_str())
    }
    /// Returns the NFS mount options, if present, as a borrowed slice. See the
    /// exports(5) Linux man page for the option syntax; `crossmnt` and `sync`
    /// are the service defaults.
    pub fn options(&self) -> std::option::Option<&[std::string::String]> {
        self.options.as_ref().map(|options| options.as_slice())
    }
}
/// See [`OpenZfsClientConfiguration`](crate::model::OpenZfsClientConfiguration).
pub mod open_zfs_client_configuration {

    /// A builder for [`OpenZfsClientConfiguration`](crate::model::OpenZfsClientConfiguration).
    ///
    /// Both fields are optional; unset fields become `None` in the built value.
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) clients: std::option::Option<std::string::String>,
        pub(crate) options: std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// Sets who can mount the file system: a wildcard (`*`), an IP address,
        /// or a CIDR address. The service defaults to the wildcard.
        pub fn clients(self, input: impl Into<std::string::String>) -> Self {
            Self { clients: Some(input.into()), ..self }
        }
        /// Sets or clears the client specification.
        pub fn set_clients(self, input: std::option::Option<std::string::String>) -> Self {
            Self { clients: input, ..self }
        }
        /// Appends a single NFS mount option to `options`. See the exports(5)
        /// Linux man page for the option syntax; `crossmnt` and `sync` are the
        /// service defaults.
        ///
        /// To override the contents of this collection use
        /// [`set_options`](Self::set_options).
        pub fn options(mut self, input: impl Into<std::string::String>) -> Self {
            self.options
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// Replaces (or clears) the full list of NFS mount options.
        pub fn set_options(
            self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            Self { options: input, ..self }
        }
        /// Consumes the builder and constructs a [`OpenZfsClientConfiguration`](crate::model::OpenZfsClientConfiguration).
        pub fn build(self) -> crate::model::OpenZfsClientConfiguration {
            let Self { clients, options } = self;
            crate::model::OpenZfsClientConfiguration { clients, options }
        }
    }
}
impl OpenZfsClientConfiguration {
    /// Creates a new builder-style object to manufacture [`OpenZfsClientConfiguration`](crate::model::OpenZfsClientConfiguration).
    pub fn builder() -> crate::model::open_zfs_client_configuration::Builder {
        // All builder fields start as `None` via the derived `Default`.
        std::default::Default::default()
    }
}

/// <p>The snapshot configuration to use when creating an OpenZFS volume from a snapshot.</p>
// Fields are `#[doc(hidden)]`; read them through the same-named accessor
// methods on `OpenZfsOriginSnapshotConfiguration`, and construct values
// through the `builder()` in `open_zfs_origin_snapshot_configuration`.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct OpenZfsOriginSnapshotConfiguration {
    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    #[doc(hidden)]
    pub snapshot_arn: std::option::Option<std::string::String>,
    /// <p>The strategy used when copying data from the snapshot to the new volume. </p>
    /// <ul>
    /// <li> <p> <code>CLONE</code> - The new volume references the data in the origin snapshot. Cloning a snapshot is faster than copying the data from a snapshot to a new volume and doesn't consume disk throughput. However, the origin snapshot can't be deleted if there is a volume using its copied data. </p> </li>
    /// <li> <p> <code>FULL_COPY</code> - Copies all data from the snapshot to the new volume. </p> </li>
    /// </ul>
    #[doc(hidden)]
    pub copy_strategy: std::option::Option<crate::model::OpenZfsCopyStrategy>,
}
impl OpenZfsOriginSnapshotConfiguration {
    /// The Amazon Resource Name (ARN) of the origin snapshot, if set. For details on ARNs,
    /// see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a>
    /// in the <i>Amazon Web Services General Reference</i>.
    pub fn snapshot_arn(&self) -> std::option::Option<&str> {
        self.snapshot_arn.as_ref().map(std::string::String::as_str)
    }
    /// The strategy used when copying data from the snapshot to the new volume, if set:
    /// <code>CLONE</code> (the new volume references the origin snapshot's data) or
    /// <code>FULL_COPY</code> (all data is copied from the snapshot to the new volume).
    pub fn copy_strategy(&self) -> std::option::Option<&crate::model::OpenZfsCopyStrategy> {
        self.copy_strategy.as_ref()
    }
}
/// See [`OpenZfsOriginSnapshotConfiguration`](crate::model::OpenZfsOriginSnapshotConfiguration).
pub mod open_zfs_origin_snapshot_configuration {

    /// A builder for [`OpenZfsOriginSnapshotConfiguration`](crate::model::OpenZfsOriginSnapshotConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) snapshot_arn: std::option::Option<std::string::String>,
        pub(crate) copy_strategy: std::option::Option<crate::model::OpenZfsCopyStrategy>,
    }
    impl Builder {
        /// Sets the Amazon Resource Name (ARN) that identifies the origin snapshot. For details
        /// on ARNs, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a>
        /// in the <i>Amazon Web Services General Reference</i>.
        pub fn snapshot_arn(self, input: impl Into<std::string::String>) -> Self {
            Self {
                snapshot_arn: std::option::Option::Some(input.into()),
                ..self
            }
        }
        /// Sets or clears the Amazon Resource Name (ARN) of the origin snapshot.
        pub fn set_snapshot_arn(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                snapshot_arn: input,
                ..self
            }
        }
        /// Sets the strategy used when copying data from the snapshot to the new volume:
        /// <code>CLONE</code> references the origin snapshot's data (faster, no disk
        /// throughput consumed, but the snapshot can't be deleted while referenced), or
        /// <code>FULL_COPY</code> copies all data from the snapshot to the new volume.
        pub fn copy_strategy(self, input: crate::model::OpenZfsCopyStrategy) -> Self {
            Self {
                copy_strategy: std::option::Option::Some(input),
                ..self
            }
        }
        /// Sets or clears the copy strategy for the new volume.
        pub fn set_copy_strategy(
            self,
            input: std::option::Option<crate::model::OpenZfsCopyStrategy>,
        ) -> Self {
            Self {
                copy_strategy: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`OpenZfsOriginSnapshotConfiguration`](crate::model::OpenZfsOriginSnapshotConfiguration).
        pub fn build(self) -> crate::model::OpenZfsOriginSnapshotConfiguration {
            let Self {
                snapshot_arn,
                copy_strategy,
            } = self;
            crate::model::OpenZfsOriginSnapshotConfiguration {
                snapshot_arn,
                copy_strategy,
            }
        }
    }
}
impl OpenZfsOriginSnapshotConfiguration {
    /// Creates a new builder-style object to manufacture [`OpenZfsOriginSnapshotConfiguration`](crate::model::OpenZfsOriginSnapshotConfiguration).
    pub fn builder() -> crate::model::open_zfs_origin_snapshot_configuration::Builder {
        // All builder fields start as `None` via the derived `Default`.
        std::default::Default::default()
    }
}

/// When writing a match expression against `OpenZfsCopyStrategy`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let openzfscopystrategy = unimplemented!();
/// match openzfscopystrategy {
///     OpenZfsCopyStrategy::Clone => { /* ... */ },
///     OpenZfsCopyStrategy::FullCopy => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `openzfscopystrategy` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `OpenZfsCopyStrategy::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `OpenZfsCopyStrategy::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `OpenZfsCopyStrategy::NewFeature` is defined.
/// Specifically, when `openzfscopystrategy` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `OpenZfsCopyStrategy::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum OpenZfsCopyStrategy {
    /// The new volume references the data in the origin snapshot
    /// (serialized as `CLONE`).
    #[allow(missing_docs)] // documentation missing in model
    Clone,
    /// All data is copied from the snapshot to the new volume
    /// (serialized as `FULL_COPY`).
    #[allow(missing_docs)] // documentation missing in model
    FullCopy,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for OpenZfsCopyStrategy {
    /// Converts a raw service string into the enum, mapping any
    /// unrecognized value to [`OpenZfsCopyStrategy::Unknown`].
    fn from(s: &str) -> Self {
        match s {
            "CLONE" => Self::Clone,
            "FULL_COPY" => Self::FullCopy,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for OpenZfsCopyStrategy {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(OpenZfsCopyStrategy::from(s))
    }
}
impl OpenZfsCopyStrategy {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            OpenZfsCopyStrategy::Clone => "CLONE",
            OpenZfsCopyStrategy::FullCopy => "FULL_COPY",
            OpenZfsCopyStrategy::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["CLONE", "FULL_COPY"]
    }
}
impl AsRef<str> for OpenZfsCopyStrategy {
    /// Borrows the same string value that [`OpenZfsCopyStrategy::as_str`] returns.
    fn as_ref(&self) -> &str {
        Self::as_str(self)
    }
}

/// When writing a match expression against `OpenZfsDataCompressionType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let openzfsdatacompressiontype = unimplemented!();
/// match openzfsdatacompressiontype {
///     OpenZfsDataCompressionType::Lz4 => { /* ... */ },
///     OpenZfsDataCompressionType::None => { /* ... */ },
///     OpenZfsDataCompressionType::Zstd => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `openzfsdatacompressiontype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `OpenZfsDataCompressionType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `OpenZfsDataCompressionType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `OpenZfsDataCompressionType::NewFeature` is defined.
/// Specifically, when `openzfsdatacompressiontype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `OpenZfsDataCompressionType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum OpenZfsDataCompressionType {
    /// LZ4 data compression (serialized as `LZ4`).
    #[allow(missing_docs)] // documentation missing in model
    Lz4,
    /// No data compression (serialized as `NONE`).
    #[allow(missing_docs)] // documentation missing in model
    None,
    /// Zstandard data compression (serialized as `ZSTD`).
    #[allow(missing_docs)] // documentation missing in model
    Zstd,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for OpenZfsDataCompressionType {
    /// Converts a raw service string into the enum, mapping any
    /// unrecognized value to [`OpenZfsDataCompressionType::Unknown`].
    fn from(s: &str) -> Self {
        match s {
            "LZ4" => Self::Lz4,
            "NONE" => Self::None,
            "ZSTD" => Self::Zstd,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for OpenZfsDataCompressionType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(OpenZfsDataCompressionType::from(s))
    }
}
impl OpenZfsDataCompressionType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            OpenZfsDataCompressionType::Lz4 => "LZ4",
            OpenZfsDataCompressionType::None => "NONE",
            OpenZfsDataCompressionType::Zstd => "ZSTD",
            OpenZfsDataCompressionType::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["LZ4", "NONE", "ZSTD"]
    }
}
impl AsRef<str> for OpenZfsDataCompressionType {
    /// Borrows the same string value that [`OpenZfsDataCompressionType::as_str`] returns.
    fn as_ref(&self) -> &str {
        Self::as_str(self)
    }
}

/// <p>Describes a specific Amazon FSx administrative action for the current Windows, Lustre, or OpenZFS file system.</p>
// Fields are `#[doc(hidden)]`; read them through the same-named accessor
// methods on `AdministrativeAction`.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct AdministrativeAction {
    /// <p>Describes the type of administrative action, as follows:</p>
    /// <ul>
    /// <li> <p> <code>FILE_SYSTEM_UPDATE</code> - A file system update administrative action initiated from the Amazon FSx console, API (<code>UpdateFileSystem</code>), or CLI (<code>update-file-system</code>).</p> </li>
    /// <li> <p> <code>STORAGE_OPTIMIZATION</code> - After the <code>FILE_SYSTEM_UPDATE</code> task to increase a file system's storage capacity has been completed successfully, a <code>STORAGE_OPTIMIZATION</code> task starts. </p>
    /// <ul>
    /// <li> <p>For Windows and ONTAP, storage optimization is the process of migrating the file system data to newer larger disks.</p> </li>
    /// <li> <p>For Lustre, storage optimization consists of rebalancing the data across the existing and newly added file servers.</p> </li>
    /// </ul> <p>You can track the storage-optimization progress using the <code>ProgressPercent</code> property. When <code>STORAGE_OPTIMIZATION</code> has been completed successfully, the parent <code>FILE_SYSTEM_UPDATE</code> action status changes to <code>COMPLETED</code>. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html">Managing storage capacity</a> in the <i>Amazon FSx for Windows File Server User Guide</i>, <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/managing-storage-capacity.html">Managing storage and throughput capacity</a> in the <i>Amazon FSx for Lustre User Guide</i>, and <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-storage-capacity.html">Managing storage capacity and provisioned IOPS</a> in the <i>Amazon FSx for NetApp ONTAP User Guide</i>.</p> </li>
    /// <li> <p> <code>FILE_SYSTEM_ALIAS_ASSOCIATION</code> - A file system update to associate a new Domain Name System (DNS) alias with the file system. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/APIReference/API_AssociateFileSystemAliases.html"> AssociateFileSystemAliases</a>.</p> </li>
    /// <li> <p> <code>FILE_SYSTEM_ALIAS_DISASSOCIATION</code> - A file system update to disassociate a DNS alias from the file system. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/APIReference/API_DisassociateFileSystemAliases.html">DisassociateFileSystemAliases</a>.</p> </li>
    /// <li> <p> <code>VOLUME_UPDATE</code> - A volume update to an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (<code>UpdateVolume</code>), or CLI (<code>update-volume</code>).</p> </li>
    /// <li> <p> <code>VOLUME_RESTORE</code> - An Amazon FSx for OpenZFS volume is returned to the state saved by the specified snapshot, initiated from an API (<code>RestoreVolumeFromSnapshot</code>) or CLI (<code>restore-volume-from-snapshot</code>).</p> </li>
    /// <li> <p> <code>SNAPSHOT_UPDATE</code> - A snapshot update to an Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (<code>UpdateSnapshot</code>), or CLI (<code>update-snapshot</code>).</p> </li>
    /// <li> <p> <code>RELEASE_NFS_V3_LOCKS</code> - Tracks the release of Network File System (NFS) V3 locks on an Amazon FSx for OpenZFS file system.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub administrative_action_type: std::option::Option<crate::model::AdministrativeActionType>,
    /// <p>The percentage-complete status of a <code>STORAGE_OPTIMIZATION</code> administrative action. Does not apply to any other administrative action type.</p>
    #[doc(hidden)]
    pub progress_percent: std::option::Option<i32>,
    /// <p>The time that the administrative action request was received.</p>
    #[doc(hidden)]
    pub request_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>Describes the status of the administrative action, as follows:</p>
    /// <ul>
    /// <li> <p> <code>FAILED</code> - Amazon FSx failed to process the administrative action successfully.</p> </li>
    /// <li> <p> <code>IN_PROGRESS</code> - Amazon FSx is processing the administrative action.</p> </li>
    /// <li> <p> <code>PENDING</code> - Amazon FSx is waiting to process the administrative action.</p> </li>
    /// <li> <p> <code>COMPLETED</code> - Amazon FSx has finished processing the administrative task.</p> </li>
    /// <li> <p> <code>UPDATED_OPTIMIZING</code> - For a storage-capacity increase update, Amazon FSx has updated the file system with the new storage capacity, and is now performing the storage-optimization process. </p> </li>
    /// </ul>
    #[doc(hidden)]
    pub status: std::option::Option<crate::model::Status>,
    /// <p>Describes the target value for the administration action, provided in the <code>UpdateFileSystem</code> operation. Returned for <code>FILE_SYSTEM_UPDATE</code> administrative actions. </p>
    #[doc(hidden)]
    pub target_file_system_values: std::option::Option<crate::model::FileSystem>,
    /// <p>Provides information about a failed administrative action.</p>
    #[doc(hidden)]
    pub failure_details: std::option::Option<crate::model::AdministrativeActionFailureDetails>,
    /// <p>Describes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.</p>
    #[doc(hidden)]
    pub target_volume_values: std::option::Option<crate::model::Volume>,
    /// <p>A snapshot of an Amazon FSx for OpenZFS volume.</p>
    #[doc(hidden)]
    pub target_snapshot_values: std::option::Option<crate::model::Snapshot>,
}
impl AdministrativeAction {
    /// The kind of administrative action, if set: <code>FILE_SYSTEM_UPDATE</code>,
    /// <code>STORAGE_OPTIMIZATION</code>, <code>FILE_SYSTEM_ALIAS_ASSOCIATION</code>,
    /// <code>FILE_SYSTEM_ALIAS_DISASSOCIATION</code>, <code>VOLUME_UPDATE</code>,
    /// <code>VOLUME_RESTORE</code>, <code>SNAPSHOT_UPDATE</code>, or
    /// <code>RELEASE_NFS_V3_LOCKS</code>. See the field documentation on
    /// [`AdministrativeAction`] for what each value means.
    pub fn administrative_action_type(&self) -> std::option::Option<&crate::model::AdministrativeActionType> {
        self.administrative_action_type.as_ref()
    }
    /// The percentage-complete status of a <code>STORAGE_OPTIMIZATION</code>
    /// administrative action; does not apply to any other action type.
    pub fn progress_percent(&self) -> std::option::Option<i32> {
        self.progress_percent
    }
    /// The time that the administrative action request was received.
    pub fn request_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.request_time.as_ref()
    }
    /// The status of the administrative action, if set: <code>FAILED</code>,
    /// <code>IN_PROGRESS</code>, <code>PENDING</code>, <code>COMPLETED</code>, or
    /// <code>UPDATED_OPTIMIZING</code> (storage capacity updated; storage
    /// optimization still in progress).
    pub fn status(&self) -> std::option::Option<&crate::model::Status> {
        self.status.as_ref()
    }
    /// The target value for the administration action, as provided in the
    /// <code>UpdateFileSystem</code> operation. Returned for
    /// <code>FILE_SYSTEM_UPDATE</code> administrative actions.
    pub fn target_file_system_values(&self) -> std::option::Option<&crate::model::FileSystem> {
        self.target_file_system_values.as_ref()
    }
    /// Information about a failed administrative action, if any.
    pub fn failure_details(&self) -> std::option::Option<&crate::model::AdministrativeActionFailureDetails> {
        self.failure_details.as_ref()
    }
    /// The target Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.
    pub fn target_volume_values(&self) -> std::option::Option<&crate::model::Volume> {
        self.target_volume_values.as_ref()
    }
    /// The target snapshot of an Amazon FSx for OpenZFS volume.
    pub fn target_snapshot_values(&self) -> std::option::Option<&crate::model::Snapshot> {
        self.target_snapshot_values.as_ref()
    }
}
/// See [`AdministrativeAction`](crate::model::AdministrativeAction).
pub mod administrative_action {

    /// A builder for [`AdministrativeAction`](crate::model::AdministrativeAction).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) administrative_action_type:
            std::option::Option<crate::model::AdministrativeActionType>,
        pub(crate) progress_percent: std::option::Option<i32>,
        pub(crate) request_time: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) status: std::option::Option<crate::model::Status>,
        pub(crate) target_file_system_values: std::option::Option<crate::model::FileSystem>,
        pub(crate) failure_details:
            std::option::Option<crate::model::AdministrativeActionFailureDetails>,
        pub(crate) target_volume_values: std::option::Option<crate::model::Volume>,
        pub(crate) target_snapshot_values: std::option::Option<crate::model::Snapshot>,
    }
    impl Builder {
        /// <p>Describes the type of administrative action. Valid values include <code>FILE_SYSTEM_UPDATE</code>, <code>STORAGE_OPTIMIZATION</code>, <code>FILE_SYSTEM_ALIAS_ASSOCIATION</code>, <code>FILE_SYSTEM_ALIAS_DISASSOCIATION</code>, <code>VOLUME_UPDATE</code>, <code>VOLUME_RESTORE</code>, <code>SNAPSHOT_UPDATE</code>, and <code>RELEASE_NFS_V3_LOCKS</code>. For a full description of each action type, see <a href="https://docs.aws.amazon.com/fsx/latest/APIReference/API_AdministrativeAction.html">AdministrativeAction</a> in the <i>Amazon FSx API Reference</i>.</p>
        pub fn administrative_action_type(
            self,
            input: crate::model::AdministrativeActionType,
        ) -> Self {
            Self {
                administrative_action_type: Some(input),
                ..self
            }
        }
        /// <p>Describes the type of administrative action. Valid values include <code>FILE_SYSTEM_UPDATE</code>, <code>STORAGE_OPTIMIZATION</code>, <code>FILE_SYSTEM_ALIAS_ASSOCIATION</code>, <code>FILE_SYSTEM_ALIAS_DISASSOCIATION</code>, <code>VOLUME_UPDATE</code>, <code>VOLUME_RESTORE</code>, <code>SNAPSHOT_UPDATE</code>, and <code>RELEASE_NFS_V3_LOCKS</code>. For a full description of each action type, see <a href="https://docs.aws.amazon.com/fsx/latest/APIReference/API_AdministrativeAction.html">AdministrativeAction</a> in the <i>Amazon FSx API Reference</i>.</p>
        pub fn set_administrative_action_type(
            self,
            input: std::option::Option<crate::model::AdministrativeActionType>,
        ) -> Self {
            Self {
                administrative_action_type: input,
                ..self
            }
        }
        /// <p>The percentage-complete status of a <code>STORAGE_OPTIMIZATION</code> administrative action. Does not apply to any other administrative action type.</p>
        pub fn progress_percent(self, input: i32) -> Self {
            Self {
                progress_percent: Some(input),
                ..self
            }
        }
        /// <p>The percentage-complete status of a <code>STORAGE_OPTIMIZATION</code> administrative action. Does not apply to any other administrative action type.</p>
        pub fn set_progress_percent(self, input: std::option::Option<i32>) -> Self {
            Self {
                progress_percent: input,
                ..self
            }
        }
        /// <p>The time that the administrative action request was received.</p>
        pub fn request_time(self, input: aws_smithy_types::DateTime) -> Self {
            Self {
                request_time: Some(input),
                ..self
            }
        }
        /// <p>The time that the administrative action request was received.</p>
        pub fn set_request_time(
            self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            Self {
                request_time: input,
                ..self
            }
        }
        /// <p>Describes the status of the administrative action: <code>FAILED</code>, <code>IN_PROGRESS</code>, <code>PENDING</code>, <code>COMPLETED</code>, or <code>UPDATED_OPTIMIZING</code> (for a storage-capacity increase, the new capacity is in place and storage optimization is running).</p>
        pub fn status(self, input: crate::model::Status) -> Self {
            Self {
                status: Some(input),
                ..self
            }
        }
        /// <p>Describes the status of the administrative action: <code>FAILED</code>, <code>IN_PROGRESS</code>, <code>PENDING</code>, <code>COMPLETED</code>, or <code>UPDATED_OPTIMIZING</code> (for a storage-capacity increase, the new capacity is in place and storage optimization is running).</p>
        pub fn set_status(self, input: std::option::Option<crate::model::Status>) -> Self {
            Self {
                status: input,
                ..self
            }
        }
        /// <p>Describes the target value for the administration action, provided in the <code>UpdateFileSystem</code> operation. Returned for <code>FILE_SYSTEM_UPDATE</code> administrative actions. </p>
        pub fn target_file_system_values(self, input: crate::model::FileSystem) -> Self {
            Self {
                target_file_system_values: Some(input),
                ..self
            }
        }
        /// <p>Describes the target value for the administration action, provided in the <code>UpdateFileSystem</code> operation. Returned for <code>FILE_SYSTEM_UPDATE</code> administrative actions. </p>
        pub fn set_target_file_system_values(
            self,
            input: std::option::Option<crate::model::FileSystem>,
        ) -> Self {
            Self {
                target_file_system_values: input,
                ..self
            }
        }
        /// <p>Provides information about a failed administrative action.</p>
        pub fn failure_details(
            self,
            input: crate::model::AdministrativeActionFailureDetails,
        ) -> Self {
            Self {
                failure_details: Some(input),
                ..self
            }
        }
        /// <p>Provides information about a failed administrative action.</p>
        pub fn set_failure_details(
            self,
            input: std::option::Option<crate::model::AdministrativeActionFailureDetails>,
        ) -> Self {
            Self {
                failure_details: input,
                ..self
            }
        }
        /// <p>Describes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.</p>
        pub fn target_volume_values(self, input: crate::model::Volume) -> Self {
            Self {
                target_volume_values: Some(input),
                ..self
            }
        }
        /// <p>Describes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.</p>
        pub fn set_target_volume_values(
            self,
            input: std::option::Option<crate::model::Volume>,
        ) -> Self {
            Self {
                target_volume_values: input,
                ..self
            }
        }
        /// <p>A snapshot of an Amazon FSx for OpenZFS volume.</p>
        pub fn target_snapshot_values(self, input: crate::model::Snapshot) -> Self {
            Self {
                target_snapshot_values: Some(input),
                ..self
            }
        }
        /// <p>A snapshot of an Amazon FSx for OpenZFS volume.</p>
        pub fn set_target_snapshot_values(
            self,
            input: std::option::Option<crate::model::Snapshot>,
        ) -> Self {
            Self {
                target_snapshot_values: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`AdministrativeAction`](crate::model::AdministrativeAction).
        pub fn build(self) -> crate::model::AdministrativeAction {
            // Destructure once so every field is moved exactly as stored.
            let Self {
                administrative_action_type,
                progress_percent,
                request_time,
                status,
                target_file_system_values,
                failure_details,
                target_volume_values,
                target_snapshot_values,
            } = self;
            crate::model::AdministrativeAction {
                administrative_action_type,
                progress_percent,
                request_time,
                status,
                target_file_system_values,
                failure_details,
                target_volume_values,
                target_snapshot_values,
            }
        }
    }
}
impl AdministrativeAction {
    /// Creates a new builder-style object to manufacture [`AdministrativeAction`](crate::model::AdministrativeAction).
    pub fn builder() -> crate::model::administrative_action::Builder {
        // The builder derives `Default`, so this starts with every field unset.
        Default::default()
    }
}

/// <p>A snapshot of an Amazon FSx for OpenZFS volume.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Snapshot {
    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    #[doc(hidden)]
    pub resource_arn: std::option::Option<std::string::String>,
    /// <p>The ID of the snapshot.</p>
    #[doc(hidden)]
    pub snapshot_id: std::option::Option<std::string::String>,
    /// <p>The name of the snapshot.</p>
    #[doc(hidden)]
    pub name: std::option::Option<std::string::String>,
    /// <p>The ID of the volume that the snapshot was taken of.</p>
    #[doc(hidden)]
    pub volume_id: std::option::Option<std::string::String>,
    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
    #[doc(hidden)]
    pub creation_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The lifecycle status of the snapshot.</p>
    /// <ul>
    /// <li> <p> <code>PENDING</code> - Amazon FSx hasn't started creating the snapshot.</p> </li>
    /// <li> <p> <code>CREATING</code> - Amazon FSx is creating the snapshot.</p> </li>
    /// <li> <p> <code>DELETING</code> - Amazon FSx is deleting the snapshot.</p> </li>
    /// <li> <p> <code>AVAILABLE</code> - The snapshot is fully available.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub lifecycle: std::option::Option<crate::model::SnapshotLifecycle>,
    /// <p>Describes why a resource lifecycle state changed. Only the <code>message</code> field is populated.</p>
    #[doc(hidden)]
    pub lifecycle_transition_reason: std::option::Option<crate::model::LifecycleTransitionReason>,
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    #[doc(hidden)]
    pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    /// <p>A list of administrative actions for the file system that are in process or waiting to be processed. Administrative actions describe changes to the Amazon FSx system.</p>
    #[doc(hidden)]
    pub administrative_actions:
        std::option::Option<std::vec::Vec<crate::model::AdministrativeAction>>,
}
impl Snapshot {
    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    pub fn resource_arn(&self) -> std::option::Option<&str> {
        self.resource_arn.as_ref().map(|s| s.as_str())
    }
    /// <p>The ID of the snapshot.</p>
    pub fn snapshot_id(&self) -> std::option::Option<&str> {
        self.snapshot_id.as_ref().map(|s| s.as_str())
    }
    /// <p>The name of the snapshot.</p>
    pub fn name(&self) -> std::option::Option<&str> {
        self.name.as_ref().map(|s| s.as_str())
    }
    /// <p>The ID of the volume that the snapshot is of.</p>
    pub fn volume_id(&self) -> std::option::Option<&str> {
        self.volume_id.as_ref().map(|s| s.as_str())
    }
    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
    pub fn creation_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        match self.creation_time {
            Some(ref t) => Some(t),
            None => None,
        }
    }
    /// <p>The lifecycle status of the snapshot: <code>PENDING</code> (creation hasn't started), <code>CREATING</code>, <code>DELETING</code>, or <code>AVAILABLE</code> (the snapshot is fully available).</p>
    pub fn lifecycle(&self) -> std::option::Option<&crate::model::SnapshotLifecycle> {
        match self.lifecycle {
            Some(ref l) => Some(l),
            None => None,
        }
    }
    /// <p>Describes why a resource lifecycle state changed.</p>
    pub fn lifecycle_transition_reason(
        &self,
    ) -> std::option::Option<&crate::model::LifecycleTransitionReason> {
        match self.lifecycle_transition_reason {
            Some(ref r) => Some(r),
            None => None,
        }
    }
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    pub fn tags(&self) -> std::option::Option<&[crate::model::Tag]> {
        self.tags.as_ref().map(|v| v.as_slice())
    }
    /// <p>A list of administrative actions for the file system that are in process or waiting to be processed. Administrative actions describe changes to the Amazon FSx system.</p>
    pub fn administrative_actions(
        &self,
    ) -> std::option::Option<&[crate::model::AdministrativeAction]> {
        self.administrative_actions.as_ref().map(|v| v.as_slice())
    }
}
/// See [`Snapshot`](crate::model::Snapshot).
pub mod snapshot {

    /// A builder for [`Snapshot`](crate::model::Snapshot).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) resource_arn: std::option::Option<std::string::String>,
        pub(crate) snapshot_id: std::option::Option<std::string::String>,
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) volume_id: std::option::Option<std::string::String>,
        pub(crate) creation_time: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) lifecycle: std::option::Option<crate::model::SnapshotLifecycle>,
        pub(crate) lifecycle_transition_reason:
            std::option::Option<crate::model::LifecycleTransitionReason>,
        pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        pub(crate) administrative_actions:
            std::option::Option<std::vec::Vec<crate::model::AdministrativeAction>>,
    }
    impl Builder {
        /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
        pub fn resource_arn(self, input: impl Into<std::string::String>) -> Self {
            Self {
                resource_arn: Some(input.into()),
                ..self
            }
        }
        /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
        pub fn set_resource_arn(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                resource_arn: input,
                ..self
            }
        }
        /// <p>The ID of the snapshot.</p>
        pub fn snapshot_id(self, input: impl Into<std::string::String>) -> Self {
            Self {
                snapshot_id: Some(input.into()),
                ..self
            }
        }
        /// <p>The ID of the snapshot.</p>
        pub fn set_snapshot_id(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                snapshot_id: input,
                ..self
            }
        }
        /// <p>The name of the snapshot.</p>
        pub fn name(self, input: impl Into<std::string::String>) -> Self {
            Self {
                name: Some(input.into()),
                ..self
            }
        }
        /// <p>The name of the snapshot.</p>
        pub fn set_name(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                name: input,
                ..self
            }
        }
        /// <p>The ID of the volume that the snapshot is of.</p>
        pub fn volume_id(self, input: impl Into<std::string::String>) -> Self {
            Self {
                volume_id: Some(input.into()),
                ..self
            }
        }
        /// <p>The ID of the volume that the snapshot is of.</p>
        pub fn set_volume_id(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                volume_id: input,
                ..self
            }
        }
        /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
        pub fn creation_time(self, input: aws_smithy_types::DateTime) -> Self {
            Self {
                creation_time: Some(input),
                ..self
            }
        }
        /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
        pub fn set_creation_time(
            self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            Self {
                creation_time: input,
                ..self
            }
        }
        /// <p>The lifecycle status of the snapshot: <code>PENDING</code> (creation hasn't started), <code>CREATING</code>, <code>DELETING</code>, or <code>AVAILABLE</code> (the snapshot is fully available).</p>
        pub fn lifecycle(self, input: crate::model::SnapshotLifecycle) -> Self {
            Self {
                lifecycle: Some(input),
                ..self
            }
        }
        /// <p>The lifecycle status of the snapshot: <code>PENDING</code> (creation hasn't started), <code>CREATING</code>, <code>DELETING</code>, or <code>AVAILABLE</code> (the snapshot is fully available).</p>
        pub fn set_lifecycle(
            self,
            input: std::option::Option<crate::model::SnapshotLifecycle>,
        ) -> Self {
            Self {
                lifecycle: input,
                ..self
            }
        }
        /// <p>Describes why a resource lifecycle state changed.</p>
        pub fn lifecycle_transition_reason(
            self,
            input: crate::model::LifecycleTransitionReason,
        ) -> Self {
            Self {
                lifecycle_transition_reason: Some(input),
                ..self
            }
        }
        /// <p>Describes why a resource lifecycle state changed.</p>
        pub fn set_lifecycle_transition_reason(
            self,
            input: std::option::Option<crate::model::LifecycleTransitionReason>,
        ) -> Self {
            Self {
                lifecycle_transition_reason: input,
                ..self
            }
        }
        /// Appends an item to `tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).
        ///
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn tags(mut self, input: crate::model::Tag) -> Self {
            // Lazily create the Vec on first append, then push in place.
            self.tags.get_or_insert_with(std::vec::Vec::new).push(input);
            self
        }
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn set_tags(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            Self { tags: input, ..self }
        }
        /// Appends an item to `administrative_actions`.
        ///
        /// To override the contents of this collection use [`set_administrative_actions`](Self::set_administrative_actions).
        ///
        /// <p>A list of administrative actions for the file system that are in process or waiting to be processed. Administrative actions describe changes to the Amazon FSx system.</p>
        pub fn administrative_actions(mut self, input: crate::model::AdministrativeAction) -> Self {
            // Lazily create the Vec on first append, then push in place.
            self.administrative_actions
                .get_or_insert_with(std::vec::Vec::new)
                .push(input);
            self
        }
        /// <p>A list of administrative actions for the file system that are in process or waiting to be processed. Administrative actions describe changes to the Amazon FSx system.</p>
        pub fn set_administrative_actions(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::AdministrativeAction>>,
        ) -> Self {
            Self {
                administrative_actions: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`Snapshot`](crate::model::Snapshot).
        pub fn build(self) -> crate::model::Snapshot {
            // Destructure once so every field is moved exactly as stored.
            let Self {
                resource_arn,
                snapshot_id,
                name,
                volume_id,
                creation_time,
                lifecycle,
                lifecycle_transition_reason,
                tags,
                administrative_actions,
            } = self;
            crate::model::Snapshot {
                resource_arn,
                snapshot_id,
                name,
                volume_id,
                creation_time,
                lifecycle,
                lifecycle_transition_reason,
                tags,
                administrative_actions,
            }
        }
    }
}
impl Snapshot {
    /// Creates a new builder-style object to manufacture [`Snapshot`](crate::model::Snapshot).
    pub fn builder() -> crate::model::snapshot::Builder {
        // The builder derives `Default`, so this starts with every field unset.
        Default::default()
    }
}

/// <p>Specifies a key-value pair for a resource tag.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Tag {
    /// <p>A value that specifies the <code>TagKey</code>, the name of the tag. Tag keys must be unique for the resource to which they are attached.</p>
    #[doc(hidden)]
    pub key: std::option::Option<std::string::String>,
    /// <p>A value that specifies the <code>TagValue</code>, the value assigned to the corresponding tag key. Tag values can be null and don't have to be unique in a tag set. For example, you can have a key-value pair in a tag set of <code>finances : April</code> and also of <code>payroll : April</code>, where both tags share the value <code>April</code>.</p>
    #[doc(hidden)]
    pub value: std::option::Option<std::string::String>,
}
impl Tag {
    /// <p>A value that specifies the <code>TagKey</code>, the name of the tag. Tag keys must be unique for the resource to which they are attached.</p>
    pub fn key(&self) -> std::option::Option<&str> {
        self.key.as_ref().map(|k| k.as_str())
    }
    /// <p>A value that specifies the <code>TagValue</code>, the value assigned to the corresponding tag key. Tag values can be null and don't have to be unique in a tag set. For example, you can have a key-value pair in a tag set of <code>finances : April</code> and also of <code>payroll : April</code>.</p>
    pub fn value(&self) -> std::option::Option<&str> {
        self.value.as_ref().map(|v| v.as_str())
    }
}
/// See [`Tag`](crate::model::Tag).
pub mod tag {

    /// A builder for [`Tag`](crate::model::Tag).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) key: std::option::Option<std::string::String>,
        pub(crate) value: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>A value that specifies the <code>TagKey</code>, the name of the tag. Tag keys must be unique for the resource to which they are attached.</p>
        pub fn key(self, input: impl Into<std::string::String>) -> Self {
            Self {
                key: Some(input.into()),
                ..self
            }
        }
        /// <p>A value that specifies the <code>TagKey</code>, the name of the tag. Tag keys must be unique for the resource to which they are attached.</p>
        pub fn set_key(self, input: std::option::Option<std::string::String>) -> Self {
            Self { key: input, ..self }
        }
        /// <p>A value that specifies the <code>TagValue</code>, the value assigned to the corresponding tag key. Tag values can be null and don't have to be unique in a tag set. For example, you can have a key-value pair in a tag set of <code>finances : April</code> and also of <code>payroll : April</code>.</p>
        pub fn value(self, input: impl Into<std::string::String>) -> Self {
            Self {
                value: Some(input.into()),
                ..self
            }
        }
        /// <p>A value that specifies the <code>TagValue</code>, the value assigned to the corresponding tag key. Tag values can be null and don't have to be unique in a tag set. For example, you can have a key-value pair in a tag set of <code>finances : April</code> and also of <code>payroll : April</code>.</p>
        pub fn set_value(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                value: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`Tag`](crate::model::Tag).
        pub fn build(self) -> crate::model::Tag {
            let Self { key, value } = self;
            crate::model::Tag { key, value }
        }
    }
}
impl Tag {
    /// Creates a new builder-style object to manufacture [`Tag`](crate::model::Tag).
    pub fn builder() -> crate::model::tag::Builder {
        // `Builder` derives `Default`, so a fresh, all-`None` builder comes from there.
        Default::default()
    }
}

/// <p>Describes why a resource lifecycle state changed.</p>
#[non_exhaustive]
// Non-exhaustive: the service model may gain fields; construct via `LifecycleTransitionReason::builder()`.
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct LifecycleTransitionReason {
    /// <p>A detailed error message.</p>
    #[doc(hidden)]
    pub message: std::option::Option<std::string::String>,
}
impl LifecycleTransitionReason {
    /// <p>A detailed error message.</p>
    pub fn message(&self) -> std::option::Option<&str> {
        // Borrow the owned String (if any) and expose it as a &str slice.
        self.message.as_ref().map(|m| m.as_str())
    }
}
/// See [`LifecycleTransitionReason`](crate::model::LifecycleTransitionReason).
pub mod lifecycle_transition_reason {

    /// A builder for [`LifecycleTransitionReason`](crate::model::LifecycleTransitionReason).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) message: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>A detailed error message.</p>
        pub fn message(self, input: impl Into<std::string::String>) -> Self {
            Self {
                message: Some(input.into()),
            }
        }
        /// <p>A detailed error message.</p>
        pub fn set_message(self, input: std::option::Option<std::string::String>) -> Self {
            Self { message: input }
        }
        /// Consumes the builder and constructs a [`LifecycleTransitionReason`](crate::model::LifecycleTransitionReason).
        pub fn build(self) -> crate::model::LifecycleTransitionReason {
            crate::model::LifecycleTransitionReason {
                message: self.message,
            }
        }
    }
}
impl LifecycleTransitionReason {
    /// Creates a new builder-style object to manufacture [`LifecycleTransitionReason`](crate::model::LifecycleTransitionReason).
    pub fn builder() -> crate::model::lifecycle_transition_reason::Builder {
        // The derived `Default` impl yields an empty builder.
        Default::default()
    }
}

/// When writing a match expression against `SnapshotLifecycle`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let snapshotlifecycle = unimplemented!();
/// match snapshotlifecycle {
///     SnapshotLifecycle::Available => { /* ... */ },
///     SnapshotLifecycle::Creating => { /* ... */ },
///     SnapshotLifecycle::Deleting => { /* ... */ },
///     SnapshotLifecycle::Pending => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `snapshotlifecycle` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `SnapshotLifecycle::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `SnapshotLifecycle::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `SnapshotLifecycle::NewFeature` is defined.
/// Specifically, when `snapshotlifecycle` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `SnapshotLifecycle::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
// NOTE: `Ord`/`PartialOrd` are derived, so the variant declaration order below is significant.
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum SnapshotLifecycle {
    #[allow(missing_docs)] // documentation missing in model
    Available,
    #[allow(missing_docs)] // documentation missing in model
    Creating,
    #[allow(missing_docs)] // documentation missing in model
    Deleting,
    #[allow(missing_docs)] // documentation missing in model
    Pending,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for SnapshotLifecycle {
    /// Maps the wire-format string to a variant; any unrecognized value is
    /// preserved verbatim in the `Unknown` variant for forward compatibility.
    fn from(s: &str) -> Self {
        match s {
            "AVAILABLE" => Self::Available,
            "CREATING" => Self::Creating,
            "DELETING" => Self::Deleting,
            "PENDING" => Self::Pending,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for SnapshotLifecycle {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(SnapshotLifecycle::from(s))
    }
}
impl SnapshotLifecycle {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            SnapshotLifecycle::Available => "AVAILABLE",
            SnapshotLifecycle::Creating => "CREATING",
            SnapshotLifecycle::Deleting => "DELETING",
            SnapshotLifecycle::Pending => "PENDING",
            SnapshotLifecycle::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["AVAILABLE", "CREATING", "DELETING", "PENDING"]
    }
}
impl AsRef<str> for SnapshotLifecycle {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>Provides information about a failed administrative action.</p>
#[non_exhaustive]
// Non-exhaustive: the service model may gain fields; construct via `AdministrativeActionFailureDetails::builder()`.
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct AdministrativeActionFailureDetails {
    /// <p>Error message providing details about the failed administrative action.</p>
    #[doc(hidden)]
    pub message: std::option::Option<std::string::String>,
}
impl AdministrativeActionFailureDetails {
    /// <p>Error message providing details about the failed administrative action.</p>
    pub fn message(&self) -> std::option::Option<&str> {
        // Borrow the owned String (if any) and expose it as a &str slice.
        self.message.as_ref().map(|m| m.as_str())
    }
}
/// See [`AdministrativeActionFailureDetails`](crate::model::AdministrativeActionFailureDetails).
pub mod administrative_action_failure_details {

    /// A builder for [`AdministrativeActionFailureDetails`](crate::model::AdministrativeActionFailureDetails).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) message: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>Error message providing details about the failed administrative action.</p>
        pub fn message(self, input: impl Into<std::string::String>) -> Self {
            Self {
                message: Some(input.into()),
            }
        }
        /// <p>Error message providing details about the failed administrative action.</p>
        pub fn set_message(self, input: std::option::Option<std::string::String>) -> Self {
            Self { message: input }
        }
        /// Consumes the builder and constructs a [`AdministrativeActionFailureDetails`](crate::model::AdministrativeActionFailureDetails).
        pub fn build(self) -> crate::model::AdministrativeActionFailureDetails {
            crate::model::AdministrativeActionFailureDetails {
                message: self.message,
            }
        }
    }
}
impl AdministrativeActionFailureDetails {
    /// Creates a new builder-style object to manufacture [`AdministrativeActionFailureDetails`](crate::model::AdministrativeActionFailureDetails).
    pub fn builder() -> crate::model::administrative_action_failure_details::Builder {
        // The derived `Default` impl yields an empty builder.
        Default::default()
    }
}

/// <p>A description of a specific Amazon FSx file system.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
// Non-exhaustive: new fields may be added by the service model; construct via `file_system::Builder`.
pub struct FileSystem {
    /// <p>The Amazon Web Services account that created the file system. If the file system was created by an Identity and Access Management (IAM) user, the Amazon Web Services account to which the IAM user belongs is the owner.</p>
    #[doc(hidden)]
    pub owner_id: std::option::Option<std::string::String>,
    /// <p>The time that the file system was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
    #[doc(hidden)]
    pub creation_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The system-generated, unique 17-digit ID of the file system.</p>
    #[doc(hidden)]
    pub file_system_id: std::option::Option<std::string::String>,
    /// <p>The type of Amazon FSx file system, which can be <code>LUSTRE</code>, <code>WINDOWS</code>, <code>ONTAP</code>, or <code>OPENZFS</code>.</p>
    #[doc(hidden)]
    pub file_system_type: std::option::Option<crate::model::FileSystemType>,
    /// <p>The lifecycle status of the file system. The following are the possible values and what they mean:</p>
    /// <ul>
    /// <li> <p> <code>AVAILABLE</code> - The file system is in a healthy state, and is reachable and available for use.</p> </li>
    /// <li> <p> <code>CREATING</code> - Amazon FSx is creating the new file system.</p> </li>
    /// <li> <p> <code>DELETING</code> - Amazon FSx is deleting an existing file system.</p> </li>
    /// <li> <p> <code>FAILED</code> - An existing file system has experienced an unrecoverable failure. When creating a new file system, Amazon FSx was unable to create the file system.</p> </li>
    /// <li> <p> <code>MISCONFIGURED</code> - The file system is in a failed but recoverable state.</p> </li>
    /// <li> <p> <code>MISCONFIGURED_UNAVAILABLE</code> - (Amazon FSx for Windows File Server only) The file system is currently unavailable due to a change in your Active Directory configuration.</p> </li>
    /// <li> <p> <code>UPDATING</code> - The file system is undergoing a customer-initiated update.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub lifecycle: std::option::Option<crate::model::FileSystemLifecycle>,
    /// <p>A structure providing details of any failures that occurred.</p>
    #[doc(hidden)]
    pub failure_details: std::option::Option<crate::model::FileSystemFailureDetails>,
    /// <p>The storage capacity of the file system in gibibytes (GiB).</p>
    #[doc(hidden)]
    pub storage_capacity: std::option::Option<i32>,
    /// <p>The type of storage the file system is using. If set to <code>SSD</code>, the file system uses solid state drive storage. If set to <code>HDD</code>, the file system uses hard disk drive storage. </p>
    #[doc(hidden)]
    pub storage_type: std::option::Option<crate::model::StorageType>,
    /// <p>The ID of the primary virtual private cloud (VPC) for the file system.</p>
    #[doc(hidden)]
    pub vpc_id: std::option::Option<std::string::String>,
    /// <p>Specifies the IDs of the subnets that the file system is accessible from. For the Amazon FSx Windows and ONTAP <code>MULTI_AZ_1</code> file system deployment type, there are two subnet IDs, one for the preferred file server and one for the standby file server. The preferred file server subnet identified in the <code>PreferredSubnetID</code> property. All other file systems have only one subnet ID.</p>
    /// <p>For FSx for Lustre file systems, and Single-AZ Windows file systems, this is the ID of the subnet that contains the file system's endpoint. For <code>MULTI_AZ_1</code> Windows and ONTAP file systems, the file system endpoint is available in the <code>PreferredSubnetID</code>.</p>
    #[doc(hidden)]
    pub subnet_ids: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>The IDs of the elastic network interfaces from which a specific file system is accessible. The elastic network interface is automatically created in the same virtual private cloud (VPC) that the Amazon FSx file system was created in. For more information, see <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html">Elastic Network Interfaces</a> in the <i>Amazon EC2 User Guide.</i> </p>
    /// <p>For an Amazon FSx for Windows File Server file system, you can have one network interface ID. For an Amazon FSx for Lustre file system, you can have more than one.</p>
    #[doc(hidden)]
    pub network_interface_ids: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>The Domain Name System (DNS) name for the file system.</p>
    #[doc(hidden)]
    pub dns_name: std::option::Option<std::string::String>,
    /// <p>The ID of the Key Management Service (KMS) key used to encrypt Amazon FSx file system data. Used as follows with Amazon FSx file system types:</p>
    /// <ul>
    /// <li> <p>Amazon FSx for Lustre <code>PERSISTENT_1</code> and <code>PERSISTENT_2</code> deployment types only.</p> <p> <code>SCRATCH_1</code> and <code>SCRATCH_2</code> types are encrypted using the Amazon FSx service KMS key for your account.</p> </li>
    /// <li> <p>Amazon FSx for NetApp ONTAP</p> </li>
    /// <li> <p>Amazon FSx for OpenZFS</p> </li>
    /// <li> <p>Amazon FSx for Windows File Server</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub kms_key_id: std::option::Option<std::string::String>,
    /// <p>The Amazon Resource Name (ARN) of the file system resource.</p>
    #[doc(hidden)]
    pub resource_arn: std::option::Option<std::string::String>,
    /// <p>The tags to associate with the file system. For more information, see <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html">Tagging your Amazon EC2 resources</a> in the <i>Amazon EC2 User Guide</i>.</p>
    #[doc(hidden)]
    pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    /// <p>The configuration for this Amazon FSx for Windows File Server file system.</p>
    #[doc(hidden)]
    pub windows_configuration: std::option::Option<crate::model::WindowsFileSystemConfiguration>,
    /// <p>The configuration for the Amazon FSx for Lustre file system.</p>
    #[doc(hidden)]
    pub lustre_configuration: std::option::Option<crate::model::LustreFileSystemConfiguration>,
    /// <p>A list of administrative actions for the file system that are in process or waiting to be processed. Administrative actions describe changes to the Amazon FSx system that you have initiated using the <code>UpdateFileSystem</code> operation.</p>
    #[doc(hidden)]
    pub administrative_actions:
        std::option::Option<std::vec::Vec<crate::model::AdministrativeAction>>,
    /// <p>The configuration for this Amazon FSx for NetApp ONTAP file system.</p>
    #[doc(hidden)]
    pub ontap_configuration: std::option::Option<crate::model::OntapFileSystemConfiguration>,
    /// <p>The Lustre version of the Amazon FSx for Lustre file system, either <code>2.10</code> or <code>2.12</code>.</p>
    #[doc(hidden)]
    pub file_system_type_version: std::option::Option<std::string::String>,
    /// <p>The configuration for this Amazon FSx for OpenZFS file system.</p>
    #[doc(hidden)]
    pub open_zfs_configuration: std::option::Option<crate::model::OpenZfsFileSystemConfiguration>,
}
impl FileSystem {
    /// <p>The Amazon Web Services account that created the file system. If the file system was created by an Identity and Access Management (IAM) user, the Amazon Web Services account to which the IAM user belongs is the owner.</p>
    pub fn owner_id(&self) -> std::option::Option<&str> {
        self.owner_id.as_ref().map(|v| v.as_str())
    }
    /// <p>The time that the file system was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
    pub fn creation_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        Option::as_ref(&self.creation_time)
    }
    /// <p>The system-generated, unique 17-digit ID of the file system.</p>
    pub fn file_system_id(&self) -> std::option::Option<&str> {
        self.file_system_id.as_ref().map(|v| v.as_str())
    }
    /// <p>The type of Amazon FSx file system, which can be <code>LUSTRE</code>, <code>WINDOWS</code>, <code>ONTAP</code>, or <code>OPENZFS</code>.</p>
    pub fn file_system_type(&self) -> std::option::Option<&crate::model::FileSystemType> {
        Option::as_ref(&self.file_system_type)
    }
    /// <p>The lifecycle status of the file system. The following are the possible values and what they mean:</p>
    /// <ul>
    /// <li> <p> <code>AVAILABLE</code> - The file system is in a healthy state, and is reachable and available for use.</p> </li>
    /// <li> <p> <code>CREATING</code> - Amazon FSx is creating the new file system.</p> </li>
    /// <li> <p> <code>DELETING</code> - Amazon FSx is deleting an existing file system.</p> </li>
    /// <li> <p> <code>FAILED</code> - An existing file system has experienced an unrecoverable failure. When creating a new file system, Amazon FSx was unable to create the file system.</p> </li>
    /// <li> <p> <code>MISCONFIGURED</code> - The file system is in a failed but recoverable state.</p> </li>
    /// <li> <p> <code>MISCONFIGURED_UNAVAILABLE</code> - (Amazon FSx for Windows File Server only) The file system is currently unavailable due to a change in your Active Directory configuration.</p> </li>
    /// <li> <p> <code>UPDATING</code> - The file system is undergoing a customer-initiated update.</p> </li>
    /// </ul>
    pub fn lifecycle(&self) -> std::option::Option<&crate::model::FileSystemLifecycle> {
        Option::as_ref(&self.lifecycle)
    }
    /// <p>A structure providing details of any failures that occurred.</p>
    pub fn failure_details(&self) -> std::option::Option<&crate::model::FileSystemFailureDetails> {
        Option::as_ref(&self.failure_details)
    }
    /// <p>The storage capacity of the file system in gibibytes (GiB).</p>
    pub fn storage_capacity(&self) -> std::option::Option<i32> {
        // `Option<i32>` is `Copy`, so this returns the value by copy.
        self.storage_capacity
    }
    /// <p>The type of storage the file system is using. If set to <code>SSD</code>, the file system uses solid state drive storage. If set to <code>HDD</code>, the file system uses hard disk drive storage. </p>
    pub fn storage_type(&self) -> std::option::Option<&crate::model::StorageType> {
        Option::as_ref(&self.storage_type)
    }
    /// <p>The ID of the primary virtual private cloud (VPC) for the file system.</p>
    pub fn vpc_id(&self) -> std::option::Option<&str> {
        self.vpc_id.as_ref().map(|v| v.as_str())
    }
    /// <p>Specifies the IDs of the subnets that the file system is accessible from. For the Amazon FSx Windows and ONTAP <code>MULTI_AZ_1</code> file system deployment type, there are two subnet IDs, one for the preferred file server and one for the standby file server. The preferred file server subnet identified in the <code>PreferredSubnetID</code> property. All other file systems have only one subnet ID.</p>
    /// <p>For FSx for Lustre file systems, and Single-AZ Windows file systems, this is the ID of the subnet that contains the file system's endpoint. For <code>MULTI_AZ_1</code> Windows and ONTAP file systems, the file system endpoint is available in the <code>PreferredSubnetID</code>.</p>
    pub fn subnet_ids(&self) -> std::option::Option<&[std::string::String]> {
        self.subnet_ids.as_ref().map(|v| v.as_slice())
    }
    /// <p>The IDs of the elastic network interfaces from which a specific file system is accessible. The elastic network interface is automatically created in the same virtual private cloud (VPC) that the Amazon FSx file system was created in. For more information, see <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html">Elastic Network Interfaces</a> in the <i>Amazon EC2 User Guide.</i> </p>
    /// <p>For an Amazon FSx for Windows File Server file system, you can have one network interface ID. For an Amazon FSx for Lustre file system, you can have more than one.</p>
    pub fn network_interface_ids(&self) -> std::option::Option<&[std::string::String]> {
        self.network_interface_ids.as_ref().map(|v| v.as_slice())
    }
    /// <p>The Domain Name System (DNS) name for the file system.</p>
    pub fn dns_name(&self) -> std::option::Option<&str> {
        self.dns_name.as_ref().map(|v| v.as_str())
    }
    /// <p>The ID of the Key Management Service (KMS) key used to encrypt Amazon FSx file system data. Used as follows with Amazon FSx file system types:</p>
    /// <ul>
    /// <li> <p>Amazon FSx for Lustre <code>PERSISTENT_1</code> and <code>PERSISTENT_2</code> deployment types only.</p> <p> <code>SCRATCH_1</code> and <code>SCRATCH_2</code> types are encrypted using the Amazon FSx service KMS key for your account.</p> </li>
    /// <li> <p>Amazon FSx for NetApp ONTAP</p> </li>
    /// <li> <p>Amazon FSx for OpenZFS</p> </li>
    /// <li> <p>Amazon FSx for Windows File Server</p> </li>
    /// </ul>
    pub fn kms_key_id(&self) -> std::option::Option<&str> {
        self.kms_key_id.as_ref().map(|v| v.as_str())
    }
    /// <p>The Amazon Resource Name (ARN) of the file system resource.</p>
    pub fn resource_arn(&self) -> std::option::Option<&str> {
        self.resource_arn.as_ref().map(|v| v.as_str())
    }
    /// <p>The tags to associate with the file system. For more information, see <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html">Tagging your Amazon EC2 resources</a> in the <i>Amazon EC2 User Guide</i>.</p>
    pub fn tags(&self) -> std::option::Option<&[crate::model::Tag]> {
        self.tags.as_ref().map(|v| v.as_slice())
    }
    /// <p>The configuration for this Amazon FSx for Windows File Server file system.</p>
    pub fn windows_configuration(
        &self,
    ) -> std::option::Option<&crate::model::WindowsFileSystemConfiguration> {
        Option::as_ref(&self.windows_configuration)
    }
    /// <p>The configuration for the Amazon FSx for Lustre file system.</p>
    pub fn lustre_configuration(
        &self,
    ) -> std::option::Option<&crate::model::LustreFileSystemConfiguration> {
        Option::as_ref(&self.lustre_configuration)
    }
    /// <p>A list of administrative actions for the file system that are in process or waiting to be processed. Administrative actions describe changes to the Amazon FSx system that you have initiated using the <code>UpdateFileSystem</code> operation.</p>
    pub fn administrative_actions(
        &self,
    ) -> std::option::Option<&[crate::model::AdministrativeAction]> {
        self.administrative_actions.as_ref().map(|v| v.as_slice())
    }
    /// <p>The configuration for this Amazon FSx for NetApp ONTAP file system.</p>
    pub fn ontap_configuration(
        &self,
    ) -> std::option::Option<&crate::model::OntapFileSystemConfiguration> {
        Option::as_ref(&self.ontap_configuration)
    }
    /// <p>The Lustre version of the Amazon FSx for Lustre file system, either <code>2.10</code> or <code>2.12</code>.</p>
    pub fn file_system_type_version(&self) -> std::option::Option<&str> {
        self.file_system_type_version.as_ref().map(|v| v.as_str())
    }
    /// <p>The configuration for this Amazon FSx for OpenZFS file system.</p>
    pub fn open_zfs_configuration(
        &self,
    ) -> std::option::Option<&crate::model::OpenZfsFileSystemConfiguration> {
        Option::as_ref(&self.open_zfs_configuration)
    }
}
/// See [`FileSystem`](crate::model::FileSystem).
pub mod file_system {

    /// A builder for [`FileSystem`](crate::model::FileSystem).
    // Field-for-field mirror of [`FileSystem`]; every field is held as an `Option` while building.
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) owner_id: std::option::Option<std::string::String>,
        pub(crate) creation_time: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) file_system_id: std::option::Option<std::string::String>,
        pub(crate) file_system_type: std::option::Option<crate::model::FileSystemType>,
        pub(crate) lifecycle: std::option::Option<crate::model::FileSystemLifecycle>,
        pub(crate) failure_details: std::option::Option<crate::model::FileSystemFailureDetails>,
        pub(crate) storage_capacity: std::option::Option<i32>,
        pub(crate) storage_type: std::option::Option<crate::model::StorageType>,
        pub(crate) vpc_id: std::option::Option<std::string::String>,
        pub(crate) subnet_ids: std::option::Option<std::vec::Vec<std::string::String>>,
        pub(crate) network_interface_ids: std::option::Option<std::vec::Vec<std::string::String>>,
        pub(crate) dns_name: std::option::Option<std::string::String>,
        pub(crate) kms_key_id: std::option::Option<std::string::String>,
        pub(crate) resource_arn: std::option::Option<std::string::String>,
        pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        pub(crate) windows_configuration:
            std::option::Option<crate::model::WindowsFileSystemConfiguration>,
        pub(crate) lustre_configuration:
            std::option::Option<crate::model::LustreFileSystemConfiguration>,
        pub(crate) administrative_actions:
            std::option::Option<std::vec::Vec<crate::model::AdministrativeAction>>,
        pub(crate) ontap_configuration:
            std::option::Option<crate::model::OntapFileSystemConfiguration>,
        pub(crate) file_system_type_version: std::option::Option<std::string::String>,
        pub(crate) open_zfs_configuration:
            std::option::Option<crate::model::OpenZfsFileSystemConfiguration>,
    }
    impl Builder {
        /// <p>The Amazon Web Services account that created the file system. If the file system was created by an Identity and Access Management (IAM) user, the Amazon Web Services account to which the IAM user belongs is the owner.</p>
        pub fn owner_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.owner_id = Some(input.into());
            self
        }
        /// <p>The Amazon Web Services account that created the file system. If the file system was created by an Identity and Access Management (IAM) user, the Amazon Web Services account to which the IAM user belongs is the owner.</p>
        pub fn set_owner_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.owner_id = input;
            self
        }
        /// <p>The time that the file system was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
        pub fn creation_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.creation_time = Some(input);
            self
        }
        /// <p>The time that the file system was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
        pub fn set_creation_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.creation_time = input;
            self
        }
        /// <p>The system-generated, unique 17-digit ID of the file system.</p>
        pub fn file_system_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.file_system_id = Some(input.into());
            self
        }
        /// <p>The system-generated, unique 17-digit ID of the file system.</p>
        pub fn set_file_system_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.file_system_id = input;
            self
        }
        /// <p>The type of Amazon FSx file system, which can be <code>LUSTRE</code>, <code>WINDOWS</code>, <code>ONTAP</code>, or <code>OPENZFS</code>.</p>
        pub fn file_system_type(mut self, input: crate::model::FileSystemType) -> Self {
            self.file_system_type = Some(input);
            self
        }
        /// <p>The type of Amazon FSx file system, which can be <code>LUSTRE</code>, <code>WINDOWS</code>, <code>ONTAP</code>, or <code>OPENZFS</code>.</p>
        pub fn set_file_system_type(
            mut self,
            input: std::option::Option<crate::model::FileSystemType>,
        ) -> Self {
            self.file_system_type = input;
            self
        }
        /// <p>The lifecycle status of the file system. The following are the possible values and what they mean:</p>
        /// <ul>
        /// <li> <p> <code>AVAILABLE</code> - The file system is in a healthy state, and is reachable and available for use.</p> </li>
        /// <li> <p> <code>CREATING</code> - Amazon FSx is creating the new file system.</p> </li>
        /// <li> <p> <code>DELETING</code> - Amazon FSx is deleting an existing file system.</p> </li>
        /// <li> <p> <code>FAILED</code> - An existing file system has experienced an unrecoverable failure. When creating a new file system, Amazon FSx was unable to create the file system.</p> </li>
        /// <li> <p> <code>MISCONFIGURED</code> - The file system is in a failed but recoverable state.</p> </li>
        /// <li> <p> <code>MISCONFIGURED_UNAVAILABLE</code> - (Amazon FSx for Windows File Server only) The file system is currently unavailable due to a change in your Active Directory configuration.</p> </li>
        /// <li> <p> <code>UPDATING</code> - The file system is undergoing a customer-initiated update.</p> </li>
        /// </ul>
        pub fn lifecycle(mut self, input: crate::model::FileSystemLifecycle) -> Self {
            self.lifecycle = Some(input);
            self
        }
        /// <p>The lifecycle status of the file system. The following are the possible values and what they mean:</p>
        /// <ul>
        /// <li> <p> <code>AVAILABLE</code> - The file system is in a healthy state, and is reachable and available for use.</p> </li>
        /// <li> <p> <code>CREATING</code> - Amazon FSx is creating the new file system.</p> </li>
        /// <li> <p> <code>DELETING</code> - Amazon FSx is deleting an existing file system.</p> </li>
        /// <li> <p> <code>FAILED</code> - An existing file system has experienced an unrecoverable failure. When creating a new file system, Amazon FSx was unable to create the file system.</p> </li>
        /// <li> <p> <code>MISCONFIGURED</code> - The file system is in a failed but recoverable state.</p> </li>
        /// <li> <p> <code>MISCONFIGURED_UNAVAILABLE</code> - (Amazon FSx for Windows File Server only) The file system is currently unavailable due to a change in your Active Directory configuration.</p> </li>
        /// <li> <p> <code>UPDATING</code> - The file system is undergoing a customer-initiated update.</p> </li>
        /// </ul>
        pub fn set_lifecycle(
            mut self,
            input: std::option::Option<crate::model::FileSystemLifecycle>,
        ) -> Self {
            self.lifecycle = input;
            self
        }
        /// <p>A structure providing details of any failures that occurred.</p>
        pub fn failure_details(mut self, input: crate::model::FileSystemFailureDetails) -> Self {
            self.failure_details = Some(input);
            self
        }
        /// <p>A structure providing details of any failures that occurred.</p>
        pub fn set_failure_details(
            mut self,
            input: std::option::Option<crate::model::FileSystemFailureDetails>,
        ) -> Self {
            self.failure_details = input;
            self
        }
        /// <p>The storage capacity of the file system in gibibytes (GiB).</p>
        pub fn storage_capacity(mut self, input: i32) -> Self {
            self.storage_capacity = Some(input);
            self
        }
        /// <p>The storage capacity of the file system in gibibytes (GiB).</p>
        pub fn set_storage_capacity(mut self, input: std::option::Option<i32>) -> Self {
            self.storage_capacity = input;
            self
        }
        /// <p>The type of storage the file system is using. If set to <code>SSD</code>, the file system uses solid state drive storage. If set to <code>HDD</code>, the file system uses hard disk drive storage. </p>
        pub fn storage_type(mut self, input: crate::model::StorageType) -> Self {
            self.storage_type = Some(input);
            self
        }
        /// <p>The type of storage the file system is using. If set to <code>SSD</code>, the file system uses solid state drive storage. If set to <code>HDD</code>, the file system uses hard disk drive storage. </p>
        pub fn set_storage_type(
            mut self,
            input: std::option::Option<crate::model::StorageType>,
        ) -> Self {
            self.storage_type = input;
            self
        }
        /// <p>The ID of the primary virtual private cloud (VPC) for the file system.</p>
        pub fn vpc_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.vpc_id = Some(input.into());
            self
        }
        /// <p>The ID of the primary virtual private cloud (VPC) for the file system.</p>
        pub fn set_vpc_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.vpc_id = input;
            self
        }
        /// Appends an item to `subnet_ids`.
        ///
        /// To override the contents of this collection use [`set_subnet_ids`](Self::set_subnet_ids).
        ///
        /// <p>Specifies the IDs of the subnets that the file system is accessible from. For the Amazon FSx Windows and ONTAP <code>MULTI_AZ_1</code> file system deployment type, there are two subnet IDs, one for the preferred file server and one for the standby file server. The preferred file server subnet identified in the <code>PreferredSubnetID</code> property. All other file systems have only one subnet ID.</p>
        /// <p>For FSx for Lustre file systems, and Single-AZ Windows file systems, this is the ID of the subnet that contains the file system's endpoint. For <code>MULTI_AZ_1</code> Windows and ONTAP file systems, the file system endpoint is available in the <code>PreferredSubnetID</code>.</p>
        pub fn subnet_ids(mut self, input: impl Into<std::string::String>) -> Self {
            let mut v = self.subnet_ids.unwrap_or_default();
            v.push(input.into());
            self.subnet_ids = Some(v);
            self
        }
        /// <p>Specifies the IDs of the subnets that the file system is accessible from. For the Amazon FSx Windows and ONTAP <code>MULTI_AZ_1</code> file system deployment type, there are two subnet IDs, one for the preferred file server and one for the standby file server. The preferred file server subnet identified in the <code>PreferredSubnetID</code> property. All other file systems have only one subnet ID.</p>
        /// <p>For FSx for Lustre file systems, and Single-AZ Windows file systems, this is the ID of the subnet that contains the file system's endpoint. For <code>MULTI_AZ_1</code> Windows and ONTAP file systems, the file system endpoint is available in the <code>PreferredSubnetID</code>.</p>
        pub fn set_subnet_ids(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.subnet_ids = input;
            self
        }
        /// Appends an item to `network_interface_ids`.
        ///
        /// To override the contents of this collection use [`set_network_interface_ids`](Self::set_network_interface_ids).
        ///
        /// <p>The IDs of the elastic network interfaces from which a specific file system is accessible. The elastic network interface is automatically created in the same virtual private cloud (VPC) that the Amazon FSx file system was created in. For more information, see <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html">Elastic Network Interfaces</a> in the <i>Amazon EC2 User Guide.</i> </p>
        /// <p>For an Amazon FSx for Windows File Server file system, you can have one network interface ID. For an Amazon FSx for Lustre file system, you can have more than one.</p>
        pub fn network_interface_ids(mut self, input: impl Into<std::string::String>) -> Self {
            let mut v = self.network_interface_ids.unwrap_or_default();
            v.push(input.into());
            self.network_interface_ids = Some(v);
            self
        }
        /// <p>The IDs of the elastic network interfaces from which a specific file system is accessible. The elastic network interface is automatically created in the same virtual private cloud (VPC) that the Amazon FSx file system was created in. For more information, see <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html">Elastic Network Interfaces</a> in the <i>Amazon EC2 User Guide.</i> </p>
        /// <p>For an Amazon FSx for Windows File Server file system, you can have one network interface ID. For an Amazon FSx for Lustre file system, you can have more than one.</p>
        pub fn set_network_interface_ids(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.network_interface_ids = input;
            self
        }
        /// <p>The Domain Name System (DNS) name for the file system.</p>
        pub fn dns_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.dns_name = Some(input.into());
            self
        }
        /// <p>The Domain Name System (DNS) name for the file system.</p>
        pub fn set_dns_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.dns_name = input;
            self
        }
        /// <p>The ID of the Key Management Service (KMS) key used to encrypt Amazon FSx file system data. Used as follows with Amazon FSx file system types:</p>
        /// <ul>
        /// <li> <p>Amazon FSx for Lustre <code>PERSISTENT_1</code> and <code>PERSISTENT_2</code> deployment types only.</p> <p> <code>SCRATCH_1</code> and <code>SCRATCH_2</code> types are encrypted using the Amazon FSx service KMS key for your account.</p> </li>
        /// <li> <p>Amazon FSx for NetApp ONTAP</p> </li>
        /// <li> <p>Amazon FSx for OpenZFS</p> </li>
        /// <li> <p>Amazon FSx for Windows File Server</p> </li>
        /// </ul>
        pub fn kms_key_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.kms_key_id = Some(input.into());
            self
        }
        /// <p>The ID of the Key Management Service (KMS) key used to encrypt Amazon FSx file system data. Used as follows with Amazon FSx file system types:</p>
        /// <ul>
        /// <li> <p>Amazon FSx for Lustre <code>PERSISTENT_1</code> and <code>PERSISTENT_2</code> deployment types only.</p> <p> <code>SCRATCH_1</code> and <code>SCRATCH_2</code> types are encrypted using the Amazon FSx service KMS key for your account.</p> </li>
        /// <li> <p>Amazon FSx for NetApp ONTAP</p> </li>
        /// <li> <p>Amazon FSx for OpenZFS</p> </li>
        /// <li> <p>Amazon FSx for Windows File Server</p> </li>
        /// </ul>
        pub fn set_kms_key_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.kms_key_id = input;
            self
        }
        /// <p>The Amazon Resource Name (ARN) of the file system resource.</p>
        pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.resource_arn = Some(input.into());
            self
        }
        /// <p>The Amazon Resource Name (ARN) of the file system resource.</p>
        pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.resource_arn = input;
            self
        }
        /// Appends an item to `tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).
        ///
        /// <p>The tags to associate with the file system. For more information, see <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html">Tagging your Amazon EC2 resources</a> in the <i>Amazon EC2 User Guide</i>.</p>
        pub fn tags(mut self, input: crate::model::Tag) -> Self {
            let mut v = self.tags.unwrap_or_default();
            v.push(input);
            self.tags = Some(v);
            self
        }
        /// <p>The tags to associate with the file system. For more information, see <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html">Tagging your Amazon EC2 resources</a> in the <i>Amazon EC2 User Guide</i>.</p>
        pub fn set_tags(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            self.tags = input;
            self
        }
        /// <p>The configuration for this Amazon FSx for Windows File Server file system.</p>
        pub fn windows_configuration(
            mut self,
            input: crate::model::WindowsFileSystemConfiguration,
        ) -> Self {
            self.windows_configuration = Some(input);
            self
        }
        /// <p>The configuration for this Amazon FSx for Windows File Server file system.</p>
        pub fn set_windows_configuration(
            mut self,
            input: std::option::Option<crate::model::WindowsFileSystemConfiguration>,
        ) -> Self {
            self.windows_configuration = input;
            self
        }
        /// <p>The configuration for the Amazon FSx for Lustre file system.</p>
        pub fn lustre_configuration(
            mut self,
            input: crate::model::LustreFileSystemConfiguration,
        ) -> Self {
            self.lustre_configuration = Some(input);
            self
        }
        /// <p>The configuration for the Amazon FSx for Lustre file system.</p>
        pub fn set_lustre_configuration(
            mut self,
            input: std::option::Option<crate::model::LustreFileSystemConfiguration>,
        ) -> Self {
            self.lustre_configuration = input;
            self
        }
        /// Appends an item to `administrative_actions`.
        ///
        /// To override the contents of this collection use [`set_administrative_actions`](Self::set_administrative_actions).
        ///
        /// <p>A list of administrative actions for the file system that are in process or waiting to be processed. Administrative actions describe changes to the Amazon FSx system that you have initiated using the <code>UpdateFileSystem</code> operation.</p>
        pub fn administrative_actions(mut self, input: crate::model::AdministrativeAction) -> Self {
            let mut v = self.administrative_actions.unwrap_or_default();
            v.push(input);
            self.administrative_actions = Some(v);
            self
        }
        /// <p>A list of administrative actions for the file system that are in process or waiting to be processed. Administrative actions describe changes to the Amazon FSx system that you have initiated using the <code>UpdateFileSystem</code> operation.</p>
        pub fn set_administrative_actions(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::AdministrativeAction>>,
        ) -> Self {
            self.administrative_actions = input;
            self
        }
        /// <p>The configuration for this Amazon FSx for NetApp ONTAP file system.</p>
        pub fn ontap_configuration(
            mut self,
            input: crate::model::OntapFileSystemConfiguration,
        ) -> Self {
            self.ontap_configuration = Some(input);
            self
        }
        /// <p>The configuration for this Amazon FSx for NetApp ONTAP file system.</p>
        pub fn set_ontap_configuration(
            mut self,
            input: std::option::Option<crate::model::OntapFileSystemConfiguration>,
        ) -> Self {
            self.ontap_configuration = input;
            self
        }
        /// <p>The Lustre version of the Amazon FSx for Lustre file system, either <code>2.10</code> or <code>2.12</code>.</p>
        pub fn file_system_type_version(mut self, input: impl Into<std::string::String>) -> Self {
            self.file_system_type_version = Some(input.into());
            self
        }
        /// <p>The Lustre version of the Amazon FSx for Lustre file system, either <code>2.10</code> or <code>2.12</code>.</p>
        pub fn set_file_system_type_version(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.file_system_type_version = input;
            self
        }
        /// <p>The configuration for this Amazon FSx for OpenZFS file system.</p>
        pub fn open_zfs_configuration(
            mut self,
            input: crate::model::OpenZfsFileSystemConfiguration,
        ) -> Self {
            self.open_zfs_configuration = Some(input);
            self
        }
        /// <p>The configuration for this Amazon FSx for OpenZFS file system.</p>
        pub fn set_open_zfs_configuration(
            mut self,
            input: std::option::Option<crate::model::OpenZfsFileSystemConfiguration>,
        ) -> Self {
            self.open_zfs_configuration = input;
            self
        }
        /// Consumes the builder and constructs a [`FileSystem`](crate::model::FileSystem).
        pub fn build(self) -> crate::model::FileSystem {
            crate::model::FileSystem {
                owner_id: self.owner_id,
                creation_time: self.creation_time,
                file_system_id: self.file_system_id,
                file_system_type: self.file_system_type,
                lifecycle: self.lifecycle,
                failure_details: self.failure_details,
                storage_capacity: self.storage_capacity,
                storage_type: self.storage_type,
                vpc_id: self.vpc_id,
                subnet_ids: self.subnet_ids,
                network_interface_ids: self.network_interface_ids,
                dns_name: self.dns_name,
                kms_key_id: self.kms_key_id,
                resource_arn: self.resource_arn,
                tags: self.tags,
                windows_configuration: self.windows_configuration,
                lustre_configuration: self.lustre_configuration,
                administrative_actions: self.administrative_actions,
                ontap_configuration: self.ontap_configuration,
                file_system_type_version: self.file_system_type_version,
                open_zfs_configuration: self.open_zfs_configuration,
            }
        }
    }
}
impl FileSystem {
    /// Creates a new builder-style object to manufacture [`FileSystem`](crate::model::FileSystem).
    pub fn builder() -> crate::model::file_system::Builder {
        // The builder derives `Default`, so an empty builder is just the default value.
        Default::default()
    }
}

/// <p>The configuration for the Amazon FSx for OpenZFS file system. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct OpenZfsFileSystemConfiguration {
    /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
    #[doc(hidden)]
    pub automatic_backup_retention_days: std::option::Option<i32>,
    /// <p>A Boolean value indicating whether tags on the file system should be copied to backups. If it's set to <code>true</code>, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value. </p>
    #[doc(hidden)]
    pub copy_tags_to_backups: std::option::Option<bool>,
    /// <p>A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value. </p>
    #[doc(hidden)]
    pub copy_tags_to_volumes: std::option::Option<bool>,
    /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
    #[doc(hidden)]
    pub daily_automatic_backup_start_time: std::option::Option<std::string::String>,
    /// <p>Specifies the file-system deployment type. Amazon FSx for OpenZFS supports <code>SINGLE_AZ_1</code> and <code>SINGLE_AZ_2</code>.</p>
    #[doc(hidden)]
    pub deployment_type: std::option::Option<crate::model::OpenZfsDeploymentType>,
    /// <p>The throughput of an Amazon FSx file system, measured in megabytes per second (MBps).</p>
    #[doc(hidden)]
    pub throughput_capacity: std::option::Option<i32>,
    /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
    /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
    /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
    /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
    #[doc(hidden)]
    pub weekly_maintenance_start_time: std::option::Option<std::string::String>,
    /// <p>The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how the amount was provisioned (by the customer or by the system).</p>
    #[doc(hidden)]
    pub disk_iops_configuration: std::option::Option<crate::model::DiskIopsConfiguration>,
    /// <p>The ID of the root volume of the OpenZFS file system. </p>
    #[doc(hidden)]
    pub root_volume_id: std::option::Option<std::string::String>,
}
impl OpenZfsFileSystemConfiguration {
    /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
    pub fn automatic_backup_retention_days(&self) -> std::option::Option<i32> {
        self.automatic_backup_retention_days
    }
    /// <p>A Boolean value indicating whether tags on the file system should be copied to backups. If it's set to <code>true</code>, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value. </p>
    pub fn copy_tags_to_backups(&self) -> std::option::Option<bool> {
        self.copy_tags_to_backups
    }
    /// <p>A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value. </p>
    pub fn copy_tags_to_volumes(&self) -> std::option::Option<bool> {
        self.copy_tags_to_volumes
    }
    /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
    pub fn daily_automatic_backup_start_time(&self) -> std::option::Option<&str> {
        self.daily_automatic_backup_start_time.as_deref()
    }
    /// <p>Specifies the file-system deployment type. Amazon FSx for OpenZFS supports
 <code>SINGLE_AZ_1</code> and <code>SINGLE_AZ_2</code>.</p>
    pub fn deployment_type(&self) -> std::option::Option<&crate::model::OpenZfsDeploymentType> {
        self.deployment_type.as_ref()
    }
    /// <p>The throughput of an Amazon FSx file system, measured in megabytes per second (MBps).</p>
    pub fn throughput_capacity(&self) -> std::option::Option<i32> {
        self.throughput_capacity
    }
    /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
    /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
    /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
    /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
    pub fn weekly_maintenance_start_time(&self) -> std::option::Option<&str> {
        self.weekly_maintenance_start_time.as_deref()
    }
    /// <p>The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how the amount was provisioned (by the customer or by the system).</p>
    pub fn disk_iops_configuration(
        &self,
    ) -> std::option::Option<&crate::model::DiskIopsConfiguration> {
        self.disk_iops_configuration.as_ref()
    }
    /// <p>The ID of the root volume of the OpenZFS file system. </p>
    pub fn root_volume_id(&self) -> std::option::Option<&str> {
        self.root_volume_id.as_deref()
    }
}
/// See [`OpenZfsFileSystemConfiguration`](crate::model::OpenZfsFileSystemConfiguration).
pub mod open_zfs_file_system_configuration {

    /// A builder for [`OpenZfsFileSystemConfiguration`](crate::model::OpenZfsFileSystemConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) automatic_backup_retention_days: std::option::Option<i32>,
        pub(crate) copy_tags_to_backups: std::option::Option<bool>,
        pub(crate) copy_tags_to_volumes: std::option::Option<bool>,
        pub(crate) daily_automatic_backup_start_time: std::option::Option<std::string::String>,
        pub(crate) deployment_type: std::option::Option<crate::model::OpenZfsDeploymentType>,
        pub(crate) throughput_capacity: std::option::Option<i32>,
        pub(crate) weekly_maintenance_start_time: std::option::Option<std::string::String>,
        pub(crate) disk_iops_configuration:
            std::option::Option<crate::model::DiskIopsConfiguration>,
        pub(crate) root_volume_id: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
        pub fn automatic_backup_retention_days(mut self, input: i32) -> Self {
            self.automatic_backup_retention_days = Some(input);
            self
        }
        /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
        pub fn set_automatic_backup_retention_days(
            mut self,
            input: std::option::Option<i32>,
        ) -> Self {
            self.automatic_backup_retention_days = input;
            self
        }
        /// <p>A Boolean value indicating whether tags on the file system should be copied to backups. If it's set to <code>true</code>, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value. </p>
        pub fn copy_tags_to_backups(mut self, input: bool) -> Self {
            self.copy_tags_to_backups = Some(input);
            self
        }
        /// <p>A Boolean value indicating whether tags on the file system should be copied to backups. If it's set to <code>true</code>, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value. </p>
        pub fn set_copy_tags_to_backups(mut self, input: std::option::Option<bool>) -> Self {
            self.copy_tags_to_backups = input;
            self
        }
        /// <p>A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value. </p>
        pub fn copy_tags_to_volumes(mut self, input: bool) -> Self {
            self.copy_tags_to_volumes = Some(input);
            self
        }
        /// <p>A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value. </p>
        pub fn set_copy_tags_to_volumes(mut self, input: std::option::Option<bool>) -> Self {
            self.copy_tags_to_volumes = input;
            self
        }
        /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
        pub fn daily_automatic_backup_start_time(
            mut self,
            input: impl Into<std::string::String>,
        ) -> Self {
            self.daily_automatic_backup_start_time = Some(input.into());
            self
        }
        /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
        pub fn set_daily_automatic_backup_start_time(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.daily_automatic_backup_start_time = input;
            self
        }
        /// <p>Specifies the file-system deployment type. Amazon FSx for OpenZFS supports <code>SINGLE_AZ_1</code> and <code>SINGLE_AZ_2</code>.</p>
        pub fn deployment_type(mut self, input: crate::model::OpenZfsDeploymentType) -> Self {
            self.deployment_type = Some(input);
            self
        }
        /// <p>Specifies the file-system deployment type. Amazon FSx for OpenZFS supports <code>SINGLE_AZ_1</code> and <code>SINGLE_AZ_2</code>.</p>
        pub fn set_deployment_type(
            mut self,
            input: std::option::Option<crate::model::OpenZfsDeploymentType>,
        ) -> Self {
            self.deployment_type = input;
            self
        }
        /// <p>The throughput of an Amazon FSx file system, measured in megabytes per second (MBps).</p>
        pub fn throughput_capacity(mut self, input: i32) -> Self {
            self.throughput_capacity = Some(input);
            self
        }
        /// <p>The throughput of an Amazon FSx file system, measured in megabytes per second (MBps).</p>
        pub fn set_throughput_capacity(mut self, input: std::option::Option<i32>) -> Self {
            self.throughput_capacity = input;
            self
        }
        /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
        /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
        /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
        /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
        pub fn weekly_maintenance_start_time(
            mut self,
            input: impl Into<std::string::String>,
        ) -> Self {
            self.weekly_maintenance_start_time = Some(input.into());
            self
        }
        /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
        /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
        /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
        /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
        pub fn set_weekly_maintenance_start_time(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.weekly_maintenance_start_time = input;
            self
        }
        /// <p>The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how the amount was provisioned (by the customer or by the system).</p>
        pub fn disk_iops_configuration(
            mut self,
            input: crate::model::DiskIopsConfiguration,
        ) -> Self {
            self.disk_iops_configuration = Some(input);
            self
        }
        /// <p>The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how the amount was provisioned (by the customer or by the system).</p>
        pub fn set_disk_iops_configuration(
            mut self,
            input: std::option::Option<crate::model::DiskIopsConfiguration>,
        ) -> Self {
            self.disk_iops_configuration = input;
            self
        }
        /// <p>The ID of the root volume of the OpenZFS file system. </p>
        pub fn root_volume_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.root_volume_id = Some(input.into());
            self
        }
        /// <p>The ID of the root volume of the OpenZFS file system. </p>
        pub fn set_root_volume_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.root_volume_id = input;
            self
        }
        /// Consumes the builder and constructs a [`OpenZfsFileSystemConfiguration`](crate::model::OpenZfsFileSystemConfiguration).
        pub fn build(self) -> crate::model::OpenZfsFileSystemConfiguration {
            crate::model::OpenZfsFileSystemConfiguration {
                automatic_backup_retention_days: self.automatic_backup_retention_days,
                copy_tags_to_backups: self.copy_tags_to_backups,
                copy_tags_to_volumes: self.copy_tags_to_volumes,
                daily_automatic_backup_start_time: self.daily_automatic_backup_start_time,
                deployment_type: self.deployment_type,
                throughput_capacity: self.throughput_capacity,
                weekly_maintenance_start_time: self.weekly_maintenance_start_time,
                disk_iops_configuration: self.disk_iops_configuration,
                root_volume_id: self.root_volume_id,
            }
        }
    }
}
impl OpenZfsFileSystemConfiguration {
    /// Creates a new builder-style object to manufacture [`OpenZfsFileSystemConfiguration`](crate::model::OpenZfsFileSystemConfiguration).
    pub fn builder() -> crate::model::open_zfs_file_system_configuration::Builder {
        // The builder derives `Default`; the return type drives inference here.
        Default::default()
    }
}

/// <p>The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how the amount was provisioned (by the customer or by the system).</p>
#[non_exhaustive]
#[derive(Clone, Debug, PartialEq)]
pub struct DiskIopsConfiguration {
    /// <p>Specifies whether the number of IOPS for the file system is using the system default (<code>AUTOMATIC</code>) or was provisioned by the customer (<code>USER_PROVISIONED</code>).</p>
    #[doc(hidden)]
    pub mode: Option<crate::model::DiskIopsConfigurationMode>,
    /// <p>The total number of SSD IOPS provisioned for the file system.</p>
    #[doc(hidden)]
    pub iops: Option<i64>,
}
impl DiskIopsConfiguration {
    /// Returns whether the IOPS amount is the system default (<code>AUTOMATIC</code>)
    /// or customer-provisioned (<code>USER_PROVISIONED</code>), if set.
    pub fn mode(&self) -> Option<&crate::model::DiskIopsConfigurationMode> {
        match &self.mode {
            Some(mode) => Some(mode),
            None => None,
        }
    }
    /// Returns the total number of SSD IOPS provisioned for the file system, if set.
    pub fn iops(&self) -> Option<i64> {
        self.iops
    }
}
/// See [`DiskIopsConfiguration`](crate::model::DiskIopsConfiguration).
pub mod disk_iops_configuration {

    /// A builder for [`DiskIopsConfiguration`](crate::model::DiskIopsConfiguration).
    #[derive(Clone, Debug, Default, PartialEq)]
    pub struct Builder {
        pub(crate) mode: Option<crate::model::DiskIopsConfigurationMode>,
        pub(crate) iops: Option<i64>,
    }
    impl Builder {
        /// Sets whether the number of IOPS is the system default (<code>AUTOMATIC</code>)
        /// or customer-provisioned (<code>USER_PROVISIONED</code>).
        pub fn mode(self, input: crate::model::DiskIopsConfigurationMode) -> Self {
            Self {
                mode: Some(input),
                ..self
            }
        }
        /// Sets (or clears, with `None`) the IOPS provisioning mode.
        pub fn set_mode(self, input: Option<crate::model::DiskIopsConfigurationMode>) -> Self {
            Self { mode: input, ..self }
        }
        /// Sets the total number of SSD IOPS provisioned for the file system.
        pub fn iops(self, input: i64) -> Self {
            Self {
                iops: Some(input),
                ..self
            }
        }
        /// Sets (or clears, with `None`) the total number of provisioned SSD IOPS.
        pub fn set_iops(self, input: Option<i64>) -> Self {
            Self { iops: input, ..self }
        }
        /// Consumes the builder and constructs a [`DiskIopsConfiguration`](crate::model::DiskIopsConfiguration).
        pub fn build(self) -> crate::model::DiskIopsConfiguration {
            let Self { mode, iops } = self;
            crate::model::DiskIopsConfiguration { mode, iops }
        }
    }
}
impl DiskIopsConfiguration {
    /// Creates a new builder-style object to manufacture [`DiskIopsConfiguration`](crate::model::DiskIopsConfiguration).
    pub fn builder() -> crate::model::disk_iops_configuration::Builder {
        // The builder derives `Default`; the return type drives inference here.
        Default::default()
    }
}

/// How the SSD IOPS amount of a [`DiskIopsConfiguration`](crate::model::DiskIopsConfiguration)
/// was provisioned: by the system (`AUTOMATIC`) or by the customer (`USER_PROVISIONED`).
///
/// When writing a match expression against `DiskIopsConfigurationMode`, keep your
/// code forward-compatible: the service may start returning values that this
/// version of the SDK has no named variant for. Such values are delivered as the
/// opaque `Unknown` variant. Instead of matching `Unknown` directly, compare the
/// string form:
///
/// ```text
/// # let diskiopsconfigurationmode = unimplemented!();
/// match diskiopsconfigurationmode {
///     DiskIopsConfigurationMode::Automatic => { /* ... */ },
///     DiskIopsConfigurationMode::UserProvisioned => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
///
/// The guard arm above matches `Unknown(UnknownVariantValue("NewFeature"))` today,
/// and keeps matching after an upgrade to an SDK that defines a named
/// `NewFeature` variant, because `as_str` yields `"NewFeature"` in both cases.
///
/// Matching explicitly on `Unknown` is discouraged: its `UnknownVariantValue`
/// payload is opaque, and the arm can inadvertently shadow other intended arms.
#[non_exhaustive]
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum DiskIopsConfigurationMode {
    /// The number of IOPS uses the system default; string form `"AUTOMATIC"`.
    Automatic,
    /// The number of IOPS was provisioned by the customer; string form `"USER_PROVISIONED"`.
    UserProvisioned,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for DiskIopsConfigurationMode {
    /// Converts the service's string representation into an enum value;
    /// unrecognized strings become `Unknown`.
    fn from(s: &str) -> Self {
        if s == "AUTOMATIC" {
            Self::Automatic
        } else if s == "USER_PROVISIONED" {
            Self::UserProvisioned
        } else {
            Self::Unknown(crate::types::UnknownVariantValue(s.to_owned()))
        }
    }
}
impl std::str::FromStr for DiskIopsConfigurationMode {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(DiskIopsConfigurationMode::from(s))
    }
}
impl DiskIopsConfigurationMode {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            DiskIopsConfigurationMode::Automatic => "AUTOMATIC",
            DiskIopsConfigurationMode::UserProvisioned => "USER_PROVISIONED",
            DiskIopsConfigurationMode::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["AUTOMATIC", "USER_PROVISIONED"]
    }
}
impl AsRef<str> for DiskIopsConfigurationMode {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// Deployment type of an Amazon FSx for OpenZFS file system.
///
/// When writing a match expression against `OpenZfsDeploymentType`, keep your
/// code forward-compatible: the service may start returning values that this
/// version of the SDK has no named variant for. Such values are delivered as the
/// opaque `Unknown` variant. Instead of matching `Unknown` directly, compare the
/// string form:
///
/// ```text
/// # let openzfsdeploymenttype = unimplemented!();
/// match openzfsdeploymenttype {
///     OpenZfsDeploymentType::SingleAz1 => { /* ... */ },
///     OpenZfsDeploymentType::SingleAz2 => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
///
/// The guard arm above matches `Unknown(UnknownVariantValue("NewFeature"))` today,
/// and keeps matching after an upgrade to an SDK that defines a named
/// `NewFeature` variant, because `as_str` yields `"NewFeature"` in both cases.
///
/// Matching explicitly on `Unknown` is discouraged: its `UnknownVariantValue`
/// payload is opaque, and the arm can inadvertently shadow other intended arms.
#[non_exhaustive]
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum OpenZfsDeploymentType {
    /// Corresponds to the `SINGLE_AZ_1` deployment type string.
    SingleAz1,
    /// Corresponds to the `SINGLE_AZ_2` deployment type string.
    SingleAz2,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for OpenZfsDeploymentType {
    /// Converts the service's string representation into an enum value;
    /// unrecognized strings become `Unknown`.
    fn from(s: &str) -> Self {
        if s == "SINGLE_AZ_1" {
            Self::SingleAz1
        } else if s == "SINGLE_AZ_2" {
            Self::SingleAz2
        } else {
            Self::Unknown(crate::types::UnknownVariantValue(s.to_owned()))
        }
    }
}
impl std::str::FromStr for OpenZfsDeploymentType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(OpenZfsDeploymentType::from(s))
    }
}
impl OpenZfsDeploymentType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            OpenZfsDeploymentType::SingleAz1 => "SINGLE_AZ_1",
            OpenZfsDeploymentType::SingleAz2 => "SINGLE_AZ_2",
            OpenZfsDeploymentType::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["SINGLE_AZ_1", "SINGLE_AZ_2"]
    }
}
impl AsRef<str> for OpenZfsDeploymentType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>Configuration for the FSx for NetApp ONTAP file system.</p>
#[non_exhaustive]
#[derive(Clone, Debug, PartialEq)]
pub struct OntapFileSystemConfiguration {
    /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
    #[doc(hidden)]
    pub automatic_backup_retention_days: Option<i32>,
    /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
    #[doc(hidden)]
    pub daily_automatic_backup_start_time: Option<String>,
    /// <p>Specifies the FSx for ONTAP file system deployment type in use in the file system. </p>
    /// <ul>
    /// <li> <p> <code>MULTI_AZ_1</code> - (Default) A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. </p> </li>
    /// <li> <p> <code>SINGLE_AZ_1</code> - A file system configured for Single-AZ redundancy.</p> </li>
    /// </ul>
    /// <p>For information about the use cases for Multi-AZ and Single-AZ deployments, refer to <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/high-availability-multiAZ.html">Choosing Multi-AZ or Single-AZ file system deployment</a>. </p>
    #[doc(hidden)]
    pub deployment_type: Option<crate::model::OntapDeploymentType>,
    /// <p>(Multi-AZ only) The IP address range in which the endpoints to access your file system are created.</p> <important>
    /// <p>The Endpoint IP address range you select for your file system must exist outside the VPC's CIDR range and must be at least /30 or larger. If you do not specify this optional parameter, Amazon FSx will automatically select a CIDR block for you.</p>
    /// </important>
    #[doc(hidden)]
    pub endpoint_ip_address_range: Option<String>,
    /// <p>The <code>Management</code> and <code>Intercluster</code> endpoints that are used to access data or to manage the file system using the NetApp ONTAP CLI, REST API, or NetApp SnapMirror.</p>
    #[doc(hidden)]
    pub endpoints: Option<crate::model::FileSystemEndpoints>,
    /// <p>The SSD IOPS configuration for the ONTAP file system, specifying the number of provisioned IOPS and the provision mode.</p>
    #[doc(hidden)]
    pub disk_iops_configuration: Option<crate::model::DiskIopsConfiguration>,
    /// <p>The ID for a subnet. A <i>subnet</i> is a range of IP addresses in your virtual private cloud (VPC). For more information, see <a href="https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html">VPC and subnets</a> in the <i>Amazon VPC User Guide.</i> </p>
    #[doc(hidden)]
    pub preferred_subnet_id: Option<String>,
    /// <p>(Multi-AZ only) The VPC route tables in which your file system's endpoints are created.</p>
    #[doc(hidden)]
    pub route_table_ids: Option<Vec<String>>,
    /// <p>The sustained throughput of an Amazon FSx file system in Megabytes per second (MBps).</p>
    #[doc(hidden)]
    pub throughput_capacity: Option<i32>,
    /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
    /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
    /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
    /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
    #[doc(hidden)]
    pub weekly_maintenance_start_time: Option<String>,
}
impl OntapFileSystemConfiguration {
    /// Returns the number of days automatic backups are retained
    /// (<code>0</code> disables them; maximum 90; default <code>0</code>), if set.
    pub fn automatic_backup_retention_days(&self) -> Option<i32> {
        self.automatic_backup_retention_days
    }
    /// Returns the daily automatic-backup start time in <code>HH:MM</code>
    /// (zero-padded 24-hour clock, e.g. <code>05:00</code> for 5 AM), if set.
    pub fn daily_automatic_backup_start_time(&self) -> Option<&str> {
        self.daily_automatic_backup_start_time
            .as_ref()
            .map(String::as_str)
    }
    /// Returns the FSx for ONTAP deployment type (<code>MULTI_AZ_1</code> is the
    /// default; <code>SINGLE_AZ_1</code> for Single-AZ redundancy), if set.
    pub fn deployment_type(&self) -> Option<&crate::model::OntapDeploymentType> {
        self.deployment_type.as_ref()
    }
    /// (Multi-AZ only) Returns the IP address range in which the file system's
    /// endpoints are created, if set. The range must be outside the VPC's CIDR
    /// range and at least /30; FSx picks one automatically when not specified.
    pub fn endpoint_ip_address_range(&self) -> Option<&str> {
        self.endpoint_ip_address_range.as_ref().map(String::as_str)
    }
    /// Returns the <code>Management</code> and <code>Intercluster</code> endpoints
    /// used to access data or manage the file system via the NetApp ONTAP CLI,
    /// REST API, or NetApp SnapMirror, if set.
    pub fn endpoints(&self) -> Option<&crate::model::FileSystemEndpoints> {
        self.endpoints.as_ref()
    }
    /// Returns the SSD IOPS configuration (provisioned IOPS count and provision
    /// mode) for the ONTAP file system, if set.
    pub fn disk_iops_configuration(&self) -> Option<&crate::model::DiskIopsConfiguration> {
        self.disk_iops_configuration.as_ref()
    }
    /// Returns the ID of the preferred subnet (a range of IP addresses in your
    /// VPC), if set.
    pub fn preferred_subnet_id(&self) -> Option<&str> {
        self.preferred_subnet_id.as_ref().map(String::as_str)
    }
    /// (Multi-AZ only) Returns the VPC route tables in which the file system's
    /// endpoints are created, if set.
    pub fn route_table_ids(&self) -> Option<&[String]> {
        self.route_table_ids.as_ref().map(Vec::as_slice)
    }
    /// Returns the sustained throughput of the file system in megabytes per
    /// second (MBps), if set.
    pub fn throughput_capacity(&self) -> Option<i32> {
        self.throughput_capacity
    }
    /// Returns the weekly maintenance start time in <code>D:HH:MM</code>, where
    /// <code>D</code> is the ISO-8601 day of the week (1 = Monday … 7 = Sunday)
    /// and <code>HH:MM</code> is a zero-padded 24-hour time, if set.
    pub fn weekly_maintenance_start_time(&self) -> Option<&str> {
        self.weekly_maintenance_start_time
            .as_ref()
            .map(String::as_str)
    }
}
/// See [`OntapFileSystemConfiguration`](crate::model::OntapFileSystemConfiguration).
pub mod ontap_file_system_configuration {

    /// A builder for [`OntapFileSystemConfiguration`](crate::model::OntapFileSystemConfiguration).
    #[derive(Clone, Debug, Default, PartialEq)]
    pub struct Builder {
        pub(crate) automatic_backup_retention_days: Option<i32>,
        pub(crate) daily_automatic_backup_start_time: Option<String>,
        pub(crate) deployment_type: Option<crate::model::OntapDeploymentType>,
        pub(crate) endpoint_ip_address_range: Option<String>,
        pub(crate) endpoints: Option<crate::model::FileSystemEndpoints>,
        pub(crate) disk_iops_configuration: Option<crate::model::DiskIopsConfiguration>,
        pub(crate) preferred_subnet_id: Option<String>,
        pub(crate) route_table_ids: Option<Vec<String>>,
        pub(crate) throughput_capacity: Option<i32>,
        pub(crate) weekly_maintenance_start_time: Option<String>,
    }
    impl Builder {
        /// Sets the number of days to retain automatic backups
        /// (<code>0</code> disables them; maximum 90; default <code>0</code>).
        pub fn automatic_backup_retention_days(self, input: i32) -> Self {
            Self {
                automatic_backup_retention_days: Some(input),
                ..self
            }
        }
        /// Sets (or clears, with `None`) the automatic-backup retention period in days.
        pub fn set_automatic_backup_retention_days(self, input: Option<i32>) -> Self {
            Self {
                automatic_backup_retention_days: input,
                ..self
            }
        }
        /// Sets the daily automatic-backup start time in <code>HH:MM</code>
        /// (zero-padded 24-hour clock, e.g. <code>05:00</code> for 5 AM).
        pub fn daily_automatic_backup_start_time(self, input: impl Into<String>) -> Self {
            Self {
                daily_automatic_backup_start_time: Some(input.into()),
                ..self
            }
        }
        /// Sets (or clears, with `None`) the daily automatic-backup start time.
        pub fn set_daily_automatic_backup_start_time(self, input: Option<String>) -> Self {
            Self {
                daily_automatic_backup_start_time: input,
                ..self
            }
        }
        /// Sets the FSx for ONTAP deployment type (<code>MULTI_AZ_1</code> is the
        /// default; <code>SINGLE_AZ_1</code> for Single-AZ redundancy).
        pub fn deployment_type(self, input: crate::model::OntapDeploymentType) -> Self {
            Self {
                deployment_type: Some(input),
                ..self
            }
        }
        /// Sets (or clears, with `None`) the FSx for ONTAP deployment type.
        pub fn set_deployment_type(
            self,
            input: Option<crate::model::OntapDeploymentType>,
        ) -> Self {
            Self {
                deployment_type: input,
                ..self
            }
        }
        /// (Multi-AZ only) Sets the IP address range in which the file system's
        /// endpoints are created. Must be outside the VPC's CIDR range and at
        /// least /30; FSx selects one automatically when not specified.
        pub fn endpoint_ip_address_range(self, input: impl Into<String>) -> Self {
            Self {
                endpoint_ip_address_range: Some(input.into()),
                ..self
            }
        }
        /// Sets (or clears, with `None`) the endpoint IP address range.
        pub fn set_endpoint_ip_address_range(self, input: Option<String>) -> Self {
            Self {
                endpoint_ip_address_range: input,
                ..self
            }
        }
        /// Sets the <code>Management</code> and <code>Intercluster</code> endpoints
        /// used to access data or manage the file system via the NetApp ONTAP CLI,
        /// REST API, or NetApp SnapMirror.
        pub fn endpoints(self, input: crate::model::FileSystemEndpoints) -> Self {
            Self {
                endpoints: Some(input),
                ..self
            }
        }
        /// Sets (or clears, with `None`) the file system endpoints.
        pub fn set_endpoints(self, input: Option<crate::model::FileSystemEndpoints>) -> Self {
            Self {
                endpoints: input,
                ..self
            }
        }
        /// Sets the SSD IOPS configuration (provisioned IOPS count and provision mode).
        pub fn disk_iops_configuration(self, input: crate::model::DiskIopsConfiguration) -> Self {
            Self {
                disk_iops_configuration: Some(input),
                ..self
            }
        }
        /// Sets (or clears, with `None`) the SSD IOPS configuration.
        pub fn set_disk_iops_configuration(
            self,
            input: Option<crate::model::DiskIopsConfiguration>,
        ) -> Self {
            Self {
                disk_iops_configuration: input,
                ..self
            }
        }
        /// Sets the ID of the preferred subnet (a range of IP addresses in your VPC).
        pub fn preferred_subnet_id(self, input: impl Into<String>) -> Self {
            Self {
                preferred_subnet_id: Some(input.into()),
                ..self
            }
        }
        /// Sets (or clears, with `None`) the preferred subnet ID.
        pub fn set_preferred_subnet_id(self, input: Option<String>) -> Self {
            Self {
                preferred_subnet_id: input,
                ..self
            }
        }
        /// Appends an item to `route_table_ids`.
        ///
        /// To override the contents of this collection use [`set_route_table_ids`](Self::set_route_table_ids).
        ///
        /// <p>(Multi-AZ only) The VPC route tables in which your file system's endpoints are created.</p>
        pub fn route_table_ids(mut self, input: impl Into<String>) -> Self {
            // Lazily create the Vec on first append, then push.
            self.route_table_ids
                .get_or_insert_with(Vec::new)
                .push(input.into());
            self
        }
        /// Replaces (or clears, with `None`) the whole route-table-ID collection.
        pub fn set_route_table_ids(self, input: Option<Vec<String>>) -> Self {
            Self {
                route_table_ids: input,
                ..self
            }
        }
        /// Sets the sustained throughput of the file system in megabytes per second (MBps).
        pub fn throughput_capacity(self, input: i32) -> Self {
            Self {
                throughput_capacity: Some(input),
                ..self
            }
        }
        /// Sets (or clears, with `None`) the sustained throughput in MBps.
        pub fn set_throughput_capacity(self, input: Option<i32>) -> Self {
            Self {
                throughput_capacity: input,
                ..self
            }
        }
        /// Sets the weekly maintenance start time in <code>D:HH:MM</code>, where
        /// <code>D</code> is the ISO-8601 day of the week (1 = Monday … 7 = Sunday)
        /// and <code>HH:MM</code> is a zero-padded 24-hour time
        /// (e.g. <code>1:05:00</code> for 5 AM Monday).
        pub fn weekly_maintenance_start_time(self, input: impl Into<String>) -> Self {
            Self {
                weekly_maintenance_start_time: Some(input.into()),
                ..self
            }
        }
        /// Sets (or clears, with `None`) the weekly maintenance start time.
        pub fn set_weekly_maintenance_start_time(self, input: Option<String>) -> Self {
            Self {
                weekly_maintenance_start_time: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`OntapFileSystemConfiguration`](crate::model::OntapFileSystemConfiguration).
        pub fn build(self) -> crate::model::OntapFileSystemConfiguration {
            // Destructure once, then move each field into the target struct.
            let Self {
                automatic_backup_retention_days,
                daily_automatic_backup_start_time,
                deployment_type,
                endpoint_ip_address_range,
                endpoints,
                disk_iops_configuration,
                preferred_subnet_id,
                route_table_ids,
                throughput_capacity,
                weekly_maintenance_start_time,
            } = self;
            crate::model::OntapFileSystemConfiguration {
                automatic_backup_retention_days,
                daily_automatic_backup_start_time,
                deployment_type,
                endpoint_ip_address_range,
                endpoints,
                disk_iops_configuration,
                preferred_subnet_id,
                route_table_ids,
                throughput_capacity,
                weekly_maintenance_start_time,
            }
        }
    }
}
impl OntapFileSystemConfiguration {
    /// Creates a new builder-style object to manufacture [`OntapFileSystemConfiguration`](crate::model::OntapFileSystemConfiguration).
    pub fn builder() -> crate::model::ontap_file_system_configuration::Builder {
        // The builder derives `Default`, so this is just a default-initialized builder.
        Default::default()
    }
}

/// <p>An Amazon FSx for NetApp ONTAP file system has the following endpoints that are used to access data or to manage the file system using the NetApp ONTAP CLI, REST API, or NetApp SnapMirror.</p>
#[non_exhaustive]
#[derive(Clone, PartialEq, Debug)]
pub struct FileSystemEndpoints {
    /// <p>An endpoint for managing your file system by setting up NetApp SnapMirror with other ONTAP systems.</p>
    #[doc(hidden)]
    pub intercluster: Option<crate::model::FileSystemEndpoint>,
    /// <p>An endpoint for managing your file system using the NetApp ONTAP CLI and NetApp ONTAP API.</p>
    #[doc(hidden)]
    pub management: Option<crate::model::FileSystemEndpoint>,
}
impl FileSystemEndpoints {
    /// <p>An endpoint for managing your file system using the NetApp ONTAP CLI and NetApp ONTAP API.</p>
    pub fn management(&self) -> Option<&crate::model::FileSystemEndpoint> {
        self.management.as_ref()
    }
    /// <p>An endpoint for managing your file system by setting up NetApp SnapMirror with other ONTAP systems.</p>
    pub fn intercluster(&self) -> Option<&crate::model::FileSystemEndpoint> {
        self.intercluster.as_ref()
    }
}
/// See [`FileSystemEndpoints`](crate::model::FileSystemEndpoints).
pub mod file_system_endpoints {

    /// A builder for [`FileSystemEndpoints`](crate::model::FileSystemEndpoints).
    #[derive(Clone, PartialEq, Default, Debug)]
    pub struct Builder {
        pub(crate) intercluster: Option<crate::model::FileSystemEndpoint>,
        pub(crate) management: Option<crate::model::FileSystemEndpoint>,
    }
    impl Builder {
        /// <p>An endpoint for managing your file system by setting up NetApp SnapMirror with other ONTAP systems.</p>
        pub fn intercluster(self, input: crate::model::FileSystemEndpoint) -> Self {
            Self {
                intercluster: Some(input),
                ..self
            }
        }
        /// <p>An endpoint for managing your file system by setting up NetApp SnapMirror with other ONTAP systems.</p>
        pub fn set_intercluster(
            self,
            input: Option<crate::model::FileSystemEndpoint>,
        ) -> Self {
            Self {
                intercluster: input,
                ..self
            }
        }
        /// <p>An endpoint for managing your file system using the NetApp ONTAP CLI and NetApp ONTAP API.</p>
        pub fn management(self, input: crate::model::FileSystemEndpoint) -> Self {
            Self {
                management: Some(input),
                ..self
            }
        }
        /// <p>An endpoint for managing your file system using the NetApp ONTAP CLI and NetApp ONTAP API.</p>
        pub fn set_management(
            self,
            input: Option<crate::model::FileSystemEndpoint>,
        ) -> Self {
            Self {
                management: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`FileSystemEndpoints`](crate::model::FileSystemEndpoints).
        pub fn build(self) -> crate::model::FileSystemEndpoints {
            let Self {
                intercluster,
                management,
            } = self;
            crate::model::FileSystemEndpoints {
                intercluster,
                management,
            }
        }
    }
}
impl FileSystemEndpoints {
    /// Creates a new builder-style object to manufacture [`FileSystemEndpoints`](crate::model::FileSystemEndpoints).
    pub fn builder() -> crate::model::file_system_endpoints::Builder {
        // The builder derives `Default`, so this is just a default-initialized builder.
        Default::default()
    }
}

/// <p>An Amazon FSx for NetApp ONTAP file system has two endpoints that are used to access data or to manage the file system using the NetApp ONTAP CLI, REST API, or NetApp SnapMirror. They are the <code>Management</code> and <code>Intercluster</code> endpoints.</p>
#[non_exhaustive]
#[derive(Clone, PartialEq, Debug)]
pub struct FileSystemEndpoint {
    /// <p>The Domain Name Service (DNS) name for the file system. You can mount your file system using its DNS name.</p>
    #[doc(hidden)]
    pub dns_name: Option<String>,
    /// <p>IP addresses of the file system endpoint.</p>
    #[doc(hidden)]
    pub ip_addresses: Option<Vec<String>>,
}
impl FileSystemEndpoint {
    /// <p>The Domain Name Service (DNS) name for the file system. You can mount your file system using its DNS name.</p>
    pub fn dns_name(&self) -> Option<&str> {
        self.dns_name.as_ref().map(|s| s.as_str())
    }
    /// <p>IP addresses of the file system endpoint.</p>
    pub fn ip_addresses(&self) -> Option<&[String]> {
        self.ip_addresses.as_ref().map(|v| v.as_slice())
    }
}
/// See [`FileSystemEndpoint`](crate::model::FileSystemEndpoint).
pub mod file_system_endpoint {

    /// A builder for [`FileSystemEndpoint`](crate::model::FileSystemEndpoint).
    #[derive(Clone, PartialEq, Default, Debug)]
    pub struct Builder {
        pub(crate) dns_name: Option<String>,
        pub(crate) ip_addresses: Option<Vec<String>>,
    }
    impl Builder {
        /// <p>The Domain Name Service (DNS) name for the file system. You can mount your file system using its DNS name.</p>
        pub fn dns_name(self, input: impl Into<String>) -> Self {
            Self {
                dns_name: Some(input.into()),
                ..self
            }
        }
        /// <p>The Domain Name Service (DNS) name for the file system. You can mount your file system using its DNS name.</p>
        pub fn set_dns_name(self, input: Option<String>) -> Self {
            Self {
                dns_name: input,
                ..self
            }
        }
        /// Appends an item to `ip_addresses`.
        ///
        /// To override the contents of this collection use [`set_ip_addresses`](Self::set_ip_addresses).
        ///
        /// <p>IP addresses of the file system endpoint.</p>
        pub fn ip_addresses(mut self, input: impl Into<String>) -> Self {
            // Lazily create the backing vector on the first append.
            self.ip_addresses
                .get_or_insert_with(Vec::new)
                .push(input.into());
            self
        }
        /// <p>IP addresses of the file system endpoint.</p>
        pub fn set_ip_addresses(self, input: Option<Vec<String>>) -> Self {
            Self {
                ip_addresses: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`FileSystemEndpoint`](crate::model::FileSystemEndpoint).
        pub fn build(self) -> crate::model::FileSystemEndpoint {
            let Self {
                dns_name,
                ip_addresses,
            } = self;
            crate::model::FileSystemEndpoint {
                dns_name,
                ip_addresses,
            }
        }
    }
}
impl FileSystemEndpoint {
    /// Creates a new builder-style object to manufacture [`FileSystemEndpoint`](crate::model::FileSystemEndpoint).
    pub fn builder() -> crate::model::file_system_endpoint::Builder {
        // The builder derives `Default`, so this is just a default-initialized builder.
        Default::default()
    }
}

/// When writing a match expression against `OntapDeploymentType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let ontapdeploymenttype = unimplemented!();
/// match ontapdeploymenttype {
///     OntapDeploymentType::MultiAz1 => { /* ... */ },
///     OntapDeploymentType::SingleAz1 => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `ontapdeploymenttype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `OntapDeploymentType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `OntapDeploymentType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `OntapDeploymentType::NewFeature` is defined.
/// Specifically, when `ontapdeploymenttype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `OntapDeploymentType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum OntapDeploymentType {
    /// Corresponds to the service's `MULTI_AZ_1` deployment-type string.
    #[allow(missing_docs)] // documentation missing in model
    MultiAz1,
    /// Corresponds to the service's `SINGLE_AZ_1` deployment-type string.
    #[allow(missing_docs)] // documentation missing in model
    SingleAz1,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for OntapDeploymentType {
    /// Converts a raw service string into the typed enum. Unrecognized values
    /// are preserved in the `Unknown` variant instead of being rejected, which
    /// keeps this conversion forward-compatible with new service features.
    fn from(s: &str) -> Self {
        match s {
            "MULTI_AZ_1" => Self::MultiAz1,
            "SINGLE_AZ_1" => Self::SingleAz1,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for OntapDeploymentType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(OntapDeploymentType::from(s))
    }
}
impl OntapDeploymentType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            OntapDeploymentType::MultiAz1 => "MULTI_AZ_1",
            OntapDeploymentType::SingleAz1 => "SINGLE_AZ_1",
            OntapDeploymentType::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["MULTI_AZ_1", "SINGLE_AZ_1"]
    }
}
impl AsRef<str> for OntapDeploymentType {
    // Delegates to `as_str` so the enum can be passed wherever a `&str`-like
    // value is expected (e.g. string formatting or comparisons).
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>The configuration for the Amazon FSx for Lustre file system.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct LustreFileSystemConfiguration {
    /// <p>The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. Here, <code>d</code> is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.</p>
    #[doc(hidden)]
    pub weekly_maintenance_start_time: std::option::Option<std::string::String>,
    /// <p>The data repository configuration object for Lustre file systems returned in the response of the <code>CreateFileSystem</code> operation.</p>
    /// <p>This data type is not supported for file systems with the <code>Persistent_2</code> deployment type. Instead, use <code>DescribeDataRepositoryAssociations</code>.</p>
    #[doc(hidden)]
    pub data_repository_configuration:
        std::option::Option<crate::model::DataRepositoryConfiguration>,
    /// <p>The deployment type of the FSx for Lustre file system. <i>Scratch deployment type</i> is designed for temporary storage and shorter-term processing of data.</p>
    /// <p> <code>SCRATCH_1</code> and <code>SCRATCH_2</code> deployment types are best suited for when you need temporary storage and shorter-term processing of data. The <code>SCRATCH_2</code> deployment type provides in-transit encryption of data and higher burst throughput capacity than <code>SCRATCH_1</code>.</p>
    /// <p>The <code>PERSISTENT_1</code> and <code>PERSISTENT_2</code> deployment type is used for longer-term storage and workloads and encryption of data in transit. <code>PERSISTENT_2</code> is built on Lustre v2.12 and offers higher <code>PerUnitStorageThroughput</code> (up to 1000 MB/s/TiB) along with a lower minimum storage capacity requirement (600 GiB). To learn more about FSx for Lustre deployment types, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/lustre-deployment-types.html"> FSx for Lustre deployment options</a>.</p>
    /// <p>The default is <code>SCRATCH_1</code>.</p>
    #[doc(hidden)]
    pub deployment_type: std::option::Option<crate::model::LustreDeploymentType>,
    /// <p>Per unit storage throughput represents the megabytes per second of read or write throughput per 1 tebibyte of storage provisioned. File system throughput capacity is equal to Storage capacity (TiB) * PerUnitStorageThroughput (MB/s/TiB). This option is only valid for <code>PERSISTENT_1</code> and <code>PERSISTENT_2</code> deployment types. </p>
    /// <p>Valid values:</p>
    /// <ul>
    /// <li> <p>For <code>PERSISTENT_1</code> SSD storage: 50, 100, 200.</p> </li>
    /// <li> <p>For <code>PERSISTENT_1</code> HDD storage: 12, 40.</p> </li>
    /// <li> <p>For <code>PERSISTENT_2</code> SSD storage: 125, 250, 500, 1000.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub per_unit_storage_throughput: std::option::Option<i32>,
    /// <p>You use the <code>MountName</code> value when mounting the file system.</p>
    /// <p>For the <code>SCRATCH_1</code> deployment type, this value is always "<code>fsx</code>". For <code>SCRATCH_2</code>, <code>PERSISTENT_1</code>, and <code>PERSISTENT_2</code> deployment types, this value is a string that is unique within an Amazon Web Services Region. </p>
    #[doc(hidden)]
    pub mount_name: std::option::Option<std::string::String>,
    /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
    #[doc(hidden)]
    pub daily_automatic_backup_start_time: std::option::Option<std::string::String>,
    /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
    #[doc(hidden)]
    pub automatic_backup_retention_days: std::option::Option<i32>,
    /// <p>A boolean flag indicating whether tags on the file system are copied to backups. If it's set to true, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value. (Default = false)</p>
    #[doc(hidden)]
    pub copy_tags_to_backups: std::option::Option<bool>,
    /// <p>The type of drive cache used by <code>PERSISTENT_1</code> file systems that are provisioned with HDD storage devices. This parameter is required when <code>StorageType</code> is HDD. When set to <code>READ</code> the file system has an SSD storage cache that is sized to 20% of the file system's storage capacity. This improves the performance for frequently accessed files by caching up to 20% of the total storage capacity.</p>
    /// <p>This parameter is required when <code>StorageType</code> is set to HDD.</p>
    #[doc(hidden)]
    pub drive_cache_type: std::option::Option<crate::model::DriveCacheType>,
    /// <p>The data compression configuration for the file system. <code>DataCompressionType</code> can have the following values:</p>
    /// <ul>
    /// <li> <p> <code>NONE</code> - Data compression is turned off for the file system.</p> </li>
    /// <li> <p> <code>LZ4</code> - Data compression is turned on with the LZ4 algorithm.</p> </li>
    /// </ul>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/data-compression.html">Lustre data compression</a>.</p>
    #[doc(hidden)]
    pub data_compression_type: std::option::Option<crate::model::DataCompressionType>,
    /// <p>The Lustre logging configuration. Lustre logging writes the enabled log events for your file system to Amazon CloudWatch Logs.</p>
    #[doc(hidden)]
    pub log_configuration: std::option::Option<crate::model::LustreLogConfiguration>,
    /// <p>The Lustre root squash configuration for an Amazon FSx for Lustre file system. When enabled, root squash restricts root-level access from clients that try to access your file system as a root user.</p>
    #[doc(hidden)]
    pub root_squash_configuration: std::option::Option<crate::model::LustreRootSquashConfiguration>,
}
impl LustreFileSystemConfiguration {
    /// <p>The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. Here, <code>d</code> is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.</p>
    pub fn weekly_maintenance_start_time(&self) -> std::option::Option<&str> {
        self.weekly_maintenance_start_time.as_deref()
    }
    /// <p>The data repository configuration object for Lustre file systems returned in the response of the <code>CreateFileSystem</code> operation.</p>
    /// <p>This data type is not supported for file systems with the <code>Persistent_2</code> deployment type. Instead, use <code>DescribeDataRepositoryAssociations</code>.</p>
    pub fn data_repository_configuration(
        &self,
    ) -> std::option::Option<&crate::model::DataRepositoryConfiguration> {
        self.data_repository_configuration.as_ref()
    }
    /// <p>The deployment type of the FSx for Lustre file system. <i>Scratch deployment type</i> is designed for temporary storage and shorter-term processing of data.</p>
    /// <p> <code>SCRATCH_1</code> and <code>SCRATCH_2</code> deployment types are best suited for when you need temporary storage and shorter-term processing of data. The <code>SCRATCH_2</code> deployment type provides in-transit encryption of data and higher burst throughput capacity than <code>SCRATCH_1</code>.</p>
    /// <p>The <code>PERSISTENT_1</code> and <code>PERSISTENT_2</code> deployment type is used for longer-term storage and workloads and encryption of data in transit. <code>PERSISTENT_2</code> is built on Lustre v2.12 and offers higher <code>PerUnitStorageThroughput</code> (up to 1000 MB/s/TiB) along with a lower minimum storage capacity requirement (600 GiB). To learn more about FSx for Lustre deployment types, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/lustre-deployment-types.html"> FSx for Lustre deployment options</a>.</p>
    /// <p>The default is <code>SCRATCH_1</code>.</p>
    pub fn deployment_type(&self) -> std::option::Option<&crate::model::LustreDeploymentType> {
        self.deployment_type.as_ref()
    }
    /// <p>Per unit storage throughput represents the megabytes per second of read or write throughput per 1 tebibyte of storage provisioned. File system throughput capacity is equal to Storage capacity (TiB) * PerUnitStorageThroughput (MB/s/TiB). This option is only valid for <code>PERSISTENT_1</code> and <code>PERSISTENT_2</code> deployment types. </p>
    /// <p>Valid values:</p>
    /// <ul>
    /// <li> <p>For <code>PERSISTENT_1</code> SSD storage: 50, 100, 200.</p> </li>
    /// <li> <p>For <code>PERSISTENT_1</code> HDD storage: 12, 40.</p> </li>
    /// <li> <p>For <code>PERSISTENT_2</code> SSD storage: 125, 250, 500, 1000.</p> </li>
    /// </ul>
    pub fn per_unit_storage_throughput(&self) -> std::option::Option<i32> {
        self.per_unit_storage_throughput
    }
    /// <p>You use the <code>MountName</code> value when mounting the file system.</p>
    /// <p>For the <code>SCRATCH_1</code> deployment type, this value is always "<code>fsx</code>". For <code>SCRATCH_2</code>, <code>PERSISTENT_1</code>, and <code>PERSISTENT_2</code> deployment types, this value is a string that is unique within an Amazon Web Services Region. </p>
    pub fn mount_name(&self) -> std::option::Option<&str> {
        self.mount_name.as_deref()
    }
    /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
    pub fn daily_automatic_backup_start_time(&self) -> std::option::Option<&str> {
        self.daily_automatic_backup_start_time.as_deref()
    }
    /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
    pub fn automatic_backup_retention_days(&self) -> std::option::Option<i32> {
        self.automatic_backup_retention_days
    }
    /// <p>A boolean flag indicating whether tags on the file system are copied to backups. If it's set to true, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value. (Default = false)</p>
    pub fn copy_tags_to_backups(&self) -> std::option::Option<bool> {
        self.copy_tags_to_backups
    }
    /// <p>The type of drive cache used by <code>PERSISTENT_1</code> file systems that are provisioned with HDD storage devices. This parameter is required when <code>StorageType</code> is HDD. When set to <code>READ</code> the file system has an SSD storage cache that is sized to 20% of the file system's storage capacity. This improves the performance for frequently accessed files by caching up to 20% of the total storage capacity.</p>
    /// <p>This parameter is required when <code>StorageType</code> is set to HDD.</p>
    pub fn drive_cache_type(&self) -> std::option::Option<&crate::model::DriveCacheType> {
        self.drive_cache_type.as_ref()
    }
    /// <p>The data compression configuration for the file system. <code>DataCompressionType</code> can have the following values:</p>
    /// <ul>
    /// <li> <p> <code>NONE</code> - Data compression is turned off for the file system.</p> </li>
    /// <li> <p> <code>LZ4</code> - Data compression is turned on with the LZ4 algorithm.</p> </li>
    /// </ul>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/data-compression.html">Lustre data compression</a>.</p>
    pub fn data_compression_type(&self) -> std::option::Option<&crate::model::DataCompressionType> {
        self.data_compression_type.as_ref()
    }
    /// <p>The Lustre logging configuration. Lustre logging writes the enabled log events for your file system to Amazon CloudWatch Logs.</p>
    pub fn log_configuration(&self) -> std::option::Option<&crate::model::LustreLogConfiguration> {
        self.log_configuration.as_ref()
    }
    /// <p>The Lustre root squash configuration for an Amazon FSx for Lustre file system. When enabled, root squash restricts root-level access from clients that try to access your file system as a root user.</p>
    pub fn root_squash_configuration(
        &self,
    ) -> std::option::Option<&crate::model::LustreRootSquashConfiguration> {
        self.root_squash_configuration.as_ref()
    }
}
/// See [`LustreFileSystemConfiguration`](crate::model::LustreFileSystemConfiguration).
pub mod lustre_file_system_configuration {

    /// A builder for [`LustreFileSystemConfiguration`](crate::model::LustreFileSystemConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) weekly_maintenance_start_time: std::option::Option<std::string::String>,
        pub(crate) data_repository_configuration:
            std::option::Option<crate::model::DataRepositoryConfiguration>,
        pub(crate) deployment_type: std::option::Option<crate::model::LustreDeploymentType>,
        pub(crate) per_unit_storage_throughput: std::option::Option<i32>,
        pub(crate) mount_name: std::option::Option<std::string::String>,
        pub(crate) daily_automatic_backup_start_time: std::option::Option<std::string::String>,
        pub(crate) automatic_backup_retention_days: std::option::Option<i32>,
        pub(crate) copy_tags_to_backups: std::option::Option<bool>,
        pub(crate) drive_cache_type: std::option::Option<crate::model::DriveCacheType>,
        pub(crate) data_compression_type: std::option::Option<crate::model::DataCompressionType>,
        pub(crate) log_configuration: std::option::Option<crate::model::LustreLogConfiguration>,
        pub(crate) root_squash_configuration:
            std::option::Option<crate::model::LustreRootSquashConfiguration>,
    }
    impl Builder {
        /// <p>The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. Here, <code>d</code> is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.</p>
        pub fn weekly_maintenance_start_time(
            self,
            input: impl Into<std::string::String>,
        ) -> Self {
            Self {
                weekly_maintenance_start_time: Some(input.into()),
                ..self
            }
        }
        /// <p>The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. Here, <code>d</code> is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.</p>
        pub fn set_weekly_maintenance_start_time(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                weekly_maintenance_start_time: input,
                ..self
            }
        }
        /// <p>The data repository configuration object for Lustre file systems returned in the response of the <code>CreateFileSystem</code> operation.</p>
        /// <p>This data type is not supported for file systems with the <code>Persistent_2</code> deployment type. Instead, use data repository associations (see <code>DescribeDataRepositoryAssociations</code>).</p>
        pub fn data_repository_configuration(
            self,
            input: crate::model::DataRepositoryConfiguration,
        ) -> Self {
            Self {
                data_repository_configuration: Some(input),
                ..self
            }
        }
        /// <p>The data repository configuration object for Lustre file systems returned in the response of the <code>CreateFileSystem</code> operation.</p>
        /// <p>This data type is not supported for file systems with the <code>Persistent_2</code> deployment type. Instead, use data repository associations (see <code>DescribeDataRepositoryAssociations</code>).</p>
        pub fn set_data_repository_configuration(
            self,
            input: std::option::Option<crate::model::DataRepositoryConfiguration>,
        ) -> Self {
            Self {
                data_repository_configuration: input,
                ..self
            }
        }
        /// <p>The deployment type of the FSx for Lustre file system. <i>Scratch deployment type</i> is designed for temporary storage and shorter-term processing of data.</p>
        /// <p> <code>SCRATCH_1</code> and <code>SCRATCH_2</code> deployment types are best suited for when you need temporary storage and shorter-term processing of data. The <code>SCRATCH_2</code> deployment type provides in-transit encryption of data and higher burst throughput capacity than <code>SCRATCH_1</code>.</p>
        /// <p>The <code>PERSISTENT_1</code> and <code>PERSISTENT_2</code> deployment type is used for longer-term storage and workloads and encryption of data in transit. <code>PERSISTENT_2</code> is built on Lustre v2.12 and offers higher <code>PerUnitStorageThroughput</code> (up to 1000 MB/s/TiB) along with a lower minimum storage capacity requirement (600 GiB). To learn more about FSx for Lustre deployment types, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/lustre-deployment-types.html"> FSx for Lustre deployment options</a>.</p>
        /// <p>The default is <code>SCRATCH_1</code>.</p>
        pub fn deployment_type(self, input: crate::model::LustreDeploymentType) -> Self {
            Self {
                deployment_type: Some(input),
                ..self
            }
        }
        /// <p>The deployment type of the FSx for Lustre file system. <i>Scratch deployment type</i> is designed for temporary storage and shorter-term processing of data.</p>
        /// <p> <code>SCRATCH_1</code> and <code>SCRATCH_2</code> deployment types are best suited for when you need temporary storage and shorter-term processing of data. The <code>SCRATCH_2</code> deployment type provides in-transit encryption of data and higher burst throughput capacity than <code>SCRATCH_1</code>.</p>
        /// <p>The <code>PERSISTENT_1</code> and <code>PERSISTENT_2</code> deployment type is used for longer-term storage and workloads and encryption of data in transit. <code>PERSISTENT_2</code> is built on Lustre v2.12 and offers higher <code>PerUnitStorageThroughput</code> (up to 1000 MB/s/TiB) along with a lower minimum storage capacity requirement (600 GiB). To learn more about FSx for Lustre deployment types, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/lustre-deployment-types.html"> FSx for Lustre deployment options</a>.</p>
        /// <p>The default is <code>SCRATCH_1</code>.</p>
        pub fn set_deployment_type(
            self,
            input: std::option::Option<crate::model::LustreDeploymentType>,
        ) -> Self {
            Self {
                deployment_type: input,
                ..self
            }
        }
        /// <p>Per unit storage throughput represents the megabytes per second of read or write throughput per 1 tebibyte of storage provisioned. File system throughput capacity is equal to Storage capacity (TiB) * PerUnitStorageThroughput (MB/s/TiB). This option is only valid for <code>PERSISTENT_1</code> and <code>PERSISTENT_2</code> deployment types. </p>
        /// <p>Valid values:</p>
        /// <ul>
        /// <li> <p>For <code>PERSISTENT_1</code> SSD storage: 50, 100, 200.</p> </li>
        /// <li> <p>For <code>PERSISTENT_1</code> HDD storage: 12, 40.</p> </li>
        /// <li> <p>For <code>PERSISTENT_2</code> SSD storage: 125, 250, 500, 1000.</p> </li>
        /// </ul>
        pub fn per_unit_storage_throughput(self, input: i32) -> Self {
            Self {
                per_unit_storage_throughput: Some(input),
                ..self
            }
        }
        /// <p>Per unit storage throughput represents the megabytes per second of read or write throughput per 1 tebibyte of storage provisioned. File system throughput capacity is equal to Storage capacity (TiB) * PerUnitStorageThroughput (MB/s/TiB). This option is only valid for <code>PERSISTENT_1</code> and <code>PERSISTENT_2</code> deployment types. </p>
        /// <p>Valid values:</p>
        /// <ul>
        /// <li> <p>For <code>PERSISTENT_1</code> SSD storage: 50, 100, 200.</p> </li>
        /// <li> <p>For <code>PERSISTENT_1</code> HDD storage: 12, 40.</p> </li>
        /// <li> <p>For <code>PERSISTENT_2</code> SSD storage: 125, 250, 500, 1000.</p> </li>
        /// </ul>
        pub fn set_per_unit_storage_throughput(self, input: std::option::Option<i32>) -> Self {
            Self {
                per_unit_storage_throughput: input,
                ..self
            }
        }
        /// <p>You use the <code>MountName</code> value when mounting the file system.</p>
        /// <p>For the <code>SCRATCH_1</code> deployment type, this value is always "<code>fsx</code>". For <code>SCRATCH_2</code>, <code>PERSISTENT_1</code>, and <code>PERSISTENT_2</code> deployment types, this value is a string that is unique within an Amazon Web Services Region. </p>
        pub fn mount_name(self, input: impl Into<std::string::String>) -> Self {
            Self {
                mount_name: Some(input.into()),
                ..self
            }
        }
        /// <p>You use the <code>MountName</code> value when mounting the file system.</p>
        /// <p>For the <code>SCRATCH_1</code> deployment type, this value is always "<code>fsx</code>". For <code>SCRATCH_2</code>, <code>PERSISTENT_1</code>, and <code>PERSISTENT_2</code> deployment types, this value is a string that is unique within an Amazon Web Services Region. </p>
        pub fn set_mount_name(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                mount_name: input,
                ..self
            }
        }
        /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
        pub fn daily_automatic_backup_start_time(
            self,
            input: impl Into<std::string::String>,
        ) -> Self {
            Self {
                daily_automatic_backup_start_time: Some(input.into()),
                ..self
            }
        }
        /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
        pub fn set_daily_automatic_backup_start_time(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                daily_automatic_backup_start_time: input,
                ..self
            }
        }
        /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
        pub fn automatic_backup_retention_days(self, input: i32) -> Self {
            Self {
                automatic_backup_retention_days: Some(input),
                ..self
            }
        }
        /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
        pub fn set_automatic_backup_retention_days(
            self,
            input: std::option::Option<i32>,
        ) -> Self {
            Self {
                automatic_backup_retention_days: input,
                ..self
            }
        }
        /// <p>A boolean flag indicating whether tags on the file system are copied to backups. If it's set to true, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value. (Default = false)</p>
        pub fn copy_tags_to_backups(self, input: bool) -> Self {
            Self {
                copy_tags_to_backups: Some(input),
                ..self
            }
        }
        /// <p>A boolean flag indicating whether tags on the file system are copied to backups. If it's set to true, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value. (Default = false)</p>
        pub fn set_copy_tags_to_backups(self, input: std::option::Option<bool>) -> Self {
            Self {
                copy_tags_to_backups: input,
                ..self
            }
        }
        /// <p>The type of drive cache used by <code>PERSISTENT_1</code> file systems that are provisioned with HDD storage devices. This parameter is required when <code>StorageType</code> is HDD. When set to <code>READ</code> the file system has an SSD storage cache that is sized to 20% of the file system's storage capacity. This improves the performance for frequently accessed files by caching up to 20% of the total storage capacity.</p>
        /// <p>This parameter is required when <code>StorageType</code> is set to HDD.</p>
        pub fn drive_cache_type(self, input: crate::model::DriveCacheType) -> Self {
            Self {
                drive_cache_type: Some(input),
                ..self
            }
        }
        /// <p>The type of drive cache used by <code>PERSISTENT_1</code> file systems that are provisioned with HDD storage devices. This parameter is required when <code>StorageType</code> is HDD. When set to <code>READ</code> the file system has an SSD storage cache that is sized to 20% of the file system's storage capacity. This improves the performance for frequently accessed files by caching up to 20% of the total storage capacity.</p>
        /// <p>This parameter is required when <code>StorageType</code> is set to HDD.</p>
        pub fn set_drive_cache_type(
            self,
            input: std::option::Option<crate::model::DriveCacheType>,
        ) -> Self {
            Self {
                drive_cache_type: input,
                ..self
            }
        }
        /// <p>The data compression configuration for the file system. <code>DataCompressionType</code> can have the following values:</p>
        /// <ul>
        /// <li> <p> <code>NONE</code> - Data compression is turned off for the file system.</p> </li>
        /// <li> <p> <code>LZ4</code> - Data compression is turned on with the LZ4 algorithm.</p> </li>
        /// </ul>
        /// <p>For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/data-compression.html">Lustre data compression</a>.</p>
        pub fn data_compression_type(self, input: crate::model::DataCompressionType) -> Self {
            Self {
                data_compression_type: Some(input),
                ..self
            }
        }
        /// <p>The data compression configuration for the file system. <code>DataCompressionType</code> can have the following values:</p>
        /// <ul>
        /// <li> <p> <code>NONE</code> - Data compression is turned off for the file system.</p> </li>
        /// <li> <p> <code>LZ4</code> - Data compression is turned on with the LZ4 algorithm.</p> </li>
        /// </ul>
        /// <p>For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/data-compression.html">Lustre data compression</a>.</p>
        pub fn set_data_compression_type(
            self,
            input: std::option::Option<crate::model::DataCompressionType>,
        ) -> Self {
            Self {
                data_compression_type: input,
                ..self
            }
        }
        /// <p>The Lustre logging configuration. Lustre logging writes the enabled log events for your file system to Amazon CloudWatch Logs.</p>
        pub fn log_configuration(self, input: crate::model::LustreLogConfiguration) -> Self {
            Self {
                log_configuration: Some(input),
                ..self
            }
        }
        /// <p>The Lustre logging configuration. Lustre logging writes the enabled log events for your file system to Amazon CloudWatch Logs.</p>
        pub fn set_log_configuration(
            self,
            input: std::option::Option<crate::model::LustreLogConfiguration>,
        ) -> Self {
            Self {
                log_configuration: input,
                ..self
            }
        }
        /// <p>The Lustre root squash configuration for an Amazon FSx for Lustre file system. When enabled, root squash restricts root-level access from clients that try to access your file system as a root user.</p>
        pub fn root_squash_configuration(
            self,
            input: crate::model::LustreRootSquashConfiguration,
        ) -> Self {
            Self {
                root_squash_configuration: Some(input),
                ..self
            }
        }
        /// <p>The Lustre root squash configuration for an Amazon FSx for Lustre file system. When enabled, root squash restricts root-level access from clients that try to access your file system as a root user.</p>
        pub fn set_root_squash_configuration(
            self,
            input: std::option::Option<crate::model::LustreRootSquashConfiguration>,
        ) -> Self {
            Self {
                root_squash_configuration: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`LustreFileSystemConfiguration`](crate::model::LustreFileSystemConfiguration).
        pub fn build(self) -> crate::model::LustreFileSystemConfiguration {
            // Destructure once so every field is moved out exactly as set on the builder.
            let Builder {
                weekly_maintenance_start_time,
                data_repository_configuration,
                deployment_type,
                per_unit_storage_throughput,
                mount_name,
                daily_automatic_backup_start_time,
                automatic_backup_retention_days,
                copy_tags_to_backups,
                drive_cache_type,
                data_compression_type,
                log_configuration,
                root_squash_configuration,
            } = self;
            crate::model::LustreFileSystemConfiguration {
                weekly_maintenance_start_time,
                data_repository_configuration,
                deployment_type,
                per_unit_storage_throughput,
                mount_name,
                daily_automatic_backup_start_time,
                automatic_backup_retention_days,
                copy_tags_to_backups,
                drive_cache_type,
                data_compression_type,
                log_configuration,
                root_squash_configuration,
            }
        }
    }
}
impl LustreFileSystemConfiguration {
    /// Creates a new builder-style object to manufacture [`LustreFileSystemConfiguration`](crate::model::LustreFileSystemConfiguration).
    pub fn builder() -> crate::model::lustre_file_system_configuration::Builder {
        // The return type fixes the concrete `Builder`; let inference pick it.
        std::default::Default::default()
    }
}

/// <p>The configuration for Lustre root squash used to restrict root-level access from clients that try to access your FSx for Lustre file system as root. Use the <code>RootSquash</code> parameter to enable root squash. To learn more about Lustre root squash, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/root-squash.html">Lustre root squash</a>.</p>
/// <p>You can also use the <code>NoSquashNids</code> parameter to provide an array of clients who are not affected by the root squash setting. These clients will access the file system as root, with unrestricted privileges.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct LustreRootSquashConfiguration {
    // Fields below are `#[doc(hidden)]`: read them through the accessor methods
    // on `LustreRootSquashConfiguration`, and construct via its `builder()`.
    /// <p>You enable root squash by setting a user ID (UID) and group ID (GID) for the file system in the format <code>UID:GID</code> (for example, <code>365534:65534</code>). The UID and GID values can range from <code>0</code> to <code>4294967294</code>:</p>
    /// <ul>
    /// <li> <p>A non-zero value for UID and GID enables root squash. The UID and GID values can be different, but each must be a non-zero value.</p> </li>
    /// <li> <p>A value of <code>0</code> (zero) for UID and GID indicates root, and therefore disables root squash.</p> </li>
    /// </ul>
    /// <p>When root squash is enabled, the user ID and group ID of a root user accessing the file system are re-mapped to the UID and GID you provide.</p>
    #[doc(hidden)]
    pub root_squash: std::option::Option<std::string::String>,
    /// <p>When root squash is enabled, you can optionally specify an array of NIDs of clients for which root squash does not apply. A client NID is a Lustre Network Identifier used to uniquely identify a client. You can specify the NID as either a single address or a range of addresses:</p>
    /// <ul>
    /// <li> <p>A single address is described in standard Lustre NID format by specifying the client’s IP address followed by the Lustre network ID (for example, <code>10.0.1.6@tcp</code>).</p> </li>
    /// <li> <p>An address range is described using a dash to separate the range (for example, <code>10.0.[2-10].[1-255]@tcp</code>).</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub no_squash_nids: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl LustreRootSquashConfiguration {
    /// <p>You enable root squash by setting a user ID (UID) and group ID (GID) for the file system in the format <code>UID:GID</code> (for example, <code>365534:65534</code>). The UID and GID values can range from <code>0</code> to <code>4294967294</code>:</p>
    /// <ul>
    /// <li> <p>A non-zero value for UID and GID enables root squash. The UID and GID values can be different, but each must be a non-zero value.</p> </li>
    /// <li> <p>A value of <code>0</code> (zero) for UID and GID indicates root, and therefore disables root squash.</p> </li>
    /// </ul>
    /// <p>When root squash is enabled, the user ID and group ID of a root user accessing the file system are re-mapped to the UID and GID you provide.</p>
    pub fn root_squash(&self) -> std::option::Option<&str> {
        // Borrow the owned String, if any, as a &str slice.
        self.root_squash.as_ref().map(|uid_gid| uid_gid.as_str())
    }
    /// <p>When root squash is enabled, you can optionally specify an array of NIDs of clients for which root squash does not apply. A client NID is a Lustre Network Identifier used to uniquely identify a client. You can specify the NID as either a single address or a range of addresses:</p>
    /// <ul>
    /// <li> <p>A single address is described in standard Lustre NID format by specifying the client’s IP address followed by the Lustre network ID (for example, <code>10.0.1.6@tcp</code>).</p> </li>
    /// <li> <p>An address range is described using a dash to separate the range (for example, <code>10.0.[2-10].[1-255]@tcp</code>).</p> </li>
    /// </ul>
    pub fn no_squash_nids(&self) -> std::option::Option<&[std::string::String]> {
        // Borrow the owned Vec, if any, as a shared slice.
        self.no_squash_nids.as_ref().map(|nids| nids.as_slice())
    }
}
/// See [`LustreRootSquashConfiguration`](crate::model::LustreRootSquashConfiguration).
pub mod lustre_root_squash_configuration {

    /// A builder for [`LustreRootSquashConfiguration`](crate::model::LustreRootSquashConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) root_squash: std::option::Option<std::string::String>,
        pub(crate) no_squash_nids: std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// <p>You enable root squash by setting a user ID (UID) and group ID (GID) for the file system in the format <code>UID:GID</code> (for example, <code>365534:65534</code>). The UID and GID values can range from <code>0</code> to <code>4294967294</code>:</p>
        /// <ul>
        /// <li> <p>A non-zero value for UID and GID enables root squash. The UID and GID values can be different, but each must be a non-zero value.</p> </li>
        /// <li> <p>A value of <code>0</code> (zero) for UID and GID indicates root, and therefore disables root squash.</p> </li>
        /// </ul>
        /// <p>When root squash is enabled, the user ID and group ID of a root user accessing the file system are re-mapped to the UID and GID you provide.</p>
        pub fn root_squash(self, input: impl Into<std::string::String>) -> Self {
            Self {
                root_squash: Some(input.into()),
                ..self
            }
        }
        /// <p>You enable root squash by setting a user ID (UID) and group ID (GID) for the file system in the format <code>UID:GID</code> (for example, <code>365534:65534</code>). The UID and GID values can range from <code>0</code> to <code>4294967294</code>:</p>
        /// <ul>
        /// <li> <p>A non-zero value for UID and GID enables root squash. The UID and GID values can be different, but each must be a non-zero value.</p> </li>
        /// <li> <p>A value of <code>0</code> (zero) for UID and GID indicates root, and therefore disables root squash.</p> </li>
        /// </ul>
        /// <p>When root squash is enabled, the user ID and group ID of a root user accessing the file system are re-mapped to the UID and GID you provide.</p>
        pub fn set_root_squash(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                root_squash: input,
                ..self
            }
        }
        /// Appends an item to `no_squash_nids`.
        ///
        /// To override the contents of this collection use [`set_no_squash_nids`](Self::set_no_squash_nids).
        ///
        /// <p>When root squash is enabled, you can optionally specify an array of NIDs of clients for which root squash does not apply. A client NID is a Lustre Network Identifier used to uniquely identify a client. You can specify the NID as either a single address or a range of addresses:</p>
        /// <ul>
        /// <li> <p>A single address is described in standard Lustre NID format by specifying the client’s IP address followed by the Lustre network ID (for example, <code>10.0.1.6@tcp</code>).</p> </li>
        /// <li> <p>An address range is described using a dash to separate the range (for example, <code>10.0.[2-10].[1-255]@tcp</code>).</p> </li>
        /// </ul>
        pub fn no_squash_nids(mut self, input: impl Into<std::string::String>) -> Self {
            // Lazily create the Vec on first append, then push in place.
            self.no_squash_nids
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// <p>When root squash is enabled, you can optionally specify an array of NIDs of clients for which root squash does not apply. A client NID is a Lustre Network Identifier used to uniquely identify a client. You can specify the NID as either a single address or a range of addresses:</p>
        /// <ul>
        /// <li> <p>A single address is described in standard Lustre NID format by specifying the client’s IP address followed by the Lustre network ID (for example, <code>10.0.1.6@tcp</code>).</p> </li>
        /// <li> <p>An address range is described using a dash to separate the range (for example, <code>10.0.[2-10].[1-255]@tcp</code>).</p> </li>
        /// </ul>
        pub fn set_no_squash_nids(
            self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            Self {
                no_squash_nids: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`LustreRootSquashConfiguration`](crate::model::LustreRootSquashConfiguration).
        pub fn build(self) -> crate::model::LustreRootSquashConfiguration {
            let Builder {
                root_squash,
                no_squash_nids,
            } = self;
            crate::model::LustreRootSquashConfiguration {
                root_squash,
                no_squash_nids,
            }
        }
    }
}
impl LustreRootSquashConfiguration {
    /// Creates a new builder-style object to manufacture [`LustreRootSquashConfiguration`](crate::model::LustreRootSquashConfiguration).
    pub fn builder() -> crate::model::lustre_root_squash_configuration::Builder {
        // The return type fixes the concrete `Builder`; let inference pick it.
        std::default::Default::default()
    }
}

/// <p>The configuration for Lustre logging used to write the enabled logging events for your Amazon FSx for Lustre file system or Amazon File Cache resource to Amazon CloudWatch Logs.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct LustreLogConfiguration {
    // Fields below are `#[doc(hidden)]`: read them through the accessor methods
    // on `LustreLogConfiguration`, and construct via its `builder()`.
    /// <p>The data repository events that are logged by Amazon FSx.</p>
    /// <ul>
    /// <li> <p> <code>WARN_ONLY</code> - only warning events are logged.</p> </li>
    /// <li> <p> <code>ERROR_ONLY</code> - only error events are logged.</p> </li>
    /// <li> <p> <code>WARN_ERROR</code> - both warning events and error events are logged.</p> </li>
    /// <li> <p> <code>DISABLED</code> - logging of data repository events is turned off.</p> </li>
    /// </ul>
    /// <p>Note that Amazon File Cache uses a default setting of <code>WARN_ERROR</code>, which can't be changed.</p>
    #[doc(hidden)]
    pub level: std::option::Option<crate::model::LustreAccessAuditLogLevel>,
    /// <p>The Amazon Resource Name (ARN) that specifies the destination of the logs. The destination can be any Amazon CloudWatch Logs log group ARN. The destination ARN must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.</p>
    #[doc(hidden)]
    pub destination: std::option::Option<std::string::String>,
}
impl LustreLogConfiguration {
    /// <p>The data repository events that are logged by Amazon FSx.</p>
    /// <ul>
    /// <li> <p> <code>WARN_ONLY</code> - only warning events are logged.</p> </li>
    /// <li> <p> <code>ERROR_ONLY</code> - only error events are logged.</p> </li>
    /// <li> <p> <code>WARN_ERROR</code> - both warning events and error events are logged.</p> </li>
    /// <li> <p> <code>DISABLED</code> - logging of data repository events is turned off.</p> </li>
    /// </ul>
    /// <p>Note that Amazon File Cache uses a default setting of <code>WARN_ERROR</code>, which can't be changed.</p>
    pub fn level(&self) -> std::option::Option<&crate::model::LustreAccessAuditLogLevel> {
        // Borrow the enum value, if one was set.
        match &self.level {
            Some(level) => Some(level),
            None => None,
        }
    }
    /// <p>The Amazon Resource Name (ARN) that specifies the destination of the logs. The destination can be any Amazon CloudWatch Logs log group ARN. The destination ARN must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.</p>
    pub fn destination(&self) -> std::option::Option<&str> {
        // Borrow the owned String, if any, as a &str slice.
        self.destination.as_ref().map(|arn| arn.as_str())
    }
}
/// See [`LustreLogConfiguration`](crate::model::LustreLogConfiguration).
pub mod lustre_log_configuration {

    /// A builder for [`LustreLogConfiguration`](crate::model::LustreLogConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) level: std::option::Option<crate::model::LustreAccessAuditLogLevel>,
        pub(crate) destination: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The data repository events that are logged by Amazon FSx.</p>
        /// <ul>
        /// <li> <p> <code>WARN_ONLY</code> - only warning events are logged.</p> </li>
        /// <li> <p> <code>ERROR_ONLY</code> - only error events are logged.</p> </li>
        /// <li> <p> <code>WARN_ERROR</code> - both warning events and error events are logged.</p> </li>
        /// <li> <p> <code>DISABLED</code> - logging of data repository events is turned off.</p> </li>
        /// </ul>
        /// <p>Note that Amazon File Cache uses a default setting of <code>WARN_ERROR</code>, which can't be changed.</p>
        pub fn level(self, input: crate::model::LustreAccessAuditLogLevel) -> Self {
            Self {
                level: Some(input),
                ..self
            }
        }
        /// <p>The data repository events that are logged by Amazon FSx.</p>
        /// <ul>
        /// <li> <p> <code>WARN_ONLY</code> - only warning events are logged.</p> </li>
        /// <li> <p> <code>ERROR_ONLY</code> - only error events are logged.</p> </li>
        /// <li> <p> <code>WARN_ERROR</code> - both warning events and error events are logged.</p> </li>
        /// <li> <p> <code>DISABLED</code> - logging of data repository events is turned off.</p> </li>
        /// </ul>
        /// <p>Note that Amazon File Cache uses a default setting of <code>WARN_ERROR</code>, which can't be changed.</p>
        pub fn set_level(
            self,
            input: std::option::Option<crate::model::LustreAccessAuditLogLevel>,
        ) -> Self {
            Self {
                level: input,
                ..self
            }
        }
        /// <p>The Amazon Resource Name (ARN) that specifies the destination of the logs. The destination can be any Amazon CloudWatch Logs log group ARN. The destination ARN must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.</p>
        pub fn destination(self, input: impl Into<std::string::String>) -> Self {
            Self {
                destination: Some(input.into()),
                ..self
            }
        }
        /// <p>The Amazon Resource Name (ARN) that specifies the destination of the logs. The destination can be any Amazon CloudWatch Logs log group ARN. The destination ARN must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.</p>
        pub fn set_destination(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                destination: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`LustreLogConfiguration`](crate::model::LustreLogConfiguration).
        pub fn build(self) -> crate::model::LustreLogConfiguration {
            let Builder { level, destination } = self;
            crate::model::LustreLogConfiguration { level, destination }
        }
    }
}
impl LustreLogConfiguration {
    /// Creates a new builder-style object to manufacture [`LustreLogConfiguration`](crate::model::LustreLogConfiguration).
    pub fn builder() -> crate::model::lustre_log_configuration::Builder {
        // The return type pins the concrete `Builder`, so `Default` resolves to it.
        Default::default()
    }
}

/// When writing a match expression against `LustreAccessAuditLogLevel`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let lustreaccessauditloglevel = unimplemented!();
/// match lustreaccessauditloglevel {
///     LustreAccessAuditLogLevel::Disabled => { /* ... */ },
///     LustreAccessAuditLogLevel::ErrorOnly => { /* ... */ },
///     LustreAccessAuditLogLevel::WarnError => { /* ... */ },
///     LustreAccessAuditLogLevel::WarnOnly => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `lustreaccessauditloglevel` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `LustreAccessAuditLogLevel::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `LustreAccessAuditLogLevel::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `LustreAccessAuditLogLevel::NewFeature` is defined.
/// Specifically, when `lustreaccessauditloglevel` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `LustreAccessAuditLogLevel::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum LustreAccessAuditLogLevel {
    /// Wire value <code>DISABLED</code> - logging of data repository events is turned off.
    #[allow(missing_docs)] // documentation missing in model
    Disabled,
    /// Wire value <code>ERROR_ONLY</code> - only error events are logged.
    #[allow(missing_docs)] // documentation missing in model
    ErrorOnly,
    /// Wire value <code>WARN_ERROR</code> - both warning events and error events are logged.
    #[allow(missing_docs)] // documentation missing in model
    WarnError,
    /// Wire value <code>WARN_ONLY</code> - only warning events are logged.
    #[allow(missing_docs)] // documentation missing in model
    WarnOnly,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for LustreAccessAuditLogLevel {
    fn from(s: &str) -> Self {
        match s {
            "DISABLED" => Self::Disabled,
            "ERROR_ONLY" => Self::ErrorOnly,
            "WARN_ERROR" => Self::WarnError,
            "WARN_ONLY" => Self::WarnOnly,
            // Unrecognized wire values are preserved for forward compatibility.
            unknown => Self::Unknown(crate::types::UnknownVariantValue(unknown.to_owned())),
        }
    }
}
impl std::str::FromStr for LustreAccessAuditLogLevel {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Delegates to `From<&str>`; parsing can never fail because every
        // unrecognized string maps to the `Unknown` variant.
        Ok(s.into())
    }
}
impl LustreAccessAuditLogLevel {
    /// Returns the `&str` wire value of this enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Disabled => "DISABLED",
            Self::ErrorOnly => "ERROR_ONLY",
            Self::WarnError => "WARN_ERROR",
            Self::WarnOnly => "WARN_ONLY",
            Self::Unknown(inner) => inner.as_str(),
        }
    }
    /// Returns the `&str` wire values of all known enum members.
    pub const fn values() -> &'static [&'static str] {
        &["DISABLED", "ERROR_ONLY", "WARN_ERROR", "WARN_ONLY"]
    }
}
impl AsRef<str> for LustreAccessAuditLogLevel {
    fn as_ref(&self) -> &str {
        LustreAccessAuditLogLevel::as_str(self)
    }
}

/// When writing a match expression against `DataCompressionType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let datacompressiontype = unimplemented!();
/// match datacompressiontype {
///     DataCompressionType::Lz4 => { /* ... */ },
///     DataCompressionType::None => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `datacompressiontype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `DataCompressionType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `DataCompressionType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `DataCompressionType::NewFeature` is defined.
/// Specifically, when `datacompressiontype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `DataCompressionType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum DataCompressionType {
    /// Wire value <code>LZ4</code>.
    #[allow(missing_docs)] // documentation missing in model
    Lz4,
    /// Wire value <code>NONE</code>.
    #[allow(missing_docs)] // documentation missing in model
    None,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for DataCompressionType {
    fn from(s: &str) -> Self {
        match s {
            "LZ4" => Self::Lz4,
            "NONE" => Self::None,
            // Unrecognized wire values are preserved for forward compatibility.
            unknown => Self::Unknown(crate::types::UnknownVariantValue(unknown.to_owned())),
        }
    }
}
impl std::str::FromStr for DataCompressionType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Delegates to `From<&str>`; never fails — unknown strings become `Unknown`.
        Ok(s.into())
    }
}
impl DataCompressionType {
    /// Returns the `&str` wire value of this enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Lz4 => "LZ4",
            Self::None => "NONE",
            Self::Unknown(inner) => inner.as_str(),
        }
    }
    /// Returns the `&str` wire values of all known enum members.
    pub const fn values() -> &'static [&'static str] {
        &["LZ4", "NONE"]
    }
}
impl AsRef<str> for DataCompressionType {
    fn as_ref(&self) -> &str {
        DataCompressionType::as_str(self)
    }
}

/// When writing a match expression against `DriveCacheType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let drivecachetype = unimplemented!();
/// match drivecachetype {
///     DriveCacheType::None => { /* ... */ },
///     DriveCacheType::Read => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `drivecachetype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `DriveCacheType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `DriveCacheType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `DriveCacheType::NewFeature` is defined.
/// Specifically, when `drivecachetype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `DriveCacheType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum DriveCacheType {
    /// Wire value <code>NONE</code>.
    #[allow(missing_docs)] // documentation missing in model
    None,
    /// Wire value <code>READ</code>.
    #[allow(missing_docs)] // documentation missing in model
    Read,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for DriveCacheType {
    fn from(s: &str) -> Self {
        match s {
            "NONE" => Self::None,
            "READ" => Self::Read,
            // Unrecognized wire values are preserved for forward compatibility.
            unknown => Self::Unknown(crate::types::UnknownVariantValue(unknown.to_owned())),
        }
    }
}
impl std::str::FromStr for DriveCacheType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Delegates to `From<&str>`; never fails — unknown strings become `Unknown`.
        Ok(s.into())
    }
}
impl DriveCacheType {
    /// Returns the `&str` wire value of this enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::None => "NONE",
            Self::Read => "READ",
            Self::Unknown(inner) => inner.as_str(),
        }
    }
    /// Returns the `&str` wire values of all known enum members.
    pub const fn values() -> &'static [&'static str] {
        &["NONE", "READ"]
    }
}
impl AsRef<str> for DriveCacheType {
    fn as_ref(&self) -> &str {
        DriveCacheType::as_str(self)
    }
}

/// When writing a match expression against `LustreDeploymentType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let lustredeploymenttype = unimplemented!();
/// match lustredeploymenttype {
///     LustreDeploymentType::Persistent1 => { /* ... */ },
///     LustreDeploymentType::Persistent2 => { /* ... */ },
///     LustreDeploymentType::Scratch1 => { /* ... */ },
///     LustreDeploymentType::Scratch2 => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `lustredeploymenttype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `LustreDeploymentType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `LustreDeploymentType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `LustreDeploymentType::NewFeature` is defined.
/// Specifically, when `lustredeploymenttype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `LustreDeploymentType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum LustreDeploymentType {
    /// Wire value <code>PERSISTENT_1</code>.
    #[allow(missing_docs)] // documentation missing in model
    Persistent1,
    /// Wire value <code>PERSISTENT_2</code>.
    #[allow(missing_docs)] // documentation missing in model
    Persistent2,
    /// Wire value <code>SCRATCH_1</code>.
    #[allow(missing_docs)] // documentation missing in model
    Scratch1,
    /// Wire value <code>SCRATCH_2</code>.
    #[allow(missing_docs)] // documentation missing in model
    Scratch2,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for LustreDeploymentType {
    fn from(s: &str) -> Self {
        match s {
            "PERSISTENT_1" => Self::Persistent1,
            "PERSISTENT_2" => Self::Persistent2,
            "SCRATCH_1" => Self::Scratch1,
            "SCRATCH_2" => Self::Scratch2,
            // Unrecognized wire values are preserved for forward compatibility.
            unknown => Self::Unknown(crate::types::UnknownVariantValue(unknown.to_owned())),
        }
    }
}
impl std::str::FromStr for LustreDeploymentType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Delegates to `From<&str>`; never fails — unknown strings become `Unknown`.
        Ok(s.into())
    }
}
impl LustreDeploymentType {
    /// Returns the `&str` wire value of this enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Persistent1 => "PERSISTENT_1",
            Self::Persistent2 => "PERSISTENT_2",
            Self::Scratch1 => "SCRATCH_1",
            Self::Scratch2 => "SCRATCH_2",
            Self::Unknown(inner) => inner.as_str(),
        }
    }
    /// Returns the `&str` wire values of all known enum members.
    pub const fn values() -> &'static [&'static str] {
        &["PERSISTENT_1", "PERSISTENT_2", "SCRATCH_1", "SCRATCH_2"]
    }
}
impl AsRef<str> for LustreDeploymentType {
    fn as_ref(&self) -> &str {
        LustreDeploymentType::as_str(self)
    }
}

/// <p>The data repository configuration object for Lustre file systems returned in the response of the <code>CreateFileSystem</code> operation.</p>
/// <p>This data type is not supported for file systems with the <code>Persistent_2</code> deployment type. Instead, use data repository associations (the <code>DataRepositoryAssociation</code> data type). (NOTE(review): the generated doc ended with a dangling "use ." where a link was dropped; confirm the intended target against the Amazon FSx API reference.)</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DataRepositoryConfiguration {
    /// <p>Describes the state of the file system's S3 durable data repository, if it is configured with an S3 repository. The lifecycle can have the following values:</p>
    /// <ul>
    /// <li> <p> <code>CREATING</code> - The data repository configuration between the FSx file system and the linked S3 data repository is being created. The data repository is unavailable.</p> </li>
    /// <li> <p> <code>AVAILABLE</code> - The data repository is available for use.</p> </li>
    /// <li> <p> <code>MISCONFIGURED</code> - Amazon FSx cannot automatically import updates from the S3 bucket until the data repository configuration is corrected. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/troubleshooting.html#troubleshooting-misconfigured-data-repository">Troubleshooting a Misconfigured linked S3 bucket</a>. </p> </li>
    /// <li> <p> <code>UPDATING</code> - The data repository is undergoing a customer initiated update and availability may be impacted.</p> </li>
    /// <li> <p> <code>FAILED</code> - The data repository is in a terminal state that cannot be recovered.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub lifecycle: std::option::Option<crate::model::DataRepositoryLifecycle>,
    /// <p>The import path to the Amazon S3 bucket (and optional prefix) that you're using as the data repository for your FSx for Lustre file system, for example <code>s3://import-bucket/optional-prefix</code>. If a prefix is specified after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system.</p>
    #[doc(hidden)]
    pub import_path: std::option::Option<std::string::String>,
    /// <p>The export path to the Amazon S3 bucket (and prefix) that you are using to store new and changed Lustre file system files in S3.</p>
    #[doc(hidden)]
    pub export_path: std::option::Option<std::string::String>,
    /// <p>For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.</p>
    /// <p>The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.</p>
    #[doc(hidden)]
    pub imported_file_chunk_size: std::option::Option<i32>,
    /// <p>Describes the file system's linked S3 data repository's <code>AutoImportPolicy</code>. The AutoImportPolicy configures how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. <code>AutoImportPolicy</code> can have the following values:</p>
    /// <ul>
    /// <li> <p> <code>NONE</code> - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or changed objects after choosing this option.</p> </li>
    /// <li> <p> <code>NEW</code> - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system. </p> </li>
    /// <li> <p> <code>NEW_CHANGED</code> - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option.</p> </li>
    /// <li> <p> <code>NEW_CHANGED_DELETED</code> - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub auto_import_policy: std::option::Option<crate::model::AutoImportPolicyType>,
    /// <p>Provides detailed information about the data repository if its <code>Lifecycle</code> is set to <code>MISCONFIGURED</code> or <code>FAILED</code>.</p>
    #[doc(hidden)]
    pub failure_details: std::option::Option<crate::model::DataRepositoryFailureDetails>,
}
impl DataRepositoryConfiguration {
    /// <p>Returns the state of the file system's S3 durable data repository, if one is configured: <code>CREATING</code>, <code>AVAILABLE</code>, <code>MISCONFIGURED</code>, <code>UPDATING</code>, or <code>FAILED</code>. For the <code>MISCONFIGURED</code> state, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/troubleshooting.html#troubleshooting-misconfigured-data-repository">Troubleshooting a Misconfigured linked S3 bucket</a>.</p>
    pub fn lifecycle(&self) -> std::option::Option<&crate::model::DataRepositoryLifecycle> {
        self.lifecycle.as_ref()
    }
    /// <p>Returns the import path to the linked Amazon S3 bucket (and optional prefix), for example <code>s3://import-bucket/optional-prefix</code>. When a prefix is present, only object keys with that prefix are loaded into the file system.</p>
    pub fn import_path(&self) -> std::option::Option<&str> {
        self.import_path.as_deref()
    }
    /// <p>Returns the export path to the Amazon S3 bucket (and prefix) used to store new and changed Lustre file system files in S3.</p>
    pub fn export_path(&self) -> std::option::Option<&str> {
        self.export_path.as_deref()
    }
    /// <p>Returns the per-disk chunk size (in MiB) for imported files, which determines the stripe count and maximum data per file on a single physical disk. The default is 1,024 MiB (1 GiB); the maximum is 512,000 MiB (500 GiB).</p>
    pub fn imported_file_chunk_size(&self) -> std::option::Option<i32> {
        self.imported_file_chunk_size
    }
    /// <p>Returns the linked S3 repository's <code>AutoImportPolicy</code>, which controls how Amazon FSx keeps file and directory listings up to date: <code>NONE</code> (default, off), <code>NEW</code>, <code>NEW_CHANGED</code>, or <code>NEW_CHANGED_DELETED</code>.</p>
    pub fn auto_import_policy(&self) -> std::option::Option<&crate::model::AutoImportPolicyType> {
        self.auto_import_policy.as_ref()
    }
    /// <p>Returns detailed failure information when <code>Lifecycle</code> is <code>MISCONFIGURED</code> or <code>FAILED</code>.</p>
    pub fn failure_details(
        &self,
    ) -> std::option::Option<&crate::model::DataRepositoryFailureDetails> {
        self.failure_details.as_ref()
    }
}
/// See [`DataRepositoryConfiguration`](crate::model::DataRepositoryConfiguration).
pub mod data_repository_configuration {

    /// A builder for [`DataRepositoryConfiguration`](crate::model::DataRepositoryConfiguration).
    ///
    /// Each field mirrors the field of the same name on the target struct; all
    /// fields start unset (`None`, via the derived `Default`) and are populated
    /// through the builder's setter methods.
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) lifecycle: std::option::Option<crate::model::DataRepositoryLifecycle>,
        pub(crate) import_path: std::option::Option<std::string::String>,
        pub(crate) export_path: std::option::Option<std::string::String>,
        pub(crate) imported_file_chunk_size: std::option::Option<i32>,
        pub(crate) auto_import_policy: std::option::Option<crate::model::AutoImportPolicyType>,
        pub(crate) failure_details: std::option::Option<crate::model::DataRepositoryFailureDetails>,
    }
    impl Builder {
        /// <p>Describes the state of the file system's S3 durable data repository, if it is configured with an S3 repository. The lifecycle can have the following values:</p>
        /// <ul>
        /// <li> <p> <code>CREATING</code> - The data repository configuration between the FSx file system and the linked S3 data repository is being created. The data repository is unavailable.</p> </li>
        /// <li> <p> <code>AVAILABLE</code> - The data repository is available for use.</p> </li>
        /// <li> <p> <code>MISCONFIGURED</code> - Amazon FSx cannot automatically import updates from the S3 bucket until the data repository configuration is corrected. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/troubleshooting.html#troubleshooting-misconfigured-data-repository">Troubleshooting a Misconfigured linked S3 bucket</a>. </p> </li>
        /// <li> <p> <code>UPDATING</code> - The data repository is undergoing a customer initiated update and availability may be impacted.</p> </li>
        /// <li> <p> <code>FAILED</code> - The data repository is in a terminal state that cannot be recovered.</p> </li>
        /// </ul>
        pub fn lifecycle(mut self, input: crate::model::DataRepositoryLifecycle) -> Self {
            self.lifecycle = Some(input);
            self
        }
        /// <p>Describes the state of the file system's S3 durable data repository, if it is configured with an S3 repository. The lifecycle can have the following values:</p>
        /// <ul>
        /// <li> <p> <code>CREATING</code> - The data repository configuration between the FSx file system and the linked S3 data repository is being created. The data repository is unavailable.</p> </li>
        /// <li> <p> <code>AVAILABLE</code> - The data repository is available for use.</p> </li>
        /// <li> <p> <code>MISCONFIGURED</code> - Amazon FSx cannot automatically import updates from the S3 bucket until the data repository configuration is corrected. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/troubleshooting.html#troubleshooting-misconfigured-data-repository">Troubleshooting a Misconfigured linked S3 bucket</a>. </p> </li>
        /// <li> <p> <code>UPDATING</code> - The data repository is undergoing a customer initiated update and availability may be impacted.</p> </li>
        /// <li> <p> <code>FAILED</code> - The data repository is in a terminal state that cannot be recovered.</p> </li>
        /// </ul>
        pub fn set_lifecycle(
            mut self,
            input: std::option::Option<crate::model::DataRepositoryLifecycle>,
        ) -> Self {
            self.lifecycle = input;
            self
        }
        /// <p>The import path to the Amazon S3 bucket (and optional prefix) that you're using as the data repository for your FSx for Lustre file system, for example <code>s3://import-bucket/optional-prefix</code>. If a prefix is specified after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system.</p>
        pub fn import_path(mut self, input: impl Into<std::string::String>) -> Self {
            self.import_path = Some(input.into());
            self
        }
        /// <p>The import path to the Amazon S3 bucket (and optional prefix) that you're using as the data repository for your FSx for Lustre file system, for example <code>s3://import-bucket/optional-prefix</code>. If a prefix is specified after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system.</p>
        pub fn set_import_path(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.import_path = input;
            self
        }
        /// <p>The export path to the Amazon S3 bucket (and prefix) that you are using to store new and changed Lustre file system files in S3.</p>
        pub fn export_path(mut self, input: impl Into<std::string::String>) -> Self {
            self.export_path = Some(input.into());
            self
        }
        /// <p>The export path to the Amazon S3 bucket (and prefix) that you are using to store new and changed Lustre file system files in S3.</p>
        pub fn set_export_path(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.export_path = input;
            self
        }
        /// <p>For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.</p>
        /// <p>The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.</p>
        pub fn imported_file_chunk_size(mut self, input: i32) -> Self {
            self.imported_file_chunk_size = Some(input);
            self
        }
        /// <p>For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.</p>
        /// <p>The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.</p>
        pub fn set_imported_file_chunk_size(mut self, input: std::option::Option<i32>) -> Self {
            self.imported_file_chunk_size = input;
            self
        }
        /// <p>Describes the file system's linked S3 data repository's <code>AutoImportPolicy</code>. The AutoImportPolicy configures how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. <code>AutoImportPolicy</code> can have the following values:</p>
        /// <ul>
        /// <li> <p> <code>NONE</code> - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or changed objects after choosing this option.</p> </li>
        /// <li> <p> <code>NEW</code> - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system. </p> </li>
        /// <li> <p> <code>NEW_CHANGED</code> - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option.</p> </li>
        /// <li> <p> <code>NEW_CHANGED_DELETED</code> - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.</p> </li>
        /// </ul>
        pub fn auto_import_policy(mut self, input: crate::model::AutoImportPolicyType) -> Self {
            self.auto_import_policy = Some(input);
            self
        }
        /// <p>Describes the file system's linked S3 data repository's <code>AutoImportPolicy</code>. The AutoImportPolicy configures how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. <code>AutoImportPolicy</code> can have the following values:</p>
        /// <ul>
        /// <li> <p> <code>NONE</code> - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or changed objects after choosing this option.</p> </li>
        /// <li> <p> <code>NEW</code> - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system. </p> </li>
        /// <li> <p> <code>NEW_CHANGED</code> - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option.</p> </li>
        /// <li> <p> <code>NEW_CHANGED_DELETED</code> - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.</p> </li>
        /// </ul>
        pub fn set_auto_import_policy(
            mut self,
            input: std::option::Option<crate::model::AutoImportPolicyType>,
        ) -> Self {
            self.auto_import_policy = input;
            self
        }
        /// <p>Provides detailed information about the data repository if its <code>Lifecycle</code> is set to <code>MISCONFIGURED</code> or <code>FAILED</code>.</p>
        pub fn failure_details(
            mut self,
            input: crate::model::DataRepositoryFailureDetails,
        ) -> Self {
            self.failure_details = Some(input);
            self
        }
        /// <p>Provides detailed information about the data repository if its <code>Lifecycle</code> is set to <code>MISCONFIGURED</code> or <code>FAILED</code>.</p>
        pub fn set_failure_details(
            mut self,
            input: std::option::Option<crate::model::DataRepositoryFailureDetails>,
        ) -> Self {
            self.failure_details = input;
            self
        }
        /// Consumes the builder and constructs a [`DataRepositoryConfiguration`](crate::model::DataRepositoryConfiguration).
        pub fn build(self) -> crate::model::DataRepositoryConfiguration {
            crate::model::DataRepositoryConfiguration {
                lifecycle: self.lifecycle,
                import_path: self.import_path,
                export_path: self.export_path,
                imported_file_chunk_size: self.imported_file_chunk_size,
                auto_import_policy: self.auto_import_policy,
                failure_details: self.failure_details,
            }
        }
    }
}
impl DataRepositoryConfiguration {
    /// Creates a new builder-style object to manufacture [`DataRepositoryConfiguration`](crate::model::DataRepositoryConfiguration).
    pub fn builder() -> crate::model::data_repository_configuration::Builder {
        // The concrete type is inferred from the return type.
        Default::default()
    }
}

/// <p>Provides detailed information about the data repository if its <code>Lifecycle</code> is set to <code>MISCONFIGURED</code> or <code>FAILED</code>.</p>
///
/// NOTE: marked `#[non_exhaustive]`, so fields may be added in future SDK versions;
/// construct instances via the builder rather than a struct literal.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DataRepositoryFailureDetails {
    /// <p>A detailed error message.</p>
    #[doc(hidden)]
    pub message: std::option::Option<std::string::String>,
}
impl DataRepositoryFailureDetails {
    /// Returns the detailed error message, if one was set.
    pub fn message(&self) -> Option<&str> {
        self.message.as_ref().map(String::as_str)
    }
}
/// See [`DataRepositoryFailureDetails`](crate::model::DataRepositoryFailureDetails).
pub mod data_repository_failure_details {

    /// A builder for [`DataRepositoryFailureDetails`](crate::model::DataRepositoryFailureDetails).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) message: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Sets a detailed error message.
        pub fn message(self, input: impl Into<String>) -> Self {
            Self {
                message: Some(input.into()),
            }
        }
        /// Sets (or clears, with `None`) the detailed error message.
        pub fn set_message(self, input: Option<String>) -> Self {
            Self { message: input }
        }
        /// Consumes the builder and constructs a [`DataRepositoryFailureDetails`](crate::model::DataRepositoryFailureDetails).
        pub fn build(self) -> crate::model::DataRepositoryFailureDetails {
            let Self { message } = self;
            crate::model::DataRepositoryFailureDetails { message }
        }
    }
}
impl DataRepositoryFailureDetails {
    /// Creates a new builder-style object to manufacture [`DataRepositoryFailureDetails`](crate::model::DataRepositoryFailureDetails).
    pub fn builder() -> crate::model::data_repository_failure_details::Builder {
        // The concrete type is inferred from the return type.
        Default::default()
    }
}

/// When writing a match expression against `AutoImportPolicyType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let autoimportpolicytype = unimplemented!();
/// match autoimportpolicytype {
///     AutoImportPolicyType::New => { /* ... */ },
///     AutoImportPolicyType::NewChanged => { /* ... */ },
///     AutoImportPolicyType::NewChangedDeleted => { /* ... */ },
///     AutoImportPolicyType::None => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `autoimportpolicytype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `AutoImportPolicyType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `AutoImportPolicyType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `AutoImportPolicyType::NewFeature` is defined.
/// Specifically, when `autoimportpolicytype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `AutoImportPolicyType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum AutoImportPolicyType {
    /// Wire value `NEW`: auto-import listings for new objects added to the linked S3 bucket.
    #[allow(missing_docs)] // documentation missing in model
    New,
    /// Wire value `NEW_CHANGED`: auto-import listings for new and changed objects.
    #[allow(missing_docs)] // documentation missing in model
    NewChanged,
    /// Wire value `NEW_CHANGED_DELETED`: auto-import listings for new, changed, and deleted objects.
    #[allow(missing_docs)] // documentation missing in model
    NewChangedDeleted,
    /// Wire value `NONE` (default): auto-import is off; listings are imported only at file-system creation.
    #[allow(missing_docs)] // documentation missing in model
    None,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for AutoImportPolicyType {
    /// Maps the service's wire value to its enum variant; any unrecognized
    /// string becomes `Unknown` so the conversion is total (never fails).
    fn from(s: &str) -> Self {
        match s {
            "NEW" => Self::New,
            "NEW_CHANGED" => Self::NewChanged,
            "NEW_CHANGED_DELETED" => Self::NewChangedDeleted,
            "NONE" => Self::None,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for AutoImportPolicyType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(AutoImportPolicyType::from(s))
    }
}
impl AutoImportPolicyType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            AutoImportPolicyType::New => "NEW",
            AutoImportPolicyType::NewChanged => "NEW_CHANGED",
            AutoImportPolicyType::NewChangedDeleted => "NEW_CHANGED_DELETED",
            AutoImportPolicyType::None => "NONE",
            AutoImportPolicyType::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["NEW", "NEW_CHANGED", "NEW_CHANGED_DELETED", "NONE"]
    }
}
// Delegates to `as_str` so the enum can be passed anywhere a `&str`-like value is accepted.
impl AsRef<str> for AutoImportPolicyType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// When writing a match expression against `DataRepositoryLifecycle`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let datarepositorylifecycle = unimplemented!();
/// match datarepositorylifecycle {
///     DataRepositoryLifecycle::Available => { /* ... */ },
///     DataRepositoryLifecycle::Creating => { /* ... */ },
///     DataRepositoryLifecycle::Deleting => { /* ... */ },
///     DataRepositoryLifecycle::Failed => { /* ... */ },
///     DataRepositoryLifecycle::Misconfigured => { /* ... */ },
///     DataRepositoryLifecycle::Updating => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `datarepositorylifecycle` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `DataRepositoryLifecycle::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `DataRepositoryLifecycle::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `DataRepositoryLifecycle::NewFeature` is defined.
/// Specifically, when `datarepositorylifecycle` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `DataRepositoryLifecycle::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum DataRepositoryLifecycle {
    /// Wire value `AVAILABLE`.
    #[allow(missing_docs)] // documentation missing in model
    Available,
    /// Wire value `CREATING`.
    #[allow(missing_docs)] // documentation missing in model
    Creating,
    /// Wire value `DELETING`.
    #[allow(missing_docs)] // documentation missing in model
    Deleting,
    /// Wire value `FAILED`.
    #[allow(missing_docs)] // documentation missing in model
    Failed,
    /// Wire value `MISCONFIGURED`.
    #[allow(missing_docs)] // documentation missing in model
    Misconfigured,
    /// Wire value `UPDATING`.
    #[allow(missing_docs)] // documentation missing in model
    Updating,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for DataRepositoryLifecycle {
    /// Maps the service's wire value to its enum variant; any unrecognized
    /// string becomes `Unknown` so the conversion is total (never fails).
    fn from(s: &str) -> Self {
        match s {
            "AVAILABLE" => Self::Available,
            "CREATING" => Self::Creating,
            "DELETING" => Self::Deleting,
            "FAILED" => Self::Failed,
            "MISCONFIGURED" => Self::Misconfigured,
            "UPDATING" => Self::Updating,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for DataRepositoryLifecycle {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(DataRepositoryLifecycle::from(s))
    }
}
impl DataRepositoryLifecycle {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            DataRepositoryLifecycle::Available => "AVAILABLE",
            DataRepositoryLifecycle::Creating => "CREATING",
            DataRepositoryLifecycle::Deleting => "DELETING",
            DataRepositoryLifecycle::Failed => "FAILED",
            DataRepositoryLifecycle::Misconfigured => "MISCONFIGURED",
            DataRepositoryLifecycle::Updating => "UPDATING",
            DataRepositoryLifecycle::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &[
            "AVAILABLE",
            "CREATING",
            "DELETING",
            "FAILED",
            "MISCONFIGURED",
            "UPDATING",
        ]
    }
}
// Delegates to `as_str` so the enum can be passed anywhere a `&str`-like value is accepted.
impl AsRef<str> for DataRepositoryLifecycle {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>The configuration for this Microsoft Windows file system.</p>
///
/// NOTE: marked `#[non_exhaustive]`, so fields may be added in future SDK versions;
/// read values through the accessor methods rather than destructuring.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct WindowsFileSystemConfiguration {
    /// <p>The ID for an existing Amazon Web Services Managed Microsoft Active Directory instance that the file system is joined to.</p>
    #[doc(hidden)]
    pub active_directory_id: std::option::Option<std::string::String>,
    /// <p>The configuration of the self-managed Microsoft Active Directory (AD) directory to which the Windows File Server or ONTAP storage virtual machine (SVM) instance is joined.</p>
    #[doc(hidden)]
    pub self_managed_active_directory_configuration:
        std::option::Option<crate::model::SelfManagedActiveDirectoryAttributes>,
    /// <p>Specifies the file system deployment type, valid values are the following:</p>
    /// <ul>
    /// <li> <p> <code>MULTI_AZ_1</code> - Specifies a high availability file system that is configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability, and supports SSD and HDD storage.</p> </li>
    /// <li> <p> <code>SINGLE_AZ_1</code> - (Default) Specifies a file system that is configured for single AZ redundancy, only supports SSD storage.</p> </li>
    /// <li> <p> <code>SINGLE_AZ_2</code> - Latest generation Single AZ file system. Specifies a file system that is configured for single AZ redundancy and supports SSD and HDD storage.</p> </li>
    /// </ul>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html">Single-AZ and Multi-AZ File Systems</a>.</p>
    #[doc(hidden)]
    pub deployment_type: std::option::Option<crate::model::WindowsDeploymentType>,
    /// <p>For <code>MULTI_AZ_1</code> deployment types, use this endpoint when performing administrative tasks on the file system using Amazon FSx Remote PowerShell.</p>
    /// <p>For <code>SINGLE_AZ_1</code> and <code>SINGLE_AZ_2</code> deployment types, this is the DNS name of the file system.</p>
    /// <p>This endpoint is temporarily unavailable when the file system is undergoing maintenance.</p>
    #[doc(hidden)]
    pub remote_administration_endpoint: std::option::Option<std::string::String>,
    /// <p>For <code>MULTI_AZ_1</code> deployment types, it specifies the ID of the subnet where the preferred file server is located. Must be one of the two subnet IDs specified in <code>SubnetIds</code> property. Amazon FSx serves traffic from this subnet except in the event of a failover to the secondary file server.</p>
    /// <p>For <code>SINGLE_AZ_1</code> and <code>SINGLE_AZ_2</code> deployment types, this value is the same as that for <code>SubnetIDs</code>. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html#single-multi-az-resources">Availability and durability: Single-AZ and Multi-AZ file systems</a>.</p>
    #[doc(hidden)]
    pub preferred_subnet_id: std::option::Option<std::string::String>,
    /// <p>For <code>MULTI_AZ_1</code> deployment types, the IP address of the primary, or preferred, file server.</p>
    /// <p>Use this IP address when mounting the file system on Linux SMB clients or Windows SMB clients that are not joined to a Microsoft Active Directory. Applicable for all Windows file system deployment types. This IP address is temporarily unavailable when the file system is undergoing maintenance. For Linux and Windows SMB clients that are joined to an Active Directory, use the file system's DNSName instead. For more information on mapping and mounting file shares, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/accessing-file-shares.html">Accessing File Shares</a>.</p>
    #[doc(hidden)]
    pub preferred_file_server_ip: std::option::Option<std::string::String>,
    /// <p>The throughput of the Amazon FSx file system, measured in megabytes per second.</p>
    #[doc(hidden)]
    pub throughput_capacity: std::option::Option<i32>,
    /// <p>The list of maintenance operations in progress for this file system.</p>
    #[doc(hidden)]
    pub maintenance_operations_in_progress:
        std::option::Option<std::vec::Vec<crate::model::FileSystemMaintenanceOperation>>,
    /// <p>The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.</p>
    #[doc(hidden)]
    pub weekly_maintenance_start_time: std::option::Option<std::string::String>,
    /// <p>The preferred time to take daily automatic backups, in the UTC time zone.</p>
    #[doc(hidden)]
    pub daily_automatic_backup_start_time: std::option::Option<std::string::String>,
    /// <p>The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days.</p>
    #[doc(hidden)]
    pub automatic_backup_retention_days: std::option::Option<i32>,
    /// <p>A boolean flag indicating whether tags on the file system should be copied to backups. This value defaults to false. If it's set to true, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.</p>
    #[doc(hidden)]
    pub copy_tags_to_backups: std::option::Option<bool>,
    /// <p>An array of one or more DNS aliases that are currently associated with the Amazon FSx file system. Aliases allow you to use existing DNS names to access the data in your Amazon FSx file system. You can associate up to 50 aliases with a file system at any time. You can associate additional DNS aliases after you create the file system using the AssociateFileSystemAliases operation. You can remove DNS aliases from the file system after it is created using the DisassociateFileSystemAliases operation. You only need to specify the alias name in the request payload. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html">DNS aliases</a>.</p>
    #[doc(hidden)]
    pub aliases: std::option::Option<std::vec::Vec<crate::model::Alias>>,
    /// <p>The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system.</p>
    #[doc(hidden)]
    pub audit_log_configuration: std::option::Option<crate::model::WindowsAuditLogConfiguration>,
}
/// Read-only accessors; each borrows the corresponding optional field.
impl WindowsFileSystemConfiguration {
    /// The ID of the Amazon Web Services Managed Microsoft AD instance the file system is joined to.
    pub fn active_directory_id(&self) -> Option<&str> {
        self.active_directory_id.as_deref()
    }
    /// The self-managed Microsoft Active Directory (AD) configuration the Windows File Server
    /// or ONTAP storage virtual machine (SVM) instance is joined to.
    pub fn self_managed_active_directory_configuration(
        &self,
    ) -> Option<&crate::model::SelfManagedActiveDirectoryAttributes> {
        self.self_managed_active_directory_configuration.as_ref()
    }
    /// The file system deployment type: `MULTI_AZ_1`, `SINGLE_AZ_1` (default), or `SINGLE_AZ_2`.
    /// See <https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html>.
    pub fn deployment_type(&self) -> Option<&crate::model::WindowsDeploymentType> {
        self.deployment_type.as_ref()
    }
    /// For `MULTI_AZ_1`, the endpoint for administrative tasks via Amazon FSx Remote PowerShell;
    /// for `SINGLE_AZ_1`/`SINGLE_AZ_2`, the DNS name of the file system.
    /// Temporarily unavailable during maintenance.
    pub fn remote_administration_endpoint(&self) -> Option<&str> {
        self.remote_administration_endpoint.as_deref()
    }
    /// For `MULTI_AZ_1`, the ID of the subnet hosting the preferred file server (one of the two
    /// `SubnetIds`); for `SINGLE_AZ_1`/`SINGLE_AZ_2`, the same as `SubnetIDs`.
    pub fn preferred_subnet_id(&self) -> Option<&str> {
        self.preferred_subnet_id.as_deref()
    }
    /// For `MULTI_AZ_1`, the IP address of the primary (preferred) file server. Use it when
    /// mounting from SMB clients that are not joined to an Active Directory; AD-joined clients
    /// should use the file system's DNSName instead.
    pub fn preferred_file_server_ip(&self) -> Option<&str> {
        self.preferred_file_server_ip.as_deref()
    }
    /// The file system throughput, in megabytes per second.
    pub fn throughput_capacity(&self) -> Option<i32> {
        self.throughput_capacity
    }
    /// The maintenance operations currently in progress for this file system.
    pub fn maintenance_operations_in_progress(
        &self,
    ) -> Option<&[crate::model::FileSystemMaintenanceOperation]> {
        self.maintenance_operations_in_progress.as_deref()
    }
    /// The preferred weekly maintenance start time, `d:HH:MM` in UTC (d = 1..7, Monday..Sunday).
    pub fn weekly_maintenance_start_time(&self) -> Option<&str> {
        self.weekly_maintenance_start_time.as_deref()
    }
    /// The preferred daily automatic backup start time, in UTC.
    pub fn daily_automatic_backup_start_time(&self) -> Option<&str> {
        self.daily_automatic_backup_start_time.as_deref()
    }
    /// The number of days automatic backups are retained (0 disables them; maximum 90).
    pub fn automatic_backup_retention_days(&self) -> Option<i32> {
        self.automatic_backup_retention_days
    }
    /// Whether tags on the file system are copied to backups (defaults to false).
    pub fn copy_tags_to_backups(&self) -> Option<bool> {
        self.copy_tags_to_backups
    }
    /// The DNS aliases currently associated with the file system (up to 50). Manage them with
    /// the AssociateFileSystemAliases / DisassociateFileSystemAliases operations.
    pub fn aliases(&self) -> Option<&[crate::model::Alias]> {
        self.aliases.as_deref()
    }
    /// The configuration used to audit and log user access to files, folders, and file shares.
    pub fn audit_log_configuration(
        &self,
    ) -> Option<&crate::model::WindowsAuditLogConfiguration> {
        self.audit_log_configuration.as_ref()
    }
}
/// See [`WindowsFileSystemConfiguration`](crate::model::WindowsFileSystemConfiguration).
pub mod windows_file_system_configuration {

    /// A builder for [`WindowsFileSystemConfiguration`](crate::model::WindowsFileSystemConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) active_directory_id: std::option::Option<std::string::String>,
        pub(crate) self_managed_active_directory_configuration:
            std::option::Option<crate::model::SelfManagedActiveDirectoryAttributes>,
        pub(crate) deployment_type: std::option::Option<crate::model::WindowsDeploymentType>,
        pub(crate) remote_administration_endpoint: std::option::Option<std::string::String>,
        pub(crate) preferred_subnet_id: std::option::Option<std::string::String>,
        pub(crate) preferred_file_server_ip: std::option::Option<std::string::String>,
        pub(crate) throughput_capacity: std::option::Option<i32>,
        pub(crate) maintenance_operations_in_progress:
            std::option::Option<std::vec::Vec<crate::model::FileSystemMaintenanceOperation>>,
        pub(crate) weekly_maintenance_start_time: std::option::Option<std::string::String>,
        pub(crate) daily_automatic_backup_start_time: std::option::Option<std::string::String>,
        pub(crate) automatic_backup_retention_days: std::option::Option<i32>,
        pub(crate) copy_tags_to_backups: std::option::Option<bool>,
        pub(crate) aliases: std::option::Option<std::vec::Vec<crate::model::Alias>>,
        pub(crate) audit_log_configuration:
            std::option::Option<crate::model::WindowsAuditLogConfiguration>,
    }
    impl Builder {
        /// <p>The ID for an existing Amazon Web Services Managed Microsoft Active Directory instance that the file system is joined to.</p>
        pub fn active_directory_id(mut self, value: impl Into<std::string::String>) -> Self {
            self.active_directory_id = Some(value.into());
            self
        }
        /// <p>The ID for an existing Amazon Web Services Managed Microsoft Active Directory instance that the file system is joined to.</p>
        pub fn set_active_directory_id(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.active_directory_id = value;
            self
        }
        /// <p>The configuration of the self-managed Microsoft Active Directory (AD) directory to which the Windows File Server or ONTAP storage virtual machine (SVM) instance is joined.</p>
        pub fn self_managed_active_directory_configuration(
            mut self,
            value: crate::model::SelfManagedActiveDirectoryAttributes,
        ) -> Self {
            self.self_managed_active_directory_configuration = Some(value);
            self
        }
        /// <p>The configuration of the self-managed Microsoft Active Directory (AD) directory to which the Windows File Server or ONTAP storage virtual machine (SVM) instance is joined.</p>
        pub fn set_self_managed_active_directory_configuration(
            mut self,
            value: std::option::Option<crate::model::SelfManagedActiveDirectoryAttributes>,
        ) -> Self {
            self.self_managed_active_directory_configuration = value;
            self
        }
        /// <p>Specifies the file system deployment type, valid values are the following:</p>
        /// <ul>
        /// <li> <p> <code>MULTI_AZ_1</code> - Specifies a high availability file system that is configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability, and supports SSD and HDD storage.</p> </li>
        /// <li> <p> <code>SINGLE_AZ_1</code> - (Default) Specifies a file system that is configured for single AZ redundancy, only supports SSD storage.</p> </li>
        /// <li> <p> <code>SINGLE_AZ_2</code> - Latest generation Single AZ file system. Specifies a file system that is configured for single AZ redundancy and supports SSD and HDD storage.</p> </li>
        /// </ul>
        /// <p>For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html">Single-AZ and Multi-AZ File Systems</a>.</p>
        pub fn deployment_type(mut self, value: crate::model::WindowsDeploymentType) -> Self {
            self.deployment_type = Some(value);
            self
        }
        /// <p>Specifies the file system deployment type, valid values are the following:</p>
        /// <ul>
        /// <li> <p> <code>MULTI_AZ_1</code> - Specifies a high availability file system that is configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability, and supports SSD and HDD storage.</p> </li>
        /// <li> <p> <code>SINGLE_AZ_1</code> - (Default) Specifies a file system that is configured for single AZ redundancy, only supports SSD storage.</p> </li>
        /// <li> <p> <code>SINGLE_AZ_2</code> - Latest generation Single AZ file system. Specifies a file system that is configured for single AZ redundancy and supports SSD and HDD storage.</p> </li>
        /// </ul>
        /// <p>For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html">Single-AZ and Multi-AZ File Systems</a>.</p>
        pub fn set_deployment_type(
            mut self,
            value: std::option::Option<crate::model::WindowsDeploymentType>,
        ) -> Self {
            self.deployment_type = value;
            self
        }
        /// <p>For <code>MULTI_AZ_1</code> deployment types, use this endpoint when performing administrative tasks on the file system using Amazon FSx Remote PowerShell.</p>
        /// <p>For <code>SINGLE_AZ_1</code> and <code>SINGLE_AZ_2</code> deployment types, this is the DNS name of the file system.</p>
        /// <p>This endpoint is temporarily unavailable when the file system is undergoing maintenance.</p>
        pub fn remote_administration_endpoint(
            mut self,
            value: impl Into<std::string::String>,
        ) -> Self {
            self.remote_administration_endpoint = Some(value.into());
            self
        }
        /// <p>For <code>MULTI_AZ_1</code> deployment types, use this endpoint when performing administrative tasks on the file system using Amazon FSx Remote PowerShell.</p>
        /// <p>For <code>SINGLE_AZ_1</code> and <code>SINGLE_AZ_2</code> deployment types, this is the DNS name of the file system.</p>
        /// <p>This endpoint is temporarily unavailable when the file system is undergoing maintenance.</p>
        pub fn set_remote_administration_endpoint(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.remote_administration_endpoint = value;
            self
        }
        /// <p>For <code>MULTI_AZ_1</code> deployment types, it specifies the ID of the subnet where the preferred file server is located. Must be one of the two subnet IDs specified in <code>SubnetIds</code> property. Amazon FSx serves traffic from this subnet except in the event of a failover to the secondary file server.</p>
        /// <p>For <code>SINGLE_AZ_1</code> and <code>SINGLE_AZ_2</code> deployment types, this value is the same as that for <code>SubnetIDs</code>. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html#single-multi-az-resources">Availability and durability: Single-AZ and Multi-AZ file systems</a>.</p>
        pub fn preferred_subnet_id(mut self, value: impl Into<std::string::String>) -> Self {
            self.preferred_subnet_id = Some(value.into());
            self
        }
        /// <p>For <code>MULTI_AZ_1</code> deployment types, it specifies the ID of the subnet where the preferred file server is located. Must be one of the two subnet IDs specified in <code>SubnetIds</code> property. Amazon FSx serves traffic from this subnet except in the event of a failover to the secondary file server.</p>
        /// <p>For <code>SINGLE_AZ_1</code> and <code>SINGLE_AZ_2</code> deployment types, this value is the same as that for <code>SubnetIDs</code>. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html#single-multi-az-resources">Availability and durability: Single-AZ and Multi-AZ file systems</a>.</p>
        pub fn set_preferred_subnet_id(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.preferred_subnet_id = value;
            self
        }
        /// <p>For <code>MULTI_AZ_1</code> deployment types, the IP address of the primary, or preferred, file server.</p>
        /// <p>Use this IP address when mounting the file system on Linux SMB clients or Windows SMB clients that are not joined to a Microsoft Active Directory. Applicable for all Windows file system deployment types. This IP address is temporarily unavailable when the file system is undergoing maintenance. For Linux and Windows SMB clients that are joined to an Active Directory, use the file system's DNSName instead. For more information on mapping and mounting file shares, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/accessing-file-shares.html">Accessing File Shares</a>.</p>
        pub fn preferred_file_server_ip(mut self, value: impl Into<std::string::String>) -> Self {
            self.preferred_file_server_ip = Some(value.into());
            self
        }
        /// <p>For <code>MULTI_AZ_1</code> deployment types, the IP address of the primary, or preferred, file server.</p>
        /// <p>Use this IP address when mounting the file system on Linux SMB clients or Windows SMB clients that are not joined to a Microsoft Active Directory. Applicable for all Windows file system deployment types. This IP address is temporarily unavailable when the file system is undergoing maintenance. For Linux and Windows SMB clients that are joined to an Active Directory, use the file system's DNSName instead. For more information on mapping and mounting file shares, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/accessing-file-shares.html">Accessing File Shares</a>.</p>
        pub fn set_preferred_file_server_ip(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.preferred_file_server_ip = value;
            self
        }
        /// <p>The throughput of the Amazon FSx file system, measured in megabytes per second.</p>
        pub fn throughput_capacity(mut self, value: i32) -> Self {
            self.throughput_capacity = Some(value);
            self
        }
        /// <p>The throughput of the Amazon FSx file system, measured in megabytes per second.</p>
        pub fn set_throughput_capacity(mut self, value: std::option::Option<i32>) -> Self {
            self.throughput_capacity = value;
            self
        }
        /// Appends an item to `maintenance_operations_in_progress`.
        ///
        /// To override the contents of this collection use [`set_maintenance_operations_in_progress`](Self::set_maintenance_operations_in_progress).
        ///
        /// <p>The list of maintenance operations in progress for this file system.</p>
        pub fn maintenance_operations_in_progress(
            mut self,
            value: crate::model::FileSystemMaintenanceOperation,
        ) -> Self {
            // Lazily create the Vec on first append, then push in place.
            self.maintenance_operations_in_progress
                .get_or_insert_with(Vec::new)
                .push(value);
            self
        }
        /// <p>The list of maintenance operations in progress for this file system.</p>
        pub fn set_maintenance_operations_in_progress(
            mut self,
            value: std::option::Option<std::vec::Vec<crate::model::FileSystemMaintenanceOperation>>,
        ) -> Self {
            self.maintenance_operations_in_progress = value;
            self
        }
        /// <p>The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.</p>
        pub fn weekly_maintenance_start_time(
            mut self,
            value: impl Into<std::string::String>,
        ) -> Self {
            self.weekly_maintenance_start_time = Some(value.into());
            self
        }
        /// <p>The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.</p>
        pub fn set_weekly_maintenance_start_time(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.weekly_maintenance_start_time = value;
            self
        }
        /// <p>The preferred time to take daily automatic backups, in the UTC time zone.</p>
        pub fn daily_automatic_backup_start_time(
            mut self,
            value: impl Into<std::string::String>,
        ) -> Self {
            self.daily_automatic_backup_start_time = Some(value.into());
            self
        }
        /// <p>The preferred time to take daily automatic backups, in the UTC time zone.</p>
        pub fn set_daily_automatic_backup_start_time(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.daily_automatic_backup_start_time = value;
            self
        }
        /// <p>The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days.</p>
        pub fn automatic_backup_retention_days(mut self, value: i32) -> Self {
            self.automatic_backup_retention_days = Some(value);
            self
        }
        /// <p>The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days.</p>
        pub fn set_automatic_backup_retention_days(
            mut self,
            value: std::option::Option<i32>,
        ) -> Self {
            self.automatic_backup_retention_days = value;
            self
        }
        /// <p>A boolean flag indicating whether tags on the file system should be copied to backups. This value defaults to false. If it's set to true, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.</p>
        pub fn copy_tags_to_backups(mut self, value: bool) -> Self {
            self.copy_tags_to_backups = Some(value);
            self
        }
        /// <p>A boolean flag indicating whether tags on the file system should be copied to backups. This value defaults to false. If it's set to true, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.</p>
        pub fn set_copy_tags_to_backups(mut self, value: std::option::Option<bool>) -> Self {
            self.copy_tags_to_backups = value;
            self
        }
        /// Appends an item to `aliases`.
        ///
        /// To override the contents of this collection use [`set_aliases`](Self::set_aliases).
        ///
        /// <p>An array of one or more DNS aliases that are currently associated with the Amazon FSx file system. Aliases allow you to use existing DNS names to access the data in your Amazon FSx file system. You can associate up to 50 aliases with a file system at any time. You can associate additional DNS aliases after you create the file system using the AssociateFileSystemAliases operation. You can remove DNS aliases from the file system after it is created using the DisassociateFileSystemAliases operation. You only need to specify the alias name in the request payload. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html">DNS aliases</a>.</p>
        pub fn aliases(mut self, value: crate::model::Alias) -> Self {
            // Lazily create the Vec on first append, then push in place.
            self.aliases.get_or_insert_with(Vec::new).push(value);
            self
        }
        /// <p>An array of one or more DNS aliases that are currently associated with the Amazon FSx file system. Aliases allow you to use existing DNS names to access the data in your Amazon FSx file system. You can associate up to 50 aliases with a file system at any time. You can associate additional DNS aliases after you create the file system using the AssociateFileSystemAliases operation. You can remove DNS aliases from the file system after it is created using the DisassociateFileSystemAliases operation. You only need to specify the alias name in the request payload. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html">DNS aliases</a>.</p>
        pub fn set_aliases(
            mut self,
            value: std::option::Option<std::vec::Vec<crate::model::Alias>>,
        ) -> Self {
            self.aliases = value;
            self
        }
        /// <p>The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system.</p>
        pub fn audit_log_configuration(
            mut self,
            value: crate::model::WindowsAuditLogConfiguration,
        ) -> Self {
            self.audit_log_configuration = Some(value);
            self
        }
        /// <p>The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system.</p>
        pub fn set_audit_log_configuration(
            mut self,
            value: std::option::Option<crate::model::WindowsAuditLogConfiguration>,
        ) -> Self {
            self.audit_log_configuration = value;
            self
        }
        /// Consumes the builder and constructs a [`WindowsFileSystemConfiguration`](crate::model::WindowsFileSystemConfiguration).
        pub fn build(self) -> crate::model::WindowsFileSystemConfiguration {
            // Destructure the builder so every field is moved exactly once.
            let Self {
                active_directory_id,
                self_managed_active_directory_configuration,
                deployment_type,
                remote_administration_endpoint,
                preferred_subnet_id,
                preferred_file_server_ip,
                throughput_capacity,
                maintenance_operations_in_progress,
                weekly_maintenance_start_time,
                daily_automatic_backup_start_time,
                automatic_backup_retention_days,
                copy_tags_to_backups,
                aliases,
                audit_log_configuration,
            } = self;
            crate::model::WindowsFileSystemConfiguration {
                active_directory_id,
                self_managed_active_directory_configuration,
                deployment_type,
                remote_administration_endpoint,
                preferred_subnet_id,
                preferred_file_server_ip,
                throughput_capacity,
                maintenance_operations_in_progress,
                weekly_maintenance_start_time,
                daily_automatic_backup_start_time,
                automatic_backup_retention_days,
                copy_tags_to_backups,
                aliases,
                audit_log_configuration,
            }
        }
    }
}
impl WindowsFileSystemConfiguration {
    /// Creates a new builder-style object to manufacture [`WindowsFileSystemConfiguration`](crate::model::WindowsFileSystemConfiguration).
    pub fn builder() -> crate::model::windows_file_system_configuration::Builder {
        // The return type drives inference; `Builder` derives `Default`.
        std::default::Default::default()
    }
}

/// <p>The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/file-access-auditing.html"> File access auditing</a>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct WindowsAuditLogConfiguration {
    /// <p>Sets which attempt type is logged by Amazon FSx for file and folder accesses.</p>
    /// <ul>
    /// <li> <p> <code>SUCCESS_ONLY</code> - only successful attempts to access files or folders are logged.</p> </li>
    /// <li> <p> <code>FAILURE_ONLY</code> - only failed attempts to access files or folders are logged.</p> </li>
    /// <li> <p> <code>SUCCESS_AND_FAILURE</code> - both successful attempts and failed attempts to access files or folders are logged.</p> </li>
    /// <li> <p> <code>DISABLED</code> - access auditing of files and folders is turned off.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub file_access_audit_log_level: std::option::Option<crate::model::WindowsAccessAuditLogLevel>,
    /// <p>Sets which attempt type is logged by Amazon FSx for file share accesses.</p>
    /// <ul>
    /// <li> <p> <code>SUCCESS_ONLY</code> - only successful attempts to access file shares are logged.</p> </li>
    /// <li> <p> <code>FAILURE_ONLY</code> - only failed attempts to access file shares are logged.</p> </li>
    /// <li> <p> <code>SUCCESS_AND_FAILURE</code> - both successful attempts and failed attempts to access file shares are logged.</p> </li>
    /// <li> <p> <code>DISABLED</code> - access auditing of file shares is turned off.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub file_share_access_audit_log_level:
        std::option::Option<crate::model::WindowsAccessAuditLogLevel>,
    /// <p>The Amazon Resource Name (ARN) for the destination of the audit logs. The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN.</p>
    /// <p>The name of the Amazon CloudWatch Logs log group must begin with the <code>/aws/fsx</code> prefix. The name of the Amazon Kinesis Data Firehose delivery stream must begin with the <code>aws-fsx</code> prefix.</p>
    /// <p>The destination ARN (either CloudWatch Logs log group or Kinesis Data Firehose delivery stream) must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.</p>
    #[doc(hidden)]
    pub audit_log_destination: std::option::Option<std::string::String>,
}
impl WindowsAuditLogConfiguration {
    /// <p>Sets which attempt type is logged by Amazon FSx for file and folder accesses.</p>
    /// <ul>
    /// <li> <p> <code>SUCCESS_ONLY</code> - only successful attempts to access files or folders are logged.</p> </li>
    /// <li> <p> <code>FAILURE_ONLY</code> - only failed attempts to access files or folders are logged.</p> </li>
    /// <li> <p> <code>SUCCESS_AND_FAILURE</code> - both successful attempts and failed attempts to access files or folders are logged.</p> </li>
    /// <li> <p> <code>DISABLED</code> - access auditing of files and folders is turned off.</p> </li>
    /// </ul>
    pub fn file_access_audit_log_level(
        &self,
    ) -> std::option::Option<&crate::model::WindowsAccessAuditLogLevel> {
        self.file_access_audit_log_level.as_ref()
    }
    /// <p>Sets which attempt type is logged by Amazon FSx for file share accesses.</p>
    /// <ul>
    /// <li> <p> <code>SUCCESS_ONLY</code> - only successful attempts to access file shares are logged.</p> </li>
    /// <li> <p> <code>FAILURE_ONLY</code> - only failed attempts to access file shares are logged.</p> </li>
    /// <li> <p> <code>SUCCESS_AND_FAILURE</code> - both successful attempts and failed attempts to access file shares are logged.</p> </li>
    /// <li> <p> <code>DISABLED</code> - access auditing of file shares is turned off.</p> </li>
    /// </ul>
    pub fn file_share_access_audit_log_level(
        &self,
    ) -> std::option::Option<&crate::model::WindowsAccessAuditLogLevel> {
        self.file_share_access_audit_log_level.as_ref()
    }
    /// <p>The Amazon Resource Name (ARN) for the destination of the audit logs. The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN.</p>
    /// <p>The name of the Amazon CloudWatch Logs log group must begin with the <code>/aws/fsx</code> prefix. The name of the Amazon Kinesis Data Firehose delivery stream must begin with the <code>aws-fsx</code> prefix.</p>
    /// <p>The destination ARN (either CloudWatch Logs log group or Kinesis Data Firehose delivery stream) must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.</p>
    pub fn audit_log_destination(&self) -> std::option::Option<&str> {
        self.audit_log_destination.as_deref()
    }
}
/// See [`WindowsAuditLogConfiguration`](crate::model::WindowsAuditLogConfiguration).
pub mod windows_audit_log_configuration {

    /// A builder for [`WindowsAuditLogConfiguration`](crate::model::WindowsAuditLogConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) file_access_audit_log_level:
            std::option::Option<crate::model::WindowsAccessAuditLogLevel>,
        pub(crate) file_share_access_audit_log_level:
            std::option::Option<crate::model::WindowsAccessAuditLogLevel>,
        pub(crate) audit_log_destination: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>Sets which attempt type is logged by Amazon FSx for file and folder accesses.</p>
        /// <ul>
        /// <li> <p> <code>SUCCESS_ONLY</code> - only successful attempts to access files or folders are logged.</p> </li>
        /// <li> <p> <code>FAILURE_ONLY</code> - only failed attempts to access files or folders are logged.</p> </li>
        /// <li> <p> <code>SUCCESS_AND_FAILURE</code> - both successful attempts and failed attempts to access files or folders are logged.</p> </li>
        /// <li> <p> <code>DISABLED</code> - access auditing of files and folders is turned off.</p> </li>
        /// </ul>
        pub fn file_access_audit_log_level(
            mut self,
            value: crate::model::WindowsAccessAuditLogLevel,
        ) -> Self {
            self.file_access_audit_log_level = Some(value);
            self
        }
        /// <p>Sets which attempt type is logged by Amazon FSx for file and folder accesses.</p>
        /// <ul>
        /// <li> <p> <code>SUCCESS_ONLY</code> - only successful attempts to access files or folders are logged.</p> </li>
        /// <li> <p> <code>FAILURE_ONLY</code> - only failed attempts to access files or folders are logged.</p> </li>
        /// <li> <p> <code>SUCCESS_AND_FAILURE</code> - both successful attempts and failed attempts to access files or folders are logged.</p> </li>
        /// <li> <p> <code>DISABLED</code> - access auditing of files and folders is turned off.</p> </li>
        /// </ul>
        pub fn set_file_access_audit_log_level(
            mut self,
            value: std::option::Option<crate::model::WindowsAccessAuditLogLevel>,
        ) -> Self {
            self.file_access_audit_log_level = value;
            self
        }
        /// <p>Sets which attempt type is logged by Amazon FSx for file share accesses.</p>
        /// <ul>
        /// <li> <p> <code>SUCCESS_ONLY</code> - only successful attempts to access file shares are logged.</p> </li>
        /// <li> <p> <code>FAILURE_ONLY</code> - only failed attempts to access file shares are logged.</p> </li>
        /// <li> <p> <code>SUCCESS_AND_FAILURE</code> - both successful attempts and failed attempts to access file shares are logged.</p> </li>
        /// <li> <p> <code>DISABLED</code> - access auditing of file shares is turned off.</p> </li>
        /// </ul>
        pub fn file_share_access_audit_log_level(
            mut self,
            value: crate::model::WindowsAccessAuditLogLevel,
        ) -> Self {
            self.file_share_access_audit_log_level = Some(value);
            self
        }
        /// <p>Sets which attempt type is logged by Amazon FSx for file share accesses.</p>
        /// <ul>
        /// <li> <p> <code>SUCCESS_ONLY</code> - only successful attempts to access file shares are logged.</p> </li>
        /// <li> <p> <code>FAILURE_ONLY</code> - only failed attempts to access file shares are logged.</p> </li>
        /// <li> <p> <code>SUCCESS_AND_FAILURE</code> - both successful attempts and failed attempts to access file shares are logged.</p> </li>
        /// <li> <p> <code>DISABLED</code> - access auditing of file shares is turned off.</p> </li>
        /// </ul>
        pub fn set_file_share_access_audit_log_level(
            mut self,
            value: std::option::Option<crate::model::WindowsAccessAuditLogLevel>,
        ) -> Self {
            self.file_share_access_audit_log_level = value;
            self
        }
        /// <p>The Amazon Resource Name (ARN) for the destination of the audit logs. The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN.</p>
        /// <p>The name of the Amazon CloudWatch Logs log group must begin with the <code>/aws/fsx</code> prefix. The name of the Amazon Kinesis Data Firehose delivery stream must begin with the <code>aws-fsx</code> prefix.</p>
        /// <p>The destination ARN (either CloudWatch Logs log group or Kinesis Data Firehose delivery stream) must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.</p>
        pub fn audit_log_destination(mut self, value: impl Into<std::string::String>) -> Self {
            self.audit_log_destination = Some(value.into());
            self
        }
        /// <p>The Amazon Resource Name (ARN) for the destination of the audit logs. The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN.</p>
        /// <p>The name of the Amazon CloudWatch Logs log group must begin with the <code>/aws/fsx</code> prefix. The name of the Amazon Kinesis Data Firehose delivery stream must begin with the <code>aws-fsx</code> prefix.</p>
        /// <p>The destination ARN (either CloudWatch Logs log group or Kinesis Data Firehose delivery stream) must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.</p>
        pub fn set_audit_log_destination(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.audit_log_destination = value;
            self
        }
        /// Consumes the builder and constructs a [`WindowsAuditLogConfiguration`](crate::model::WindowsAuditLogConfiguration).
        pub fn build(self) -> crate::model::WindowsAuditLogConfiguration {
            // Destructure the builder so every field is moved exactly once.
            let Self {
                file_access_audit_log_level,
                file_share_access_audit_log_level,
                audit_log_destination,
            } = self;
            crate::model::WindowsAuditLogConfiguration {
                file_access_audit_log_level,
                file_share_access_audit_log_level,
                audit_log_destination,
            }
        }
    }
}
impl WindowsAuditLogConfiguration {
    /// Creates a new builder-style object to manufacture [`WindowsAuditLogConfiguration`](crate::model::WindowsAuditLogConfiguration).
    pub fn builder() -> crate::model::windows_audit_log_configuration::Builder {
        // The builder derives `Default`; the return type drives the inference.
        Default::default()
    }
}

/// When writing a match expression against `WindowsAccessAuditLogLevel`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let windowsaccessauditloglevel = unimplemented!();
/// match windowsaccessauditloglevel {
///     WindowsAccessAuditLogLevel::Disabled => { /* ... */ },
///     WindowsAccessAuditLogLevel::FailureOnly => { /* ... */ },
///     WindowsAccessAuditLogLevel::SuccessAndFailure => { /* ... */ },
///     WindowsAccessAuditLogLevel::SuccessOnly => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `windowsaccessauditloglevel` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `WindowsAccessAuditLogLevel::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `WindowsAccessAuditLogLevel::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `WindowsAccessAuditLogLevel::NewFeature` is defined.
/// Specifically, when `windowsaccessauditloglevel` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `WindowsAccessAuditLogLevel::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum WindowsAccessAuditLogLevel {
    #[allow(missing_docs)] // documentation missing in model
    Disabled,
    #[allow(missing_docs)] // documentation missing in model
    FailureOnly,
    #[allow(missing_docs)] // documentation missing in model
    SuccessAndFailure,
    #[allow(missing_docs)] // documentation missing in model
    SuccessOnly,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for WindowsAccessAuditLogLevel {
    fn from(s: &str) -> Self {
        // Any string outside the modeled set becomes the opaque `Unknown` variant.
        match s {
            "DISABLED" => Self::Disabled,
            "FAILURE_ONLY" => Self::FailureOnly,
            "SUCCESS_AND_FAILURE" => Self::SuccessAndFailure,
            "SUCCESS_ONLY" => Self::SuccessOnly,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for WindowsAccessAuditLogLevel {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Parsing never fails: unrecognized input maps to `Unknown`.
        Ok(Self::from(s))
    }
}
impl WindowsAccessAuditLogLevel {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Disabled => "DISABLED",
            Self::FailureOnly => "FAILURE_ONLY",
            Self::SuccessAndFailure => "SUCCESS_AND_FAILURE",
            Self::SuccessOnly => "SUCCESS_ONLY",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &[
            "DISABLED",
            "FAILURE_ONLY",
            "SUCCESS_AND_FAILURE",
            "SUCCESS_ONLY",
        ]
    }
}
impl AsRef<str> for WindowsAccessAuditLogLevel {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>A DNS alias that is associated with the file system. You can use a DNS alias to access a file system using user-defined DNS names, in addition to the default DNS name that Amazon FSx assigns to the file system. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html">DNS aliases</a> in the <i>FSx for Windows File Server User Guide</i>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Alias {
    // NOTE: fields are `#[doc(hidden)]`; the generated accessor methods on
    // `Alias` and the `alias::Builder` are the intended public read/write API.
    /// <p>The name of the DNS alias. The alias name has to meet the following requirements:</p>
    /// <ul>
    /// <li> <p>Formatted as a fully-qualified domain name (FQDN), <code>hostname.domain</code>, for example, <code>accounting.example.com</code>.</p> </li>
    /// <li> <p>Can contain alphanumeric characters, the underscore (_), and the hyphen (-).</p> </li>
    /// <li> <p>Cannot start or end with a hyphen.</p> </li>
    /// <li> <p>Can start with a numeric.</p> </li>
    /// </ul>
    /// <p>For DNS names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in escape codes.</p>
    #[doc(hidden)]
    pub name: std::option::Option<std::string::String>,
    /// <p>Describes the state of the DNS alias.</p>
    /// <ul>
    /// <li> <p>AVAILABLE - The DNS alias is associated with an Amazon FSx file system.</p> </li>
    /// <li> <p>CREATING - Amazon FSx is creating the DNS alias and associating it with the file system.</p> </li>
    /// <li> <p>CREATE_FAILED - Amazon FSx was unable to associate the DNS alias with the file system.</p> </li>
    /// <li> <p>DELETING - Amazon FSx is disassociating the DNS alias from the file system and deleting it.</p> </li>
    /// <li> <p>DELETE_FAILED - Amazon FSx was unable to disassociate the DNS alias from the file system.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub lifecycle: std::option::Option<crate::model::AliasLifecycle>,
}
impl Alias {
    /// <p>The name of the DNS alias. The alias name has to meet the following requirements:</p>
    /// <ul>
    /// <li> <p>Formatted as a fully-qualified domain name (FQDN), <code>hostname.domain</code>, for example, <code>accounting.example.com</code>.</p> </li>
    /// <li> <p>Can contain alphanumeric characters, the underscore (_), and the hyphen (-).</p> </li>
    /// <li> <p>Cannot start or end with a hyphen.</p> </li>
    /// <li> <p>Can start with a numeric.</p> </li>
    /// </ul>
    /// <p>For DNS names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in escape codes.</p>
    pub fn name(&self) -> std::option::Option<&str> {
        // Borrow the owned String and expose it as `&str`.
        self.name.as_ref().map(|s| s.as_str())
    }
    /// <p>Describes the state of the DNS alias.</p>
    /// <ul>
    /// <li> <p>AVAILABLE - The DNS alias is associated with an Amazon FSx file system.</p> </li>
    /// <li> <p>CREATING - Amazon FSx is creating the DNS alias and associating it with the file system.</p> </li>
    /// <li> <p>CREATE_FAILED - Amazon FSx was unable to associate the DNS alias with the file system.</p> </li>
    /// <li> <p>DELETING - Amazon FSx is disassociating the DNS alias from the file system and deleting it.</p> </li>
    /// <li> <p>DELETE_FAILED - Amazon FSx was unable to disassociate the DNS alias from the file system.</p> </li>
    /// </ul>
    pub fn lifecycle(&self) -> std::option::Option<&crate::model::AliasLifecycle> {
        self.lifecycle.as_ref()
    }
}
/// See [`Alias`](crate::model::Alias).
pub mod alias {

    /// A builder for [`Alias`](crate::model::Alias).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) lifecycle: std::option::Option<crate::model::AliasLifecycle>,
    }
    impl Builder {
        /// <p>The name of the DNS alias. The alias name has to meet the following requirements:</p>
        /// <ul>
        /// <li> <p>Formatted as a fully-qualified domain name (FQDN), <code>hostname.domain</code>, for example, <code>accounting.example.com</code>.</p> </li>
        /// <li> <p>Can contain alphanumeric characters, the underscore (_), and the hyphen (-).</p> </li>
        /// <li> <p>Cannot start or end with a hyphen.</p> </li>
        /// <li> <p>Can start with a numeric.</p> </li>
        /// </ul>
        /// <p>For DNS names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in escape codes.</p>
        pub fn name(self, input: impl Into<std::string::String>) -> Self {
            // Struct-update syntax: replace `name`, keep every other field.
            Self {
                name: Some(input.into()),
                ..self
            }
        }
        /// <p>The name of the DNS alias. The alias name has to meet the following requirements:</p>
        /// <ul>
        /// <li> <p>Formatted as a fully-qualified domain name (FQDN), <code>hostname.domain</code>, for example, <code>accounting.example.com</code>.</p> </li>
        /// <li> <p>Can contain alphanumeric characters, the underscore (_), and the hyphen (-).</p> </li>
        /// <li> <p>Cannot start or end with a hyphen.</p> </li>
        /// <li> <p>Can start with a numeric.</p> </li>
        /// </ul>
        /// <p>For DNS names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in escape codes.</p>
        pub fn set_name(self, input: std::option::Option<std::string::String>) -> Self {
            Self { name: input, ..self }
        }
        /// <p>Describes the state of the DNS alias.</p>
        /// <ul>
        /// <li> <p>AVAILABLE - The DNS alias is associated with an Amazon FSx file system.</p> </li>
        /// <li> <p>CREATING - Amazon FSx is creating the DNS alias and associating it with the file system.</p> </li>
        /// <li> <p>CREATE_FAILED - Amazon FSx was unable to associate the DNS alias with the file system.</p> </li>
        /// <li> <p>DELETING - Amazon FSx is disassociating the DNS alias from the file system and deleting it.</p> </li>
        /// <li> <p>DELETE_FAILED - Amazon FSx was unable to disassociate the DNS alias from the file system.</p> </li>
        /// </ul>
        pub fn lifecycle(self, input: crate::model::AliasLifecycle) -> Self {
            Self {
                lifecycle: Some(input),
                ..self
            }
        }
        /// <p>Describes the state of the DNS alias.</p>
        /// <ul>
        /// <li> <p>AVAILABLE - The DNS alias is associated with an Amazon FSx file system.</p> </li>
        /// <li> <p>CREATING - Amazon FSx is creating the DNS alias and associating it with the file system.</p> </li>
        /// <li> <p>CREATE_FAILED - Amazon FSx was unable to associate the DNS alias with the file system.</p> </li>
        /// <li> <p>DELETING - Amazon FSx is disassociating the DNS alias from the file system and deleting it.</p> </li>
        /// <li> <p>DELETE_FAILED - Amazon FSx was unable to disassociate the DNS alias from the file system.</p> </li>
        /// </ul>
        pub fn set_lifecycle(
            self,
            input: std::option::Option<crate::model::AliasLifecycle>,
        ) -> Self {
            Self {
                lifecycle: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`Alias`](crate::model::Alias).
        pub fn build(self) -> crate::model::Alias {
            let Self { name, lifecycle } = self;
            crate::model::Alias { name, lifecycle }
        }
    }
}
impl Alias {
    /// Creates a new builder-style object to manufacture [`Alias`](crate::model::Alias).
    pub fn builder() -> crate::model::alias::Builder {
        // The builder derives `Default`; the return type drives the inference.
        Default::default()
    }
}

/// When writing a match expression against `AliasLifecycle`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let aliaslifecycle = unimplemented!();
/// match aliaslifecycle {
///     AliasLifecycle::Available => { /* ... */ },
///     AliasLifecycle::CreateFailed => { /* ... */ },
///     AliasLifecycle::Creating => { /* ... */ },
///     AliasLifecycle::DeleteFailed => { /* ... */ },
///     AliasLifecycle::Deleting => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `aliaslifecycle` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `AliasLifecycle::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `AliasLifecycle::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `AliasLifecycle::NewFeature` is defined.
/// Specifically, when `aliaslifecycle` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `AliasLifecycle::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum AliasLifecycle {
    #[allow(missing_docs)] // documentation missing in model
    Available,
    #[allow(missing_docs)] // documentation missing in model
    CreateFailed,
    #[allow(missing_docs)] // documentation missing in model
    Creating,
    #[allow(missing_docs)] // documentation missing in model
    DeleteFailed,
    #[allow(missing_docs)] // documentation missing in model
    Deleting,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for AliasLifecycle {
    fn from(s: &str) -> Self {
        // Any string outside the modeled set becomes the opaque `Unknown` variant.
        match s {
            "AVAILABLE" => Self::Available,
            "CREATE_FAILED" => Self::CreateFailed,
            "CREATING" => Self::Creating,
            "DELETE_FAILED" => Self::DeleteFailed,
            "DELETING" => Self::Deleting,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for AliasLifecycle {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Parsing never fails: unrecognized input maps to `Unknown`.
        Ok(Self::from(s))
    }
}
impl AliasLifecycle {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Available => "AVAILABLE",
            Self::CreateFailed => "CREATE_FAILED",
            Self::Creating => "CREATING",
            Self::DeleteFailed => "DELETE_FAILED",
            Self::Deleting => "DELETING",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &[
            "AVAILABLE",
            "CREATE_FAILED",
            "CREATING",
            "DELETE_FAILED",
            "DELETING",
        ]
    }
}
impl AsRef<str> for AliasLifecycle {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// When writing a match expression against `FileSystemMaintenanceOperation`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let filesystemmaintenanceoperation = unimplemented!();
/// match filesystemmaintenanceoperation {
///     FileSystemMaintenanceOperation::BackingUp => { /* ... */ },
///     FileSystemMaintenanceOperation::Patching => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `filesystemmaintenanceoperation` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `FileSystemMaintenanceOperation::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `FileSystemMaintenanceOperation::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `FileSystemMaintenanceOperation::NewFeature` is defined.
/// Specifically, when `filesystemmaintenanceoperation` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `FileSystemMaintenanceOperation::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
/// <p>An enumeration specifying the currently ongoing maintenance operation.</p>
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum FileSystemMaintenanceOperation {
    #[allow(missing_docs)] // documentation missing in model
    BackingUp,
    #[allow(missing_docs)] // documentation missing in model
    Patching,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for FileSystemMaintenanceOperation {
    fn from(s: &str) -> Self {
        // Any string outside the modeled set becomes the opaque `Unknown` variant.
        match s {
            "BACKING_UP" => Self::BackingUp,
            "PATCHING" => Self::Patching,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for FileSystemMaintenanceOperation {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Parsing never fails: unrecognized input maps to `Unknown`.
        Ok(Self::from(s))
    }
}
impl FileSystemMaintenanceOperation {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::BackingUp => "BACKING_UP",
            Self::Patching => "PATCHING",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["BACKING_UP", "PATCHING"]
    }
}
impl AsRef<str> for FileSystemMaintenanceOperation {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// When writing a match expression against `WindowsDeploymentType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let windowsdeploymenttype = unimplemented!();
/// match windowsdeploymenttype {
///     WindowsDeploymentType::MultiAz1 => { /* ... */ },
///     WindowsDeploymentType::SingleAz1 => { /* ... */ },
///     WindowsDeploymentType::SingleAz2 => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `windowsdeploymenttype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `WindowsDeploymentType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `WindowsDeploymentType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `WindowsDeploymentType::NewFeature` is defined.
/// Specifically, when `windowsdeploymenttype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `WindowsDeploymentType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum WindowsDeploymentType {
    #[allow(missing_docs)] // documentation missing in model
    MultiAz1,
    #[allow(missing_docs)] // documentation missing in model
    SingleAz1,
    #[allow(missing_docs)] // documentation missing in model
    SingleAz2,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for WindowsDeploymentType {
    fn from(s: &str) -> Self {
        // Any string outside the modeled set becomes the opaque `Unknown` variant.
        match s {
            "MULTI_AZ_1" => Self::MultiAz1,
            "SINGLE_AZ_1" => Self::SingleAz1,
            "SINGLE_AZ_2" => Self::SingleAz2,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for WindowsDeploymentType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Parsing never fails: unrecognized input maps to `Unknown`.
        Ok(Self::from(s))
    }
}
impl WindowsDeploymentType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::MultiAz1 => "MULTI_AZ_1",
            Self::SingleAz1 => "SINGLE_AZ_1",
            Self::SingleAz2 => "SINGLE_AZ_2",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["MULTI_AZ_1", "SINGLE_AZ_1", "SINGLE_AZ_2"]
    }
}
impl AsRef<str> for WindowsDeploymentType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>The configuration of the self-managed Microsoft Active Directory (AD) directory to which the Windows File Server or ONTAP storage virtual machine (SVM) instance is joined.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct SelfManagedActiveDirectoryAttributes {
    // NOTE: fields are `#[doc(hidden)]`; the generated accessor methods and the
    // `self_managed_active_directory_attributes::Builder` are the intended public API.
    /// <p>The fully qualified domain name of the self-managed AD directory.</p>
    #[doc(hidden)]
    pub domain_name: std::option::Option<std::string::String>,
    /// <p>The fully qualified distinguished name of the organizational unit within the self-managed AD directory to which the Windows File Server or ONTAP storage virtual machine (SVM) instance is joined.</p>
    #[doc(hidden)]
    pub organizational_unit_distinguished_name: std::option::Option<std::string::String>,
    /// <p>The name of the domain group whose members have administrative privileges for the FSx file system.</p>
    #[doc(hidden)]
    pub file_system_administrators_group: std::option::Option<std::string::String>,
    /// <p>The user name for the service account on your self-managed AD domain that FSx uses to join to your AD domain.</p>
    #[doc(hidden)]
    pub user_name: std::option::Option<std::string::String>,
    /// <p>A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.</p>
    #[doc(hidden)]
    pub dns_ips: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl SelfManagedActiveDirectoryAttributes {
    /// <p>The fully qualified domain name of the self-managed AD directory.</p>
    pub fn domain_name(&self) -> std::option::Option<&str> {
        // Borrow the owned String and expose it as `&str`.
        self.domain_name.as_ref().map(|s| s.as_str())
    }
    /// <p>The fully qualified distinguished name of the organizational unit within the self-managed AD directory to which the Windows File Server or ONTAP storage virtual machine (SVM) instance is joined.</p>
    pub fn organizational_unit_distinguished_name(&self) -> std::option::Option<&str> {
        self.organizational_unit_distinguished_name
            .as_ref()
            .map(|s| s.as_str())
    }
    /// <p>The name of the domain group whose members have administrative privileges for the FSx file system.</p>
    pub fn file_system_administrators_group(&self) -> std::option::Option<&str> {
        self.file_system_administrators_group
            .as_ref()
            .map(|s| s.as_str())
    }
    /// <p>The user name for the service account on your self-managed AD domain that FSx uses to join to your AD domain.</p>
    pub fn user_name(&self) -> std::option::Option<&str> {
        self.user_name.as_ref().map(|s| s.as_str())
    }
    /// <p>A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.</p>
    pub fn dns_ips(&self) -> std::option::Option<&[std::string::String]> {
        // Borrow the owned Vec and expose it as a slice.
        self.dns_ips.as_ref().map(|v| v.as_slice())
    }
}
/// See [`SelfManagedActiveDirectoryAttributes`](crate::model::SelfManagedActiveDirectoryAttributes).
pub mod self_managed_active_directory_attributes {

    /// A builder for [`SelfManagedActiveDirectoryAttributes`](crate::model::SelfManagedActiveDirectoryAttributes).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) domain_name: std::option::Option<std::string::String>,
        pub(crate) organizational_unit_distinguished_name: std::option::Option<std::string::String>,
        pub(crate) file_system_administrators_group: std::option::Option<std::string::String>,
        pub(crate) user_name: std::option::Option<std::string::String>,
        pub(crate) dns_ips: std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// <p>The fully qualified domain name of the self-managed AD directory.</p>
        pub fn domain_name(self, input: impl Into<std::string::String>) -> Self {
            // Struct-update syntax: replace `domain_name`, keep every other field.
            Self {
                domain_name: Some(input.into()),
                ..self
            }
        }
        /// <p>The fully qualified domain name of the self-managed AD directory.</p>
        pub fn set_domain_name(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                domain_name: input,
                ..self
            }
        }
        /// <p>The fully qualified distinguished name of the organizational unit within the self-managed AD directory to which the Windows File Server or ONTAP storage virtual machine (SVM) instance is joined.</p>
        pub fn organizational_unit_distinguished_name(
            self,
            input: impl Into<std::string::String>,
        ) -> Self {
            Self {
                organizational_unit_distinguished_name: Some(input.into()),
                ..self
            }
        }
        /// <p>The fully qualified distinguished name of the organizational unit within the self-managed AD directory to which the Windows File Server or ONTAP storage virtual machine (SVM) instance is joined.</p>
        pub fn set_organizational_unit_distinguished_name(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                organizational_unit_distinguished_name: input,
                ..self
            }
        }
        /// <p>The name of the domain group whose members have administrative privileges for the FSx file system.</p>
        pub fn file_system_administrators_group(
            self,
            input: impl Into<std::string::String>,
        ) -> Self {
            Self {
                file_system_administrators_group: Some(input.into()),
                ..self
            }
        }
        /// <p>The name of the domain group whose members have administrative privileges for the FSx file system.</p>
        pub fn set_file_system_administrators_group(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                file_system_administrators_group: input,
                ..self
            }
        }
        /// <p>The user name for the service account on your self-managed AD domain that FSx uses to join to your AD domain.</p>
        pub fn user_name(self, input: impl Into<std::string::String>) -> Self {
            Self {
                user_name: Some(input.into()),
                ..self
            }
        }
        /// <p>The user name for the service account on your self-managed AD domain that FSx uses to join to your AD domain.</p>
        pub fn set_user_name(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                user_name: input,
                ..self
            }
        }
        /// Appends an item to `dns_ips`.
        ///
        /// To override the contents of this collection use [`set_dns_ips`](Self::set_dns_ips).
        ///
        /// <p>A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.</p>
        pub fn dns_ips(mut self, input: impl Into<std::string::String>) -> Self {
            // Lazily create the backing Vec on the first append, then push in place.
            self.dns_ips.get_or_insert_with(Vec::new).push(input.into());
            self
        }
        /// <p>A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.</p>
        pub fn set_dns_ips(
            self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            Self {
                dns_ips: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`SelfManagedActiveDirectoryAttributes`](crate::model::SelfManagedActiveDirectoryAttributes).
        pub fn build(self) -> crate::model::SelfManagedActiveDirectoryAttributes {
            // Move every field out of the builder in one destructuring step.
            let Self {
                domain_name,
                organizational_unit_distinguished_name,
                file_system_administrators_group,
                user_name,
                dns_ips,
            } = self;
            crate::model::SelfManagedActiveDirectoryAttributes {
                domain_name,
                organizational_unit_distinguished_name,
                file_system_administrators_group,
                user_name,
                dns_ips,
            }
        }
    }
}
impl SelfManagedActiveDirectoryAttributes {
    /// Creates a new builder-style object to manufacture [`SelfManagedActiveDirectoryAttributes`](crate::model::SelfManagedActiveDirectoryAttributes).
    pub fn builder() -> crate::model::self_managed_active_directory_attributes::Builder {
        // The builder derives `Default`; an empty builder is just its default value.
        std::default::Default::default()
    }
}

/// When writing a match expression against `StorageType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let storagetype = unimplemented!();
/// match storagetype {
///     StorageType::Hdd => { /* ... */ },
///     StorageType::Ssd => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `storagetype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `StorageType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `StorageType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `StorageType::NewFeature` is defined.
/// Specifically, when `storagetype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `StorageType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
/// <p>The storage type for your Amazon FSx file system.</p>
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum StorageType {
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"HDD"` (see the `From<&str>`/`as_str` impls below).
    Hdd,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"SSD"`.
    Ssd,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for StorageType {
    fn from(s: &str) -> Self {
        // Unrecognized values are preserved verbatim in `Unknown` so newer
        // service enum values round-trip instead of failing to parse.
        match s {
            "HDD" => Self::Hdd,
            "SSD" => Self::Ssd,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for StorageType {
    type Err = std::convert::Infallible;

    // Delegates to `From<&str>`; parsing can never fail.
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(s.into())
    }
}
impl StorageType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Hdd => "HDD",
            Self::Ssd => "SSD",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["HDD", "SSD"]
    }
}
impl AsRef<str> for StorageType {
    fn as_ref(&self) -> &str {
        Self::as_str(self)
    }
}

/// <p>A structure providing details of any failures that occurred.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct FileSystemFailureDetails {
    /// <p>A message describing any failures that occurred.</p>
    // NOTE(review): presumably `None` when the file system has no failure —
    // confirm against actual service responses.
    #[doc(hidden)]
    pub message: std::option::Option<std::string::String>,
}
impl FileSystemFailureDetails {
    /// <p>A message describing any failures that occurred.</p>
    pub fn message(&self) -> std::option::Option<&str> {
        // Borrow the owned String (if any) as a &str.
        self.message.as_ref().map(String::as_str)
    }
}
/// See [`FileSystemFailureDetails`](crate::model::FileSystemFailureDetails).
pub mod file_system_failure_details {

    /// A builder for [`FileSystemFailureDetails`](crate::model::FileSystemFailureDetails).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) message: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>A message describing any failures that occurred.</p>
        pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
            // Convert eagerly, then store the owned value.
            let message = input.into();
            self.message = Some(message);
            self
        }
        /// <p>A message describing any failures that occurred.</p>
        pub fn set_message(self, input: std::option::Option<std::string::String>) -> Self {
            // Replace (or clear, when `None`) any previously set value.
            Self { message: input }
        }
        /// Consumes the builder and constructs a [`FileSystemFailureDetails`](crate::model::FileSystemFailureDetails).
        pub fn build(self) -> crate::model::FileSystemFailureDetails {
            crate::model::FileSystemFailureDetails {
                message: self.message,
            }
        }
    }
}
impl FileSystemFailureDetails {
    /// Creates a new builder-style object to manufacture [`FileSystemFailureDetails`](crate::model::FileSystemFailureDetails).
    pub fn builder() -> crate::model::file_system_failure_details::Builder {
        // The builder derives `Default`; an empty builder is just its default value.
        std::default::Default::default()
    }
}

/// When writing a match expression against `FileSystemLifecycle`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let filesystemlifecycle = unimplemented!();
/// match filesystemlifecycle {
///     FileSystemLifecycle::Available => { /* ... */ },
///     FileSystemLifecycle::Creating => { /* ... */ },
///     FileSystemLifecycle::Deleting => { /* ... */ },
///     FileSystemLifecycle::Failed => { /* ... */ },
///     FileSystemLifecycle::Misconfigured => { /* ... */ },
///     FileSystemLifecycle::MisconfiguredUnavailable => { /* ... */ },
///     FileSystemLifecycle::Updating => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `filesystemlifecycle` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `FileSystemLifecycle::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `FileSystemLifecycle::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `FileSystemLifecycle::NewFeature` is defined.
/// Specifically, when `filesystemlifecycle` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `FileSystemLifecycle::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
/// <p>The lifecycle status of the file system.</p>
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum FileSystemLifecycle {
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"AVAILABLE"` (see the `From<&str>`/`as_str` impls below).
    Available,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"CREATING"`.
    Creating,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"DELETING"`.
    Deleting,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"FAILED"`.
    Failed,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"MISCONFIGURED"`.
    Misconfigured,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"MISCONFIGURED_UNAVAILABLE"`.
    MisconfiguredUnavailable,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"UPDATING"`.
    Updating,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for FileSystemLifecycle {
    fn from(s: &str) -> Self {
        // Unrecognized values are preserved verbatim in `Unknown` so newer
        // service enum values round-trip instead of failing to parse.
        match s {
            "AVAILABLE" => Self::Available,
            "CREATING" => Self::Creating,
            "DELETING" => Self::Deleting,
            "FAILED" => Self::Failed,
            "MISCONFIGURED" => Self::Misconfigured,
            "MISCONFIGURED_UNAVAILABLE" => Self::MisconfiguredUnavailable,
            "UPDATING" => Self::Updating,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for FileSystemLifecycle {
    type Err = std::convert::Infallible;

    // Delegates to `From<&str>`; parsing can never fail.
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(s.into())
    }
}
impl FileSystemLifecycle {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Available => "AVAILABLE",
            Self::Creating => "CREATING",
            Self::Deleting => "DELETING",
            Self::Failed => "FAILED",
            Self::Misconfigured => "MISCONFIGURED",
            Self::MisconfiguredUnavailable => "MISCONFIGURED_UNAVAILABLE",
            Self::Updating => "UPDATING",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &[
            "AVAILABLE",
            "CREATING",
            "DELETING",
            "FAILED",
            "MISCONFIGURED",
            "MISCONFIGURED_UNAVAILABLE",
            "UPDATING",
        ]
    }
}
impl AsRef<str> for FileSystemLifecycle {
    fn as_ref(&self) -> &str {
        Self::as_str(self)
    }
}

/// When writing a match expression against `FileSystemType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let filesystemtype = unimplemented!();
/// match filesystemtype {
///     FileSystemType::Lustre => { /* ... */ },
///     FileSystemType::Ontap => { /* ... */ },
///     FileSystemType::Openzfs => { /* ... */ },
///     FileSystemType::Windows => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `filesystemtype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `FileSystemType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `FileSystemType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `FileSystemType::NewFeature` is defined.
/// Specifically, when `filesystemtype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `FileSystemType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
/// <p>The type of file system.</p>
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum FileSystemType {
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"LUSTRE"` (see the `From<&str>`/`as_str` impls below).
    Lustre,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"ONTAP"`.
    Ontap,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"OPENZFS"`.
    Openzfs,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"WINDOWS"`.
    Windows,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for FileSystemType {
    fn from(s: &str) -> Self {
        // Unrecognized values are preserved verbatim in `Unknown` so newer
        // service enum values round-trip instead of failing to parse.
        match s {
            "LUSTRE" => Self::Lustre,
            "ONTAP" => Self::Ontap,
            "OPENZFS" => Self::Openzfs,
            "WINDOWS" => Self::Windows,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for FileSystemType {
    type Err = std::convert::Infallible;

    // Delegates to `From<&str>`; parsing can never fail.
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(s.into())
    }
}
impl FileSystemType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Lustre => "LUSTRE",
            Self::Ontap => "ONTAP",
            Self::Openzfs => "OPENZFS",
            Self::Windows => "WINDOWS",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["LUSTRE", "ONTAP", "OPENZFS", "WINDOWS"]
    }
}
impl AsRef<str> for FileSystemType {
    fn as_ref(&self) -> &str {
        Self::as_str(self)
    }
}

/// When writing a match expression against `Status`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let status = unimplemented!();
/// match status {
///     Status::Completed => { /* ... */ },
///     Status::Failed => { /* ... */ },
///     Status::InProgress => { /* ... */ },
///     Status::Pending => { /* ... */ },
///     Status::UpdatedOptimizing => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `status` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `Status::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `Status::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `Status::NewFeature` is defined.
/// Specifically, when `status` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `Status::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
/// Progress status of an administrative action (see `AdministrativeActionType`).
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum Status {
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"COMPLETED"` (see the `From<&str>`/`as_str` impls below).
    Completed,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"FAILED"`.
    Failed,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"IN_PROGRESS"`.
    InProgress,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"PENDING"`.
    Pending,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"UPDATED_OPTIMIZING"`.
    UpdatedOptimizing,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for Status {
    fn from(s: &str) -> Self {
        // Unrecognized values are preserved verbatim in `Unknown` so newer
        // service enum values round-trip instead of failing to parse.
        match s {
            "COMPLETED" => Self::Completed,
            "FAILED" => Self::Failed,
            "IN_PROGRESS" => Self::InProgress,
            "PENDING" => Self::Pending,
            "UPDATED_OPTIMIZING" => Self::UpdatedOptimizing,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for Status {
    type Err = std::convert::Infallible;

    // Delegates to `From<&str>`; parsing can never fail.
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(s.into())
    }
}
impl Status {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Completed => "COMPLETED",
            Self::Failed => "FAILED",
            Self::InProgress => "IN_PROGRESS",
            Self::Pending => "PENDING",
            Self::UpdatedOptimizing => "UPDATED_OPTIMIZING",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &[
            "COMPLETED",
            "FAILED",
            "IN_PROGRESS",
            "PENDING",
            "UPDATED_OPTIMIZING",
        ]
    }
}
impl AsRef<str> for Status {
    fn as_ref(&self) -> &str {
        Self::as_str(self)
    }
}

/// When writing a match expression against `AdministrativeActionType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let administrativeactiontype = unimplemented!();
/// match administrativeactiontype {
///     AdministrativeActionType::FileSystemAliasAssociation => { /* ... */ },
///     AdministrativeActionType::FileSystemAliasDisassociation => { /* ... */ },
///     AdministrativeActionType::FileSystemUpdate => { /* ... */ },
///     AdministrativeActionType::ReleaseNfsV3Locks => { /* ... */ },
///     AdministrativeActionType::SnapshotUpdate => { /* ... */ },
///     AdministrativeActionType::StorageOptimization => { /* ... */ },
///     AdministrativeActionType::VolumeRestore => { /* ... */ },
///     AdministrativeActionType::VolumeUpdate => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `administrativeactiontype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `AdministrativeActionType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `AdministrativeActionType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `AdministrativeActionType::NewFeature` is defined.
/// Specifically, when `administrativeactiontype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `AdministrativeActionType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
/// <p>Describes the type of administrative action, as follows:</p>
/// <ul>
/// <li>
/// <p>
/// <code>FILE_SYSTEM_UPDATE</code> - A file system update administrative action
/// initiated from the Amazon FSx console, API
/// (<code>UpdateFileSystem</code>), or CLI
/// (<code>update-file-system</code>).</p>
/// </li>
/// <li>
/// <p>
/// <code>STORAGE_OPTIMIZATION</code> - After the <code>FILE_SYSTEM_UPDATE</code>
/// task to increase a file system's storage capacity has been completed
/// successfully, a <code>STORAGE_OPTIMIZATION</code> task starts. </p>
/// <ul>
/// <li>
/// <p>For Windows and ONTAP, storage optimization is the process of migrating the file system data
/// to newer larger disks.</p>
/// </li>
/// <li>
/// <p>For Lustre, storage optimization consists of rebalancing the data across the existing and
/// newly added file servers.</p>
/// </li>
/// </ul>
/// <p>You can track the storage-optimization progress using the
/// <code>ProgressPercent</code> property. When
/// <code>STORAGE_OPTIMIZATION</code> has been completed successfully, the
/// parent <code>FILE_SYSTEM_UPDATE</code> action status changes to
/// <code>COMPLETED</code>. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html">Managing
/// storage capacity</a> in the <i>Amazon FSx for Windows
/// File Server User Guide</i>, <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/managing-storage-capacity.html">Managing storage
/// and throughput capacity</a> in the <i>Amazon FSx for
/// Lustre User Guide</i>, and
/// <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-storage-capacity.html">Managing storage capacity and provisioned IOPS</a> in the <i>Amazon FSx for NetApp ONTAP User
/// Guide</i>.</p>
/// </li>
/// <li>
/// <p>
/// <code>FILE_SYSTEM_ALIAS_ASSOCIATION</code> - A file system update to associate a new Domain
/// Name System (DNS) alias with the file system. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/APIReference/API_AssociateFileSystemAliases.html">
/// AssociateFileSystemAliases</a>.</p>
/// </li>
/// <li>
/// <p>
/// <code>FILE_SYSTEM_ALIAS_DISASSOCIATION</code> - A file system update to disassociate a DNS alias from the file system.
/// For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/APIReference/API_DisassociateFileSystemAliases.html">DisassociateFileSystemAliases</a>.</p>
/// </li>
/// <li>
/// <p>
/// <code>VOLUME_UPDATE</code> - A volume update to an Amazon FSx for NetApp ONTAP or
/// Amazon FSx for OpenZFS volume initiated from the Amazon FSx
/// console, API (<code>UpdateVolume</code>), or CLI
/// (<code>update-volume</code>).</p>
/// </li>
/// <li>
/// <p>
/// <code>VOLUME_RESTORE</code> - An Amazon FSx for OpenZFS volume
/// is returned to the state saved by the specified snapshot, initiated from an
/// API (<code>RestoreVolumeFromSnapshot</code>) or CLI
/// (<code>restore-volume-from-snapshot</code>).</p>
/// </li>
/// <li>
/// <p>
/// <code>SNAPSHOT_UPDATE</code> - A snapshot update to an Amazon FSx for
/// OpenZFS volume initiated from the Amazon FSx console, API
/// (<code>UpdateSnapshot</code>), or CLI (<code>update-snapshot</code>).</p>
/// </li>
/// <li>
/// <p>
/// <code>RELEASE_NFS_V3_LOCKS</code> - Tracks the release of Network File System
/// (NFS) V3 locks on an Amazon FSx for OpenZFS file system.</p>
/// </li>
/// </ul>
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum AdministrativeActionType {
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"FILE_SYSTEM_ALIAS_ASSOCIATION"` (see the `From<&str>`/`as_str` impls below).
    FileSystemAliasAssociation,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"FILE_SYSTEM_ALIAS_DISASSOCIATION"`.
    FileSystemAliasDisassociation,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"FILE_SYSTEM_UPDATE"`.
    FileSystemUpdate,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"RELEASE_NFS_V3_LOCKS"`.
    ReleaseNfsV3Locks,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"SNAPSHOT_UPDATE"`.
    SnapshotUpdate,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"STORAGE_OPTIMIZATION"`.
    StorageOptimization,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"VOLUME_RESTORE"`.
    VolumeRestore,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"VOLUME_UPDATE"`.
    VolumeUpdate,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for AdministrativeActionType {
    fn from(s: &str) -> Self {
        // Unrecognized values are preserved verbatim in `Unknown` so newer
        // service enum values round-trip instead of failing to parse.
        match s {
            "FILE_SYSTEM_ALIAS_ASSOCIATION" => Self::FileSystemAliasAssociation,
            "FILE_SYSTEM_ALIAS_DISASSOCIATION" => Self::FileSystemAliasDisassociation,
            "FILE_SYSTEM_UPDATE" => Self::FileSystemUpdate,
            "RELEASE_NFS_V3_LOCKS" => Self::ReleaseNfsV3Locks,
            "SNAPSHOT_UPDATE" => Self::SnapshotUpdate,
            "STORAGE_OPTIMIZATION" => Self::StorageOptimization,
            "VOLUME_RESTORE" => Self::VolumeRestore,
            "VOLUME_UPDATE" => Self::VolumeUpdate,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for AdministrativeActionType {
    type Err = std::convert::Infallible;

    // Delegates to `From<&str>`; parsing can never fail.
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(s.into())
    }
}
impl AdministrativeActionType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::FileSystemAliasAssociation => "FILE_SYSTEM_ALIAS_ASSOCIATION",
            Self::FileSystemAliasDisassociation => "FILE_SYSTEM_ALIAS_DISASSOCIATION",
            Self::FileSystemUpdate => "FILE_SYSTEM_UPDATE",
            Self::ReleaseNfsV3Locks => "RELEASE_NFS_V3_LOCKS",
            Self::SnapshotUpdate => "SNAPSHOT_UPDATE",
            Self::StorageOptimization => "STORAGE_OPTIMIZATION",
            Self::VolumeRestore => "VOLUME_RESTORE",
            Self::VolumeUpdate => "VOLUME_UPDATE",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &[
            "FILE_SYSTEM_ALIAS_ASSOCIATION",
            "FILE_SYSTEM_ALIAS_DISASSOCIATION",
            "FILE_SYSTEM_UPDATE",
            "RELEASE_NFS_V3_LOCKS",
            "SNAPSHOT_UPDATE",
            "STORAGE_OPTIMIZATION",
            "VOLUME_RESTORE",
            "VOLUME_UPDATE",
        ]
    }
}
impl AsRef<str> for AdministrativeActionType {
    fn as_ref(&self) -> &str {
        Self::as_str(self)
    }
}

/// When writing a match expression against `VolumeType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let volumetype = unimplemented!();
/// match volumetype {
///     VolumeType::Ontap => { /* ... */ },
///     VolumeType::Openzfs => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `volumetype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `VolumeType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `VolumeType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `VolumeType::NewFeature` is defined.
/// Specifically, when `volumetype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `VolumeType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
/// The type of an Amazon FSx volume (ONTAP or OpenZFS).
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum VolumeType {
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"ONTAP"` (see the `From<&str>`/`as_str` impls below).
    Ontap,
    #[allow(missing_docs)] // documentation missing in model
    /// Serialized on the wire as `"OPENZFS"`.
    Openzfs,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for VolumeType {
    fn from(s: &str) -> Self {
        // Unrecognized values are preserved verbatim in `Unknown` so newer
        // service enum values round-trip instead of failing to parse.
        match s {
            "ONTAP" => Self::Ontap,
            "OPENZFS" => Self::Openzfs,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for VolumeType {
    type Err = std::convert::Infallible;

    // Delegates to `From<&str>`; parsing can never fail.
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(s.into())
    }
}
impl VolumeType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Ontap => "ONTAP",
            Self::Openzfs => "OPENZFS",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["ONTAP", "OPENZFS"]
    }
}
impl AsRef<str> for VolumeType {
    fn as_ref(&self) -> &str {
        Self::as_str(self)
    }
}

/// <p>The configuration of an Amazon FSx for NetApp ONTAP volume.</p>
// NOTE: every field below is `#[doc(hidden)]`; read values through the
// accessor methods on this type and populate them through its builder.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct OntapVolumeConfiguration {
    /// <p>Specifies the FlexCache endpoint type of the volume. Valid values are the following:</p>
    /// <ul>
    /// <li> <p> <code>NONE</code> specifies that the volume doesn't have a FlexCache configuration. <code>NONE</code> is the default.</p> </li>
    /// <li> <p> <code>ORIGIN</code> specifies that the volume is the origin volume for a FlexCache volume.</p> </li>
    /// <li> <p> <code>CACHE</code> specifies that the volume is a FlexCache volume.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub flex_cache_endpoint_type: std::option::Option<crate::model::FlexCacheEndpointType>,
    /// <p>Specifies the directory that network-attached storage (NAS) clients use to mount the volume, along with the storage virtual machine (SVM) Domain Name System (DNS) name or IP address. You can create a <code>JunctionPath</code> directly below a parent volume junction or on a directory within a volume. A <code>JunctionPath</code> for a volume named <code>vol3</code> might be <code>/vol1/vol2/vol3</code>, or <code>/vol1/dir2/vol3</code>, or even <code>/dir1/dir2/vol3</code>.</p>
    #[doc(hidden)]
    pub junction_path: std::option::Option<std::string::String>,
    /// <p>The security style for the volume, which can be <code>UNIX</code>, <code>NTFS</code>, or <code>MIXED</code>.</p>
    #[doc(hidden)]
    pub security_style: std::option::Option<crate::model::SecurityStyle>,
    /// <p>The configured size of the volume, in megabytes (MBs).</p>
    #[doc(hidden)]
    pub size_in_megabytes: std::option::Option<i32>,
    /// <p>The volume's storage efficiency setting.</p>
    #[doc(hidden)]
    pub storage_efficiency_enabled: std::option::Option<bool>,
    /// <p>The ID of the volume's storage virtual machine.</p>
    #[doc(hidden)]
    pub storage_virtual_machine_id: std::option::Option<std::string::String>,
    /// <p>A Boolean flag indicating whether this volume is the root volume for its storage virtual machine (SVM). Only one volume on an SVM can be the root volume. This value defaults to <code>false</code>. If this value is <code>true</code>, then this is the SVM root volume.</p>
    /// <p>This flag is useful when you're deleting an SVM, because you must first delete all non-root volumes. This flag, when set to <code>false</code>, helps you identify which volumes to delete before you can delete the SVM.</p>
    #[doc(hidden)]
    pub storage_virtual_machine_root: std::option::Option<bool>,
    /// <p>The volume's <code>TieringPolicy</code> setting.</p>
    #[doc(hidden)]
    pub tiering_policy: std::option::Option<crate::model::TieringPolicy>,
    /// <p>The volume's universally unique identifier (UUID).</p>
    #[doc(hidden)]
    pub uuid: std::option::Option<std::string::String>,
    /// <p>Specifies the type of volume. Valid values are the following:</p>
    /// <ul>
    /// <li> <p> <code>RW</code> specifies a read/write volume. <code>RW</code> is the default.</p> </li>
    /// <li> <p> <code>DP</code> specifies a data-protection volume. You can protect data by replicating it to data-protection mirror copies. If a disaster occurs, you can use these data-protection mirror copies to recover data.</p> </li>
    /// <li> <p> <code>LS</code> specifies a load-sharing mirror volume. A load-sharing mirror reduces the network traffic to a FlexVol volume by providing additional read-only access to clients.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub ontap_volume_type: std::option::Option<crate::model::OntapVolumeType>,
    /// <p>Specifies the snapshot policy for the volume. There are three built-in snapshot policies:</p>
    /// <ul>
    /// <li> <p> <code>default</code>: This is the default policy. A maximum of six hourly snapshots taken five minutes past the hour. A maximum of two daily snapshots taken Monday through Saturday at 10 minutes after midnight. A maximum of two weekly snapshots taken every Sunday at 15 minutes after midnight.</p> </li>
    /// <li> <p> <code>default-1weekly</code>: This policy is the same as the <code>default</code> policy except that it only retains one snapshot from the weekly schedule.</p> </li>
    /// <li> <p> <code>none</code>: This policy does not take any snapshots. This policy can be assigned to volumes to prevent automatic snapshots from being taken.</p> </li>
    /// </ul>
    /// <p>You can also provide the name of a custom policy that you created with the ONTAP CLI or REST API.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snapshots-ontap.html#snapshot-policies">Snapshot policies</a> in the <i>Amazon FSx for NetApp ONTAP User Guide</i>.</p>
    #[doc(hidden)]
    pub snapshot_policy: std::option::Option<std::string::String>,
    /// <p>A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to false. If it's set to true, all tags for the volume are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the volume, regardless of this value.</p>
    #[doc(hidden)]
    pub copy_tags_to_backups: std::option::Option<bool>,
}
impl OntapVolumeConfiguration {
    /// Returns the FlexCache endpoint type of the volume, if set.
    /// Valid values are `NONE` (the default), `ORIGIN`, and `CACHE`.
    pub fn flex_cache_endpoint_type(
        &self,
    ) -> std::option::Option<&crate::model::FlexCacheEndpointType> {
        self.flex_cache_endpoint_type.as_ref()
    }
    /// Returns the directory that network-attached storage (NAS) clients use to
    /// mount the volume (for example `/vol1/vol2/vol3`), if set.
    pub fn junction_path(&self) -> std::option::Option<&str> {
        self.junction_path.as_ref().map(|v| v.as_str())
    }
    /// Returns the security style for the volume (`UNIX`, `NTFS`, or `MIXED`), if set.
    pub fn security_style(&self) -> std::option::Option<&crate::model::SecurityStyle> {
        self.security_style.as_ref()
    }
    /// Returns the configured size of the volume, in megabytes (MBs), if set.
    pub fn size_in_megabytes(&self) -> std::option::Option<i32> {
        self.size_in_megabytes
    }
    /// Returns the volume's storage efficiency setting, if set.
    pub fn storage_efficiency_enabled(&self) -> std::option::Option<bool> {
        self.storage_efficiency_enabled
    }
    /// Returns the ID of the volume's storage virtual machine, if set.
    pub fn storage_virtual_machine_id(&self) -> std::option::Option<&str> {
        self.storage_virtual_machine_id.as_ref().map(|v| v.as_str())
    }
    /// Returns whether this volume is the root volume of its storage virtual
    /// machine (SVM), if set. Defaults to `false`; only one volume per SVM can
    /// be the root volume, and non-root volumes must be deleted before an SVM.
    pub fn storage_virtual_machine_root(&self) -> std::option::Option<bool> {
        self.storage_virtual_machine_root
    }
    /// Returns the volume's `TieringPolicy` setting, if set.
    pub fn tiering_policy(&self) -> std::option::Option<&crate::model::TieringPolicy> {
        self.tiering_policy.as_ref()
    }
    /// Returns the volume's universally unique identifier (UUID), if set.
    pub fn uuid(&self) -> std::option::Option<&str> {
        self.uuid.as_ref().map(|v| v.as_str())
    }
    /// Returns the volume type, if set: `RW` (read/write, the default),
    /// `DP` (data-protection), or `LS` (load-sharing mirror).
    pub fn ontap_volume_type(&self) -> std::option::Option<&crate::model::OntapVolumeType> {
        self.ontap_volume_type.as_ref()
    }
    /// Returns the snapshot policy for the volume, if set. Built-in policies
    /// are `default`, `default-1weekly`, and `none`; a custom ONTAP policy
    /// name may also appear here.
    pub fn snapshot_policy(&self) -> std::option::Option<&str> {
        self.snapshot_policy.as_ref().map(|v| v.as_str())
    }
    /// Returns whether tags for the volume are copied to backups, if set.
    /// Defaults to `false`.
    pub fn copy_tags_to_backups(&self) -> std::option::Option<bool> {
        self.copy_tags_to_backups
    }
}
/// See [`OntapVolumeConfiguration`](crate::model::OntapVolumeConfiguration).
pub mod ontap_volume_configuration {

    /// A builder for [`OntapVolumeConfiguration`](crate::model::OntapVolumeConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) flex_cache_endpoint_type:
            std::option::Option<crate::model::FlexCacheEndpointType>,
        pub(crate) junction_path: std::option::Option<std::string::String>,
        pub(crate) security_style: std::option::Option<crate::model::SecurityStyle>,
        pub(crate) size_in_megabytes: std::option::Option<i32>,
        pub(crate) storage_efficiency_enabled: std::option::Option<bool>,
        pub(crate) storage_virtual_machine_id: std::option::Option<std::string::String>,
        pub(crate) storage_virtual_machine_root: std::option::Option<bool>,
        pub(crate) tiering_policy: std::option::Option<crate::model::TieringPolicy>,
        pub(crate) uuid: std::option::Option<std::string::String>,
        pub(crate) ontap_volume_type: std::option::Option<crate::model::OntapVolumeType>,
        pub(crate) snapshot_policy: std::option::Option<std::string::String>,
        pub(crate) copy_tags_to_backups: std::option::Option<bool>,
    }
    // Each field has a value-setter that wraps the input in `Some` and
    // delegates to the corresponding `set_*` Option-setter.
    impl Builder {
        /// Sets the FlexCache endpoint type of the volume
        /// (`NONE` — the default, `ORIGIN`, or `CACHE`).
        pub fn flex_cache_endpoint_type(
            self,
            input: crate::model::FlexCacheEndpointType,
        ) -> Self {
            self.set_flex_cache_endpoint_type(Some(input))
        }
        /// Sets or clears the FlexCache endpoint type of the volume.
        pub fn set_flex_cache_endpoint_type(
            mut self,
            input: std::option::Option<crate::model::FlexCacheEndpointType>,
        ) -> Self {
            self.flex_cache_endpoint_type = input;
            self
        }
        /// Sets the directory that network-attached storage (NAS) clients use to
        /// mount the volume (for example `/vol1/vol2/vol3`).
        pub fn junction_path(self, input: impl Into<std::string::String>) -> Self {
            self.set_junction_path(Some(input.into()))
        }
        /// Sets or clears the junction path.
        pub fn set_junction_path(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                junction_path: input,
                ..self
            }
        }
        /// Sets the security style for the volume (`UNIX`, `NTFS`, or `MIXED`).
        pub fn security_style(self, input: crate::model::SecurityStyle) -> Self {
            self.set_security_style(Some(input))
        }
        /// Sets or clears the security style for the volume.
        pub fn set_security_style(
            mut self,
            input: std::option::Option<crate::model::SecurityStyle>,
        ) -> Self {
            self.security_style = input;
            self
        }
        /// Sets the configured size of the volume, in megabytes (MBs).
        pub fn size_in_megabytes(self, input: i32) -> Self {
            self.set_size_in_megabytes(Some(input))
        }
        /// Sets or clears the configured size of the volume, in megabytes (MBs).
        pub fn set_size_in_megabytes(mut self, input: std::option::Option<i32>) -> Self {
            self.size_in_megabytes = input;
            self
        }
        /// Sets the volume's storage efficiency setting.
        pub fn storage_efficiency_enabled(self, input: bool) -> Self {
            self.set_storage_efficiency_enabled(Some(input))
        }
        /// Sets or clears the volume's storage efficiency setting.
        pub fn set_storage_efficiency_enabled(mut self, input: std::option::Option<bool>) -> Self {
            self.storage_efficiency_enabled = input;
            self
        }
        /// Sets the ID of the volume's storage virtual machine.
        pub fn storage_virtual_machine_id(self, input: impl Into<std::string::String>) -> Self {
            self.set_storage_virtual_machine_id(Some(input.into()))
        }
        /// Sets or clears the ID of the volume's storage virtual machine.
        pub fn set_storage_virtual_machine_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.storage_virtual_machine_id = input;
            self
        }
        /// Sets whether this volume is the root volume for its storage virtual
        /// machine (SVM). Only one volume per SVM can be the root volume;
        /// the value defaults to `false`.
        pub fn storage_virtual_machine_root(self, input: bool) -> Self {
            self.set_storage_virtual_machine_root(Some(input))
        }
        /// Sets or clears the SVM-root flag.
        pub fn set_storage_virtual_machine_root(
            mut self,
            input: std::option::Option<bool>,
        ) -> Self {
            self.storage_virtual_machine_root = input;
            self
        }
        /// Sets the volume's `TieringPolicy` setting.
        pub fn tiering_policy(self, input: crate::model::TieringPolicy) -> Self {
            self.set_tiering_policy(Some(input))
        }
        /// Sets or clears the volume's `TieringPolicy` setting.
        pub fn set_tiering_policy(
            mut self,
            input: std::option::Option<crate::model::TieringPolicy>,
        ) -> Self {
            self.tiering_policy = input;
            self
        }
        /// Sets the volume's universally unique identifier (UUID).
        pub fn uuid(self, input: impl Into<std::string::String>) -> Self {
            self.set_uuid(Some(input.into()))
        }
        /// Sets or clears the volume's universally unique identifier (UUID).
        pub fn set_uuid(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.uuid = input;
            self
        }
        /// Sets the volume type: `RW` (read/write, the default),
        /// `DP` (data-protection), or `LS` (load-sharing mirror).
        pub fn ontap_volume_type(self, input: crate::model::OntapVolumeType) -> Self {
            self.set_ontap_volume_type(Some(input))
        }
        /// Sets or clears the volume type.
        pub fn set_ontap_volume_type(
            mut self,
            input: std::option::Option<crate::model::OntapVolumeType>,
        ) -> Self {
            self.ontap_volume_type = input;
            self
        }
        /// Sets the snapshot policy for the volume. Built-in policies are
        /// `default`, `default-1weekly`, and `none`; a custom policy created
        /// with the ONTAP CLI or REST API may also be named here. See
        /// <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snapshots-ontap.html#snapshot-policies">Snapshot policies</a>
        /// in the <i>Amazon FSx for NetApp ONTAP User Guide</i>.
        pub fn snapshot_policy(self, input: impl Into<std::string::String>) -> Self {
            self.set_snapshot_policy(Some(input.into()))
        }
        /// Sets or clears the snapshot policy for the volume.
        pub fn set_snapshot_policy(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.snapshot_policy = input;
            self
        }
        /// Sets whether tags for the volume should be copied to backups.
        /// Defaults to `false`; tags supplied explicitly on a backup request
        /// take precedence over copied volume tags.
        pub fn copy_tags_to_backups(self, input: bool) -> Self {
            self.set_copy_tags_to_backups(Some(input))
        }
        /// Sets or clears the copy-tags-to-backups flag.
        pub fn set_copy_tags_to_backups(mut self, input: std::option::Option<bool>) -> Self {
            self.copy_tags_to_backups = input;
            self
        }
        /// Consumes the builder and constructs a [`OntapVolumeConfiguration`](crate::model::OntapVolumeConfiguration).
        pub fn build(self) -> crate::model::OntapVolumeConfiguration {
            // Destructure so the compiler flags any field this mapping misses.
            let Self {
                flex_cache_endpoint_type,
                junction_path,
                security_style,
                size_in_megabytes,
                storage_efficiency_enabled,
                storage_virtual_machine_id,
                storage_virtual_machine_root,
                tiering_policy,
                uuid,
                ontap_volume_type,
                snapshot_policy,
                copy_tags_to_backups,
            } = self;
            crate::model::OntapVolumeConfiguration {
                flex_cache_endpoint_type,
                junction_path,
                security_style,
                size_in_megabytes,
                storage_efficiency_enabled,
                storage_virtual_machine_id,
                storage_virtual_machine_root,
                tiering_policy,
                uuid,
                ontap_volume_type,
                snapshot_policy,
                copy_tags_to_backups,
            }
        }
    }
}
impl OntapVolumeConfiguration {
    /// Creates a new builder-style object to manufacture [`OntapVolumeConfiguration`](crate::model::OntapVolumeConfiguration).
    pub fn builder() -> crate::model::ontap_volume_configuration::Builder {
        std::default::Default::default()
    }
}

/// When writing a match expression against `OntapVolumeType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let ontapvolumetype = unimplemented!();
/// match ontapvolumetype {
///     OntapVolumeType::Dp => { /* ... */ },
///     OntapVolumeType::Ls => { /* ... */ },
///     OntapVolumeType::Rw => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `ontapvolumetype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `OntapVolumeType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `OntapVolumeType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `OntapVolumeType::NewFeature` is defined.
/// Specifically, when `ontapvolumetype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `OntapVolumeType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum OntapVolumeType {
    /// A data-protection volume; wire value `"DP"`.
    #[allow(missing_docs)] // documentation missing in model
    Dp,
    /// A load-sharing mirror volume; wire value `"LS"`.
    #[allow(missing_docs)] // documentation missing in model
    Ls,
    /// A read/write volume (the service default); wire value `"RW"`.
    #[allow(missing_docs)] // documentation missing in model
    Rw,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for OntapVolumeType {
    /// Maps a raw wire string onto an `OntapVolumeType` variant. Unrecognized
    /// values are preserved in `Unknown` for forward compatibility with newer
    /// service versions.
    fn from(s: &str) -> Self {
        if s == "DP" {
            OntapVolumeType::Dp
        } else if s == "LS" {
            OntapVolumeType::Ls
        } else if s == "RW" {
            OntapVolumeType::Rw
        } else {
            OntapVolumeType::Unknown(crate::types::UnknownVariantValue(s.to_owned()))
        }
    }
}
impl std::str::FromStr for OntapVolumeType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(OntapVolumeType::from(s))
    }
}
impl OntapVolumeType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            OntapVolumeType::Dp => "DP",
            OntapVolumeType::Ls => "LS",
            OntapVolumeType::Rw => "RW",
            OntapVolumeType::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["DP", "LS", "RW"]
    }
}
impl AsRef<str> for OntapVolumeType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>Describes the data tiering policy for an ONTAP volume. When enabled, Amazon FSx for ONTAP's intelligent tiering automatically transitions a volume's data between the file system's primary storage and capacity pool storage based on your access patterns.</p>
/// <p>Valid tiering policies are the following:</p>
/// <ul>
/// <li> <p> <code>SNAPSHOT_ONLY</code> - (Default value) moves cold snapshots to the capacity pool storage tier.</p> </li>
/// </ul>
/// <ul>
/// <li> <p> <code>AUTO</code> - moves cold user data and snapshots to the capacity pool storage tier based on your access patterns.</p> </li>
/// </ul>
/// <ul>
/// <li> <p> <code>ALL</code> - moves all user data blocks in both the active file system and Snapshot copies to the storage pool tier.</p> </li>
/// </ul>
/// <ul>
/// <li> <p> <code>NONE</code> - keeps a volume's data in the primary storage tier, preventing it from being moved to the capacity pool tier.</p> </li>
/// </ul>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
// Both fields are optional; construct instances via [`TieringPolicy::builder`].
pub struct TieringPolicy {
    /// <p>Specifies the number of days that user data in a volume must remain inactive before it is considered "cold" and moved to the capacity pool. Used with the <code>AUTO</code> and <code>SNAPSHOT_ONLY</code> tiering policies. Enter a whole number between 2 and 183. Default values are 31 days for <code>AUTO</code> and 2 days for <code>SNAPSHOT_ONLY</code>.</p>
    // `None` leaves the period unset; per the docs above, the service-side defaults then apply.
    #[doc(hidden)]
    pub cooling_period: std::option::Option<i32>,
    /// <p>Specifies the tiering policy used to transition data. Default value is <code>SNAPSHOT_ONLY</code>.</p>
    /// <ul>
    /// <li> <p> <code>SNAPSHOT_ONLY</code> - moves cold snapshots to the capacity pool storage tier.</p> </li>
    /// <li> <p> <code>AUTO</code> - moves cold user data and snapshots to the capacity pool storage tier based on your access patterns.</p> </li>
    /// <li> <p> <code>ALL</code> - moves all user data blocks in both the active file system and Snapshot copies to the storage pool tier.</p> </li>
    /// <li> <p> <code>NONE</code> - keeps a volume's data in the primary storage tier, preventing it from being moved to the capacity pool tier.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub name: std::option::Option<crate::model::TieringPolicyName>,
}
impl TieringPolicy {
    /// <p>Specifies the number of days that user data in a volume must remain inactive before it is considered "cold" and moved to the capacity pool. Used with the <code>AUTO</code> and <code>SNAPSHOT_ONLY</code> tiering policies. Enter a whole number between 2 and 183. Default values are 31 days for <code>AUTO</code> and 2 days for <code>SNAPSHOT_ONLY</code>.</p>
    pub fn cooling_period(&self) -> std::option::Option<i32> {
        // `i32` is `Copy`, so the stored option is returned by value.
        self.cooling_period
    }
    /// <p>Specifies the tiering policy used to transition data. Default value is <code>SNAPSHOT_ONLY</code>.</p>
    /// <ul>
    /// <li> <p> <code>SNAPSHOT_ONLY</code> - moves cold snapshots to the capacity pool storage tier.</p> </li>
    /// <li> <p> <code>AUTO</code> - moves cold user data and snapshots to the capacity pool storage tier based on your access patterns.</p> </li>
    /// <li> <p> <code>ALL</code> - moves all user data blocks in both the active file system and Snapshot copies to the storage pool tier.</p> </li>
    /// <li> <p> <code>NONE</code> - keeps a volume's data in the primary storage tier, preventing it from being moved to the capacity pool tier.</p> </li>
    /// </ul>
    pub fn name(&self) -> std::option::Option<&crate::model::TieringPolicyName> {
        match &self.name {
            Some(name) => Some(name),
            None => None,
        }
    }
}
/// See [`TieringPolicy`](crate::model::TieringPolicy).
pub mod tiering_policy {

    /// A builder for [`TieringPolicy`](crate::model::TieringPolicy).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) cooling_period: std::option::Option<i32>,
        pub(crate) name: std::option::Option<crate::model::TieringPolicyName>,
    }
    impl Builder {
        /// <p>Specifies the number of days that user data in a volume must remain inactive before it is considered "cold" and moved to the capacity pool. Used with the <code>AUTO</code> and <code>SNAPSHOT_ONLY</code> tiering policies. Enter a whole number between 2 and 183. Default values are 31 days for <code>AUTO</code> and 2 days for <code>SNAPSHOT_ONLY</code>.</p>
        pub fn cooling_period(self, input: i32) -> Self {
            // Convenience form: wraps the value and delegates to the setter.
            self.set_cooling_period(Some(input))
        }
        /// <p>Specifies the number of days that user data in a volume must remain inactive before it is considered "cold" and moved to the capacity pool. Used with the <code>AUTO</code> and <code>SNAPSHOT_ONLY</code> tiering policies. Enter a whole number between 2 and 183. Default values are 31 days for <code>AUTO</code> and 2 days for <code>SNAPSHOT_ONLY</code>.</p>
        pub fn set_cooling_period(self, input: std::option::Option<i32>) -> Self {
            Builder {
                cooling_period: input,
                ..self
            }
        }
        /// <p>Specifies the tiering policy used to transition data. Default value is <code>SNAPSHOT_ONLY</code>.</p>
        /// <ul>
        /// <li> <p> <code>SNAPSHOT_ONLY</code> - moves cold snapshots to the capacity pool storage tier.</p> </li>
        /// <li> <p> <code>AUTO</code> - moves cold user data and snapshots to the capacity pool storage tier based on your access patterns.</p> </li>
        /// <li> <p> <code>ALL</code> - moves all user data blocks in both the active file system and Snapshot copies to the storage pool tier.</p> </li>
        /// <li> <p> <code>NONE</code> - keeps a volume's data in the primary storage tier, preventing it from being moved to the capacity pool tier.</p> </li>
        /// </ul>
        pub fn name(self, input: crate::model::TieringPolicyName) -> Self {
            // Convenience form: wraps the value and delegates to the setter.
            self.set_name(Some(input))
        }
        /// <p>Specifies the tiering policy used to transition data. Default value is <code>SNAPSHOT_ONLY</code>.</p>
        /// <ul>
        /// <li> <p> <code>SNAPSHOT_ONLY</code> - moves cold snapshots to the capacity pool storage tier.</p> </li>
        /// <li> <p> <code>AUTO</code> - moves cold user data and snapshots to the capacity pool storage tier based on your access patterns.</p> </li>
        /// <li> <p> <code>ALL</code> - moves all user data blocks in both the active file system and Snapshot copies to the storage pool tier.</p> </li>
        /// <li> <p> <code>NONE</code> - keeps a volume's data in the primary storage tier, preventing it from being moved to the capacity pool tier.</p> </li>
        /// </ul>
        pub fn set_name(
            self,
            input: std::option::Option<crate::model::TieringPolicyName>,
        ) -> Self {
            Builder { name: input, ..self }
        }
        /// Consumes the builder and constructs a [`TieringPolicy`](crate::model::TieringPolicy).
        pub fn build(self) -> crate::model::TieringPolicy {
            let Builder {
                cooling_period,
                name,
            } = self;
            crate::model::TieringPolicy {
                cooling_period,
                name,
            }
        }
    }
}
impl TieringPolicy {
    /// Creates a new builder-style object to manufacture [`TieringPolicy`](crate::model::TieringPolicy).
    pub fn builder() -> crate::model::tiering_policy::Builder {
        // The return type drives inference to `Builder::default()`.
        std::default::Default::default()
    }
}

/// When writing a match expression against `TieringPolicyName`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let tieringpolicyname = unimplemented!();
/// match tieringpolicyname {
///     TieringPolicyName::All => { /* ... */ },
///     TieringPolicyName::Auto => { /* ... */ },
///     TieringPolicyName::None => { /* ... */ },
///     TieringPolicyName::SnapshotOnly => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `tieringpolicyname` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `TieringPolicyName::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `TieringPolicyName::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `TieringPolicyName::NewFeature` is defined.
/// Specifically, when `tieringpolicyname` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `TieringPolicyName::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum TieringPolicyName {
    /// `ALL` - moves all user data blocks in both the active file system and Snapshot copies to the storage pool tier.
    #[allow(missing_docs)] // variant description taken from the `TieringPolicy::name` field docs
    All,
    /// `AUTO` - moves cold user data and snapshots to the capacity pool storage tier based on your access patterns.
    #[allow(missing_docs)] // variant description taken from the `TieringPolicy::name` field docs
    Auto,
    /// `NONE` - keeps a volume's data in the primary storage tier, preventing it from being moved to the capacity pool tier.
    #[allow(missing_docs)] // variant description taken from the `TieringPolicy::name` field docs
    None,
    /// `SNAPSHOT_ONLY` - (default) moves cold snapshots to the capacity pool storage tier.
    #[allow(missing_docs)] // variant description taken from the `TieringPolicy::name` field docs
    SnapshotOnly,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for TieringPolicyName {
    /// Parses the wire string into the matching variant; anything
    /// unrecognized is preserved verbatim in `Unknown`.
    fn from(s: &str) -> Self {
        match s {
            "SNAPSHOT_ONLY" => Self::SnapshotOnly,
            "AUTO" => Self::Auto,
            "ALL" => Self::All,
            "NONE" => Self::None,
            unrecognized => {
                Self::Unknown(crate::types::UnknownVariantValue(unrecognized.to_owned()))
            }
        }
    }
}
impl std::str::FromStr for TieringPolicyName {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(TieringPolicyName::from(s))
    }
}
impl TieringPolicyName {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            TieringPolicyName::All => "ALL",
            TieringPolicyName::Auto => "AUTO",
            TieringPolicyName::None => "NONE",
            TieringPolicyName::SnapshotOnly => "SNAPSHOT_ONLY",
            TieringPolicyName::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["ALL", "AUTO", "NONE", "SNAPSHOT_ONLY"]
    }
}
impl AsRef<str> for TieringPolicyName {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// When writing a match expression against `SecurityStyle`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let securitystyle = unimplemented!();
/// match securitystyle {
///     SecurityStyle::Mixed => { /* ... */ },
///     SecurityStyle::Ntfs => { /* ... */ },
///     SecurityStyle::Unix => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `securitystyle` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `SecurityStyle::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `SecurityStyle::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `SecurityStyle::NewFeature` is defined.
/// Specifically, when `securitystyle` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `SecurityStyle::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum SecurityStyle {
    /// Wire value `MIXED`. NOTE(review): variant semantics are not documented in the model; confirm against the Amazon FSx API reference.
    #[allow(missing_docs)] // documentation missing in model
    Mixed,
    /// Wire value `NTFS`. NOTE(review): variant semantics are not documented in the model; confirm against the Amazon FSx API reference.
    #[allow(missing_docs)] // documentation missing in model
    Ntfs,
    /// Wire value `UNIX`. NOTE(review): variant semantics are not documented in the model; confirm against the Amazon FSx API reference.
    #[allow(missing_docs)] // documentation missing in model
    Unix,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for SecurityStyle {
    /// Parses the wire string into the matching variant; anything
    /// unrecognized is preserved verbatim in `Unknown`.
    fn from(s: &str) -> Self {
        match s {
            "UNIX" => Self::Unix,
            "NTFS" => Self::Ntfs,
            "MIXED" => Self::Mixed,
            unrecognized => {
                Self::Unknown(crate::types::UnknownVariantValue(unrecognized.to_owned()))
            }
        }
    }
}
impl std::str::FromStr for SecurityStyle {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(SecurityStyle::from(s))
    }
}
impl SecurityStyle {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            SecurityStyle::Mixed => "MIXED",
            SecurityStyle::Ntfs => "NTFS",
            SecurityStyle::Unix => "UNIX",
            SecurityStyle::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["MIXED", "NTFS", "UNIX"]
    }
}
impl AsRef<str> for SecurityStyle {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// When writing a match expression against `FlexCacheEndpointType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let flexcacheendpointtype = unimplemented!();
/// match flexcacheendpointtype {
///     FlexCacheEndpointType::Cache => { /* ... */ },
///     FlexCacheEndpointType::None => { /* ... */ },
///     FlexCacheEndpointType::Origin => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `flexcacheendpointtype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `FlexCacheEndpointType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `FlexCacheEndpointType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `FlexCacheEndpointType::NewFeature` is defined.
/// Specifically, when `flexcacheendpointtype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `FlexCacheEndpointType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum FlexCacheEndpointType {
    /// Wire value `CACHE`. NOTE(review): variant semantics are not documented in the model; confirm against the Amazon FSx API reference.
    #[allow(missing_docs)] // documentation missing in model
    Cache,
    /// Wire value `NONE`. NOTE(review): variant semantics are not documented in the model; confirm against the Amazon FSx API reference.
    #[allow(missing_docs)] // documentation missing in model
    None,
    /// Wire value `ORIGIN`. NOTE(review): variant semantics are not documented in the model; confirm against the Amazon FSx API reference.
    #[allow(missing_docs)] // documentation missing in model
    Origin,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for FlexCacheEndpointType {
    /// Parses the wire string into the matching variant; anything
    /// unrecognized is preserved verbatim in `Unknown`.
    fn from(s: &str) -> Self {
        match s {
            "NONE" => Self::None,
            "ORIGIN" => Self::Origin,
            "CACHE" => Self::Cache,
            unrecognized => {
                Self::Unknown(crate::types::UnknownVariantValue(unrecognized.to_owned()))
            }
        }
    }
}
impl std::str::FromStr for FlexCacheEndpointType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(FlexCacheEndpointType::from(s))
    }
}
impl FlexCacheEndpointType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            FlexCacheEndpointType::Cache => "CACHE",
            FlexCacheEndpointType::None => "NONE",
            FlexCacheEndpointType::Origin => "ORIGIN",
            FlexCacheEndpointType::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["CACHE", "NONE", "ORIGIN"]
    }
}
impl AsRef<str> for FlexCacheEndpointType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// When writing a match expression against `VolumeLifecycle`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let volumelifecycle = unimplemented!();
/// match volumelifecycle {
///     VolumeLifecycle::Available => { /* ... */ },
///     VolumeLifecycle::Created => { /* ... */ },
///     VolumeLifecycle::Creating => { /* ... */ },
///     VolumeLifecycle::Deleting => { /* ... */ },
///     VolumeLifecycle::Failed => { /* ... */ },
///     VolumeLifecycle::Misconfigured => { /* ... */ },
///     VolumeLifecycle::Pending => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `volumelifecycle` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `VolumeLifecycle::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `VolumeLifecycle::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `VolumeLifecycle::NewFeature` is defined.
/// Specifically, when `volumelifecycle` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `VolumeLifecycle::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum VolumeLifecycle {
    /// `AVAILABLE` - The volume is fully available for use.
    #[allow(missing_docs)] // variant description taken from the `Volume::lifecycle` field docs
    Available,
    /// `CREATED` - The volume has been created.
    #[allow(missing_docs)] // variant description taken from the `Volume::lifecycle` field docs
    Created,
    /// `CREATING` - Amazon FSx is creating the new volume.
    #[allow(missing_docs)] // variant description taken from the `Volume::lifecycle` field docs
    Creating,
    /// `DELETING` - Amazon FSx is deleting an existing volume.
    #[allow(missing_docs)] // variant description taken from the `Volume::lifecycle` field docs
    Deleting,
    /// `FAILED` - Amazon FSx was unable to create the volume.
    #[allow(missing_docs)] // variant description taken from the `Volume::lifecycle` field docs
    Failed,
    /// `MISCONFIGURED` - The volume is in a failed but recoverable state.
    #[allow(missing_docs)] // variant description taken from the `Volume::lifecycle` field docs
    Misconfigured,
    /// `PENDING` - Amazon FSx hasn't started creating the volume.
    #[allow(missing_docs)] // variant description taken from the `Volume::lifecycle` field docs
    Pending,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for VolumeLifecycle {
    /// Parses the wire string into the matching variant; anything
    /// unrecognized is preserved verbatim in `Unknown`.
    fn from(s: &str) -> Self {
        match s {
            "PENDING" => Self::Pending,
            "CREATING" => Self::Creating,
            "CREATED" => Self::Created,
            "AVAILABLE" => Self::Available,
            "MISCONFIGURED" => Self::Misconfigured,
            "DELETING" => Self::Deleting,
            "FAILED" => Self::Failed,
            unrecognized => {
                Self::Unknown(crate::types::UnknownVariantValue(unrecognized.to_owned()))
            }
        }
    }
}
impl std::str::FromStr for VolumeLifecycle {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(VolumeLifecycle::from(s))
    }
}
impl VolumeLifecycle {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            VolumeLifecycle::Available => "AVAILABLE",
            VolumeLifecycle::Created => "CREATED",
            VolumeLifecycle::Creating => "CREATING",
            VolumeLifecycle::Deleting => "DELETING",
            VolumeLifecycle::Failed => "FAILED",
            VolumeLifecycle::Misconfigured => "MISCONFIGURED",
            VolumeLifecycle::Pending => "PENDING",
            VolumeLifecycle::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &[
            "AVAILABLE",
            "CREATED",
            "CREATING",
            "DELETING",
            "FAILED",
            "MISCONFIGURED",
            "PENDING",
        ]
    }
}
impl AsRef<str> for VolumeLifecycle {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>Used to specify changes to the OpenZFS configuration for the volume that you are updating.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
// All fields are optional update requests; per the field docs, a value of -1
// unsets the corresponding capacity setting.
pub struct UpdateOpenZfsVolumeConfiguration {
    /// <p>The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't reserve more storage than the parent volume has reserved. You can specify a value of <code>-1</code> to unset a volume's storage capacity reservation.</p>
    #[doc(hidden)]
    pub storage_capacity_reservation_gi_b: std::option::Option<i32>,
    /// <p>The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. You can specify a quota larger than the storage on the parent volume. You can specify a value of <code>-1</code> to unset a volume's storage capacity quota.</p>
    #[doc(hidden)]
    pub storage_capacity_quota_gi_b: std::option::Option<i32>,
    /// <p>Specifies the record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the default record size. Database workflows can benefit from a smaller record size, while streaming workflows can benefit from a larger record size. For additional guidance on when to set a custom record size, see <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#performance-tips-zfs"> Tips for maximizing performance</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>
    #[doc(hidden)]
    pub record_size_ki_b: std::option::Option<i32>,
    /// <p>Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code> by default.</p>
    /// <ul>
    /// <li> <p> <code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</p> </li>
    /// <li> <p> <code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage utilization.</p> </li>
    /// <li> <p> <code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub data_compression_type: std::option::Option<crate::model::OpenZfsDataCompressionType>,
    /// <p>The configuration object for mounting a Network File System (NFS) file system.</p>
    #[doc(hidden)]
    pub nfs_exports: std::option::Option<std::vec::Vec<crate::model::OpenZfsNfsExport>>,
    /// <p>An object specifying how much storage users or groups can use on the volume.</p>
    #[doc(hidden)]
    pub user_and_group_quotas:
        std::option::Option<std::vec::Vec<crate::model::OpenZfsUserOrGroupQuota>>,
    /// <p>A Boolean value indicating whether the volume is read-only.</p>
    #[doc(hidden)]
    pub read_only: std::option::Option<bool>,
}
impl UpdateOpenZfsVolumeConfiguration {
    /// <p>The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't reserve more storage than the parent volume has reserved. You can specify a value of <code>-1</code> to unset a volume's storage capacity reservation.</p>
    pub fn storage_capacity_reservation_gi_b(&self) -> std::option::Option<i32> {
        // `i32` is `Copy`, so the stored option is returned by value.
        self.storage_capacity_reservation_gi_b
    }
    /// <p>The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. You can specify a quota larger than the storage on the parent volume. You can specify a value of <code>-1</code> to unset a volume's storage capacity quota.</p>
    pub fn storage_capacity_quota_gi_b(&self) -> std::option::Option<i32> {
        self.storage_capacity_quota_gi_b
    }
    /// <p>Specifies the record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the default record size. Database workflows can benefit from a smaller record size, while streaming workflows can benefit from a larger record size. For additional guidance on when to set a custom record size, see <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#performance-tips-zfs"> Tips for maximizing performance</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>
    pub fn record_size_ki_b(&self) -> std::option::Option<i32> {
        self.record_size_ki_b
    }
    /// <p>Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code> by default.</p>
    /// <ul>
    /// <li> <p> <code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</p> </li>
    /// <li> <p> <code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage utilization.</p> </li>
    /// <li> <p> <code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</p> </li>
    /// </ul>
    pub fn data_compression_type(
        &self,
    ) -> std::option::Option<&crate::model::OpenZfsDataCompressionType> {
        match &self.data_compression_type {
            Some(compression) => Some(compression),
            None => None,
        }
    }
    /// <p>The configuration object for mounting a Network File System (NFS) file system.</p>
    pub fn nfs_exports(&self) -> std::option::Option<&[crate::model::OpenZfsNfsExport]> {
        match &self.nfs_exports {
            Some(exports) => Some(exports.as_slice()),
            None => None,
        }
    }
    /// <p>An object specifying how much storage users or groups can use on the volume.</p>
    pub fn user_and_group_quotas(
        &self,
    ) -> std::option::Option<&[crate::model::OpenZfsUserOrGroupQuota]> {
        match &self.user_and_group_quotas {
            Some(quotas) => Some(quotas.as_slice()),
            None => None,
        }
    }
    /// <p>A Boolean value indicating whether the volume is read-only.</p>
    pub fn read_only(&self) -> std::option::Option<bool> {
        self.read_only
    }
}
/// See [`UpdateOpenZfsVolumeConfiguration`](crate::model::UpdateOpenZfsVolumeConfiguration).
pub mod update_open_zfs_volume_configuration {

    /// A builder for [`UpdateOpenZfsVolumeConfiguration`](crate::model::UpdateOpenZfsVolumeConfiguration).
    #[derive(Clone, PartialEq, Default, Debug)]
    pub struct Builder {
        pub(crate) storage_capacity_reservation_gi_b: Option<i32>,
        pub(crate) storage_capacity_quota_gi_b: Option<i32>,
        pub(crate) record_size_ki_b: Option<i32>,
        pub(crate) data_compression_type: Option<crate::model::OpenZfsDataCompressionType>,
        pub(crate) nfs_exports: Option<Vec<crate::model::OpenZfsNfsExport>>,
        pub(crate) user_and_group_quotas: Option<Vec<crate::model::OpenZfsUserOrGroupQuota>>,
        pub(crate) read_only: Option<bool>,
    }
    impl Builder {
        /// <p>The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't reserve more storage than the parent volume has reserved. Pass <code>-1</code> to unset a volume's storage capacity reservation.</p>
        pub fn storage_capacity_reservation_gi_b(mut self, input: i32) -> Self {
            self.storage_capacity_reservation_gi_b = Some(input);
            self
        }
        /// Optional-valued variant of [`storage_capacity_reservation_gi_b`](Self::storage_capacity_reservation_gi_b); passing `None` clears the field.
        pub fn set_storage_capacity_reservation_gi_b(
            mut self,
            input: std::option::Option<i32>,
        ) -> Self {
            self.storage_capacity_reservation_gi_b = input;
            self
        }
        /// <p>The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. You can specify a quota larger than the storage on the parent volume. Pass <code>-1</code> to unset a volume's storage capacity quota.</p>
        pub fn storage_capacity_quota_gi_b(mut self, input: i32) -> Self {
            self.storage_capacity_quota_gi_b = Some(input);
            self
        }
        /// Optional-valued variant of [`storage_capacity_quota_gi_b`](Self::storage_capacity_quota_gi_b); passing `None` clears the field.
        pub fn set_storage_capacity_quota_gi_b(mut self, input: std::option::Option<i32>) -> Self {
            self.storage_capacity_quota_gi_b = input;
            self
        }
        /// <p>The record size of the OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128 (default), 256, 512, or 1024 KiB. For guidance on custom record sizes, see <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#performance-tips-zfs"> Tips for maximizing performance</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>
        pub fn record_size_ki_b(mut self, input: i32) -> Self {
            self.record_size_ki_b = Some(input);
            self
        }
        /// Optional-valued variant of [`record_size_ki_b`](Self::record_size_ki_b); passing `None` clears the field.
        pub fn set_record_size_ki_b(mut self, input: std::option::Option<i32>) -> Self {
            self.record_size_ki_b = input;
            self
        }
        /// <p>The method used to compress the data on the volume: <code>NONE</code> (the default), <code>ZSTD</code> (better compression ratio), or <code>LZ4</code> (less compute-intensive, higher write throughput).</p>
        pub fn data_compression_type(
            mut self,
            input: crate::model::OpenZfsDataCompressionType,
        ) -> Self {
            self.data_compression_type = Some(input);
            self
        }
        /// Optional-valued variant of [`data_compression_type`](Self::data_compression_type); passing `None` clears the field.
        pub fn set_data_compression_type(
            mut self,
            input: std::option::Option<crate::model::OpenZfsDataCompressionType>,
        ) -> Self {
            self.data_compression_type = input;
            self
        }
        /// Appends one NFS export configuration to `nfs_exports`.
        ///
        /// To override the contents of this collection use [`set_nfs_exports`](Self::set_nfs_exports).
        pub fn nfs_exports(mut self, input: crate::model::OpenZfsNfsExport) -> Self {
            // Lazily create the Vec on first append, then push in place.
            self.nfs_exports.get_or_insert_with(Vec::new).push(input);
            self
        }
        /// <p>The configuration object for mounting a Network File System (NFS) file system.</p>
        pub fn set_nfs_exports(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::OpenZfsNfsExport>>,
        ) -> Self {
            self.nfs_exports = input;
            self
        }
        /// Appends one quota entry to `user_and_group_quotas`.
        ///
        /// To override the contents of this collection use [`set_user_and_group_quotas`](Self::set_user_and_group_quotas).
        pub fn user_and_group_quotas(
            mut self,
            input: crate::model::OpenZfsUserOrGroupQuota,
        ) -> Self {
            // Lazily create the Vec on first append, then push in place.
            self.user_and_group_quotas
                .get_or_insert_with(Vec::new)
                .push(input);
            self
        }
        /// <p>An object specifying how much storage users or groups can use on the volume.</p>
        pub fn set_user_and_group_quotas(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::OpenZfsUserOrGroupQuota>>,
        ) -> Self {
            self.user_and_group_quotas = input;
            self
        }
        /// <p>A Boolean value indicating whether the volume is read-only.</p>
        pub fn read_only(mut self, input: bool) -> Self {
            self.read_only = Some(input);
            self
        }
        /// Optional-valued variant of [`read_only`](Self::read_only); passing `None` clears the field.
        pub fn set_read_only(mut self, input: std::option::Option<bool>) -> Self {
            self.read_only = input;
            self
        }
        /// Consumes the builder and constructs an [`UpdateOpenZfsVolumeConfiguration`](crate::model::UpdateOpenZfsVolumeConfiguration).
        pub fn build(self) -> crate::model::UpdateOpenZfsVolumeConfiguration {
            crate::model::UpdateOpenZfsVolumeConfiguration {
                storage_capacity_reservation_gi_b: self.storage_capacity_reservation_gi_b,
                storage_capacity_quota_gi_b: self.storage_capacity_quota_gi_b,
                record_size_ki_b: self.record_size_ki_b,
                data_compression_type: self.data_compression_type,
                nfs_exports: self.nfs_exports,
                user_and_group_quotas: self.user_and_group_quotas,
                read_only: self.read_only,
            }
        }
    }
}
impl UpdateOpenZfsVolumeConfiguration {
    /// Creates a new builder-style object to manufacture [`UpdateOpenZfsVolumeConfiguration`](crate::model::UpdateOpenZfsVolumeConfiguration).
    pub fn builder() -> crate::model::update_open_zfs_volume_configuration::Builder {
        // The builder derives `Default`, so start from its default (all-`None`) state.
        Default::default()
    }
}

/// <p>Used to specify changes to the ONTAP configuration for the volume you are updating.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct UpdateOntapVolumeConfiguration {
    /// <p>Specifies the location in the SVM's namespace where the volume is mounted. The <code>JunctionPath</code> must have a leading forward slash, such as <code>/vol3</code>.</p>
    #[doc(hidden)]
    pub junction_path: std::option::Option<std::string::String>,
    /// <p>The security style for the volume, which can be <code>UNIX</code>, <code>NTFS</code>, or <code>MIXED</code>.</p>
    #[doc(hidden)]
    pub security_style: std::option::Option<crate::model::SecurityStyle>,
    /// <p>Specifies the size of the volume in megabytes.</p>
    #[doc(hidden)]
    pub size_in_megabytes: std::option::Option<i32>,
    /// <p>Default is <code>false</code>. Set to true to enable the deduplication, compression, and compaction storage efficiency features on the volume.</p>
    #[doc(hidden)]
    pub storage_efficiency_enabled: std::option::Option<bool>,
    /// <p>Update the volume's data tiering policy.</p>
    #[doc(hidden)]
    pub tiering_policy: std::option::Option<crate::model::TieringPolicy>,
    /// <p>Specifies the snapshot policy for the volume. There are three built-in snapshot policies:</p>
    /// <ul>
    /// <li> <p> <code>default</code>: This is the default policy. A maximum of six hourly snapshots taken five minutes past the hour. A maximum of two daily snapshots taken Monday through Saturday at 10 minutes after midnight. A maximum of two weekly snapshots taken every Sunday at 15 minutes after midnight.</p> </li>
    /// <li> <p> <code>default-1weekly</code>: This policy is the same as the <code>default</code> policy except that it only retains one snapshot from the weekly schedule.</p> </li>
    /// <li> <p> <code>none</code>: This policy does not take any snapshots. This policy can be assigned to volumes to prevent automatic snapshots from being taken.</p> </li>
    /// </ul>
    /// <p>You can also provide the name of a custom policy that you created with the ONTAP CLI or REST API.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snapshots-ontap.html#snapshot-policies">Snapshot policies</a> in the <i>Amazon FSx for NetApp ONTAP User Guide</i>.</p>
    #[doc(hidden)]
    pub snapshot_policy: std::option::Option<std::string::String>,
    /// <p>A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to false. If it's set to true, all tags for the volume are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the volume, regardless of this value.</p>
    #[doc(hidden)]
    pub copy_tags_to_backups: std::option::Option<bool>,
}
impl UpdateOntapVolumeConfiguration {
    /// <p>Specifies the location in the SVM's namespace where the volume is mounted. The <code>JunctionPath</code> must have a leading forward slash, such as <code>/vol3</code>.</p>
    pub fn junction_path(&self) -> std::option::Option<&str> {
        self.junction_path.as_ref().map(|s| s.as_str())
    }
    /// <p>The security style for the volume, which can be <code>UNIX</code>, <code>NTFS</code>, or <code>MIXED</code>.</p>
    pub fn security_style(&self) -> std::option::Option<&crate::model::SecurityStyle> {
        self.security_style.as_ref()
    }
    /// <p>Specifies the size of the volume in megabytes.</p>
    pub fn size_in_megabytes(&self) -> std::option::Option<i32> {
        self.size_in_megabytes
    }
    /// <p>Set to <code>true</code> to enable the deduplication, compression, and compaction storage efficiency features on the volume. Default is <code>false</code>.</p>
    pub fn storage_efficiency_enabled(&self) -> std::option::Option<bool> {
        self.storage_efficiency_enabled
    }
    /// <p>Update the volume's data tiering policy.</p>
    pub fn tiering_policy(&self) -> std::option::Option<&crate::model::TieringPolicy> {
        self.tiering_policy.as_ref()
    }
    /// <p>The snapshot policy for the volume: one of the built-in policies <code>default</code>, <code>default-1weekly</code>, or <code>none</code>, or the name of a custom policy created with the ONTAP CLI or REST API. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snapshots-ontap.html#snapshot-policies">Snapshot policies</a> in the <i>Amazon FSx for NetApp ONTAP User Guide</i>.</p>
    pub fn snapshot_policy(&self) -> std::option::Option<&str> {
        self.snapshot_policy.as_ref().map(|s| s.as_str())
    }
    /// <p>Whether tags for the volume should be copied to backups (defaults to false). When true, all of the volume's tags are copied to automatic and user-initiated backups unless the backup request specifies its own tags.</p>
    pub fn copy_tags_to_backups(&self) -> std::option::Option<bool> {
        self.copy_tags_to_backups
    }
}
/// See [`UpdateOntapVolumeConfiguration`](crate::model::UpdateOntapVolumeConfiguration).
pub mod update_ontap_volume_configuration {

    /// A builder for [`UpdateOntapVolumeConfiguration`](crate::model::UpdateOntapVolumeConfiguration).
    #[derive(Clone, PartialEq, Default, Debug)]
    pub struct Builder {
        pub(crate) junction_path: Option<String>,
        pub(crate) security_style: Option<crate::model::SecurityStyle>,
        pub(crate) size_in_megabytes: Option<i32>,
        pub(crate) storage_efficiency_enabled: Option<bool>,
        pub(crate) tiering_policy: Option<crate::model::TieringPolicy>,
        pub(crate) snapshot_policy: Option<String>,
        pub(crate) copy_tags_to_backups: Option<bool>,
    }
    impl Builder {
        /// <p>Specifies the location in the SVM's namespace where the volume is mounted. The <code>JunctionPath</code> must have a leading forward slash, such as <code>/vol3</code>.</p>
        pub fn junction_path(mut self, input: impl Into<std::string::String>) -> Self {
            self.junction_path = Some(input.into());
            self
        }
        /// Optional-valued variant of [`junction_path`](Self::junction_path); passing `None` clears the field.
        pub fn set_junction_path(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.junction_path = input;
            self
        }
        /// <p>The security style for the volume, which can be <code>UNIX</code>, <code>NTFS</code>, or <code>MIXED</code>.</p>
        pub fn security_style(mut self, input: crate::model::SecurityStyle) -> Self {
            self.security_style = Some(input);
            self
        }
        /// Optional-valued variant of [`security_style`](Self::security_style); passing `None` clears the field.
        pub fn set_security_style(
            mut self,
            input: std::option::Option<crate::model::SecurityStyle>,
        ) -> Self {
            self.security_style = input;
            self
        }
        /// <p>Specifies the size of the volume in megabytes.</p>
        pub fn size_in_megabytes(mut self, input: i32) -> Self {
            self.size_in_megabytes = Some(input);
            self
        }
        /// Optional-valued variant of [`size_in_megabytes`](Self::size_in_megabytes); passing `None` clears the field.
        pub fn set_size_in_megabytes(mut self, input: std::option::Option<i32>) -> Self {
            self.size_in_megabytes = input;
            self
        }
        /// <p>Set to <code>true</code> to enable the deduplication, compression, and compaction storage efficiency features on the volume. Default is <code>false</code>.</p>
        pub fn storage_efficiency_enabled(mut self, input: bool) -> Self {
            self.storage_efficiency_enabled = Some(input);
            self
        }
        /// Optional-valued variant of [`storage_efficiency_enabled`](Self::storage_efficiency_enabled); passing `None` clears the field.
        pub fn set_storage_efficiency_enabled(mut self, input: std::option::Option<bool>) -> Self {
            self.storage_efficiency_enabled = input;
            self
        }
        /// <p>Update the volume's data tiering policy.</p>
        pub fn tiering_policy(mut self, input: crate::model::TieringPolicy) -> Self {
            self.tiering_policy = Some(input);
            self
        }
        /// Optional-valued variant of [`tiering_policy`](Self::tiering_policy); passing `None` clears the field.
        pub fn set_tiering_policy(
            mut self,
            input: std::option::Option<crate::model::TieringPolicy>,
        ) -> Self {
            self.tiering_policy = input;
            self
        }
        /// <p>The snapshot policy for the volume: one of the built-in policies <code>default</code>, <code>default-1weekly</code>, or <code>none</code>, or the name of a custom policy created with the ONTAP CLI or REST API. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snapshots-ontap.html#snapshot-policies">Snapshot policies</a> in the <i>Amazon FSx for NetApp ONTAP User Guide</i>.</p>
        pub fn snapshot_policy(mut self, input: impl Into<std::string::String>) -> Self {
            self.snapshot_policy = Some(input.into());
            self
        }
        /// Optional-valued variant of [`snapshot_policy`](Self::snapshot_policy); passing `None` clears the field.
        pub fn set_snapshot_policy(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.snapshot_policy = input;
            self
        }
        /// <p>Whether tags for the volume should be copied to backups (defaults to false). When true, all of the volume's tags are copied to automatic and user-initiated backups unless the backup request specifies its own tags.</p>
        pub fn copy_tags_to_backups(mut self, input: bool) -> Self {
            self.copy_tags_to_backups = Some(input);
            self
        }
        /// Optional-valued variant of [`copy_tags_to_backups`](Self::copy_tags_to_backups); passing `None` clears the field.
        pub fn set_copy_tags_to_backups(mut self, input: std::option::Option<bool>) -> Self {
            self.copy_tags_to_backups = input;
            self
        }
        /// Consumes the builder and constructs an [`UpdateOntapVolumeConfiguration`](crate::model::UpdateOntapVolumeConfiguration).
        pub fn build(self) -> crate::model::UpdateOntapVolumeConfiguration {
            crate::model::UpdateOntapVolumeConfiguration {
                junction_path: self.junction_path,
                security_style: self.security_style,
                size_in_megabytes: self.size_in_megabytes,
                storage_efficiency_enabled: self.storage_efficiency_enabled,
                tiering_policy: self.tiering_policy,
                snapshot_policy: self.snapshot_policy,
                copy_tags_to_backups: self.copy_tags_to_backups,
            }
        }
    }
}
impl UpdateOntapVolumeConfiguration {
    /// Creates a new builder-style object to manufacture [`UpdateOntapVolumeConfiguration`](crate::model::UpdateOntapVolumeConfiguration).
    pub fn builder() -> crate::model::update_ontap_volume_configuration::Builder {
        // The builder derives `Default`, so start from its default (all-`None`) state.
        Default::default()
    }
}

/// <p>Describes the Amazon FSx for NetApp ONTAP storage virtual machine (SVM) configuration.</p>
#[non_exhaustive]
#[derive(Clone, PartialEq, Debug)]
pub struct StorageVirtualMachine {
    /// <p>Describes the Microsoft Active Directory configuration to which the SVM is joined, if applicable.</p>
    #[doc(hidden)]
    pub active_directory_configuration:
        std::option::Option<crate::model::SvmActiveDirectoryConfiguration>,
    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
    #[doc(hidden)]
    pub creation_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The endpoints (<code>Iscsi</code>, <code>Management</code>, <code>Nfs</code>, and <code>Smb</code>) used to access data or to manage the SVM with the NetApp ONTAP CLI, REST API, or NetApp CloudManager.</p>
    #[doc(hidden)]
    pub endpoints: std::option::Option<crate::model::SvmEndpoints>,
    /// <p>The globally unique ID of the file system, assigned by Amazon FSx.</p>
    #[doc(hidden)]
    pub file_system_id: std::option::Option<std::string::String>,
    /// <p>Describes the SVM's lifecycle status: <code>CREATED</code> (fully available), <code>CREATING</code>, <code>DELETING</code>, <code>FAILED</code> (could not be created), <code>MISCONFIGURED</code> (failed but recoverable), or <code>PENDING</code> (creation not yet started).</p>
    #[doc(hidden)]
    pub lifecycle: std::option::Option<crate::model::StorageVirtualMachineLifecycle>,
    /// <p>The name of the SVM, if provisioned.</p>
    #[doc(hidden)]
    pub name: std::option::Option<std::string::String>,
    /// <p>The Amazon Resource Name (ARN) for the resource. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    #[doc(hidden)]
    pub resource_arn: std::option::Option<std::string::String>,
    /// <p>The SVM's system generated unique ID.</p>
    #[doc(hidden)]
    pub storage_virtual_machine_id: std::option::Option<std::string::String>,
    /// <p>Describes the SVM's subtype.</p>
    #[doc(hidden)]
    pub subtype: std::option::Option<crate::model::StorageVirtualMachineSubtype>,
    /// <p>The SVM's UUID (universally unique identifier).</p>
    #[doc(hidden)]
    pub uuid: std::option::Option<std::string::String>,
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    #[doc(hidden)]
    pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    /// <p>Describes why the SVM lifecycle state changed.</p>
    #[doc(hidden)]
    pub lifecycle_transition_reason: std::option::Option<crate::model::LifecycleTransitionReason>,
    /// <p>The security style of the root volume of the SVM.</p>
    #[doc(hidden)]
    pub root_volume_security_style:
        std::option::Option<crate::model::StorageVirtualMachineRootVolumeSecurityStyle>,
}
impl StorageVirtualMachine {
    /// <p>Describes the Microsoft Active Directory configuration to which the SVM is joined, if applicable.</p>
    pub fn active_directory_configuration(
        &self,
    ) -> std::option::Option<&crate::model::SvmActiveDirectoryConfiguration> {
        self.active_directory_configuration.as_ref()
    }
    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
    pub fn creation_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.creation_time.as_ref()
    }
    /// <p>The endpoints (<code>Iscsi</code>, <code>Management</code>, <code>Nfs</code>, and <code>Smb</code>) used to access data or to manage the SVM with the NetApp ONTAP CLI, REST API, or NetApp CloudManager.</p>
    pub fn endpoints(&self) -> std::option::Option<&crate::model::SvmEndpoints> {
        self.endpoints.as_ref()
    }
    /// <p>The globally unique ID of the file system, assigned by Amazon FSx.</p>
    pub fn file_system_id(&self) -> std::option::Option<&str> {
        self.file_system_id.as_ref().map(|s| s.as_str())
    }
    /// <p>Describes the SVM's lifecycle status: <code>CREATED</code> (fully available), <code>CREATING</code>, <code>DELETING</code>, <code>FAILED</code> (could not be created), <code>MISCONFIGURED</code> (failed but recoverable), or <code>PENDING</code> (creation not yet started).</p>
    pub fn lifecycle(&self) -> std::option::Option<&crate::model::StorageVirtualMachineLifecycle> {
        self.lifecycle.as_ref()
    }
    /// <p>The name of the SVM, if provisioned.</p>
    pub fn name(&self) -> std::option::Option<&str> {
        self.name.as_ref().map(|s| s.as_str())
    }
    /// <p>The Amazon Resource Name (ARN) for the resource. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    pub fn resource_arn(&self) -> std::option::Option<&str> {
        self.resource_arn.as_ref().map(|s| s.as_str())
    }
    /// <p>The SVM's system generated unique ID.</p>
    pub fn storage_virtual_machine_id(&self) -> std::option::Option<&str> {
        self.storage_virtual_machine_id.as_ref().map(|s| s.as_str())
    }
    /// <p>Describes the SVM's subtype.</p>
    pub fn subtype(&self) -> std::option::Option<&crate::model::StorageVirtualMachineSubtype> {
        self.subtype.as_ref()
    }
    /// <p>The SVM's UUID (universally unique identifier).</p>
    pub fn uuid(&self) -> std::option::Option<&str> {
        self.uuid.as_ref().map(|s| s.as_str())
    }
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    pub fn tags(&self) -> std::option::Option<&[crate::model::Tag]> {
        self.tags.as_ref().map(|v| v.as_slice())
    }
    /// <p>Describes why the SVM lifecycle state changed.</p>
    pub fn lifecycle_transition_reason(
        &self,
    ) -> std::option::Option<&crate::model::LifecycleTransitionReason> {
        self.lifecycle_transition_reason.as_ref()
    }
    /// <p>The security style of the root volume of the SVM.</p>
    pub fn root_volume_security_style(
        &self,
    ) -> std::option::Option<&crate::model::StorageVirtualMachineRootVolumeSecurityStyle> {
        self.root_volume_security_style.as_ref()
    }
}
/// See [`StorageVirtualMachine`](crate::model::StorageVirtualMachine).
pub mod storage_virtual_machine {
    //! Builder machinery for [`StorageVirtualMachine`](crate::model::StorageVirtualMachine).
    //! Every field is optional; [`Builder::build`] performs no validation and simply
    //! moves the accumulated values into the output struct.

    /// A builder for [`StorageVirtualMachine`](crate::model::StorageVirtualMachine).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        // Each field mirrors the corresponding StorageVirtualMachine field; all start as None.
        pub(crate) active_directory_configuration:
            std::option::Option<crate::model::SvmActiveDirectoryConfiguration>,
        pub(crate) creation_time: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) endpoints: std::option::Option<crate::model::SvmEndpoints>,
        pub(crate) file_system_id: std::option::Option<std::string::String>,
        pub(crate) lifecycle: std::option::Option<crate::model::StorageVirtualMachineLifecycle>,
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) resource_arn: std::option::Option<std::string::String>,
        pub(crate) storage_virtual_machine_id: std::option::Option<std::string::String>,
        pub(crate) subtype: std::option::Option<crate::model::StorageVirtualMachineSubtype>,
        pub(crate) uuid: std::option::Option<std::string::String>,
        pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        pub(crate) lifecycle_transition_reason:
            std::option::Option<crate::model::LifecycleTransitionReason>,
        pub(crate) root_volume_security_style:
            std::option::Option<crate::model::StorageVirtualMachineRootVolumeSecurityStyle>,
    }
    impl Builder {
        // Setters come in pairs: `x(value)` wraps the value in Some; `set_x(option)`
        // stores the Option verbatim (useful for clearing a field with None).

        /// <p>Describes the Microsoft Active Directory configuration to which the SVM is joined, if applicable.</p>
        pub fn active_directory_configuration(
            mut self,
            input: crate::model::SvmActiveDirectoryConfiguration,
        ) -> Self {
            self.active_directory_configuration = Some(input);
            self
        }
        /// <p>Describes the Microsoft Active Directory configuration to which the SVM is joined, if applicable.</p>
        pub fn set_active_directory_configuration(
            mut self,
            input: std::option::Option<crate::model::SvmActiveDirectoryConfiguration>,
        ) -> Self {
            self.active_directory_configuration = input;
            self
        }
        /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
        pub fn creation_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.creation_time = Some(input);
            self
        }
        /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
        pub fn set_creation_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.creation_time = input;
            self
        }
        /// <p>The endpoints that are used to access data or to manage the SVM using the NetApp ONTAP CLI, REST API, or NetApp CloudManager. They are the <code>Iscsi</code>, <code>Management</code>, <code>Nfs</code>, and <code>Smb</code> endpoints.</p>
        pub fn endpoints(mut self, input: crate::model::SvmEndpoints) -> Self {
            self.endpoints = Some(input);
            self
        }
        /// <p>The endpoints that are used to access data or to manage the SVM using the NetApp ONTAP CLI, REST API, or NetApp CloudManager. They are the <code>Iscsi</code>, <code>Management</code>, <code>Nfs</code>, and <code>Smb</code> endpoints.</p>
        pub fn set_endpoints(
            mut self,
            input: std::option::Option<crate::model::SvmEndpoints>,
        ) -> Self {
            self.endpoints = input;
            self
        }
        /// <p>The globally unique ID of the file system, assigned by Amazon FSx.</p>
        pub fn file_system_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.file_system_id = Some(input.into());
            self
        }
        /// <p>The globally unique ID of the file system, assigned by Amazon FSx.</p>
        pub fn set_file_system_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.file_system_id = input;
            self
        }
        /// <p>Describes the SVM's lifecycle status.</p>
        /// <ul>
        /// <li> <p> <code>CREATED</code> - The SVM is fully available for use.</p> </li>
        /// <li> <p> <code>CREATING</code> - Amazon FSx is creating the new SVM.</p> </li>
        /// <li> <p> <code>DELETING</code> - Amazon FSx is deleting an existing SVM.</p> </li>
        /// <li> <p> <code>FAILED</code> - Amazon FSx was unable to create the SVM.</p> </li>
        /// <li> <p> <code>MISCONFIGURED</code> - The SVM is in a failed but recoverable state.</p> </li>
        /// <li> <p> <code>PENDING</code> - Amazon FSx has not started creating the SVM.</p> </li>
        /// </ul>
        pub fn lifecycle(mut self, input: crate::model::StorageVirtualMachineLifecycle) -> Self {
            self.lifecycle = Some(input);
            self
        }
        /// <p>Describes the SVM's lifecycle status.</p>
        /// <ul>
        /// <li> <p> <code>CREATED</code> - The SVM is fully available for use.</p> </li>
        /// <li> <p> <code>CREATING</code> - Amazon FSx is creating the new SVM.</p> </li>
        /// <li> <p> <code>DELETING</code> - Amazon FSx is deleting an existing SVM.</p> </li>
        /// <li> <p> <code>FAILED</code> - Amazon FSx was unable to create the SVM.</p> </li>
        /// <li> <p> <code>MISCONFIGURED</code> - The SVM is in a failed but recoverable state.</p> </li>
        /// <li> <p> <code>PENDING</code> - Amazon FSx has not started creating the SVM.</p> </li>
        /// </ul>
        pub fn set_lifecycle(
            mut self,
            input: std::option::Option<crate::model::StorageVirtualMachineLifecycle>,
        ) -> Self {
            self.lifecycle = input;
            self
        }
        /// <p>The name of the SVM, if provisioned.</p>
        pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
            self.name = Some(input.into());
            self
        }
        /// <p>The name of the SVM, if provisioned.</p>
        pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.name = input;
            self
        }
        /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
        pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.resource_arn = Some(input.into());
            self
        }
        /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
        pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.resource_arn = input;
            self
        }
        /// <p>The SVM's system generated unique ID.</p>
        pub fn storage_virtual_machine_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.storage_virtual_machine_id = Some(input.into());
            self
        }
        /// <p>The SVM's system generated unique ID.</p>
        pub fn set_storage_virtual_machine_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.storage_virtual_machine_id = input;
            self
        }
        /// <p>Describes the SVM's subtype.</p>
        pub fn subtype(mut self, input: crate::model::StorageVirtualMachineSubtype) -> Self {
            self.subtype = Some(input);
            self
        }
        /// <p>Describes the SVM's subtype.</p>
        pub fn set_subtype(
            mut self,
            input: std::option::Option<crate::model::StorageVirtualMachineSubtype>,
        ) -> Self {
            self.subtype = input;
            self
        }
        /// <p>The SVM's UUID (universally unique identifier).</p>
        pub fn uuid(mut self, input: impl Into<std::string::String>) -> Self {
            self.uuid = Some(input.into());
            self
        }
        /// <p>The SVM's UUID (universally unique identifier).</p>
        pub fn set_uuid(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.uuid = input;
            self
        }
        /// Appends an item to `tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).
        ///
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn tags(mut self, input: crate::model::Tag) -> Self {
            // Lazily creates the backing Vec on first append.
            let mut v = self.tags.unwrap_or_default();
            v.push(input);
            self.tags = Some(v);
            self
        }
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn set_tags(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            self.tags = input;
            self
        }
        /// <p>Describes why the SVM lifecycle state changed.</p>
        pub fn lifecycle_transition_reason(
            mut self,
            input: crate::model::LifecycleTransitionReason,
        ) -> Self {
            self.lifecycle_transition_reason = Some(input);
            self
        }
        /// <p>Describes why the SVM lifecycle state changed.</p>
        pub fn set_lifecycle_transition_reason(
            mut self,
            input: std::option::Option<crate::model::LifecycleTransitionReason>,
        ) -> Self {
            self.lifecycle_transition_reason = input;
            self
        }
        /// <p>The security style of the root volume of the SVM.</p>
        pub fn root_volume_security_style(
            mut self,
            input: crate::model::StorageVirtualMachineRootVolumeSecurityStyle,
        ) -> Self {
            self.root_volume_security_style = Some(input);
            self
        }
        /// <p>The security style of the root volume of the SVM.</p>
        pub fn set_root_volume_security_style(
            mut self,
            input: std::option::Option<crate::model::StorageVirtualMachineRootVolumeSecurityStyle>,
        ) -> Self {
            self.root_volume_security_style = input;
            self
        }
        /// Consumes the builder and constructs a [`StorageVirtualMachine`](crate::model::StorageVirtualMachine).
        pub fn build(self) -> crate::model::StorageVirtualMachine {
            // No validation is performed; every field is moved through as-is and
            // unset fields remain None in the resulting struct.
            crate::model::StorageVirtualMachine {
                active_directory_configuration: self.active_directory_configuration,
                creation_time: self.creation_time,
                endpoints: self.endpoints,
                file_system_id: self.file_system_id,
                lifecycle: self.lifecycle,
                name: self.name,
                resource_arn: self.resource_arn,
                storage_virtual_machine_id: self.storage_virtual_machine_id,
                subtype: self.subtype,
                uuid: self.uuid,
                tags: self.tags,
                lifecycle_transition_reason: self.lifecycle_transition_reason,
                root_volume_security_style: self.root_volume_security_style,
            }
        }
    }
}
impl StorageVirtualMachine {
    /// Creates a new builder-style object to manufacture [`StorageVirtualMachine`](crate::model::StorageVirtualMachine).
    pub fn builder() -> crate::model::storage_virtual_machine::Builder {
        // The return type drives inference; this is the derived Default impl.
        std::default::Default::default()
    }
}

/// When writing a match expression against `StorageVirtualMachineRootVolumeSecurityStyle`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let storagevirtualmachinerootvolumesecuritystyle = unimplemented!();
/// match storagevirtualmachinerootvolumesecuritystyle {
///     StorageVirtualMachineRootVolumeSecurityStyle::Mixed => { /* ... */ },
///     StorageVirtualMachineRootVolumeSecurityStyle::Ntfs => { /* ... */ },
///     StorageVirtualMachineRootVolumeSecurityStyle::Unix => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `storagevirtualmachinerootvolumesecuritystyle` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `StorageVirtualMachineRootVolumeSecurityStyle::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `StorageVirtualMachineRootVolumeSecurityStyle::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `StorageVirtualMachineRootVolumeSecurityStyle::NewFeature` is defined.
/// Specifically, when `storagevirtualmachinerootvolumesecuritystyle` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `StorageVirtualMachineRootVolumeSecurityStyle::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum StorageVirtualMachineRootVolumeSecurityStyle {
    /// Maps to the `MIXED` string value on the wire.
    #[allow(missing_docs)] // documentation missing in model
    Mixed,
    /// Maps to the `NTFS` string value on the wire.
    #[allow(missing_docs)] // documentation missing in model
    Ntfs,
    /// Maps to the `UNIX` string value on the wire.
    #[allow(missing_docs)] // documentation missing in model
    Unix,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for StorageVirtualMachineRootVolumeSecurityStyle {
    fn from(s: &str) -> Self {
        // Map a wire string onto a known variant; anything unrecognized is
        // preserved verbatim inside the `Unknown` variant.
        match s {
            "MIXED" => Self::Mixed,
            "NTFS" => Self::Ntfs,
            "UNIX" => Self::Unix,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for StorageVirtualMachineRootVolumeSecurityStyle {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(StorageVirtualMachineRootVolumeSecurityStyle::from(s))
    }
}
impl StorageVirtualMachineRootVolumeSecurityStyle {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            StorageVirtualMachineRootVolumeSecurityStyle::Mixed => "MIXED",
            StorageVirtualMachineRootVolumeSecurityStyle::Ntfs => "NTFS",
            StorageVirtualMachineRootVolumeSecurityStyle::Unix => "UNIX",
            StorageVirtualMachineRootVolumeSecurityStyle::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["MIXED", "NTFS", "UNIX"]
    }
}
impl AsRef<str> for StorageVirtualMachineRootVolumeSecurityStyle {
    /// Delegates to [`Self::as_str`].
    fn as_ref(&self) -> &str {
        Self::as_str(self)
    }
}

/// When writing a match expression against `StorageVirtualMachineSubtype`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let storagevirtualmachinesubtype = unimplemented!();
/// match storagevirtualmachinesubtype {
///     StorageVirtualMachineSubtype::Default => { /* ... */ },
///     StorageVirtualMachineSubtype::DpDestination => { /* ... */ },
///     StorageVirtualMachineSubtype::SyncDestination => { /* ... */ },
///     StorageVirtualMachineSubtype::SyncSource => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `storagevirtualmachinesubtype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `StorageVirtualMachineSubtype::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `StorageVirtualMachineSubtype::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `StorageVirtualMachineSubtype::NewFeature` is defined.
/// Specifically, when `storagevirtualmachinesubtype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `StorageVirtualMachineSubtype::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum StorageVirtualMachineSubtype {
    /// Maps to the `DEFAULT` string value on the wire.
    #[allow(missing_docs)] // documentation missing in model
    Default,
    /// Maps to the `DP_DESTINATION` string value on the wire.
    #[allow(missing_docs)] // documentation missing in model
    DpDestination,
    /// Maps to the `SYNC_DESTINATION` string value on the wire.
    #[allow(missing_docs)] // documentation missing in model
    SyncDestination,
    /// Maps to the `SYNC_SOURCE` string value on the wire.
    #[allow(missing_docs)] // documentation missing in model
    SyncSource,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for StorageVirtualMachineSubtype {
    fn from(s: &str) -> Self {
        // Map a wire string onto a known variant; anything unrecognized is
        // preserved verbatim inside the `Unknown` variant.
        match s {
            "DEFAULT" => Self::Default,
            "DP_DESTINATION" => Self::DpDestination,
            "SYNC_DESTINATION" => Self::SyncDestination,
            "SYNC_SOURCE" => Self::SyncSource,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for StorageVirtualMachineSubtype {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(StorageVirtualMachineSubtype::from(s))
    }
}
impl StorageVirtualMachineSubtype {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            StorageVirtualMachineSubtype::Default => "DEFAULT",
            StorageVirtualMachineSubtype::DpDestination => "DP_DESTINATION",
            StorageVirtualMachineSubtype::SyncDestination => "SYNC_DESTINATION",
            StorageVirtualMachineSubtype::SyncSource => "SYNC_SOURCE",
            StorageVirtualMachineSubtype::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &[
            "DEFAULT",
            "DP_DESTINATION",
            "SYNC_DESTINATION",
            "SYNC_SOURCE",
        ]
    }
}
impl AsRef<str> for StorageVirtualMachineSubtype {
    /// Delegates to [`Self::as_str`].
    fn as_ref(&self) -> &str {
        Self::as_str(self)
    }
}

/// When writing a match expression against `StorageVirtualMachineLifecycle`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let storagevirtualmachinelifecycle = unimplemented!();
/// match storagevirtualmachinelifecycle {
///     StorageVirtualMachineLifecycle::Created => { /* ... */ },
///     StorageVirtualMachineLifecycle::Creating => { /* ... */ },
///     StorageVirtualMachineLifecycle::Deleting => { /* ... */ },
///     StorageVirtualMachineLifecycle::Failed => { /* ... */ },
///     StorageVirtualMachineLifecycle::Misconfigured => { /* ... */ },
///     StorageVirtualMachineLifecycle::Pending => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `storagevirtualmachinelifecycle` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `StorageVirtualMachineLifecycle::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `StorageVirtualMachineLifecycle::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `StorageVirtualMachineLifecycle::NewFeature` is defined.
/// Specifically, when `storagevirtualmachinelifecycle` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `StorageVirtualMachineLifecycle::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum StorageVirtualMachineLifecycle {
    /// `CREATED` - The SVM is fully available for use.
    #[allow(missing_docs)] // documentation missing in model
    Created,
    /// `CREATING` - Amazon FSx is creating the new SVM.
    #[allow(missing_docs)] // documentation missing in model
    Creating,
    /// `DELETING` - Amazon FSx is deleting an existing SVM.
    #[allow(missing_docs)] // documentation missing in model
    Deleting,
    /// `FAILED` - Amazon FSx was unable to create the SVM.
    #[allow(missing_docs)] // documentation missing in model
    Failed,
    /// `MISCONFIGURED` - The SVM is in a failed but recoverable state.
    #[allow(missing_docs)] // documentation missing in model
    Misconfigured,
    /// `PENDING` - Amazon FSx has not started creating the SVM.
    #[allow(missing_docs)] // documentation missing in model
    Pending,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for StorageVirtualMachineLifecycle {
    fn from(s: &str) -> Self {
        // Map a wire string onto a known variant; anything unrecognized is
        // preserved verbatim inside the `Unknown` variant.
        match s {
            "CREATED" => Self::Created,
            "CREATING" => Self::Creating,
            "DELETING" => Self::Deleting,
            "FAILED" => Self::Failed,
            "MISCONFIGURED" => Self::Misconfigured,
            "PENDING" => Self::Pending,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for StorageVirtualMachineLifecycle {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(StorageVirtualMachineLifecycle::from(s))
    }
}
impl StorageVirtualMachineLifecycle {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            StorageVirtualMachineLifecycle::Created => "CREATED",
            StorageVirtualMachineLifecycle::Creating => "CREATING",
            StorageVirtualMachineLifecycle::Deleting => "DELETING",
            StorageVirtualMachineLifecycle::Failed => "FAILED",
            StorageVirtualMachineLifecycle::Misconfigured => "MISCONFIGURED",
            StorageVirtualMachineLifecycle::Pending => "PENDING",
            StorageVirtualMachineLifecycle::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &[
            "CREATED",
            "CREATING",
            "DELETING",
            "FAILED",
            "MISCONFIGURED",
            "PENDING",
        ]
    }
}
impl AsRef<str> for StorageVirtualMachineLifecycle {
    /// Delegates to [`Self::as_str`].
    fn as_ref(&self) -> &str {
        Self::as_str(self)
    }
}

/// <p>An Amazon FSx for NetApp ONTAP storage virtual machine (SVM) has the following endpoints that are used to access data or to manage the SVM using the NetApp ONTAP CLI, REST API, or NetApp CloudManager.</p>
// Every endpoint field is optional; protocols that are not provisioned are None.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct SvmEndpoints {
    /// <p>An endpoint for connecting using the Internet Small Computer Systems Interface (iSCSI) protocol.</p>
    #[doc(hidden)]
    pub iscsi: std::option::Option<crate::model::SvmEndpoint>,
    /// <p>An endpoint for managing SVMs using the NetApp ONTAP CLI, NetApp ONTAP API, or NetApp CloudManager.</p>
    #[doc(hidden)]
    pub management: std::option::Option<crate::model::SvmEndpoint>,
    /// <p>An endpoint for connecting using the Network File System (NFS) protocol.</p>
    #[doc(hidden)]
    pub nfs: std::option::Option<crate::model::SvmEndpoint>,
    /// <p>An endpoint for connecting using the Server Message Block (SMB) protocol.</p>
    #[doc(hidden)]
    pub smb: std::option::Option<crate::model::SvmEndpoint>,
}
impl SvmEndpoints {
    /// <p>An endpoint for connecting using the Internet Small Computer Systems Interface (iSCSI) protocol.</p>
    pub fn iscsi(&self) -> std::option::Option<&crate::model::SvmEndpoint> {
        std::option::Option::as_ref(&self.iscsi)
    }
    /// <p>An endpoint for managing SVMs using the NetApp ONTAP CLI, NetApp ONTAP API, or NetApp CloudManager.</p>
    pub fn management(&self) -> std::option::Option<&crate::model::SvmEndpoint> {
        std::option::Option::as_ref(&self.management)
    }
    /// <p>An endpoint for connecting using the Network File System (NFS) protocol.</p>
    pub fn nfs(&self) -> std::option::Option<&crate::model::SvmEndpoint> {
        std::option::Option::as_ref(&self.nfs)
    }
    /// <p>An endpoint for connecting using the Server Message Block (SMB) protocol.</p>
    pub fn smb(&self) -> std::option::Option<&crate::model::SvmEndpoint> {
        std::option::Option::as_ref(&self.smb)
    }
}
/// See [`SvmEndpoints`](crate::model::SvmEndpoints).
pub mod svm_endpoints {

    /// A builder for [`SvmEndpoints`](crate::model::SvmEndpoints).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) iscsi: std::option::Option<crate::model::SvmEndpoint>,
        pub(crate) management: std::option::Option<crate::model::SvmEndpoint>,
        pub(crate) nfs: std::option::Option<crate::model::SvmEndpoint>,
        pub(crate) smb: std::option::Option<crate::model::SvmEndpoint>,
    }
    impl Builder {
        // Each `x(value)` setter wraps its argument in Some; each `set_x(option)`
        // setter stores the Option verbatim (useful for clearing a field).

        /// <p>An endpoint for connecting using the Internet Small Computer Systems Interface (iSCSI) protocol.</p>
        pub fn iscsi(self, input: crate::model::SvmEndpoint) -> Self {
            Self { iscsi: Some(input), ..self }
        }
        /// <p>An endpoint for connecting using the Internet Small Computer Systems Interface (iSCSI) protocol.</p>
        pub fn set_iscsi(self, input: std::option::Option<crate::model::SvmEndpoint>) -> Self {
            Self { iscsi: input, ..self }
        }
        /// <p>An endpoint for managing SVMs using the NetApp ONTAP CLI, NetApp ONTAP API, or NetApp CloudManager.</p>
        pub fn management(self, input: crate::model::SvmEndpoint) -> Self {
            Self { management: Some(input), ..self }
        }
        /// <p>An endpoint for managing SVMs using the NetApp ONTAP CLI, NetApp ONTAP API, or NetApp CloudManager.</p>
        pub fn set_management(
            self,
            input: std::option::Option<crate::model::SvmEndpoint>,
        ) -> Self {
            Self { management: input, ..self }
        }
        /// <p>An endpoint for connecting using the Network File System (NFS) protocol.</p>
        pub fn nfs(self, input: crate::model::SvmEndpoint) -> Self {
            Self { nfs: Some(input), ..self }
        }
        /// <p>An endpoint for connecting using the Network File System (NFS) protocol.</p>
        pub fn set_nfs(self, input: std::option::Option<crate::model::SvmEndpoint>) -> Self {
            Self { nfs: input, ..self }
        }
        /// <p>An endpoint for connecting using the Server Message Block (SMB) protocol.</p>
        pub fn smb(self, input: crate::model::SvmEndpoint) -> Self {
            Self { smb: Some(input), ..self }
        }
        /// <p>An endpoint for connecting using the Server Message Block (SMB) protocol.</p>
        pub fn set_smb(self, input: std::option::Option<crate::model::SvmEndpoint>) -> Self {
            Self { smb: input, ..self }
        }
        /// Consumes the builder and constructs a [`SvmEndpoints`](crate::model::SvmEndpoints).
        pub fn build(self) -> crate::model::SvmEndpoints {
            // No validation: every field is moved through as-is.
            let Self { iscsi, management, nfs, smb } = self;
            crate::model::SvmEndpoints { iscsi, management, nfs, smb }
        }
    }
}
impl SvmEndpoints {
    /// Creates a new builder-style object to manufacture [`SvmEndpoints`](crate::model::SvmEndpoints).
    pub fn builder() -> crate::model::svm_endpoints::Builder {
        // The declared return type drives inference here; this is equivalent
        // to `crate::model::svm_endpoints::Builder::default()`.
        Default::default()
    }
}

/// <p>An Amazon FSx for NetApp ONTAP storage virtual machine (SVM) has four endpoints that are used to access data or to manage the SVM using the NetApp ONTAP CLI, REST API, or NetApp CloudManager. They are the <code>Iscsi</code>, <code>Management</code>, <code>Nfs</code>, and <code>Smb</code> endpoints.</p>
/// <p>This structure describes one such endpoint: its DNS name and the IP addresses it resolves to.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct SvmEndpoint {
    /// <p>The Domain Name Service (DNS) name for the file system. You can mount your file system using its DNS name.</p>
    #[doc(hidden)]
    pub dns_name: std::option::Option<std::string::String>,
    /// <p>The SVM endpoint's IP addresses.</p>
    #[doc(hidden)]
    pub ip_addresses: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl SvmEndpoint {
    /// <p>The Domain Name Service (DNS) name for the file system. You can mount your file system using its DNS name.</p>
    pub fn dns_name(&self) -> std::option::Option<&str> {
        // Borrow the owned `String` as a `&str` without cloning.
        self.dns_name.as_ref().map(|name| name.as_str())
    }
    /// <p>The SVM endpoint's IP addresses.</p>
    pub fn ip_addresses(&self) -> std::option::Option<&[std::string::String]> {
        // Borrow the owned `Vec` as a slice without cloning.
        self.ip_addresses.as_ref().map(|addrs| addrs.as_slice())
    }
}
/// See [`SvmEndpoint`](crate::model::SvmEndpoint).
pub mod svm_endpoint {

    /// A builder for [`SvmEndpoint`](crate::model::SvmEndpoint).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) dns_name: std::option::Option<std::string::String>,
        pub(crate) ip_addresses: std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// <p>The Domain Name Service (DNS) name for the file system. You can mount your file system using its DNS name.</p>
        pub fn dns_name(mut self, value: impl Into<std::string::String>) -> Self {
            self.dns_name = Some(value.into());
            self
        }
        /// <p>The Domain Name Service (DNS) name for the file system. You can mount your file system using its DNS name.</p>
        pub fn set_dns_name(mut self, value: std::option::Option<std::string::String>) -> Self {
            self.dns_name = value;
            self
        }
        /// Appends an item to `ip_addresses`.
        ///
        /// To override the contents of this collection use [`set_ip_addresses`](Self::set_ip_addresses).
        ///
        /// <p>The SVM endpoint's IP addresses.</p>
        pub fn ip_addresses(mut self, value: impl Into<std::string::String>) -> Self {
            // Lazily create the backing `Vec` on first append, then push.
            self.ip_addresses
                .get_or_insert_with(std::vec::Vec::new)
                .push(value.into());
            self
        }
        /// <p>The SVM endpoint's IP addresses.</p>
        pub fn set_ip_addresses(
            mut self,
            value: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.ip_addresses = value;
            self
        }
        /// Consumes the builder and constructs a [`SvmEndpoint`](crate::model::SvmEndpoint).
        pub fn build(self) -> crate::model::SvmEndpoint {
            // No validation required: both fields are optional end to end.
            let Builder { dns_name, ip_addresses } = self;
            crate::model::SvmEndpoint { dns_name, ip_addresses }
        }
    }
}
impl SvmEndpoint {
    /// Creates a new builder-style object to manufacture [`SvmEndpoint`](crate::model::SvmEndpoint).
    pub fn builder() -> crate::model::svm_endpoint::Builder {
        // The declared return type drives inference here; this is equivalent
        // to `crate::model::svm_endpoint::Builder::default()`.
        Default::default()
    }
}

/// <p>Describes the configuration of the Microsoft Active Directory (AD) directory to which the Amazon FSx for ONTAP storage virtual machine (SVM) is joined. Please note, account credentials are not returned in the response payload.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct SvmActiveDirectoryConfiguration {
    /// <p>The NetBIOS name of the Active Directory computer object that is joined to your SVM.</p>
    #[doc(hidden)]
    pub net_bios_name: std::option::Option<std::string::String>,
    /// <p>The configuration of the self-managed Microsoft Active Directory (AD) directory to which the Windows File Server or ONTAP storage virtual machine (SVM) instance is joined.</p>
    #[doc(hidden)]
    pub self_managed_active_directory_configuration:
        std::option::Option<crate::model::SelfManagedActiveDirectoryAttributes>,
}
impl SvmActiveDirectoryConfiguration {
    /// <p>The NetBIOS name of the Active Directory computer object that is joined to your SVM.</p>
    pub fn net_bios_name(&self) -> std::option::Option<&str> {
        // Borrow the owned `String` as a `&str` without cloning.
        self.net_bios_name.as_ref().map(|name| name.as_str())
    }
    /// <p>The configuration of the self-managed Microsoft Active Directory (AD) directory to which the Windows File Server or ONTAP storage virtual machine (SVM) instance is joined.</p>
    pub fn self_managed_active_directory_configuration(
        &self,
    ) -> std::option::Option<&crate::model::SelfManagedActiveDirectoryAttributes> {
        // Borrow the nested configuration rather than handing out ownership.
        match &self.self_managed_active_directory_configuration {
            Some(config) => Some(config),
            None => None,
        }
    }
}
/// See [`SvmActiveDirectoryConfiguration`](crate::model::SvmActiveDirectoryConfiguration).
pub mod svm_active_directory_configuration {

    /// A builder for [`SvmActiveDirectoryConfiguration`](crate::model::SvmActiveDirectoryConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) net_bios_name: std::option::Option<std::string::String>,
        pub(crate) self_managed_active_directory_configuration:
            std::option::Option<crate::model::SelfManagedActiveDirectoryAttributes>,
    }
    impl Builder {
        /// <p>The NetBIOS name of the Active Directory computer object that is joined to your SVM.</p>
        pub fn net_bios_name(mut self, value: impl Into<std::string::String>) -> Self {
            self.net_bios_name = Some(value.into());
            self
        }
        /// <p>The NetBIOS name of the Active Directory computer object that is joined to your SVM.</p>
        pub fn set_net_bios_name(mut self, value: std::option::Option<std::string::String>) -> Self {
            self.net_bios_name = value;
            self
        }
        /// <p>The configuration of the self-managed Microsoft Active Directory (AD) directory to which the Windows File Server or ONTAP storage virtual machine (SVM) instance is joined.</p>
        pub fn self_managed_active_directory_configuration(
            mut self,
            value: crate::model::SelfManagedActiveDirectoryAttributes,
        ) -> Self {
            self.self_managed_active_directory_configuration = Some(value);
            self
        }
        /// <p>The configuration of the self-managed Microsoft Active Directory (AD) directory to which the Windows File Server or ONTAP storage virtual machine (SVM) instance is joined.</p>
        pub fn set_self_managed_active_directory_configuration(
            mut self,
            value: std::option::Option<crate::model::SelfManagedActiveDirectoryAttributes>,
        ) -> Self {
            self.self_managed_active_directory_configuration = value;
            self
        }
        /// Consumes the builder and constructs a [`SvmActiveDirectoryConfiguration`](crate::model::SvmActiveDirectoryConfiguration).
        pub fn build(self) -> crate::model::SvmActiveDirectoryConfiguration {
            // No validation required: both fields are optional end to end.
            let Builder {
                net_bios_name,
                self_managed_active_directory_configuration,
            } = self;
            crate::model::SvmActiveDirectoryConfiguration {
                net_bios_name,
                self_managed_active_directory_configuration,
            }
        }
    }
}
impl SvmActiveDirectoryConfiguration {
    /// Creates a new builder-style object to manufacture [`SvmActiveDirectoryConfiguration`](crate::model::SvmActiveDirectoryConfiguration).
    pub fn builder() -> crate::model::svm_active_directory_configuration::Builder {
        // The declared return type drives inference; equivalent to calling
        // `Builder::default()` on the builder module's type.
        Default::default()
    }
}

/// <p>Updates the Microsoft Active Directory (AD) configuration of an SVM joined to an AD. Please note, account credentials are not returned in the response payload.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct UpdateSvmActiveDirectoryConfiguration {
    /// <p>The configuration that Amazon FSx uses to join the Windows File Server instance to a self-managed Microsoft Active Directory (AD) directory.</p>
    // This is the only updatable piece of the SVM's AD configuration in this request shape.
    #[doc(hidden)]
    pub self_managed_active_directory_configuration:
        std::option::Option<crate::model::SelfManagedActiveDirectoryConfigurationUpdates>,
}
impl UpdateSvmActiveDirectoryConfiguration {
    /// <p>The configuration that Amazon FSx uses to join the Windows File Server instance to a self-managed Microsoft Active Directory (AD) directory.</p>
    pub fn self_managed_active_directory_configuration(
        &self,
    ) -> std::option::Option<&crate::model::SelfManagedActiveDirectoryConfigurationUpdates> {
        // Borrow the nested updates struct rather than handing out ownership.
        match &self.self_managed_active_directory_configuration {
            Some(updates) => Some(updates),
            None => None,
        }
    }
}
/// See [`UpdateSvmActiveDirectoryConfiguration`](crate::model::UpdateSvmActiveDirectoryConfiguration).
pub mod update_svm_active_directory_configuration {

    /// A builder for [`UpdateSvmActiveDirectoryConfiguration`](crate::model::UpdateSvmActiveDirectoryConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) self_managed_active_directory_configuration:
            std::option::Option<crate::model::SelfManagedActiveDirectoryConfigurationUpdates>,
    }
    impl Builder {
        /// <p>The configuration that Amazon FSx uses to join the Windows File Server instance to a self-managed Microsoft Active Directory (AD) directory.</p>
        pub fn self_managed_active_directory_configuration(
            mut self,
            value: crate::model::SelfManagedActiveDirectoryConfigurationUpdates,
        ) -> Self {
            self.self_managed_active_directory_configuration = Some(value);
            self
        }
        /// <p>The configuration that Amazon FSx uses to join the Windows File Server instance to a self-managed Microsoft Active Directory (AD) directory.</p>
        pub fn set_self_managed_active_directory_configuration(
            mut self,
            value: std::option::Option<crate::model::SelfManagedActiveDirectoryConfigurationUpdates>,
        ) -> Self {
            self.self_managed_active_directory_configuration = value;
            self
        }
        /// Consumes the builder and constructs a [`UpdateSvmActiveDirectoryConfiguration`](crate::model::UpdateSvmActiveDirectoryConfiguration).
        pub fn build(self) -> crate::model::UpdateSvmActiveDirectoryConfiguration {
            // Single optional field, so there is nothing to validate.
            let Builder {
                self_managed_active_directory_configuration,
            } = self;
            crate::model::UpdateSvmActiveDirectoryConfiguration {
                self_managed_active_directory_configuration,
            }
        }
    }
}
impl UpdateSvmActiveDirectoryConfiguration {
    /// Creates a new builder-style object to manufacture [`UpdateSvmActiveDirectoryConfiguration`](crate::model::UpdateSvmActiveDirectoryConfiguration).
    pub fn builder() -> crate::model::update_svm_active_directory_configuration::Builder {
        // The declared return type drives inference; equivalent to calling
        // `Builder::default()` on the builder module's type.
        Default::default()
    }
}

/// <p>The configuration that Amazon FSx uses to join the Windows File Server instance to a self-managed Microsoft Active Directory (AD) directory.</p>
// NOTE: `Debug` is deliberately NOT derived here; a manual implementation below
// redacts the `password` field from debug output.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct SelfManagedActiveDirectoryConfigurationUpdates {
    /// <p>The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. This account must have the permission to join computers to the domain in the organizational unit provided in <code>OrganizationalUnitDistinguishedName</code>.</p>
    #[doc(hidden)]
    pub user_name: std::option::Option<std::string::String>,
    /// <p>The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain.</p>
    // Sensitive: never printed by the manual `Debug` implementation.
    #[doc(hidden)]
    pub password: std::option::Option<std::string::String>,
    /// <p>A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.</p>
    #[doc(hidden)]
    pub dns_ips: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl SelfManagedActiveDirectoryConfigurationUpdates {
    /// <p>The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. This account must have the permission to join computers to the domain in the organizational unit provided in <code>OrganizationalUnitDistinguishedName</code>.</p>
    pub fn user_name(&self) -> std::option::Option<&str> {
        // Borrow the owned `String` as a `&str` without cloning.
        self.user_name.as_ref().map(|name| name.as_str())
    }
    /// <p>The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain.</p>
    pub fn password(&self) -> std::option::Option<&str> {
        // Borrow the owned `String` as a `&str` without cloning.
        self.password.as_ref().map(|secret| secret.as_str())
    }
    /// <p>A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.</p>
    pub fn dns_ips(&self) -> std::option::Option<&[std::string::String]> {
        // Borrow the owned `Vec` as a slice without cloning.
        self.dns_ips.as_ref().map(|ips| ips.as_slice())
    }
}
impl std::fmt::Debug for SelfManagedActiveDirectoryConfigurationUpdates {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Manual implementation so the sensitive `password` value is never
        // written to logs or debug output.
        f.debug_struct("SelfManagedActiveDirectoryConfigurationUpdates")
            .field("user_name", &self.user_name)
            .field("password", &"*** Sensitive Data Redacted ***")
            .field("dns_ips", &self.dns_ips)
            .finish()
    }
}
/// See [`SelfManagedActiveDirectoryConfigurationUpdates`](crate::model::SelfManagedActiveDirectoryConfigurationUpdates).
pub mod self_managed_active_directory_configuration_updates {

    /// A builder for [`SelfManagedActiveDirectoryConfigurationUpdates`](crate::model::SelfManagedActiveDirectoryConfigurationUpdates).
    // `Debug` is implemented by hand below so `password` stays redacted.
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default)]
    pub struct Builder {
        pub(crate) user_name: std::option::Option<std::string::String>,
        pub(crate) password: std::option::Option<std::string::String>,
        pub(crate) dns_ips: std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// <p>The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. This account must have the permission to join computers to the domain in the organizational unit provided in <code>OrganizationalUnitDistinguishedName</code>.</p>
        pub fn user_name(mut self, value: impl Into<std::string::String>) -> Self {
            self.user_name = Some(value.into());
            self
        }
        /// <p>The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. This account must have the permission to join computers to the domain in the organizational unit provided in <code>OrganizationalUnitDistinguishedName</code>.</p>
        pub fn set_user_name(mut self, value: std::option::Option<std::string::String>) -> Self {
            self.user_name = value;
            self
        }
        /// <p>The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain.</p>
        pub fn password(mut self, value: impl Into<std::string::String>) -> Self {
            self.password = Some(value.into());
            self
        }
        /// <p>The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain.</p>
        pub fn set_password(mut self, value: std::option::Option<std::string::String>) -> Self {
            self.password = value;
            self
        }
        /// Appends an item to `dns_ips`.
        ///
        /// To override the contents of this collection use [`set_dns_ips`](Self::set_dns_ips).
        ///
        /// <p>A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.</p>
        pub fn dns_ips(mut self, value: impl Into<std::string::String>) -> Self {
            // Lazily create the backing `Vec` on first append, then push.
            self.dns_ips
                .get_or_insert_with(std::vec::Vec::new)
                .push(value.into());
            self
        }
        /// <p>A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.</p>
        pub fn set_dns_ips(
            mut self,
            value: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.dns_ips = value;
            self
        }
        /// Consumes the builder and constructs a [`SelfManagedActiveDirectoryConfigurationUpdates`](crate::model::SelfManagedActiveDirectoryConfigurationUpdates).
        pub fn build(self) -> crate::model::SelfManagedActiveDirectoryConfigurationUpdates {
            // No validation required: all fields are optional end to end.
            let Builder { user_name, password, dns_ips } = self;
            crate::model::SelfManagedActiveDirectoryConfigurationUpdates {
                user_name,
                password,
                dns_ips,
            }
        }
    }
    impl std::fmt::Debug for Builder {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            // Manual implementation so the sensitive `password` value is never
            // written to logs or debug output.
            f.debug_struct("Builder")
                .field("user_name", &self.user_name)
                .field("password", &"*** Sensitive Data Redacted ***")
                .field("dns_ips", &self.dns_ips)
                .finish()
        }
    }
}
impl SelfManagedActiveDirectoryConfigurationUpdates {
    /// Creates a new builder-style object to manufacture [`SelfManagedActiveDirectoryConfigurationUpdates`](crate::model::SelfManagedActiveDirectoryConfigurationUpdates).
    pub fn builder() -> crate::model::self_managed_active_directory_configuration_updates::Builder {
        // The declared return type drives inference; equivalent to calling
        // `Builder::default()` on the builder module's type.
        Default::default()
    }
}

/// When writing a match expression against `ServiceLimit`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in the current version of the SDK, your code should continue to work when you
/// upgrade the SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let servicelimit = unimplemented!();
/// match servicelimit {
///     ServiceLimit::FileCacheCount => { /* ... */ },
///     ServiceLimit::FileSystemCount => { /* ... */ },
///     ServiceLimit::StorageVirtualMachinesPerFileSystem => { /* ... */ },
///     ServiceLimit::TotalInProgressCopyBackups => { /* ... */ },
///     ServiceLimit::TotalSsdIops => { /* ... */ },
///     ServiceLimit::TotalStorage => { /* ... */ },
///     ServiceLimit::TotalThroughputCapacity => { /* ... */ },
///     ServiceLimit::TotalUserInitiatedBackups => { /* ... */ },
///     ServiceLimit::TotalUserTags => { /* ... */ },
///     ServiceLimit::VolumesPerFileSystem => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `servicelimit` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `ServiceLimit::NewFeature`
/// in the current version of the SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `ServiceLimit::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of the SDK where the variant `ServiceLimit::NewFeature` is defined.
/// Specifically, when `servicelimit` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `ServiceLimit::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
/// <p>The types of limits on your service utilization. Limits include file system count,
/// total throughput capacity, total storage, and total user-initiated backups. These limits
/// apply for a specific account in a specific Amazon Web Services Region. You can increase some of them by
/// contacting Amazon Web Services Support.</p>
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum ServiceLimit {
    #[allow(missing_docs)] // documentation missing in model
    FileCacheCount,
    #[allow(missing_docs)] // documentation missing in model
    FileSystemCount,
    #[allow(missing_docs)] // documentation missing in model
    StorageVirtualMachinesPerFileSystem,
    #[allow(missing_docs)] // documentation missing in model
    TotalInProgressCopyBackups,
    #[allow(missing_docs)] // documentation missing in model
    TotalSsdIops,
    #[allow(missing_docs)] // documentation missing in model
    TotalStorage,
    #[allow(missing_docs)] // documentation missing in model
    TotalThroughputCapacity,
    #[allow(missing_docs)] // documentation missing in model
    TotalUserInitiatedBackups,
    #[allow(missing_docs)] // documentation missing in model
    TotalUserTags,
    #[allow(missing_docs)] // documentation missing in model
    VolumesPerFileSystem,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for ServiceLimit {
    fn from(s: &str) -> Self {
        // Map each modeled wire string to its variant; anything unrecognized
        // is preserved verbatim inside `Unknown` for forward compatibility.
        match s {
            "FILE_CACHE_COUNT" => Self::FileCacheCount,
            "FILE_SYSTEM_COUNT" => Self::FileSystemCount,
            "STORAGE_VIRTUAL_MACHINES_PER_FILE_SYSTEM" => Self::StorageVirtualMachinesPerFileSystem,
            "TOTAL_IN_PROGRESS_COPY_BACKUPS" => Self::TotalInProgressCopyBackups,
            "TOTAL_SSD_IOPS" => Self::TotalSsdIops,
            "TOTAL_STORAGE" => Self::TotalStorage,
            "TOTAL_THROUGHPUT_CAPACITY" => Self::TotalThroughputCapacity,
            "TOTAL_USER_INITIATED_BACKUPS" => Self::TotalUserInitiatedBackups,
            "TOTAL_USER_TAGS" => Self::TotalUserTags,
            "VOLUMES_PER_FILE_SYSTEM" => Self::VolumesPerFileSystem,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for ServiceLimit {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(ServiceLimit::from(s))
    }
}
impl ServiceLimit {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            ServiceLimit::FileCacheCount => "FILE_CACHE_COUNT",
            ServiceLimit::FileSystemCount => "FILE_SYSTEM_COUNT",
            ServiceLimit::StorageVirtualMachinesPerFileSystem => {
                "STORAGE_VIRTUAL_MACHINES_PER_FILE_SYSTEM"
            }
            ServiceLimit::TotalInProgressCopyBackups => "TOTAL_IN_PROGRESS_COPY_BACKUPS",
            ServiceLimit::TotalSsdIops => "TOTAL_SSD_IOPS",
            ServiceLimit::TotalStorage => "TOTAL_STORAGE",
            ServiceLimit::TotalThroughputCapacity => "TOTAL_THROUGHPUT_CAPACITY",
            ServiceLimit::TotalUserInitiatedBackups => "TOTAL_USER_INITIATED_BACKUPS",
            ServiceLimit::TotalUserTags => "TOTAL_USER_TAGS",
            ServiceLimit::VolumesPerFileSystem => "VOLUMES_PER_FILE_SYSTEM",
            ServiceLimit::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &[
            "FILE_CACHE_COUNT",
            "FILE_SYSTEM_COUNT",
            "STORAGE_VIRTUAL_MACHINES_PER_FILE_SYSTEM",
            "TOTAL_IN_PROGRESS_COPY_BACKUPS",
            "TOTAL_SSD_IOPS",
            "TOTAL_STORAGE",
            "TOTAL_THROUGHPUT_CAPACITY",
            "TOTAL_USER_INITIATED_BACKUPS",
            "TOTAL_USER_TAGS",
            "VOLUMES_PER_FILE_SYSTEM",
        ]
    }
}
impl AsRef<str> for ServiceLimit {
    // Delegates to `as_str` so the enum can be used anywhere a `&str` is accepted.
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>The configuration updates for an Amazon FSx for OpenZFS file system.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct UpdateFileSystemOpenZfsConfiguration {
    /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
    #[doc(hidden)]
    pub automatic_backup_retention_days: std::option::Option<i32>,
    /// <p>A Boolean value indicating whether tags for the file system should be copied to backups. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.</p>
    #[doc(hidden)]
    pub copy_tags_to_backups: std::option::Option<bool>,
    /// <p>A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value.</p>
    #[doc(hidden)]
    pub copy_tags_to_volumes: std::option::Option<bool>,
    /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
    #[doc(hidden)]
    pub daily_automatic_backup_start_time: std::option::Option<std::string::String>,
    /// <p>The throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MB/s). Valid values depend on the DeploymentType you choose, as follows:</p>
    /// <ul>
    /// <li> <p>For <code>SINGLE_AZ_1</code>, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.</p> </li>
    /// <li> <p>For <code>SINGLE_AZ_2</code>, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MB/s.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub throughput_capacity: std::option::Option<i32>,
    /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
    /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
    /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
    /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
    #[doc(hidden)]
    pub weekly_maintenance_start_time: std::option::Option<std::string::String>,
    /// <p>The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how the amount was provisioned (by the customer or by the system).</p>
    #[doc(hidden)]
    pub disk_iops_configuration: std::option::Option<crate::model::DiskIopsConfiguration>,
}
impl UpdateFileSystemOpenZfsConfiguration {
    /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
    pub fn automatic_backup_retention_days(&self) -> std::option::Option<i32> {
        self.automatic_backup_retention_days
    }
    /// <p>A Boolean value indicating whether tags for the file system should be copied to backups. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.</p>
    pub fn copy_tags_to_backups(&self) -> std::option::Option<bool> {
        self.copy_tags_to_backups
    }
    /// <p>A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value.</p>
    pub fn copy_tags_to_volumes(&self) -> std::option::Option<bool> {
        self.copy_tags_to_volumes
    }
    /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
    pub fn daily_automatic_backup_start_time(&self) -> std::option::Option<&str> {
        self.daily_automatic_backup_start_time.as_deref()
    }
    /// <p>The throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second
 (MB/s). Valid values depend on the DeploymentType you choose, as follows:</p>
    /// <ul>
    /// <li> <p>For <code>SINGLE_AZ_1</code>, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.</p> </li>
    /// <li> <p>For <code>SINGLE_AZ_2</code>, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MB/s.</p> </li>
    /// </ul>
    pub fn throughput_capacity(&self) -> std::option::Option<i32> {
        self.throughput_capacity
    }
    /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
    /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
    /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
    /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
    pub fn weekly_maintenance_start_time(&self) -> std::option::Option<&str> {
        self.weekly_maintenance_start_time.as_deref()
    }
    /// <p>The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how the amount was provisioned (by the customer or by the system).</p>
    pub fn disk_iops_configuration(
        &self,
    ) -> std::option::Option<&crate::model::DiskIopsConfiguration> {
        self.disk_iops_configuration.as_ref()
    }
}
/// See [`UpdateFileSystemOpenZfsConfiguration`](crate::model::UpdateFileSystemOpenZfsConfiguration).
pub mod update_file_system_open_zfs_configuration {

    /// A builder for [`UpdateFileSystemOpenZfsConfiguration`](crate::model::UpdateFileSystemOpenZfsConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) automatic_backup_retention_days: std::option::Option<i32>,
        pub(crate) copy_tags_to_backups: std::option::Option<bool>,
        pub(crate) copy_tags_to_volumes: std::option::Option<bool>,
        pub(crate) daily_automatic_backup_start_time: std::option::Option<std::string::String>,
        pub(crate) throughput_capacity: std::option::Option<i32>,
        pub(crate) weekly_maintenance_start_time: std::option::Option<std::string::String>,
        pub(crate) disk_iops_configuration:
            std::option::Option<crate::model::DiskIopsConfiguration>,
    }
    impl Builder {
        /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
        pub fn automatic_backup_retention_days(mut self, input: i32) -> Self {
            self.automatic_backup_retention_days = Some(input);
            self
        }
        /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
        pub fn set_automatic_backup_retention_days(
            mut self,
            input: std::option::Option<i32>,
        ) -> Self {
            self.automatic_backup_retention_days = input;
            self
        }
        /// <p>A Boolean value indicating whether tags for the file system should be copied to backups. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.</p>
        pub fn copy_tags_to_backups(mut self, input: bool) -> Self {
            self.copy_tags_to_backups = Some(input);
            self
        }
        /// <p>A Boolean value indicating whether tags for the file system should be copied to backups. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.</p>
        pub fn set_copy_tags_to_backups(mut self, input: std::option::Option<bool>) -> Self {
            self.copy_tags_to_backups = input;
            self
        }
        /// <p>A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value.</p>
        pub fn copy_tags_to_volumes(mut self, input: bool) -> Self {
            self.copy_tags_to_volumes = Some(input);
            self
        }
        /// <p>A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value.</p>
        pub fn set_copy_tags_to_volumes(mut self, input: std::option::Option<bool>) -> Self {
            self.copy_tags_to_volumes = input;
            self
        }
        /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
        pub fn daily_automatic_backup_start_time(
            mut self,
            input: impl Into<std::string::String>,
        ) -> Self {
            self.daily_automatic_backup_start_time = Some(input.into());
            self
        }
        /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
        pub fn set_daily_automatic_backup_start_time(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.daily_automatic_backup_start_time = input;
            self
        }
        /// <p>The throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MB/s). Valid values depend on the DeploymentType you choose, as follows:</p>
        /// <ul>
        /// <li> <p>For <code>SINGLE_AZ_1</code>, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.</p> </li>
        /// <li> <p>For <code>SINGLE_AZ_2</code>, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MB/s.</p> </li>
        /// </ul>
        pub fn throughput_capacity(mut self, input: i32) -> Self {
            self.throughput_capacity = Some(input);
            self
        }
        /// <p>The throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MB/s). Valid values depend on the DeploymentType you choose, as follows:</p>
        /// <ul>
        /// <li> <p>For <code>SINGLE_AZ_1</code>, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.</p> </li>
        /// <li> <p>For <code>SINGLE_AZ_2</code>, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MB/s.</p> </li>
        /// </ul>
        pub fn set_throughput_capacity(mut self, input: std::option::Option<i32>) -> Self {
            self.throughput_capacity = input;
            self
        }
        /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
        /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
        /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
        /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
        pub fn weekly_maintenance_start_time(
            mut self,
            input: impl Into<std::string::String>,
        ) -> Self {
            self.weekly_maintenance_start_time = Some(input.into());
            self
        }
        /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
        /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
        /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
        /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
        pub fn set_weekly_maintenance_start_time(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.weekly_maintenance_start_time = input;
            self
        }
        /// <p>The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how the amount was provisioned (by the customer or by the system).</p>
        pub fn disk_iops_configuration(
            mut self,
            input: crate::model::DiskIopsConfiguration,
        ) -> Self {
            self.disk_iops_configuration = Some(input);
            self
        }
        /// <p>The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how the amount was provisioned (by the customer or by the system).</p>
        pub fn set_disk_iops_configuration(
            mut self,
            input: std::option::Option<crate::model::DiskIopsConfiguration>,
        ) -> Self {
            self.disk_iops_configuration = input;
            self
        }
        /// Consumes the builder and constructs a [`UpdateFileSystemOpenZfsConfiguration`](crate::model::UpdateFileSystemOpenZfsConfiguration).
        pub fn build(self) -> crate::model::UpdateFileSystemOpenZfsConfiguration {
            crate::model::UpdateFileSystemOpenZfsConfiguration {
                automatic_backup_retention_days: self.automatic_backup_retention_days,
                copy_tags_to_backups: self.copy_tags_to_backups,
                copy_tags_to_volumes: self.copy_tags_to_volumes,
                daily_automatic_backup_start_time: self.daily_automatic_backup_start_time,
                throughput_capacity: self.throughput_capacity,
                weekly_maintenance_start_time: self.weekly_maintenance_start_time,
                disk_iops_configuration: self.disk_iops_configuration,
            }
        }
    }
}
impl UpdateFileSystemOpenZfsConfiguration {
    /// Creates a new builder-style object to manufacture [`UpdateFileSystemOpenZfsConfiguration`](crate::model::UpdateFileSystemOpenZfsConfiguration).
    pub fn builder() -> crate::model::update_file_system_open_zfs_configuration::Builder {
        // The concrete type is fixed by the return annotation.
        Default::default()
    }
}

/// <p>The configuration updates for an Amazon FSx for NetApp ONTAP file system.</p>
// NOTE(review): every field is Option<_>; presumably only the properties being
// changed are supplied in an UpdateFileSystem request — confirm against the
// FSx API reference. Debug for this type is hand-written below so that
// `fsx_admin_password` is redacted rather than printed.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UpdateFileSystemOntapConfiguration {
    /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
    #[doc(hidden)]
    pub automatic_backup_retention_days: std::option::Option<i32>,
    /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
    #[doc(hidden)]
    pub daily_automatic_backup_start_time: std::option::Option<std::string::String>,
    /// <p>The ONTAP administrative password for the <code>fsxadmin</code> user.</p>
    #[doc(hidden)]
    pub fsx_admin_password: std::option::Option<std::string::String>,
    /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
    /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
    /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
    /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
    #[doc(hidden)]
    pub weekly_maintenance_start_time: std::option::Option<std::string::String>,
    /// <p>The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of an IOPS mode (<code>AUTOMATIC</code> or <code>USER_PROVISIONED</code>), and in the case of <code>USER_PROVISIONED</code> IOPS, the total number of SSD IOPS provisioned.</p>
    #[doc(hidden)]
    pub disk_iops_configuration: std::option::Option<crate::model::DiskIopsConfiguration>,
    /// <p>Specifies the throughput of an FSx for NetApp ONTAP file system, measured in megabytes per second (MBps). Valid values are 128, 256, 512, 1024, 2048, and 4096 MBps.</p>
    #[doc(hidden)]
    pub throughput_capacity: std::option::Option<i32>,
    /// <p>(Multi-AZ only) A list of IDs of new virtual private cloud (VPC) route tables to associate (add) with your Amazon FSx for NetApp ONTAP file system.</p>
    #[doc(hidden)]
    pub add_route_table_ids: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>(Multi-AZ only) A list of IDs of existing virtual private cloud (VPC) route tables to disassociate (remove) from your Amazon FSx for NetApp ONTAP file system. You can use the API operation to retrieve the list of VPC route table IDs for a file system.</p>
    #[doc(hidden)]
    pub remove_route_table_ids: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl UpdateFileSystemOntapConfiguration {
    /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
    pub fn automatic_backup_retention_days(&self) -> std::option::Option<i32> {
        self.automatic_backup_retention_days
    }
    /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
    pub fn daily_automatic_backup_start_time(&self) -> std::option::Option<&str> {
        // Borrow the optional owned String as an optional &str.
        self.daily_automatic_backup_start_time
            .as_ref()
            .map(|time| time.as_str())
    }
    /// <p>The ONTAP administrative password for the <code>fsxadmin</code> user.</p>
    pub fn fsx_admin_password(&self) -> std::option::Option<&str> {
        self.fsx_admin_password.as_ref().map(|pw| pw.as_str())
    }
    /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
    /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
    /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
    /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
    pub fn weekly_maintenance_start_time(&self) -> std::option::Option<&str> {
        self.weekly_maintenance_start_time
            .as_ref()
            .map(|time| time.as_str())
    }
    /// <p>The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of an IOPS mode (<code>AUTOMATIC</code> or <code>USER_PROVISIONED</code>), and in the case of <code>USER_PROVISIONED</code> IOPS, the total number of SSD IOPS provisioned.</p>
    pub fn disk_iops_configuration(
        &self,
    ) -> std::option::Option<&crate::model::DiskIopsConfiguration> {
        match &self.disk_iops_configuration {
            Some(config) => Some(config),
            None => None,
        }
    }
    /// <p>Specifies the throughput of an FSx for NetApp ONTAP file system, measured in megabytes per second (MBps). Valid values are 128, 256, 512, 1024, 2048, and 4096 MBps.</p>
    pub fn throughput_capacity(&self) -> std::option::Option<i32> {
        self.throughput_capacity
    }
    /// <p>(Multi-AZ only) A list of IDs of new virtual private cloud (VPC) route tables to associate (add) with your Amazon FSx for NetApp ONTAP file system.</p>
    pub fn add_route_table_ids(&self) -> std::option::Option<&[std::string::String]> {
        self.add_route_table_ids.as_ref().map(|ids| ids.as_slice())
    }
    /// <p>(Multi-AZ only) A list of IDs of existing virtual private cloud (VPC) route tables to disassociate (remove) from your Amazon FSx for NetApp ONTAP file system. You can use the API operation to retrieve the list of VPC route table IDs for a file system.</p>
    pub fn remove_route_table_ids(&self) -> std::option::Option<&[std::string::String]> {
        self.remove_route_table_ids
            .as_ref()
            .map(|ids| ids.as_slice())
    }
}
impl std::fmt::Debug for UpdateFileSystemOntapConfiguration {
    // Hand-written Debug so `fsx_admin_password` is never printed in clear text.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("UpdateFileSystemOntapConfiguration")
            .field(
                "automatic_backup_retention_days",
                &self.automatic_backup_retention_days,
            )
            .field(
                "daily_automatic_backup_start_time",
                &self.daily_automatic_backup_start_time,
            )
            .field("fsx_admin_password", &"*** Sensitive Data Redacted ***")
            .field(
                "weekly_maintenance_start_time",
                &self.weekly_maintenance_start_time,
            )
            .field("disk_iops_configuration", &self.disk_iops_configuration)
            .field("throughput_capacity", &self.throughput_capacity)
            .field("add_route_table_ids", &self.add_route_table_ids)
            .field("remove_route_table_ids", &self.remove_route_table_ids)
            .finish()
    }
}
/// See [`UpdateFileSystemOntapConfiguration`](crate::model::UpdateFileSystemOntapConfiguration).
pub mod update_file_system_ontap_configuration {

    /// A builder for [`UpdateFileSystemOntapConfiguration`](crate::model::UpdateFileSystemOntapConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default)]
    pub struct Builder {
        pub(crate) automatic_backup_retention_days: std::option::Option<i32>,
        pub(crate) daily_automatic_backup_start_time: std::option::Option<std::string::String>,
        pub(crate) fsx_admin_password: std::option::Option<std::string::String>,
        pub(crate) weekly_maintenance_start_time: std::option::Option<std::string::String>,
        pub(crate) disk_iops_configuration:
            std::option::Option<crate::model::DiskIopsConfiguration>,
        pub(crate) throughput_capacity: std::option::Option<i32>,
        pub(crate) add_route_table_ids: std::option::Option<std::vec::Vec<std::string::String>>,
        pub(crate) remove_route_table_ids: std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
        pub fn automatic_backup_retention_days(self, input: i32) -> Self {
            Self {
                automatic_backup_retention_days: Some(input),
                ..self
            }
        }
        /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
        pub fn set_automatic_backup_retention_days(
            mut self,
            input: std::option::Option<i32>,
        ) -> Self {
            self.automatic_backup_retention_days = input;
            self
        }
        /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
        pub fn daily_automatic_backup_start_time(
            self,
            input: impl Into<std::string::String>,
        ) -> Self {
            Self {
                daily_automatic_backup_start_time: Some(input.into()),
                ..self
            }
        }
        /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
        pub fn set_daily_automatic_backup_start_time(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.daily_automatic_backup_start_time = input;
            self
        }
        /// <p>The ONTAP administrative password for the <code>fsxadmin</code> user.</p>
        pub fn fsx_admin_password(self, input: impl Into<std::string::String>) -> Self {
            Self {
                fsx_admin_password: Some(input.into()),
                ..self
            }
        }
        /// <p>The ONTAP administrative password for the <code>fsxadmin</code> user.</p>
        pub fn set_fsx_admin_password(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.fsx_admin_password = input;
            self
        }
        /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
        /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
        /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
        /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
        pub fn weekly_maintenance_start_time(
            self,
            input: impl Into<std::string::String>,
        ) -> Self {
            Self {
                weekly_maintenance_start_time: Some(input.into()),
                ..self
            }
        }
        /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
        /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
        /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
        /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
        pub fn set_weekly_maintenance_start_time(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.weekly_maintenance_start_time = input;
            self
        }
        /// <p>The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of an IOPS mode (<code>AUTOMATIC</code> or <code>USER_PROVISIONED</code>), and in the case of <code>USER_PROVISIONED</code> IOPS, the total number of SSD IOPS provisioned.</p>
        pub fn disk_iops_configuration(
            self,
            input: crate::model::DiskIopsConfiguration,
        ) -> Self {
            Self {
                disk_iops_configuration: Some(input),
                ..self
            }
        }
        /// <p>The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of an IOPS mode (<code>AUTOMATIC</code> or <code>USER_PROVISIONED</code>), and in the case of <code>USER_PROVISIONED</code> IOPS, the total number of SSD IOPS provisioned.</p>
        pub fn set_disk_iops_configuration(
            mut self,
            input: std::option::Option<crate::model::DiskIopsConfiguration>,
        ) -> Self {
            self.disk_iops_configuration = input;
            self
        }
        /// <p>Specifies the throughput of an FSx for NetApp ONTAP file system, measured in megabytes per second (MBps). Valid values are 128, 256, 512, 1024, 2048, and 4096 MBps.</p>
        pub fn throughput_capacity(self, input: i32) -> Self {
            Self {
                throughput_capacity: Some(input),
                ..self
            }
        }
        /// <p>Specifies the throughput of an FSx for NetApp ONTAP file system, measured in megabytes per second (MBps). Valid values are 128, 256, 512, 1024, 2048, and 4096 MBps.</p>
        pub fn set_throughput_capacity(mut self, input: std::option::Option<i32>) -> Self {
            self.throughput_capacity = input;
            self
        }
        /// Appends an item to `add_route_table_ids`.
        ///
        /// To override the contents of this collection use [`set_add_route_table_ids`](Self::set_add_route_table_ids).
        ///
        /// <p>(Multi-AZ only) A list of IDs of new virtual private cloud (VPC) route tables to associate (add) with your Amazon FSx for NetApp ONTAP file system.</p>
        pub fn add_route_table_ids(mut self, input: impl Into<std::string::String>) -> Self {
            // Lazily create the Vec on first append.
            self.add_route_table_ids
                .get_or_insert_with(Vec::new)
                .push(input.into());
            self
        }
        /// <p>(Multi-AZ only) A list of IDs of new virtual private cloud (VPC) route tables to associate (add) with your Amazon FSx for NetApp ONTAP file system.</p>
        pub fn set_add_route_table_ids(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.add_route_table_ids = input;
            self
        }
        /// Appends an item to `remove_route_table_ids`.
        ///
        /// To override the contents of this collection use [`set_remove_route_table_ids`](Self::set_remove_route_table_ids).
        ///
        /// <p>(Multi-AZ only) A list of IDs of existing virtual private cloud (VPC) route tables to disassociate (remove) from your Amazon FSx for NetApp ONTAP file system. You can use the API operation to retrieve the list of VPC route table IDs for a file system.</p>
        pub fn remove_route_table_ids(mut self, input: impl Into<std::string::String>) -> Self {
            // Lazily create the Vec on first append.
            self.remove_route_table_ids
                .get_or_insert_with(Vec::new)
                .push(input.into());
            self
        }
        /// <p>(Multi-AZ only) A list of IDs of existing virtual private cloud (VPC) route tables to disassociate (remove) from your Amazon FSx for NetApp ONTAP file system. You can use the API operation to retrieve the list of VPC route table IDs for a file system.</p>
        pub fn set_remove_route_table_ids(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.remove_route_table_ids = input;
            self
        }
        /// Consumes the builder and constructs a [`UpdateFileSystemOntapConfiguration`](crate::model::UpdateFileSystemOntapConfiguration).
        pub fn build(self) -> crate::model::UpdateFileSystemOntapConfiguration {
            // Move every field out of the builder in one destructuring step.
            let Self {
                automatic_backup_retention_days,
                daily_automatic_backup_start_time,
                fsx_admin_password,
                weekly_maintenance_start_time,
                disk_iops_configuration,
                throughput_capacity,
                add_route_table_ids,
                remove_route_table_ids,
            } = self;
            crate::model::UpdateFileSystemOntapConfiguration {
                automatic_backup_retention_days,
                daily_automatic_backup_start_time,
                fsx_admin_password,
                weekly_maintenance_start_time,
                disk_iops_configuration,
                throughput_capacity,
                add_route_table_ids,
                remove_route_table_ids,
            }
        }
    }
    impl std::fmt::Debug for Builder {
        // Hand-written Debug so `fsx_admin_password` is never printed in clear text.
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            f.debug_struct("Builder")
                .field(
                    "automatic_backup_retention_days",
                    &self.automatic_backup_retention_days,
                )
                .field(
                    "daily_automatic_backup_start_time",
                    &self.daily_automatic_backup_start_time,
                )
                .field("fsx_admin_password", &"*** Sensitive Data Redacted ***")
                .field(
                    "weekly_maintenance_start_time",
                    &self.weekly_maintenance_start_time,
                )
                .field("disk_iops_configuration", &self.disk_iops_configuration)
                .field("throughput_capacity", &self.throughput_capacity)
                .field("add_route_table_ids", &self.add_route_table_ids)
                .field("remove_route_table_ids", &self.remove_route_table_ids)
                .finish()
        }
    }
}
impl UpdateFileSystemOntapConfiguration {
    /// Creates a new builder-style object to manufacture [`UpdateFileSystemOntapConfiguration`](crate::model::UpdateFileSystemOntapConfiguration).
    pub fn builder() -> crate::model::update_file_system_ontap_configuration::Builder {
        // The concrete type is fixed by the return annotation.
        Default::default()
    }
}

/// <p>The configuration object for Amazon FSx for Lustre file systems used in the <code>UpdateFileSystem</code> operation.</p>
#[non_exhaustive]
#[derive(Clone, PartialEq, Debug)]
pub struct UpdateFileSystemLustreConfiguration {
    /// <p>(Optional) The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.</p>
    #[doc(hidden)]
    pub weekly_maintenance_start_time: Option<String>,
    /// <p>A recurring daily time, in the format <code>HH:MM</code>: a zero-padded 24-hour clock in UTC. For example, <code>05:00</code> specifies 5 AM daily.</p>
    #[doc(hidden)]
    pub daily_automatic_backup_start_time: Option<String>,
    /// <p>The number of days to retain automatic backups, from 0 to a maximum of 90. Setting this property to <code>0</code> disables automatic backups; the default is <code>0</code>.</p>
    #[doc(hidden)]
    pub automatic_backup_retention_days: Option<i32>,
    /// <p>(Optional) Chooses how Amazon FSx keeps your file and directory listings up to date as objects are added or modified in the linked S3 bucket:</p>
    /// <ul>
    /// <li> <p> <code>NONE</code> - (Default) AutoImport is off; listings are imported only when the file system is created.</p> </li>
    /// <li> <p> <code>NEW</code> - automatically imports listings of new objects added to the S3 bucket.</p> </li>
    /// <li> <p> <code>NEW_CHANGED</code> - additionally imports listings of objects changed in the S3 bucket.</p> </li>
    /// <li> <p> <code>NEW_CHANGED_DELETED</code> - additionally reflects objects deleted in the S3 bucket.</p> </li>
    /// </ul>
    /// <p>This parameter is not supported for Lustre file systems with the <code>Persistent_2</code> deployment type; update a data repository association instead.</p>
    #[doc(hidden)]
    pub auto_import_policy: Option<crate::model::AutoImportPolicyType>,
    /// <p>Sets the data compression configuration for the file system: <code>NONE</code> (compression off) or <code>LZ4</code> (LZ4 algorithm). If not provided, the file system retains its current data compression configuration. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/data-compression.html">Lustre data compression</a>.</p>
    #[doc(hidden)]
    pub data_compression_type: Option<crate::model::DataCompressionType>,
    /// <p>The Lustre logging configuration used when updating the file system. When logging is enabled, Lustre logs error and warning events for associated data repositories to Amazon CloudWatch Logs.</p>
    #[doc(hidden)]
    pub log_configuration: Option<crate::model::LustreLogCreateConfiguration>,
    /// <p>The Lustre root squash configuration used when updating the file system. When enabled, root squash restricts root-level access from clients that access the file system as a root user.</p>
    #[doc(hidden)]
    pub root_squash_configuration: Option<crate::model::LustreRootSquashConfiguration>,
}
impl UpdateFileSystemLustreConfiguration {
    /// <p>(Optional) The preferred weekly maintenance start time, formatted d:HH:MM in the UTC time zone (d = weekday number, 1 = Monday through 7 = Sunday).</p>
    pub fn weekly_maintenance_start_time(&self) -> Option<&str> {
        self.weekly_maintenance_start_time.as_deref()
    }
    /// <p>A recurring daily time in <code>HH:MM</code> format (zero-padded 24-hour clock, UTC), e.g. <code>05:00</code> for 5 AM daily.</p>
    pub fn daily_automatic_backup_start_time(&self) -> Option<&str> {
        self.daily_automatic_backup_start_time.as_deref()
    }
    /// <p>The number of days to retain automatic backups (0-90); <code>0</code> (the default) disables automatic backups.</p>
    pub fn automatic_backup_retention_days(&self) -> Option<i32> {
        self.automatic_backup_retention_days
    }
    /// <p>(Optional) How Amazon FSx keeps listings in sync with the linked S3 bucket: <code>NONE</code> (default), <code>NEW</code>, <code>NEW_CHANGED</code>, or <code>NEW_CHANGED_DELETED</code>. Not supported for <code>Persistent_2</code> deployment types.</p>
    pub fn auto_import_policy(&self) -> Option<&crate::model::AutoImportPolicyType> {
        self.auto_import_policy.as_ref()
    }
    /// <p>The data compression configuration: <code>NONE</code> or <code>LZ4</code>. If absent, the file system retains its current setting.</p>
    pub fn data_compression_type(&self) -> Option<&crate::model::DataCompressionType> {
        self.data_compression_type.as_ref()
    }
    /// <p>The Lustre logging configuration; when enabled, error and warning events for data repositories are sent to Amazon CloudWatch Logs.</p>
    pub fn log_configuration(&self) -> Option<&crate::model::LustreLogCreateConfiguration> {
        self.log_configuration.as_ref()
    }
    /// <p>The Lustre root squash configuration; when enabled, restricts root-level access from clients.</p>
    pub fn root_squash_configuration(&self) -> Option<&crate::model::LustreRootSquashConfiguration> {
        self.root_squash_configuration.as_ref()
    }
}
/// See [`UpdateFileSystemLustreConfiguration`](crate::model::UpdateFileSystemLustreConfiguration).
pub mod update_file_system_lustre_configuration {

    /// A builder for [`UpdateFileSystemLustreConfiguration`](crate::model::UpdateFileSystemLustreConfiguration).
    #[derive(Clone, PartialEq, Default, Debug)]
    pub struct Builder {
        pub(crate) weekly_maintenance_start_time: Option<String>,
        pub(crate) daily_automatic_backup_start_time: Option<String>,
        pub(crate) automatic_backup_retention_days: Option<i32>,
        pub(crate) auto_import_policy: Option<crate::model::AutoImportPolicyType>,
        pub(crate) data_compression_type: Option<crate::model::DataCompressionType>,
        pub(crate) log_configuration: Option<crate::model::LustreLogCreateConfiguration>,
        pub(crate) root_squash_configuration: Option<crate::model::LustreRootSquashConfiguration>,
    }
    impl Builder {
        /// <p>(Optional) The preferred weekly maintenance start time, formatted d:HH:MM in the UTC time zone (d = weekday number, 1 = Monday through 7 = Sunday).</p>
        pub fn weekly_maintenance_start_time(mut self, input: impl Into<String>) -> Self {
            self.weekly_maintenance_start_time = Some(input.into());
            self
        }
        /// <p>(Optional) Sets or clears the preferred weekly maintenance start time (d:HH:MM, UTC).</p>
        pub fn set_weekly_maintenance_start_time(mut self, input: Option<String>) -> Self {
            self.weekly_maintenance_start_time = input;
            self
        }
        /// <p>A recurring daily time in <code>HH:MM</code> format (zero-padded 24-hour clock, UTC), e.g. <code>05:00</code> for 5 AM daily.</p>
        pub fn daily_automatic_backup_start_time(mut self, input: impl Into<String>) -> Self {
            self.daily_automatic_backup_start_time = Some(input.into());
            self
        }
        /// <p>Sets or clears the recurring daily backup start time (<code>HH:MM</code>, UTC).</p>
        pub fn set_daily_automatic_backup_start_time(mut self, input: Option<String>) -> Self {
            self.daily_automatic_backup_start_time = input;
            self
        }
        /// <p>The number of days to retain automatic backups (0-90); <code>0</code> (the default) disables automatic backups.</p>
        pub fn automatic_backup_retention_days(mut self, input: i32) -> Self {
            self.automatic_backup_retention_days = Some(input);
            self
        }
        /// <p>Sets or clears the number of days to retain automatic backups (0-90).</p>
        pub fn set_automatic_backup_retention_days(mut self, input: Option<i32>) -> Self {
            self.automatic_backup_retention_days = input;
            self
        }
        /// <p>(Optional) How Amazon FSx keeps listings in sync with the linked S3 bucket:</p>
        /// <ul>
        /// <li> <p> <code>NONE</code> - (Default) import only at file-system creation.</p> </li>
        /// <li> <p> <code>NEW</code> - import new objects.</p> </li>
        /// <li> <p> <code>NEW_CHANGED</code> - also import changed objects.</p> </li>
        /// <li> <p> <code>NEW_CHANGED_DELETED</code> - also reflect deleted objects.</p> </li>
        /// </ul>
        /// <p>Not supported for <code>Persistent_2</code> deployment types; update a data repository association instead.</p>
        pub fn auto_import_policy(mut self, input: crate::model::AutoImportPolicyType) -> Self {
            self.auto_import_policy = Some(input);
            self
        }
        /// <p>Sets or clears the auto-import policy (<code>NONE</code>, <code>NEW</code>, <code>NEW_CHANGED</code>, <code>NEW_CHANGED_DELETED</code>). Not supported for <code>Persistent_2</code> deployment types.</p>
        pub fn set_auto_import_policy(
            mut self,
            input: Option<crate::model::AutoImportPolicyType>,
        ) -> Self {
            self.auto_import_policy = input;
            self
        }
        /// <p>The data compression configuration: <code>NONE</code> (off) or <code>LZ4</code>. If not provided, the file system retains its current setting. See <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/data-compression.html">Lustre data compression</a>.</p>
        pub fn data_compression_type(mut self, input: crate::model::DataCompressionType) -> Self {
            self.data_compression_type = Some(input);
            self
        }
        /// <p>Sets or clears the data compression configuration (<code>NONE</code> or <code>LZ4</code>).</p>
        pub fn set_data_compression_type(
            mut self,
            input: Option<crate::model::DataCompressionType>,
        ) -> Self {
            self.data_compression_type = input;
            self
        }
        /// <p>The Lustre logging configuration; when enabled, error and warning events for data repositories are sent to Amazon CloudWatch Logs.</p>
        pub fn log_configuration(
            mut self,
            input: crate::model::LustreLogCreateConfiguration,
        ) -> Self {
            self.log_configuration = Some(input);
            self
        }
        /// <p>Sets or clears the Lustre CloudWatch Logs logging configuration.</p>
        pub fn set_log_configuration(
            mut self,
            input: Option<crate::model::LustreLogCreateConfiguration>,
        ) -> Self {
            self.log_configuration = input;
            self
        }
        /// <p>The Lustre root squash configuration; when enabled, restricts root-level access from clients.</p>
        pub fn root_squash_configuration(
            mut self,
            input: crate::model::LustreRootSquashConfiguration,
        ) -> Self {
            self.root_squash_configuration = Some(input);
            self
        }
        /// <p>Sets or clears the Lustre root squash configuration.</p>
        pub fn set_root_squash_configuration(
            mut self,
            input: Option<crate::model::LustreRootSquashConfiguration>,
        ) -> Self {
            self.root_squash_configuration = input;
            self
        }
        /// Consumes the builder and constructs a [`UpdateFileSystemLustreConfiguration`](crate::model::UpdateFileSystemLustreConfiguration).
        pub fn build(self) -> crate::model::UpdateFileSystemLustreConfiguration {
            // Destructure so the compiler flags any field this builder forgets
            // to carry over into the constructed model.
            let Self {
                weekly_maintenance_start_time,
                daily_automatic_backup_start_time,
                automatic_backup_retention_days,
                auto_import_policy,
                data_compression_type,
                log_configuration,
                root_squash_configuration,
            } = self;
            crate::model::UpdateFileSystemLustreConfiguration {
                weekly_maintenance_start_time,
                daily_automatic_backup_start_time,
                automatic_backup_retention_days,
                auto_import_policy,
                data_compression_type,
                log_configuration,
                root_squash_configuration,
            }
        }
    }
}
impl UpdateFileSystemLustreConfiguration {
    /// Creates a new builder-style object to manufacture [`UpdateFileSystemLustreConfiguration`](crate::model::UpdateFileSystemLustreConfiguration).
    pub fn builder() -> crate::model::update_file_system_lustre_configuration::Builder {
        // `Builder` derives `Default`; the return type drives inference here.
        Default::default()
    }
}

/// <p>The Lustre logging configuration used when creating or updating an Amazon FSx for Lustre file system. An Amazon File Cache is created with Lustre logging enabled by default, with a setting of <code>WARN_ERROR</code> for the logging events, which can't be changed.</p>
/// <p>Lustre logging writes the enabled logging events for your file system or cache to Amazon CloudWatch Logs.</p>
#[non_exhaustive]
#[derive(Clone, PartialEq, Debug)]
pub struct LustreLogCreateConfiguration {
    /// <p>Which data repository events Amazon FSx logs: <code>WARN_ONLY</code> (warnings only), <code>ERROR_ONLY</code> (errors only), <code>WARN_ERROR</code> (both), or <code>DISABLED</code> (logging off).</p>
    #[doc(hidden)]
    pub level: Option<crate::model::LustreAccessAuditLogLevel>,
    /// <p>The ARN of the Amazon CloudWatch Logs log group that receives the logs, subject to these requirements:</p>
    /// <ul>
    /// <li> <p>It must be in the same Amazon Web Services partition, Region, and account as the file system.</p> </li>
    /// <li> <p>Its name must begin with the <code>/aws/fsx</code> prefix.</p> </li>
    /// <li> <p>If omitted, Amazon FSx uses a log stream in <code>/aws/fsx/lustre</code> (FSx for Lustre) or <code>/aws/fsx/filecache</code> (Amazon File Cache).</p> </li>
    /// <li> <p>If provided but nonexistent, the request fails with a <code>BadRequest</code> error.</p> </li>
    /// <li> <p>It cannot be specified when <code>Level</code> is <code>DISABLED</code>.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub destination: Option<String>,
}
impl LustreLogCreateConfiguration {
    /// <p>Which data repository events Amazon FSx logs: <code>WARN_ONLY</code>, <code>ERROR_ONLY</code>, <code>WARN_ERROR</code>, or <code>DISABLED</code>.</p>
    pub fn level(&self) -> Option<&crate::model::LustreAccessAuditLogLevel> {
        self.level.as_ref()
    }
    /// <p>The ARN of the destination CloudWatch Logs log group (must be in the same partition, Region, and account, and begin with <code>/aws/fsx</code>); when omitted, FSx defaults to <code>/aws/fsx/lustre</code> or <code>/aws/fsx/filecache</code>. Cannot be set when <code>Level</code> is <code>DISABLED</code>.</p>
    pub fn destination(&self) -> Option<&str> {
        self.destination.as_deref()
    }
}
/// See [`LustreLogCreateConfiguration`](crate::model::LustreLogCreateConfiguration).
pub mod lustre_log_create_configuration {

    /// A builder for [`LustreLogCreateConfiguration`](crate::model::LustreLogCreateConfiguration).
    #[derive(Clone, PartialEq, Default, Debug)]
    pub struct Builder {
        pub(crate) level: Option<crate::model::LustreAccessAuditLogLevel>,
        pub(crate) destination: Option<String>,
    }
    impl Builder {
        /// <p>Which data repository events Amazon FSx logs: <code>WARN_ONLY</code>, <code>ERROR_ONLY</code>, <code>WARN_ERROR</code>, or <code>DISABLED</code>.</p>
        pub fn level(mut self, input: crate::model::LustreAccessAuditLogLevel) -> Self {
            self.level = Some(input);
            self
        }
        /// <p>Sets or clears the logging level (<code>WARN_ONLY</code>, <code>ERROR_ONLY</code>, <code>WARN_ERROR</code>, or <code>DISABLED</code>).</p>
        pub fn set_level(
            mut self,
            input: Option<crate::model::LustreAccessAuditLogLevel>,
        ) -> Self {
            self.level = input;
            self
        }
        /// <p>The ARN of the destination CloudWatch Logs log group. It must be in the same partition, Region, and account as the file system and begin with <code>/aws/fsx</code>; when omitted, FSx uses <code>/aws/fsx/lustre</code> or <code>/aws/fsx/filecache</code>. A nonexistent destination fails with <code>BadRequest</code>, and no destination may be given when <code>Level</code> is <code>DISABLED</code>.</p>
        pub fn destination(mut self, input: impl Into<String>) -> Self {
            self.destination = Some(input.into());
            self
        }
        /// <p>Sets or clears the destination CloudWatch Logs log group ARN (same constraints as [`destination`](Self::destination)).</p>
        pub fn set_destination(mut self, input: Option<String>) -> Self {
            self.destination = input;
            self
        }
        /// Consumes the builder and constructs a [`LustreLogCreateConfiguration`](crate::model::LustreLogCreateConfiguration).
        pub fn build(self) -> crate::model::LustreLogCreateConfiguration {
            // Destructure so the compiler flags any field not carried over.
            let Self { level, destination } = self;
            crate::model::LustreLogCreateConfiguration { level, destination }
        }
    }
}
impl LustreLogCreateConfiguration {
    /// Creates a new builder-style object to manufacture [`LustreLogCreateConfiguration`](crate::model::LustreLogCreateConfiguration).
    pub fn builder() -> crate::model::lustre_log_create_configuration::Builder {
        // `Builder` derives `Default`; the return type drives inference here.
        Default::default()
    }
}

/// <p>Updates the configuration for an existing Amazon FSx for Windows File Server file system. Amazon FSx only overwrites existing properties with non-null values provided in the request.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct UpdateFileSystemWindowsConfiguration {
    /// <p>The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. Where d is the weekday number, from 1 through 7, with 1 = Monday and 7 = Sunday.</p>
    #[doc(hidden)]
    pub weekly_maintenance_start_time: std::option::Option<std::string::String>,
    /// <p>The preferred time to start the daily automatic backup, in the UTC time zone, for example, <code>02:00</code> </p>
    #[doc(hidden)]
    pub daily_automatic_backup_start_time: std::option::Option<std::string::String>,
    /// <p>The number of days to retain automatic daily backups. Setting this to zero (0) disables automatic daily backups. You can retain automatic daily backups for a maximum of 90 days. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/using-backups.html#automatic-backups">Working with Automatic Daily Backups</a>.</p>
    #[doc(hidden)]
    pub automatic_backup_retention_days: std::option::Option<i32>,
    /// <p>Sets the target value for a file system's throughput capacity, in MB/s, that you are updating the file system to. Valid values are 8, 16, 32, 64, 128, 256, 512, 1024, 2048. You cannot make a throughput capacity update request if there is an existing throughput capacity update request in progress. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-throughput-capacity.html">Managing Throughput Capacity</a>.</p>
    #[doc(hidden)]
    pub throughput_capacity: std::option::Option<i32>,
    /// <p>The configuration Amazon FSx uses to join the Windows File Server instance to the self-managed Microsoft AD directory. You cannot make a self-managed Microsoft AD update request if there is an existing self-managed Microsoft AD update request in progress.</p>
    #[doc(hidden)]
    pub self_managed_active_directory_configuration:
        std::option::Option<crate::model::SelfManagedActiveDirectoryConfigurationUpdates>,
    /// <p>The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system.</p>
    #[doc(hidden)]
    pub audit_log_configuration:
        std::option::Option<crate::model::WindowsAuditLogCreateConfiguration>,
}
impl UpdateFileSystemWindowsConfiguration {
    /// Returns the preferred weekly maintenance start time, formatted d:HH:MM in the UTC time
    /// zone, where d is the weekday number from 1 through 7 (1 = Monday, 7 = Sunday).
    pub fn weekly_maintenance_start_time(&self) -> std::option::Option<&str> {
        self.weekly_maintenance_start_time.as_ref().map(|s| s.as_str())
    }
    /// Returns the preferred daily automatic backup start time, in the UTC time zone, for
    /// example <code>02:00</code>.
    pub fn daily_automatic_backup_start_time(&self) -> std::option::Option<&str> {
        self.daily_automatic_backup_start_time.as_ref().map(|s| s.as_str())
    }
    /// Returns the number of days automatic daily backups are retained. Zero (0) disables
    /// automatic daily backups; 90 days is the maximum. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/using-backups.html#automatic-backups">Working with Automatic Daily Backups</a>.
    pub fn automatic_backup_retention_days(&self) -> std::option::Option<i32> {
        self.automatic_backup_retention_days
    }
    /// Returns the target throughput capacity, in MB/s. Valid values are 8, 16, 32, 64, 128,
    /// 256, 512, 1024, 2048; only one throughput capacity update may be in progress at a time.
    /// For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-throughput-capacity.html">Managing Throughput Capacity</a>.
    pub fn throughput_capacity(&self) -> std::option::Option<i32> {
        self.throughput_capacity
    }
    /// Returns the configuration used to join the Windows File Server instance to the
    /// self-managed Microsoft AD directory; only one such update may be in progress at a time.
    pub fn self_managed_active_directory_configuration(
        &self,
    ) -> std::option::Option<&crate::model::SelfManagedActiveDirectoryConfigurationUpdates> {
        self.self_managed_active_directory_configuration.as_ref()
    }
    /// Returns the configuration used to audit and log user accesses of files, folders, and
    /// file shares on the file system.
    pub fn audit_log_configuration(
        &self,
    ) -> std::option::Option<&crate::model::WindowsAuditLogCreateConfiguration> {
        self.audit_log_configuration.as_ref()
    }
}
/// See [`UpdateFileSystemWindowsConfiguration`](crate::model::UpdateFileSystemWindowsConfiguration).
pub mod update_file_system_windows_configuration {

    /// A builder for [`UpdateFileSystemWindowsConfiguration`](crate::model::UpdateFileSystemWindowsConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) weekly_maintenance_start_time: std::option::Option<std::string::String>,
        pub(crate) daily_automatic_backup_start_time: std::option::Option<std::string::String>,
        pub(crate) automatic_backup_retention_days: std::option::Option<i32>,
        pub(crate) throughput_capacity: std::option::Option<i32>,
        pub(crate) self_managed_active_directory_configuration:
            std::option::Option<crate::model::SelfManagedActiveDirectoryConfigurationUpdates>,
        pub(crate) audit_log_configuration:
            std::option::Option<crate::model::WindowsAuditLogCreateConfiguration>,
    }
    impl Builder {
        /// Sets the preferred weekly maintenance start time, formatted d:HH:MM in the UTC time
        /// zone, where d is the weekday number from 1 through 7 (1 = Monday, 7 = Sunday).
        pub fn weekly_maintenance_start_time(
            mut self,
            value: impl Into<std::string::String>,
        ) -> Self {
            self.weekly_maintenance_start_time = Some(value.into());
            self
        }
        /// Sets or clears the preferred weekly maintenance start time (d:HH:MM, UTC).
        pub fn set_weekly_maintenance_start_time(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.weekly_maintenance_start_time = value;
            self
        }
        /// Sets the preferred daily automatic backup start time, in the UTC time zone, for
        /// example <code>02:00</code>.
        pub fn daily_automatic_backup_start_time(
            mut self,
            value: impl Into<std::string::String>,
        ) -> Self {
            self.daily_automatic_backup_start_time = Some(value.into());
            self
        }
        /// Sets or clears the preferred daily automatic backup start time (UTC).
        pub fn set_daily_automatic_backup_start_time(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.daily_automatic_backup_start_time = value;
            self
        }
        /// Sets how many days automatic daily backups are retained. Zero (0) disables them;
        /// 90 days is the maximum. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/using-backups.html#automatic-backups">Working with Automatic Daily Backups</a>.
        pub fn automatic_backup_retention_days(mut self, value: i32) -> Self {
            self.automatic_backup_retention_days = Some(value);
            self
        }
        /// Sets or clears the automatic daily backup retention period, in days.
        pub fn set_automatic_backup_retention_days(
            mut self,
            value: std::option::Option<i32>,
        ) -> Self {
            self.automatic_backup_retention_days = value;
            self
        }
        /// Sets the target throughput capacity, in MB/s. Valid values are 8, 16, 32, 64, 128,
        /// 256, 512, 1024, 2048; only one throughput capacity update may be in progress at a
        /// time. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-throughput-capacity.html">Managing Throughput Capacity</a>.
        pub fn throughput_capacity(mut self, value: i32) -> Self {
            self.throughput_capacity = Some(value);
            self
        }
        /// Sets or clears the target throughput capacity, in MB/s.
        pub fn set_throughput_capacity(mut self, value: std::option::Option<i32>) -> Self {
            self.throughput_capacity = value;
            self
        }
        /// Sets the configuration used to join the Windows File Server instance to the
        /// self-managed Microsoft AD directory; only one such update may be in progress at a time.
        pub fn self_managed_active_directory_configuration(
            mut self,
            value: crate::model::SelfManagedActiveDirectoryConfigurationUpdates,
        ) -> Self {
            self.self_managed_active_directory_configuration = Some(value);
            self
        }
        /// Sets or clears the self-managed Microsoft AD update configuration.
        pub fn set_self_managed_active_directory_configuration(
            mut self,
            value: std::option::Option<
                crate::model::SelfManagedActiveDirectoryConfigurationUpdates,
            >,
        ) -> Self {
            self.self_managed_active_directory_configuration = value;
            self
        }
        /// Sets the configuration used to audit and log user accesses of files, folders, and
        /// file shares on the file system.
        pub fn audit_log_configuration(
            mut self,
            value: crate::model::WindowsAuditLogCreateConfiguration,
        ) -> Self {
            self.audit_log_configuration = Some(value);
            self
        }
        /// Sets or clears the file-access auditing configuration.
        pub fn set_audit_log_configuration(
            mut self,
            value: std::option::Option<crate::model::WindowsAuditLogCreateConfiguration>,
        ) -> Self {
            self.audit_log_configuration = value;
            self
        }
        /// Consumes the builder and constructs an [`UpdateFileSystemWindowsConfiguration`](crate::model::UpdateFileSystemWindowsConfiguration).
        pub fn build(self) -> crate::model::UpdateFileSystemWindowsConfiguration {
            // Destructure once, then move every field into the model via shorthand init.
            let Builder {
                weekly_maintenance_start_time,
                daily_automatic_backup_start_time,
                automatic_backup_retention_days,
                throughput_capacity,
                self_managed_active_directory_configuration,
                audit_log_configuration,
            } = self;
            crate::model::UpdateFileSystemWindowsConfiguration {
                weekly_maintenance_start_time,
                daily_automatic_backup_start_time,
                automatic_backup_retention_days,
                throughput_capacity,
                self_managed_active_directory_configuration,
                audit_log_configuration,
            }
        }
    }
}
impl UpdateFileSystemWindowsConfiguration {
    /// Returns a fresh [`Builder`](crate::model::update_file_system_windows_configuration::Builder)
    /// for assembling an [`UpdateFileSystemWindowsConfiguration`](crate::model::UpdateFileSystemWindowsConfiguration).
    pub fn builder() -> crate::model::update_file_system_windows_configuration::Builder {
        // The return type drives inference to `Builder::default()`.
        std::default::Default::default()
    }
}

/// <p>The Windows file access auditing configuration used when creating or updating an Amazon FSx for Windows File Server file system.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct WindowsAuditLogCreateConfiguration {
    /// <p>Sets which attempt type is logged by Amazon FSx for file and folder accesses.</p>
    /// <ul>
    /// <li> <p> <code>SUCCESS_ONLY</code> - only successful attempts to access files or folders are logged.</p> </li>
    /// <li> <p> <code>FAILURE_ONLY</code> - only failed attempts to access files or folders are logged.</p> </li>
    /// <li> <p> <code>SUCCESS_AND_FAILURE</code> - both successful attempts and failed attempts to access files or folders are logged.</p> </li>
    /// <li> <p> <code>DISABLED</code> - access auditing of files and folders is turned off.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub file_access_audit_log_level: std::option::Option<crate::model::WindowsAccessAuditLogLevel>,
    /// <p>Sets which attempt type is logged by Amazon FSx for file share accesses.</p>
    /// <ul>
    /// <li> <p> <code>SUCCESS_ONLY</code> - only successful attempts to access file shares are logged.</p> </li>
    /// <li> <p> <code>FAILURE_ONLY</code> - only failed attempts to access file shares are logged.</p> </li>
    /// <li> <p> <code>SUCCESS_AND_FAILURE</code> - both successful attempts and failed attempts to access file shares are logged.</p> </li>
    /// <li> <p> <code>DISABLED</code> - access auditing of file shares is turned off.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub file_share_access_audit_log_level:
        std::option::Option<crate::model::WindowsAccessAuditLogLevel>,
    /// <p>The Amazon Resource Name (ARN) that specifies the destination of the audit logs.</p>
    /// <p>The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN, with the following requirements:</p>
    /// <ul>
    /// <li> <p>The destination ARN that you provide (either CloudWatch Logs log group or Kinesis Data Firehose delivery stream) must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.</p> </li>
    /// <li> <p>The name of the Amazon CloudWatch Logs log group must begin with the <code>/aws/fsx</code> prefix. The name of the Amazon Kinesis Data Firehose delivery stream must begin with the <code>aws-fsx</code> prefix.</p> </li>
    /// <li> <p>If you do not provide a destination in <code>AuditLogDestination</code>, Amazon FSx will create and use a log stream in the CloudWatch Logs <code>/aws/fsx/windows</code> log group.</p> </li>
    /// <li> <p>If <code>AuditLogDestination</code> is provided and the resource does not exist, the request will fail with a <code>BadRequest</code> error.</p> </li>
    /// <li> <p>If <code>FileAccessAuditLogLevel</code> and <code>FileShareAccessAuditLogLevel</code> are both set to <code>DISABLED</code>, you cannot specify a destination in <code>AuditLogDestination</code>.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub audit_log_destination: std::option::Option<std::string::String>,
}
impl WindowsAuditLogCreateConfiguration {
    /// Returns which attempt type is logged for file and folder accesses:
    /// <code>SUCCESS_ONLY</code>, <code>FAILURE_ONLY</code>, <code>SUCCESS_AND_FAILURE</code>,
    /// or <code>DISABLED</code> (access auditing of files and folders is turned off).
    pub fn file_access_audit_log_level(
        &self,
    ) -> std::option::Option<&crate::model::WindowsAccessAuditLogLevel> {
        self.file_access_audit_log_level.as_ref()
    }
    /// Returns which attempt type is logged for file share accesses:
    /// <code>SUCCESS_ONLY</code>, <code>FAILURE_ONLY</code>, <code>SUCCESS_AND_FAILURE</code>,
    /// or <code>DISABLED</code> (access auditing of file shares is turned off).
    pub fn file_share_access_audit_log_level(
        &self,
    ) -> std::option::Option<&crate::model::WindowsAccessAuditLogLevel> {
        self.file_share_access_audit_log_level.as_ref()
    }
    /// Returns the Amazon Resource Name (ARN) of the audit log destination — an Amazon
    /// CloudWatch Logs log group ARN (name beginning with <code>/aws/fsx</code>) or an Amazon
    /// Kinesis Data Firehose delivery stream ARN (name beginning with <code>aws-fsx</code>),
    /// in the same partition, Region, and account as the file system. When unset, Amazon FSx
    /// creates and uses a log stream in the CloudWatch Logs <code>/aws/fsx/windows</code> log
    /// group.
    pub fn audit_log_destination(&self) -> std::option::Option<&str> {
        self.audit_log_destination.as_ref().map(|s| s.as_str())
    }
}
/// See [`WindowsAuditLogCreateConfiguration`](crate::model::WindowsAuditLogCreateConfiguration).
pub mod windows_audit_log_create_configuration {

    /// A builder for [`WindowsAuditLogCreateConfiguration`](crate::model::WindowsAuditLogCreateConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) file_access_audit_log_level:
            std::option::Option<crate::model::WindowsAccessAuditLogLevel>,
        pub(crate) file_share_access_audit_log_level:
            std::option::Option<crate::model::WindowsAccessAuditLogLevel>,
        pub(crate) audit_log_destination: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Sets which attempt type is logged for file and folder accesses:
        /// <code>SUCCESS_ONLY</code>, <code>FAILURE_ONLY</code>,
        /// <code>SUCCESS_AND_FAILURE</code>, or <code>DISABLED</code> (access auditing of files
        /// and folders is turned off).
        pub fn file_access_audit_log_level(
            mut self,
            value: crate::model::WindowsAccessAuditLogLevel,
        ) -> Self {
            self.file_access_audit_log_level = Some(value);
            self
        }
        /// Sets or clears the file/folder access audit log level.
        pub fn set_file_access_audit_log_level(
            mut self,
            value: std::option::Option<crate::model::WindowsAccessAuditLogLevel>,
        ) -> Self {
            self.file_access_audit_log_level = value;
            self
        }
        /// Sets which attempt type is logged for file share accesses:
        /// <code>SUCCESS_ONLY</code>, <code>FAILURE_ONLY</code>,
        /// <code>SUCCESS_AND_FAILURE</code>, or <code>DISABLED</code> (access auditing of file
        /// shares is turned off).
        pub fn file_share_access_audit_log_level(
            mut self,
            value: crate::model::WindowsAccessAuditLogLevel,
        ) -> Self {
            self.file_share_access_audit_log_level = Some(value);
            self
        }
        /// Sets or clears the file share access audit log level.
        pub fn set_file_share_access_audit_log_level(
            mut self,
            value: std::option::Option<crate::model::WindowsAccessAuditLogLevel>,
        ) -> Self {
            self.file_share_access_audit_log_level = value;
            self
        }
        /// Sets the Amazon Resource Name (ARN) of the audit log destination — an Amazon
        /// CloudWatch Logs log group ARN (name beginning with <code>/aws/fsx</code>) or an
        /// Amazon Kinesis Data Firehose delivery stream ARN (name beginning with
        /// <code>aws-fsx</code>), in the same partition, Region, and account as the file
        /// system. If omitted, Amazon FSx creates and uses a log stream in the CloudWatch Logs
        /// <code>/aws/fsx/windows</code> log group. If the named resource does not exist, the
        /// request fails with a <code>BadRequest</code> error; when both audit log levels are
        /// <code>DISABLED</code>, a destination cannot be specified.
        pub fn audit_log_destination(mut self, value: impl Into<std::string::String>) -> Self {
            self.audit_log_destination = Some(value.into());
            self
        }
        /// Sets or clears the ARN of the audit log destination.
        pub fn set_audit_log_destination(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.audit_log_destination = value;
            self
        }
        /// Consumes the builder and constructs a [`WindowsAuditLogCreateConfiguration`](crate::model::WindowsAuditLogCreateConfiguration).
        pub fn build(self) -> crate::model::WindowsAuditLogCreateConfiguration {
            // Destructure once, then move every field into the model via shorthand init.
            let Builder {
                file_access_audit_log_level,
                file_share_access_audit_log_level,
                audit_log_destination,
            } = self;
            crate::model::WindowsAuditLogCreateConfiguration {
                file_access_audit_log_level,
                file_share_access_audit_log_level,
                audit_log_destination,
            }
        }
    }
}
impl WindowsAuditLogCreateConfiguration {
    /// Returns a fresh [`Builder`](crate::model::windows_audit_log_create_configuration::Builder)
    /// for assembling a [`WindowsAuditLogCreateConfiguration`](crate::model::WindowsAuditLogCreateConfiguration).
    pub fn builder() -> crate::model::windows_audit_log_create_configuration::Builder {
        // The return type drives inference to `Builder::default()`.
        std::default::Default::default()
    }
}

/// <p>A description of a specific Amazon File Cache resource, which is a response object from the <code>DescribeFileCaches</code> operation.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct FileCache {
    /// <p>An Amazon Web Services account ID. This ID is a 12-digit number that you use to construct Amazon Resource Names (ARNs) for resources.</p>
    #[doc(hidden)]
    pub owner_id: std::option::Option<std::string::String>,
    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
    // Carried as an `aws_smithy_types::DateTime`; the Unix-seconds description above refers to the wire representation.
    #[doc(hidden)]
    pub creation_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The system-generated, unique ID of the cache.</p>
    #[doc(hidden)]
    pub file_cache_id: std::option::Option<std::string::String>,
    /// <p>The type of cache, which must be <code>LUSTRE</code>.</p>
    #[doc(hidden)]
    pub file_cache_type: std::option::Option<crate::model::FileCacheType>,
    /// <p>The Lustre version of the cache, which must be <code>2.12</code>.</p>
    #[doc(hidden)]
    pub file_cache_type_version: std::option::Option<std::string::String>,
    /// <p>The lifecycle status of the cache. The following are the possible values and what they mean:</p>
    /// <ul>
    /// <li> <p> <code>AVAILABLE</code> - The cache is in a healthy state, and is reachable and available for use.</p> </li>
    /// <li> <p> <code>CREATING</code> - The new cache is being created.</p> </li>
    /// <li> <p> <code>DELETING</code> - An existing cache is being deleted.</p> </li>
    /// <li> <p> <code>UPDATING</code> - The cache is undergoing a customer-initiated update.</p> </li>
    /// <li> <p> <code>FAILED</code> - An existing cache has experienced an unrecoverable failure. When creating a new cache, the cache was unable to be created.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub lifecycle: std::option::Option<crate::model::FileCacheLifecycle>,
    /// <p>A structure providing details of any failures that occurred.</p>
    #[doc(hidden)]
    pub failure_details: std::option::Option<crate::model::FileCacheFailureDetails>,
    /// <p>The storage capacity of the cache in gibibytes (GiB).</p>
    #[doc(hidden)]
    pub storage_capacity: std::option::Option<i32>,
    /// <p>The ID of your virtual private cloud (VPC). For more information, see <a href="https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html">VPC and subnets</a> in the <i>Amazon VPC User Guide</i>.</p>
    #[doc(hidden)]
    pub vpc_id: std::option::Option<std::string::String>,
    /// <p>A list of subnet IDs that the cache will be accessible from. You can specify only one subnet ID in a call to the <code>CreateFileCache</code> operation.</p>
    #[doc(hidden)]
    pub subnet_ids: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>A list of network interface IDs.</p>
    // NOTE(review): presumably the elastic network interfaces backing the cache's endpoints — confirm against the Amazon File Cache API reference.
    #[doc(hidden)]
    pub network_interface_ids: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>The Domain Name System (DNS) name for the cache.</p>
    #[doc(hidden)]
    pub dns_name: std::option::Option<std::string::String>,
    /// <p>Specifies the ID of the Key Management Service (KMS) key to use for encrypting data on an Amazon File Cache. If a <code>KmsKeyId</code> isn't specified, the Amazon FSx-managed KMS key for your account is used. For more information, see <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_Encrypt.html">Encrypt</a> in the <i>Key Management Service API Reference</i>.</p>
    #[doc(hidden)]
    pub kms_key_id: std::option::Option<std::string::String>,
    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    #[doc(hidden)]
    pub resource_arn: std::option::Option<std::string::String>,
    /// <p>The configuration for the Amazon File Cache resource.</p>
    #[doc(hidden)]
    pub lustre_configuration: std::option::Option<crate::model::FileCacheLustreConfiguration>,
    /// <p>A list of IDs of data repository associations that are associated with this cache.</p>
    #[doc(hidden)]
    pub data_repository_association_ids: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl FileCache {
    /// <p>An Amazon Web Services account ID. This ID is a 12-digit number that you use to construct Amazon Resource Names (ARNs) for resources.</p>
    pub fn owner_id(&self) -> std::option::Option<&str> {
        self.owner_id.as_ref().map(|v| v.as_str())
    }
    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
    pub fn creation_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.creation_time.as_ref()
    }
    /// <p>The system-generated, unique ID of the cache.</p>
    pub fn file_cache_id(&self) -> std::option::Option<&str> {
        self.file_cache_id.as_ref().map(|v| v.as_str())
    }
    /// <p>The type of cache, which must be <code>LUSTRE</code>.</p>
    pub fn file_cache_type(&self) -> std::option::Option<&crate::model::FileCacheType> {
        self.file_cache_type.as_ref()
    }
    /// <p>The Lustre version of the cache, which must be <code>2.12</code>.</p>
    pub fn file_cache_type_version(&self) -> std::option::Option<&str> {
        self.file_cache_type_version.as_ref().map(|v| v.as_str())
    }
    /// <p>The lifecycle status of the cache. The following are the possible values and what they mean:</p>
    /// <ul>
    /// <li> <p> <code>AVAILABLE</code> - The cache is in a healthy state, and is reachable and available for use.</p> </li>
    /// <li> <p> <code>CREATING</code> - The new cache is being created.</p> </li>
    /// <li> <p> <code>DELETING</code> - An existing cache is being deleted.</p> </li>
    /// <li> <p> <code>UPDATING</code> - The cache is undergoing a customer-initiated update.</p> </li>
    /// <li> <p> <code>FAILED</code> - An existing cache has experienced an unrecoverable failure. When creating a new cache, the cache was unable to be created.</p> </li>
    /// </ul>
    pub fn lifecycle(&self) -> std::option::Option<&crate::model::FileCacheLifecycle> {
        self.lifecycle.as_ref()
    }
    /// <p>A structure providing details of any failures that occurred.</p>
    pub fn failure_details(&self) -> std::option::Option<&crate::model::FileCacheFailureDetails> {
        self.failure_details.as_ref()
    }
    /// <p>The storage capacity of the cache in gibibytes (GiB).</p>
    pub fn storage_capacity(&self) -> std::option::Option<i32> {
        self.storage_capacity.as_ref().copied()
    }
    /// <p>The ID of your virtual private cloud (VPC). For more information, see <a href="https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html">VPC and subnets</a> in the <i>Amazon VPC User Guide</i>.</p>
    pub fn vpc_id(&self) -> std::option::Option<&str> {
        self.vpc_id.as_ref().map(|v| v.as_str())
    }
    /// <p>A list of subnet IDs that the cache will be accessible from. You can specify only one subnet ID in a call to the <code>CreateFileCache</code> operation.</p>
    pub fn subnet_ids(&self) -> std::option::Option<&[std::string::String]> {
        self.subnet_ids.as_ref().map(|v| v.as_slice())
    }
    /// <p>A list of network interface IDs.</p>
    pub fn network_interface_ids(&self) -> std::option::Option<&[std::string::String]> {
        self.network_interface_ids.as_ref().map(|v| v.as_slice())
    }
    /// <p>The Domain Name System (DNS) name for the cache.</p>
    pub fn dns_name(&self) -> std::option::Option<&str> {
        self.dns_name.as_ref().map(|v| v.as_str())
    }
    /// <p>Specifies the ID of the Key Management Service (KMS) key to use for encrypting data on an Amazon File Cache. If a <code>KmsKeyId</code> isn't specified, the Amazon FSx-managed KMS key for your account is used. For more information, see <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_Encrypt.html">Encrypt</a> in the <i>Key Management Service API Reference</i>.</p>
    pub fn kms_key_id(&self) -> std::option::Option<&str> {
        self.kms_key_id.as_ref().map(|v| v.as_str())
    }
    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    pub fn resource_arn(&self) -> std::option::Option<&str> {
        self.resource_arn.as_ref().map(|v| v.as_str())
    }
    /// <p>The configuration for the Amazon File Cache resource.</p>
    pub fn lustre_configuration(
        &self,
    ) -> std::option::Option<&crate::model::FileCacheLustreConfiguration> {
        self.lustre_configuration.as_ref()
    }
    /// <p>A list of IDs of data repository associations that are associated with this cache.</p>
    pub fn data_repository_association_ids(&self) -> std::option::Option<&[std::string::String]> {
        self.data_repository_association_ids.as_ref().map(|v| v.as_slice())
    }
}
/// See [`FileCache`](crate::model::FileCache).
pub mod file_cache {

    /// A builder for [`FileCache`](crate::model::FileCache).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) owner_id: std::option::Option<std::string::String>,
        pub(crate) creation_time: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) file_cache_id: std::option::Option<std::string::String>,
        pub(crate) file_cache_type: std::option::Option<crate::model::FileCacheType>,
        pub(crate) file_cache_type_version: std::option::Option<std::string::String>,
        pub(crate) lifecycle: std::option::Option<crate::model::FileCacheLifecycle>,
        pub(crate) failure_details: std::option::Option<crate::model::FileCacheFailureDetails>,
        pub(crate) storage_capacity: std::option::Option<i32>,
        pub(crate) vpc_id: std::option::Option<std::string::String>,
        pub(crate) subnet_ids: std::option::Option<std::vec::Vec<std::string::String>>,
        pub(crate) network_interface_ids: std::option::Option<std::vec::Vec<std::string::String>>,
        pub(crate) dns_name: std::option::Option<std::string::String>,
        pub(crate) kms_key_id: std::option::Option<std::string::String>,
        pub(crate) resource_arn: std::option::Option<std::string::String>,
        pub(crate) lustre_configuration:
            std::option::Option<crate::model::FileCacheLustreConfiguration>,
        pub(crate) data_repository_association_ids:
            std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// <p>An Amazon Web Services account ID. This ID is a 12-digit number that you use to construct Amazon Resource Names (ARNs) for resources.</p>
        pub fn owner_id(mut self, value: impl Into<std::string::String>) -> Self {
            self.owner_id = Some(value.into());
            self
        }
        /// <p>An Amazon Web Services account ID. This ID is a 12-digit number that you use to construct Amazon Resource Names (ARNs) for resources.</p>
        pub fn set_owner_id(mut self, value: std::option::Option<std::string::String>) -> Self {
            self.owner_id = value;
            self
        }
        /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
        pub fn creation_time(mut self, value: aws_smithy_types::DateTime) -> Self {
            self.creation_time = Some(value);
            self
        }
        /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
        pub fn set_creation_time(
            mut self,
            value: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.creation_time = value;
            self
        }
        /// <p>The system-generated, unique ID of the cache.</p>
        pub fn file_cache_id(mut self, value: impl Into<std::string::String>) -> Self {
            self.file_cache_id = Some(value.into());
            self
        }
        /// <p>The system-generated, unique ID of the cache.</p>
        pub fn set_file_cache_id(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.file_cache_id = value;
            self
        }
        /// <p>The type of cache, which must be <code>LUSTRE</code>.</p>
        pub fn file_cache_type(mut self, value: crate::model::FileCacheType) -> Self {
            self.file_cache_type = Some(value);
            self
        }
        /// <p>The type of cache, which must be <code>LUSTRE</code>.</p>
        pub fn set_file_cache_type(
            mut self,
            value: std::option::Option<crate::model::FileCacheType>,
        ) -> Self {
            self.file_cache_type = value;
            self
        }
        /// <p>The Lustre version of the cache, which must be <code>2.12</code>.</p>
        pub fn file_cache_type_version(mut self, value: impl Into<std::string::String>) -> Self {
            self.file_cache_type_version = Some(value.into());
            self
        }
        /// <p>The Lustre version of the cache, which must be <code>2.12</code>.</p>
        pub fn set_file_cache_type_version(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.file_cache_type_version = value;
            self
        }
        /// <p>The lifecycle status of the cache. The following are the possible values and what they mean:</p>
        /// <ul>
        /// <li> <p> <code>AVAILABLE</code> - The cache is in a healthy state, and is reachable and available for use.</p> </li>
        /// <li> <p> <code>CREATING</code> - The new cache is being created.</p> </li>
        /// <li> <p> <code>DELETING</code> - An existing cache is being deleted.</p> </li>
        /// <li> <p> <code>UPDATING</code> - The cache is undergoing a customer-initiated update.</p> </li>
        /// <li> <p> <code>FAILED</code> - An existing cache has experienced an unrecoverable failure. When creating a new cache, the cache was unable to be created.</p> </li>
        /// </ul>
        pub fn lifecycle(mut self, value: crate::model::FileCacheLifecycle) -> Self {
            self.lifecycle = Some(value);
            self
        }
        /// <p>The lifecycle status of the cache. The following are the possible values and what they mean:</p>
        /// <ul>
        /// <li> <p> <code>AVAILABLE</code> - The cache is in a healthy state, and is reachable and available for use.</p> </li>
        /// <li> <p> <code>CREATING</code> - The new cache is being created.</p> </li>
        /// <li> <p> <code>DELETING</code> - An existing cache is being deleted.</p> </li>
        /// <li> <p> <code>UPDATING</code> - The cache is undergoing a customer-initiated update.</p> </li>
        /// <li> <p> <code>FAILED</code> - An existing cache has experienced an unrecoverable failure. When creating a new cache, the cache was unable to be created.</p> </li>
        /// </ul>
        pub fn set_lifecycle(
            mut self,
            value: std::option::Option<crate::model::FileCacheLifecycle>,
        ) -> Self {
            self.lifecycle = value;
            self
        }
        /// <p>A structure providing details of any failures that occurred.</p>
        pub fn failure_details(mut self, value: crate::model::FileCacheFailureDetails) -> Self {
            self.failure_details = Some(value);
            self
        }
        /// <p>A structure providing details of any failures that occurred.</p>
        pub fn set_failure_details(
            mut self,
            value: std::option::Option<crate::model::FileCacheFailureDetails>,
        ) -> Self {
            self.failure_details = value;
            self
        }
        /// <p>The storage capacity of the cache in gibibytes (GiB).</p>
        pub fn storage_capacity(mut self, value: i32) -> Self {
            self.storage_capacity = Some(value);
            self
        }
        /// <p>The storage capacity of the cache in gibibytes (GiB).</p>
        pub fn set_storage_capacity(mut self, value: std::option::Option<i32>) -> Self {
            self.storage_capacity = value;
            self
        }
        /// <p>The ID of your virtual private cloud (VPC). For more information, see <a href="https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html">VPC and subnets</a> in the <i>Amazon VPC User Guide</i>.</p>
        pub fn vpc_id(mut self, value: impl Into<std::string::String>) -> Self {
            self.vpc_id = Some(value.into());
            self
        }
        /// <p>The ID of your virtual private cloud (VPC). For more information, see <a href="https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html">VPC and subnets</a> in the <i>Amazon VPC User Guide</i>.</p>
        pub fn set_vpc_id(mut self, value: std::option::Option<std::string::String>) -> Self {
            self.vpc_id = value;
            self
        }
        /// Appends an item to `subnet_ids`.
        ///
        /// To override the contents of this collection use [`set_subnet_ids`](Self::set_subnet_ids).
        ///
        /// <p>A list of subnet IDs that the cache will be accessible from. You can specify only one subnet ID in a call to the <code>CreateFileCache</code> operation.</p>
        pub fn subnet_ids(mut self, value: impl Into<std::string::String>) -> Self {
            self.subnet_ids
                .get_or_insert_with(std::vec::Vec::new)
                .push(value.into());
            self
        }
        /// <p>A list of subnet IDs that the cache will be accessible from. You can specify only one subnet ID in a call to the <code>CreateFileCache</code> operation.</p>
        pub fn set_subnet_ids(
            mut self,
            value: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.subnet_ids = value;
            self
        }
        /// Appends an item to `network_interface_ids`.
        ///
        /// To override the contents of this collection use [`set_network_interface_ids`](Self::set_network_interface_ids).
        ///
        /// <p>A list of network interface IDs.</p>
        pub fn network_interface_ids(mut self, value: impl Into<std::string::String>) -> Self {
            self.network_interface_ids
                .get_or_insert_with(std::vec::Vec::new)
                .push(value.into());
            self
        }
        /// <p>A list of network interface IDs.</p>
        pub fn set_network_interface_ids(
            mut self,
            value: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.network_interface_ids = value;
            self
        }
        /// <p>The Domain Name System (DNS) name for the cache.</p>
        pub fn dns_name(mut self, value: impl Into<std::string::String>) -> Self {
            self.dns_name = Some(value.into());
            self
        }
        /// <p>The Domain Name System (DNS) name for the cache.</p>
        pub fn set_dns_name(mut self, value: std::option::Option<std::string::String>) -> Self {
            self.dns_name = value;
            self
        }
        /// <p>Specifies the ID of the Key Management Service (KMS) key to use for encrypting data on an Amazon File Cache. If a <code>KmsKeyId</code> isn't specified, the Amazon FSx-managed KMS key for your account is used. For more information, see <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_Encrypt.html">Encrypt</a> in the <i>Key Management Service API Reference</i>.</p>
        pub fn kms_key_id(mut self, value: impl Into<std::string::String>) -> Self {
            self.kms_key_id = Some(value.into());
            self
        }
        /// <p>Specifies the ID of the Key Management Service (KMS) key to use for encrypting data on an Amazon File Cache. If a <code>KmsKeyId</code> isn't specified, the Amazon FSx-managed KMS key for your account is used. For more information, see <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_Encrypt.html">Encrypt</a> in the <i>Key Management Service API Reference</i>.</p>
        pub fn set_kms_key_id(mut self, value: std::option::Option<std::string::String>) -> Self {
            self.kms_key_id = value;
            self
        }
        /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
        pub fn resource_arn(mut self, value: impl Into<std::string::String>) -> Self {
            self.resource_arn = Some(value.into());
            self
        }
        /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
        pub fn set_resource_arn(mut self, value: std::option::Option<std::string::String>) -> Self {
            self.resource_arn = value;
            self
        }
        /// <p>The configuration for the Amazon File Cache resource.</p>
        pub fn lustre_configuration(
            mut self,
            value: crate::model::FileCacheLustreConfiguration,
        ) -> Self {
            self.lustre_configuration = Some(value);
            self
        }
        /// <p>The configuration for the Amazon File Cache resource.</p>
        pub fn set_lustre_configuration(
            mut self,
            value: std::option::Option<crate::model::FileCacheLustreConfiguration>,
        ) -> Self {
            self.lustre_configuration = value;
            self
        }
        /// Appends an item to `data_repository_association_ids`.
        ///
        /// To override the contents of this collection use [`set_data_repository_association_ids`](Self::set_data_repository_association_ids).
        ///
        /// <p>A list of IDs of data repository associations that are associated with this cache.</p>
        pub fn data_repository_association_ids(
            mut self,
            value: impl Into<std::string::String>,
        ) -> Self {
            self.data_repository_association_ids
                .get_or_insert_with(std::vec::Vec::new)
                .push(value.into());
            self
        }
        /// <p>A list of IDs of data repository associations that are associated with this cache.</p>
        pub fn set_data_repository_association_ids(
            mut self,
            value: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.data_repository_association_ids = value;
            self
        }
        /// Consumes the builder and constructs a [`FileCache`](crate::model::FileCache).
        pub fn build(self) -> crate::model::FileCache {
            // Destructure once so a newly added builder field can't be
            // silently dropped here without a compile error.
            let Self {
                owner_id,
                creation_time,
                file_cache_id,
                file_cache_type,
                file_cache_type_version,
                lifecycle,
                failure_details,
                storage_capacity,
                vpc_id,
                subnet_ids,
                network_interface_ids,
                dns_name,
                kms_key_id,
                resource_arn,
                lustre_configuration,
                data_repository_association_ids,
            } = self;
            crate::model::FileCache {
                owner_id,
                creation_time,
                file_cache_id,
                file_cache_type,
                file_cache_type_version,
                lifecycle,
                failure_details,
                storage_capacity,
                vpc_id,
                subnet_ids,
                network_interface_ids,
                dns_name,
                kms_key_id,
                resource_arn,
                lustre_configuration,
                data_repository_association_ids,
            }
        }
    }
}
impl FileCache {
    /// Creates a new builder-style object to manufacture [`FileCache`](crate::model::FileCache).
    pub fn builder() -> crate::model::file_cache::Builder {
        Default::default()
    }
}

/// <p>The configuration for the Amazon File Cache resource.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct FileCacheLustreConfiguration {
    /// <p>Per unit storage throughput represents the megabytes per second of read or write throughput per 1 tebibyte of storage provisioned. Cache throughput capacity is equal to Storage capacity (TiB) * PerUnitStorageThroughput (MB/s/TiB). The only supported value is <code>1000</code>.</p>
    #[doc(hidden)]
    pub per_unit_storage_throughput: std::option::Option<i32>,
    /// <p>The deployment type of the Amazon File Cache resource, which must be <code>CACHE_1</code>.</p>
    #[doc(hidden)]
    pub deployment_type: std::option::Option<crate::model::FileCacheLustreDeploymentType>,
    /// <p>You use the <code>MountName</code> value when mounting the cache. If you pass a cache ID to the <code>DescribeFileCaches</code> operation, it returns the <code>MountName</code> value as part of the cache's description.</p>
    #[doc(hidden)]
    pub mount_name: std::option::Option<std::string::String>,
    /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
    /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
    /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
    /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
    #[doc(hidden)]
    pub weekly_maintenance_start_time: std::option::Option<std::string::String>,
    /// <p>The configuration for a Lustre MDT (Metadata Target) storage volume.</p>
    #[doc(hidden)]
    pub metadata_configuration:
        std::option::Option<crate::model::FileCacheLustreMetadataConfiguration>,
    /// <p>The configuration for Lustre logging used to write the enabled logging events for your Amazon File Cache resource to Amazon CloudWatch Logs.</p>
    #[doc(hidden)]
    pub log_configuration: std::option::Option<crate::model::LustreLogConfiguration>,
}
impl FileCacheLustreConfiguration {
    /// <p>Per unit storage throughput represents the megabytes per second of read or write throughput per 1 tebibyte of storage provisioned. Cache throughput capacity is equal to Storage capacity (TiB) * PerUnitStorageThroughput (MB/s/TiB). The only supported value is <code>1000</code>.</p>
    pub fn per_unit_storage_throughput(&self) -> std::option::Option<i32> {
        self.per_unit_storage_throughput.as_ref().copied()
    }
    /// <p>The deployment type of the Amazon File Cache resource, which must be <code>CACHE_1</code>.</p>
    pub fn deployment_type(
        &self,
    ) -> std::option::Option<&crate::model::FileCacheLustreDeploymentType> {
        self.deployment_type.as_ref()
    }
    /// <p>You use the <code>MountName</code> value when mounting the cache. If you pass a cache ID to the <code>DescribeFileCaches</code> operation, it returns the <code>MountName</code> value as part of the cache's description.</p>
    pub fn mount_name(&self) -> std::option::Option<&str> {
        self.mount_name.as_ref().map(|v| v.as_str())
    }
    /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
    /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
    /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
    /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
    pub fn weekly_maintenance_start_time(&self) -> std::option::Option<&str> {
        self.weekly_maintenance_start_time.as_ref().map(|v| v.as_str())
    }
    /// <p>The configuration for a Lustre MDT (Metadata Target) storage volume.</p>
    pub fn metadata_configuration(
        &self,
    ) -> std::option::Option<&crate::model::FileCacheLustreMetadataConfiguration> {
        self.metadata_configuration.as_ref()
    }
    /// <p>The configuration for Lustre logging used to write the enabled logging events for your Amazon File Cache resource to Amazon CloudWatch Logs.</p>
    pub fn log_configuration(&self) -> std::option::Option<&crate::model::LustreLogConfiguration> {
        self.log_configuration.as_ref()
    }
}
/// See [`FileCacheLustreConfiguration`](crate::model::FileCacheLustreConfiguration).
pub mod file_cache_lustre_configuration {

    /// A builder for [`FileCacheLustreConfiguration`](crate::model::FileCacheLustreConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) per_unit_storage_throughput: std::option::Option<i32>,
        pub(crate) deployment_type:
            std::option::Option<crate::model::FileCacheLustreDeploymentType>,
        pub(crate) mount_name: std::option::Option<std::string::String>,
        pub(crate) weekly_maintenance_start_time: std::option::Option<std::string::String>,
        pub(crate) metadata_configuration:
            std::option::Option<crate::model::FileCacheLustreMetadataConfiguration>,
        pub(crate) log_configuration: std::option::Option<crate::model::LustreLogConfiguration>,
    }
    impl Builder {
        /// <p>Per unit storage throughput represents the megabytes per second of read or write throughput per 1 tebibyte of storage provisioned. Cache throughput capacity is equal to Storage capacity (TiB) * PerUnitStorageThroughput (MB/s/TiB). The only supported value is <code>1000</code>.</p>
        pub fn per_unit_storage_throughput(mut self, value: i32) -> Self {
            self.per_unit_storage_throughput = Some(value);
            self
        }
        /// <p>Per unit storage throughput represents the megabytes per second of read or write throughput per 1 tebibyte of storage provisioned. Cache throughput capacity is equal to Storage capacity (TiB) * PerUnitStorageThroughput (MB/s/TiB). The only supported value is <code>1000</code>.</p>
        pub fn set_per_unit_storage_throughput(mut self, value: std::option::Option<i32>) -> Self {
            self.per_unit_storage_throughput = value;
            self
        }
        /// <p>The deployment type of the Amazon File Cache resource, which must be <code>CACHE_1</code>.</p>
        pub fn deployment_type(
            mut self,
            value: crate::model::FileCacheLustreDeploymentType,
        ) -> Self {
            self.deployment_type = Some(value);
            self
        }
        /// <p>The deployment type of the Amazon File Cache resource, which must be <code>CACHE_1</code>.</p>
        pub fn set_deployment_type(
            mut self,
            value: std::option::Option<crate::model::FileCacheLustreDeploymentType>,
        ) -> Self {
            self.deployment_type = value;
            self
        }
        /// <p>You use the <code>MountName</code> value when mounting the cache. If you pass a cache ID to the <code>DescribeFileCaches</code> operation, it returns the <code>MountName</code> value as part of the cache's description.</p>
        pub fn mount_name(mut self, value: impl Into<std::string::String>) -> Self {
            self.mount_name = Some(value.into());
            self
        }
        /// <p>You use the <code>MountName</code> value when mounting the cache. If you pass a cache ID to the <code>DescribeFileCaches</code> operation, it returns the <code>MountName</code> value as part of the cache's description.</p>
        pub fn set_mount_name(mut self, value: std::option::Option<std::string::String>) -> Self {
            self.mount_name = value;
            self
        }
        /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
        /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
        /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
        /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
        pub fn weekly_maintenance_start_time(
            mut self,
            value: impl Into<std::string::String>,
        ) -> Self {
            self.weekly_maintenance_start_time = Some(value.into());
            self
        }
        /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
        /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
        /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
        /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
        pub fn set_weekly_maintenance_start_time(
            mut self,
            value: std::option::Option<std::string::String>,
        ) -> Self {
            self.weekly_maintenance_start_time = value;
            self
        }
        /// <p>The configuration for a Lustre MDT (Metadata Target) storage volume.</p>
        pub fn metadata_configuration(
            mut self,
            value: crate::model::FileCacheLustreMetadataConfiguration,
        ) -> Self {
            self.metadata_configuration = Some(value);
            self
        }
        /// <p>The configuration for a Lustre MDT (Metadata Target) storage volume.</p>
        pub fn set_metadata_configuration(
            mut self,
            value: std::option::Option<crate::model::FileCacheLustreMetadataConfiguration>,
        ) -> Self {
            self.metadata_configuration = value;
            self
        }
        /// <p>The configuration for Lustre logging used to write the enabled logging events for your Amazon File Cache resource to Amazon CloudWatch Logs.</p>
        pub fn log_configuration(mut self, value: crate::model::LustreLogConfiguration) -> Self {
            self.log_configuration = Some(value);
            self
        }
        /// <p>The configuration for Lustre logging used to write the enabled logging events for your Amazon File Cache resource to Amazon CloudWatch Logs.</p>
        pub fn set_log_configuration(
            mut self,
            value: std::option::Option<crate::model::LustreLogConfiguration>,
        ) -> Self {
            self.log_configuration = value;
            self
        }
        /// Consumes the builder and constructs a [`FileCacheLustreConfiguration`](crate::model::FileCacheLustreConfiguration).
        pub fn build(self) -> crate::model::FileCacheLustreConfiguration {
            // Destructure once so a newly added builder field can't be
            // silently dropped here without a compile error.
            let Self {
                per_unit_storage_throughput,
                deployment_type,
                mount_name,
                weekly_maintenance_start_time,
                metadata_configuration,
                log_configuration,
            } = self;
            crate::model::FileCacheLustreConfiguration {
                per_unit_storage_throughput,
                deployment_type,
                mount_name,
                weekly_maintenance_start_time,
                metadata_configuration,
                log_configuration,
            }
        }
    }
}
impl FileCacheLustreConfiguration {
    /// Creates a new builder-style object to manufacture [`FileCacheLustreConfiguration`](crate::model::FileCacheLustreConfiguration).
    pub fn builder() -> crate::model::file_cache_lustre_configuration::Builder {
        // The builder derives `Default`, so a fresh builder is just its default.
        std::default::Default::default()
    }
}

/// <p>The configuration for a Lustre MDT (Metadata Target) storage volume. The metadata on Amazon File Cache is managed by a Lustre Metadata Server (MDS) while the actual metadata is persisted on an MDT.</p>
/// Appears as the `metadata_configuration` of a `FileCacheLustreConfiguration`.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct FileCacheLustreMetadataConfiguration {
    /// <p>The storage capacity of the Lustre MDT (Metadata Target) storage volume in gibibytes (GiB). The only supported value is <code>2400</code> GiB.</p>
    // Hidden from rendered docs; read through the `storage_capacity()` accessor.
    #[doc(hidden)]
    pub storage_capacity: std::option::Option<i32>,
}
impl FileCacheLustreMetadataConfiguration {
    /// <p>The storage capacity of the Lustre MDT (Metadata Target) storage volume in gibibytes (GiB). The only supported value is <code>2400</code> GiB.</p>
    pub fn storage_capacity(&self) -> std::option::Option<i32> {
        // `Option<i32>` is `Copy`, so dereferencing the borrowed field is free.
        let Self { storage_capacity } = self;
        *storage_capacity
    }
}
/// See [`FileCacheLustreMetadataConfiguration`](crate::model::FileCacheLustreMetadataConfiguration).
pub mod file_cache_lustre_metadata_configuration {

    /// A builder for [`FileCacheLustreMetadataConfiguration`](crate::model::FileCacheLustreMetadataConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        /// Storage capacity in GiB; `None` until set.
        pub(crate) storage_capacity: std::option::Option<i32>,
    }
    impl Builder {
        /// <p>The storage capacity of the Lustre MDT (Metadata Target) storage volume in gibibytes (GiB). The only supported value is <code>2400</code> GiB.</p>
        pub fn storage_capacity(self, input: i32) -> Self {
            // Delegate to the `Option`-taking setter.
            self.set_storage_capacity(Some(input))
        }
        /// <p>The storage capacity of the Lustre MDT (Metadata Target) storage volume in gibibytes (GiB). The only supported value is <code>2400</code> GiB.</p>
        pub fn set_storage_capacity(self, input: std::option::Option<i32>) -> Self {
            // Single-field builder: rebuild it with the replacement value.
            Self {
                storage_capacity: input,
            }
        }
        /// Consumes the builder and constructs a [`FileCacheLustreMetadataConfiguration`](crate::model::FileCacheLustreMetadataConfiguration).
        pub fn build(self) -> crate::model::FileCacheLustreMetadataConfiguration {
            let Self { storage_capacity } = self;
            crate::model::FileCacheLustreMetadataConfiguration { storage_capacity }
        }
    }
}
impl FileCacheLustreMetadataConfiguration {
    /// Creates a new builder-style object to manufacture [`FileCacheLustreMetadataConfiguration`](crate::model::FileCacheLustreMetadataConfiguration).
    pub fn builder() -> crate::model::file_cache_lustre_metadata_configuration::Builder {
        // The builder derives `Default`, so a fresh builder is just its default.
        std::default::Default::default()
    }
}

/// When writing a match expression against `FileCacheLustreDeploymentType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let filecachelustredeploymenttype = unimplemented!();
/// match filecachelustredeploymenttype {
///     FileCacheLustreDeploymentType::Cache1 => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `filecachelustredeploymenttype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `FileCacheLustreDeploymentType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `FileCacheLustreDeploymentType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `FileCacheLustreDeploymentType::NewFeature` is defined.
/// Specifically, when `filecachelustredeploymenttype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `FileCacheLustreDeploymentType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum FileCacheLustreDeploymentType {
    /// The `CACHE_1` deployment type (wire value `"CACHE_1"`).
    #[allow(missing_docs)] // documentation missing in model
    Cache1,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for FileCacheLustreDeploymentType {
    fn from(s: &str) -> Self {
        // Known wire values map to their variants; anything else is preserved
        // in `Unknown` so newer service values round-trip.
        match s {
            "CACHE_1" => Self::Cache1,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for FileCacheLustreDeploymentType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Infallible: unrecognized strings become `Unknown`.
        Ok(s.into())
    }
}
impl FileCacheLustreDeploymentType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Cache1 => "CACHE_1",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["CACHE_1"]
    }
}
impl AsRef<str> for FileCacheLustreDeploymentType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>A structure providing details of any failures that occurred.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct FileCacheFailureDetails {
    /// <p>A message describing any failures that occurred.</p>
    // Hidden from rendered docs; read through the `message()` accessor.
    #[doc(hidden)]
    pub message: std::option::Option<std::string::String>,
}
impl FileCacheFailureDetails {
    /// <p>A message describing any failures that occurred.</p>
    pub fn message(&self) -> std::option::Option<&str> {
        // Borrow the owned `String` as `&str` without cloning.
        let Self { message } = self;
        message.as_deref()
    }
}
/// See [`FileCacheFailureDetails`](crate::model::FileCacheFailureDetails).
pub mod file_cache_failure_details {

    /// A builder for [`FileCacheFailureDetails`](crate::model::FileCacheFailureDetails).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        /// Failure message; `None` until set.
        pub(crate) message: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>A message describing any failures that occurred.</p>
        pub fn message(self, input: impl Into<std::string::String>) -> Self {
            // Delegate to the `Option`-taking setter.
            self.set_message(Some(input.into()))
        }
        /// <p>A message describing any failures that occurred.</p>
        pub fn set_message(self, input: std::option::Option<std::string::String>) -> Self {
            // Single-field builder: rebuild it with the replacement value.
            Self { message: input }
        }
        /// Consumes the builder and constructs a [`FileCacheFailureDetails`](crate::model::FileCacheFailureDetails).
        pub fn build(self) -> crate::model::FileCacheFailureDetails {
            let Self { message } = self;
            crate::model::FileCacheFailureDetails { message }
        }
    }
}
impl FileCacheFailureDetails {
    /// Creates a new builder-style object to manufacture [`FileCacheFailureDetails`](crate::model::FileCacheFailureDetails).
    pub fn builder() -> crate::model::file_cache_failure_details::Builder {
        // The builder derives `Default`, so a fresh builder is just its default.
        std::default::Default::default()
    }
}

/// When writing a match expression against `FileCacheLifecycle`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let filecachelifecycle = unimplemented!();
/// match filecachelifecycle {
///     FileCacheLifecycle::Available => { /* ... */ },
///     FileCacheLifecycle::Creating => { /* ... */ },
///     FileCacheLifecycle::Deleting => { /* ... */ },
///     FileCacheLifecycle::Failed => { /* ... */ },
///     FileCacheLifecycle::Updating => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `filecachelifecycle` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `FileCacheLifecycle::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `FileCacheLifecycle::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `FileCacheLifecycle::NewFeature` is defined.
/// Specifically, when `filecachelifecycle` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `FileCacheLifecycle::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum FileCacheLifecycle {
    /// The cache is available for use (wire value `"AVAILABLE"`).
    #[allow(missing_docs)] // documentation missing in model
    Available,
    /// The cache is being created (wire value `"CREATING"`).
    #[allow(missing_docs)] // documentation missing in model
    Creating,
    /// The cache is being deleted (wire value `"DELETING"`).
    #[allow(missing_docs)] // documentation missing in model
    Deleting,
    /// The cache is in a failed state (wire value `"FAILED"`).
    #[allow(missing_docs)] // documentation missing in model
    Failed,
    /// The cache is being updated (wire value `"UPDATING"`).
    #[allow(missing_docs)] // documentation missing in model
    Updating,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for FileCacheLifecycle {
    fn from(s: &str) -> Self {
        // Known wire values map to their variants; anything else is preserved
        // in `Unknown` so newer service values round-trip.
        match s {
            "AVAILABLE" => Self::Available,
            "CREATING" => Self::Creating,
            "DELETING" => Self::Deleting,
            "FAILED" => Self::Failed,
            "UPDATING" => Self::Updating,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for FileCacheLifecycle {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Infallible: unrecognized strings become `Unknown`.
        Ok(s.into())
    }
}
impl FileCacheLifecycle {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Available => "AVAILABLE",
            Self::Creating => "CREATING",
            Self::Deleting => "DELETING",
            Self::Failed => "FAILED",
            Self::Updating => "UPDATING",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["AVAILABLE", "CREATING", "DELETING", "FAILED", "UPDATING"]
    }
}
impl AsRef<str> for FileCacheLifecycle {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// When writing a match expression against `FileCacheType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let filecachetype = unimplemented!();
/// match filecachetype {
///     FileCacheType::Lustre => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `filecachetype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `FileCacheType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `FileCacheType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `FileCacheType::NewFeature` is defined.
/// Specifically, when `filecachetype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `FileCacheType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum FileCacheType {
    /// The Lustre cache type (wire value `"LUSTRE"`).
    #[allow(missing_docs)] // documentation missing in model
    Lustre,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for FileCacheType {
    fn from(s: &str) -> Self {
        // Known wire values map to their variants; anything else is preserved
        // in `Unknown` so newer service values round-trip.
        match s {
            "LUSTRE" => Self::Lustre,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for FileCacheType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Infallible: unrecognized strings become `Unknown`.
        Ok(s.into())
    }
}
impl FileCacheType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Lustre => "LUSTRE",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["LUSTRE"]
    }
}
impl AsRef<str> for FileCacheType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>The configuration update for an Amazon File Cache resource.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct UpdateFileCacheLustreConfiguration {
    /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
    /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
    /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
    /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
    // Hidden from rendered docs; read through the `weekly_maintenance_start_time()` accessor.
    #[doc(hidden)]
    pub weekly_maintenance_start_time: std::option::Option<std::string::String>,
}
impl UpdateFileCacheLustreConfiguration {
    /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
    /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
    /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
    /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
    pub fn weekly_maintenance_start_time(&self) -> std::option::Option<&str> {
        // Borrow the owned `String` as `&str` without cloning.
        let Self {
            weekly_maintenance_start_time,
        } = self;
        weekly_maintenance_start_time.as_deref()
    }
}
/// See [`UpdateFileCacheLustreConfiguration`](crate::model::UpdateFileCacheLustreConfiguration).
pub mod update_file_cache_lustre_configuration {

    /// A builder for [`UpdateFileCacheLustreConfiguration`](crate::model::UpdateFileCacheLustreConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        /// Maintenance window start time; `None` until set.
        pub(crate) weekly_maintenance_start_time: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
        /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
        /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
        /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
        pub fn weekly_maintenance_start_time(
            self,
            input: impl Into<std::string::String>,
        ) -> Self {
            // Delegate to the `Option`-taking setter.
            self.set_weekly_maintenance_start_time(Some(input.into()))
        }
        /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
        /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
        /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
        /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
        pub fn set_weekly_maintenance_start_time(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            // Single-field builder: rebuild it with the replacement value.
            Self {
                weekly_maintenance_start_time: input,
            }
        }
        /// Consumes the builder and constructs a [`UpdateFileCacheLustreConfiguration`](crate::model::UpdateFileCacheLustreConfiguration).
        pub fn build(self) -> crate::model::UpdateFileCacheLustreConfiguration {
            let Self {
                weekly_maintenance_start_time,
            } = self;
            crate::model::UpdateFileCacheLustreConfiguration {
                weekly_maintenance_start_time,
            }
        }
    }
}
impl UpdateFileCacheLustreConfiguration {
    /// Creates a new builder-style object to manufacture [`UpdateFileCacheLustreConfiguration`](crate::model::UpdateFileCacheLustreConfiguration).
    pub fn builder() -> crate::model::update_file_cache_lustre_configuration::Builder {
        // The builder derives `Default`, so a fresh builder is just its default.
        std::default::Default::default()
    }
}

/// <p>The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:</p>
/// <ul>
/// <li> <p> <code>CreateDataRepositoryAssociation</code> </p> </li>
/// <li> <p> <code>UpdateDataRepositoryAssociation</code> </p> </li>
/// <li> <p> <code>DescribeDataRepositoryAssociations</code> </p> </li>
/// </ul>
/// <p>Data repository associations are supported only for an Amazon FSx for Lustre file system with the <code>Persistent_2</code> deployment type and for an Amazon File Cache resource.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DataRepositoryAssociation {
    /// <p>The system-generated, unique ID of the data repository association.</p>
    #[doc(hidden)]
    pub association_id: std::option::Option<std::string::String>,
    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    #[doc(hidden)]
    pub resource_arn: std::option::Option<std::string::String>,
    /// <p>The globally unique ID of the file system, assigned by Amazon FSx.</p>
    #[doc(hidden)]
    pub file_system_id: std::option::Option<std::string::String>,
    /// <p>Describes the state of a data repository association. The lifecycle can have the following values:</p>
    /// <ul>
    /// <li> <p> <code>CREATING</code> - The data repository association between the file system or cache and the data repository is being created. The data repository is unavailable.</p> </li>
    /// <li> <p> <code>AVAILABLE</code> - The data repository association is available for use.</p> </li>
    /// <li> <p> <code>MISCONFIGURED</code> - The data repository association is misconfigured. Until the configuration is corrected, automatic import and automatic export will not work (only for Amazon FSx for Lustre).</p> </li>
    /// <li> <p> <code>UPDATING</code> - The data repository association is undergoing a customer initiated update that might affect its availability.</p> </li>
    /// <li> <p> <code>DELETING</code> - The data repository association is undergoing a customer initiated deletion.</p> </li>
    /// <li> <p> <code>FAILED</code> - The data repository association is in a terminal state that cannot be recovered.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub lifecycle: std::option::Option<crate::model::DataRepositoryLifecycle>,
    /// <p>Provides detailed information about the data repository if its <code>Lifecycle</code> is set to <code>MISCONFIGURED</code> or <code>FAILED</code>.</p>
    #[doc(hidden)]
    pub failure_details: std::option::Option<crate::model::DataRepositoryFailureDetails>,
    /// <p>A path on the Amazon FSx for Lustre file system that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path <code>/ns1/</code>, then you cannot link another data repository with file system path <code>/ns1/ns2</code>.</p>
    /// <p>This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.</p> <note>
    /// <p>If you specify only a forward slash (<code>/</code>) as the file system path, you can link only one data repository to the file system. You can only specify "/" as the file system path for the first data repository associated with a file system.</p>
    /// </note>
    #[doc(hidden)]
    pub file_system_path: std::option::Option<std::string::String>,
    /// <p>The path to the data repository that will be linked to the cache or file system.</p>
    /// <ul>
    /// <li> <p>For Amazon File Cache, the path can be an NFS data repository that will be linked to the cache. The path can be in one of two formats:</p>
    /// <ul>
    /// <li> <p>If you are not using the <code>DataRepositorySubdirectories</code> parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format <code>nfs://nfs-domain-name/exportpath</code>. You can therefore link a single NFS Export to a single data repository association.</p> </li>
    /// <li> <p>If you are using the <code>DataRepositorySubdirectories</code> parameter, the path is the domain name of the NFS file system in the format <code>nfs://filer-domain-name</code>, which indicates the root of the subdirectories specified with the <code>DataRepositorySubdirectories</code> parameter.</p> </li>
    /// </ul> </li>
    /// <li> <p>For Amazon File Cache, the path can be an S3 bucket or prefix in the format <code>s3://myBucket/myPrefix/</code>.</p> </li>
    /// <li> <p>For Amazon FSx for Lustre, the path can be an S3 bucket or prefix in the format <code>s3://myBucket/myPrefix/</code>.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub data_repository_path: std::option::Option<std::string::String>,
    /// <p>A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to <code>true</code>.</p> <note>
    /// <p> <code>BatchImportMetaDataOnCreate</code> is not supported for data repositories linked to an Amazon File Cache resource.</p>
    /// </note>
    #[doc(hidden)]
    pub batch_import_meta_data_on_create: std::option::Option<bool>,
    /// <p>For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system or cache.</p>
    /// <p>The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.</p>
    #[doc(hidden)]
    pub imported_file_chunk_size: std::option::Option<i32>,
    /// <p>The configuration for an Amazon S3 data repository linked to an Amazon FSx for Lustre file system with a data repository association.</p>
    #[doc(hidden)]
    pub s3: std::option::Option<crate::model::S3DataRepositoryConfiguration>,
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    #[doc(hidden)]
    pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
    #[doc(hidden)]
    pub creation_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The globally unique ID of the Amazon File Cache resource.</p>
    #[doc(hidden)]
    pub file_cache_id: std::option::Option<std::string::String>,
    /// <p>A path on the Amazon File Cache that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the path is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path <code>/ns1/</code>, then you cannot link another data repository with cache path <code>/ns1/ns2</code>.</p>
    /// <p>This path specifies the directory in your cache where files will be exported from. This cache directory can be linked to only one data repository (S3 or NFS) and no other data repository can be linked to the directory.</p> <note>
    /// <p>The cache path can only be set to root (/) on an NFS DRA when <code>DataRepositorySubdirectories</code> is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache.</p>
    /// <p>The cache path cannot be set to root (/) for an S3 DRA.</p>
    /// </note>
    #[doc(hidden)]
    pub file_cache_path: std::option::Option<std::string::String>,
    /// <p>For Amazon File Cache, a list of NFS Exports that will be linked with an NFS data repository association. All the subdirectories must be on a single NFS file system. The Export paths are in the format <code>/exportpath1</code>. To use this parameter, you must configure <code>DataRepositoryPath</code> as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that <code>DataRepositorySubdirectories</code> is not supported for S3 data repositories.</p>
    #[doc(hidden)]
    pub data_repository_subdirectories: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association.</p>
    #[doc(hidden)]
    pub nfs: std::option::Option<crate::model::NfsDataRepositoryConfiguration>,
}
impl DataRepositoryAssociation {
    /// <p>The system-generated, unique ID of the data repository association.</p>
    pub fn association_id(&self) -> std::option::Option<&str> {
        self.association_id.as_deref()
    }
    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    pub fn resource_arn(&self) -> std::option::Option<&str> {
        self.resource_arn.as_deref()
    }
    /// <p>The globally unique ID of the file system, assigned by Amazon FSx.</p>
    pub fn file_system_id(&self) -> std::option::Option<&str> {
        self.file_system_id.as_deref()
    }
    /// <p>Describes the state of a data repository association. The lifecycle can have the following values:</p>
    /// <ul>
    /// <li> <p> <code>CREATING</code> - The data repository association between the file system or cache and the data repository is being created. The data repository is unavailable.</p> </li>
    /// <li> <p> <code>AVAILABLE</code> - The data repository association is available for use.</p> </li>
    /// <li> <p> <code>MISCONFIGURED</code> - The data repository association is misconfigured. Until the configuration is corrected, automatic import and automatic export will not work (only for Amazon FSx for Lustre).</p> </li>
    /// <li> <p> <code>UPDATING</code> - The data repository association is undergoing a customer initiated update that might affect its availability.</p> </li>
    /// <li> <p> <code>DELETING</code> - The data repository association is undergoing a customer initiated deletion.</p> </li>
    /// <li> <p> <code>FAILED</code> - The data repository association is in a terminal state that cannot be recovered.</p> </li>
    /// </ul>
    pub fn lifecycle(&self) -> std::option::Option<&crate::model::DataRepositoryLifecycle> {
        self.lifecycle.as_ref()
    }
    /// <p>Provides detailed information about the data repository if its <code>Lifecycle</code> is set to <code>MISCONFIGURED</code> or <code>FAILED</code>.</p>
    pub fn failure_details(
        &self,
    ) -> std::option::Option<&crate::model::DataRepositoryFailureDetails> {
        self.failure_details.as_ref()
    }
    /// <p>A path on the Amazon FSx for Lustre file system that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path <code>/ns1/</code>, then you cannot link another data repository with file system path <code>/ns1/ns2</code>.</p>
    /// <p>This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.</p> <note>
    /// <p>If you specify only a forward slash (<code>/</code>) as the file system path, you can link only one data repository to the file system. You can only specify "/" as the file system path for the first data repository associated with a file system.</p>
    /// </note>
    pub fn file_system_path(&self) -> std::option::Option<&str> {
        self.file_system_path.as_deref()
    }
    /// <p>The path to the data repository that will be linked to the cache or file system.</p>
    /// <ul>
    /// <li> <p>For Amazon File Cache, the path can be an NFS data repository that will be linked to the cache. The path can be in one of two formats:</p>
    /// <ul>
    /// <li> <p>If you are not using the <code>DataRepositorySubdirectories</code> parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format <code>nfs://nfs-domain-name/exportpath</code>. You can therefore link a single NFS Export to a single data repository association.</p> </li>
    /// <li> <p>If you are using the <code>DataRepositorySubdirectories</code> parameter, the path is the domain name of the NFS file system in the format <code>nfs://filer-domain-name</code>, which indicates the root of the subdirectories specified with the <code>DataRepositorySubdirectories</code> parameter.</p> </li>
    /// </ul> </li>
    /// <li> <p>For Amazon File Cache, the path can be an S3 bucket or prefix in the format <code>s3://myBucket/myPrefix/</code>.</p> </li>
    /// <li> <p>For Amazon FSx for Lustre, the path can be an S3 bucket or prefix in the format <code>s3://myBucket/myPrefix/</code>.</p> </li>
    /// </ul>
    pub fn data_repository_path(&self) -> std::option::Option<&str> {
        self.data_repository_path.as_deref()
    }
    /// <p>A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to <code>true</code>.</p> <note>
    /// <p> <code>BatchImportMetaDataOnCreate</code> is not supported for data repositories linked to an Amazon File Cache resource.</p>
    /// </note>
    pub fn batch_import_meta_data_on_create(&self) -> std::option::Option<bool> {
        self.batch_import_meta_data_on_create
    }
    /// <p>For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system or cache.</p>
    /// <p>The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.</p>
    pub fn imported_file_chunk_size(&self) -> std::option::Option<i32> {
        self.imported_file_chunk_size
    }
    /// <p>The configuration for an Amazon S3 data repository linked to an Amazon FSx for Lustre file system with a data repository association.</p>
    pub fn s3(&self) -> std::option::Option<&crate::model::S3DataRepositoryConfiguration> {
        self.s3.as_ref()
    }
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    pub fn tags(&self) -> std::option::Option<&[crate::model::Tag]> {
        self.tags.as_deref()
    }
    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
    pub fn creation_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.creation_time.as_ref()
    }
    /// <p>The globally unique ID of the Amazon File Cache resource.</p>
    pub fn file_cache_id(&self) -> std::option::Option<&str> {
        self.file_cache_id.as_deref()
    }
    /// <p>A path on the Amazon File Cache that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the path is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path <code>/ns1/</code>, then you cannot link another data repository with cache path <code>/ns1/ns2</code>.</p>
    /// <p>This path specifies the directory in your cache where files will be exported from. This cache directory can be linked to only one data repository (S3 or NFS) and no other data repository can be linked to the directory.</p> <note>
    /// <p>The cache path can only be set to root (/) on an NFS DRA when <code>DataRepositorySubdirectories</code> is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache.</p>
    /// <p>The cache path cannot be set to root (/) for an S3 DRA.</p>
    /// </note>
    pub fn file_cache_path(&self) -> std::option::Option<&str> {
        self.file_cache_path.as_deref()
    }
    /// <p>For Amazon File Cache, a list of NFS Exports that will be linked with an NFS data repository association. All the subdirectories must be on a single NFS file system. The Export paths are in the format <code>/exportpath1</code>. To use this parameter, you must configure <code>DataRepositoryPath</code> as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that <code>DataRepositorySubdirectories</code> is not supported for S3 data repositories.</p>
    pub fn data_repository_subdirectories(&self) -> std::option::Option<&[std::string::String]> {
        self.data_repository_subdirectories.as_deref()
    }
    /// <p>The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association.</p>
    pub fn nfs(&self) -> std::option::Option<&crate::model::NfsDataRepositoryConfiguration> {
        self.nfs.as_ref()
    }
}
/// See [`DataRepositoryAssociation`](crate::model::DataRepositoryAssociation).
pub mod data_repository_association {

    /// A builder for [`DataRepositoryAssociation`](crate::model::DataRepositoryAssociation).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) association_id: std::option::Option<std::string::String>,
        pub(crate) resource_arn: std::option::Option<std::string::String>,
        pub(crate) file_system_id: std::option::Option<std::string::String>,
        pub(crate) lifecycle: std::option::Option<crate::model::DataRepositoryLifecycle>,
        pub(crate) failure_details: std::option::Option<crate::model::DataRepositoryFailureDetails>,
        pub(crate) file_system_path: std::option::Option<std::string::String>,
        pub(crate) data_repository_path: std::option::Option<std::string::String>,
        pub(crate) batch_import_meta_data_on_create: std::option::Option<bool>,
        pub(crate) imported_file_chunk_size: std::option::Option<i32>,
        pub(crate) s3: std::option::Option<crate::model::S3DataRepositoryConfiguration>,
        pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        pub(crate) creation_time: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) file_cache_id: std::option::Option<std::string::String>,
        pub(crate) file_cache_path: std::option::Option<std::string::String>,
        pub(crate) data_repository_subdirectories:
            std::option::Option<std::vec::Vec<std::string::String>>,
        pub(crate) nfs: std::option::Option<crate::model::NfsDataRepositoryConfiguration>,
    }
    impl Builder {
        /// <p>The system-generated, unique ID of the data repository association.</p>
        pub fn association_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.association_id = Some(input.into());
            self
        }
        /// <p>The system-generated, unique ID of the data repository association.</p>
        pub fn set_association_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.association_id = input;
            self
        }
        /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
        pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.resource_arn = Some(input.into());
            self
        }
        /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
        pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.resource_arn = input;
            self
        }
        /// <p>The globally unique ID of the file system, assigned by Amazon FSx.</p>
        pub fn file_system_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.file_system_id = Some(input.into());
            self
        }
        /// <p>The globally unique ID of the file system, assigned by Amazon FSx.</p>
        pub fn set_file_system_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.file_system_id = input;
            self
        }
        /// <p>Describes the state of a data repository association. The lifecycle can have the following values:</p>
        /// <ul>
        /// <li> <p> <code>CREATING</code> - The data repository association between the file system or cache and the data repository is being created. The data repository is unavailable.</p> </li>
        /// <li> <p> <code>AVAILABLE</code> - The data repository association is available for use.</p> </li>
        /// <li> <p> <code>MISCONFIGURED</code> - The data repository association is misconfigured. Until the configuration is corrected, automatic import and automatic export will not work (only for Amazon FSx for Lustre).</p> </li>
        /// <li> <p> <code>UPDATING</code> - The data repository association is undergoing a customer initiated update that might affect its availability.</p> </li>
        /// <li> <p> <code>DELETING</code> - The data repository association is undergoing a customer initiated deletion.</p> </li>
        /// <li> <p> <code>FAILED</code> - The data repository association is in a terminal state that cannot be recovered.</p> </li>
        /// </ul>
        pub fn lifecycle(mut self, input: crate::model::DataRepositoryLifecycle) -> Self {
            self.lifecycle = Some(input);
            self
        }
        /// <p>Describes the state of a data repository association. The lifecycle can have the following values:</p>
        /// <ul>
        /// <li> <p> <code>CREATING</code> - The data repository association between the file system or cache and the data repository is being created. The data repository is unavailable.</p> </li>
        /// <li> <p> <code>AVAILABLE</code> - The data repository association is available for use.</p> </li>
        /// <li> <p> <code>MISCONFIGURED</code> - The data repository association is misconfigured. Until the configuration is corrected, automatic import and automatic export will not work (only for Amazon FSx for Lustre).</p> </li>
        /// <li> <p> <code>UPDATING</code> - The data repository association is undergoing a customer initiated update that might affect its availability.</p> </li>
        /// <li> <p> <code>DELETING</code> - The data repository association is undergoing a customer initiated deletion.</p> </li>
        /// <li> <p> <code>FAILED</code> - The data repository association is in a terminal state that cannot be recovered.</p> </li>
        /// </ul>
        pub fn set_lifecycle(
            mut self,
            input: std::option::Option<crate::model::DataRepositoryLifecycle>,
        ) -> Self {
            self.lifecycle = input;
            self
        }
        /// <p>Provides detailed information about the data repository if its <code>Lifecycle</code> is set to <code>MISCONFIGURED</code> or <code>FAILED</code>.</p>
        pub fn failure_details(
            mut self,
            input: crate::model::DataRepositoryFailureDetails,
        ) -> Self {
            self.failure_details = Some(input);
            self
        }
        /// <p>Provides detailed information about the data repository if its <code>Lifecycle</code> is set to <code>MISCONFIGURED</code> or <code>FAILED</code>.</p>
        pub fn set_failure_details(
            mut self,
            input: std::option::Option<crate::model::DataRepositoryFailureDetails>,
        ) -> Self {
            self.failure_details = input;
            self
        }
        /// <p>A path on the Amazon FSx for Lustre file system that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path <code>/ns1/</code>, then you cannot link another data repository with file system path <code>/ns1/ns2</code>.</p>
        /// <p>This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.</p> <note>
        /// <p>If you specify only a forward slash (<code>/</code>) as the file system path, you can link only one data repository to the file system. You can only specify "/" as the file system path for the first data repository associated with a file system.</p>
        /// </note>
        pub fn file_system_path(mut self, input: impl Into<std::string::String>) -> Self {
            self.file_system_path = Some(input.into());
            self
        }
        /// <p>A path on the Amazon FSx for Lustre file system that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path <code>/ns1/</code>, then you cannot link another data repository with file system path <code>/ns1/ns2</code>.</p>
        /// <p>This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.</p> <note>
        /// <p>If you specify only a forward slash (<code>/</code>) as the file system path, you can link only one data repository to the file system. You can only specify "/" as the file system path for the first data repository associated with a file system.</p>
        /// </note>
        pub fn set_file_system_path(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.file_system_path = input;
            self
        }
        /// <p>The path to the data repository that will be linked to the cache or file system.</p>
        /// <ul>
        /// <li> <p>For Amazon File Cache, the path can be an NFS data repository that will be linked to the cache. The path can be in one of two formats:</p>
        /// <ul>
        /// <li> <p>If you are not using the <code>DataRepositorySubdirectories</code> parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format <code>nfs://nfs-domain-name/exportpath</code>. You can therefore link a single NFS Export to a single data repository association.</p> </li>
        /// <li> <p>If you are using the <code>DataRepositorySubdirectories</code> parameter, the path is the domain name of the NFS file system in the format <code>nfs://filer-domain-name</code>, which indicates the root of the subdirectories specified with the <code>DataRepositorySubdirectories</code> parameter.</p> </li>
        /// </ul> </li>
        /// <li> <p>For Amazon File Cache, the path can be an S3 bucket or prefix in the format <code>s3://myBucket/myPrefix/</code>.</p> </li>
        /// <li> <p>For Amazon FSx for Lustre, the path can be an S3 bucket or prefix in the format <code>s3://myBucket/myPrefix/</code>.</p> </li>
        /// </ul>
        pub fn data_repository_path(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_repository_path = Some(input.into());
            self
        }
        /// <p>The path to the data repository that will be linked to the cache or file system.</p>
        /// <ul>
        /// <li> <p>For Amazon File Cache, the path can be an NFS data repository that will be linked to the cache. The path can be in one of two formats:</p>
        /// <ul>
        /// <li> <p>If you are not using the <code>DataRepositorySubdirectories</code> parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format <code>nfs://nfs-domain-name/exportpath</code>. You can therefore link a single NFS Export to a single data repository association.</p> </li>
        /// <li> <p>If you are using the <code>DataRepositorySubdirectories</code> parameter, the path is the domain name of the NFS file system in the format <code>nfs://filer-domain-name</code>, which indicates the root of the subdirectories specified with the <code>DataRepositorySubdirectories</code> parameter.</p> </li>
        /// </ul> </li>
        /// <li> <p>For Amazon File Cache, the path can be an S3 bucket or prefix in the format <code>s3://myBucket/myPrefix/</code>.</p> </li>
        /// <li> <p>For Amazon FSx for Lustre, the path can be an S3 bucket or prefix in the format <code>s3://myBucket/myPrefix/</code>.</p> </li>
        /// </ul>
        pub fn set_data_repository_path(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.data_repository_path = input;
            self
        }
        /// <p>A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to <code>true</code>.</p> <note>
        /// <p> <code>BatchImportMetaDataOnCreate</code> is not supported for data repositories linked to an Amazon File Cache resource.</p>
        /// </note>
        pub fn batch_import_meta_data_on_create(mut self, input: bool) -> Self {
            self.batch_import_meta_data_on_create = Some(input);
            self
        }
        /// <p>A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to <code>true</code>.</p> <note>
        /// <p> <code>BatchImportMetaDataOnCreate</code> is not supported for data repositories linked to an Amazon File Cache resource.</p>
        /// </note>
        pub fn set_batch_import_meta_data_on_create(
            mut self,
            input: std::option::Option<bool>,
        ) -> Self {
            self.batch_import_meta_data_on_create = input;
            self
        }
        /// <p>For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system or cache.</p>
        /// <p>The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.</p>
        pub fn imported_file_chunk_size(mut self, input: i32) -> Self {
            self.imported_file_chunk_size = Some(input);
            self
        }
        /// <p>For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system or cache.</p>
        /// <p>The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.</p>
        pub fn set_imported_file_chunk_size(mut self, input: std::option::Option<i32>) -> Self {
            self.imported_file_chunk_size = input;
            self
        }
        /// <p>The configuration for an Amazon S3 data repository linked to an Amazon FSx for Lustre file system with a data repository association.</p>
        pub fn s3(mut self, input: crate::model::S3DataRepositoryConfiguration) -> Self {
            self.s3 = Some(input);
            self
        }
        /// <p>The configuration for an Amazon S3 data repository linked to an Amazon FSx for Lustre file system with a data repository association.</p>
        pub fn set_s3(
            mut self,
            input: std::option::Option<crate::model::S3DataRepositoryConfiguration>,
        ) -> Self {
            self.s3 = input;
            self
        }
        /// Appends an item to `tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).
        ///
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn tags(mut self, input: crate::model::Tag) -> Self {
            let mut v = self.tags.unwrap_or_default();
            v.push(input);
            self.tags = Some(v);
            self
        }
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn set_tags(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            self.tags = input;
            self
        }
        /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
        pub fn creation_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.creation_time = Some(input);
            self
        }
        /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
        pub fn set_creation_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.creation_time = input;
            self
        }
        /// <p>The globally unique ID of the Amazon File Cache resource.</p>
        pub fn file_cache_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.file_cache_id = Some(input.into());
            self
        }
        /// <p>The globally unique ID of the Amazon File Cache resource.</p>
        pub fn set_file_cache_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.file_cache_id = input;
            self
        }
        /// <p>A path on the Amazon File Cache that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the path is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path <code>/ns1/</code>, then you cannot link another data repository with cache path <code>/ns1/ns2</code>.</p>
        /// <p>This path specifies the directory in your cache where files will be exported from. This cache directory can be linked to only one data repository (S3 or NFS) and no other data repository can be linked to the directory.</p> <note>
        /// <p>The cache path can only be set to root (/) on an NFS DRA when <code>DataRepositorySubdirectories</code> is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache.</p>
        /// <p>The cache path cannot be set to root (/) for an S3 DRA.</p>
        /// </note>
        pub fn file_cache_path(mut self, input: impl Into<std::string::String>) -> Self {
            self.file_cache_path = Some(input.into());
            self
        }
        /// <p>A path on the Amazon File Cache that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the path is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path <code>/ns1/</code>, then you cannot link another data repository with cache path <code>/ns1/ns2</code>.</p>
        /// <p>This path specifies the directory in your cache where files will be exported from. This cache directory can be linked to only one data repository (S3 or NFS) and no other data repository can be linked to the directory.</p> <note>
        /// <p>The cache path can only be set to root (/) on an NFS DRA when <code>DataRepositorySubdirectories</code> is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache.</p>
        /// <p>The cache path cannot be set to root (/) for an S3 DRA.</p>
        /// </note>
        pub fn set_file_cache_path(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.file_cache_path = input;
            self
        }
        /// Appends an item to `data_repository_subdirectories`.
        ///
        /// To override the contents of this collection use [`set_data_repository_subdirectories`](Self::set_data_repository_subdirectories).
        ///
        /// <p>For Amazon File Cache, a list of NFS Exports that will be linked with an NFS data repository association. All the subdirectories must be on a single NFS file system. The Export paths are in the format <code>/exportpath1</code>. To use this parameter, you must configure <code>DataRepositoryPath</code> as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that <code>DataRepositorySubdirectories</code> is not supported for S3 data repositories.</p>
        pub fn data_repository_subdirectories(
            mut self,
            input: impl Into<std::string::String>,
        ) -> Self {
            let mut v = self.data_repository_subdirectories.unwrap_or_default();
            v.push(input.into());
            self.data_repository_subdirectories = Some(v);
            self
        }
        /// <p>For Amazon File Cache, a list of NFS Exports that will be linked with an NFS data repository association. All the subdirectories must be on a single NFS file system. The Export paths are in the format <code>/exportpath1</code>. To use this parameter, you must configure <code>DataRepositoryPath</code> as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that <code>DataRepositorySubdirectories</code> is not supported for S3 data repositories.</p>
        pub fn set_data_repository_subdirectories(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.data_repository_subdirectories = input;
            self
        }
        /// <p>The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association.</p>
        pub fn nfs(mut self, input: crate::model::NfsDataRepositoryConfiguration) -> Self {
            self.nfs = Some(input);
            self
        }
        /// <p>The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association.</p>
        pub fn set_nfs(
            mut self,
            input: std::option::Option<crate::model::NfsDataRepositoryConfiguration>,
        ) -> Self {
            self.nfs = input;
            self
        }
        /// Consumes the builder and constructs a [`DataRepositoryAssociation`](crate::model::DataRepositoryAssociation).
        pub fn build(self) -> crate::model::DataRepositoryAssociation {
            crate::model::DataRepositoryAssociation {
                association_id: self.association_id,
                resource_arn: self.resource_arn,
                file_system_id: self.file_system_id,
                lifecycle: self.lifecycle,
                failure_details: self.failure_details,
                file_system_path: self.file_system_path,
                data_repository_path: self.data_repository_path,
                batch_import_meta_data_on_create: self.batch_import_meta_data_on_create,
                imported_file_chunk_size: self.imported_file_chunk_size,
                s3: self.s3,
                tags: self.tags,
                creation_time: self.creation_time,
                file_cache_id: self.file_cache_id,
                file_cache_path: self.file_cache_path,
                data_repository_subdirectories: self.data_repository_subdirectories,
                nfs: self.nfs,
            }
        }
    }
}
impl DataRepositoryAssociation {
    /// Creates a new builder-style object to manufacture [`DataRepositoryAssociation`](crate::model::DataRepositoryAssociation).
    pub fn builder() -> crate::model::data_repository_association::Builder {
        // The concrete builder type is fixed by the return type annotation.
        std::default::Default::default()
    }
}

/// <p>The configuration for a data repository association that links an Amazon File Cache resource to an NFS data repository.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct NfsDataRepositoryConfiguration {
    /// <p>The version of the NFS (Network File System) protocol of the NFS data repository. Currently, the only supported value is <code>NFS3</code>, which indicates that the data repository must support the NFSv3 protocol.</p>
    #[doc(hidden)]
    pub version: std::option::Option<crate::model::NfsVersion>,
    /// <p>A list of up to 2 IP addresses of DNS servers used to resolve the NFS file system domain name. The provided IP addresses can either be the IP addresses of a DNS forwarder or resolver that the customer manages and runs inside the customer VPC, or the IP addresses of the on-premises DNS servers.</p>
    #[doc(hidden)]
    pub dns_ips: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>This parameter is not supported for Amazon File Cache.</p>
    #[doc(hidden)]
    pub auto_export_policy: std::option::Option<crate::model::AutoExportPolicy>,
}
impl NfsDataRepositoryConfiguration {
    /// <p>The version of the NFS (Network File System) protocol of the NFS data repository. Currently, the only supported value is <code>NFS3</code>, which indicates that the data repository must support the NFSv3 protocol.</p>
    pub fn version(&self) -> std::option::Option<&crate::model::NfsVersion> {
        // Borrowing accessor: the caller never takes ownership of the field.
        self.version.as_ref()
    }
    /// <p>A list of up to 2 IP addresses of DNS servers used to resolve the NFS file system domain name. The provided IP addresses can either be the IP addresses of a DNS forwarder or resolver that the customer manages and runs inside the customer VPC, or the IP addresses of the on-premises DNS servers.</p>
    pub fn dns_ips(&self) -> std::option::Option<&[std::string::String]> {
        // Expose the owned `Vec` as a borrowed slice.
        self.dns_ips.as_ref().map(|ips| ips.as_slice())
    }
    /// <p>This parameter is not supported for Amazon File Cache.</p>
    pub fn auto_export_policy(&self) -> std::option::Option<&crate::model::AutoExportPolicy> {
        self.auto_export_policy.as_ref()
    }
}
/// See [`NfsDataRepositoryConfiguration`](crate::model::NfsDataRepositoryConfiguration).
pub mod nfs_data_repository_configuration {

    /// A builder for [`NfsDataRepositoryConfiguration`](crate::model::NfsDataRepositoryConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) version: std::option::Option<crate::model::NfsVersion>,
        pub(crate) dns_ips: std::option::Option<std::vec::Vec<std::string::String>>,
        pub(crate) auto_export_policy: std::option::Option<crate::model::AutoExportPolicy>,
    }
    impl Builder {
        /// <p>The version of the NFS (Network File System) protocol of the NFS data repository. Currently, the only supported value is <code>NFS3</code>, which indicates that the data repository must support the NFSv3 protocol.</p>
        pub fn version(self, input: crate::model::NfsVersion) -> Self {
            Self {
                version: std::option::Option::Some(input),
                ..self
            }
        }
        /// <p>The version of the NFS (Network File System) protocol of the NFS data repository. Currently, the only supported value is <code>NFS3</code>, which indicates that the data repository must support the NFSv3 protocol.</p>
        pub fn set_version(self, input: std::option::Option<crate::model::NfsVersion>) -> Self {
            Self {
                version: input,
                ..self
            }
        }
        /// Appends an item to `dns_ips`.
        ///
        /// To override the contents of this collection use [`set_dns_ips`](Self::set_dns_ips).
        ///
        /// <p>A list of up to 2 IP addresses of DNS servers used to resolve the NFS file system domain name. The provided IP addresses can either be the IP addresses of a DNS forwarder or resolver that the customer manages and runs inside the customer VPC, or the IP addresses of the on-premises DNS servers.</p>
        pub fn dns_ips(mut self, input: impl Into<std::string::String>) -> Self {
            // Lazily create the backing `Vec` on first append.
            self.dns_ips
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// <p>A list of up to 2 IP addresses of DNS servers used to resolve the NFS file system domain name. The provided IP addresses can either be the IP addresses of a DNS forwarder or resolver that the customer manages and runs inside the customer VPC, or the IP addresses of the on-premises DNS servers.</p>
        pub fn set_dns_ips(
            self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            Self {
                dns_ips: input,
                ..self
            }
        }
        /// <p>This parameter is not supported for Amazon File Cache.</p>
        pub fn auto_export_policy(self, input: crate::model::AutoExportPolicy) -> Self {
            Self {
                auto_export_policy: std::option::Option::Some(input),
                ..self
            }
        }
        /// <p>This parameter is not supported for Amazon File Cache.</p>
        pub fn set_auto_export_policy(
            self,
            input: std::option::Option<crate::model::AutoExportPolicy>,
        ) -> Self {
            Self {
                auto_export_policy: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`NfsDataRepositoryConfiguration`](crate::model::NfsDataRepositoryConfiguration).
        pub fn build(self) -> crate::model::NfsDataRepositoryConfiguration {
            // Destructure so a newly added builder field cannot be silently dropped.
            let Self {
                version,
                dns_ips,
                auto_export_policy,
            } = self;
            crate::model::NfsDataRepositoryConfiguration {
                version,
                dns_ips,
                auto_export_policy,
            }
        }
    }
}
impl NfsDataRepositoryConfiguration {
    /// Creates a new builder-style object to manufacture [`NfsDataRepositoryConfiguration`](crate::model::NfsDataRepositoryConfiguration).
    pub fn builder() -> crate::model::nfs_data_repository_configuration::Builder {
        std::default::Default::default()
    }
}

/// <p>Describes a data repository association's automatic export policy. The <code>AutoExportPolicy</code> defines the types of updated objects on the file system that will be automatically exported to the data repository. As you create, modify, or delete files, Amazon FSx for Lustre automatically exports the defined changes asynchronously once your application finishes modifying the file.</p>
/// <p>This <code>AutoExportPolicy</code> is supported only for Amazon FSx for Lustre file systems with the <code>Persistent_2</code> deployment type.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct AutoExportPolicy {
    /// <p>The <code>AutoExportPolicy</code> can have the following event values:</p>
    /// <ul>
    /// <li> <p> <code>NEW</code> - New files and directories are automatically exported to the data repository as they are added to the file system.</p> </li>
    /// <li> <p> <code>CHANGED</code> - Changes to files and directories on the file system are automatically exported to the data repository.</p> </li>
    /// <li> <p> <code>DELETED</code> - Files and directories are automatically deleted on the data repository when they are deleted on the file system.</p> </li>
    /// </ul>
    /// <p>You can define any combination of event types for your <code>AutoExportPolicy</code>.</p>
    #[doc(hidden)]
    pub events: std::option::Option<std::vec::Vec<crate::model::EventType>>,
}
impl AutoExportPolicy {
    /// <p>The <code>AutoExportPolicy</code> can have the following event values:</p>
    /// <ul>
    /// <li> <p> <code>NEW</code> - New files and directories are automatically exported to the data repository as they are added to the file system.</p> </li>
    /// <li> <p> <code>CHANGED</code> - Changes to files and directories on the file system are automatically exported to the data repository.</p> </li>
    /// <li> <p> <code>DELETED</code> - Files and directories are automatically deleted on the data repository when they are deleted on the file system.</p> </li>
    /// </ul>
    /// <p>You can define any combination of event types for your <code>AutoExportPolicy</code>.</p>
    pub fn events(&self) -> std::option::Option<&[crate::model::EventType]> {
        // Borrow the owned `Vec` as a slice without transferring ownership.
        self.events.as_ref().map(|events| events.as_slice())
    }
}
/// See [`AutoExportPolicy`](crate::model::AutoExportPolicy).
pub mod auto_export_policy {

    /// A builder for [`AutoExportPolicy`](crate::model::AutoExportPolicy).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) events: std::option::Option<std::vec::Vec<crate::model::EventType>>,
    }
    impl Builder {
        /// Appends an item to `events`.
        ///
        /// To override the contents of this collection use [`set_events`](Self::set_events).
        ///
        /// <p>The <code>AutoExportPolicy</code> can have the following event values:</p>
        /// <ul>
        /// <li> <p> <code>NEW</code> - New files and directories are automatically exported to the data repository as they are added to the file system.</p> </li>
        /// <li> <p> <code>CHANGED</code> - Changes to files and directories on the file system are automatically exported to the data repository.</p> </li>
        /// <li> <p> <code>DELETED</code> - Files and directories are automatically deleted on the data repository when they are deleted on the file system.</p> </li>
        /// </ul>
        /// <p>You can define any combination of event types for your <code>AutoExportPolicy</code>.</p>
        pub fn events(mut self, input: crate::model::EventType) -> Self {
            // Lazily create the backing `Vec` on first append.
            self.events.get_or_insert_with(std::vec::Vec::new).push(input);
            self
        }
        /// <p>The <code>AutoExportPolicy</code> can have the following event values:</p>
        /// <ul>
        /// <li> <p> <code>NEW</code> - New files and directories are automatically exported to the data repository as they are added to the file system.</p> </li>
        /// <li> <p> <code>CHANGED</code> - Changes to files and directories on the file system are automatically exported to the data repository.</p> </li>
        /// <li> <p> <code>DELETED</code> - Files and directories are automatically deleted on the data repository when they are deleted on the file system.</p> </li>
        /// </ul>
        /// <p>You can define any combination of event types for your <code>AutoExportPolicy</code>.</p>
        pub fn set_events(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::EventType>>,
        ) -> Self {
            Self { events: input }
        }
        /// Consumes the builder and constructs a [`AutoExportPolicy`](crate::model::AutoExportPolicy).
        pub fn build(self) -> crate::model::AutoExportPolicy {
            let Self { events } = self;
            crate::model::AutoExportPolicy { events }
        }
    }
}
impl AutoExportPolicy {
    /// Creates a new builder-style object to manufacture [`AutoExportPolicy`](crate::model::AutoExportPolicy).
    pub fn builder() -> crate::model::auto_export_policy::Builder {
        std::default::Default::default()
    }
}

/// When writing a match expression against `EventType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let eventtype = unimplemented!();
/// match eventtype {
///     EventType::Changed => { /* ... */ },
///     EventType::Deleted => { /* ... */ },
///     EventType::New => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `eventtype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `EventType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `EventType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `EventType::NewFeature` is defined.
/// Specifically, when `eventtype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `EventType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum EventType {
    #[allow(missing_docs)] // documentation missing in model
    Changed,
    #[allow(missing_docs)] // documentation missing in model
    Deleted,
    #[allow(missing_docs)] // documentation missing in model
    New,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for EventType {
    fn from(s: &str) -> Self {
        // Unrecognized wire values are preserved verbatim in `Unknown` so newer
        // service-side values survive a round trip through an older SDK.
        match s {
            "NEW" => Self::New,
            "CHANGED" => Self::Changed,
            "DELETED" => Self::Deleted,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for EventType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Parsing never fails: any unknown string becomes `Unknown`.
        Ok(s.into())
    }
}
impl EventType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::New => "NEW",
            Self::Changed => "CHANGED",
            Self::Deleted => "DELETED",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["CHANGED", "DELETED", "NEW"]
    }
}
impl AsRef<str> for EventType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// When writing a match expression against `NfsVersion`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let nfsversion = unimplemented!();
/// match nfsversion {
///     NfsVersion::Nfs3 => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `nfsversion` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `NfsVersion::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `NfsVersion::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `NfsVersion::NewFeature` is defined.
/// Specifically, when `nfsversion` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `NfsVersion::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum NfsVersion {
    #[allow(missing_docs)] // documentation missing in model
    Nfs3,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for NfsVersion {
    fn from(s: &str) -> Self {
        // Anything other than the single known value is retained as `Unknown`
        // so newer service-side values survive deserialization.
        if s == "NFS3" {
            Self::Nfs3
        } else {
            Self::Unknown(crate::types::UnknownVariantValue(s.to_owned()))
        }
    }
}
impl std::str::FromStr for NfsVersion {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Parsing never fails: any unknown string becomes `Unknown`.
        Ok(s.into())
    }
}
impl NfsVersion {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Nfs3 => "NFS3",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["NFS3"]
    }
}
impl AsRef<str> for NfsVersion {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>The configuration for an Amazon S3 data repository linked to an Amazon FSx for Lustre file system with a data repository association. The configuration consists of an <code>AutoImportPolicy</code> that defines which file events on the data repository are automatically imported to the file system and an <code>AutoExportPolicy</code> that defines which file events on the file system are automatically exported to the data repository. File events are when files or directories are added, changed, or deleted on the file system or the data repository.</p> <note>
/// <p>Data repository associations on Amazon File Cache don't use <code>S3DataRepositoryConfiguration</code> because they don't support automatic import or automatic export.</p>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct S3DataRepositoryConfiguration {
    /// <p>Specifies the type of updated objects (new, changed, deleted) that will be automatically imported from the linked S3 bucket to your file system.</p>
    #[doc(hidden)]
    pub auto_import_policy: std::option::Option<crate::model::AutoImportPolicy>,
    /// <p>Specifies the type of updated objects (new, changed, deleted) that will be automatically exported from your file system to the linked S3 bucket.</p>
    #[doc(hidden)]
    pub auto_export_policy: std::option::Option<crate::model::AutoExportPolicy>,
}
impl S3DataRepositoryConfiguration {
    /// <p>Specifies the type of updated objects (new, changed, deleted) that will be automatically imported from the linked S3 bucket to your file system.</p>
    pub fn auto_import_policy(&self) -> std::option::Option<&crate::model::AutoImportPolicy> {
        // Borrowing accessor: the caller never takes ownership of the field.
        self.auto_import_policy.as_ref()
    }
    /// <p>Specifies the type of updated objects (new, changed, deleted) that will be automatically exported from your file system to the linked S3 bucket.</p>
    pub fn auto_export_policy(&self) -> std::option::Option<&crate::model::AutoExportPolicy> {
        self.auto_export_policy.as_ref()
    }
}
/// See [`S3DataRepositoryConfiguration`](crate::model::S3DataRepositoryConfiguration).
pub mod s3_data_repository_configuration {

    /// A builder for [`S3DataRepositoryConfiguration`](crate::model::S3DataRepositoryConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) auto_import_policy: std::option::Option<crate::model::AutoImportPolicy>,
        pub(crate) auto_export_policy: std::option::Option<crate::model::AutoExportPolicy>,
    }
    impl Builder {
        /// <p>Specifies the type of updated objects (new, changed, deleted) that will be automatically imported from the linked S3 bucket to your file system.</p>
        pub fn auto_import_policy(self, input: crate::model::AutoImportPolicy) -> Self {
            Self {
                auto_import_policy: std::option::Option::Some(input),
                ..self
            }
        }
        /// <p>Specifies the type of updated objects (new, changed, deleted) that will be automatically imported from the linked S3 bucket to your file system.</p>
        pub fn set_auto_import_policy(
            self,
            input: std::option::Option<crate::model::AutoImportPolicy>,
        ) -> Self {
            Self {
                auto_import_policy: input,
                ..self
            }
        }
        /// <p>Specifies the type of updated objects (new, changed, deleted) that will be automatically exported from your file system to the linked S3 bucket.</p>
        pub fn auto_export_policy(self, input: crate::model::AutoExportPolicy) -> Self {
            Self {
                auto_export_policy: std::option::Option::Some(input),
                ..self
            }
        }
        /// <p>Specifies the type of updated objects (new, changed, deleted) that will be automatically exported from your file system to the linked S3 bucket.</p>
        pub fn set_auto_export_policy(
            self,
            input: std::option::Option<crate::model::AutoExportPolicy>,
        ) -> Self {
            Self {
                auto_export_policy: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`S3DataRepositoryConfiguration`](crate::model::S3DataRepositoryConfiguration).
        pub fn build(self) -> crate::model::S3DataRepositoryConfiguration {
            // Destructure so a newly added builder field cannot be silently dropped.
            let Self {
                auto_import_policy,
                auto_export_policy,
            } = self;
            crate::model::S3DataRepositoryConfiguration {
                auto_import_policy,
                auto_export_policy,
            }
        }
    }
}
impl S3DataRepositoryConfiguration {
    /// Creates a new builder-style object to manufacture [`S3DataRepositoryConfiguration`](crate::model::S3DataRepositoryConfiguration).
    pub fn builder() -> crate::model::s3_data_repository_configuration::Builder {
        std::default::Default::default()
    }
}

/// <p>Describes the data repository association's automatic import policy. The AutoImportPolicy defines how Amazon FSx keeps your file metadata and directory listings up to date by importing changes to your Amazon FSx for Lustre file system as you modify objects in a linked S3 bucket.</p>
/// <p>The <code>AutoImportPolicy</code> is supported only for Amazon FSx for Lustre file systems with the <code>Persistent_2</code> deployment type.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct AutoImportPolicy {
    /// <p>The <code>AutoImportPolicy</code> can have the following event values:</p>
    /// <ul>
    /// <li> <p> <code>NEW</code> - Amazon FSx automatically imports metadata of files added to the linked S3 bucket that do not currently exist in the FSx file system.</p> </li>
    /// <li> <p> <code>CHANGED</code> - Amazon FSx automatically updates file metadata and invalidates existing file content on the file system as files change in the data repository.</p> </li>
    /// <li> <p> <code>DELETED</code> - Amazon FSx automatically deletes files on the file system as corresponding files are deleted in the data repository.</p> </li>
    /// </ul>
    /// <p>You can define any combination of event types for your <code>AutoImportPolicy</code>.</p>
    #[doc(hidden)]
    pub events: std::option::Option<std::vec::Vec<crate::model::EventType>>,
}
impl AutoImportPolicy {
    /// <p>The <code>AutoImportPolicy</code> can have the following event values:</p>
    /// <ul>
    /// <li> <p> <code>NEW</code> - Amazon FSx automatically imports metadata of files added to the linked S3 bucket that do not currently exist in the FSx file system.</p> </li>
    /// <li> <p> <code>CHANGED</code> - Amazon FSx automatically updates file metadata and invalidates existing file content on the file system as files change in the data repository.</p> </li>
    /// <li> <p> <code>DELETED</code> - Amazon FSx automatically deletes files on the file system as corresponding files are deleted in the data repository.</p> </li>
    /// </ul>
    /// <p>You can define any combination of event types for your <code>AutoImportPolicy</code>.</p>
    pub fn events(&self) -> std::option::Option<&[crate::model::EventType]> {
        // Borrow the owned `Vec` as a slice without transferring ownership.
        self.events.as_ref().map(|events| events.as_slice())
    }
}
/// See [`AutoImportPolicy`](crate::model::AutoImportPolicy).
pub mod auto_import_policy {

    /// A builder for [`AutoImportPolicy`](crate::model::AutoImportPolicy).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) events: std::option::Option<std::vec::Vec<crate::model::EventType>>,
    }
    impl Builder {
        /// Appends an item to `events`.
        ///
        /// To override the contents of this collection use [`set_events`](Self::set_events).
        ///
        /// <p>The <code>AutoImportPolicy</code> can have the following event values:</p>
        /// <ul>
        /// <li> <p> <code>NEW</code> - Amazon FSx automatically imports metadata of files added to the linked S3 bucket that do not currently exist in the FSx file system.</p> </li>
        /// <li> <p> <code>CHANGED</code> - Amazon FSx automatically updates file metadata and invalidates existing file content on the file system as files change in the data repository.</p> </li>
        /// <li> <p> <code>DELETED</code> - Amazon FSx automatically deletes files on the file system as corresponding files are deleted in the data repository.</p> </li>
        /// </ul>
        /// <p>You can define any combination of event types for your <code>AutoImportPolicy</code>.</p>
        pub fn events(mut self, input: crate::model::EventType) -> Self {
            // Lazily create the backing `Vec` on first append.
            self.events.get_or_insert_with(std::vec::Vec::new).push(input);
            self
        }
        /// <p>The <code>AutoImportPolicy</code> can have the following event values:</p>
        /// <ul>
        /// <li> <p> <code>NEW</code> - Amazon FSx automatically imports metadata of files added to the linked S3 bucket that do not currently exist in the FSx file system.</p> </li>
        /// <li> <p> <code>CHANGED</code> - Amazon FSx automatically updates file metadata and invalidates existing file content on the file system as files change in the data repository.</p> </li>
        /// <li> <p> <code>DELETED</code> - Amazon FSx automatically deletes files on the file system as corresponding files are deleted in the data repository.</p> </li>
        /// </ul>
        /// <p>You can define any combination of event types for your <code>AutoImportPolicy</code>.</p>
        pub fn set_events(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::EventType>>,
        ) -> Self {
            Self { events: input }
        }
        /// Consumes the builder and constructs a [`AutoImportPolicy`](crate::model::AutoImportPolicy).
        pub fn build(self) -> crate::model::AutoImportPolicy {
            let Self { events } = self;
            crate::model::AutoImportPolicy { events }
        }
    }
}
impl AutoImportPolicy {
    /// Creates a new builder-style object to manufacture [`AutoImportPolicy`](crate::model::AutoImportPolicy).
    pub fn builder() -> crate::model::auto_import_policy::Builder {
        std::default::Default::default()
    }
}

/// When writing a match expression against `RestoreOpenZfsVolumeOption`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let restoreopenzfsvolumeoption = unimplemented!();
/// match restoreopenzfsvolumeoption {
///     RestoreOpenZfsVolumeOption::DeleteClonedVolumes => { /* ... */ },
///     RestoreOpenZfsVolumeOption::DeleteIntermediateSnapshots => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `restoreopenzfsvolumeoption` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `RestoreOpenZfsVolumeOption::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `RestoreOpenZfsVolumeOption::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `RestoreOpenZfsVolumeOption::NewFeature` is defined.
/// Specifically, when `restoreopenzfsvolumeoption` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `RestoreOpenZfsVolumeOption::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum RestoreOpenZfsVolumeOption {
    #[allow(missing_docs)] // documentation missing in model
    DeleteClonedVolumes,
    #[allow(missing_docs)] // documentation missing in model
    DeleteIntermediateSnapshots,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for RestoreOpenZfsVolumeOption {
    fn from(s: &str) -> Self {
        // Unrecognized wire values are preserved verbatim in `Unknown` so newer
        // service-side values survive deserialization with an older SDK.
        match s {
            "DELETE_INTERMEDIATE_SNAPSHOTS" => Self::DeleteIntermediateSnapshots,
            "DELETE_CLONED_VOLUMES" => Self::DeleteClonedVolumes,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for RestoreOpenZfsVolumeOption {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Parsing never fails: any unknown string becomes `Unknown`.
        Ok(s.into())
    }
}
impl RestoreOpenZfsVolumeOption {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::DeleteIntermediateSnapshots => "DELETE_INTERMEDIATE_SNAPSHOTS",
            Self::DeleteClonedVolumes => "DELETE_CLONED_VOLUMES",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["DELETE_CLONED_VOLUMES", "DELETE_INTERMEDIATE_SNAPSHOTS"]
    }
}
impl AsRef<str> for RestoreOpenZfsVolumeOption {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>A filter used to restrict the results of describe calls for Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volumes. You can use multiple filters to return results that meet all applied filter requirements.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct VolumeFilter {
    /// <p>The name for this filter.</p>
    #[doc(hidden)]
    pub name: std::option::Option<crate::model::VolumeFilterName>,
    /// <p>The values of the filter. These are all the values for any of the applied filters.</p>
    #[doc(hidden)]
    pub values: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl VolumeFilter {
    /// <p>The name for this filter.</p>
    pub fn name(&self) -> std::option::Option<&crate::model::VolumeFilterName> {
        self.name.as_ref()
    }
    /// <p>The values of the filter. These are all the values for any of the applied filters.</p>
    pub fn values(&self) -> std::option::Option<&[std::string::String]> {
        self.values.as_ref().map(|v| v.as_slice())
    }
}
/// See [`VolumeFilter`](crate::model::VolumeFilter).
pub mod volume_filter {

    /// A builder for [`VolumeFilter`](crate::model::VolumeFilter).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) name: std::option::Option<crate::model::VolumeFilterName>,
        pub(crate) values: std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// <p>The name for this filter.</p>
        pub fn name(mut self, filter_name: crate::model::VolumeFilterName) -> Self {
            self.name = Some(filter_name);
            self
        }
        /// <p>The name for this filter.</p>
        pub fn set_name(
            mut self,
            filter_name: std::option::Option<crate::model::VolumeFilterName>,
        ) -> Self {
            self.name = filter_name;
            self
        }
        /// Appends an item to `values`.
        ///
        /// To override the contents of this collection use [`set_values`](Self::set_values).
        ///
        /// <p>The values of the filter. These are all the values for any of the applied filters.</p>
        pub fn values(mut self, value: impl Into<std::string::String>) -> Self {
            // Lazily create the backing vector on the first append.
            self.values
                .get_or_insert_with(std::vec::Vec::new)
                .push(value.into());
            self
        }
        /// <p>The values of the filter. These are all the values for any of the applied filters.</p>
        pub fn set_values(
            mut self,
            filter_values: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.values = filter_values;
            self
        }
        /// Consumes the builder and constructs a [`VolumeFilter`](crate::model::VolumeFilter).
        pub fn build(self) -> crate::model::VolumeFilter {
            let Self { name, values } = self;
            crate::model::VolumeFilter { name, values }
        }
    }
}
impl VolumeFilter {
    /// Creates a new builder-style object to manufacture [`VolumeFilter`](crate::model::VolumeFilter).
    pub fn builder() -> crate::model::volume_filter::Builder {
        crate::model::volume_filter::Builder::default()
    }
}

/// When writing a match expression against `VolumeFilterName`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let volumefiltername = unimplemented!();
/// match volumefiltername {
///     VolumeFilterName::FileSystemId => { /* ... */ },
///     VolumeFilterName::StorageVirtualMachineId => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `volumefiltername` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `VolumeFilterName::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `VolumeFilterName::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `VolumeFilterName::NewFeature` is defined.
/// Specifically, when `volumefiltername` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `VolumeFilterName::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum VolumeFilterName {
    /// Maps to the wire value `file-system-id`.
    FileSystemId,
    /// Maps to the wire value `storage-virtual-machine-id`.
    StorageVirtualMachineId,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for VolumeFilterName {
    fn from(s: &str) -> Self {
        // Unrecognized strings are preserved verbatim inside `Unknown` so this
        // enum stays forward-compatible with filter names added after codegen.
        match s {
            "file-system-id" => Self::FileSystemId,
            "storage-virtual-machine-id" => Self::StorageVirtualMachineId,
            unrecognized => {
                Self::Unknown(crate::types::UnknownVariantValue(unrecognized.to_owned()))
            }
        }
    }
}
impl std::str::FromStr for VolumeFilterName {
    type Err = std::convert::Infallible;

    /// Parsing never fails; it delegates to the `From<&str>` conversion above.
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(Self::from(s))
    }
}
impl VolumeFilterName {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::FileSystemId => "file-system-id",
            Self::StorageVirtualMachineId => "storage-virtual-machine-id",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["file-system-id", "storage-virtual-machine-id"]
    }
}
impl AsRef<str> for VolumeFilterName {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>A filter used to restrict the results of describe calls for Amazon FSx for NetApp ONTAP storage virtual machines (SVMs). You can use multiple filters to return results that meet all applied filter requirements.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct StorageVirtualMachineFilter {
    /// <p>The name for this filter.</p>
    #[doc(hidden)]
    pub name: std::option::Option<crate::model::StorageVirtualMachineFilterName>,
    /// <p>The values of the filter. These are all the values for any of the applied filters.</p>
    #[doc(hidden)]
    pub values: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl StorageVirtualMachineFilter {
    /// <p>The name for this filter.</p>
    pub fn name(&self) -> std::option::Option<&crate::model::StorageVirtualMachineFilterName> {
        self.name.as_ref()
    }
    /// <p>The values of the filter. These are all the values for any of the applied filters.</p>
    pub fn values(&self) -> std::option::Option<&[std::string::String]> {
        self.values.as_ref().map(|v| v.as_slice())
    }
}
/// See [`StorageVirtualMachineFilter`](crate::model::StorageVirtualMachineFilter).
pub mod storage_virtual_machine_filter {

    /// A builder for [`StorageVirtualMachineFilter`](crate::model::StorageVirtualMachineFilter).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) name: std::option::Option<crate::model::StorageVirtualMachineFilterName>,
        pub(crate) values: std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// <p>The name for this filter.</p>
        pub fn name(mut self, filter_name: crate::model::StorageVirtualMachineFilterName) -> Self {
            self.name = Some(filter_name);
            self
        }
        /// <p>The name for this filter.</p>
        pub fn set_name(
            mut self,
            filter_name: std::option::Option<crate::model::StorageVirtualMachineFilterName>,
        ) -> Self {
            self.name = filter_name;
            self
        }
        /// Appends an item to `values`.
        ///
        /// To override the contents of this collection use [`set_values`](Self::set_values).
        ///
        /// <p>The values of the filter. These are all the values for any of the applied filters.</p>
        pub fn values(mut self, value: impl Into<std::string::String>) -> Self {
            // Lazily create the backing vector on the first append.
            self.values
                .get_or_insert_with(std::vec::Vec::new)
                .push(value.into());
            self
        }
        /// <p>The values of the filter. These are all the values for any of the applied filters.</p>
        pub fn set_values(
            mut self,
            filter_values: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.values = filter_values;
            self
        }
        /// Consumes the builder and constructs a [`StorageVirtualMachineFilter`](crate::model::StorageVirtualMachineFilter).
        pub fn build(self) -> crate::model::StorageVirtualMachineFilter {
            let Self { name, values } = self;
            crate::model::StorageVirtualMachineFilter { name, values }
        }
    }
}
impl StorageVirtualMachineFilter {
    /// Creates a new builder-style object to manufacture [`StorageVirtualMachineFilter`](crate::model::StorageVirtualMachineFilter).
    pub fn builder() -> crate::model::storage_virtual_machine_filter::Builder {
        crate::model::storage_virtual_machine_filter::Builder::default()
    }
}

/// When writing a match expression against `StorageVirtualMachineFilterName`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let storagevirtualmachinefiltername = unimplemented!();
/// match storagevirtualmachinefiltername {
///     StorageVirtualMachineFilterName::FileSystemId => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `storagevirtualmachinefiltername` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `StorageVirtualMachineFilterName::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `StorageVirtualMachineFilterName::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `StorageVirtualMachineFilterName::NewFeature` is defined.
/// Specifically, when `storagevirtualmachinefiltername` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `StorageVirtualMachineFilterName::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum StorageVirtualMachineFilterName {
    /// Maps to the wire value `file-system-id`.
    FileSystemId,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for StorageVirtualMachineFilterName {
    fn from(s: &str) -> Self {
        // Unrecognized strings are preserved verbatim inside `Unknown` so this
        // enum stays forward-compatible with filter names added after codegen.
        match s {
            "file-system-id" => Self::FileSystemId,
            unrecognized => {
                Self::Unknown(crate::types::UnknownVariantValue(unrecognized.to_owned()))
            }
        }
    }
}
impl std::str::FromStr for StorageVirtualMachineFilterName {
    type Err = std::convert::Infallible;

    /// Parsing never fails; it delegates to the `From<&str>` conversion above.
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(Self::from(s))
    }
}
impl StorageVirtualMachineFilterName {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::FileSystemId => "file-system-id",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["file-system-id"]
    }
}
impl AsRef<str> for StorageVirtualMachineFilterName {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>A filter used to restrict the results of <code>DescribeSnapshots</code> calls. You can use multiple filters to return results that meet all applied filter requirements.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct SnapshotFilter {
    /// <p>The name of the filter to use. You can filter by the <code>file-system-id</code> or by <code>volume-id</code>.</p>
    #[doc(hidden)]
    pub name: std::option::Option<crate::model::SnapshotFilterName>,
    /// <p>The <code>file-system-id</code> or <code>volume-id</code> that you are filtering for.</p>
    #[doc(hidden)]
    pub values: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl SnapshotFilter {
    /// <p>The name of the filter to use. You can filter by the <code>file-system-id</code> or by <code>volume-id</code>.</p>
    pub fn name(&self) -> std::option::Option<&crate::model::SnapshotFilterName> {
        self.name.as_ref()
    }
    /// <p>The <code>file-system-id</code> or <code>volume-id</code> that you are filtering for.</p>
    pub fn values(&self) -> std::option::Option<&[std::string::String]> {
        self.values.as_ref().map(|v| v.as_slice())
    }
}
/// See [`SnapshotFilter`](crate::model::SnapshotFilter).
pub mod snapshot_filter {

    /// A builder for [`SnapshotFilter`](crate::model::SnapshotFilter).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) name: std::option::Option<crate::model::SnapshotFilterName>,
        pub(crate) values: std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// <p>The name of the filter to use. You can filter by the <code>file-system-id</code> or by <code>volume-id</code>.</p>
        pub fn name(mut self, filter_name: crate::model::SnapshotFilterName) -> Self {
            self.name = Some(filter_name);
            self
        }
        /// <p>The name of the filter to use. You can filter by the <code>file-system-id</code> or by <code>volume-id</code>.</p>
        pub fn set_name(
            mut self,
            filter_name: std::option::Option<crate::model::SnapshotFilterName>,
        ) -> Self {
            self.name = filter_name;
            self
        }
        /// Appends an item to `values`.
        ///
        /// To override the contents of this collection use [`set_values`](Self::set_values).
        ///
        /// <p>The <code>file-system-id</code> or <code>volume-id</code> that you are filtering for.</p>
        pub fn values(mut self, value: impl Into<std::string::String>) -> Self {
            // Lazily create the backing vector on the first append.
            self.values
                .get_or_insert_with(std::vec::Vec::new)
                .push(value.into());
            self
        }
        /// <p>The <code>file-system-id</code> or <code>volume-id</code> that you are filtering for.</p>
        pub fn set_values(
            mut self,
            filter_values: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.values = filter_values;
            self
        }
        /// Consumes the builder and constructs a [`SnapshotFilter`](crate::model::SnapshotFilter).
        pub fn build(self) -> crate::model::SnapshotFilter {
            let Self { name, values } = self;
            crate::model::SnapshotFilter { name, values }
        }
    }
}
impl SnapshotFilter {
    /// Creates a new builder-style object to manufacture [`SnapshotFilter`](crate::model::SnapshotFilter).
    pub fn builder() -> crate::model::snapshot_filter::Builder {
        crate::model::snapshot_filter::Builder::default()
    }
}

/// When writing a match expression against `SnapshotFilterName`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let snapshotfiltername = unimplemented!();
/// match snapshotfiltername {
///     SnapshotFilterName::FileSystemId => { /* ... */ },
///     SnapshotFilterName::VolumeId => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `snapshotfiltername` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `SnapshotFilterName::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `SnapshotFilterName::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `SnapshotFilterName::NewFeature` is defined.
/// Specifically, when `snapshotfiltername` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `SnapshotFilterName::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum SnapshotFilterName {
    /// Maps to the wire value `file-system-id`.
    FileSystemId,
    /// Maps to the wire value `volume-id`.
    VolumeId,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for SnapshotFilterName {
    fn from(s: &str) -> Self {
        // Unrecognized strings are preserved verbatim inside `Unknown` so this
        // enum stays forward-compatible with filter names added after codegen.
        match s {
            "file-system-id" => Self::FileSystemId,
            "volume-id" => Self::VolumeId,
            unrecognized => {
                Self::Unknown(crate::types::UnknownVariantValue(unrecognized.to_owned()))
            }
        }
    }
}
impl std::str::FromStr for SnapshotFilterName {
    type Err = std::convert::Infallible;

    /// Parsing never fails; it delegates to the `From<&str>` conversion above.
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(Self::from(s))
    }
}
impl SnapshotFilterName {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::FileSystemId => "file-system-id",
            Self::VolumeId => "volume-id",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["file-system-id", "volume-id"]
    }
}
impl AsRef<str> for SnapshotFilterName {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>A description of the data repository task. You use data repository tasks to perform bulk transfer operations between an Amazon FSx for Lustre file system and a linked data repository. An Amazon File Cache resource uses a task to automatically release files from the cache.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
// NOTE: every field is `Option`-wrapped and marked `#[doc(hidden)]`; callers are
// expected to read fields through the accessor methods on the `impl` below.
pub struct DataRepositoryTask {
    /// <p>The system-generated, unique 17-digit ID of the data repository task.</p>
    #[doc(hidden)]
    pub task_id: std::option::Option<std::string::String>,
    /// <p>The lifecycle status of the data repository task, as follows:</p>
    /// <ul>
    /// <li> <p> <code>PENDING</code> - The task has not started.</p> </li>
    /// <li> <p> <code>EXECUTING</code> - The task is in process.</p> </li>
    /// <li> <p> <code>FAILED</code> - The task was not able to be completed. For example, there may be files the task failed to process. The <code>DataRepositoryTaskFailureDetails</code> property provides more information about task failures.</p> </li>
    /// <li> <p> <code>SUCCEEDED</code> - The task has completed successfully.</p> </li>
    /// <li> <p> <code>CANCELED</code> - The task was canceled and it did not complete.</p> </li>
    /// <li> <p> <code>CANCELING</code> - The task is in process of being canceled.</p> </li>
    /// </ul> <note>
    /// <p>You cannot delete an FSx for Lustre file system if there are data repository tasks for the file system in the <code>PENDING</code> or <code>EXECUTING</code> states. Please retry when the data repository task is finished (with a status of <code>CANCELED</code>, <code>SUCCEEDED</code>, or <code>FAILED</code>). You can use the DescribeDataRepositoryTask action to monitor the task status. Contact the FSx team if you need to delete your file system immediately.</p>
    /// </note>
    #[doc(hidden)]
    pub lifecycle: std::option::Option<crate::model::DataRepositoryTaskLifecycle>,
    /// <p>The type of data repository task.</p>
    /// <ul>
    /// <li> <p> <code>EXPORT_TO_REPOSITORY</code> tasks export from your Amazon FSx for Lustre file system to a linked data repository.</p> </li>
    /// <li> <p> <code>IMPORT_METADATA_FROM_REPOSITORY</code> tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.</p> </li>
    /// <li> <p> <code>AUTO_RELEASE_DATA</code> tasks automatically release files from an Amazon File Cache resource.</p> </li>
    /// </ul>
    #[doc(hidden)]
    // `type` is a Rust keyword, so this field uses the raw identifier `r#type`.
    pub r#type: std::option::Option<crate::model::DataRepositoryTaskType>,
    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
    #[doc(hidden)]
    pub creation_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The time the system began processing the task.</p>
    #[doc(hidden)]
    pub start_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The time the system completed processing the task, populated after the task is complete.</p>
    #[doc(hidden)]
    pub end_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    #[doc(hidden)]
    pub resource_arn: std::option::Option<std::string::String>,
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    #[doc(hidden)]
    pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    /// <p>The globally unique ID of the file system.</p>
    #[doc(hidden)]
    pub file_system_id: std::option::Option<std::string::String>,
    /// <p>An array of paths that specify the data for the data repository task to process. For example, in an EXPORT_TO_REPOSITORY task, the paths specify which data to export to the linked data repository.</p>
    /// <p>(Default) If <code>Paths</code> is not specified, Amazon FSx uses the file system root directory.</p>
    #[doc(hidden)]
    pub paths: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>Failure message describing why the task failed, it is populated only when <code>Lifecycle</code> is set to <code>FAILED</code>.</p>
    #[doc(hidden)]
    pub failure_details: std::option::Option<crate::model::DataRepositoryTaskFailureDetails>,
    /// <p>Provides the status of the number of files that the task has processed successfully and failed to process.</p>
    #[doc(hidden)]
    pub status: std::option::Option<crate::model::DataRepositoryTaskStatus>,
    /// <p>Provides a report detailing the data repository task results of the files processed that match the criteria specified in the report <code>Scope</code> parameter. FSx delivers the report to the file system's linked data repository in Amazon S3, using the path specified in the report <code>Path</code> parameter. You can specify whether or not a report gets generated for a task using the <code>Enabled</code> parameter.</p>
    #[doc(hidden)]
    pub report: std::option::Option<crate::model::CompletionReport>,
    /// <p>Specifies the amount of data to release, in GiB, by an Amazon File Cache AUTO_RELEASE_DATA task that automatically releases files from the cache.</p>
    #[doc(hidden)]
    pub capacity_to_release: std::option::Option<i64>,
    /// <p>The system-generated, unique ID of the cache.</p>
    #[doc(hidden)]
    pub file_cache_id: std::option::Option<std::string::String>,
}
impl DataRepositoryTask {
    /// <p>The system-generated, unique 17-digit ID of the data repository task.</p>
    pub fn task_id(&self) -> std::option::Option<&str> {
        self.task_id.as_ref().map(String::as_str)
    }
    /// <p>The lifecycle status of the data repository task, as follows:</p>
    /// <ul>
    /// <li> <p> <code>PENDING</code> - The task has not started.</p> </li>
    /// <li> <p> <code>EXECUTING</code> - The task is in process.</p> </li>
    /// <li> <p> <code>FAILED</code> - The task was not able to be completed. For example, there may be files the task failed to process. The <code>DataRepositoryTaskFailureDetails</code> property provides more information about task failures.</p> </li>
    /// <li> <p> <code>SUCCEEDED</code> - The task has completed successfully.</p> </li>
    /// <li> <p> <code>CANCELED</code> - The task was canceled and it did not complete.</p> </li>
    /// <li> <p> <code>CANCELING</code> - The task is in process of being canceled.</p> </li>
    /// </ul> <note>
    /// <p>You cannot delete an FSx for Lustre file system if there are data repository tasks for the file system in the <code>PENDING</code> or <code>EXECUTING</code> states. Please retry when the data repository task is finished (with a status of <code>CANCELED</code>, <code>SUCCEEDED</code>, or <code>FAILED</code>). You can use the DescribeDataRepositoryTask action to monitor the task status. Contact the FSx team if you need to delete your file system immediately.</p>
    /// </note>
    pub fn lifecycle(&self) -> std::option::Option<&crate::model::DataRepositoryTaskLifecycle> {
        self.lifecycle.as_ref()
    }
    /// <p>The type of data repository task.</p>
    /// <ul>
    /// <li> <p> <code>EXPORT_TO_REPOSITORY</code> tasks export from your Amazon FSx for Lustre file system to a linked data repository.</p> </li>
    /// <li> <p> <code>IMPORT_METADATA_FROM_REPOSITORY</code> tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.</p> </li>
    /// <li> <p> <code>AUTO_RELEASE_DATA</code> tasks automatically release files from an Amazon File Cache resource.</p> </li>
    /// </ul>
    pub fn r#type(&self) -> std::option::Option<&crate::model::DataRepositoryTaskType> {
        self.r#type.as_ref()
    }
    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
    pub fn creation_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.creation_time.as_ref()
    }
    /// <p>The time the system began processing the task.</p>
    pub fn start_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.start_time.as_ref()
    }
    /// <p>The time the system completed processing the task, populated after the task is complete.</p>
    pub fn end_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.end_time.as_ref()
    }
    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    pub fn resource_arn(&self) -> std::option::Option<&str> {
        self.resource_arn.as_ref().map(String::as_str)
    }
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    pub fn tags(&self) -> std::option::Option<&[crate::model::Tag]> {
        self.tags.as_ref().map(|tags| tags.as_slice())
    }
    /// <p>The globally unique ID of the file system.</p>
    pub fn file_system_id(&self) -> std::option::Option<&str> {
        self.file_system_id.as_ref().map(String::as_str)
    }
    /// <p>An array of paths that specify the data for the data repository task to process. For example, in an EXPORT_TO_REPOSITORY task, the paths specify which data to export to the linked data repository.</p>
    /// <p>(Default) If <code>Paths</code> is not specified, Amazon FSx uses the file system root directory.</p>
    pub fn paths(&self) -> std::option::Option<&[std::string::String]> {
        self.paths.as_ref().map(|paths| paths.as_slice())
    }
    /// <p>Failure message describing why the task failed, it is populated only when <code>Lifecycle</code> is set to <code>FAILED</code>.</p>
    pub fn failure_details(
        &self,
    ) -> std::option::Option<&crate::model::DataRepositoryTaskFailureDetails> {
        self.failure_details.as_ref()
    }
    /// <p>Provides the status of the number of files that the task has processed successfully and failed to process.</p>
    pub fn status(&self) -> std::option::Option<&crate::model::DataRepositoryTaskStatus> {
        self.status.as_ref()
    }
    /// <p>Provides a report detailing the data repository task results of the files processed that match the criteria specified in the report <code>Scope</code> parameter. FSx delivers the report to the file system's linked data repository in Amazon S3, using the path specified in the report <code>Path</code> parameter. You can specify whether or not a report gets generated for a task using the <code>Enabled</code> parameter.</p>
    pub fn report(&self) -> std::option::Option<&crate::model::CompletionReport> {
        self.report.as_ref()
    }
    /// <p>Specifies the amount of data to release, in GiB, by an Amazon File Cache AUTO_RELEASE_DATA task that automatically releases files from the cache.</p>
    pub fn capacity_to_release(&self) -> std::option::Option<i64> {
        // `i64` is `Copy`, so the value is returned by value rather than by reference.
        self.capacity_to_release
    }
    /// <p>The system-generated, unique ID of the cache.</p>
    pub fn file_cache_id(&self) -> std::option::Option<&str> {
        self.file_cache_id.as_ref().map(String::as_str)
    }
}
/// See [`DataRepositoryTask`](crate::model::DataRepositoryTask).
pub mod data_repository_task {

    /// A builder for [`DataRepositoryTask`](crate::model::DataRepositoryTask).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) task_id: std::option::Option<std::string::String>,
        pub(crate) lifecycle: std::option::Option<crate::model::DataRepositoryTaskLifecycle>,
        pub(crate) r#type: std::option::Option<crate::model::DataRepositoryTaskType>,
        pub(crate) creation_time: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) start_time: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) end_time: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) resource_arn: std::option::Option<std::string::String>,
        pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        pub(crate) file_system_id: std::option::Option<std::string::String>,
        pub(crate) paths: std::option::Option<std::vec::Vec<std::string::String>>,
        pub(crate) failure_details:
            std::option::Option<crate::model::DataRepositoryTaskFailureDetails>,
        pub(crate) status: std::option::Option<crate::model::DataRepositoryTaskStatus>,
        pub(crate) report: std::option::Option<crate::model::CompletionReport>,
        pub(crate) capacity_to_release: std::option::Option<i64>,
        pub(crate) file_cache_id: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The system-generated, unique 17-digit ID of the data repository task.</p>
        pub fn task_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.task_id = Some(input.into());
            self
        }
        /// <p>The system-generated, unique 17-digit ID of the data repository task.</p>
        pub fn set_task_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.task_id = input;
            self
        }
        /// <p>The lifecycle status of the data repository task, as follows:</p>
        /// <ul>
        /// <li> <p> <code>PENDING</code> - The task has not started.</p> </li>
        /// <li> <p> <code>EXECUTING</code> - The task is in process.</p> </li>
        /// <li> <p> <code>FAILED</code> - The task was not able to be completed. For example, there may be files the task failed to process. The <code>DataRepositoryTaskFailureDetails</code> property provides more information about task failures.</p> </li>
        /// <li> <p> <code>SUCCEEDED</code> - The task has completed successfully.</p> </li>
        /// <li> <p> <code>CANCELED</code> - The task was canceled and it did not complete.</p> </li>
        /// <li> <p> <code>CANCELING</code> - The task is in process of being canceled.</p> </li>
        /// </ul> <note>
        /// <p>You cannot delete an FSx for Lustre file system if there are data repository tasks for the file system in the <code>PENDING</code> or <code>EXECUTING</code> states. Please retry when the data repository task is finished (with a status of <code>CANCELED</code>, <code>SUCCEEDED</code>, or <code>FAILED</code>). You can use the DescribeDataRepositoryTask action to monitor the task status. Contact the FSx team if you need to delete your file system immediately.</p>
        /// </note>
        pub fn lifecycle(mut self, input: crate::model::DataRepositoryTaskLifecycle) -> Self {
            self.lifecycle = Some(input);
            self
        }
        /// <p>The lifecycle status of the data repository task, as follows:</p>
        /// <ul>
        /// <li> <p> <code>PENDING</code> - The task has not started.</p> </li>
        /// <li> <p> <code>EXECUTING</code> - The task is in process.</p> </li>
        /// <li> <p> <code>FAILED</code> - The task was not able to be completed. For example, there may be files the task failed to process. The <code>DataRepositoryTaskFailureDetails</code> property provides more information about task failures.</p> </li>
        /// <li> <p> <code>SUCCEEDED</code> - The task has completed successfully.</p> </li>
        /// <li> <p> <code>CANCELED</code> - The task was canceled and it did not complete.</p> </li>
        /// <li> <p> <code>CANCELING</code> - The task is in process of being canceled.</p> </li>
        /// </ul> <note>
        /// <p>You cannot delete an FSx for Lustre file system if there are data repository tasks for the file system in the <code>PENDING</code> or <code>EXECUTING</code> states. Please retry when the data repository task is finished (with a status of <code>CANCELED</code>, <code>SUCCEEDED</code>, or <code>FAILED</code>). You can use the DescribeDataRepositoryTask action to monitor the task status. Contact the FSx team if you need to delete your file system immediately.</p>
        /// </note>
        pub fn set_lifecycle(
            mut self,
            input: std::option::Option<crate::model::DataRepositoryTaskLifecycle>,
        ) -> Self {
            self.lifecycle = input;
            self
        }
        /// <p>The type of data repository task.</p>
        /// <ul>
        /// <li> <p> <code>EXPORT_TO_REPOSITORY</code> tasks export from your Amazon FSx for Lustre file system to a linked data repository.</p> </li>
        /// <li> <p> <code>IMPORT_METADATA_FROM_REPOSITORY</code> tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.</p> </li>
        /// <li> <p> <code>AUTO_RELEASE_DATA</code> tasks automatically release files from an Amazon File Cache resource.</p> </li>
        /// </ul>
        pub fn r#type(mut self, input: crate::model::DataRepositoryTaskType) -> Self {
            self.r#type = Some(input);
            self
        }
        /// <p>The type of data repository task.</p>
        /// <ul>
        /// <li> <p> <code>EXPORT_TO_REPOSITORY</code> tasks export from your Amazon FSx for Lustre file system to a linked data repository.</p> </li>
        /// <li> <p> <code>IMPORT_METADATA_FROM_REPOSITORY</code> tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.</p> </li>
        /// <li> <p> <code>AUTO_RELEASE_DATA</code> tasks automatically release files from an Amazon File Cache resource.</p> </li>
        /// </ul>
        pub fn set_type(
            mut self,
            input: std::option::Option<crate::model::DataRepositoryTaskType>,
        ) -> Self {
            self.r#type = input;
            self
        }
        /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
        pub fn creation_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.creation_time = Some(input);
            self
        }
        /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
        pub fn set_creation_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.creation_time = input;
            self
        }
        /// <p>The time the system began processing the task.</p>
        pub fn start_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.start_time = Some(input);
            self
        }
        /// <p>The time the system began processing the task.</p>
        pub fn set_start_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.start_time = input;
            self
        }
        /// <p>The time the system completed processing the task, populated after the task is complete.</p>
        pub fn end_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.end_time = Some(input);
            self
        }
        /// <p>The time the system completed processing the task, populated after the task is complete.</p>
        pub fn set_end_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.end_time = input;
            self
        }
        /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
        pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.resource_arn = Some(input.into());
            self
        }
        /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
        pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.resource_arn = input;
            self
        }
        /// Appends an item to `tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).
        ///
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn tags(mut self, input: crate::model::Tag) -> Self {
            // Initialize the Vec on first use and push in place; avoids moving
            // the Vec out of the Option and re-wrapping it on every append.
            self.tags.get_or_insert_with(Vec::new).push(input);
            self
        }
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn set_tags(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            self.tags = input;
            self
        }
        /// <p>The globally unique ID of the file system.</p>
        pub fn file_system_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.file_system_id = Some(input.into());
            self
        }
        /// <p>The globally unique ID of the file system.</p>
        pub fn set_file_system_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.file_system_id = input;
            self
        }
        /// Appends an item to `paths`.
        ///
        /// To override the contents of this collection use [`set_paths`](Self::set_paths).
        ///
        /// <p>An array of paths that specify the data for the data repository task to process. For example, in an EXPORT_TO_REPOSITORY task, the paths specify which data to export to the linked data repository.</p>
        /// <p>(Default) If <code>Paths</code> is not specified, Amazon FSx uses the file system root directory.</p>
        pub fn paths(mut self, input: impl Into<std::string::String>) -> Self {
            // Same in-place append pattern as `tags` above.
            self.paths.get_or_insert_with(Vec::new).push(input.into());
            self
        }
        /// <p>An array of paths that specify the data for the data repository task to process. For example, in an EXPORT_TO_REPOSITORY task, the paths specify which data to export to the linked data repository.</p>
        /// <p>(Default) If <code>Paths</code> is not specified, Amazon FSx uses the file system root directory.</p>
        pub fn set_paths(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.paths = input;
            self
        }
        /// <p>Failure message describing why the task failed, it is populated only when <code>Lifecycle</code> is set to <code>FAILED</code>.</p>
        pub fn failure_details(
            mut self,
            input: crate::model::DataRepositoryTaskFailureDetails,
        ) -> Self {
            self.failure_details = Some(input);
            self
        }
        /// <p>Failure message describing why the task failed, it is populated only when <code>Lifecycle</code> is set to <code>FAILED</code>.</p>
        pub fn set_failure_details(
            mut self,
            input: std::option::Option<crate::model::DataRepositoryTaskFailureDetails>,
        ) -> Self {
            self.failure_details = input;
            self
        }
        /// <p>Provides the status of the number of files that the task has processed successfully and failed to process.</p>
        pub fn status(mut self, input: crate::model::DataRepositoryTaskStatus) -> Self {
            self.status = Some(input);
            self
        }
        /// <p>Provides the status of the number of files that the task has processed successfully and failed to process.</p>
        pub fn set_status(
            mut self,
            input: std::option::Option<crate::model::DataRepositoryTaskStatus>,
        ) -> Self {
            self.status = input;
            self
        }
        /// <p>Provides a report detailing the data repository task results of the files processed that match the criteria specified in the report <code>Scope</code> parameter. FSx delivers the report to the file system's linked data repository in Amazon S3, using the path specified in the report <code>Path</code> parameter. You can specify whether or not a report gets generated for a task using the <code>Enabled</code> parameter.</p>
        pub fn report(mut self, input: crate::model::CompletionReport) -> Self {
            self.report = Some(input);
            self
        }
        /// <p>Provides a report detailing the data repository task results of the files processed that match the criteria specified in the report <code>Scope</code> parameter. FSx delivers the report to the file system's linked data repository in Amazon S3, using the path specified in the report <code>Path</code> parameter. You can specify whether or not a report gets generated for a task using the <code>Enabled</code> parameter.</p>
        pub fn set_report(
            mut self,
            input: std::option::Option<crate::model::CompletionReport>,
        ) -> Self {
            self.report = input;
            self
        }
        /// <p>Specifies the amount of data to release, in GiB, by an Amazon File Cache AUTO_RELEASE_DATA task that automatically releases files from the cache.</p>
        pub fn capacity_to_release(mut self, input: i64) -> Self {
            self.capacity_to_release = Some(input);
            self
        }
        /// <p>Specifies the amount of data to release, in GiB, by an Amazon File Cache AUTO_RELEASE_DATA task that automatically releases files from the cache.</p>
        pub fn set_capacity_to_release(mut self, input: std::option::Option<i64>) -> Self {
            self.capacity_to_release = input;
            self
        }
        /// <p>The system-generated, unique ID of the cache.</p>
        pub fn file_cache_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.file_cache_id = Some(input.into());
            self
        }
        /// <p>The system-generated, unique ID of the cache.</p>
        pub fn set_file_cache_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.file_cache_id = input;
            self
        }
        /// Consumes the builder and constructs a [`DataRepositoryTask`](crate::model::DataRepositoryTask).
        pub fn build(self) -> crate::model::DataRepositoryTask {
            crate::model::DataRepositoryTask {
                task_id: self.task_id,
                lifecycle: self.lifecycle,
                r#type: self.r#type,
                creation_time: self.creation_time,
                start_time: self.start_time,
                end_time: self.end_time,
                resource_arn: self.resource_arn,
                tags: self.tags,
                file_system_id: self.file_system_id,
                paths: self.paths,
                failure_details: self.failure_details,
                status: self.status,
                report: self.report,
                capacity_to_release: self.capacity_to_release,
                file_cache_id: self.file_cache_id,
            }
        }
    }
}
impl DataRepositoryTask {
    /// Creates a new builder-style object to manufacture [`DataRepositoryTask`](crate::model::DataRepositoryTask).
    pub fn builder() -> crate::model::data_repository_task::Builder {
        Default::default()
    }
}

/// <p>Provides a report detailing the data repository task results of the files processed that match the criteria specified in the report <code>Scope</code> parameter. FSx delivers the report to the file system's linked data repository in Amazon S3, using the path specified in the report <code>Path</code> parameter. You can specify whether or not a report gets generated for a task using the <code>Enabled</code> parameter.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct CompletionReport {
    /// <p>Set <code>Enabled</code> to <code>True</code> to generate a <code>CompletionReport</code> when the task completes. If set to <code>true</code>, then you need to provide a report <code>Scope</code>, <code>Path</code>, and <code>Format</code>. Set <code>Enabled</code> to <code>False</code> if you do not want a <code>CompletionReport</code> generated when the task completes.</p>
    #[doc(hidden)]
    pub enabled: std::option::Option<bool>,
    /// <p>Required if <code>Enabled</code> is set to <code>true</code>. Specifies the location of the report on the file system's linked S3 data repository. An absolute path that defines where the completion report will be stored in the destination location. The <code>Path</code> you provide must be located within the file system’s ExportPath. An example <code>Path</code> value is "s3://myBucket/myExportPath/optionalPrefix". The report provides the following information for each file in the report: FilePath, FileStatus, and ErrorCode. To learn more about a file system's <code>ExportPath</code>, see the <i>Amazon FSx for Lustre User Guide</i>.</p>
    #[doc(hidden)]
    pub path: std::option::Option<std::string::String>,
    /// <p>Required if <code>Enabled</code> is set to <code>true</code>. Specifies the format of the <code>CompletionReport</code>. <code>REPORT_CSV_20191124</code> is the only format currently supported. When <code>Format</code> is set to <code>REPORT_CSV_20191124</code>, the <code>CompletionReport</code> is provided in CSV format, and is delivered to <code>{path}/task-{id}/failures.csv</code>. </p>
    #[doc(hidden)]
    pub format: std::option::Option<crate::model::ReportFormat>,
    /// <p>Required if <code>Enabled</code> is set to <code>true</code>. Specifies the scope of the <code>CompletionReport</code>; <code>FAILED_FILES_ONLY</code> is the only scope currently supported. When <code>Scope</code> is set to <code>FAILED_FILES_ONLY</code>, the <code>CompletionReport</code> only contains information about files that the data repository task failed to process.</p>
    #[doc(hidden)]
    pub scope: std::option::Option<crate::model::ReportScope>,
}
impl CompletionReport {
    /// <p>Whether a <code>CompletionReport</code> is generated when the task completes. When <code>true</code>, a report <code>Scope</code>, <code>Path</code>, and <code>Format</code> must also be provided.</p>
    pub fn enabled(&self) -> std::option::Option<bool> {
        self.enabled
    }
    /// <p>Absolute location for the report in the file system's linked S3 data repository, for example "s3://myBucket/myExportPath/optionalPrefix". Must lie within the file system's <code>ExportPath</code>; required when <code>Enabled</code> is <code>true</code>. The report lists FilePath, FileStatus, and ErrorCode for each file.</p>
    pub fn path(&self) -> std::option::Option<&str> {
        self.path.as_ref().map(|path| path.as_str())
    }
    /// <p>Format of the <code>CompletionReport</code>; required when <code>Enabled</code> is <code>true</code>. <code>REPORT_CSV_20191124</code> (CSV delivered to <code>{path}/task-{id}/failures.csv</code>) is the only format currently supported.</p>
    pub fn format(&self) -> std::option::Option<&crate::model::ReportFormat> {
        self.format.as_ref()
    }
    /// <p>Scope of the <code>CompletionReport</code>; required when <code>Enabled</code> is <code>true</code>. <code>FAILED_FILES_ONLY</code> (only files the task failed to process) is the only scope currently supported.</p>
    pub fn scope(&self) -> std::option::Option<&crate::model::ReportScope> {
        self.scope.as_ref()
    }
}
/// See [`CompletionReport`](crate::model::CompletionReport).
pub mod completion_report {

    /// A builder for [`CompletionReport`](crate::model::CompletionReport).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) enabled: std::option::Option<bool>,
        pub(crate) path: std::option::Option<std::string::String>,
        pub(crate) format: std::option::Option<crate::model::ReportFormat>,
        pub(crate) scope: std::option::Option<crate::model::ReportScope>,
    }
    impl Builder {
        /// <p>Set to <code>true</code> to generate a <code>CompletionReport</code> when the task completes (a report <code>Scope</code>, <code>Path</code>, and <code>Format</code> must then also be provided); set to <code>false</code> to skip report generation.</p>
        pub fn enabled(self, input: bool) -> Self {
            Self {
                enabled: Some(input),
                ..self
            }
        }
        /// <p>Sets or clears the <code>Enabled</code> flag; see [`enabled`](Self::enabled).</p>
        pub fn set_enabled(self, input: std::option::Option<bool>) -> Self {
            Self {
                enabled: input,
                ..self
            }
        }
        /// <p>Location of the report in the file system's linked S3 data repository, for example "s3://myBucket/myExportPath/optionalPrefix". Must lie within the file system's <code>ExportPath</code>; required when <code>Enabled</code> is <code>true</code>. The report lists FilePath, FileStatus, and ErrorCode for each file.</p>
        pub fn path(self, input: impl Into<std::string::String>) -> Self {
            Self {
                path: Some(input.into()),
                ..self
            }
        }
        /// <p>Sets or clears the report <code>Path</code>; see [`path`](Self::path).</p>
        pub fn set_path(self, input: std::option::Option<std::string::String>) -> Self {
            Self { path: input, ..self }
        }
        /// <p>Format of the <code>CompletionReport</code>; required when <code>Enabled</code> is <code>true</code>. <code>REPORT_CSV_20191124</code> (CSV delivered to <code>{path}/task-{id}/failures.csv</code>) is the only format currently supported.</p>
        pub fn format(self, input: crate::model::ReportFormat) -> Self {
            Self {
                format: Some(input),
                ..self
            }
        }
        /// <p>Sets or clears the report <code>Format</code>; see [`format`](Self::format).</p>
        pub fn set_format(self, input: std::option::Option<crate::model::ReportFormat>) -> Self {
            Self {
                format: input,
                ..self
            }
        }
        /// <p>Scope of the <code>CompletionReport</code>; required when <code>Enabled</code> is <code>true</code>. <code>FAILED_FILES_ONLY</code> (only files the task failed to process) is the only scope currently supported.</p>
        pub fn scope(self, input: crate::model::ReportScope) -> Self {
            Self {
                scope: Some(input),
                ..self
            }
        }
        /// <p>Sets or clears the report <code>Scope</code>; see [`scope`](Self::scope).</p>
        pub fn set_scope(self, input: std::option::Option<crate::model::ReportScope>) -> Self {
            Self {
                scope: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`CompletionReport`](crate::model::CompletionReport).
        pub fn build(self) -> crate::model::CompletionReport {
            let Self {
                enabled,
                path,
                format,
                scope,
            } = self;
            crate::model::CompletionReport {
                enabled,
                path,
                format,
                scope,
            }
        }
    }
}
impl CompletionReport {
    /// Creates a new builder-style object to manufacture [`CompletionReport`](crate::model::CompletionReport).
    pub fn builder() -> crate::model::completion_report::Builder {
        Default::default()
    }
}

/// Scope of a data repository task completion report.
///
/// When writing a match expression against `ReportScope`, keep your code
/// forward-compatible: the service may support values that this version of the
/// SDK does not yet model as named variants. Such values deserialize into the
/// `Unknown` variant, whose `as_str` still yields the raw string form. A
/// forward-compatible match therefore looks like:
///
/// ```text
/// # let reportscope = unimplemented!();
/// match reportscope {
///     ReportScope::FailedFilesOnly => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
///
/// With this shape, a value the service calls `NewFeature` hits the
/// second-to-last arm today (as `ReportScope::Unknown(UnknownVariantValue("NewFeature".to_owned()))`,
/// whose `as_str` is `"NewFeature"`), and still hits that arm after upgrading
/// to an SDK version that defines `ReportScope::NewFeature`, because `as_str`
/// on the named variant yields the same string.
///
/// Avoid matching on `Unknown` directly: its inner `UnknownVariantValue` is
/// opaque, and an explicit `Unknown` arm can inadvertently shadow arms you
/// intended to handle named variants.
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum ReportScope {
    /// The report covers only files that the data repository task failed to process.
    FailedFilesOnly,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for ReportScope {
    /// Maps the wire string to its variant; anything unrecognized becomes `Unknown`.
    fn from(s: &str) -> Self {
        if s == "FAILED_FILES_ONLY" {
            Self::FailedFilesOnly
        } else {
            Self::Unknown(crate::types::UnknownVariantValue(s.to_owned()))
        }
    }
}
impl std::str::FromStr for ReportScope {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(ReportScope::from(s))
    }
}
impl ReportScope {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            ReportScope::FailedFilesOnly => "FAILED_FILES_ONLY",
            ReportScope::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["FAILED_FILES_ONLY"]
    }
}
impl AsRef<str> for ReportScope {
    /// Delegates to [`ReportScope::as_str`].
    fn as_ref(&self) -> &str {
        Self::as_str(self)
    }
}

/// Format of a data repository task completion report.
///
/// When writing a match expression against `ReportFormat`, keep your code
/// forward-compatible: the service may support values that this version of the
/// SDK does not yet model as named variants. Such values deserialize into the
/// `Unknown` variant, whose `as_str` still yields the raw string form. A
/// forward-compatible match therefore looks like:
///
/// ```text
/// # let reportformat = unimplemented!();
/// match reportformat {
///     ReportFormat::ReportCsv20191124 => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
///
/// With this shape, a value the service calls `NewFeature` hits the
/// second-to-last arm today (as `ReportFormat::Unknown(UnknownVariantValue("NewFeature".to_owned()))`,
/// whose `as_str` is `"NewFeature"`), and still hits that arm after upgrading
/// to an SDK version that defines `ReportFormat::NewFeature`, because `as_str`
/// on the named variant yields the same string.
///
/// Avoid matching on `Unknown` directly: its inner `UnknownVariantValue` is
/// opaque, and an explicit `Unknown` arm can inadvertently shadow arms you
/// intended to handle named variants.
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum ReportFormat {
    /// CSV report format, delivered to `{path}/task-{id}/failures.csv`.
    ReportCsv20191124,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
// Conversion from the raw wire string; strings this generated code does not
// recognize are preserved verbatim inside the `Unknown` variant.
impl std::convert::From<&str> for ReportFormat {
    fn from(s: &str) -> Self {
        if s == "REPORT_CSV_20191124" {
            Self::ReportCsv20191124
        } else {
            Self::Unknown(crate::types::UnknownVariantValue(s.to_owned()))
        }
    }
}
impl std::str::FromStr for ReportFormat {
    // Parsing never fails: any unrecognized string becomes `Unknown`.
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(Self::from(s))
    }
}
impl ReportFormat {
    /// Returns the wire representation of this enum value.
    pub fn as_str(&self) -> &str {
        if let Self::Unknown(value) = self {
            value.as_str()
        } else {
            "REPORT_CSV_20191124"
        }
    }
    /// Returns the wire values of every variant known when this code was generated.
    pub const fn values() -> &'static [&'static str] {
        &["REPORT_CSV_20191124"]
    }
}
impl AsRef<str> for ReportFormat {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>Provides the task status showing a running total of the total number of files to be processed, the number successfully processed, and the number of files the task failed to process.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
// Every field is `Option` because the service may omit any of them in a response.
pub struct DataRepositoryTaskStatus {
    /// <p>The total number of files that the task will process. While a task is executing, the sum of <code>SucceededCount</code> plus <code>FailedCount</code> may not equal <code>TotalCount</code>. When the task is complete, <code>TotalCount</code> equals the sum of <code>SucceededCount</code> plus <code>FailedCount</code>.</p>
    #[doc(hidden)]
    pub total_count: std::option::Option<i64>,
    /// <p>A running total of the number of files that the task has successfully processed.</p>
    #[doc(hidden)]
    pub succeeded_count: std::option::Option<i64>,
    /// <p>A running total of the number of files that the task failed to process.</p>
    #[doc(hidden)]
    pub failed_count: std::option::Option<i64>,
    /// <p>The time at which the task status was last updated.</p>
    #[doc(hidden)]
    pub last_updated_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The total amount of data, in GiB, released by an Amazon File Cache AUTO_RELEASE_DATA task that automatically releases files from the cache.</p>
    #[doc(hidden)]
    pub released_capacity: std::option::Option<i64>,
}
impl DataRepositoryTaskStatus {
    /// <p>Returns the total number of files the task will process. While the task is executing, <code>SucceededCount</code> plus <code>FailedCount</code> may not equal <code>TotalCount</code>; once the task completes they sum to <code>TotalCount</code>.</p>
    pub fn total_count(&self) -> std::option::Option<i64> {
        self.total_count
    }
    /// <p>Returns the running count of files the task has processed successfully so far.</p>
    pub fn succeeded_count(&self) -> std::option::Option<i64> {
        self.succeeded_count
    }
    /// <p>Returns the running count of files the task could not process.</p>
    pub fn failed_count(&self) -> std::option::Option<i64> {
        self.failed_count
    }
    /// <p>Returns the timestamp of the most recent status update.</p>
    pub fn last_updated_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.last_updated_time.as_ref()
    }
    /// <p>Returns the total amount of data, in GiB, released by an Amazon File Cache AUTO_RELEASE_DATA task.</p>
    pub fn released_capacity(&self) -> std::option::Option<i64> {
        self.released_capacity
    }
}
/// See [`DataRepositoryTaskStatus`](crate::model::DataRepositoryTaskStatus).
pub mod data_repository_task_status {

    /// Accumulates the optional fields of a [`DataRepositoryTaskStatus`](crate::model::DataRepositoryTaskStatus) before construction.
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) total_count: std::option::Option<i64>,
        pub(crate) succeeded_count: std::option::Option<i64>,
        pub(crate) failed_count: std::option::Option<i64>,
        pub(crate) last_updated_time: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) released_capacity: std::option::Option<i64>,
    }
    impl Builder {
        /// <p>Sets the total number of files the task will process.</p>
        pub fn total_count(self, input: i64) -> Self {
            Self { total_count: Some(input), ..self }
        }
        /// <p>Sets or clears the total number of files the task will process.</p>
        pub fn set_total_count(self, input: std::option::Option<i64>) -> Self {
            Self { total_count: input, ..self }
        }
        /// <p>Sets the running count of files processed successfully.</p>
        pub fn succeeded_count(self, input: i64) -> Self {
            Self { succeeded_count: Some(input), ..self }
        }
        /// <p>Sets or clears the running count of files processed successfully.</p>
        pub fn set_succeeded_count(self, input: std::option::Option<i64>) -> Self {
            Self { succeeded_count: input, ..self }
        }
        /// <p>Sets the running count of files the task failed to process.</p>
        pub fn failed_count(self, input: i64) -> Self {
            Self { failed_count: Some(input), ..self }
        }
        /// <p>Sets or clears the running count of files the task failed to process.</p>
        pub fn set_failed_count(self, input: std::option::Option<i64>) -> Self {
            Self { failed_count: input, ..self }
        }
        /// <p>Sets the time at which the task status was last updated.</p>
        pub fn last_updated_time(self, input: aws_smithy_types::DateTime) -> Self {
            Self { last_updated_time: Some(input), ..self }
        }
        /// <p>Sets or clears the time at which the task status was last updated.</p>
        pub fn set_last_updated_time(
            self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            Self { last_updated_time: input, ..self }
        }
        /// <p>Sets the total amount of data, in GiB, released by an Amazon File Cache AUTO_RELEASE_DATA task.</p>
        pub fn released_capacity(self, input: i64) -> Self {
            Self { released_capacity: Some(input), ..self }
        }
        /// <p>Sets or clears the total amount of data, in GiB, released by an Amazon File Cache AUTO_RELEASE_DATA task.</p>
        pub fn set_released_capacity(self, input: std::option::Option<i64>) -> Self {
            Self { released_capacity: input, ..self }
        }
        /// Consumes the builder and constructs a [`DataRepositoryTaskStatus`](crate::model::DataRepositoryTaskStatus).
        pub fn build(self) -> crate::model::DataRepositoryTaskStatus {
            let Self {
                total_count,
                succeeded_count,
                failed_count,
                last_updated_time,
                released_capacity,
            } = self;
            crate::model::DataRepositoryTaskStatus {
                total_count,
                succeeded_count,
                failed_count,
                last_updated_time,
                released_capacity,
            }
        }
    }
}
impl DataRepositoryTaskStatus {
    /// Returns a fresh, empty builder for [`DataRepositoryTaskStatus`](crate::model::DataRepositoryTaskStatus).
    pub fn builder() -> crate::model::data_repository_task_status::Builder {
        Default::default()
    }
}

/// <p>Provides information about why a data repository task failed. Only populated when the task <code>Lifecycle</code> is set to <code>FAILED</code>.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DataRepositoryTaskFailureDetails {
    /// <p>A detailed error message.</p>
    // `None` when the service omits the message from its response.
    #[doc(hidden)]
    pub message: std::option::Option<std::string::String>,
}
impl DataRepositoryTaskFailureDetails {
    /// <p>Returns the detailed error message, if the service supplied one.</p>
    pub fn message(&self) -> std::option::Option<&str> {
        self.message.as_ref().map(|m| m.as_str())
    }
}
/// See [`DataRepositoryTaskFailureDetails`](crate::model::DataRepositoryTaskFailureDetails).
pub mod data_repository_task_failure_details {

    /// Accumulates the optional fields of a [`DataRepositoryTaskFailureDetails`](crate::model::DataRepositoryTaskFailureDetails) before construction.
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) message: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>Sets the detailed error message.</p>
        pub fn message(self, input: impl Into<std::string::String>) -> Self {
            Self { message: Some(input.into()) }
        }
        /// <p>Sets or clears the detailed error message.</p>
        pub fn set_message(self, input: std::option::Option<std::string::String>) -> Self {
            Self { message: input }
        }
        /// Consumes the builder and constructs a [`DataRepositoryTaskFailureDetails`](crate::model::DataRepositoryTaskFailureDetails).
        pub fn build(self) -> crate::model::DataRepositoryTaskFailureDetails {
            let Self { message } = self;
            crate::model::DataRepositoryTaskFailureDetails { message }
        }
    }
}
impl DataRepositoryTaskFailureDetails {
    /// Returns a fresh, empty builder for [`DataRepositoryTaskFailureDetails`](crate::model::DataRepositoryTaskFailureDetails).
    pub fn builder() -> crate::model::data_repository_task_failure_details::Builder {
        Default::default()
    }
}

/// When writing a match expression against `DataRepositoryTaskType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let datarepositorytasktype = unimplemented!();
/// match datarepositorytasktype {
///     DataRepositoryTaskType::AutoTriggeredEviction => { /* ... */ },
///     DataRepositoryTaskType::Export => { /* ... */ },
///     DataRepositoryTaskType::Import => { /* ... */ },
///     DataRepositoryTaskType::Eviction => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `datarepositorytasktype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `DataRepositoryTaskType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `DataRepositoryTaskType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `DataRepositoryTaskType::NewFeature` is defined.
/// Specifically, when `datarepositorytasktype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `DataRepositoryTaskType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum DataRepositoryTaskType {
    /// Serialized on the wire as `AUTO_RELEASE_DATA` — note the variant name differs from the wire value (see the `From<&str>`/`as_str` impls).
    #[allow(missing_docs)] // documentation missing in model
    AutoTriggeredEviction,
    /// Serialized on the wire as `EXPORT_TO_REPOSITORY`.
    #[allow(missing_docs)] // documentation missing in model
    Export,
    /// Serialized on the wire as `IMPORT_METADATA_FROM_REPOSITORY`.
    #[allow(missing_docs)] // documentation missing in model
    Import,
    /// Serialized on the wire as `RELEASE_DATA_FROM_FILESYSTEM` — note the variant name differs from the wire value.
    #[allow(missing_docs)] // documentation missing in model
    Eviction,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
// Conversion from the raw wire string; strings this generated code does not
// recognize are preserved verbatim inside the `Unknown` variant.
impl std::convert::From<&str> for DataRepositoryTaskType {
    fn from(s: &str) -> Self {
        match s {
            "AUTO_RELEASE_DATA" => Self::AutoTriggeredEviction,
            "EXPORT_TO_REPOSITORY" => Self::Export,
            "IMPORT_METADATA_FROM_REPOSITORY" => Self::Import,
            "RELEASE_DATA_FROM_FILESYSTEM" => Self::Eviction,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for DataRepositoryTaskType {
    // Parsing never fails: any unrecognized string becomes `Unknown`.
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(Self::from(s))
    }
}
impl DataRepositoryTaskType {
    /// Returns the wire representation of this enum value.
    pub fn as_str(&self) -> &str {
        match self {
            Self::AutoTriggeredEviction => "AUTO_RELEASE_DATA",
            Self::Export => "EXPORT_TO_REPOSITORY",
            Self::Import => "IMPORT_METADATA_FROM_REPOSITORY",
            Self::Eviction => "RELEASE_DATA_FROM_FILESYSTEM",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns the wire values of every variant known when this code was generated.
    pub const fn values() -> &'static [&'static str] {
        &[
            "AUTO_RELEASE_DATA",
            "EXPORT_TO_REPOSITORY",
            "IMPORT_METADATA_FROM_REPOSITORY",
            "RELEASE_DATA_FROM_FILESYSTEM",
        ]
    }
}
impl AsRef<str> for DataRepositoryTaskType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// When writing a match expression against `DataRepositoryTaskLifecycle`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let datarepositorytasklifecycle = unimplemented!();
/// match datarepositorytasklifecycle {
///     DataRepositoryTaskLifecycle::Canceled => { /* ... */ },
///     DataRepositoryTaskLifecycle::Canceling => { /* ... */ },
///     DataRepositoryTaskLifecycle::Executing => { /* ... */ },
///     DataRepositoryTaskLifecycle::Failed => { /* ... */ },
///     DataRepositoryTaskLifecycle::Pending => { /* ... */ },
///     DataRepositoryTaskLifecycle::Succeeded => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `datarepositorytasklifecycle` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `DataRepositoryTaskLifecycle::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `DataRepositoryTaskLifecycle::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `DataRepositoryTaskLifecycle::NewFeature` is defined.
/// Specifically, when `datarepositorytasklifecycle` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `DataRepositoryTaskLifecycle::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum DataRepositoryTaskLifecycle {
    /// Serialized on the wire as `CANCELED`.
    #[allow(missing_docs)] // documentation missing in model
    Canceled,
    /// Serialized on the wire as `CANCELING`.
    #[allow(missing_docs)] // documentation missing in model
    Canceling,
    /// Serialized on the wire as `EXECUTING`.
    #[allow(missing_docs)] // documentation missing in model
    Executing,
    /// Serialized on the wire as `FAILED`.
    #[allow(missing_docs)] // documentation missing in model
    Failed,
    /// Serialized on the wire as `PENDING`.
    #[allow(missing_docs)] // documentation missing in model
    Pending,
    /// Serialized on the wire as `SUCCEEDED`.
    #[allow(missing_docs)] // documentation missing in model
    Succeeded,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
// Conversion from the raw wire string; strings this generated code does not
// recognize are preserved verbatim inside the `Unknown` variant.
impl std::convert::From<&str> for DataRepositoryTaskLifecycle {
    fn from(s: &str) -> Self {
        match s {
            "CANCELED" => Self::Canceled,
            "CANCELING" => Self::Canceling,
            "EXECUTING" => Self::Executing,
            "FAILED" => Self::Failed,
            "PENDING" => Self::Pending,
            "SUCCEEDED" => Self::Succeeded,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for DataRepositoryTaskLifecycle {
    // Parsing never fails: any unrecognized string becomes `Unknown`.
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(Self::from(s))
    }
}
impl DataRepositoryTaskLifecycle {
    /// Returns the wire representation of this enum value.
    pub fn as_str(&self) -> &str {
        match self {
            Self::Canceled => "CANCELED",
            Self::Canceling => "CANCELING",
            Self::Executing => "EXECUTING",
            Self::Failed => "FAILED",
            Self::Pending => "PENDING",
            Self::Succeeded => "SUCCEEDED",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns the wire values of every variant known when this code was generated.
    pub const fn values() -> &'static [&'static str] {
        &[
            "CANCELED",
            "CANCELING",
            "EXECUTING",
            "FAILED",
            "PENDING",
            "SUCCEEDED",
        ]
    }
}
impl AsRef<str> for DataRepositoryTaskLifecycle {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>(Optional) An array of filter objects you can use to filter the response of data repository tasks you will see in the the response. You can filter the tasks returned in the response by one or more file system IDs, task lifecycles, and by task type. A filter object consists of a filter <code>Name</code>, and one or more <code>Values</code> for the filter.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
// Both fields are `Option`: an unset name or values list is simply omitted from the request.
pub struct DataRepositoryTaskFilter {
    /// <p>Name of the task property to use in filtering the tasks returned in the response.</p>
    /// <ul>
    /// <li> <p>Use <code>file-system-id</code> to retrieve data repository tasks for specific file systems.</p> </li>
    /// <li> <p>Use <code>task-lifecycle</code> to retrieve data repository tasks with one or more specific lifecycle states, as follows: CANCELED, EXECUTING, FAILED, PENDING, and SUCCEEDED.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub name: std::option::Option<crate::model::DataRepositoryTaskFilterName>,
    /// <p>Use Values to include the specific file system IDs and task lifecycle states for the filters you are using.</p>
    #[doc(hidden)]
    pub values: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl DataRepositoryTaskFilter {
    /// <p>Returns the name of the task property this filter applies to (for example, <code>file-system-id</code> or <code>task-lifecycle</code>).</p>
    pub fn name(&self) -> std::option::Option<&crate::model::DataRepositoryTaskFilterName> {
        self.name.as_ref()
    }
    /// <p>Returns the filter values — the specific file system IDs and task lifecycle states being filtered on.</p>
    pub fn values(&self) -> std::option::Option<&[std::string::String]> {
        self.values.as_ref().map(|v| v.as_slice())
    }
}
/// See [`DataRepositoryTaskFilter`](crate::model::DataRepositoryTaskFilter).
pub mod data_repository_task_filter {

    /// Accumulates the optional fields of a [`DataRepositoryTaskFilter`](crate::model::DataRepositoryTaskFilter) before construction.
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) name: std::option::Option<crate::model::DataRepositoryTaskFilterName>,
        pub(crate) values: std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// <p>Sets the name of the task property to filter on (for example, <code>file-system-id</code> or <code>task-lifecycle</code>).</p>
        pub fn name(self, input: crate::model::DataRepositoryTaskFilterName) -> Self {
            Self { name: Some(input), ..self }
        }
        /// <p>Sets or clears the name of the task property to filter on.</p>
        pub fn set_name(
            self,
            input: std::option::Option<crate::model::DataRepositoryTaskFilterName>,
        ) -> Self {
            Self { name: input, ..self }
        }
        /// Appends a single item to `values`, creating the collection on first use.
        ///
        /// To override the contents of this collection use [`set_values`](Self::set_values).
        ///
        /// <p>Use values to include the specific file system IDs and task lifecycle states for the filters you are using.</p>
        pub fn values(mut self, input: impl Into<std::string::String>) -> Self {
            self.values
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// <p>Replaces the entire `values` collection with the given list of file system IDs and task lifecycle states.</p>
        pub fn set_values(
            self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            Self { values: input, ..self }
        }
        /// Consumes the builder and constructs a [`DataRepositoryTaskFilter`](crate::model::DataRepositoryTaskFilter).
        pub fn build(self) -> crate::model::DataRepositoryTaskFilter {
            let Self { name, values } = self;
            crate::model::DataRepositoryTaskFilter { name, values }
        }
    }
}
impl DataRepositoryTaskFilter {
    /// Returns a fresh, empty builder for [`DataRepositoryTaskFilter`](crate::model::DataRepositoryTaskFilter).
    pub fn builder() -> crate::model::data_repository_task_filter::Builder {
        Default::default()
    }
}

/// When writing a match expression against `DataRepositoryTaskFilterName`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let datarepositorytaskfiltername = unimplemented!();
/// match datarepositorytaskfiltername {
///     DataRepositoryTaskFilterName::DataRepoAssociationId => { /* ... */ },
///     DataRepositoryTaskFilterName::FileCacheId => { /* ... */ },
///     DataRepositoryTaskFilterName::FileSystemId => { /* ... */ },
///     DataRepositoryTaskFilterName::TaskLifecycle => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `datarepositorytaskfiltername` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `DataRepositoryTaskFilterName::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `DataRepositoryTaskFilterName::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `DataRepositoryTaskFilterName::NewFeature` is defined.
/// Specifically, when `datarepositorytaskfiltername` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `DataRepositoryTaskFilterName::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum DataRepositoryTaskFilterName {
    /// Serialized on the wire as `data-repository-association-id`.
    #[allow(missing_docs)] // documentation missing in model
    DataRepoAssociationId,
    /// Serialized on the wire as `file-cache-id`.
    #[allow(missing_docs)] // documentation missing in model
    FileCacheId,
    /// Serialized on the wire as `file-system-id`.
    #[allow(missing_docs)] // documentation missing in model
    FileSystemId,
    /// Serialized on the wire as `task-lifecycle`.
    #[allow(missing_docs)] // documentation missing in model
    TaskLifecycle,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
// Conversion from the raw wire string; strings this generated code does not
// recognize are preserved verbatim inside the `Unknown` variant.
impl std::convert::From<&str> for DataRepositoryTaskFilterName {
    fn from(s: &str) -> Self {
        match s {
            "data-repository-association-id" => Self::DataRepoAssociationId,
            "file-cache-id" => Self::FileCacheId,
            "file-system-id" => Self::FileSystemId,
            "task-lifecycle" => Self::TaskLifecycle,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for DataRepositoryTaskFilterName {
    // Parsing never fails: any unrecognized string becomes `Unknown`.
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(Self::from(s))
    }
}
impl DataRepositoryTaskFilterName {
    /// Returns the wire representation of this enum value.
    pub fn as_str(&self) -> &str {
        match self {
            Self::DataRepoAssociationId => "data-repository-association-id",
            Self::FileCacheId => "file-cache-id",
            Self::FileSystemId => "file-system-id",
            Self::TaskLifecycle => "task-lifecycle",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns the wire values of every variant known when this code was generated.
    pub const fn values() -> &'static [&'static str] {
        &[
            "data-repository-association-id",
            "file-cache-id",
            "file-system-id",
            "task-lifecycle",
        ]
    }
}
impl AsRef<str> for DataRepositoryTaskFilterName {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>A filter used to restrict the results of describe calls. You can use multiple filters to return results that meet all applied filter requirements.</p>
// Fields are `#[doc(hidden)]`; callers should read them through the accessor
// methods on `Filter` and construct values via `Filter::builder()`.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Filter {
    /// <p>The name for this filter.</p>
    #[doc(hidden)]
    pub name: std::option::Option<crate::model::FilterName>,
    /// <p>The values of the filter. These are all the values for any of the applied filters.</p>
    #[doc(hidden)]
    pub values: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Filter {
    /// <p>The name for this filter.</p>
    pub fn name(&self) -> std::option::Option<&crate::model::FilterName> {
        // `From<&Option<T>> for Option<&T>` borrows the inner value, if any.
        std::option::Option::from(&self.name)
    }
    /// <p>The values of the filter. These are all the values for any of the applied filters.</p>
    pub fn values(&self) -> std::option::Option<&[std::string::String]> {
        self.values.as_ref().map(|v| v.as_slice())
    }
}
/// See [`Filter`](crate::model::Filter).
pub mod filter {

    /// A builder for [`Filter`](crate::model::Filter).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) name: std::option::Option<crate::model::FilterName>,
        pub(crate) values: std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// <p>The name for this filter.</p>
        pub fn name(self, input: crate::model::FilterName) -> Self {
            // Fluent setter delegates to the `Option`-taking form.
            self.set_name(std::option::Option::Some(input))
        }
        /// <p>The name for this filter.</p>
        pub fn set_name(self, input: std::option::Option<crate::model::FilterName>) -> Self {
            Self {
                name: input,
                ..self
            }
        }
        /// Appends an item to `values`.
        ///
        /// To override the contents of this collection use [`set_values`](Self::set_values).
        ///
        /// <p>The values of the filter. These are all the values for any of the applied filters.</p>
        pub fn values(mut self, input: impl Into<std::string::String>) -> Self {
            // Lazily create the backing vector on first append.
            self.values
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// <p>The values of the filter. These are all the values for any of the applied filters.</p>
        pub fn set_values(
            self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            Self {
                values: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`Filter`](crate::model::Filter).
        pub fn build(self) -> crate::model::Filter {
            let Self { name, values } = self;
            crate::model::Filter { name, values }
        }
    }
}
impl Filter {
    /// Creates a new builder-style object to manufacture [`Filter`](crate::model::Filter).
    pub fn builder() -> crate::model::filter::Builder {
        std::default::Default::default()
    }
}

/// When writing a match expression against `FilterName`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let filtername = unimplemented!();
/// match filtername {
///     FilterName::BackupType => { /* ... */ },
///     FilterName::DataRepositoryType => { /* ... */ },
///     FilterName::FileCacheId => { /* ... */ },
///     FilterName::FileCacheType => { /* ... */ },
///     FilterName::FileSystemId => { /* ... */ },
///     FilterName::FileSystemType => { /* ... */ },
///     FilterName::VolumeId => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `filtername` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `FilterName::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `FilterName::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `FilterName::NewFeature` is defined.
/// Specifically, when `filtername` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `FilterName::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
/// <p>The name for a filter.</p>
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum FilterName {
    /// Corresponds to the service string `backup-type`.
    #[allow(missing_docs)] // documentation missing in model
    BackupType,
    /// Corresponds to the service string `data-repository-type`.
    #[allow(missing_docs)] // documentation missing in model
    DataRepositoryType,
    /// Corresponds to the service string `file-cache-id`.
    #[allow(missing_docs)] // documentation missing in model
    FileCacheId,
    /// Corresponds to the service string `file-cache-type`.
    #[allow(missing_docs)] // documentation missing in model
    FileCacheType,
    /// Corresponds to the service string `file-system-id`.
    #[allow(missing_docs)] // documentation missing in model
    FileSystemId,
    /// Corresponds to the service string `file-system-type`.
    #[allow(missing_docs)] // documentation missing in model
    FileSystemType,
    /// Corresponds to the service string `volume-id`.
    #[allow(missing_docs)] // documentation missing in model
    VolumeId,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for FilterName {
    /// Maps a raw filter-name string to its variant; strings not known to
    /// this SDK version are preserved in `Unknown` for forward compatibility.
    fn from(s: &str) -> Self {
        match s {
            "backup-type" => Self::BackupType,
            "data-repository-type" => Self::DataRepositoryType,
            "file-cache-id" => Self::FileCacheId,
            "file-cache-type" => Self::FileCacheType,
            "file-system-id" => Self::FileSystemId,
            "file-system-type" => Self::FileSystemType,
            "volume-id" => Self::VolumeId,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for FilterName {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(FilterName::from(s))
    }
}
impl FilterName {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            FilterName::BackupType => "backup-type",
            FilterName::DataRepositoryType => "data-repository-type",
            FilterName::FileCacheId => "file-cache-id",
            FilterName::FileCacheType => "file-cache-type",
            FilterName::FileSystemId => "file-system-id",
            FilterName::FileSystemType => "file-system-type",
            FilterName::VolumeId => "volume-id",
            FilterName::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &[
            "backup-type",
            "data-repository-type",
            "file-cache-id",
            "file-cache-type",
            "file-system-id",
            "file-system-type",
            "volume-id",
        ]
    }
}
impl AsRef<str> for FilterName {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>A backup of an Amazon FSx for Windows File Server, Amazon FSx for Lustre file system, Amazon FSx for NetApp ONTAP volume, or Amazon FSx for OpenZFS file system.</p>
// Fields are `#[doc(hidden)]`; callers should read them through the accessor
// methods on `Backup` and construct values via `Backup::builder()`.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Backup {
    /// <p>The ID of the backup.</p>
    #[doc(hidden)]
    pub backup_id: std::option::Option<std::string::String>,
    /// <p>The lifecycle status of the backup.</p>
    /// <ul>
    /// <li> <p> <code>AVAILABLE</code> - The backup is fully available.</p> </li>
    /// <li> <p> <code>PENDING</code> - For user-initiated backups on Lustre file systems only; Amazon FSx hasn't started creating the backup.</p> </li>
    /// <li> <p> <code>CREATING</code> - Amazon FSx is creating the backup.</p> </li>
    /// <li> <p> <code>TRANSFERRING</code> - For user-initiated backups on Lustre file systems only; Amazon FSx is transferring the backup to Amazon S3.</p> </li>
    /// <li> <p> <code>COPYING</code> - Amazon FSx is copying the backup.</p> </li>
    /// <li> <p> <code>DELETED</code> - Amazon FSx deleted the backup and it's no longer available.</p> </li>
    /// <li> <p> <code>FAILED</code> - Amazon FSx couldn't finish the backup.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub lifecycle: std::option::Option<crate::model::BackupLifecycle>,
    /// <p>Details explaining any failures that occurred when creating a backup.</p>
    #[doc(hidden)]
    pub failure_details: std::option::Option<crate::model::BackupFailureDetails>,
    /// <p>The type of the file-system backup.</p>
    // Raw identifier: `type` is a Rust keyword.
    #[doc(hidden)]
    pub r#type: std::option::Option<crate::model::BackupType>,
    /// <p>The current percent of progress of an asynchronous task.</p>
    #[doc(hidden)]
    pub progress_percent: std::option::Option<i32>,
    /// <p>The time when a particular backup was created.</p>
    #[doc(hidden)]
    pub creation_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The ID of the Key Management Service (KMS) key used to encrypt the backup of the Amazon FSx file system's data at rest. </p>
    #[doc(hidden)]
    pub kms_key_id: std::option::Option<std::string::String>,
    /// <p>The Amazon Resource Name (ARN) for the backup resource.</p>
    #[doc(hidden)]
    pub resource_arn: std::option::Option<std::string::String>,
    /// <p>The tags associated with a particular file system.</p>
    #[doc(hidden)]
    pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    /// <p>The metadata of the file system associated with the backup. This metadata is persisted even if the file system is deleted.</p>
    #[doc(hidden)]
    pub file_system: std::option::Option<crate::model::FileSystem>,
    /// <p>The configuration of the self-managed Microsoft Active Directory directory to which the Windows File Server instance is joined.</p>
    #[doc(hidden)]
    pub directory_information: std::option::Option<crate::model::ActiveDirectoryBackupAttributes>,
    /// <p>An Amazon Web Services account ID. This ID is a 12-digit number that you use to construct Amazon Resource Names (ARNs) for resources.</p>
    #[doc(hidden)]
    pub owner_id: std::option::Option<std::string::String>,
    /// <p>The ID of the source backup. Specifies the backup that you are copying.</p>
    #[doc(hidden)]
    pub source_backup_id: std::option::Option<std::string::String>,
    /// <p>The source Region of the backup. Specifies the Region from where this backup is copied.</p>
    #[doc(hidden)]
    pub source_backup_region: std::option::Option<std::string::String>,
    /// <p>Specifies the resource type that's backed up.</p>
    #[doc(hidden)]
    pub resource_type: std::option::Option<crate::model::ResourceType>,
    /// <p>Describes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.</p>
    #[doc(hidden)]
    pub volume: std::option::Option<crate::model::Volume>,
}
impl Backup {
    /// <p>The ID of the backup.</p>
    pub fn backup_id(&self) -> std::option::Option<&str> {
        self.backup_id.as_ref().map(std::string::String::as_str)
    }
    /// <p>The lifecycle status of the backup.</p>
    /// <ul>
    /// <li> <p> <code>AVAILABLE</code> - The backup is fully available.</p> </li>
    /// <li> <p> <code>PENDING</code> - For user-initiated backups on Lustre file systems only; Amazon FSx hasn't started creating the backup.</p> </li>
    /// <li> <p> <code>CREATING</code> - Amazon FSx is creating the backup.</p> </li>
    /// <li> <p> <code>TRANSFERRING</code> - For user-initiated backups on Lustre file systems only; Amazon FSx is transferring the backup to Amazon S3.</p> </li>
    /// <li> <p> <code>COPYING</code> - Amazon FSx is copying the backup.</p> </li>
    /// <li> <p> <code>DELETED</code> - Amazon FSx deleted the backup and it's no longer available.</p> </li>
    /// <li> <p> <code>FAILED</code> - Amazon FSx couldn't finish the backup.</p> </li>
    /// </ul>
    pub fn lifecycle(&self) -> std::option::Option<&crate::model::BackupLifecycle> {
        // `From<&Option<T>> for Option<&T>` borrows the inner value, if any.
        std::option::Option::from(&self.lifecycle)
    }
    /// <p>Details explaining any failures that occurred when creating a backup.</p>
    pub fn failure_details(&self) -> std::option::Option<&crate::model::BackupFailureDetails> {
        std::option::Option::from(&self.failure_details)
    }
    /// <p>The type of the file-system backup.</p>
    pub fn r#type(&self) -> std::option::Option<&crate::model::BackupType> {
        std::option::Option::from(&self.r#type)
    }
    /// <p>The current percent of progress of an asynchronous task.</p>
    pub fn progress_percent(&self) -> std::option::Option<i32> {
        self.progress_percent
    }
    /// <p>The time when a particular backup was created.</p>
    pub fn creation_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        std::option::Option::from(&self.creation_time)
    }
    /// <p>The ID of the Key Management Service (KMS) key used to encrypt the backup of the Amazon FSx file system's data at rest. </p>
    pub fn kms_key_id(&self) -> std::option::Option<&str> {
        self.kms_key_id.as_ref().map(std::string::String::as_str)
    }
    /// <p>The Amazon Resource Name (ARN) for the backup resource.</p>
    pub fn resource_arn(&self) -> std::option::Option<&str> {
        self.resource_arn.as_ref().map(std::string::String::as_str)
    }
    /// <p>The tags associated with a particular file system.</p>
    pub fn tags(&self) -> std::option::Option<&[crate::model::Tag]> {
        self.tags.as_ref().map(|v| v.as_slice())
    }
    /// <p>The metadata of the file system associated with the backup. This metadata is persisted even if the file system is deleted.</p>
    pub fn file_system(&self) -> std::option::Option<&crate::model::FileSystem> {
        std::option::Option::from(&self.file_system)
    }
    /// <p>The configuration of the self-managed Microsoft Active Directory directory to which the Windows File Server instance is joined.</p>
    pub fn directory_information(
        &self,
    ) -> std::option::Option<&crate::model::ActiveDirectoryBackupAttributes> {
        std::option::Option::from(&self.directory_information)
    }
    /// <p>An Amazon Web Services account ID. This ID is a 12-digit number that you use to construct Amazon Resource Names (ARNs) for resources.</p>
    pub fn owner_id(&self) -> std::option::Option<&str> {
        self.owner_id.as_ref().map(std::string::String::as_str)
    }
    /// <p>The ID of the source backup. Specifies the backup that you are copying.</p>
    pub fn source_backup_id(&self) -> std::option::Option<&str> {
        self.source_backup_id.as_ref().map(std::string::String::as_str)
    }
    /// <p>The source Region of the backup. Specifies the Region from where this backup is copied.</p>
    pub fn source_backup_region(&self) -> std::option::Option<&str> {
        self.source_backup_region.as_ref().map(std::string::String::as_str)
    }
    /// <p>Specifies the resource type that's backed up.</p>
    pub fn resource_type(&self) -> std::option::Option<&crate::model::ResourceType> {
        std::option::Option::from(&self.resource_type)
    }
    /// <p>Describes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.</p>
    pub fn volume(&self) -> std::option::Option<&crate::model::Volume> {
        std::option::Option::from(&self.volume)
    }
}
/// See [`Backup`](crate::model::Backup).
pub mod backup {

    /// A builder for [`Backup`](crate::model::Backup).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) backup_id: std::option::Option<std::string::String>,
        pub(crate) lifecycle: std::option::Option<crate::model::BackupLifecycle>,
        pub(crate) failure_details: std::option::Option<crate::model::BackupFailureDetails>,
        pub(crate) r#type: std::option::Option<crate::model::BackupType>,
        pub(crate) progress_percent: std::option::Option<i32>,
        pub(crate) creation_time: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) kms_key_id: std::option::Option<std::string::String>,
        pub(crate) resource_arn: std::option::Option<std::string::String>,
        pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        pub(crate) file_system: std::option::Option<crate::model::FileSystem>,
        pub(crate) directory_information:
            std::option::Option<crate::model::ActiveDirectoryBackupAttributes>,
        pub(crate) owner_id: std::option::Option<std::string::String>,
        pub(crate) source_backup_id: std::option::Option<std::string::String>,
        pub(crate) source_backup_region: std::option::Option<std::string::String>,
        pub(crate) resource_type: std::option::Option<crate::model::ResourceType>,
        pub(crate) volume: std::option::Option<crate::model::Volume>,
    }
    // Each field has a fluent setter that wraps the value in `Some` and
    // delegates to the corresponding `set_*` method, which replaces the
    // field wholesale via struct-update syntax.
    impl Builder {
        /// <p>The ID of the backup.</p>
        pub fn backup_id(self, input: impl Into<std::string::String>) -> Self {
            self.set_backup_id(std::option::Option::Some(input.into()))
        }
        /// <p>The ID of the backup.</p>
        pub fn set_backup_id(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                backup_id: input,
                ..self
            }
        }
        /// <p>The lifecycle status of the backup.</p>
        /// <ul>
        /// <li> <p> <code>AVAILABLE</code> - The backup is fully available.</p> </li>
        /// <li> <p> <code>PENDING</code> - For user-initiated backups on Lustre file systems only; Amazon FSx hasn't started creating the backup.</p> </li>
        /// <li> <p> <code>CREATING</code> - Amazon FSx is creating the backup.</p> </li>
        /// <li> <p> <code>TRANSFERRING</code> - For user-initiated backups on Lustre file systems only; Amazon FSx is transferring the backup to Amazon S3.</p> </li>
        /// <li> <p> <code>COPYING</code> - Amazon FSx is copying the backup.</p> </li>
        /// <li> <p> <code>DELETED</code> - Amazon FSx deleted the backup and it's no longer available.</p> </li>
        /// <li> <p> <code>FAILED</code> - Amazon FSx couldn't finish the backup.</p> </li>
        /// </ul>
        pub fn lifecycle(self, input: crate::model::BackupLifecycle) -> Self {
            self.set_lifecycle(std::option::Option::Some(input))
        }
        /// <p>The lifecycle status of the backup.</p>
        /// <ul>
        /// <li> <p> <code>AVAILABLE</code> - The backup is fully available.</p> </li>
        /// <li> <p> <code>PENDING</code> - For user-initiated backups on Lustre file systems only; Amazon FSx hasn't started creating the backup.</p> </li>
        /// <li> <p> <code>CREATING</code> - Amazon FSx is creating the backup.</p> </li>
        /// <li> <p> <code>TRANSFERRING</code> - For user-initiated backups on Lustre file systems only; Amazon FSx is transferring the backup to Amazon S3.</p> </li>
        /// <li> <p> <code>COPYING</code> - Amazon FSx is copying the backup.</p> </li>
        /// <li> <p> <code>DELETED</code> - Amazon FSx deleted the backup and it's no longer available.</p> </li>
        /// <li> <p> <code>FAILED</code> - Amazon FSx couldn't finish the backup.</p> </li>
        /// </ul>
        pub fn set_lifecycle(
            self,
            input: std::option::Option<crate::model::BackupLifecycle>,
        ) -> Self {
            Self {
                lifecycle: input,
                ..self
            }
        }
        /// <p>Details explaining any failures that occurred when creating a backup.</p>
        pub fn failure_details(self, input: crate::model::BackupFailureDetails) -> Self {
            self.set_failure_details(std::option::Option::Some(input))
        }
        /// <p>Details explaining any failures that occurred when creating a backup.</p>
        pub fn set_failure_details(
            self,
            input: std::option::Option<crate::model::BackupFailureDetails>,
        ) -> Self {
            Self {
                failure_details: input,
                ..self
            }
        }
        /// <p>The type of the file-system backup.</p>
        pub fn r#type(self, input: crate::model::BackupType) -> Self {
            self.set_type(std::option::Option::Some(input))
        }
        /// <p>The type of the file-system backup.</p>
        pub fn set_type(self, input: std::option::Option<crate::model::BackupType>) -> Self {
            Self {
                r#type: input,
                ..self
            }
        }
        /// <p>The current percent of progress of an asynchronous task.</p>
        pub fn progress_percent(self, input: i32) -> Self {
            self.set_progress_percent(std::option::Option::Some(input))
        }
        /// <p>The current percent of progress of an asynchronous task.</p>
        pub fn set_progress_percent(self, input: std::option::Option<i32>) -> Self {
            Self {
                progress_percent: input,
                ..self
            }
        }
        /// <p>The time when a particular backup was created.</p>
        pub fn creation_time(self, input: aws_smithy_types::DateTime) -> Self {
            self.set_creation_time(std::option::Option::Some(input))
        }
        /// <p>The time when a particular backup was created.</p>
        pub fn set_creation_time(
            self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            Self {
                creation_time: input,
                ..self
            }
        }
        /// <p>The ID of the Key Management Service (KMS) key used to encrypt the backup of the Amazon FSx file system's data at rest. </p>
        pub fn kms_key_id(self, input: impl Into<std::string::String>) -> Self {
            self.set_kms_key_id(std::option::Option::Some(input.into()))
        }
        /// <p>The ID of the Key Management Service (KMS) key used to encrypt the backup of the Amazon FSx file system's data at rest. </p>
        pub fn set_kms_key_id(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                kms_key_id: input,
                ..self
            }
        }
        /// <p>The Amazon Resource Name (ARN) for the backup resource.</p>
        pub fn resource_arn(self, input: impl Into<std::string::String>) -> Self {
            self.set_resource_arn(std::option::Option::Some(input.into()))
        }
        /// <p>The Amazon Resource Name (ARN) for the backup resource.</p>
        pub fn set_resource_arn(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                resource_arn: input,
                ..self
            }
        }
        /// Appends an item to `tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).
        ///
        /// <p>The tags associated with a particular file system.</p>
        pub fn tags(mut self, input: crate::model::Tag) -> Self {
            // Lazily create the backing vector on first append.
            self.tags.get_or_insert_with(std::vec::Vec::new).push(input);
            self
        }
        /// <p>The tags associated with a particular file system.</p>
        pub fn set_tags(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            Self {
                tags: input,
                ..self
            }
        }
        /// <p>The metadata of the file system associated with the backup. This metadata is persisted even if the file system is deleted.</p>
        pub fn file_system(self, input: crate::model::FileSystem) -> Self {
            self.set_file_system(std::option::Option::Some(input))
        }
        /// <p>The metadata of the file system associated with the backup. This metadata is persisted even if the file system is deleted.</p>
        pub fn set_file_system(
            self,
            input: std::option::Option<crate::model::FileSystem>,
        ) -> Self {
            Self {
                file_system: input,
                ..self
            }
        }
        /// <p>The configuration of the self-managed Microsoft Active Directory directory to which the Windows File Server instance is joined.</p>
        pub fn directory_information(
            self,
            input: crate::model::ActiveDirectoryBackupAttributes,
        ) -> Self {
            self.set_directory_information(std::option::Option::Some(input))
        }
        /// <p>The configuration of the self-managed Microsoft Active Directory directory to which the Windows File Server instance is joined.</p>
        pub fn set_directory_information(
            self,
            input: std::option::Option<crate::model::ActiveDirectoryBackupAttributes>,
        ) -> Self {
            Self {
                directory_information: input,
                ..self
            }
        }
        /// <p>An Amazon Web Services account ID. This ID is a 12-digit number that you use to construct Amazon Resource Names (ARNs) for resources.</p>
        pub fn owner_id(self, input: impl Into<std::string::String>) -> Self {
            self.set_owner_id(std::option::Option::Some(input.into()))
        }
        /// <p>An Amazon Web Services account ID. This ID is a 12-digit number that you use to construct Amazon Resource Names (ARNs) for resources.</p>
        pub fn set_owner_id(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                owner_id: input,
                ..self
            }
        }
        /// <p>The ID of the source backup. Specifies the backup that you are copying.</p>
        pub fn source_backup_id(self, input: impl Into<std::string::String>) -> Self {
            self.set_source_backup_id(std::option::Option::Some(input.into()))
        }
        /// <p>The ID of the source backup. Specifies the backup that you are copying.</p>
        pub fn set_source_backup_id(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                source_backup_id: input,
                ..self
            }
        }
        /// <p>The source Region of the backup. Specifies the Region from where this backup is copied.</p>
        pub fn source_backup_region(self, input: impl Into<std::string::String>) -> Self {
            self.set_source_backup_region(std::option::Option::Some(input.into()))
        }
        /// <p>The source Region of the backup. Specifies the Region from where this backup is copied.</p>
        pub fn set_source_backup_region(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                source_backup_region: input,
                ..self
            }
        }
        /// <p>Specifies the resource type that's backed up.</p>
        pub fn resource_type(self, input: crate::model::ResourceType) -> Self {
            self.set_resource_type(std::option::Option::Some(input))
        }
        /// <p>Specifies the resource type that's backed up.</p>
        pub fn set_resource_type(
            self,
            input: std::option::Option<crate::model::ResourceType>,
        ) -> Self {
            Self {
                resource_type: input,
                ..self
            }
        }
        /// <p>Describes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.</p>
        pub fn volume(self, input: crate::model::Volume) -> Self {
            self.set_volume(std::option::Option::Some(input))
        }
        /// <p>Describes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.</p>
        pub fn set_volume(self, input: std::option::Option<crate::model::Volume>) -> Self {
            Self {
                volume: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`Backup`](crate::model::Backup).
        pub fn build(self) -> crate::model::Backup {
            // Destructure so the compiler flags any field added to the
            // builder but not forwarded to the model struct.
            let Self {
                backup_id,
                lifecycle,
                failure_details,
                r#type,
                progress_percent,
                creation_time,
                kms_key_id,
                resource_arn,
                tags,
                file_system,
                directory_information,
                owner_id,
                source_backup_id,
                source_backup_region,
                resource_type,
                volume,
            } = self;
            crate::model::Backup {
                backup_id,
                lifecycle,
                failure_details,
                r#type,
                progress_percent,
                creation_time,
                kms_key_id,
                resource_arn,
                tags,
                file_system,
                directory_information,
                owner_id,
                source_backup_id,
                source_backup_region,
                resource_type,
                volume,
            }
        }
    }
}
impl Backup {
    /// Creates a new builder-style object to manufacture [`Backup`](crate::model::Backup).
    pub fn builder() -> crate::model::backup::Builder {
        std::default::Default::default()
    }
}

/// When writing a match expression against `ResourceType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let resourcetype = unimplemented!();
/// match resourcetype {
///     ResourceType::FileSystem => { /* ... */ },
///     ResourceType::Volume => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `resourcetype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `ResourceType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `ResourceType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `ResourceType::NewFeature` is defined.
/// Specifically, when `resourcetype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `ResourceType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum ResourceType {
    /// Corresponds to the service string `FILE_SYSTEM`.
    #[allow(missing_docs)] // documentation missing in model
    FileSystem,
    /// Corresponds to the service string `VOLUME`.
    #[allow(missing_docs)] // documentation missing in model
    Volume,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for ResourceType {
    /// Maps a raw resource-type string to its variant; strings not known to
    /// this SDK version are preserved in `Unknown` for forward compatibility.
    fn from(s: &str) -> Self {
        match s {
            "FILE_SYSTEM" => Self::FileSystem,
            "VOLUME" => Self::Volume,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for ResourceType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(ResourceType::from(s))
    }
}
impl ResourceType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            ResourceType::FileSystem => "FILE_SYSTEM",
            ResourceType::Volume => "VOLUME",
            ResourceType::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["FILE_SYSTEM", "VOLUME"]
    }
}
impl AsRef<str> for ResourceType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>The Microsoft Active Directory attributes of the Amazon FSx for Windows File Server file system.</p>
// Fields are `#[doc(hidden)]`; read them through the accessor methods on this
// struct and construct values via `ActiveDirectoryBackupAttributes::builder()`.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct ActiveDirectoryBackupAttributes {
    /// <p>The fully qualified domain name of the self-managed Active Directory directory.</p>
    #[doc(hidden)]
    pub domain_name: std::option::Option<std::string::String>,
    /// <p>The ID of the Amazon Web Services Managed Microsoft Active Directory instance to which the file system is joined.</p>
    #[doc(hidden)]
    pub active_directory_id: std::option::Option<std::string::String>,
    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    #[doc(hidden)]
    pub resource_arn: std::option::Option<std::string::String>,
}
impl ActiveDirectoryBackupAttributes {
    /// <p>The fully qualified domain name of the self-managed Active Directory directory.</p>
    pub fn domain_name(&self) -> std::option::Option<&str> {
        self.domain_name.as_ref().map(std::string::String::as_str)
    }
    /// <p>The ID of the Amazon Web Services Managed Microsoft Active Directory instance to which the file system is joined.</p>
    pub fn active_directory_id(&self) -> std::option::Option<&str> {
        self.active_directory_id.as_ref().map(std::string::String::as_str)
    }
    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    pub fn resource_arn(&self) -> std::option::Option<&str> {
        self.resource_arn.as_ref().map(std::string::String::as_str)
    }
}
/// See [`ActiveDirectoryBackupAttributes`](crate::model::ActiveDirectoryBackupAttributes).
pub mod active_directory_backup_attributes {

    /// A builder for [`ActiveDirectoryBackupAttributes`](crate::model::ActiveDirectoryBackupAttributes).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) domain_name: std::option::Option<std::string::String>,
        pub(crate) active_directory_id: std::option::Option<std::string::String>,
        pub(crate) resource_arn: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The fully qualified domain name of the self-managed Active Directory directory.</p>
        pub fn domain_name(self, input: impl Into<std::string::String>) -> Self {
            // Fluent form delegates to the `set_` variant.
            self.set_domain_name(Some(input.into()))
        }
        /// <p>The fully qualified domain name of the self-managed Active Directory directory.</p>
        pub fn set_domain_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.domain_name = input;
            self
        }
        /// <p>The ID of the Amazon Web Services Managed Microsoft Active Directory instance to which the file system is joined.</p>
        pub fn active_directory_id(self, input: impl Into<std::string::String>) -> Self {
            self.set_active_directory_id(Some(input.into()))
        }
        /// <p>The ID of the Amazon Web Services Managed Microsoft Active Directory instance to which the file system is joined.</p>
        pub fn set_active_directory_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.active_directory_id = input;
            self
        }
        /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
        pub fn resource_arn(self, input: impl Into<std::string::String>) -> Self {
            self.set_resource_arn(Some(input.into()))
        }
        /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
        pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.resource_arn = input;
            self
        }
        /// Consumes the builder and constructs a [`ActiveDirectoryBackupAttributes`](crate::model::ActiveDirectoryBackupAttributes).
        pub fn build(self) -> crate::model::ActiveDirectoryBackupAttributes {
            let Self {
                domain_name,
                active_directory_id,
                resource_arn,
            } = self;
            crate::model::ActiveDirectoryBackupAttributes {
                domain_name,
                active_directory_id,
                resource_arn,
            }
        }
    }
}
impl ActiveDirectoryBackupAttributes {
    /// Creates a new builder-style object to manufacture [`ActiveDirectoryBackupAttributes`](crate::model::ActiveDirectoryBackupAttributes).
    pub fn builder() -> crate::model::active_directory_backup_attributes::Builder {
        // Builder derives `Default`, so the empty builder is just its default value.
        std::default::Default::default()
    }
}

/// When writing a match expression against `BackupType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let backuptype = unimplemented!();
/// match backuptype {
///     BackupType::Automatic => { /* ... */ },
///     BackupType::AwsBackup => { /* ... */ },
///     BackupType::UserInitiated => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `backuptype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `BackupType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `BackupType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `BackupType::NewFeature` is defined.
/// Specifically, when `backuptype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `BackupType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
/// <p>The type of the backup.</p>
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum BackupType {
    /// An automatic backup; serialized as the string `"AUTOMATIC"`.
    #[allow(missing_docs)] // documentation missing in model
    Automatic,
    /// A backup created by Amazon Web Services Backup; serialized as `"AWS_BACKUP"`.
    #[allow(missing_docs)] // documentation missing in model
    AwsBackup,
    /// A user-initiated backup; serialized as `"USER_INITIATED"`.
    #[allow(missing_docs)] // documentation missing in model
    UserInitiated,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for BackupType {
    fn from(s: &str) -> Self {
        // Capture unrecognized wire strings so newer service values still round-trip.
        match s {
            "AUTOMATIC" => Self::Automatic,
            "AWS_BACKUP" => Self::AwsBackup,
            "USER_INITIATED" => Self::UserInitiated,
            unknown => Self::Unknown(crate::types::UnknownVariantValue(unknown.to_owned())),
        }
    }
}
impl std::str::FromStr for BackupType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(BackupType::from(s))
    }
}
impl BackupType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            BackupType::Automatic => "AUTOMATIC",
            BackupType::AwsBackup => "AWS_BACKUP",
            BackupType::UserInitiated => "USER_INITIATED",
            BackupType::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["AUTOMATIC", "AWS_BACKUP", "USER_INITIATED"]
    }
}
impl AsRef<str> for BackupType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>If backup creation fails, this structure contains the details of that failure.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct BackupFailureDetails {
    /// <p>A message describing the backup-creation failure.</p>
    // `#[doc(hidden)]`: read via the `message()` accessor rather than the raw field.
    #[doc(hidden)]
    pub message: std::option::Option<std::string::String>,
}
impl BackupFailureDetails {
    /// <p>A message describing the backup-creation failure.</p>
    pub fn message(&self) -> std::option::Option<&str> {
        self.message.as_ref().map(std::string::String::as_str)
    }
}
/// See [`BackupFailureDetails`](crate::model::BackupFailureDetails).
pub mod backup_failure_details {

    /// A builder for [`BackupFailureDetails`](crate::model::BackupFailureDetails).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) message: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>A message describing the backup-creation failure.</p>
        pub fn message(self, input: impl Into<std::string::String>) -> Self {
            // Fluent form delegates to the `set_` variant.
            self.set_message(Some(input.into()))
        }
        /// <p>A message describing the backup-creation failure.</p>
        pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.message = input;
            self
        }
        /// Consumes the builder and constructs a [`BackupFailureDetails`](crate::model::BackupFailureDetails).
        pub fn build(self) -> crate::model::BackupFailureDetails {
            let Self { message } = self;
            crate::model::BackupFailureDetails { message }
        }
    }
}
impl BackupFailureDetails {
    /// Creates a new builder-style object to manufacture [`BackupFailureDetails`](crate::model::BackupFailureDetails).
    pub fn builder() -> crate::model::backup_failure_details::Builder {
        // Builder derives `Default`, so the empty builder is just its default value.
        std::default::Default::default()
    }
}

/// When writing a match expression against `BackupLifecycle`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let backuplifecycle = unimplemented!();
/// match backuplifecycle {
///     BackupLifecycle::Available => { /* ... */ },
///     BackupLifecycle::Copying => { /* ... */ },
///     BackupLifecycle::Creating => { /* ... */ },
///     BackupLifecycle::Deleted => { /* ... */ },
///     BackupLifecycle::Failed => { /* ... */ },
///     BackupLifecycle::Pending => { /* ... */ },
///     BackupLifecycle::Transferring => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `backuplifecycle` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `BackupLifecycle::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `BackupLifecycle::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `BackupLifecycle::NewFeature` is defined.
/// Specifically, when `backuplifecycle` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `BackupLifecycle::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
/// <p>The lifecycle status of the backup.</p>
/// <ul>
/// <li>
/// <p>
/// <code>AVAILABLE</code> - The backup is fully available.</p>
/// </li>
/// <li>
/// <p>
/// <code>PENDING</code> - For user-initiated backups on Lustre file systems only; Amazon FSx hasn't started creating the backup.</p>
/// </li>
/// <li>
/// <p>
/// <code>CREATING</code> - Amazon FSx is creating the new user-initiated backup.</p>
/// </li>
/// <li>
/// <p>
/// <code>TRANSFERRING</code> - For user-initiated backups on Lustre file systems only; Amazon FSx is backing up the file
/// system.</p>
/// </li>
/// <li>
/// <p>
/// <code>COPYING</code> - Amazon FSx is copying the backup.</p>
/// </li>
/// <li>
/// <p>
/// <code>DELETED</code> - Amazon FSx deleted the backup and it's no longer
/// available.</p>
/// </li>
/// <li>
/// <p>
/// <code>FAILED</code> - Amazon FSx couldn't finish the backup.</p>
/// </li>
/// </ul>
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum BackupLifecycle {
    /// The backup is fully available; serialized as `"AVAILABLE"`.
    #[allow(missing_docs)] // documentation missing in model
    Available,
    /// The backup is being copied; serialized as `"COPYING"`.
    #[allow(missing_docs)] // documentation missing in model
    Copying,
    /// The backup is being created; serialized as `"CREATING"`.
    #[allow(missing_docs)] // documentation missing in model
    Creating,
    /// The backup was deleted; serialized as `"DELETED"`.
    #[allow(missing_docs)] // documentation missing in model
    Deleted,
    /// The backup could not be completed; serialized as `"FAILED"`.
    #[allow(missing_docs)] // documentation missing in model
    Failed,
    /// Backup creation has not started; serialized as `"PENDING"`.
    #[allow(missing_docs)] // documentation missing in model
    Pending,
    /// The file system is being backed up; serialized as `"TRANSFERRING"`.
    #[allow(missing_docs)] // documentation missing in model
    Transferring,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for BackupLifecycle {
    fn from(s: &str) -> Self {
        // Capture unrecognized wire strings so newer service values still round-trip.
        match s {
            "AVAILABLE" => Self::Available,
            "COPYING" => Self::Copying,
            "CREATING" => Self::Creating,
            "DELETED" => Self::Deleted,
            "FAILED" => Self::Failed,
            "PENDING" => Self::Pending,
            "TRANSFERRING" => Self::Transferring,
            unknown => Self::Unknown(crate::types::UnknownVariantValue(unknown.to_owned())),
        }
    }
}
impl std::str::FromStr for BackupLifecycle {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(BackupLifecycle::from(s))
    }
}
impl BackupLifecycle {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            BackupLifecycle::Available => "AVAILABLE",
            BackupLifecycle::Copying => "COPYING",
            BackupLifecycle::Creating => "CREATING",
            BackupLifecycle::Deleted => "DELETED",
            BackupLifecycle::Failed => "FAILED",
            BackupLifecycle::Pending => "PENDING",
            BackupLifecycle::Transferring => "TRANSFERRING",
            BackupLifecycle::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &[
            "AVAILABLE",
            "COPYING",
            "CREATING",
            "DELETED",
            "FAILED",
            "PENDING",
            "TRANSFERRING",
        ]
    }
}
impl AsRef<str> for BackupLifecycle {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>The response object for the Amazon FSx for NetApp ONTAP volume being deleted in the <code>DeleteVolume</code> operation.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DeleteVolumeOntapResponse {
    /// <p>The ID of the source backup. Specifies the backup that you are copying.</p>
    // NOTE(review): the model doc above reads like copy-paste from a backup-copy
    // operation; given the field name, this presumably identifies the final backup
    // taken before deletion — confirm against the FSx API reference.
    #[doc(hidden)]
    pub final_backup_id: std::option::Option<std::string::String>,
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    #[doc(hidden)]
    pub final_backup_tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl DeleteVolumeOntapResponse {
    /// <p>The ID of the source backup. Specifies the backup that you are copying.</p>
    pub fn final_backup_id(&self) -> std::option::Option<&str> {
        self.final_backup_id.as_ref().map(std::string::String::as_str)
    }
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    pub fn final_backup_tags(&self) -> std::option::Option<&[crate::model::Tag]> {
        self.final_backup_tags.as_ref().map(|tags| tags.as_slice())
    }
}
/// See [`DeleteVolumeOntapResponse`](crate::model::DeleteVolumeOntapResponse).
pub mod delete_volume_ontap_response {

    /// A builder for [`DeleteVolumeOntapResponse`](crate::model::DeleteVolumeOntapResponse).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) final_backup_id: std::option::Option<std::string::String>,
        pub(crate) final_backup_tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    }
    impl Builder {
        /// <p>The ID of the source backup. Specifies the backup that you are copying.</p>
        pub fn final_backup_id(self, input: impl Into<std::string::String>) -> Self {
            // Fluent form delegates to the `set_` variant.
            self.set_final_backup_id(Some(input.into()))
        }
        /// <p>The ID of the source backup. Specifies the backup that you are copying.</p>
        pub fn set_final_backup_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.final_backup_id = input;
            self
        }
        /// Appends an item to `final_backup_tags`.
        ///
        /// To override the contents of this collection use [`set_final_backup_tags`](Self::set_final_backup_tags).
        ///
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn final_backup_tags(mut self, input: crate::model::Tag) -> Self {
            // Lazily create the Vec on first append.
            self.final_backup_tags.get_or_insert_with(Vec::new).push(input);
            self
        }
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn set_final_backup_tags(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            self.final_backup_tags = input;
            self
        }
        /// Consumes the builder and constructs a [`DeleteVolumeOntapResponse`](crate::model::DeleteVolumeOntapResponse).
        pub fn build(self) -> crate::model::DeleteVolumeOntapResponse {
            let Self {
                final_backup_id,
                final_backup_tags,
            } = self;
            crate::model::DeleteVolumeOntapResponse {
                final_backup_id,
                final_backup_tags,
            }
        }
    }
}
impl DeleteVolumeOntapResponse {
    /// Creates a new builder-style object to manufacture [`DeleteVolumeOntapResponse`](crate::model::DeleteVolumeOntapResponse).
    pub fn builder() -> crate::model::delete_volume_ontap_response::Builder {
        // Builder derives `Default`, so the empty builder is just its default value.
        std::default::Default::default()
    }
}

/// <p>A value that specifies whether to delete all child volumes and snapshots. </p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DeleteVolumeOpenZfsConfiguration {
    /// <p>To delete the volume's child volumes, snapshots, and clones, use the string <code>DELETE_CHILD_VOLUMES_AND_SNAPSHOTS</code>.</p>
    // Each entry is a `DeleteOpenZfsVolumeOption`; see that enum for the accepted values.
    #[doc(hidden)]
    pub options: std::option::Option<std::vec::Vec<crate::model::DeleteOpenZfsVolumeOption>>,
}
impl DeleteVolumeOpenZfsConfiguration {
    /// <p>To delete the volume's child volumes, snapshots, and clones, use the string <code>DELETE_CHILD_VOLUMES_AND_SNAPSHOTS</code>.</p>
    pub fn options(&self) -> std::option::Option<&[crate::model::DeleteOpenZfsVolumeOption]> {
        self.options.as_ref().map(|opts| opts.as_slice())
    }
}
/// See [`DeleteVolumeOpenZfsConfiguration`](crate::model::DeleteVolumeOpenZfsConfiguration).
pub mod delete_volume_open_zfs_configuration {

    /// A builder for [`DeleteVolumeOpenZfsConfiguration`](crate::model::DeleteVolumeOpenZfsConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) options:
            std::option::Option<std::vec::Vec<crate::model::DeleteOpenZfsVolumeOption>>,
    }
    impl Builder {
        /// Appends an item to `options`.
        ///
        /// To override the contents of this collection use [`set_options`](Self::set_options).
        ///
        /// <p>To delete the volume's child volumes, snapshots, and clones, use the string <code>DELETE_CHILD_VOLUMES_AND_SNAPSHOTS</code>.</p>
        pub fn options(mut self, input: crate::model::DeleteOpenZfsVolumeOption) -> Self {
            // Lazily create the Vec on first append.
            self.options.get_or_insert_with(Vec::new).push(input);
            self
        }
        /// <p>To delete the volume's child volumes, snapshots, and clones, use the string <code>DELETE_CHILD_VOLUMES_AND_SNAPSHOTS</code>.</p>
        pub fn set_options(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::DeleteOpenZfsVolumeOption>>,
        ) -> Self {
            self.options = input;
            self
        }
        /// Consumes the builder and constructs a [`DeleteVolumeOpenZfsConfiguration`](crate::model::DeleteVolumeOpenZfsConfiguration).
        pub fn build(self) -> crate::model::DeleteVolumeOpenZfsConfiguration {
            let Self { options } = self;
            crate::model::DeleteVolumeOpenZfsConfiguration { options }
        }
    }
}
impl DeleteVolumeOpenZfsConfiguration {
    /// Creates a new builder-style object to manufacture [`DeleteVolumeOpenZfsConfiguration`](crate::model::DeleteVolumeOpenZfsConfiguration).
    pub fn builder() -> crate::model::delete_volume_open_zfs_configuration::Builder {
        // Builder derives `Default`, so the empty builder is just its default value.
        std::default::Default::default()
    }
}

/// When writing a match expression against `DeleteOpenZfsVolumeOption`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let deleteopenzfsvolumeoption = unimplemented!();
/// match deleteopenzfsvolumeoption {
///     DeleteOpenZfsVolumeOption::DeleteChildVolumesAndSnapshots => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `deleteopenzfsvolumeoption` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `DeleteOpenZfsVolumeOption::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `DeleteOpenZfsVolumeOption::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `DeleteOpenZfsVolumeOption::NewFeature` is defined.
/// Specifically, when `deleteopenzfsvolumeoption` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `DeleteOpenZfsVolumeOption::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum DeleteOpenZfsVolumeOption {
    /// Delete the volume's child volumes, snapshots, and clones;
    /// serialized as the string `"DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"`.
    #[allow(missing_docs)] // documentation missing in model
    DeleteChildVolumesAndSnapshots,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for DeleteOpenZfsVolumeOption {
    fn from(s: &str) -> Self {
        // Capture unrecognized wire strings so newer service values still round-trip.
        match s {
            "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS" => Self::DeleteChildVolumesAndSnapshots,
            unknown => Self::Unknown(crate::types::UnknownVariantValue(unknown.to_owned())),
        }
    }
}
impl std::str::FromStr for DeleteOpenZfsVolumeOption {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(DeleteOpenZfsVolumeOption::from(s))
    }
}
impl DeleteOpenZfsVolumeOption {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            DeleteOpenZfsVolumeOption::DeleteChildVolumesAndSnapshots => {
                "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"
            }
            DeleteOpenZfsVolumeOption::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"]
    }
}
impl AsRef<str> for DeleteOpenZfsVolumeOption {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>Use to specify skipping a final backup, or to add tags to a final backup.</p>
// Fields are `#[doc(hidden)]`; read them through the accessor methods and construct
// values via `DeleteVolumeOntapConfiguration::builder()`.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DeleteVolumeOntapConfiguration {
    /// <p>Set to true if you want to skip taking a final backup of the volume you are deleting.</p>
    #[doc(hidden)]
    pub skip_final_backup: std::option::Option<bool>,
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    #[doc(hidden)]
    pub final_backup_tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl DeleteVolumeOntapConfiguration {
    /// <p>Set to true if you want to skip taking a final backup of the volume you are deleting.</p>
    pub fn skip_final_backup(&self) -> std::option::Option<bool> {
        // `Option<bool>` is `Copy`; return it by value.
        self.skip_final_backup
    }
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    pub fn final_backup_tags(&self) -> std::option::Option<&[crate::model::Tag]> {
        self.final_backup_tags.as_ref().map(|tags| tags.as_slice())
    }
}
/// See [`DeleteVolumeOntapConfiguration`](crate::model::DeleteVolumeOntapConfiguration).
pub mod delete_volume_ontap_configuration {

    /// A builder for [`DeleteVolumeOntapConfiguration`](crate::model::DeleteVolumeOntapConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) skip_final_backup: std::option::Option<bool>,
        pub(crate) final_backup_tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    }
    impl Builder {
        /// <p>Set to true if you want to skip taking a final backup of the volume you are deleting.</p>
        pub fn skip_final_backup(self, input: bool) -> Self {
            // Fluent form delegates to the `set_` variant.
            self.set_skip_final_backup(Some(input))
        }
        /// <p>Set to true if you want to skip taking a final backup of the volume you are deleting.</p>
        pub fn set_skip_final_backup(mut self, input: std::option::Option<bool>) -> Self {
            self.skip_final_backup = input;
            self
        }
        /// Appends an item to `final_backup_tags`.
        ///
        /// To override the contents of this collection use [`set_final_backup_tags`](Self::set_final_backup_tags).
        ///
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn final_backup_tags(mut self, input: crate::model::Tag) -> Self {
            // Lazily create the Vec on first append.
            self.final_backup_tags.get_or_insert_with(Vec::new).push(input);
            self
        }
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn set_final_backup_tags(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            self.final_backup_tags = input;
            self
        }
        /// Consumes the builder and constructs a [`DeleteVolumeOntapConfiguration`](crate::model::DeleteVolumeOntapConfiguration).
        pub fn build(self) -> crate::model::DeleteVolumeOntapConfiguration {
            let Self {
                skip_final_backup,
                final_backup_tags,
            } = self;
            crate::model::DeleteVolumeOntapConfiguration {
                skip_final_backup,
                final_backup_tags,
            }
        }
    }
}
impl DeleteVolumeOntapConfiguration {
    /// Creates a new builder-style object to manufacture [`DeleteVolumeOntapConfiguration`](crate::model::DeleteVolumeOntapConfiguration).
    pub fn builder() -> crate::model::delete_volume_ontap_configuration::Builder {
        // The builder derives `Default`, so an empty one is its default value.
        Default::default()
    }
}

/// <p>The response object for the Amazon FSx for OpenZFS file system that's being deleted in the <code>DeleteFileSystem</code> operation.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DeleteFileSystemOpenZfsResponse {
    /// <p>The ID of the final backup for this file system.</p>
    #[doc(hidden)]
    pub final_backup_id: std::option::Option<std::string::String>,
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    #[doc(hidden)]
    pub final_backup_tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl DeleteFileSystemOpenZfsResponse {
    /// <p>The ID of the final backup for this file system.</p>
    pub fn final_backup_id(&self) -> std::option::Option<&str> {
        self.final_backup_id.as_deref()
    }
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    pub fn final_backup_tags(&self) -> std::option::Option<&[crate::model::Tag]> {
        self.final_backup_tags.as_deref()
    }
}
/// See [`DeleteFileSystemOpenZfsResponse`](crate::model::DeleteFileSystemOpenZfsResponse).
pub mod delete_file_system_open_zfs_response {

    /// A builder for [`DeleteFileSystemOpenZfsResponse`](crate::model::DeleteFileSystemOpenZfsResponse).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) final_backup_id: std::option::Option<std::string::String>,
        pub(crate) final_backup_tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    }
    impl Builder {
        /// <p>The ID of the final backup for this file system.</p>
        pub fn final_backup_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.final_backup_id = Some(input.into());
            self
        }
        /// <p>The ID of the final backup for this file system.</p>
        pub fn set_final_backup_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.final_backup_id = input;
            self
        }
        /// Appends an item to `final_backup_tags`.
        ///
        /// To override the contents of this collection use [`set_final_backup_tags`](Self::set_final_backup_tags).
        ///
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn final_backup_tags(mut self, input: crate::model::Tag) -> Self {
            let mut v = self.final_backup_tags.unwrap_or_default();
            v.push(input);
            self.final_backup_tags = Some(v);
            self
        }
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn set_final_backup_tags(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            self.final_backup_tags = input;
            self
        }
        /// Consumes the builder and constructs a [`DeleteFileSystemOpenZfsResponse`](crate::model::DeleteFileSystemOpenZfsResponse).
        pub fn build(self) -> crate::model::DeleteFileSystemOpenZfsResponse {
            crate::model::DeleteFileSystemOpenZfsResponse {
                final_backup_id: self.final_backup_id,
                final_backup_tags: self.final_backup_tags,
            }
        }
    }
}
impl DeleteFileSystemOpenZfsResponse {
    /// Creates a new builder-style object to manufacture [`DeleteFileSystemOpenZfsResponse`](crate::model::DeleteFileSystemOpenZfsResponse).
    pub fn builder() -> crate::model::delete_file_system_open_zfs_response::Builder {
        crate::model::delete_file_system_open_zfs_response::Builder::default()
    }
}

/// <p>The response object for the Amazon FSx for Lustre file system being deleted in the <code>DeleteFileSystem</code> operation.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DeleteFileSystemLustreResponse {
    /// <p>The ID of the final backup for this file system.</p>
    #[doc(hidden)]
    pub final_backup_id: std::option::Option<std::string::String>,
    /// <p>The set of tags applied to the final backup.</p>
    #[doc(hidden)]
    pub final_backup_tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl DeleteFileSystemLustreResponse {
    /// <p>The ID of the final backup for this file system.</p>
    pub fn final_backup_id(&self) -> std::option::Option<&str> {
        // Borrow the inner `String`, if any, as a `&str`.
        self.final_backup_id.as_ref().map(|id| id.as_str())
    }
    /// <p>The set of tags applied to the final backup.</p>
    pub fn final_backup_tags(&self) -> std::option::Option<&[crate::model::Tag]> {
        // Borrow the inner `Vec`, if any, as a slice.
        self.final_backup_tags.as_ref().map(|tags| tags.as_slice())
    }
}
/// See [`DeleteFileSystemLustreResponse`](crate::model::DeleteFileSystemLustreResponse).
pub mod delete_file_system_lustre_response {

    /// A builder for [`DeleteFileSystemLustreResponse`](crate::model::DeleteFileSystemLustreResponse).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) final_backup_id: std::option::Option<std::string::String>,
        pub(crate) final_backup_tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    }
    impl Builder {
        /// <p>The ID of the final backup for this file system.</p>
        pub fn final_backup_id(self, input: impl Into<std::string::String>) -> Self {
            // Delegate to the `Option`-taking setter.
            self.set_final_backup_id(Some(input.into()))
        }
        /// <p>The ID of the final backup for this file system.</p>
        pub fn set_final_backup_id(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Builder {
                final_backup_id: input,
                ..self
            }
        }
        /// Appends an item to `final_backup_tags`.
        ///
        /// To override the contents of this collection use [`set_final_backup_tags`](Self::set_final_backup_tags).
        ///
        /// <p>The set of tags applied to the final backup.</p>
        pub fn final_backup_tags(mut self, input: crate::model::Tag) -> Self {
            // Lazily create the backing `Vec` on first append.
            self.final_backup_tags
                .get_or_insert_with(std::vec::Vec::new)
                .push(input);
            self
        }
        /// <p>The set of tags applied to the final backup.</p>
        pub fn set_final_backup_tags(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            Builder {
                final_backup_tags: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`DeleteFileSystemLustreResponse`](crate::model::DeleteFileSystemLustreResponse).
        pub fn build(self) -> crate::model::DeleteFileSystemLustreResponse {
            // Move both fields straight into the output shape.
            let Builder {
                final_backup_id,
                final_backup_tags,
            } = self;
            crate::model::DeleteFileSystemLustreResponse {
                final_backup_id,
                final_backup_tags,
            }
        }
    }
}
impl DeleteFileSystemLustreResponse {
    /// Creates a new builder-style object to manufacture [`DeleteFileSystemLustreResponse`](crate::model::DeleteFileSystemLustreResponse).
    pub fn builder() -> crate::model::delete_file_system_lustre_response::Builder {
        // The builder derives `Default`, so an empty one is its default value.
        Default::default()
    }
}

/// <p>The response object for the Microsoft Windows file system used in the <code>DeleteFileSystem</code> operation.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DeleteFileSystemWindowsResponse {
    /// <p>The ID of the final backup for this file system.</p>
    #[doc(hidden)]
    pub final_backup_id: std::option::Option<std::string::String>,
    /// <p>The set of tags applied to the final backup.</p>
    #[doc(hidden)]
    pub final_backup_tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl DeleteFileSystemWindowsResponse {
    /// <p>The ID of the final backup for this file system.</p>
    pub fn final_backup_id(&self) -> std::option::Option<&str> {
        // Borrow the inner `String`, if any, as a `&str`.
        self.final_backup_id.as_ref().map(|id| id.as_str())
    }
    /// <p>The set of tags applied to the final backup.</p>
    pub fn final_backup_tags(&self) -> std::option::Option<&[crate::model::Tag]> {
        // Borrow the inner `Vec`, if any, as a slice.
        self.final_backup_tags.as_ref().map(|tags| tags.as_slice())
    }
}
/// See [`DeleteFileSystemWindowsResponse`](crate::model::DeleteFileSystemWindowsResponse).
pub mod delete_file_system_windows_response {

    /// A builder for [`DeleteFileSystemWindowsResponse`](crate::model::DeleteFileSystemWindowsResponse).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) final_backup_id: std::option::Option<std::string::String>,
        pub(crate) final_backup_tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    }
    impl Builder {
        /// <p>The ID of the final backup for this file system.</p>
        pub fn final_backup_id(self, input: impl Into<std::string::String>) -> Self {
            // Delegate to the `Option`-taking setter.
            self.set_final_backup_id(Some(input.into()))
        }
        /// <p>The ID of the final backup for this file system.</p>
        pub fn set_final_backup_id(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Builder {
                final_backup_id: input,
                ..self
            }
        }
        /// Appends an item to `final_backup_tags`.
        ///
        /// To override the contents of this collection use [`set_final_backup_tags`](Self::set_final_backup_tags).
        ///
        /// <p>The set of tags applied to the final backup.</p>
        pub fn final_backup_tags(mut self, input: crate::model::Tag) -> Self {
            // Lazily create the backing `Vec` on first append.
            self.final_backup_tags
                .get_or_insert_with(std::vec::Vec::new)
                .push(input);
            self
        }
        /// <p>The set of tags applied to the final backup.</p>
        pub fn set_final_backup_tags(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            Builder {
                final_backup_tags: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`DeleteFileSystemWindowsResponse`](crate::model::DeleteFileSystemWindowsResponse).
        pub fn build(self) -> crate::model::DeleteFileSystemWindowsResponse {
            // Move both fields straight into the output shape.
            let Builder {
                final_backup_id,
                final_backup_tags,
            } = self;
            crate::model::DeleteFileSystemWindowsResponse {
                final_backup_id,
                final_backup_tags,
            }
        }
    }
}
impl DeleteFileSystemWindowsResponse {
    /// Creates a new builder-style object to manufacture [`DeleteFileSystemWindowsResponse`](crate::model::DeleteFileSystemWindowsResponse).
    pub fn builder() -> crate::model::delete_file_system_windows_response::Builder {
        // The builder derives `Default`, so an empty one is its default value.
        Default::default()
    }
}

/// <p>The configuration object for the Amazon FSx for OpenZFS file system used in the <code>DeleteFileSystem</code> operation.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DeleteFileSystemOpenZfsConfiguration {
    /// <p>By default, Amazon FSx for OpenZFS takes a final backup on your behalf when the <code>DeleteFileSystem</code> operation is invoked. Doing this helps protect you from data loss, and we highly recommend taking the final backup. If you want to skip taking a final backup, set this value to <code>true</code>.</p>
    #[doc(hidden)]
    pub skip_final_backup: std::option::Option<bool>,
    /// <p>A list of tags to apply to the file system's final backup.</p>
    #[doc(hidden)]
    pub final_backup_tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    /// <p>To delete a file system if there are child volumes present below the root volume, use the string <code>DELETE_CHILD_VOLUMES_AND_SNAPSHOTS</code>. If your file system has child volumes and you don't use this option, the delete request will fail.</p>
    #[doc(hidden)]
    pub options: std::option::Option<std::vec::Vec<crate::model::DeleteFileSystemOpenZfsOption>>,
}
impl DeleteFileSystemOpenZfsConfiguration {
    /// <p>By default, Amazon FSx for OpenZFS takes a final backup on your behalf when the <code>DeleteFileSystem</code> operation is invoked. Doing this helps protect you from data loss, and we highly recommend taking the final backup. If you want to skip taking a final backup, set this value to <code>true</code>.</p>
    pub fn skip_final_backup(&self) -> std::option::Option<bool> {
        self.skip_final_backup
    }
    /// <p>A list of tags to apply to the file system's final backup.</p>
    pub fn final_backup_tags(&self) -> std::option::Option<&[crate::model::Tag]> {
        // Borrow the inner `Vec`, if any, as a slice.
        self.final_backup_tags.as_ref().map(|tags| tags.as_slice())
    }
    /// <p>To delete a file system if there are child volumes present below the root volume, use the string <code>DELETE_CHILD_VOLUMES_AND_SNAPSHOTS</code>. If your file system has child volumes and you don't use this option, the delete request will fail.</p>
    pub fn options(&self) -> std::option::Option<&[crate::model::DeleteFileSystemOpenZfsOption]> {
        // Borrow the inner `Vec`, if any, as a slice.
        self.options.as_ref().map(|opts| opts.as_slice())
    }
}
/// See [`DeleteFileSystemOpenZfsConfiguration`](crate::model::DeleteFileSystemOpenZfsConfiguration).
pub mod delete_file_system_open_zfs_configuration {

    /// A builder for [`DeleteFileSystemOpenZfsConfiguration`](crate::model::DeleteFileSystemOpenZfsConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) skip_final_backup: std::option::Option<bool>,
        pub(crate) final_backup_tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        pub(crate) options:
            std::option::Option<std::vec::Vec<crate::model::DeleteFileSystemOpenZfsOption>>,
    }
    impl Builder {
        /// <p>By default, Amazon FSx for OpenZFS takes a final backup on your behalf when the <code>DeleteFileSystem</code> operation is invoked. Doing this helps protect you from data loss, and we highly recommend taking the final backup. If you want to skip taking a final backup, set this value to <code>true</code>.</p>
        pub fn skip_final_backup(self, input: bool) -> Self {
            // Delegate to the `Option`-taking setter.
            self.set_skip_final_backup(Some(input))
        }
        /// <p>By default, Amazon FSx for OpenZFS takes a final backup on your behalf when the <code>DeleteFileSystem</code> operation is invoked. Doing this helps protect you from data loss, and we highly recommend taking the final backup. If you want to skip taking a final backup, set this value to <code>true</code>.</p>
        pub fn set_skip_final_backup(self, input: std::option::Option<bool>) -> Self {
            Builder {
                skip_final_backup: input,
                ..self
            }
        }
        /// Appends an item to `final_backup_tags`.
        ///
        /// To override the contents of this collection use [`set_final_backup_tags`](Self::set_final_backup_tags).
        ///
        /// <p>A list of tags to apply to the file system's final backup.</p>
        pub fn final_backup_tags(mut self, input: crate::model::Tag) -> Self {
            // Lazily create the backing `Vec` on first append.
            self.final_backup_tags
                .get_or_insert_with(std::vec::Vec::new)
                .push(input);
            self
        }
        /// <p>A list of tags to apply to the file system's final backup.</p>
        pub fn set_final_backup_tags(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            Builder {
                final_backup_tags: input,
                ..self
            }
        }
        /// Appends an item to `options`.
        ///
        /// To override the contents of this collection use [`set_options`](Self::set_options).
        ///
        /// <p>To delete a file system if there are child volumes present below the root volume, use the string <code>DELETE_CHILD_VOLUMES_AND_SNAPSHOTS</code>. If your file system has child volumes and you don't use this option, the delete request will fail.</p>
        pub fn options(mut self, input: crate::model::DeleteFileSystemOpenZfsOption) -> Self {
            // Lazily create the backing `Vec` on first append.
            self.options.get_or_insert_with(std::vec::Vec::new).push(input);
            self
        }
        /// <p>To delete a file system if there are child volumes present below the root volume, use the string <code>DELETE_CHILD_VOLUMES_AND_SNAPSHOTS</code>. If your file system has child volumes and you don't use this option, the delete request will fail.</p>
        pub fn set_options(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::DeleteFileSystemOpenZfsOption>>,
        ) -> Self {
            Builder {
                options: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`DeleteFileSystemOpenZfsConfiguration`](crate::model::DeleteFileSystemOpenZfsConfiguration).
        pub fn build(self) -> crate::model::DeleteFileSystemOpenZfsConfiguration {
            // Move all three fields straight into the output shape.
            let Builder {
                skip_final_backup,
                final_backup_tags,
                options,
            } = self;
            crate::model::DeleteFileSystemOpenZfsConfiguration {
                skip_final_backup,
                final_backup_tags,
                options,
            }
        }
    }
}
impl DeleteFileSystemOpenZfsConfiguration {
    /// Creates a new builder-style object to manufacture [`DeleteFileSystemOpenZfsConfiguration`](crate::model::DeleteFileSystemOpenZfsConfiguration).
    pub fn builder() -> crate::model::delete_file_system_open_zfs_configuration::Builder {
        // The builder derives `Default`, so an empty one is its default value.
        Default::default()
    }
}

/// When writing a match expression against `DeleteFileSystemOpenZfsOption`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let deletefilesystemopenzfsoption = unimplemented!();
/// match deletefilesystemopenzfsoption {
///     DeleteFileSystemOpenZfsOption::DeleteChildVolumesAndSnapshots => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `deletefilesystemopenzfsoption` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `DeleteFileSystemOpenZfsOption::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `DeleteFileSystemOpenZfsOption::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `DeleteFileSystemOpenZfsOption::NewFeature` is defined.
/// Specifically, when `deletefilesystemopenzfsoption` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `DeleteFileSystemOpenZfsOption::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum DeleteFileSystemOpenZfsOption {
    #[allow(missing_docs)] // documentation missing in model
    DeleteChildVolumesAndSnapshots,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for DeleteFileSystemOpenZfsOption {
    fn from(s: &str) -> Self {
        // Any string other than the known value is preserved in `Unknown`,
        // keeping newer service-side values representable.
        if s == "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS" {
            Self::DeleteChildVolumesAndSnapshots
        } else {
            Self::Unknown(crate::types::UnknownVariantValue(s.to_owned()))
        }
    }
}
impl std::str::FromStr for DeleteFileSystemOpenZfsOption {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Parsing can never fail: unrecognized input becomes `Unknown`.
        Ok(s.into())
    }
}
impl DeleteFileSystemOpenZfsOption {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Self::DeleteChildVolumesAndSnapshots => "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS",
            Self::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"]
    }
}
impl AsRef<str> for DeleteFileSystemOpenZfsOption {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>The configuration object for the Amazon FSx for Lustre file system being deleted in the <code>DeleteFileSystem</code> operation.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DeleteFileSystemLustreConfiguration {
    /// <p>Set <code>SkipFinalBackup</code> to false if you want to take a final backup of the file system you are deleting. By default, Amazon FSx will not take a final backup on your behalf when the <code>DeleteFileSystem</code> operation is invoked. (Default = true)</p> <note>
    /// <p>The <code>fsx:CreateBackup</code> permission is required if you set <code>SkipFinalBackup</code> to <code>false</code> in order to delete the file system and take a final backup.</p>
    /// </note>
    #[doc(hidden)]
    pub skip_final_backup: std::option::Option<bool>,
    /// <p>Use if <code>SkipFinalBackup</code> is set to <code>false</code>, and you want to apply an array of tags to the final backup. If you have set the file system property <code>CopyTagsToBackups</code> to true, and you specify one or more <code>FinalBackupTags</code> when deleting a file system, Amazon FSx will not copy any existing file system tags to the backup.</p>
    #[doc(hidden)]
    pub final_backup_tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl DeleteFileSystemLustreConfiguration {
    /// <p>Set <code>SkipFinalBackup</code> to false if you want to take a final backup of the file system you are deleting. By default, Amazon FSx will not take a final backup on your behalf when the <code>DeleteFileSystem</code> operation is invoked. (Default = true)</p> <note>
    /// <p>The <code>fsx:CreateBackup</code> permission is required if you set <code>SkipFinalBackup</code> to <code>false</code> in order to delete the file system and take a final backup.</p>
    /// </note>
    pub fn skip_final_backup(&self) -> std::option::Option<bool> {
        self.skip_final_backup
    }
    /// <p>Use if <code>SkipFinalBackup</code> is set to <code>false</code>, and you want to apply an array of tags to the final backup. If you have set the file system property <code>CopyTagsToBackups</code> to true, and you specify one or more <code>FinalBackupTags</code> when deleting a file system, Amazon FSx will not copy any existing file system tags to the backup.</p>
    pub fn final_backup_tags(&self) -> std::option::Option<&[crate::model::Tag]> {
        // Borrow the inner `Vec`, if any, as a slice.
        self.final_backup_tags.as_ref().map(|tags| tags.as_slice())
    }
}
/// See [`DeleteFileSystemLustreConfiguration`](crate::model::DeleteFileSystemLustreConfiguration).
pub mod delete_file_system_lustre_configuration {

    /// A builder for [`DeleteFileSystemLustreConfiguration`](crate::model::DeleteFileSystemLustreConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) skip_final_backup: std::option::Option<bool>,
        pub(crate) final_backup_tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    }
    impl Builder {
        /// <p>Set <code>SkipFinalBackup</code> to false if you want to take a final backup of the file system you are deleting. By default, Amazon FSx will not take a final backup on your behalf when the <code>DeleteFileSystem</code> operation is invoked. (Default = true)</p> <note>
        /// <p>The <code>fsx:CreateBackup</code> permission is required if you set <code>SkipFinalBackup</code> to <code>false</code> in order to delete the file system and take a final backup.</p>
        /// </note>
        pub fn skip_final_backup(self, input: bool) -> Self {
            // Delegate to the `Option`-taking setter.
            self.set_skip_final_backup(Some(input))
        }
        /// <p>Set <code>SkipFinalBackup</code> to false if you want to take a final backup of the file system you are deleting. By default, Amazon FSx will not take a final backup on your behalf when the <code>DeleteFileSystem</code> operation is invoked. (Default = true)</p> <note>
        /// <p>The <code>fsx:CreateBackup</code> permission is required if you set <code>SkipFinalBackup</code> to <code>false</code> in order to delete the file system and take a final backup.</p>
        /// </note>
        pub fn set_skip_final_backup(self, input: std::option::Option<bool>) -> Self {
            Builder {
                skip_final_backup: input,
                ..self
            }
        }
        /// Appends an item to `final_backup_tags`.
        ///
        /// To override the contents of this collection use [`set_final_backup_tags`](Self::set_final_backup_tags).
        ///
        /// <p>Use if <code>SkipFinalBackup</code> is set to <code>false</code>, and you want to apply an array of tags to the final backup. If you have set the file system property <code>CopyTagsToBackups</code> to true, and you specify one or more <code>FinalBackupTags</code> when deleting a file system, Amazon FSx will not copy any existing file system tags to the backup.</p>
        pub fn final_backup_tags(mut self, input: crate::model::Tag) -> Self {
            // Lazily create the backing `Vec` on first append.
            self.final_backup_tags
                .get_or_insert_with(std::vec::Vec::new)
                .push(input);
            self
        }
        /// <p>Use if <code>SkipFinalBackup</code> is set to <code>false</code>, and you want to apply an array of tags to the final backup. If you have set the file system property <code>CopyTagsToBackups</code> to true, and you specify one or more <code>FinalBackupTags</code> when deleting a file system, Amazon FSx will not copy any existing file system tags to the backup.</p>
        pub fn set_final_backup_tags(
            self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            Builder {
                final_backup_tags: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`DeleteFileSystemLustreConfiguration`](crate::model::DeleteFileSystemLustreConfiguration).
        pub fn build(self) -> crate::model::DeleteFileSystemLustreConfiguration {
            // Move both fields straight into the output shape.
            let Builder {
                skip_final_backup,
                final_backup_tags,
            } = self;
            crate::model::DeleteFileSystemLustreConfiguration {
                skip_final_backup,
                final_backup_tags,
            }
        }
    }
}
impl DeleteFileSystemLustreConfiguration {
    /// Creates a new builder-style object to manufacture [`DeleteFileSystemLustreConfiguration`](crate::model::DeleteFileSystemLustreConfiguration).
    pub fn builder() -> crate::model::delete_file_system_lustre_configuration::Builder {
        // The builder derives `Default`, so an empty one is its default value.
        Default::default()
    }
}

/// <p>The configuration object for the Microsoft Windows file system used in the <code>DeleteFileSystem</code> operation.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct DeleteFileSystemWindowsConfiguration {
    /// <p>By default, Amazon FSx for Windows takes a final backup on your behalf when the <code>DeleteFileSystem</code> operation is invoked. Doing this helps protect you from data loss, and we highly recommend taking the final backup. If you want to skip this backup, use this flag to do so.</p>
    #[doc(hidden)]
    pub skip_final_backup: std::option::Option<bool>,
    /// <p>A set of tags for your final backup.</p>
    #[doc(hidden)]
    pub final_backup_tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl DeleteFileSystemWindowsConfiguration {
    /// <p>By default, Amazon FSx for Windows takes a final backup on your behalf when the <code>DeleteFileSystem</code> operation is invoked. Doing this helps protect you from data loss, and we highly recommend taking the final backup. If you want to skip this backup, use this flag to do so.</p>
    pub fn skip_final_backup(&self) -> std::option::Option<bool> {
        self.skip_final_backup
    }
    /// <p>A set of tags for your final backup.</p>
    pub fn final_backup_tags(&self) -> std::option::Option<&[crate::model::Tag]> {
        self.final_backup_tags.as_deref()
    }
}
/// See [`DeleteFileSystemWindowsConfiguration`](crate::model::DeleteFileSystemWindowsConfiguration).
pub mod delete_file_system_windows_configuration {

    /// A builder for [`DeleteFileSystemWindowsConfiguration`](crate::model::DeleteFileSystemWindowsConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) skip_final_backup: std::option::Option<bool>,
        pub(crate) final_backup_tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    }
    impl Builder {
        /// <p>By default, Amazon FSx for Windows takes a final backup on your behalf when the <code>DeleteFileSystem</code> operation is invoked. Doing this helps protect you from data loss, and we highly recommend taking the final backup. If you want to skip this backup, use this flag to do so.</p>
        pub fn skip_final_backup(mut self, input: bool) -> Self {
            self.skip_final_backup = Some(input);
            self
        }
        /// <p>By default, Amazon FSx for Windows takes a final backup on your behalf when the <code>DeleteFileSystem</code> operation is invoked. Doing this helps protect you from data loss, and we highly recommend taking the final backup. If you want to skip this backup, use this flag to do so.</p>
        pub fn set_skip_final_backup(mut self, input: std::option::Option<bool>) -> Self {
            self.skip_final_backup = input;
            self
        }
        /// Appends an item to `final_backup_tags`.
        ///
        /// To override the contents of this collection use [`set_final_backup_tags`](Self::set_final_backup_tags).
        ///
        /// <p>A set of tags for your final backup.</p>
        pub fn final_backup_tags(mut self, input: crate::model::Tag) -> Self {
            let mut v = self.final_backup_tags.unwrap_or_default();
            v.push(input);
            self.final_backup_tags = Some(v);
            self
        }
        /// <p>A set of tags for your final backup.</p>
        pub fn set_final_backup_tags(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            self.final_backup_tags = input;
            self
        }
        /// Consumes the builder and constructs a [`DeleteFileSystemWindowsConfiguration`](crate::model::DeleteFileSystemWindowsConfiguration).
        pub fn build(self) -> crate::model::DeleteFileSystemWindowsConfiguration {
            crate::model::DeleteFileSystemWindowsConfiguration {
                skip_final_backup: self.skip_final_backup,
                final_backup_tags: self.final_backup_tags,
            }
        }
    }
}
impl DeleteFileSystemWindowsConfiguration {
    /// Returns a fresh builder for assembling a [`DeleteFileSystemWindowsConfiguration`](crate::model::DeleteFileSystemWindowsConfiguration).
    pub fn builder() -> crate::model::delete_file_system_windows_configuration::Builder {
        std::default::Default::default()
    }
}

/// <p>Specifies the configuration of the ONTAP volume that you are creating.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct CreateOntapVolumeConfiguration {
    /// <p>Specifies the location in the SVM's namespace where the volume is mounted. The <code>JunctionPath</code> must have a leading forward slash, such as <code>/vol3</code>.</p>
    #[doc(hidden)]
    pub junction_path: std::option::Option<std::string::String>,
    /// <p>Specifies the security style for the volume. If a volume's security style is not specified, it is automatically set to the root volume's security style. The security style determines the type of permissions that FSx for ONTAP uses to control data access. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-volumes.html#volume-security-style">Volume security style</a> in the <i>Amazon FSx for NetApp ONTAP User Guide</i>. Specify one of the following values:</p>
    /// <ul>
    /// <li> <p> <code>UNIX</code> if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account. </p> </li>
    /// <li> <p> <code>NTFS</code> if the file system is managed by a Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Windows user as the service account.</p> </li>
    /// <li> <p> <code>MIXED</code> if the file system is managed by both UNIX and Windows administrators and users consist of both NFS and SMB clients.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub security_style: std::option::Option<crate::model::SecurityStyle>,
    /// <p>Specifies the size of the volume, in megabytes (MB), that you are creating.</p>
    #[doc(hidden)]
    pub size_in_megabytes: std::option::Option<i32>,
    /// <p>Set to true to enable deduplication, compression, and compaction storage efficiency features on the volume.</p>
    #[doc(hidden)]
    pub storage_efficiency_enabled: std::option::Option<bool>,
    /// <p>Specifies the ONTAP SVM in which to create the volume.</p>
    #[doc(hidden)]
    pub storage_virtual_machine_id: std::option::Option<std::string::String>,
    /// <p>Describes the data tiering policy for an ONTAP volume. When enabled, Amazon FSx for ONTAP's intelligent tiering automatically transitions a volume's data between the file system's primary storage and capacity pool storage based on your access patterns.</p>
    /// <p>Valid tiering policies are the following:</p>
    /// <ul>
    /// <li> <p> <code>SNAPSHOT_ONLY</code> - (Default value) moves cold snapshots to the capacity pool storage tier.</p> </li>
    /// <li> <p> <code>AUTO</code> - moves cold user data and snapshots to the capacity pool storage tier based on your access patterns.</p> </li>
    /// <li> <p> <code>ALL</code> - moves all user data blocks in both the active file system and Snapshot copies to the storage pool tier.</p> </li>
    /// <li> <p> <code>NONE</code> - keeps a volume's data in the primary storage tier, preventing it from being moved to the capacity pool tier.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub tiering_policy: std::option::Option<crate::model::TieringPolicy>,
    /// <p>Specifies the type of volume you are creating. Valid values are the following:</p>
    /// <ul>
    /// <li> <p> <code>RW</code> specifies a read/write volume. <code>RW</code> is the default.</p> </li>
    /// <li> <p> <code>DP</code> specifies a data-protection volume. A <code>DP</code> volume is read-only and can be used as the destination of a NetApp SnapMirror relationship.</p> </li>
    /// </ul>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/volume-types">Volume types</a> in the <i>Amazon FSx for NetApp ONTAP User Guide</i>.</p>
    #[doc(hidden)]
    pub ontap_volume_type: std::option::Option<crate::model::InputOntapVolumeType>,
    /// <p>Specifies the snapshot policy for the volume. There are three built-in snapshot policies:</p>
    /// <ul>
    /// <li> <p> <code>default</code>: This is the default policy. A maximum of six hourly snapshots taken five minutes past the hour. A maximum of two daily snapshots taken Monday through Saturday at 10 minutes after midnight. A maximum of two weekly snapshots taken every Sunday at 15 minutes after midnight.</p> </li>
    /// <li> <p> <code>default-1weekly</code>: This policy is the same as the <code>default</code> policy except that it only retains one snapshot from the weekly schedule.</p> </li>
    /// <li> <p> <code>none</code>: This policy does not take any snapshots. This policy can be assigned to volumes to prevent automatic snapshots from being taken.</p> </li>
    /// </ul>
    /// <p>You can also provide the name of a custom policy that you created with the ONTAP CLI or REST API.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snapshots-ontap.html#snapshot-policies">Snapshot policies</a> in the <i>Amazon FSx for NetApp ONTAP User Guide</i>.</p>
    #[doc(hidden)]
    pub snapshot_policy: std::option::Option<std::string::String>,
    /// <p>A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to false. If it's set to true, all tags for the volume are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the volume, regardless of this value.</p>
    #[doc(hidden)]
    pub copy_tags_to_backups: std::option::Option<bool>,
}
impl CreateOntapVolumeConfiguration {
    /// Returns the location in the SVM's namespace where the volume is mounted.
    /// The <code>JunctionPath</code> has a leading forward slash, such as <code>/vol3</code>.
    pub fn junction_path(&self) -> std::option::Option<&str> {
        self.junction_path.as_ref().map(|p| p.as_str())
    }
    /// Returns the security style for the volume (<code>UNIX</code>, <code>NTFS</code>, or <code>MIXED</code>).
    /// If not specified, the root volume's security style applies. See
    /// <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-volumes.html#volume-security-style">Volume security style</a>
    /// in the <i>Amazon FSx for NetApp ONTAP User Guide</i>.
    pub fn security_style(&self) -> std::option::Option<&crate::model::SecurityStyle> {
        self.security_style.as_ref()
    }
    /// Returns the size of the volume being created, in megabytes (MB).
    pub fn size_in_megabytes(&self) -> std::option::Option<i32> {
        self.size_in_megabytes
    }
    /// Returns whether deduplication, compression, and compaction storage
    /// efficiency features are enabled on the volume.
    pub fn storage_efficiency_enabled(&self) -> std::option::Option<bool> {
        self.storage_efficiency_enabled
    }
    /// Returns the ID of the ONTAP SVM in which the volume is created.
    pub fn storage_virtual_machine_id(&self) -> std::option::Option<&str> {
        self.storage_virtual_machine_id.as_ref().map(|id| id.as_str())
    }
    /// Returns the data tiering policy for the volume
    /// (<code>SNAPSHOT_ONLY</code> — the default, <code>AUTO</code>, <code>ALL</code>, or <code>NONE</code>),
    /// which controls how data transitions between primary storage and capacity pool storage.
    pub fn tiering_policy(&self) -> std::option::Option<&crate::model::TieringPolicy> {
        self.tiering_policy.as_ref()
    }
    /// Returns the type of volume being created: <code>RW</code> (read/write, the default) or
    /// <code>DP</code> (read-only data-protection, usable as a NetApp SnapMirror destination). See
    /// <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/volume-types">Volume types</a>
    /// in the <i>Amazon FSx for NetApp ONTAP User Guide</i>.
    pub fn ontap_volume_type(&self) -> std::option::Option<&crate::model::InputOntapVolumeType> {
        self.ontap_volume_type.as_ref()
    }
    /// Returns the snapshot policy for the volume: one of the built-in policies
    /// (<code>default</code>, <code>default-1weekly</code>, <code>none</code>) or the name of a
    /// custom policy created with the ONTAP CLI or REST API. See
    /// <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snapshots-ontap.html#snapshot-policies">Snapshot policies</a>
    /// in the <i>Amazon FSx for NetApp ONTAP User Guide</i>.
    pub fn snapshot_policy(&self) -> std::option::Option<&str> {
        self.snapshot_policy.as_ref().map(|p| p.as_str())
    }
    /// Returns whether tags for the volume are copied to backups. Defaults to false.
    /// When true, volume tags are copied to all automatic and user-initiated backups
    /// where the user doesn't specify tags; tags supplied explicitly when creating a
    /// user-initiated backup always take precedence over this flag.
    pub fn copy_tags_to_backups(&self) -> std::option::Option<bool> {
        self.copy_tags_to_backups
    }
}
/// Builder module for [`CreateOntapVolumeConfiguration`](crate::model::CreateOntapVolumeConfiguration).
pub mod create_ontap_volume_configuration {

    /// A builder for [`CreateOntapVolumeConfiguration`](crate::model::CreateOntapVolumeConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) junction_path: std::option::Option<std::string::String>,
        pub(crate) security_style: std::option::Option<crate::model::SecurityStyle>,
        pub(crate) size_in_megabytes: std::option::Option<i32>,
        pub(crate) storage_efficiency_enabled: std::option::Option<bool>,
        pub(crate) storage_virtual_machine_id: std::option::Option<std::string::String>,
        pub(crate) tiering_policy: std::option::Option<crate::model::TieringPolicy>,
        pub(crate) ontap_volume_type: std::option::Option<crate::model::InputOntapVolumeType>,
        pub(crate) snapshot_policy: std::option::Option<std::string::String>,
        pub(crate) copy_tags_to_backups: std::option::Option<bool>,
    }
    impl Builder {
        /// Sets the location in the SVM's namespace where the volume is mounted.
        /// The <code>JunctionPath</code> must have a leading forward slash, such as <code>/vol3</code>.
        pub fn junction_path(self, input: impl Into<std::string::String>) -> Self {
            Self {
                junction_path: Some(input.into()),
                ..self
            }
        }
        /// Sets or clears the junction path; see [`Self::junction_path`].
        pub fn set_junction_path(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                junction_path: input,
                ..self
            }
        }
        /// Sets the security style for the volume (<code>UNIX</code>, <code>NTFS</code>, or <code>MIXED</code>).
        /// If not specified, the root volume's security style applies. See
        /// <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-volumes.html#volume-security-style">Volume security style</a>
        /// in the <i>Amazon FSx for NetApp ONTAP User Guide</i>.
        pub fn security_style(self, input: crate::model::SecurityStyle) -> Self {
            Self {
                security_style: Some(input),
                ..self
            }
        }
        /// Sets or clears the security style; see [`Self::security_style`].
        pub fn set_security_style(
            self,
            input: std::option::Option<crate::model::SecurityStyle>,
        ) -> Self {
            Self {
                security_style: input,
                ..self
            }
        }
        /// Sets the size of the volume being created, in megabytes (MB).
        pub fn size_in_megabytes(self, input: i32) -> Self {
            Self {
                size_in_megabytes: Some(input),
                ..self
            }
        }
        /// Sets or clears the volume size; see [`Self::size_in_megabytes`].
        pub fn set_size_in_megabytes(self, input: std::option::Option<i32>) -> Self {
            Self {
                size_in_megabytes: input,
                ..self
            }
        }
        /// Set to true to enable deduplication, compression, and compaction
        /// storage efficiency features on the volume.
        pub fn storage_efficiency_enabled(self, input: bool) -> Self {
            Self {
                storage_efficiency_enabled: Some(input),
                ..self
            }
        }
        /// Sets or clears the storage efficiency flag; see [`Self::storage_efficiency_enabled`].
        pub fn set_storage_efficiency_enabled(self, input: std::option::Option<bool>) -> Self {
            Self {
                storage_efficiency_enabled: input,
                ..self
            }
        }
        /// Sets the ID of the ONTAP SVM in which to create the volume.
        pub fn storage_virtual_machine_id(self, input: impl Into<std::string::String>) -> Self {
            Self {
                storage_virtual_machine_id: Some(input.into()),
                ..self
            }
        }
        /// Sets or clears the SVM ID; see [`Self::storage_virtual_machine_id`].
        pub fn set_storage_virtual_machine_id(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                storage_virtual_machine_id: input,
                ..self
            }
        }
        /// Sets the data tiering policy for the volume, which controls how Amazon FSx for
        /// ONTAP's intelligent tiering transitions data between the file system's primary
        /// storage and capacity pool storage based on your access patterns.
        /// <p>Valid tiering policies are the following:</p>
        /// <ul>
        /// <li> <p> <code>SNAPSHOT_ONLY</code> - (Default value) moves cold snapshots to the capacity pool storage tier.</p> </li>
        /// <li> <p> <code>AUTO</code> - moves cold user data and snapshots to the capacity pool storage tier based on your access patterns.</p> </li>
        /// <li> <p> <code>ALL</code> - moves all user data blocks in both the active file system and Snapshot copies to the storage pool tier.</p> </li>
        /// <li> <p> <code>NONE</code> - keeps a volume's data in the primary storage tier, preventing it from being moved to the capacity pool tier.</p> </li>
        /// </ul>
        pub fn tiering_policy(self, input: crate::model::TieringPolicy) -> Self {
            Self {
                tiering_policy: Some(input),
                ..self
            }
        }
        /// Sets or clears the tiering policy; see [`Self::tiering_policy`].
        pub fn set_tiering_policy(
            self,
            input: std::option::Option<crate::model::TieringPolicy>,
        ) -> Self {
            Self {
                tiering_policy: input,
                ..self
            }
        }
        /// Sets the type of volume being created: <code>RW</code> (read/write, the default) or
        /// <code>DP</code> (read-only data-protection, usable as a NetApp SnapMirror destination). See
        /// <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/volume-types">Volume types</a>
        /// in the <i>Amazon FSx for NetApp ONTAP User Guide</i>.
        pub fn ontap_volume_type(self, input: crate::model::InputOntapVolumeType) -> Self {
            Self {
                ontap_volume_type: Some(input),
                ..self
            }
        }
        /// Sets or clears the volume type; see [`Self::ontap_volume_type`].
        pub fn set_ontap_volume_type(
            self,
            input: std::option::Option<crate::model::InputOntapVolumeType>,
        ) -> Self {
            Self {
                ontap_volume_type: input,
                ..self
            }
        }
        /// Sets the snapshot policy for the volume: one of the built-in policies
        /// (<code>default</code>, <code>default-1weekly</code>, <code>none</code>) or the name of a
        /// custom policy created with the ONTAP CLI or REST API. See
        /// <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snapshots-ontap.html#snapshot-policies">Snapshot policies</a>
        /// in the <i>Amazon FSx for NetApp ONTAP User Guide</i>.
        pub fn snapshot_policy(self, input: impl Into<std::string::String>) -> Self {
            Self {
                snapshot_policy: Some(input.into()),
                ..self
            }
        }
        /// Sets or clears the snapshot policy; see [`Self::snapshot_policy`].
        pub fn set_snapshot_policy(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                snapshot_policy: input,
                ..self
            }
        }
        /// Sets whether tags for the volume are copied to backups. Defaults to false.
        /// When true, volume tags are copied to all automatic and user-initiated backups
        /// where the user doesn't specify tags; tags supplied explicitly when creating a
        /// user-initiated backup always take precedence over this flag.
        pub fn copy_tags_to_backups(self, input: bool) -> Self {
            Self {
                copy_tags_to_backups: Some(input),
                ..self
            }
        }
        /// Sets or clears the copy-tags-to-backups flag; see [`Self::copy_tags_to_backups`].
        pub fn set_copy_tags_to_backups(self, input: std::option::Option<bool>) -> Self {
            Self {
                copy_tags_to_backups: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`CreateOntapVolumeConfiguration`](crate::model::CreateOntapVolumeConfiguration).
        pub fn build(self) -> crate::model::CreateOntapVolumeConfiguration {
            let Self {
                junction_path,
                security_style,
                size_in_megabytes,
                storage_efficiency_enabled,
                storage_virtual_machine_id,
                tiering_policy,
                ontap_volume_type,
                snapshot_policy,
                copy_tags_to_backups,
            } = self;
            crate::model::CreateOntapVolumeConfiguration {
                junction_path,
                security_style,
                size_in_megabytes,
                storage_efficiency_enabled,
                storage_virtual_machine_id,
                tiering_policy,
                ontap_volume_type,
                snapshot_policy,
                copy_tags_to_backups,
            }
        }
    }
}
impl CreateOntapVolumeConfiguration {
    /// Returns a fresh builder for assembling a [`CreateOntapVolumeConfiguration`](crate::model::CreateOntapVolumeConfiguration).
    pub fn builder() -> crate::model::create_ontap_volume_configuration::Builder {
        std::default::Default::default()
    }
}

/// When writing a match expression against `InputOntapVolumeType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let inputontapvolumetype = unimplemented!();
/// match inputontapvolumetype {
///     InputOntapVolumeType::Dp => { /* ... */ },
///     InputOntapVolumeType::Rw => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `inputontapvolumetype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `InputOntapVolumeType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `InputOntapVolumeType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `InputOntapVolumeType::NewFeature` is defined.
/// Specifically, when `inputontapvolumetype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `InputOntapVolumeType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum InputOntapVolumeType {
    /// `DP` — a data-protection volume; read-only, usable as a NetApp SnapMirror destination.
    #[allow(missing_docs)] // documentation missing in model
    Dp,
    /// `RW` — a read/write volume (the service default).
    #[allow(missing_docs)] // documentation missing in model
    Rw,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for InputOntapVolumeType {
    /// Maps a wire-format string to its enum member; any value other than
    /// `"DP"` or `"RW"` is captured in the `Unknown` variant for forward compatibility.
    fn from(s: &str) -> Self {
        match s {
            "DP" => Self::Dp,
            "RW" => Self::Rw,
            other => Self::Unknown(crate::types::UnknownVariantValue(other.to_owned())),
        }
    }
}
impl std::str::FromStr for InputOntapVolumeType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(InputOntapVolumeType::from(s))
    }
}
impl InputOntapVolumeType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            InputOntapVolumeType::Dp => "DP",
            InputOntapVolumeType::Rw => "RW",
            InputOntapVolumeType::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &["DP", "RW"]
    }
}
impl AsRef<str> for InputOntapVolumeType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>Specifies the configuration of the Amazon FSx for OpenZFS volume that you are creating.</p>
// NOTE(review): generated Smithy model type — every field is `Option`-wrapped
// and `#[doc(hidden)]`. Read values through the accessor methods on this type
// and construct instances with `CreateOpenZfsVolumeConfiguration::builder()`.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct CreateOpenZfsVolumeConfiguration {
    /// <p>The ID of the volume to use as the parent volume of the volume that you are creating.</p>
    #[doc(hidden)]
    pub parent_volume_id: std::option::Option<std::string::String>,
    /// <p>Specifies the amount of storage in gibibytes (GiB) to reserve from the parent volume. Setting <code>StorageCapacityReservationGiB</code> guarantees that the specified amount of storage space on the parent volume will always be available for the volume. You can't reserve more storage than the parent volume has. To <i>not</i> specify a storage capacity reservation, set this to <code>0</code> or <code>-1</code>. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/managing-volumes.html#volume-properties">Volume properties</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>
    #[doc(hidden)]
    pub storage_capacity_reservation_gi_b: std::option::Option<i32>,
    /// <p>Sets the maximum storage size in gibibytes (GiB) for the volume. You can specify a quota that is larger than the storage on the parent volume. A volume quota limits the amount of storage that the volume can consume to the configured amount, but does not guarantee the space will be available on the parent volume. To guarantee quota space, you must also set <code>StorageCapacityReservationGiB</code>. To <i>not</i> specify a storage capacity quota, set this to <code>-1</code>. </p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/managing-volumes.html#volume-properties">Volume properties</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>
    #[doc(hidden)]
    pub storage_capacity_quota_gi_b: std::option::Option<i32>,
    /// <p>Specifies the suggested block size for a volume in a ZFS dataset, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. We recommend using the default setting for the majority of use cases. Generally, workloads that write in fixed small or large record sizes may benefit from setting a custom record size, like database workloads (small record size) or media streaming workloads (large record size). For additional guidance on when to set a custom record size, see <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#record-size-performance"> ZFS Record size</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>
    #[doc(hidden)]
    pub record_size_ki_b: std::option::Option<i32>,
    /// <p>Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code> by default.</p>
    /// <ul>
    /// <li> <p> <code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</p> </li>
    /// <li> <p> <code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. ZSTD compression provides a higher level of data compression and higher read throughput performance than LZ4 compression.</p> </li>
    /// <li> <p> <code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. LZ4 compression provides a lower level of compression and higher write throughput performance than ZSTD compression.</p> </li>
    /// </ul>
    /// <p>For more information about volume compression types and the performance of your Amazon FSx for OpenZFS file system, see <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#performance-tips-zfs"> Tips for maximizing performance</a> File system and volume settings in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>
    #[doc(hidden)]
    pub data_compression_type: std::option::Option<crate::model::OpenZfsDataCompressionType>,
    /// <p>A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is <code>true</code>, and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value.</p>
    #[doc(hidden)]
    pub copy_tags_to_snapshots: std::option::Option<bool>,
    /// <p>The configuration object that specifies the snapshot to use as the origin of the data for the volume.</p>
    #[doc(hidden)]
    pub origin_snapshot:
        std::option::Option<crate::model::CreateOpenZfsOriginSnapshotConfiguration>,
    /// <p>A Boolean value indicating whether the volume is read-only.</p>
    #[doc(hidden)]
    pub read_only: std::option::Option<bool>,
    /// <p>The configuration object for mounting a Network File System (NFS) file system.</p>
    #[doc(hidden)]
    pub nfs_exports: std::option::Option<std::vec::Vec<crate::model::OpenZfsNfsExport>>,
    /// <p>An object specifying how much storage users or groups can use on the volume.</p>
    #[doc(hidden)]
    pub user_and_group_quotas:
        std::option::Option<std::vec::Vec<crate::model::OpenZfsUserOrGroupQuota>>,
}
impl CreateOpenZfsVolumeConfiguration {
    /// Returns the ID of the volume to use as the parent of the volume being created.
    pub fn parent_volume_id(&self) -> std::option::Option<&str> {
        self.parent_volume_id.as_ref().map(|id| id.as_str())
    }
    /// Returns the storage capacity reservation, in gibibytes (GiB);
    /// `0` or `-1` indicates no reservation.
    pub fn storage_capacity_reservation_gi_b(&self) -> std::option::Option<i32> {
        self.storage_capacity_reservation_gi_b
    }
    /// Returns the maximum storage quota for the volume, in gibibytes (GiB);
    /// `-1` indicates no quota.
    pub fn storage_capacity_quota_gi_b(&self) -> std::option::Option<i32> {
        self.storage_capacity_quota_gi_b
    }
    /// Returns the suggested ZFS record (block) size, in kibibytes (KiB).
    pub fn record_size_ki_b(&self) -> std::option::Option<i32> {
        self.record_size_ki_b
    }
    /// Returns the data compression type (`NONE`, `ZSTD`, or `LZ4`).
    pub fn data_compression_type(
        &self,
    ) -> std::option::Option<&crate::model::OpenZfsDataCompressionType> {
        self.data_compression_type.as_ref()
    }
    /// Returns whether volume tags should be copied to snapshots.
    pub fn copy_tags_to_snapshots(&self) -> std::option::Option<bool> {
        self.copy_tags_to_snapshots
    }
    /// Returns the configuration of the snapshot used as the origin of the volume's data.
    pub fn origin_snapshot(
        &self,
    ) -> std::option::Option<&crate::model::CreateOpenZfsOriginSnapshotConfiguration> {
        self.origin_snapshot.as_ref()
    }
    /// Returns whether the volume is read-only.
    pub fn read_only(&self) -> std::option::Option<bool> {
        self.read_only
    }
    /// Returns the NFS export configurations as a borrowed slice.
    pub fn nfs_exports(&self) -> std::option::Option<&[crate::model::OpenZfsNfsExport]> {
        self.nfs_exports.as_ref().map(|exports| exports.as_slice())
    }
    /// Returns the per-user/per-group storage quotas as a borrowed slice.
    pub fn user_and_group_quotas(
        &self,
    ) -> std::option::Option<&[crate::model::OpenZfsUserOrGroupQuota]> {
        self.user_and_group_quotas
            .as_ref()
            .map(|quotas| quotas.as_slice())
    }
}
/// See [`CreateOpenZfsVolumeConfiguration`](crate::model::CreateOpenZfsVolumeConfiguration).
pub mod create_open_zfs_volume_configuration {

    /// A builder for [`CreateOpenZfsVolumeConfiguration`](crate::model::CreateOpenZfsVolumeConfiguration).
    ///
    /// Fluent setters (`foo(value)`) wrap the value in `Some`; the matching
    /// `set_foo(Option<...>)` setters accept an optional directly and can be
    /// used to clear a previously set value.
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) parent_volume_id: std::option::Option<std::string::String>,
        pub(crate) storage_capacity_reservation_gi_b: std::option::Option<i32>,
        pub(crate) storage_capacity_quota_gi_b: std::option::Option<i32>,
        pub(crate) record_size_ki_b: std::option::Option<i32>,
        pub(crate) data_compression_type:
            std::option::Option<crate::model::OpenZfsDataCompressionType>,
        pub(crate) copy_tags_to_snapshots: std::option::Option<bool>,
        pub(crate) origin_snapshot:
            std::option::Option<crate::model::CreateOpenZfsOriginSnapshotConfiguration>,
        pub(crate) read_only: std::option::Option<bool>,
        pub(crate) nfs_exports: std::option::Option<std::vec::Vec<crate::model::OpenZfsNfsExport>>,
        pub(crate) user_and_group_quotas:
            std::option::Option<std::vec::Vec<crate::model::OpenZfsUserOrGroupQuota>>,
    }
    impl Builder {
        /// Sets the ID of the volume to use as the parent of the volume being created.
        pub fn parent_volume_id(self, input: impl Into<std::string::String>) -> Self {
            self.set_parent_volume_id(std::option::Option::Some(input.into()))
        }
        /// Sets or clears the parent volume ID.
        pub fn set_parent_volume_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.parent_volume_id = input;
            self
        }
        /// Sets the storage capacity reservation, in gibibytes (GiB);
        /// use `0` or `-1` for no reservation.
        pub fn storage_capacity_reservation_gi_b(self, input: i32) -> Self {
            self.set_storage_capacity_reservation_gi_b(std::option::Option::Some(input))
        }
        /// Sets or clears the storage capacity reservation (GiB).
        pub fn set_storage_capacity_reservation_gi_b(
            mut self,
            input: std::option::Option<i32>,
        ) -> Self {
            self.storage_capacity_reservation_gi_b = input;
            self
        }
        /// Sets the maximum storage quota, in gibibytes (GiB); use `-1` for no quota.
        pub fn storage_capacity_quota_gi_b(self, input: i32) -> Self {
            self.set_storage_capacity_quota_gi_b(std::option::Option::Some(input))
        }
        /// Sets or clears the storage capacity quota (GiB).
        pub fn set_storage_capacity_quota_gi_b(mut self, input: std::option::Option<i32>) -> Self {
            self.storage_capacity_quota_gi_b = input;
            self
        }
        /// Sets the suggested ZFS record (block) size, in kibibytes (KiB).
        pub fn record_size_ki_b(self, input: i32) -> Self {
            self.set_record_size_ki_b(std::option::Option::Some(input))
        }
        /// Sets or clears the suggested record size (KiB).
        pub fn set_record_size_ki_b(mut self, input: std::option::Option<i32>) -> Self {
            self.record_size_ki_b = input;
            self
        }
        /// Sets the data compression type (`NONE`, `ZSTD`, or `LZ4`).
        pub fn data_compression_type(
            self,
            input: crate::model::OpenZfsDataCompressionType,
        ) -> Self {
            self.set_data_compression_type(std::option::Option::Some(input))
        }
        /// Sets or clears the data compression type.
        pub fn set_data_compression_type(
            mut self,
            input: std::option::Option<crate::model::OpenZfsDataCompressionType>,
        ) -> Self {
            self.data_compression_type = input;
            self
        }
        /// Sets whether volume tags should be copied to snapshots.
        pub fn copy_tags_to_snapshots(self, input: bool) -> Self {
            self.set_copy_tags_to_snapshots(std::option::Option::Some(input))
        }
        /// Sets or clears the copy-tags-to-snapshots flag.
        pub fn set_copy_tags_to_snapshots(mut self, input: std::option::Option<bool>) -> Self {
            self.copy_tags_to_snapshots = input;
            self
        }
        /// Sets the snapshot to use as the origin of the volume's data.
        pub fn origin_snapshot(
            self,
            input: crate::model::CreateOpenZfsOriginSnapshotConfiguration,
        ) -> Self {
            self.set_origin_snapshot(std::option::Option::Some(input))
        }
        /// Sets or clears the origin snapshot configuration.
        pub fn set_origin_snapshot(
            mut self,
            input: std::option::Option<crate::model::CreateOpenZfsOriginSnapshotConfiguration>,
        ) -> Self {
            self.origin_snapshot = input;
            self
        }
        /// Sets whether the volume is read-only.
        pub fn read_only(self, input: bool) -> Self {
            self.set_read_only(std::option::Option::Some(input))
        }
        /// Sets or clears the read-only flag.
        pub fn set_read_only(mut self, input: std::option::Option<bool>) -> Self {
            self.read_only = input;
            self
        }
        /// Appends one NFS export configuration to `nfs_exports`.
        ///
        /// To override the contents of this collection use [`set_nfs_exports`](Self::set_nfs_exports).
        pub fn nfs_exports(mut self, input: crate::model::OpenZfsNfsExport) -> Self {
            self.nfs_exports
                .get_or_insert_with(std::vec::Vec::new)
                .push(input);
            self
        }
        /// Replaces (or clears) the entire `nfs_exports` collection.
        pub fn set_nfs_exports(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::OpenZfsNfsExport>>,
        ) -> Self {
            self.nfs_exports = input;
            self
        }
        /// Appends one user/group storage quota to `user_and_group_quotas`.
        ///
        /// To override the contents of this collection use [`set_user_and_group_quotas`](Self::set_user_and_group_quotas).
        pub fn user_and_group_quotas(
            mut self,
            input: crate::model::OpenZfsUserOrGroupQuota,
        ) -> Self {
            self.user_and_group_quotas
                .get_or_insert_with(std::vec::Vec::new)
                .push(input);
            self
        }
        /// Replaces (or clears) the entire `user_and_group_quotas` collection.
        pub fn set_user_and_group_quotas(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::OpenZfsUserOrGroupQuota>>,
        ) -> Self {
            self.user_and_group_quotas = input;
            self
        }
        /// Consumes the builder and constructs a [`CreateOpenZfsVolumeConfiguration`](crate::model::CreateOpenZfsVolumeConfiguration).
        pub fn build(self) -> crate::model::CreateOpenZfsVolumeConfiguration {
            // Destructure so any field added to `Builder` without a matching
            // line here becomes a compile error.
            let Self {
                parent_volume_id,
                storage_capacity_reservation_gi_b,
                storage_capacity_quota_gi_b,
                record_size_ki_b,
                data_compression_type,
                copy_tags_to_snapshots,
                origin_snapshot,
                read_only,
                nfs_exports,
                user_and_group_quotas,
            } = self;
            crate::model::CreateOpenZfsVolumeConfiguration {
                parent_volume_id,
                storage_capacity_reservation_gi_b,
                storage_capacity_quota_gi_b,
                record_size_ki_b,
                data_compression_type,
                copy_tags_to_snapshots,
                origin_snapshot,
                read_only,
                nfs_exports,
                user_and_group_quotas,
            }
        }
    }
}
impl CreateOpenZfsVolumeConfiguration {
    /// Creates a new builder-style object to manufacture [`CreateOpenZfsVolumeConfiguration`](crate::model::CreateOpenZfsVolumeConfiguration).
    pub fn builder() -> crate::model::create_open_zfs_volume_configuration::Builder {
        std::default::Default::default()
    }
}

/// <p>The snapshot configuration to use when creating an OpenZFS volume from a snapshot. </p>
// NOTE(review): generated Smithy model type — fields are `Option`-wrapped and
// `#[doc(hidden)]`. Read values through the accessor methods and construct
// instances with `CreateOpenZfsOriginSnapshotConfiguration::builder()`.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct CreateOpenZfsOriginSnapshotConfiguration {
    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    #[doc(hidden)]
    pub snapshot_arn: std::option::Option<std::string::String>,
    /// <p>The strategy used when copying data from the snapshot to the new volume. </p>
    /// <ul>
    /// <li> <p> <code>CLONE</code> - The new volume references the data in the origin snapshot. Cloning a snapshot is faster than copying data from the snapshot to a new volume and doesn't consume disk throughput. However, the origin snapshot can't be deleted if there is a volume using its copied data. </p> </li>
    /// <li> <p> <code>FULL_COPY</code> - Copies all data from the snapshot to the new volume. </p> </li>
    /// </ul>
    #[doc(hidden)]
    pub copy_strategy: std::option::Option<crate::model::OpenZfsCopyStrategy>,
}
impl CreateOpenZfsOriginSnapshotConfiguration {
    /// Returns the Amazon Resource Name (ARN) of the origin snapshot.
    pub fn snapshot_arn(&self) -> std::option::Option<&str> {
        self.snapshot_arn.as_ref().map(|arn| arn.as_str())
    }
    /// Returns the strategy used when copying data from the snapshot to the
    /// new volume (`CLONE` or `FULL_COPY`).
    pub fn copy_strategy(&self) -> std::option::Option<&crate::model::OpenZfsCopyStrategy> {
        self.copy_strategy.as_ref()
    }
}
/// See [`CreateOpenZfsOriginSnapshotConfiguration`](crate::model::CreateOpenZfsOriginSnapshotConfiguration).
pub mod create_open_zfs_origin_snapshot_configuration {

    /// A builder for [`CreateOpenZfsOriginSnapshotConfiguration`](crate::model::CreateOpenZfsOriginSnapshotConfiguration).
    ///
    /// Fluent setters (`foo(value)`) wrap the value in `Some`; the matching
    /// `set_foo(Option<...>)` setters accept an optional directly and can be
    /// used to clear a previously set value.
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) snapshot_arn: std::option::Option<std::string::String>,
        pub(crate) copy_strategy: std::option::Option<crate::model::OpenZfsCopyStrategy>,
    }
    impl Builder {
        /// Sets the Amazon Resource Name (ARN) of the snapshot to use as the
        /// origin of the volume's data.
        pub fn snapshot_arn(self, input: impl Into<std::string::String>) -> Self {
            self.set_snapshot_arn(std::option::Option::Some(input.into()))
        }
        /// Sets or clears the origin snapshot ARN.
        pub fn set_snapshot_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.snapshot_arn = input;
            self
        }
        /// Sets the strategy used when copying data from the snapshot to the
        /// new volume (`CLONE` or `FULL_COPY`).
        pub fn copy_strategy(self, input: crate::model::OpenZfsCopyStrategy) -> Self {
            self.set_copy_strategy(std::option::Option::Some(input))
        }
        /// Sets or clears the copy strategy.
        pub fn set_copy_strategy(
            mut self,
            input: std::option::Option<crate::model::OpenZfsCopyStrategy>,
        ) -> Self {
            self.copy_strategy = input;
            self
        }
        /// Consumes the builder and constructs a [`CreateOpenZfsOriginSnapshotConfiguration`](crate::model::CreateOpenZfsOriginSnapshotConfiguration).
        pub fn build(self) -> crate::model::CreateOpenZfsOriginSnapshotConfiguration {
            let Self {
                snapshot_arn,
                copy_strategy,
            } = self;
            crate::model::CreateOpenZfsOriginSnapshotConfiguration {
                snapshot_arn,
                copy_strategy,
            }
        }
    }
}
impl CreateOpenZfsOriginSnapshotConfiguration {
    /// Creates a new builder-style object to manufacture [`CreateOpenZfsOriginSnapshotConfiguration`](crate::model::CreateOpenZfsOriginSnapshotConfiguration).
    pub fn builder() -> crate::model::create_open_zfs_origin_snapshot_configuration::Builder {
        // The builder starts with all fields unset; its derived `Default` provides that state.
        std::default::Default::default()
    }
}

/// When writing a match expression against `ActiveDirectoryErrorType`, it is important to ensure
/// your code is forward-compatible. That is, if a match arm handles a case for a
/// feature that is supported by the service but has not been represented as an enum
/// variant in a current version of SDK, your code should continue to work when you
/// upgrade SDK to a future version in which the enum does include a variant for that
/// feature.
///
/// Here is an example of how you can make a match expression forward-compatible:
///
/// ```text
/// # let activedirectoryerrortype = unimplemented!();
/// match activedirectoryerrortype {
///     ActiveDirectoryErrorType::DomainNotFound => { /* ... */ },
///     ActiveDirectoryErrorType::IncompatibleDomainMode => { /* ... */ },
///     ActiveDirectoryErrorType::InvalidDomainStage => { /* ... */ },
///     ActiveDirectoryErrorType::WrongVpc => { /* ... */ },
///     other @ _ if other.as_str() == "NewFeature" => { /* handles a case for `NewFeature` */ },
///     _ => { /* ... */ },
/// }
/// ```
/// The above code demonstrates that when `activedirectoryerrortype` represents
/// `NewFeature`, the execution path will lead to the second last match arm,
/// even though the enum does not contain a variant `ActiveDirectoryErrorType::NewFeature`
/// in the current version of SDK. The reason is that the variable `other`,
/// created by the `@` operator, is bound to
/// `ActiveDirectoryErrorType::Unknown(UnknownVariantValue("NewFeature".to_owned()))`
/// and calling `as_str` on it yields `"NewFeature"`.
/// This match expression is forward-compatible when executed with a newer
/// version of SDK where the variant `ActiveDirectoryErrorType::NewFeature` is defined.
/// Specifically, when `activedirectoryerrortype` represents `NewFeature`,
/// the execution path will hit the second last match arm as before by virtue of
/// calling `as_str` on `ActiveDirectoryErrorType::NewFeature` also yielding `"NewFeature"`.
///
/// Explicitly matching on the `Unknown` variant should
/// be avoided for two reasons:
/// - The inner data `UnknownVariantValue` is opaque, and no further information can be extracted.
/// - It might inadvertently shadow other intended match arms.
/// <p>The type of error relating to Microsoft Active Directory. NOT_FOUND means that no
/// directory was found by specifying the given directory. INCOMPATIBLE_MODE means that the
/// directory specified is not a Microsoft AD directory. WRONG_VPC means that the specified
/// directory isn't accessible from the specified VPC. WRONG_STAGE means that the
/// specified directory isn't currently in the ACTIVE state.</p>
// NOTE: variant declaration order is load-bearing — `Ord`/`PartialOrd` are derived, so
// reordering variants would change comparison results.
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum ActiveDirectoryErrorType {
    /// Wire value: `DOMAIN_NOT_FOUND`.
    #[allow(missing_docs)] // documentation missing in model
    DomainNotFound,
    /// Wire value: `INCOMPATIBLE_DOMAIN_MODE`.
    #[allow(missing_docs)] // documentation missing in model
    IncompatibleDomainMode,
    /// Wire value: `INVALID_DOMAIN_STAGE`.
    #[allow(missing_docs)] // documentation missing in model
    InvalidDomainStage,
    /// Wire value: `WRONG_VPC`.
    #[allow(missing_docs)] // documentation missing in model
    WrongVpc,
    /// `Unknown` contains new variants that have been added since this code was generated.
    Unknown(crate::types::UnknownVariantValue),
}
impl std::convert::From<&str> for ActiveDirectoryErrorType {
    /// Converts a raw service string into the matching enum variant.
    /// Strings that this SDK version does not recognize are preserved in
    /// the `Unknown` variant for forward compatibility.
    fn from(s: &str) -> Self {
        match s {
            "DOMAIN_NOT_FOUND" => Self::DomainNotFound,
            "INCOMPATIBLE_DOMAIN_MODE" => Self::IncompatibleDomainMode,
            "INVALID_DOMAIN_STAGE" => Self::InvalidDomainStage,
            "WRONG_VPC" => Self::WrongVpc,
            unrecognized => {
                Self::Unknown(crate::types::UnknownVariantValue(unrecognized.to_owned()))
            }
        }
    }
}
impl std::str::FromStr for ActiveDirectoryErrorType {
    type Err = std::convert::Infallible;

    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(ActiveDirectoryErrorType::from(s))
    }
}
impl ActiveDirectoryErrorType {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            ActiveDirectoryErrorType::DomainNotFound => "DOMAIN_NOT_FOUND",
            ActiveDirectoryErrorType::IncompatibleDomainMode => "INCOMPATIBLE_DOMAIN_MODE",
            ActiveDirectoryErrorType::InvalidDomainStage => "INVALID_DOMAIN_STAGE",
            ActiveDirectoryErrorType::WrongVpc => "WRONG_VPC",
            ActiveDirectoryErrorType::Unknown(value) => value.as_str(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub const fn values() -> &'static [&'static str] {
        &[
            "DOMAIN_NOT_FOUND",
            "INCOMPATIBLE_DOMAIN_MODE",
            "INVALID_DOMAIN_STAGE",
            "WRONG_VPC",
        ]
    }
}
impl AsRef<str> for ActiveDirectoryErrorType {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

/// <p>The configuration that Amazon FSx uses to join the ONTAP storage virtual machine (SVM) to your self-managed (including on-premises) Microsoft Active Directory (AD) directory.</p>
///
/// Construct instances with [`CreateSvmActiveDirectoryConfiguration::builder`](crate::model::CreateSvmActiveDirectoryConfiguration::builder)
/// and read values through the accessor methods; the fields themselves are `#[doc(hidden)]`.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct CreateSvmActiveDirectoryConfiguration {
    /// <p>The NetBIOS name of the Active Directory computer object that will be created for your SVM.</p>
    #[doc(hidden)]
    pub net_bios_name: std::option::Option<std::string::String>,
    /// <p>The configuration that Amazon FSx uses to join a FSx for Windows File Server file system or an ONTAP storage virtual machine (SVM) to a self-managed (including on-premises) Microsoft Active Directory (AD) directory. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/self-managed-AD.html"> Using Amazon FSx with your self-managed Microsoft Active Directory</a> or <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-svms.html">Managing SVMs</a>.</p>
    #[doc(hidden)]
    pub self_managed_active_directory_configuration:
        std::option::Option<crate::model::SelfManagedActiveDirectoryConfiguration>,
}
impl CreateSvmActiveDirectoryConfiguration {
    /// <p>The NetBIOS name of the Active Directory computer object that will be created for your SVM.</p>
    pub fn net_bios_name(&self) -> std::option::Option<&str> {
        // Borrow the inner `String` as `&str` without cloning.
        self.net_bios_name.as_ref().map(|name| name.as_str())
    }
    /// <p>The configuration that Amazon FSx uses to join a FSx for Windows File Server file system or an ONTAP storage virtual machine (SVM) to a self-managed (including on-premises) Microsoft Active Directory (AD) directory. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/self-managed-AD.html"> Using Amazon FSx with your self-managed Microsoft Active Directory</a> or <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-svms.html">Managing SVMs</a>.</p>
    pub fn self_managed_active_directory_configuration(
        &self,
    ) -> std::option::Option<&crate::model::SelfManagedActiveDirectoryConfiguration> {
        // Convert `&Option<T>` into `Option<&T>` so callers borrow instead of take.
        match &self.self_managed_active_directory_configuration {
            Some(config) => Some(config),
            None => None,
        }
    }
}
/// See [`CreateSvmActiveDirectoryConfiguration`](crate::model::CreateSvmActiveDirectoryConfiguration).
pub mod create_svm_active_directory_configuration {

    /// A builder for [`CreateSvmActiveDirectoryConfiguration`](crate::model::CreateSvmActiveDirectoryConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) net_bios_name: std::option::Option<std::string::String>,
        pub(crate) self_managed_active_directory_configuration:
            std::option::Option<crate::model::SelfManagedActiveDirectoryConfiguration>,
    }
    impl Builder {
        /// <p>The NetBIOS name of the Active Directory computer object that will be created for your SVM.</p>
        pub fn net_bios_name(self, input: impl Into<std::string::String>) -> Self {
            // Convert the argument to an owned `String` and store it.
            Self {
                net_bios_name: Some(input.into()),
                ..self
            }
        }
        /// <p>The NetBIOS name of the Active Directory computer object that will be created for your SVM.</p>
        pub fn set_net_bios_name(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            // Replace (or clear, when `None`) the stored NetBIOS name.
            Self {
                net_bios_name: input,
                ..self
            }
        }
        /// <p>The configuration that Amazon FSx uses to join a FSx for Windows File Server file system or an ONTAP storage virtual machine (SVM) to a self-managed (including on-premises) Microsoft Active Directory (AD) directory. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/self-managed-AD.html"> Using Amazon FSx with your self-managed Microsoft Active Directory</a> or <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-svms.html">Managing SVMs</a>.</p>
        pub fn self_managed_active_directory_configuration(
            self,
            input: crate::model::SelfManagedActiveDirectoryConfiguration,
        ) -> Self {
            Self {
                self_managed_active_directory_configuration: Some(input),
                ..self
            }
        }
        /// <p>The configuration that Amazon FSx uses to join a FSx for Windows File Server file system or an ONTAP storage virtual machine (SVM) to a self-managed (including on-premises) Microsoft Active Directory (AD) directory. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/self-managed-AD.html"> Using Amazon FSx with your self-managed Microsoft Active Directory</a> or <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-svms.html">Managing SVMs</a>.</p>
        pub fn set_self_managed_active_directory_configuration(
            self,
            input: std::option::Option<crate::model::SelfManagedActiveDirectoryConfiguration>,
        ) -> Self {
            Self {
                self_managed_active_directory_configuration: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`CreateSvmActiveDirectoryConfiguration`](crate::model::CreateSvmActiveDirectoryConfiguration).
        pub fn build(self) -> crate::model::CreateSvmActiveDirectoryConfiguration {
            // Move both fields out of the builder into the finished model struct.
            let Self {
                net_bios_name,
                self_managed_active_directory_configuration,
            } = self;
            crate::model::CreateSvmActiveDirectoryConfiguration {
                net_bios_name,
                self_managed_active_directory_configuration,
            }
        }
    }
}
impl CreateSvmActiveDirectoryConfiguration {
    /// Creates a new builder-style object to manufacture [`CreateSvmActiveDirectoryConfiguration`](crate::model::CreateSvmActiveDirectoryConfiguration).
    pub fn builder() -> crate::model::create_svm_active_directory_configuration::Builder {
        // Every field starts unset; the builder's derived `Default` supplies that state.
        std::default::Default::default()
    }
}

/// <p>The configuration that Amazon FSx uses to join a FSx for Windows File Server file system or an ONTAP storage virtual machine (SVM) to a self-managed (including on-premises) Microsoft Active Directory (AD) directory. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/self-managed-AD.html"> Using Amazon FSx with your self-managed Microsoft Active Directory</a> or <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-svms.html">Managing SVMs</a>.</p>
///
/// Note: this type deliberately does not derive `Debug`; the hand-written
/// `Debug` implementation below redacts the sensitive `password` field.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct SelfManagedActiveDirectoryConfiguration {
    /// <p>The fully qualified domain name of the self-managed AD directory, such as <code>corp.example.com</code>.</p>
    #[doc(hidden)]
    pub domain_name: std::option::Option<std::string::String>,
    /// <p>(Optional) The fully qualified distinguished name of the organizational unit within your self-managed AD directory. Amazon FSx only accepts OU as the direct parent of the file system. An example is <code>OU=FSx,DC=yourdomain,DC=corp,DC=com</code>. To learn more, see <a href="https://tools.ietf.org/html/rfc2253">RFC 2253</a>. If none is provided, the FSx file system is created in the default location of your self-managed AD directory. </p> <important>
    /// <p>Only Organizational Unit (OU) objects can be the direct parent of the file system that you're creating.</p>
    /// </important>
    #[doc(hidden)]
    pub organizational_unit_distinguished_name: std::option::Option<std::string::String>,
    /// <p>(Optional) The name of the domain group whose members are granted administrative privileges for the file system. Administrative privileges include taking ownership of files and folders, setting audit controls (audit ACLs) on files and folders, and administering the file system remotely by using the FSx Remote PowerShell. The group that you specify must already exist in your domain. If you don't provide one, your AD domain's Domain Admins group is used.</p>
    #[doc(hidden)]
    pub file_system_administrators_group: std::option::Option<std::string::String>,
    /// <p>The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. This account must have the permission to join computers to the domain in the organizational unit provided in <code>OrganizationalUnitDistinguishedName</code>, or in the default location of your AD domain.</p>
    #[doc(hidden)]
    pub user_name: std::option::Option<std::string::String>,
    /// <p>The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain.</p>
    // Sensitive: redacted by the manual `Debug` impl for this type.
    #[doc(hidden)]
    pub password: std::option::Option<std::string::String>,
    /// <p>A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory. </p>
    #[doc(hidden)]
    pub dns_ips: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl SelfManagedActiveDirectoryConfiguration {
    /// <p>The fully qualified domain name of the self-managed AD directory, such as <code>corp.example.com</code>.</p>
    pub fn domain_name(&self) -> std::option::Option<&str> {
        self.domain_name.as_ref().map(std::string::String::as_str)
    }
    /// <p>(Optional) The fully qualified distinguished name of the organizational unit within your self-managed AD directory. Amazon FSx only accepts OU as the direct parent of the file system. An example is <code>OU=FSx,DC=yourdomain,DC=corp,DC=com</code>. To learn more, see <a href="https://tools.ietf.org/html/rfc2253">RFC 2253</a>. If none is provided, the FSx file system is created in the default location of your self-managed AD directory. </p> <important>
    /// <p>Only Organizational Unit (OU) objects can be the direct parent of the file system that you're creating.</p>
    /// </important>
    pub fn organizational_unit_distinguished_name(&self) -> std::option::Option<&str> {
        self.organizational_unit_distinguished_name
            .as_ref()
            .map(std::string::String::as_str)
    }
    /// <p>(Optional) The name of the domain group whose members are granted administrative privileges for the file system. Administrative privileges include taking ownership of files and folders, setting audit controls (audit ACLs) on files and folders, and administering the file system remotely by using the FSx Remote PowerShell. The group that you specify must already exist in your domain. If you don't provide one, your AD domain's Domain Admins group is used.</p>
    pub fn file_system_administrators_group(&self) -> std::option::Option<&str> {
        self.file_system_administrators_group
            .as_ref()
            .map(std::string::String::as_str)
    }
    /// <p>The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. This account must have the permission to join computers to the domain in the organizational unit provided in <code>OrganizationalUnitDistinguishedName</code>, or in the default location of your AD domain.</p>
    pub fn user_name(&self) -> std::option::Option<&str> {
        self.user_name.as_ref().map(std::string::String::as_str)
    }
    /// <p>The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain.</p>
    pub fn password(&self) -> std::option::Option<&str> {
        self.password.as_ref().map(std::string::String::as_str)
    }
    /// <p>A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory. </p>
    pub fn dns_ips(&self) -> std::option::Option<&[std::string::String]> {
        // Borrow the backing `Vec` as a slice without cloning.
        self.dns_ips.as_ref().map(|ips| ips.as_slice())
    }
}
impl std::fmt::Debug for SelfManagedActiveDirectoryConfiguration {
    /// Formats the configuration for diagnostics, redacting the sensitive
    /// `password` field so credentials never reach logs.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("SelfManagedActiveDirectoryConfiguration")
            .field("domain_name", &self.domain_name)
            .field(
                "organizational_unit_distinguished_name",
                &self.organizational_unit_distinguished_name,
            )
            .field(
                "file_system_administrators_group",
                &self.file_system_administrators_group,
            )
            .field("user_name", &self.user_name)
            // Never emit the real password.
            .field("password", &"*** Sensitive Data Redacted ***")
            .field("dns_ips", &self.dns_ips)
            .finish()
    }
}
/// See [`SelfManagedActiveDirectoryConfiguration`](crate::model::SelfManagedActiveDirectoryConfiguration).
pub mod self_managed_active_directory_configuration {

    /// A builder for [`SelfManagedActiveDirectoryConfiguration`](crate::model::SelfManagedActiveDirectoryConfiguration).
    // `Debug` is hand-written below so the `password` field can be redacted.
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default)]
    pub struct Builder {
        pub(crate) domain_name: std::option::Option<std::string::String>,
        pub(crate) organizational_unit_distinguished_name: std::option::Option<std::string::String>,
        pub(crate) file_system_administrators_group: std::option::Option<std::string::String>,
        pub(crate) user_name: std::option::Option<std::string::String>,
        pub(crate) password: std::option::Option<std::string::String>,
        pub(crate) dns_ips: std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// <p>The fully qualified domain name of the self-managed AD directory, such as <code>corp.example.com</code>.</p>
        pub fn domain_name(self, input: impl Into<std::string::String>) -> Self {
            Self {
                domain_name: Some(input.into()),
                ..self
            }
        }
        /// <p>The fully qualified domain name of the self-managed AD directory, such as <code>corp.example.com</code>.</p>
        pub fn set_domain_name(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                domain_name: input,
                ..self
            }
        }
        /// <p>(Optional) The fully qualified distinguished name of the organizational unit within your self-managed AD directory. Amazon FSx only accepts OU as the direct parent of the file system. An example is <code>OU=FSx,DC=yourdomain,DC=corp,DC=com</code>. To learn more, see <a href="https://tools.ietf.org/html/rfc2253">RFC 2253</a>. If none is provided, the FSx file system is created in the default location of your self-managed AD directory. </p> <important>
        /// <p>Only Organizational Unit (OU) objects can be the direct parent of the file system that you're creating.</p>
        /// </important>
        pub fn organizational_unit_distinguished_name(
            self,
            input: impl Into<std::string::String>,
        ) -> Self {
            Self {
                organizational_unit_distinguished_name: Some(input.into()),
                ..self
            }
        }
        /// <p>(Optional) The fully qualified distinguished name of the organizational unit within your self-managed AD directory. Amazon FSx only accepts OU as the direct parent of the file system. An example is <code>OU=FSx,DC=yourdomain,DC=corp,DC=com</code>. To learn more, see <a href="https://tools.ietf.org/html/rfc2253">RFC 2253</a>. If none is provided, the FSx file system is created in the default location of your self-managed AD directory. </p> <important>
        /// <p>Only Organizational Unit (OU) objects can be the direct parent of the file system that you're creating.</p>
        /// </important>
        pub fn set_organizational_unit_distinguished_name(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                organizational_unit_distinguished_name: input,
                ..self
            }
        }
        /// <p>(Optional) The name of the domain group whose members are granted administrative privileges for the file system. Administrative privileges include taking ownership of files and folders, setting audit controls (audit ACLs) on files and folders, and administering the file system remotely by using the FSx Remote PowerShell. The group that you specify must already exist in your domain. If you don't provide one, your AD domain's Domain Admins group is used.</p>
        pub fn file_system_administrators_group(
            self,
            input: impl Into<std::string::String>,
        ) -> Self {
            Self {
                file_system_administrators_group: Some(input.into()),
                ..self
            }
        }
        /// <p>(Optional) The name of the domain group whose members are granted administrative privileges for the file system. Administrative privileges include taking ownership of files and folders, setting audit controls (audit ACLs) on files and folders, and administering the file system remotely by using the FSx Remote PowerShell. The group that you specify must already exist in your domain. If you don't provide one, your AD domain's Domain Admins group is used.</p>
        pub fn set_file_system_administrators_group(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                file_system_administrators_group: input,
                ..self
            }
        }
        /// <p>The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. This account must have the permission to join computers to the domain in the organizational unit provided in <code>OrganizationalUnitDistinguishedName</code>, or in the default location of your AD domain.</p>
        pub fn user_name(self, input: impl Into<std::string::String>) -> Self {
            Self {
                user_name: Some(input.into()),
                ..self
            }
        }
        /// <p>The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. This account must have the permission to join computers to the domain in the organizational unit provided in <code>OrganizationalUnitDistinguishedName</code>, or in the default location of your AD domain.</p>
        pub fn set_user_name(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                user_name: input,
                ..self
            }
        }
        /// <p>The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain.</p>
        pub fn password(self, input: impl Into<std::string::String>) -> Self {
            Self {
                password: Some(input.into()),
                ..self
            }
        }
        /// <p>The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain.</p>
        pub fn set_password(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                password: input,
                ..self
            }
        }
        /// Appends an item to `dns_ips`.
        ///
        /// To override the contents of this collection use [`set_dns_ips`](Self::set_dns_ips).
        ///
        /// <p>A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory. </p>
        pub fn dns_ips(mut self, input: impl Into<std::string::String>) -> Self {
            // Lazily create the backing `Vec` on first append, then push in place.
            self.dns_ips
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// <p>A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory. </p>
        pub fn set_dns_ips(
            self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            Self {
                dns_ips: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`SelfManagedActiveDirectoryConfiguration`](crate::model::SelfManagedActiveDirectoryConfiguration).
        pub fn build(self) -> crate::model::SelfManagedActiveDirectoryConfiguration {
            // Move every accumulated field out of the builder into the model struct.
            let Self {
                domain_name,
                organizational_unit_distinguished_name,
                file_system_administrators_group,
                user_name,
                password,
                dns_ips,
            } = self;
            crate::model::SelfManagedActiveDirectoryConfiguration {
                domain_name,
                organizational_unit_distinguished_name,
                file_system_administrators_group,
                user_name,
                password,
                dns_ips,
            }
        }
    }
    impl std::fmt::Debug for Builder {
        /// Formats the builder for diagnostics, redacting the sensitive `password` field.
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            f.debug_struct("Builder")
                .field("domain_name", &self.domain_name)
                .field(
                    "organizational_unit_distinguished_name",
                    &self.organizational_unit_distinguished_name,
                )
                .field(
                    "file_system_administrators_group",
                    &self.file_system_administrators_group,
                )
                .field("user_name", &self.user_name)
                // Never emit the real password.
                .field("password", &"*** Sensitive Data Redacted ***")
                .field("dns_ips", &self.dns_ips)
                .finish()
        }
    }
}
impl SelfManagedActiveDirectoryConfiguration {
    /// Creates a new builder-style object to manufacture [`SelfManagedActiveDirectoryConfiguration`](crate::model::SelfManagedActiveDirectoryConfiguration).
    pub fn builder() -> crate::model::self_managed_active_directory_configuration::Builder {
        // Every field starts unset; the builder's derived `Default` supplies that state.
        std::default::Default::default()
    }
}

/// <p>The Amazon FSx for OpenZFS configuration properties for the file system that you are creating.</p>
///
/// Every field is `Option`-wrapped and `#[doc(hidden)]`; values are read through
/// the accessor methods on the type, following the builder pattern used throughout
/// this generated module.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct CreateFileSystemOpenZfsConfiguration {
    /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
    #[doc(hidden)]
    pub automatic_backup_retention_days: std::option::Option<i32>,
    /// <p>A Boolean value indicating whether tags for the file system should be copied to backups. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is <code>true</code>, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.</p>
    #[doc(hidden)]
    pub copy_tags_to_backups: std::option::Option<bool>,
    /// <p>A Boolean value indicating whether tags for the file system should be copied to volumes. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the file system are copied to volumes where the user doesn't specify tags. If this value is <code>true</code>, and you specify one or more tags, only the specified tags are copied to volumes. If you specify one or more tags when creating the volume, no tags are copied from the file system, regardless of this value.</p>
    #[doc(hidden)]
    pub copy_tags_to_volumes: std::option::Option<bool>,
    /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
    #[doc(hidden)]
    pub daily_automatic_backup_start_time: std::option::Option<std::string::String>,
    /// <p>Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following:</p>
    /// <ul>
    /// <li> <p> <code>SINGLE_AZ_1</code>- (Default) Creates file systems with throughput capacities of 64 - 4,096 MB/s. <code>Single_AZ_1</code> is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available, except US West (Oregon).</p> </li>
    /// <li> <p> <code>SINGLE_AZ_2</code>- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. <code>Single_AZ_2</code> is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), and Europe (Ireland) Amazon Web Services Regions.</p> </li>
    /// </ul>
    /// <p>For more information, see: <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/availability-durability.html#available-aws-regions">Deployment type availability</a> and <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#zfs-fs-performance">File system performance</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>
    #[doc(hidden)]
    pub deployment_type: std::option::Option<crate::model::OpenZfsDeploymentType>,
    /// <p>Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MB/s). Valid values depend on the DeploymentType you choose, as follows:</p>
    /// <ul>
    /// <li> <p>For <code>SINGLE_AZ_1</code>, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.</p> </li>
    /// <li> <p>For <code>SINGLE_AZ_2</code>, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MB/s.</p> </li>
    /// </ul>
    /// <p>You pay for additional throughput capacity that you provision.</p>
    #[doc(hidden)]
    pub throughput_capacity: std::option::Option<i32>,
    /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
    /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
    /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
    /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
    #[doc(hidden)]
    pub weekly_maintenance_start_time: std::option::Option<std::string::String>,
    /// <p>The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how the amount was provisioned (by the customer or by the system).</p>
    #[doc(hidden)]
    pub disk_iops_configuration: std::option::Option<crate::model::DiskIopsConfiguration>,
    /// <p>The configuration Amazon FSx uses when creating the root value of the Amazon FSx for OpenZFS file system. All volumes are children of the root volume. </p>
    #[doc(hidden)]
    pub root_volume_configuration:
        std::option::Option<crate::model::OpenZfsCreateRootVolumeConfiguration>,
}
impl CreateFileSystemOpenZfsConfiguration {
    /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
    pub fn automatic_backup_retention_days(&self) -> std::option::Option<i32> {
        self.automatic_backup_retention_days
    }
    /// <p>A Boolean value indicating whether tags for the file system should be copied to backups. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is <code>true</code>, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.</p>
    pub fn copy_tags_to_backups(&self) -> std::option::Option<bool> {
        self.copy_tags_to_backups
    }
    /// <p>A Boolean value indicating whether tags for the file system should be copied to volumes. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the file system are copied to volumes where the user doesn't specify tags. If this value is <code>true</code>, and you specify one or more tags, only the specified tags are copied to volumes. If you specify one or more tags when creating the volume, no tags are copied from the file system, regardless of this value.</p>
    pub fn copy_tags_to_volumes(&self) -> std::option::Option<bool> {
        self.copy_tags_to_volumes
    }
    /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
    pub fn daily_automatic_backup_start_time(&self) -> std::option::Option<&str> {
        self.daily_automatic_backup_start_time.as_deref()
    }
    /// <p>Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following:</p>
    /// <ul>
    /// <li> <p> <code>SINGLE_AZ_1</code>- (Default) Creates file systems with throughput capacities of 64 - 4,096 MB/s. <code>Single_AZ_1</code> is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available, except US West (Oregon).</p> </li>
    /// <li> <p> <code>SINGLE_AZ_2</code>- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. <code>Single_AZ_2</code> is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), and Europe (Ireland) Amazon Web Services Regions.</p> </li>
    /// </ul>
    /// <p>For more information, see: <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/availability-durability.html#available-aws-regions">Deployment type availability</a> and <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#zfs-fs-performance">File system performance</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>
    pub fn deployment_type(&self) -> std::option::Option<&crate::model::OpenZfsDeploymentType> {
        self.deployment_type.as_ref()
    }
    /// <p>Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MB/s). Valid values depend on the DeploymentType you choose, as follows:</p>
    /// <ul>
    /// <li> <p>For <code>SINGLE_AZ_1</code>, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.</p> </li>
    /// <li> <p>For <code>SINGLE_AZ_2</code>, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MB/s.</p> </li>
    /// </ul>
    /// <p>You pay for additional throughput capacity that you provision.</p>
    pub fn throughput_capacity(&self) -> std::option::Option<i32> {
        self.throughput_capacity
    }
    /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
    /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
    /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
    /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
    pub fn weekly_maintenance_start_time(&self) -> std::option::Option<&str> {
        self.weekly_maintenance_start_time.as_deref()
    }
    /// <p>The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how the amount was provisioned (by the customer or by the system).</p>
    pub fn disk_iops_configuration(
        &self,
    ) -> std::option::Option<&crate::model::DiskIopsConfiguration> {
        self.disk_iops_configuration.as_ref()
    }
    /// <p>The configuration Amazon FSx uses when creating the root volume of the Amazon FSx for OpenZFS file system. All volumes are children of the root volume. </p>
    pub fn root_volume_configuration(
        &self,
    ) -> std::option::Option<&crate::model::OpenZfsCreateRootVolumeConfiguration> {
        self.root_volume_configuration.as_ref()
    }
}
/// See [`CreateFileSystemOpenZfsConfiguration`](crate::model::CreateFileSystemOpenZfsConfiguration).
pub mod create_file_system_open_zfs_configuration {

    /// A builder for [`CreateFileSystemOpenZfsConfiguration`](crate::model::CreateFileSystemOpenZfsConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) automatic_backup_retention_days: std::option::Option<i32>,
        pub(crate) copy_tags_to_backups: std::option::Option<bool>,
        pub(crate) copy_tags_to_volumes: std::option::Option<bool>,
        pub(crate) daily_automatic_backup_start_time: std::option::Option<std::string::String>,
        pub(crate) deployment_type: std::option::Option<crate::model::OpenZfsDeploymentType>,
        pub(crate) throughput_capacity: std::option::Option<i32>,
        pub(crate) weekly_maintenance_start_time: std::option::Option<std::string::String>,
        pub(crate) disk_iops_configuration:
            std::option::Option<crate::model::DiskIopsConfiguration>,
        pub(crate) root_volume_configuration:
            std::option::Option<crate::model::OpenZfsCreateRootVolumeConfiguration>,
    }
    impl Builder {
        /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
        pub fn automatic_backup_retention_days(mut self, input: i32) -> Self {
            self.automatic_backup_retention_days = Some(input);
            self
        }
        /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
        pub fn set_automatic_backup_retention_days(
            mut self,
            input: std::option::Option<i32>,
        ) -> Self {
            self.automatic_backup_retention_days = input;
            self
        }
        /// <p>A Boolean value indicating whether tags for the file system should be copied to backups. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is <code>true</code>, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.</p>
        pub fn copy_tags_to_backups(mut self, input: bool) -> Self {
            self.copy_tags_to_backups = Some(input);
            self
        }
        /// <p>A Boolean value indicating whether tags for the file system should be copied to backups. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is <code>true</code>, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.</p>
        pub fn set_copy_tags_to_backups(mut self, input: std::option::Option<bool>) -> Self {
            self.copy_tags_to_backups = input;
            self
        }
        /// <p>A Boolean value indicating whether tags for the file system should be copied to volumes. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the file system are copied to volumes where the user doesn't specify tags. If this value is <code>true</code>, and you specify one or more tags, only the specified tags are copied to volumes. If you specify one or more tags when creating the volume, no tags are copied from the file system, regardless of this value.</p>
        pub fn copy_tags_to_volumes(mut self, input: bool) -> Self {
            self.copy_tags_to_volumes = Some(input);
            self
        }
        /// <p>A Boolean value indicating whether tags for the file system should be copied to volumes. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the file system are copied to volumes where the user doesn't specify tags. If this value is <code>true</code>, and you specify one or more tags, only the specified tags are copied to volumes. If you specify one or more tags when creating the volume, no tags are copied from the file system, regardless of this value.</p>
        pub fn set_copy_tags_to_volumes(mut self, input: std::option::Option<bool>) -> Self {
            self.copy_tags_to_volumes = input;
            self
        }
        /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
        pub fn daily_automatic_backup_start_time(
            mut self,
            input: impl Into<std::string::String>,
        ) -> Self {
            self.daily_automatic_backup_start_time = Some(input.into());
            self
        }
        /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
        pub fn set_daily_automatic_backup_start_time(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.daily_automatic_backup_start_time = input;
            self
        }
        /// <p>Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following:</p>
        /// <ul>
        /// <li> <p> <code>SINGLE_AZ_1</code>- (Default) Creates file systems with throughput capacities of 64 - 4,096 MB/s. <code>Single_AZ_1</code> is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available, except US West (Oregon).</p> </li>
        /// <li> <p> <code>SINGLE_AZ_2</code>- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. <code>Single_AZ_2</code> is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), and Europe (Ireland) Amazon Web Services Regions.</p> </li>
        /// </ul>
        /// <p>For more information, see: <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/availability-durability.html#available-aws-regions">Deployment type availability</a> and <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#zfs-fs-performance">File system performance</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>
        pub fn deployment_type(mut self, input: crate::model::OpenZfsDeploymentType) -> Self {
            self.deployment_type = Some(input);
            self
        }
        /// <p>Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following:</p>
        /// <ul>
        /// <li> <p> <code>SINGLE_AZ_1</code>- (Default) Creates file systems with throughput capacities of 64 - 4,096 MB/s. <code>Single_AZ_1</code> is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available, except US West (Oregon).</p> </li>
        /// <li> <p> <code>SINGLE_AZ_2</code>- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. <code>Single_AZ_2</code> is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), and Europe (Ireland) Amazon Web Services Regions.</p> </li>
        /// </ul>
        /// <p>For more information, see: <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/availability-durability.html#available-aws-regions">Deployment type availability</a> and <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#zfs-fs-performance">File system performance</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>
        pub fn set_deployment_type(
            mut self,
            input: std::option::Option<crate::model::OpenZfsDeploymentType>,
        ) -> Self {
            self.deployment_type = input;
            self
        }
        /// <p>Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MB/s). Valid values depend on the DeploymentType you choose, as follows:</p>
        /// <ul>
        /// <li> <p>For <code>SINGLE_AZ_1</code>, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.</p> </li>
        /// <li> <p>For <code>SINGLE_AZ_2</code>, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MB/s.</p> </li>
        /// </ul>
        /// <p>You pay for additional throughput capacity that you provision.</p>
        pub fn throughput_capacity(mut self, input: i32) -> Self {
            self.throughput_capacity = Some(input);
            self
        }
        /// <p>Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MB/s). Valid values depend on the DeploymentType you choose, as follows:</p>
        /// <ul>
        /// <li> <p>For <code>SINGLE_AZ_1</code>, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.</p> </li>
        /// <li> <p>For <code>SINGLE_AZ_2</code>, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MB/s.</p> </li>
        /// </ul>
        /// <p>You pay for additional throughput capacity that you provision.</p>
        pub fn set_throughput_capacity(mut self, input: std::option::Option<i32>) -> Self {
            self.throughput_capacity = input;
            self
        }
        /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
        /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
        /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
        /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
        pub fn weekly_maintenance_start_time(
            mut self,
            input: impl Into<std::string::String>,
        ) -> Self {
            self.weekly_maintenance_start_time = Some(input.into());
            self
        }
        /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
        /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
        /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
        /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
        pub fn set_weekly_maintenance_start_time(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.weekly_maintenance_start_time = input;
            self
        }
        /// <p>The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how the amount was provisioned (by the customer or by the system).</p>
        pub fn disk_iops_configuration(
            mut self,
            input: crate::model::DiskIopsConfiguration,
        ) -> Self {
            self.disk_iops_configuration = Some(input);
            self
        }
        /// <p>The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how the amount was provisioned (by the customer or by the system).</p>
        pub fn set_disk_iops_configuration(
            mut self,
            input: std::option::Option<crate::model::DiskIopsConfiguration>,
        ) -> Self {
            self.disk_iops_configuration = input;
            self
        }
        /// <p>The configuration Amazon FSx uses when creating the root volume of the Amazon FSx for OpenZFS file system. All volumes are children of the root volume. </p>
        pub fn root_volume_configuration(
            mut self,
            input: crate::model::OpenZfsCreateRootVolumeConfiguration,
        ) -> Self {
            self.root_volume_configuration = Some(input);
            self
        }
        /// <p>The configuration Amazon FSx uses when creating the root volume of the Amazon FSx for OpenZFS file system. All volumes are children of the root volume. </p>
        pub fn set_root_volume_configuration(
            mut self,
            input: std::option::Option<crate::model::OpenZfsCreateRootVolumeConfiguration>,
        ) -> Self {
            self.root_volume_configuration = input;
            self
        }
        /// Consumes the builder and constructs a [`CreateFileSystemOpenZfsConfiguration`](crate::model::CreateFileSystemOpenZfsConfiguration).
        pub fn build(self) -> crate::model::CreateFileSystemOpenZfsConfiguration {
            crate::model::CreateFileSystemOpenZfsConfiguration {
                automatic_backup_retention_days: self.automatic_backup_retention_days,
                copy_tags_to_backups: self.copy_tags_to_backups,
                copy_tags_to_volumes: self.copy_tags_to_volumes,
                daily_automatic_backup_start_time: self.daily_automatic_backup_start_time,
                deployment_type: self.deployment_type,
                throughput_capacity: self.throughput_capacity,
                weekly_maintenance_start_time: self.weekly_maintenance_start_time,
                disk_iops_configuration: self.disk_iops_configuration,
                root_volume_configuration: self.root_volume_configuration,
            }
        }
    }
}
impl CreateFileSystemOpenZfsConfiguration {
    /// Creates a new builder-style object to manufacture [`CreateFileSystemOpenZfsConfiguration`](crate::model::CreateFileSystemOpenZfsConfiguration).
    pub fn builder() -> crate::model::create_file_system_open_zfs_configuration::Builder {
        // The builder derives `Default`; the return type fixes which `Default` impl is used.
        Default::default()
    }
}

/// <p>The configuration of an Amazon FSx for OpenZFS root volume.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct OpenZfsCreateRootVolumeConfiguration {
    /// <p>Specifies the record size of an OpenZFS root volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the default record size. Database workflows can benefit from a smaller record size, while streaming workflows can benefit from a larger record size. For additional guidance on setting a custom record size, see <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#performance-tips-zfs"> Tips for maximizing performance</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>
    #[doc(hidden)]
    pub record_size_ki_b: std::option::Option<i32>,
    /// <p>Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code> by default.</p>
    /// <ul>
    /// <li> <p> <code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</p> </li>
    /// <li> <p> <code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage utilization.</p> </li>
    /// <li> <p> <code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub data_compression_type: std::option::Option<crate::model::OpenZfsDataCompressionType>,
    /// <p>The configuration object for mounting a file system. This is a list, so more than one NFS export configuration can be supplied.</p>
    #[doc(hidden)]
    pub nfs_exports: std::option::Option<std::vec::Vec<crate::model::OpenZfsNfsExport>>,
    /// <p>An object specifying how much storage users or groups can use on the volume. This is a list, so quotas for multiple users or groups can be supplied.</p>
    #[doc(hidden)]
    pub user_and_group_quotas:
        std::option::Option<std::vec::Vec<crate::model::OpenZfsUserOrGroupQuota>>,
    /// <p>A Boolean value indicating whether tags for the volume should be copied to snapshots of the volume. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value. </p>
    #[doc(hidden)]
    pub copy_tags_to_snapshots: std::option::Option<bool>,
    /// <p>A Boolean value indicating whether the volume is read-only. Setting this value to <code>true</code> can be useful after you have completed changes to a volume and no longer want changes to occur. </p>
    #[doc(hidden)]
    pub read_only: std::option::Option<bool>,
}
impl OpenZfsCreateRootVolumeConfiguration {
    /// <p>Specifies the record size of an OpenZFS root volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the default record size. Database workflows can benefit from a smaller record size, while streaming workflows can benefit from a larger record size. For additional guidance on setting a custom record size, see <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#performance-tips-zfs"> Tips for maximizing performance</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>
    pub fn record_size_ki_b(&self) -> std::option::Option<i32> {
        self.record_size_ki_b
    }
    /// <p>Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code> by default.</p>
    /// <ul>
    /// <li> <p> <code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</p> </li>
    /// <li> <p> <code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage utilization.</p> </li>
    /// <li> <p> <code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</p> </li>
    /// </ul>
    pub fn data_compression_type(&self) -> std::option::Option<&crate::model::OpenZfsDataCompressionType> {
        self.data_compression_type.as_ref()
    }
    /// <p>The configuration object for mounting a file system.</p>
    pub fn nfs_exports(&self) -> std::option::Option<&[crate::model::OpenZfsNfsExport]> {
        // Borrow the owned Vec as a slice without cloning.
        self.nfs_exports.as_ref().map(|exports| exports.as_slice())
    }
    /// <p>An object specifying how much storage users or groups can use on the volume.</p>
    pub fn user_and_group_quotas(&self) -> std::option::Option<&[crate::model::OpenZfsUserOrGroupQuota]> {
        // Borrow the owned Vec as a slice without cloning.
        self.user_and_group_quotas.as_ref().map(|quotas| quotas.as_slice())
    }
    /// <p>A Boolean value indicating whether tags for the volume should be copied to snapshots of the volume. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value. </p>
    pub fn copy_tags_to_snapshots(&self) -> std::option::Option<bool> {
        self.copy_tags_to_snapshots
    }
    /// <p>A Boolean value indicating whether the volume is read-only. Setting this value to <code>true</code> can be useful after you have completed changes to a volume and no longer want changes to occur. </p>
    pub fn read_only(&self) -> std::option::Option<bool> {
        self.read_only
    }
}
/// See [`OpenZfsCreateRootVolumeConfiguration`](crate::model::OpenZfsCreateRootVolumeConfiguration).
pub mod open_zfs_create_root_volume_configuration {

    /// A builder for [`OpenZfsCreateRootVolumeConfiguration`](crate::model::OpenZfsCreateRootVolumeConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) record_size_ki_b: std::option::Option<i32>,
        pub(crate) data_compression_type:
            std::option::Option<crate::model::OpenZfsDataCompressionType>,
        pub(crate) nfs_exports: std::option::Option<std::vec::Vec<crate::model::OpenZfsNfsExport>>,
        pub(crate) user_and_group_quotas:
            std::option::Option<std::vec::Vec<crate::model::OpenZfsUserOrGroupQuota>>,
        pub(crate) copy_tags_to_snapshots: std::option::Option<bool>,
        pub(crate) read_only: std::option::Option<bool>,
    }
    impl Builder {
        /// <p>Specifies the record size of an OpenZFS root volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the default record size. Database workflows can benefit from a smaller record size, while streaming workflows can benefit from a larger record size. For additional guidance on setting a custom record size, see <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#performance-tips-zfs"> Tips for maximizing performance</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>
        pub fn record_size_ki_b(mut self, value: i32) -> Self {
            self.record_size_ki_b = Some(value);
            self
        }
        /// <p>Specifies the record size of an OpenZFS root volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the default record size. Database workflows can benefit from a smaller record size, while streaming workflows can benefit from a larger record size. For additional guidance on setting a custom record size, see <a href="https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#performance-tips-zfs"> Tips for maximizing performance</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>
        pub fn set_record_size_ki_b(mut self, value: std::option::Option<i32>) -> Self {
            self.record_size_ki_b = value;
            self
        }
        /// <p>Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code> by default.</p>
        /// <ul>
        /// <li> <p> <code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</p> </li>
        /// <li> <p> <code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage utilization.</p> </li>
        /// <li> <p> <code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</p> </li>
        /// </ul>
        pub fn data_compression_type(mut self, value: crate::model::OpenZfsDataCompressionType) -> Self {
            self.data_compression_type = Some(value);
            self
        }
        /// <p>Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code> by default.</p>
        /// <ul>
        /// <li> <p> <code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</p> </li>
        /// <li> <p> <code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage utilization.</p> </li>
        /// <li> <p> <code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</p> </li>
        /// </ul>
        pub fn set_data_compression_type(
            mut self,
            value: std::option::Option<crate::model::OpenZfsDataCompressionType>,
        ) -> Self {
            self.data_compression_type = value;
            self
        }
        /// Appends an item to `nfs_exports`.
        ///
        /// To override the contents of this collection use [`set_nfs_exports`](Self::set_nfs_exports).
        ///
        /// <p>The configuration object for mounting a file system.</p>
        pub fn nfs_exports(mut self, value: crate::model::OpenZfsNfsExport) -> Self {
            // Lazily create the Vec on first append, then push in place.
            self.nfs_exports.get_or_insert_with(Vec::new).push(value);
            self
        }
        /// <p>The configuration object for mounting a file system.</p>
        pub fn set_nfs_exports(
            mut self,
            value: std::option::Option<std::vec::Vec<crate::model::OpenZfsNfsExport>>,
        ) -> Self {
            self.nfs_exports = value;
            self
        }
        /// Appends an item to `user_and_group_quotas`.
        ///
        /// To override the contents of this collection use [`set_user_and_group_quotas`](Self::set_user_and_group_quotas).
        ///
        /// <p>An object specifying how much storage users or groups can use on the volume.</p>
        pub fn user_and_group_quotas(mut self, value: crate::model::OpenZfsUserOrGroupQuota) -> Self {
            // Lazily create the Vec on first append, then push in place.
            self.user_and_group_quotas.get_or_insert_with(Vec::new).push(value);
            self
        }
        /// <p>An object specifying how much storage users or groups can use on the volume.</p>
        pub fn set_user_and_group_quotas(
            mut self,
            value: std::option::Option<std::vec::Vec<crate::model::OpenZfsUserOrGroupQuota>>,
        ) -> Self {
            self.user_and_group_quotas = value;
            self
        }
        /// <p>A Boolean value indicating whether tags for the volume should be copied to snapshots of the volume. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value. </p>
        pub fn copy_tags_to_snapshots(mut self, value: bool) -> Self {
            self.copy_tags_to_snapshots = Some(value);
            self
        }
        /// <p>A Boolean value indicating whether tags for the volume should be copied to snapshots of the volume. This value defaults to <code>false</code>. If it's set to <code>true</code>, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is <code>true</code> and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value. </p>
        pub fn set_copy_tags_to_snapshots(mut self, value: std::option::Option<bool>) -> Self {
            self.copy_tags_to_snapshots = value;
            self
        }
        /// <p>A Boolean value indicating whether the volume is read-only. Setting this value to <code>true</code> can be useful after you have completed changes to a volume and no longer want changes to occur. </p>
        pub fn read_only(mut self, value: bool) -> Self {
            self.read_only = Some(value);
            self
        }
        /// <p>A Boolean value indicating whether the volume is read-only. Setting this value to <code>true</code> can be useful after you have completed changes to a volume and no longer want changes to occur. </p>
        pub fn set_read_only(mut self, value: std::option::Option<bool>) -> Self {
            self.read_only = value;
            self
        }
        /// Consumes the builder and constructs a [`OpenZfsCreateRootVolumeConfiguration`](crate::model::OpenZfsCreateRootVolumeConfiguration).
        pub fn build(self) -> crate::model::OpenZfsCreateRootVolumeConfiguration {
            let Self {
                record_size_ki_b,
                data_compression_type,
                nfs_exports,
                user_and_group_quotas,
                copy_tags_to_snapshots,
                read_only,
            } = self;
            crate::model::OpenZfsCreateRootVolumeConfiguration {
                record_size_ki_b,
                data_compression_type,
                nfs_exports,
                user_and_group_quotas,
                copy_tags_to_snapshots,
                read_only,
            }
        }
    }
}
impl OpenZfsCreateRootVolumeConfiguration {
    /// Creates a new builder-style object to manufacture [`OpenZfsCreateRootVolumeConfiguration`](crate::model::OpenZfsCreateRootVolumeConfiguration).
    pub fn builder() -> crate::model::open_zfs_create_root_volume_configuration::Builder {
        // The declared return type drives inference to the module's `Builder::default()`.
        Default::default()
    }
}

// NOTE(review): smithy-rs generated model struct (file header says "DO NOT EDIT");
// the doc comments below mirror the service model — changes belong in the Smithy
// model, not here.
/// <p>The Lustre configuration for the file system being created.</p> <note>
/// <p>The following parameters are not supported for file systems with the <code>Persistent_2</code> deployment type. Instead, use <code>CreateDataRepositoryAssociation</code> to create a data repository association to link your Lustre file system to a data repository.</p>
/// <ul>
/// <li> <p> <code>AutoImportPolicy</code> </p> </li>
/// <li> <p> <code>ExportPath</code> </p> </li>
/// <li> <p> <code>ImportedChunkSize</code> </p> </li>
/// <li> <p> <code>ImportPath</code> </p> </li>
/// </ul>
/// </note>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct CreateFileSystemLustreConfiguration {
    /// <p>(Optional) The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone, where d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.</p>
    #[doc(hidden)]
    pub weekly_maintenance_start_time: std::option::Option<std::string::String>,
    /// <p>(Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped to the root of the Amazon S3 bucket you select. An example is <code>s3://import-bucket/optional-prefix</code>. If you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system.</p> <note>
    /// <p>This parameter is not supported for file systems with the <code>Persistent_2</code> deployment type. Instead, use <code>CreateDataRepositoryAssociation</code> to create a data repository association to link your Lustre file system to a data repository.</p>
    /// </note>
    #[doc(hidden)]
    pub import_path: std::option::Option<std::string::String>,
    /// <p>(Optional) Available with <code>Scratch</code> and <code>Persistent_1</code> deployment types. Specifies the path in the Amazon S3 bucket where the root of your Amazon FSx file system is exported. The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new and changed data is to be exported from your Amazon FSx for Lustre file system. If an <code>ExportPath</code> value is not provided, Amazon FSx sets a default export path, <code>s3://import-bucket/FSxLustre[creation-timestamp]</code>. The timestamp is in UTC format, for example <code>s3://import-bucket/FSxLustre20181105T222312Z</code>.</p>
    /// <p>The Amazon S3 export bucket must be the same as the import bucket specified by <code>ImportPath</code>. If you specify only a bucket name, such as <code>s3://import-bucket</code>, you get a 1:1 mapping of file system objects to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a custom prefix in the export path, such as <code>s3://import-bucket/[custom-optional-prefix]</code>, Amazon FSx exports the contents of your file system to that export prefix in the Amazon S3 bucket.</p> <note>
    /// <p>This parameter is not supported for file systems with the <code>Persistent_2</code> deployment type. Instead, use <code>CreateDataRepositoryAssociation</code> to create a data repository association to link your Lustre file system to a data repository.</p>
    /// </note>
    #[doc(hidden)]
    pub export_path: std::option::Option<std::string::String>,
    /// <p>(Optional) For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.</p>
    /// <p>The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.</p>
    /// <p>This parameter is not supported for file systems with the <code>Persistent_2</code> deployment type. Instead, use <code>CreateDataRepositoryAssociation</code> to create a data repository association to link your Lustre file system to a data repository.</p>
    #[doc(hidden)]
    pub imported_file_chunk_size: std::option::Option<i32>,
    /// <p>(Optional) Choose <code>SCRATCH_1</code> and <code>SCRATCH_2</code> deployment types when you need temporary storage and shorter-term processing of data. The <code>SCRATCH_2</code> deployment type provides in-transit encryption of data and higher burst throughput capacity than <code>SCRATCH_1</code>.</p>
    /// <p>Choose <code>PERSISTENT_1</code> for longer-term storage and for throughput-focused workloads that aren’t latency-sensitive. <code>PERSISTENT_1</code> supports encryption of data in transit, and is available in all Amazon Web Services Regions in which FSx for Lustre is available.</p>
    /// <p>Choose <code>PERSISTENT_2</code> for longer-term storage and for latency-sensitive workloads that require the highest levels of IOPS/throughput. <code>PERSISTENT_2</code> supports SSD storage, and offers higher <code>PerUnitStorageThroughput</code> (up to 1000 MB/s/TiB). <code>PERSISTENT_2</code> is available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of Amazon Web Services Regions in which <code>PERSISTENT_2</code> is available, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-fsx-lustre.html#lustre-deployment-types">File system deployment options for FSx for Lustre</a> in the <i>Amazon FSx for Lustre User Guide</i>.</p> <note>
    /// <p>If you choose <code>PERSISTENT_2</code>, and you set <code>FileSystemTypeVersion</code> to <code>2.10</code>, the <code>CreateFileSystem</code> operation fails.</p>
    /// </note>
    /// <p>Encryption of data in transit is automatically turned on when you access <code>SCRATCH_2</code>, <code>PERSISTENT_1</code> and <code>PERSISTENT_2</code> file systems from Amazon EC2 instances that <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/data-                 protection.html">support automatic encryption</a> in the Amazon Web Services Regions where they are available. For more information about encryption in transit for FSx for Lustre file systems, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/encryption-in-transit-fsxl.html">Encrypting data in transit</a> in the <i>Amazon FSx for Lustre User Guide</i>. </p>
    /// <p>(Default = <code>SCRATCH_1</code>)</p>
    #[doc(hidden)]
    pub deployment_type: std::option::Option<crate::model::LustreDeploymentType>,
    /// <p> (Optional) Available with <code>Scratch</code> and <code>Persistent_1</code> deployment types. When you create your file system, your existing S3 objects appear as file and directory listings. Use this property to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. <code>AutoImportPolicy</code> can have the following values:</p>
    /// <ul>
    /// <li> <p> <code>NONE</code> - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or changed objects after choosing this option.</p> </li>
    /// <li> <p> <code>NEW</code> - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system. </p> </li>
    /// <li> <p> <code>NEW_CHANGED</code> - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option.</p> </li>
    /// <li> <p> <code>NEW_CHANGED_DELETED</code> - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.</p> </li>
    /// </ul>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/older-deployment-types.html#legacy-auto-import-from-s3"> Automatically import updates from your S3 bucket</a>.</p> <note>
    /// <p>This parameter is not supported for file systems with the <code>Persistent_2</code> deployment type. Instead, use <code>CreateDataRepositoryAssociation</code> to create a data repository association to link your Lustre file system to a data repository.</p>
    /// </note>
    #[doc(hidden)]
    pub auto_import_policy: std::option::Option<crate::model::AutoImportPolicyType>,
    /// <p>Required with <code>PERSISTENT_1</code> and <code>PERSISTENT_2</code> deployment types, provisions the amount of read and write throughput for each 1 tebibyte (TiB) of file system storage capacity, in MB/s/TiB. File system throughput capacity is calculated by multiplying file system storage capacity (TiB) by the <code>PerUnitStorageThroughput</code> (MB/s/TiB). For a 2.4-TiB file system, provisioning 50 MB/s/TiB of <code>PerUnitStorageThroughput</code> yields 120 MB/s of file system throughput. You pay for the amount of throughput that you provision. </p>
    /// <p>Valid values:</p>
    /// <ul>
    /// <li> <p>For <code>PERSISTENT_1</code> SSD storage: 50, 100, 200 MB/s/TiB.</p> </li>
    /// <li> <p>For <code>PERSISTENT_1</code> HDD storage: 12, 40 MB/s/TiB.</p> </li>
    /// <li> <p>For <code>PERSISTENT_2</code> SSD storage: 125, 250, 500, 1000 MB/s/TiB.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub per_unit_storage_throughput: std::option::Option<i32>,
    /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
    #[doc(hidden)]
    pub daily_automatic_backup_start_time: std::option::Option<std::string::String>,
    /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
    #[doc(hidden)]
    pub automatic_backup_retention_days: std::option::Option<i32>,
    /// <p>(Optional) Not available for use with file systems that are linked to a data repository. A boolean flag indicating whether tags for the file system should be copied to backups. The default value is false. If <code>CopyTagsToBackups</code> is set to true, all file system tags are copied to all automatic and user-initiated backups when the user doesn't specify any backup-specific tags. If <code>CopyTagsToBackups</code> is set to true and you specify one or more backup tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.</p>
    /// <p>(Default = <code>false</code>)</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html"> Working with backups</a> in the <i>Amazon FSx for Lustre User Guide</i>.</p>
    #[doc(hidden)]
    pub copy_tags_to_backups: std::option::Option<bool>,
    /// <p>The type of drive cache used by <code>PERSISTENT_1</code> file systems that are provisioned with HDD storage devices. This parameter is required when storage type is HDD. Set this property to <code>READ</code> to improve the performance for frequently accessed files by caching up to 20% of the total storage capacity of the file system.</p>
    /// <p>This parameter is required when <code>StorageType</code> is set to <code>HDD</code>.</p>
    #[doc(hidden)]
    pub drive_cache_type: std::option::Option<crate::model::DriveCacheType>,
    /// <p>Sets the data compression configuration for the file system. <code>DataCompressionType</code> can have the following values:</p>
    /// <ul>
    /// <li> <p> <code>NONE</code> - (Default) Data compression is turned off when the file system is created.</p> </li>
    /// <li> <p> <code>LZ4</code> - Data compression is turned on with the LZ4 algorithm.</p> </li>
    /// </ul>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/LustreGuide/data-compression.html">Lustre data compression</a> in the <i>Amazon FSx for Lustre User Guide</i>.</p>
    #[doc(hidden)]
    pub data_compression_type: std::option::Option<crate::model::DataCompressionType>,
    /// <p>The Lustre logging configuration used when creating an Amazon FSx for Lustre file system. When logging is enabled, Lustre logs error and warning events for data repositories associated with your file system to Amazon CloudWatch Logs.</p>
    #[doc(hidden)]
    pub log_configuration: std::option::Option<crate::model::LustreLogCreateConfiguration>,
    /// <p>The Lustre root squash configuration used when creating an Amazon FSx for Lustre file system. When enabled, root squash restricts root-level access from clients that try to access your file system as a root user.</p>
    #[doc(hidden)]
    pub root_squash_configuration: std::option::Option<crate::model::LustreRootSquashConfiguration>,
}
// Read-only accessors for `CreateFileSystemLustreConfiguration`. Each getter
// borrows the stored `Option` field; full field semantics are documented on
// the struct itself.
impl CreateFileSystemLustreConfiguration {
    /// <p>(Optional) The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone, where d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.</p>
    pub fn weekly_maintenance_start_time(&self) -> std::option::Option<&str> {
        self.weekly_maintenance_start_time.as_deref()
    }
    /// <p>(Optional) The path to the Amazon S3 bucket (including the optional prefix) used as the data repository for the file system. Not supported for the <code>Persistent_2</code> deployment type; use <code>CreateDataRepositoryAssociation</code> instead.</p>
    pub fn import_path(&self) -> std::option::Option<&str> {
        self.import_path.as_deref()
    }
    /// <p>(Optional) Available with <code>Scratch</code> and <code>Persistent_1</code> deployment types. The path in the Amazon S3 bucket where the root of the file system is exported; it must use the same bucket as <code>ImportPath</code>. Not supported for the <code>Persistent_2</code> deployment type.</p>
    pub fn export_path(&self) -> std::option::Option<&str> {
        self.export_path.as_deref()
    }
    /// <p>(Optional) For files imported from a data repository, the maximum amount of data per file (in MiB) stored on a single physical disk. Default is 1,024 MiB; maximum is 512,000 MiB. Not supported for the <code>Persistent_2</code> deployment type.</p>
    pub fn imported_file_chunk_size(&self) -> std::option::Option<i32> {
        self.imported_file_chunk_size
    }
    /// <p>(Optional) The file system deployment type: <code>SCRATCH_1</code> (default), <code>SCRATCH_2</code>, <code>PERSISTENT_1</code>, or <code>PERSISTENT_2</code>. See the struct field documentation for the full trade-offs between the deployment types.</p>
    pub fn deployment_type(&self) -> std::option::Option<&crate::model::LustreDeploymentType> {
        self.deployment_type.as_ref()
    }
    /// <p>(Optional) Available with <code>Scratch</code> and <code>Persistent_1</code> deployment types. How Amazon FSx keeps file and directory listings up to date with the linked S3 bucket: <code>NONE</code> (default), <code>NEW</code>, <code>NEW_CHANGED</code>, or <code>NEW_CHANGED_DELETED</code>.</p>
    pub fn auto_import_policy(&self) -> std::option::Option<&crate::model::AutoImportPolicyType> {
        self.auto_import_policy.as_ref()
    }
    /// <p>Required with <code>PERSISTENT_1</code> and <code>PERSISTENT_2</code> deployment types: the read and write throughput provisioned for each 1 TiB of storage capacity, in MB/s/TiB. See the struct field documentation for valid values per storage class.</p>
    pub fn per_unit_storage_throughput(&self) -> std::option::Option<i32> {
        self.per_unit_storage_throughput
    }
    /// <p>A recurring daily time in the zero-padded 24-hour format <code>HH:MM</code>; for example, <code>05:00</code> specifies 5 AM daily.</p>
    pub fn daily_automatic_backup_start_time(&self) -> std::option::Option<&str> {
        self.daily_automatic_backup_start_time.as_deref()
    }
    /// <p>The number of days to retain automatic backups. <code>0</code> (the default) disables automatic backups; the maximum is 90 days.</p>
    pub fn automatic_backup_retention_days(&self) -> std::option::Option<i32> {
        self.automatic_backup_retention_days
    }
    /// <p>(Optional) Whether file system tags are copied to backups (default <code>false</code>). Not available for file systems linked to a data repository. See the struct field documentation for the tag-precedence rules.</p>
    pub fn copy_tags_to_backups(&self) -> std::option::Option<bool> {
        self.copy_tags_to_backups
    }
    /// <p>The type of drive cache used by <code>PERSISTENT_1</code> file systems provisioned with HDD storage; required when <code>StorageType</code> is <code>HDD</code>. Set to <code>READ</code> to cache up to 20% of total storage capacity.</p>
    pub fn drive_cache_type(&self) -> std::option::Option<&crate::model::DriveCacheType> {
        self.drive_cache_type.as_ref()
    }
    /// <p>The data compression configuration for the file system: <code>NONE</code> (default) or <code>LZ4</code>.</p>
    pub fn data_compression_type(&self) -> std::option::Option<&crate::model::DataCompressionType> {
        self.data_compression_type.as_ref()
    }
    /// <p>The Lustre logging configuration used when creating an Amazon FSx for Lustre file system. When logging is enabled, Lustre logs error and warning events for data repositories associated with your file system to Amazon CloudWatch Logs.</p>
    pub fn log_configuration(
        &self,
    ) -> std::option::Option<&crate::model::LustreLogCreateConfiguration> {
        self.log_configuration.as_ref()
    }
    /// <p>The Lustre root squash configuration used when creating an Amazon FSx for Lustre file system. When enabled, root squash restricts root-level access from clients that try to access your file system as a root user.</p>
    pub fn root_squash_configuration(
        &self,
    ) -> std::option::Option<&crate::model::LustreRootSquashConfiguration> {
        self.root_squash_configuration.as_ref()
    }
}
/// See [`CreateFileSystemLustreConfiguration`](crate::model::CreateFileSystemLustreConfiguration).
pub mod create_file_system_lustre_configuration {

    /// A builder for [`CreateFileSystemLustreConfiguration`](crate::model::CreateFileSystemLustreConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) weekly_maintenance_start_time: std::option::Option<std::string::String>,
        pub(crate) import_path: std::option::Option<std::string::String>,
        pub(crate) export_path: std::option::Option<std::string::String>,
        pub(crate) imported_file_chunk_size: std::option::Option<i32>,
        pub(crate) deployment_type: std::option::Option<crate::model::LustreDeploymentType>,
        pub(crate) auto_import_policy: std::option::Option<crate::model::AutoImportPolicyType>,
        pub(crate) per_unit_storage_throughput: std::option::Option<i32>,
        pub(crate) daily_automatic_backup_start_time: std::option::Option<std::string::String>,
        pub(crate) automatic_backup_retention_days: std::option::Option<i32>,
        pub(crate) copy_tags_to_backups: std::option::Option<bool>,
        pub(crate) drive_cache_type: std::option::Option<crate::model::DriveCacheType>,
        pub(crate) data_compression_type: std::option::Option<crate::model::DataCompressionType>,
        pub(crate) log_configuration:
            std::option::Option<crate::model::LustreLogCreateConfiguration>,
        pub(crate) root_squash_configuration:
            std::option::Option<crate::model::LustreRootSquashConfiguration>,
    }
    impl Builder {
        /// (Optional) Preferred weekly maintenance window, formatted `d:HH:MM`
        /// in UTC, where `d` is the weekday number from 1 (Monday) through 7 (Sunday).
        pub fn weekly_maintenance_start_time(
            self,
            input: impl Into<std::string::String>,
        ) -> Self {
            Self {
                weekly_maintenance_start_time: Some(input.into()),
                ..self
            }
        }
        /// (Optional) Preferred weekly maintenance window, formatted `d:HH:MM`
        /// in UTC, where `d` is the weekday number from 1 (Monday) through 7 (Sunday).
        pub fn set_weekly_maintenance_start_time(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                weekly_maintenance_start_time: input,
                ..self
            }
        }
        /// (Optional) S3 path — bucket plus optional prefix, e.g.
        /// `s3://import-bucket/optional-prefix` — used as the data repository;
        /// the file-system root maps to the bucket root, and only keys under the
        /// prefix are loaded. Not supported with the `Persistent_2` deployment
        /// type; use `CreateDataRepositoryAssociation` instead.
        pub fn import_path(self, input: impl Into<std::string::String>) -> Self {
            Self {
                import_path: Some(input.into()),
                ..self
            }
        }
        /// (Optional) S3 path — bucket plus optional prefix, e.g.
        /// `s3://import-bucket/optional-prefix` — used as the data repository;
        /// the file-system root maps to the bucket root, and only keys under the
        /// prefix are loaded. Not supported with the `Persistent_2` deployment
        /// type; use `CreateDataRepositoryAssociation` instead.
        pub fn set_import_path(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                import_path: input,
                ..self
            }
        }
        /// (Optional; `Scratch` and `Persistent_1` deployment types only) S3
        /// path where the file-system root is exported; it must use the same
        /// bucket as `ImportPath`. If omitted, Amazon FSx defaults to
        /// `s3://import-bucket/FSxLustre[creation-timestamp]` (UTC timestamp).
        /// A bucket-only path gives a 1:1 object mapping, so input data in S3 is
        /// overwritten on export. Not supported with `Persistent_2`; use
        /// `CreateDataRepositoryAssociation` instead.
        pub fn export_path(self, input: impl Into<std::string::String>) -> Self {
            Self {
                export_path: Some(input.into()),
                ..self
            }
        }
        /// (Optional; `Scratch` and `Persistent_1` deployment types only) S3
        /// path where the file-system root is exported; it must use the same
        /// bucket as `ImportPath`. If omitted, Amazon FSx defaults to
        /// `s3://import-bucket/FSxLustre[creation-timestamp]` (UTC timestamp).
        /// A bucket-only path gives a 1:1 object mapping, so input data in S3 is
        /// overwritten on export. Not supported with `Persistent_2`; use
        /// `CreateDataRepositoryAssociation` instead.
        pub fn set_export_path(self, input: std::option::Option<std::string::String>) -> Self {
            Self {
                export_path: input,
                ..self
            }
        }
        /// (Optional) For files imported from a data repository: stripe count
        /// and maximum data per file (MiB) stored on a single physical disk.
        /// Default 1,024 MiB; maximum 512,000 MiB. Not supported with
        /// `Persistent_2`; use `CreateDataRepositoryAssociation` instead.
        pub fn imported_file_chunk_size(self, input: i32) -> Self {
            Self {
                imported_file_chunk_size: Some(input),
                ..self
            }
        }
        /// (Optional) For files imported from a data repository: stripe count
        /// and maximum data per file (MiB) stored on a single physical disk.
        /// Default 1,024 MiB; maximum 512,000 MiB. Not supported with
        /// `Persistent_2`; use `CreateDataRepositoryAssociation` instead.
        pub fn set_imported_file_chunk_size(self, input: std::option::Option<i32>) -> Self {
            Self {
                imported_file_chunk_size: input,
                ..self
            }
        }
        /// (Optional; default `SCRATCH_1`) Deployment type. `SCRATCH_1`/`SCRATCH_2`
        /// suit temporary storage; `SCRATCH_2` adds in-transit encryption and
        /// higher burst throughput. `PERSISTENT_1` suits longer-term,
        /// throughput-focused workloads; `PERSISTENT_2` suits latency-sensitive
        /// workloads needing the highest IOPS/throughput (SSD only, up to 1000
        /// MB/s/TiB `PerUnitStorageThroughput`, limited-Region availability).
        /// Choosing `PERSISTENT_2` with `FileSystemTypeVersion` `2.10` makes
        /// `CreateFileSystem` fail. See the deployment-options page in the
        /// *Amazon FSx for Lustre User Guide* for Region availability and
        /// encryption-in-transit details.
        pub fn deployment_type(self, input: crate::model::LustreDeploymentType) -> Self {
            Self {
                deployment_type: Some(input),
                ..self
            }
        }
        /// (Optional; default `SCRATCH_1`) Deployment type. `SCRATCH_1`/`SCRATCH_2`
        /// suit temporary storage; `SCRATCH_2` adds in-transit encryption and
        /// higher burst throughput. `PERSISTENT_1` suits longer-term,
        /// throughput-focused workloads; `PERSISTENT_2` suits latency-sensitive
        /// workloads needing the highest IOPS/throughput (SSD only, up to 1000
        /// MB/s/TiB `PerUnitStorageThroughput`, limited-Region availability).
        /// Choosing `PERSISTENT_2` with `FileSystemTypeVersion` `2.10` makes
        /// `CreateFileSystem` fail. See the deployment-options page in the
        /// *Amazon FSx for Lustre User Guide* for Region availability and
        /// encryption-in-transit details.
        pub fn set_deployment_type(
            self,
            input: std::option::Option<crate::model::LustreDeploymentType>,
        ) -> Self {
            Self {
                deployment_type: input,
                ..self
            }
        }
        /// (Optional; `Scratch` and `Persistent_1` deployment types only)
        /// Controls how Amazon FSx keeps file and directory listings in sync
        /// with the linked S3 bucket: `NONE` (default — import at creation
        /// only), `NEW` (import new objects), `NEW_CHANGED` (new and changed),
        /// or `NEW_CHANGED_DELETED` (new, changed, and deleted). Not supported
        /// with `Persistent_2`; use `CreateDataRepositoryAssociation` instead.
        pub fn auto_import_policy(self, input: crate::model::AutoImportPolicyType) -> Self {
            Self {
                auto_import_policy: Some(input),
                ..self
            }
        }
        /// (Optional; `Scratch` and `Persistent_1` deployment types only)
        /// Controls how Amazon FSx keeps file and directory listings in sync
        /// with the linked S3 bucket: `NONE` (default — import at creation
        /// only), `NEW` (import new objects), `NEW_CHANGED` (new and changed),
        /// or `NEW_CHANGED_DELETED` (new, changed, and deleted). Not supported
        /// with `Persistent_2`; use `CreateDataRepositoryAssociation` instead.
        pub fn set_auto_import_policy(
            self,
            input: std::option::Option<crate::model::AutoImportPolicyType>,
        ) -> Self {
            Self {
                auto_import_policy: input,
                ..self
            }
        }
        /// Required with `PERSISTENT_1` and `PERSISTENT_2`: provisioned
        /// read/write throughput per TiB of storage, in MB/s/TiB (total
        /// throughput = storage TiB × this value; you pay for what you
        /// provision). Valid values — `PERSISTENT_1` SSD: 50, 100, 200;
        /// `PERSISTENT_1` HDD: 12, 40; `PERSISTENT_2` SSD: 125, 250, 500, 1000.
        pub fn per_unit_storage_throughput(self, input: i32) -> Self {
            Self {
                per_unit_storage_throughput: Some(input),
                ..self
            }
        }
        /// Required with `PERSISTENT_1` and `PERSISTENT_2`: provisioned
        /// read/write throughput per TiB of storage, in MB/s/TiB (total
        /// throughput = storage TiB × this value; you pay for what you
        /// provision). Valid values — `PERSISTENT_1` SSD: 50, 100, 200;
        /// `PERSISTENT_1` HDD: 12, 40; `PERSISTENT_2` SSD: 125, 250, 500, 1000.
        pub fn set_per_unit_storage_throughput(self, input: std::option::Option<i32>) -> Self {
            Self {
                per_unit_storage_throughput: input,
                ..self
            }
        }
        /// Recurring daily start time for automatic backups, zero-padded
        /// `HH:MM` (24-hour clock); for example `05:00` is 5 AM daily.
        pub fn daily_automatic_backup_start_time(
            self,
            input: impl Into<std::string::String>,
        ) -> Self {
            Self {
                daily_automatic_backup_start_time: Some(input.into()),
                ..self
            }
        }
        /// Recurring daily start time for automatic backups, zero-padded
        /// `HH:MM` (24-hour clock); for example `05:00` is 5 AM daily.
        pub fn set_daily_automatic_backup_start_time(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                daily_automatic_backup_start_time: input,
                ..self
            }
        }
        /// Number of days to retain automatic backups: `0` (the default)
        /// disables them; the maximum is 90.
        pub fn automatic_backup_retention_days(self, input: i32) -> Self {
            Self {
                automatic_backup_retention_days: Some(input),
                ..self
            }
        }
        /// Number of days to retain automatic backups: `0` (the default)
        /// disables them; the maximum is 90.
        pub fn set_automatic_backup_retention_days(
            self,
            input: std::option::Option<i32>,
        ) -> Self {
            Self {
                automatic_backup_retention_days: input,
                ..self
            }
        }
        /// (Optional; default `false`; not available for file systems linked
        /// to a data repository) Whether file-system tags are copied to
        /// backups. When true, tags are copied to automatic and user-initiated
        /// backups unless backup-specific tags are supplied; user-initiated
        /// backups created with their own tags never inherit file-system tags.
        /// See "Working with backups" in the *Amazon FSx for Lustre User Guide*.
        pub fn copy_tags_to_backups(self, input: bool) -> Self {
            Self {
                copy_tags_to_backups: Some(input),
                ..self
            }
        }
        /// (Optional; default `false`; not available for file systems linked
        /// to a data repository) Whether file-system tags are copied to
        /// backups. When true, tags are copied to automatic and user-initiated
        /// backups unless backup-specific tags are supplied; user-initiated
        /// backups created with their own tags never inherit file-system tags.
        /// See "Working with backups" in the *Amazon FSx for Lustre User Guide*.
        pub fn set_copy_tags_to_backups(self, input: std::option::Option<bool>) -> Self {
            Self {
                copy_tags_to_backups: input,
                ..self
            }
        }
        /// Drive cache type for `PERSISTENT_1` file systems provisioned with
        /// HDD storage; required when `StorageType` is `HDD`. `READ` caches up
        /// to 20% of total storage capacity for frequently accessed files.
        pub fn drive_cache_type(self, input: crate::model::DriveCacheType) -> Self {
            Self {
                drive_cache_type: Some(input),
                ..self
            }
        }
        /// Drive cache type for `PERSISTENT_1` file systems provisioned with
        /// HDD storage; required when `StorageType` is `HDD`. `READ` caches up
        /// to 20% of total storage capacity for frequently accessed files.
        pub fn set_drive_cache_type(
            self,
            input: std::option::Option<crate::model::DriveCacheType>,
        ) -> Self {
            Self {
                drive_cache_type: input,
                ..self
            }
        }
        /// Data compression configuration: `NONE` (default — compression off
        /// at creation) or `LZ4`. See "Lustre data compression" in the
        /// *Amazon FSx for Lustre User Guide*.
        pub fn data_compression_type(self, input: crate::model::DataCompressionType) -> Self {
            Self {
                data_compression_type: Some(input),
                ..self
            }
        }
        /// Data compression configuration: `NONE` (default — compression off
        /// at creation) or `LZ4`. See "Lustre data compression" in the
        /// *Amazon FSx for Lustre User Guide*.
        pub fn set_data_compression_type(
            self,
            input: std::option::Option<crate::model::DataCompressionType>,
        ) -> Self {
            Self {
                data_compression_type: input,
                ..self
            }
        }
        /// Lustre logging configuration used at creation; when enabled, error
        /// and warning events for associated data repositories are sent to
        /// Amazon CloudWatch Logs.
        pub fn log_configuration(
            self,
            input: crate::model::LustreLogCreateConfiguration,
        ) -> Self {
            Self {
                log_configuration: Some(input),
                ..self
            }
        }
        /// Lustre logging configuration used at creation; when enabled, error
        /// and warning events for associated data repositories are sent to
        /// Amazon CloudWatch Logs.
        pub fn set_log_configuration(
            self,
            input: std::option::Option<crate::model::LustreLogCreateConfiguration>,
        ) -> Self {
            Self {
                log_configuration: input,
                ..self
            }
        }
        /// Lustre root squash configuration used at creation; when enabled,
        /// root squash restricts root-level access from clients that connect
        /// as the root user.
        pub fn root_squash_configuration(
            self,
            input: crate::model::LustreRootSquashConfiguration,
        ) -> Self {
            Self {
                root_squash_configuration: Some(input),
                ..self
            }
        }
        /// Lustre root squash configuration used at creation; when enabled,
        /// root squash restricts root-level access from clients that connect
        /// as the root user.
        pub fn set_root_squash_configuration(
            self,
            input: std::option::Option<crate::model::LustreRootSquashConfiguration>,
        ) -> Self {
            Self {
                root_squash_configuration: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`CreateFileSystemLustreConfiguration`](crate::model::CreateFileSystemLustreConfiguration).
        pub fn build(self) -> crate::model::CreateFileSystemLustreConfiguration {
            // Destructure once, then move every field across with shorthand.
            let Self {
                weekly_maintenance_start_time,
                import_path,
                export_path,
                imported_file_chunk_size,
                deployment_type,
                auto_import_policy,
                per_unit_storage_throughput,
                daily_automatic_backup_start_time,
                automatic_backup_retention_days,
                copy_tags_to_backups,
                drive_cache_type,
                data_compression_type,
                log_configuration,
                root_squash_configuration,
            } = self;
            crate::model::CreateFileSystemLustreConfiguration {
                weekly_maintenance_start_time,
                import_path,
                export_path,
                imported_file_chunk_size,
                deployment_type,
                auto_import_policy,
                per_unit_storage_throughput,
                daily_automatic_backup_start_time,
                automatic_backup_retention_days,
                copy_tags_to_backups,
                drive_cache_type,
                data_compression_type,
                log_configuration,
                root_squash_configuration,
            }
        }
    }
}
impl CreateFileSystemLustreConfiguration {
    /// Creates a new builder-style object to manufacture [`CreateFileSystemLustreConfiguration`](crate::model::CreateFileSystemLustreConfiguration).
    pub fn builder() -> crate::model::create_file_system_lustre_configuration::Builder {
        // The builder derives `Default`, so an empty one is just the default value.
        std::default::Default::default()
    }
}

/// <p>The configuration object for the Microsoft Windows file system used in <code>CreateFileSystem</code> and <code>CreateFileSystemFromBackup</code> operations.</p>
///
/// Construct values of this type with [`CreateFileSystemWindowsConfiguration::builder`](crate::model::CreateFileSystemWindowsConfiguration::builder);
/// all fields are optional at the type level and default to `None` in the builder.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct CreateFileSystemWindowsConfiguration {
    /// <p>The ID for an existing Amazon Web Services Managed Microsoft Active Directory (AD) instance that the file system should join when it's created.</p>
    #[doc(hidden)]
    pub active_directory_id: std::option::Option<std::string::String>,
    /// <p>The configuration that Amazon FSx uses to join a FSx for Windows File Server file system or an ONTAP storage virtual machine (SVM) to a self-managed (including on-premises) Microsoft Active Directory (AD) directory. For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/self-managed-AD.html"> Using Amazon FSx with your self-managed Microsoft Active Directory</a> or <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-svms.html">Managing SVMs</a>.</p>
    #[doc(hidden)]
    pub self_managed_active_directory_configuration:
        std::option::Option<crate::model::SelfManagedActiveDirectoryConfiguration>,
    /// <p>Specifies the file system deployment type, valid values are the following:</p>
    /// <ul>
    /// <li> <p> <code>MULTI_AZ_1</code> - Deploys a high availability file system that is configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. You can only deploy a Multi-AZ file system in Amazon Web Services Regions that have a minimum of three Availability Zones. Also supports HDD storage type</p> </li>
    /// <li> <p> <code>SINGLE_AZ_1</code> - (Default) Choose to deploy a file system that is configured for single AZ redundancy.</p> </li>
    /// <li> <p> <code>SINGLE_AZ_2</code> - The latest generation Single AZ file system. Specifies a file system that is configured for single AZ redundancy and supports HDD storage type.</p> </li>
    /// </ul>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html"> Availability and Durability: Single-AZ and Multi-AZ File Systems</a>.</p>
    #[doc(hidden)]
    pub deployment_type: std::option::Option<crate::model::WindowsDeploymentType>,
    /// <p>Required when <code>DeploymentType</code> is set to <code>MULTI_AZ_1</code>. This specifies the subnet in which you want the preferred file server to be located. For in-Amazon Web Services applications, we recommend that you launch your clients in the same Availability Zone (AZ) as your preferred file server to reduce cross-AZ data transfer costs and minimize latency. </p>
    #[doc(hidden)]
    pub preferred_subnet_id: std::option::Option<std::string::String>,
    /// <p>Sets the throughput capacity of an Amazon FSx file system, measured in megabytes per second (MB/s), in 2 to the <i>n</i>th increments, between 2^3 (8) and 2^11 (2048).</p>
    #[doc(hidden)]
    pub throughput_capacity: std::option::Option<i32>,
    /// <p>The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone, where d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.</p>
    #[doc(hidden)]
    pub weekly_maintenance_start_time: std::option::Option<std::string::String>,
    /// <p>The preferred time to take daily automatic backups, formatted HH:MM in the UTC time zone.</p>
    #[doc(hidden)]
    pub daily_automatic_backup_start_time: std::option::Option<std::string::String>,
    /// <p>The number of days to retain automatic backups. The default is to retain backups for 7 days. Setting this value to 0 disables the creation of automatic backups. The maximum retention period for backups is 90 days.</p>
    #[doc(hidden)]
    pub automatic_backup_retention_days: std::option::Option<i32>,
    /// <p>A boolean flag indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.</p>
    #[doc(hidden)]
    pub copy_tags_to_backups: std::option::Option<bool>,
    /// <p>An array of one or more DNS alias names that you want to associate with the Amazon FSx file system. Aliases allow you to use existing DNS names to access the data in your Amazon FSx file system. You can associate up to 50 aliases with a file system at any time. You can associate additional DNS aliases after you create the file system using the AssociateFileSystemAliases operation. You can remove DNS aliases from the file system after it is created using the DisassociateFileSystemAliases operation. You only need to specify the alias name in the request payload.</p>
    /// <p>For more information, see <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html">Working with DNS Aliases</a> and <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/walkthrough05-file-system-custom-CNAME.html">Walkthrough 5: Using DNS aliases to access your file system</a>, including additional steps you must take to be able to access your file system using a DNS alias.</p>
    /// <p>An alias name has to meet the following requirements:</p>
    /// <ul>
    /// <li> <p>Formatted as a fully-qualified domain name (FQDN), <code>hostname.domain</code>, for example, <code>accounting.example.com</code>.</p> </li>
    /// <li> <p>Can contain alphanumeric characters, the underscore (_), and the hyphen (-).</p> </li>
    /// <li> <p>Cannot start or end with a hyphen.</p> </li>
    /// <li> <p>Can start with a numeric.</p> </li>
    /// </ul>
    /// <p>For DNS alias names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in escape codes.</p>
    #[doc(hidden)]
    pub aliases: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system.</p>
    #[doc(hidden)]
    pub audit_log_configuration:
        std::option::Option<crate::model::WindowsAuditLogCreateConfiguration>,
}
impl CreateFileSystemWindowsConfiguration {
    /// The ID of an existing Amazon Web Services Managed Microsoft Active Directory (AD)
    /// instance that the file system should join when it's created.
    pub fn active_directory_id(&self) -> std::option::Option<&str> {
        self.active_directory_id.as_ref().map(|id| id.as_str())
    }
    /// The self-managed (including on-premises) Microsoft Active Directory configuration
    /// used to join the file system or an ONTAP SVM to a directory. See
    /// <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/self-managed-AD.html">Using Amazon FSx with your self-managed Microsoft Active Directory</a>
    /// and <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-svms.html">Managing SVMs</a>.
    pub fn self_managed_active_directory_configuration(
        &self,
    ) -> std::option::Option<&crate::model::SelfManagedActiveDirectoryConfiguration> {
        self.self_managed_active_directory_configuration.as_ref()
    }
    /// The file system deployment type: <code>MULTI_AZ_1</code> (Multi-AZ redundancy,
    /// also supports HDD storage), <code>SINGLE_AZ_1</code> (default, single-AZ), or
    /// <code>SINGLE_AZ_2</code> (latest-generation single-AZ, supports HDD storage). See
    /// <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html">Availability and Durability: Single-AZ and Multi-AZ File Systems</a>.
    pub fn deployment_type(&self) -> std::option::Option<&crate::model::WindowsDeploymentType> {
        self.deployment_type.as_ref()
    }
    /// The subnet in which the preferred file server is located. Required when
    /// <code>DeploymentType</code> is <code>MULTI_AZ_1</code>; launching clients in the same
    /// AZ reduces cross-AZ data transfer costs and latency.
    pub fn preferred_subnet_id(&self) -> std::option::Option<&str> {
        self.preferred_subnet_id.as_ref().map(|id| id.as_str())
    }
    /// The throughput capacity of the file system in MB/s, as a power of two
    /// between 2^3 (8) and 2^11 (2048).
    pub fn throughput_capacity(&self) -> std::option::Option<i32> {
        self.throughput_capacity
    }
    /// The preferred weekly maintenance start time, formatted d:HH:MM in UTC,
    /// where d is the weekday number from 1 (Monday) through 7 (Sunday).
    pub fn weekly_maintenance_start_time(&self) -> std::option::Option<&str> {
        self.weekly_maintenance_start_time.as_ref().map(|t| t.as_str())
    }
    /// The preferred daily automatic backup start time, formatted HH:MM in UTC.
    pub fn daily_automatic_backup_start_time(&self) -> std::option::Option<&str> {
        self.daily_automatic_backup_start_time.as_ref().map(|t| t.as_str())
    }
    /// The number of days to retain automatic backups (default 7; 0 disables
    /// automatic backups; maximum 90).
    pub fn automatic_backup_retention_days(&self) -> std::option::Option<i32> {
        self.automatic_backup_retention_days
    }
    /// Whether file system tags are copied to backups (defaults to false). When true,
    /// tags are copied to automatic and user-initiated backups unless the user supplies
    /// their own tags on the backup, in which case no file system tags are copied.
    pub fn copy_tags_to_backups(&self) -> std::option::Option<bool> {
        self.copy_tags_to_backups
    }
    /// Up to 50 DNS alias names (FQDNs such as <code>accounting.example.com</code>) to
    /// associate with the file system; alphabetic characters are stored lowercase. See
    /// <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html">Working with DNS Aliases</a>.
    pub fn aliases(&self) -> std::option::Option<&[std::string::String]> {
        self.aliases.as_ref().map(|v| v.as_slice())
    }
    /// The audit-log configuration used to log user accesses of files, folders, and
    /// file shares on the file system.
    pub fn audit_log_configuration(
        &self,
    ) -> std::option::Option<&crate::model::WindowsAuditLogCreateConfiguration> {
        self.audit_log_configuration.as_ref()
    }
}
/// See [`CreateFileSystemWindowsConfiguration`](crate::model::CreateFileSystemWindowsConfiguration).
pub mod create_file_system_windows_configuration {

    /// A builder for [`CreateFileSystemWindowsConfiguration`](crate::model::CreateFileSystemWindowsConfiguration).
    ///
    /// Every field starts out as `None`; each setter returns the builder so calls can
    /// be chained, and [`Builder::build`] moves the accumulated values into the model.
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) active_directory_id: std::option::Option<std::string::String>,
        pub(crate) self_managed_active_directory_configuration:
            std::option::Option<crate::model::SelfManagedActiveDirectoryConfiguration>,
        pub(crate) deployment_type: std::option::Option<crate::model::WindowsDeploymentType>,
        pub(crate) preferred_subnet_id: std::option::Option<std::string::String>,
        pub(crate) throughput_capacity: std::option::Option<i32>,
        pub(crate) weekly_maintenance_start_time: std::option::Option<std::string::String>,
        pub(crate) daily_automatic_backup_start_time: std::option::Option<std::string::String>,
        pub(crate) automatic_backup_retention_days: std::option::Option<i32>,
        pub(crate) copy_tags_to_backups: std::option::Option<bool>,
        pub(crate) aliases: std::option::Option<std::vec::Vec<std::string::String>>,
        pub(crate) audit_log_configuration:
            std::option::Option<crate::model::WindowsAuditLogCreateConfiguration>,
    }
    impl Builder {
        /// Sets the ID of an existing Amazon Web Services Managed Microsoft Active Directory (AD)
        /// instance that the file system should join when it's created.
        pub fn active_directory_id(self, input: impl Into<std::string::String>) -> Self {
            Self {
                active_directory_id: Some(input.into()),
                ..self
            }
        }
        /// Replaces the Active Directory ID with the supplied optional value
        /// (pass `None` to clear it).
        pub fn set_active_directory_id(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                active_directory_id: input,
                ..self
            }
        }
        /// Sets the configuration used to join the file system or an ONTAP SVM to a
        /// self-managed (including on-premises) Microsoft Active Directory. See
        /// <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/self-managed-AD.html">Using Amazon FSx with your self-managed Microsoft Active Directory</a>
        /// and <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-svms.html">Managing SVMs</a>.
        pub fn self_managed_active_directory_configuration(
            self,
            input: crate::model::SelfManagedActiveDirectoryConfiguration,
        ) -> Self {
            Self {
                self_managed_active_directory_configuration: Some(input),
                ..self
            }
        }
        /// Replaces the self-managed Active Directory configuration with the supplied
        /// optional value (pass `None` to clear it).
        pub fn set_self_managed_active_directory_configuration(
            self,
            input: std::option::Option<crate::model::SelfManagedActiveDirectoryConfiguration>,
        ) -> Self {
            Self {
                self_managed_active_directory_configuration: input,
                ..self
            }
        }
        /// Sets the file system deployment type: <code>MULTI_AZ_1</code> (Multi-AZ redundancy,
        /// also supports HDD storage), <code>SINGLE_AZ_1</code> (default, single-AZ), or
        /// <code>SINGLE_AZ_2</code> (latest-generation single-AZ, supports HDD storage). See
        /// <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html">Availability and Durability: Single-AZ and Multi-AZ File Systems</a>.
        pub fn deployment_type(self, input: crate::model::WindowsDeploymentType) -> Self {
            Self {
                deployment_type: Some(input),
                ..self
            }
        }
        /// Replaces the deployment type with the supplied optional value
        /// (pass `None` to clear it).
        pub fn set_deployment_type(
            self,
            input: std::option::Option<crate::model::WindowsDeploymentType>,
        ) -> Self {
            Self {
                deployment_type: input,
                ..self
            }
        }
        /// Sets the subnet in which the preferred file server is located. Required when
        /// <code>DeploymentType</code> is <code>MULTI_AZ_1</code>; launching clients in
        /// the same AZ reduces cross-AZ data transfer costs and latency.
        pub fn preferred_subnet_id(self, input: impl Into<std::string::String>) -> Self {
            Self {
                preferred_subnet_id: Some(input.into()),
                ..self
            }
        }
        /// Replaces the preferred subnet ID with the supplied optional value
        /// (pass `None` to clear it).
        pub fn set_preferred_subnet_id(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                preferred_subnet_id: input,
                ..self
            }
        }
        /// Sets the throughput capacity of the file system in MB/s, as a power of two
        /// between 2^3 (8) and 2^11 (2048).
        pub fn throughput_capacity(self, input: i32) -> Self {
            Self {
                throughput_capacity: Some(input),
                ..self
            }
        }
        /// Replaces the throughput capacity with the supplied optional value
        /// (pass `None` to clear it).
        pub fn set_throughput_capacity(self, input: std::option::Option<i32>) -> Self {
            Self {
                throughput_capacity: input,
                ..self
            }
        }
        /// Sets the preferred weekly maintenance start time, formatted d:HH:MM in UTC,
        /// where d is the weekday number from 1 (Monday) through 7 (Sunday).
        pub fn weekly_maintenance_start_time(
            self,
            input: impl Into<std::string::String>,
        ) -> Self {
            Self {
                weekly_maintenance_start_time: Some(input.into()),
                ..self
            }
        }
        /// Replaces the weekly maintenance start time with the supplied optional value
        /// (pass `None` to clear it).
        pub fn set_weekly_maintenance_start_time(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                weekly_maintenance_start_time: input,
                ..self
            }
        }
        /// Sets the preferred daily automatic backup start time, formatted HH:MM in UTC.
        pub fn daily_automatic_backup_start_time(
            self,
            input: impl Into<std::string::String>,
        ) -> Self {
            Self {
                daily_automatic_backup_start_time: Some(input.into()),
                ..self
            }
        }
        /// Replaces the daily automatic backup start time with the supplied optional value
        /// (pass `None` to clear it).
        pub fn set_daily_automatic_backup_start_time(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                daily_automatic_backup_start_time: input,
                ..self
            }
        }
        /// Sets the number of days to retain automatic backups (default 7; 0 disables
        /// automatic backups; maximum 90).
        pub fn automatic_backup_retention_days(self, input: i32) -> Self {
            Self {
                automatic_backup_retention_days: Some(input),
                ..self
            }
        }
        /// Replaces the automatic backup retention period with the supplied optional value
        /// (pass `None` to clear it).
        pub fn set_automatic_backup_retention_days(
            self,
            input: std::option::Option<i32>,
        ) -> Self {
            Self {
                automatic_backup_retention_days: input,
                ..self
            }
        }
        /// Sets whether file system tags are copied to backups (defaults to false). When true,
        /// tags are copied to automatic and user-initiated backups unless the user supplies
        /// their own tags on the backup, in which case no file system tags are copied.
        pub fn copy_tags_to_backups(self, input: bool) -> Self {
            Self {
                copy_tags_to_backups: Some(input),
                ..self
            }
        }
        /// Replaces the copy-tags-to-backups flag with the supplied optional value
        /// (pass `None` to clear it).
        pub fn set_copy_tags_to_backups(self, input: std::option::Option<bool>) -> Self {
            Self {
                copy_tags_to_backups: input,
                ..self
            }
        }
        /// Appends an item to `aliases`.
        ///
        /// To override the contents of this collection use [`set_aliases`](Self::set_aliases).
        ///
        /// Each alias is a fully-qualified domain name (FQDN) such as
        /// <code>accounting.example.com</code>; up to 50 aliases may be associated with a
        /// file system, and alphabetic characters are stored lowercase. See
        /// <a href="https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html">Working with DNS Aliases</a>.
        pub fn aliases(mut self, input: impl Into<std::string::String>) -> Self {
            // Lazily create the Vec on first append, then push in place.
            self.aliases
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// Replaces the whole alias collection with the supplied optional value
        /// (pass `None` to clear it).
        pub fn set_aliases(
            self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            Self {
                aliases: input,
                ..self
            }
        }
        /// Sets the audit-log configuration used to log user accesses of files, folders,
        /// and file shares on the file system.
        pub fn audit_log_configuration(
            self,
            input: crate::model::WindowsAuditLogCreateConfiguration,
        ) -> Self {
            Self {
                audit_log_configuration: Some(input),
                ..self
            }
        }
        /// Replaces the audit-log configuration with the supplied optional value
        /// (pass `None` to clear it).
        pub fn set_audit_log_configuration(
            self,
            input: std::option::Option<crate::model::WindowsAuditLogCreateConfiguration>,
        ) -> Self {
            Self {
                audit_log_configuration: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`CreateFileSystemWindowsConfiguration`](crate::model::CreateFileSystemWindowsConfiguration).
        pub fn build(self) -> crate::model::CreateFileSystemWindowsConfiguration {
            // Destructure once so each field moves straight into the output struct.
            let Self {
                active_directory_id,
                self_managed_active_directory_configuration,
                deployment_type,
                preferred_subnet_id,
                throughput_capacity,
                weekly_maintenance_start_time,
                daily_automatic_backup_start_time,
                automatic_backup_retention_days,
                copy_tags_to_backups,
                aliases,
                audit_log_configuration,
            } = self;
            crate::model::CreateFileSystemWindowsConfiguration {
                active_directory_id,
                self_managed_active_directory_configuration,
                deployment_type,
                preferred_subnet_id,
                throughput_capacity,
                weekly_maintenance_start_time,
                daily_automatic_backup_start_time,
                automatic_backup_retention_days,
                copy_tags_to_backups,
                aliases,
                audit_log_configuration,
            }
        }
    }
}
impl CreateFileSystemWindowsConfiguration {
    /// Returns a fresh, empty builder for assembling a
    /// [`CreateFileSystemWindowsConfiguration`](crate::model::CreateFileSystemWindowsConfiguration).
    pub fn builder() -> crate::model::create_file_system_windows_configuration::Builder {
        // The concrete type is fixed by the return annotation, so plain
        // `Default::default()` resolves to the builder's derived impl.
        std::default::Default::default()
    }
}

/// <p>The ONTAP configuration properties of the FSx for ONTAP file system that you are creating.</p>
#[non_exhaustive]
// `Debug` is intentionally not derived; the manual impl below redacts
// `fsx_admin_password`.
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateFileSystemOntapConfiguration {
    /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
    #[doc(hidden)]
    pub automatic_backup_retention_days: std::option::Option<i32>,
    /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
    #[doc(hidden)]
    pub daily_automatic_backup_start_time: std::option::Option<std::string::String>,
    /// <p>Specifies the FSx for ONTAP file system deployment type to use in creating the file system. </p>
    /// <ul>
    /// <li> <p> <code>MULTI_AZ_1</code> - (Default) A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. </p> </li>
    /// <li> <p> <code>SINGLE_AZ_1</code> - A file system configured for Single-AZ redundancy.</p> </li>
    /// </ul>
    /// <p>For information about the use cases for Multi-AZ and Single-AZ deployments, refer to <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/high-availability-AZ.html">Choosing a file system deployment type</a>. </p>
    #[doc(hidden)]
    pub deployment_type: std::option::Option<crate::model::OntapDeploymentType>,
    /// <p>(Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API, Amazon FSx selects an unused IP address range for you from the 198.19.* range. By default in the Amazon FSx console, Amazon FSx chooses the last 64 IP addresses from the VPC’s primary CIDR range to use as the endpoint IP address range for the file system. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables.</p>
    #[doc(hidden)]
    pub endpoint_ip_address_range: std::option::Option<std::string::String>,
    /// <p>The ONTAP administrative password for the <code>fsxadmin</code> user with which you administer your file system using the NetApp ONTAP CLI and REST API.</p>
    // NOTE: this value is redacted in this type's `Debug` output.
    #[doc(hidden)]
    pub fsx_admin_password: std::option::Option<std::string::String>,
    /// <p>The SSD IOPS configuration for the FSx for ONTAP file system.</p>
    #[doc(hidden)]
    pub disk_iops_configuration: std::option::Option<crate::model::DiskIopsConfiguration>,
    /// <p>Required when <code>DeploymentType</code> is set to <code>MULTI_AZ_1</code>. This specifies the subnet in which you want the preferred file server to be located.</p>
    #[doc(hidden)]
    pub preferred_subnet_id: std::option::Option<std::string::String>,
    /// <p>(Multi-AZ only) Specifies the virtual private cloud (VPC) route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.</p>
    #[doc(hidden)]
    pub route_table_ids: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>Sets the throughput capacity for the file system that you're creating. Valid values are 128, 256, 512, 1024, 2048, and 4096 MBps.</p>
    #[doc(hidden)]
    pub throughput_capacity: std::option::Option<i32>,
    /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
    /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
    /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
    /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
    #[doc(hidden)]
    pub weekly_maintenance_start_time: std::option::Option<std::string::String>,
}
impl CreateFileSystemOntapConfiguration {
    /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
    pub fn automatic_backup_retention_days(&self) -> std::option::Option<i32> {
        self.automatic_backup_retention_days
    }
    /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
    pub fn daily_automatic_backup_start_time(&self) -> std::option::Option<&str> {
        self.daily_automatic_backup_start_time
            .as_ref()
            .map(|s| s.as_str())
    }
    /// <p>Specifies the FSx for ONTAP file system deployment type to use in creating the file system. </p>
    /// <ul>
    /// <li> <p> <code>MULTI_AZ_1</code> - (Default) A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. </p> </li>
    /// <li> <p> <code>SINGLE_AZ_1</code> - A file system configured for Single-AZ redundancy.</p> </li>
    /// </ul>
    /// <p>For information about the use cases for Multi-AZ and Single-AZ deployments, refer to <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/high-availability-AZ.html">Choosing a file system deployment type</a>. </p>
    pub fn deployment_type(&self) -> std::option::Option<&crate::model::OntapDeploymentType> {
        match &self.deployment_type {
            Some(value) => Some(value),
            None => None,
        }
    }
    /// <p>(Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API, Amazon FSx selects an unused IP address range for you from the 198.19.* range. By default in the Amazon FSx console, Amazon FSx chooses the last 64 IP addresses from the VPC’s primary CIDR range to use as the endpoint IP address range for the file system. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables.</p>
    pub fn endpoint_ip_address_range(&self) -> std::option::Option<&str> {
        self.endpoint_ip_address_range.as_ref().map(|s| s.as_str())
    }
    /// <p>The ONTAP administrative password for the <code>fsxadmin</code> user with which you administer your file system using the NetApp ONTAP CLI and REST API.</p>
    pub fn fsx_admin_password(&self) -> std::option::Option<&str> {
        self.fsx_admin_password.as_ref().map(|s| s.as_str())
    }
    /// <p>The SSD IOPS configuration for the FSx for ONTAP file system.</p>
    pub fn disk_iops_configuration(
        &self,
    ) -> std::option::Option<&crate::model::DiskIopsConfiguration> {
        match &self.disk_iops_configuration {
            Some(config) => Some(config),
            None => None,
        }
    }
    /// <p>Required when <code>DeploymentType</code> is set to <code>MULTI_AZ_1</code>. This specifies the subnet in which you want the preferred file server to be located.</p>
    pub fn preferred_subnet_id(&self) -> std::option::Option<&str> {
        self.preferred_subnet_id.as_ref().map(|s| s.as_str())
    }
    /// <p>(Multi-AZ only) Specifies the virtual private cloud (VPC) route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.</p>
    pub fn route_table_ids(&self) -> std::option::Option<&[std::string::String]> {
        self.route_table_ids.as_ref().map(|ids| ids.as_slice())
    }
    /// <p>Sets the throughput capacity for the file system that you're creating. Valid values are 128, 256, 512, 1024, 2048, and 4096 MBps.</p>
    pub fn throughput_capacity(&self) -> std::option::Option<i32> {
        self.throughput_capacity
    }
    /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
    /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
    /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
    /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
    pub fn weekly_maintenance_start_time(&self) -> std::option::Option<&str> {
        self.weekly_maintenance_start_time
            .as_ref()
            .map(|s| s.as_str())
    }
}
impl std::fmt::Debug for CreateFileSystemOntapConfiguration {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Hand-written (not derived) so the admin password is never printed.
        f.debug_struct("CreateFileSystemOntapConfiguration")
            .field(
                "automatic_backup_retention_days",
                &self.automatic_backup_retention_days,
            )
            .field(
                "daily_automatic_backup_start_time",
                &self.daily_automatic_backup_start_time,
            )
            .field("deployment_type", &self.deployment_type)
            .field("endpoint_ip_address_range", &self.endpoint_ip_address_range)
            .field("fsx_admin_password", &"*** Sensitive Data Redacted ***")
            .field("disk_iops_configuration", &self.disk_iops_configuration)
            .field("preferred_subnet_id", &self.preferred_subnet_id)
            .field("route_table_ids", &self.route_table_ids)
            .field("throughput_capacity", &self.throughput_capacity)
            .field(
                "weekly_maintenance_start_time",
                &self.weekly_maintenance_start_time,
            )
            .finish()
    }
}
/// See [`CreateFileSystemOntapConfiguration`](crate::model::CreateFileSystemOntapConfiguration).
pub mod create_file_system_ontap_configuration {

    /// A builder for [`CreateFileSystemOntapConfiguration`](crate::model::CreateFileSystemOntapConfiguration).
    ///
    /// All fields start out as `None` (via the derived `Default`); call the
    /// setters to populate them and [`build`](Self::build) to produce the
    /// configuration value.
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default)]
    pub struct Builder {
        pub(crate) automatic_backup_retention_days: std::option::Option<i32>,
        pub(crate) daily_automatic_backup_start_time: std::option::Option<std::string::String>,
        pub(crate) deployment_type: std::option::Option<crate::model::OntapDeploymentType>,
        pub(crate) endpoint_ip_address_range: std::option::Option<std::string::String>,
        pub(crate) fsx_admin_password: std::option::Option<std::string::String>,
        pub(crate) disk_iops_configuration:
            std::option::Option<crate::model::DiskIopsConfiguration>,
        pub(crate) preferred_subnet_id: std::option::Option<std::string::String>,
        pub(crate) route_table_ids: std::option::Option<std::vec::Vec<std::string::String>>,
        pub(crate) throughput_capacity: std::option::Option<i32>,
        pub(crate) weekly_maintenance_start_time: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
        pub fn automatic_backup_retention_days(mut self, input: i32) -> Self {
            self.automatic_backup_retention_days = Some(input);
            self
        }
        /// <p>The number of days to retain automatic backups. Setting this property to <code>0</code> disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is <code>0</code>.</p>
        pub fn set_automatic_backup_retention_days(
            mut self,
            input: std::option::Option<i32>,
        ) -> Self {
            self.automatic_backup_retention_days = input;
            self
        }
        /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
        pub fn daily_automatic_backup_start_time(
            mut self,
            input: impl Into<std::string::String>,
        ) -> Self {
            self.daily_automatic_backup_start_time = Some(input.into());
            self
        }
        /// <p>A recurring daily time, in the format <code>HH:MM</code>. <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. For example, <code>05:00</code> specifies 5 AM daily. </p>
        pub fn set_daily_automatic_backup_start_time(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.daily_automatic_backup_start_time = input;
            self
        }
        /// <p>Specifies the FSx for ONTAP file system deployment type to use in creating the file system. </p>
        /// <ul>
        /// <li> <p> <code>MULTI_AZ_1</code> - (Default) A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. </p> </li>
        /// <li> <p> <code>SINGLE_AZ_1</code> - A file system configured for Single-AZ redundancy.</p> </li>
        /// </ul>
        /// <p>For information about the use cases for Multi-AZ and Single-AZ deployments, refer to <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/high-availability-AZ.html">Choosing a file system deployment type</a>. </p>
        pub fn deployment_type(mut self, input: crate::model::OntapDeploymentType) -> Self {
            self.deployment_type = Some(input);
            self
        }
        /// <p>Specifies the FSx for ONTAP file system deployment type to use in creating the file system. </p>
        /// <ul>
        /// <li> <p> <code>MULTI_AZ_1</code> - (Default) A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. </p> </li>
        /// <li> <p> <code>SINGLE_AZ_1</code> - A file system configured for Single-AZ redundancy.</p> </li>
        /// </ul>
        /// <p>For information about the use cases for Multi-AZ and Single-AZ deployments, refer to <a href="https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/high-availability-AZ.html">Choosing a file system deployment type</a>. </p>
        pub fn set_deployment_type(
            mut self,
            input: std::option::Option<crate::model::OntapDeploymentType>,
        ) -> Self {
            self.deployment_type = input;
            self
        }
        /// <p>(Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API, Amazon FSx selects an unused IP address range for you from the 198.19.* range. By default in the Amazon FSx console, Amazon FSx chooses the last 64 IP addresses from the VPC’s primary CIDR range to use as the endpoint IP address range for the file system. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables.</p>
        pub fn endpoint_ip_address_range(mut self, input: impl Into<std::string::String>) -> Self {
            self.endpoint_ip_address_range = Some(input.into());
            self
        }
        /// <p>(Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API, Amazon FSx selects an unused IP address range for you from the 198.19.* range. By default in the Amazon FSx console, Amazon FSx chooses the last 64 IP addresses from the VPC’s primary CIDR range to use as the endpoint IP address range for the file system. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables.</p>
        pub fn set_endpoint_ip_address_range(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.endpoint_ip_address_range = input;
            self
        }
        /// <p>The ONTAP administrative password for the <code>fsxadmin</code> user with which you administer your file system using the NetApp ONTAP CLI and REST API.</p>
        pub fn fsx_admin_password(mut self, input: impl Into<std::string::String>) -> Self {
            self.fsx_admin_password = Some(input.into());
            self
        }
        /// <p>The ONTAP administrative password for the <code>fsxadmin</code> user with which you administer your file system using the NetApp ONTAP CLI and REST API.</p>
        pub fn set_fsx_admin_password(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.fsx_admin_password = input;
            self
        }
        /// <p>The SSD IOPS configuration for the FSx for ONTAP file system.</p>
        pub fn disk_iops_configuration(
            mut self,
            input: crate::model::DiskIopsConfiguration,
        ) -> Self {
            self.disk_iops_configuration = Some(input);
            self
        }
        /// <p>The SSD IOPS configuration for the FSx for ONTAP file system.</p>
        pub fn set_disk_iops_configuration(
            mut self,
            input: std::option::Option<crate::model::DiskIopsConfiguration>,
        ) -> Self {
            self.disk_iops_configuration = input;
            self
        }
        /// <p>Required when <code>DeploymentType</code> is set to <code>MULTI_AZ_1</code>. This specifies the subnet in which you want the preferred file server to be located.</p>
        pub fn preferred_subnet_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.preferred_subnet_id = Some(input.into());
            self
        }
        /// <p>Required when <code>DeploymentType</code> is set to <code>MULTI_AZ_1</code>. This specifies the subnet in which you want the preferred file server to be located.</p>
        pub fn set_preferred_subnet_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.preferred_subnet_id = input;
            self
        }
        /// Appends an item to `route_table_ids`.
        ///
        /// To override the contents of this collection use [`set_route_table_ids`](Self::set_route_table_ids).
        ///
        /// <p>(Multi-AZ only) Specifies the virtual private cloud (VPC) route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.</p>
        pub fn route_table_ids(mut self, input: impl Into<std::string::String>) -> Self {
            // Lazily create the backing Vec on the first append.
            let mut v = self.route_table_ids.unwrap_or_default();
            v.push(input.into());
            self.route_table_ids = Some(v);
            self
        }
        /// <p>(Multi-AZ only) Specifies the virtual private cloud (VPC) route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.</p>
        pub fn set_route_table_ids(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.route_table_ids = input;
            self
        }
        /// <p>Sets the throughput capacity for the file system that you're creating. Valid values are 128, 256, 512, 1024, 2048, and 4096 MBps.</p>
        pub fn throughput_capacity(mut self, input: i32) -> Self {
            self.throughput_capacity = Some(input);
            self
        }
        /// <p>Sets the throughput capacity for the file system that you're creating. Valid values are 128, 256, 512, 1024, 2048, and 4096 MBps.</p>
        pub fn set_throughput_capacity(mut self, input: std::option::Option<i32>) -> Self {
            self.throughput_capacity = input;
            self
        }
        /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
        /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
        /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
        /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
        pub fn weekly_maintenance_start_time(
            mut self,
            input: impl Into<std::string::String>,
        ) -> Self {
            self.weekly_maintenance_start_time = Some(input.into());
            self
        }
        /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
        /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
        /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
        /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
        pub fn set_weekly_maintenance_start_time(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.weekly_maintenance_start_time = input;
            self
        }
        /// Consumes the builder and constructs a [`CreateFileSystemOntapConfiguration`](crate::model::CreateFileSystemOntapConfiguration).
        ///
        /// No validation happens here; unset fields stay `None`.
        pub fn build(self) -> crate::model::CreateFileSystemOntapConfiguration {
            crate::model::CreateFileSystemOntapConfiguration {
                automatic_backup_retention_days: self.automatic_backup_retention_days,
                daily_automatic_backup_start_time: self.daily_automatic_backup_start_time,
                deployment_type: self.deployment_type,
                endpoint_ip_address_range: self.endpoint_ip_address_range,
                fsx_admin_password: self.fsx_admin_password,
                disk_iops_configuration: self.disk_iops_configuration,
                preferred_subnet_id: self.preferred_subnet_id,
                route_table_ids: self.route_table_ids,
                throughput_capacity: self.throughput_capacity,
                weekly_maintenance_start_time: self.weekly_maintenance_start_time,
            }
        }
    }
    // `Debug` is implemented by hand (rather than derived) so that the
    // `fsx_admin_password` value is never printed.
    impl std::fmt::Debug for Builder {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            let mut formatter = f.debug_struct("Builder");
            formatter.field(
                "automatic_backup_retention_days",
                &self.automatic_backup_retention_days,
            );
            formatter.field(
                "daily_automatic_backup_start_time",
                &self.daily_automatic_backup_start_time,
            );
            formatter.field("deployment_type", &self.deployment_type);
            formatter.field("endpoint_ip_address_range", &self.endpoint_ip_address_range);
            formatter.field("fsx_admin_password", &"*** Sensitive Data Redacted ***");
            formatter.field("disk_iops_configuration", &self.disk_iops_configuration);
            formatter.field("preferred_subnet_id", &self.preferred_subnet_id);
            formatter.field("route_table_ids", &self.route_table_ids);
            formatter.field("throughput_capacity", &self.throughput_capacity);
            formatter.field(
                "weekly_maintenance_start_time",
                &self.weekly_maintenance_start_time,
            );
            formatter.finish()
        }
    }
}
impl CreateFileSystemOntapConfiguration {
    /// Creates a new builder-style object to manufacture [`CreateFileSystemOntapConfiguration`](crate::model::CreateFileSystemOntapConfiguration).
    pub fn builder() -> crate::model::create_file_system_ontap_configuration::Builder {
        // The builder derives `Default`, so a default value is a fresh,
        // all-`None` builder.
        std::default::Default::default()
    }
}

/// <p>The response object for the Amazon File Cache resource being created in the <code>CreateFileCache</code> operation.</p>
///
/// All fields are modeled as `Option` and may be `None`.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct FileCacheCreating {
    /// <p>An Amazon Web Services account ID. This ID is a 12-digit number that you use to construct Amazon Resource Names (ARNs) for resources.</p>
    #[doc(hidden)]
    pub owner_id: std::option::Option<std::string::String>,
    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
    #[doc(hidden)]
    pub creation_time: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The system-generated, unique ID of the cache.</p>
    #[doc(hidden)]
    pub file_cache_id: std::option::Option<std::string::String>,
    /// <p>The type of cache, which must be <code>LUSTRE</code>.</p>
    #[doc(hidden)]
    pub file_cache_type: std::option::Option<crate::model::FileCacheType>,
    /// <p>The Lustre version of the cache, which must be <code>2.12</code>.</p>
    #[doc(hidden)]
    pub file_cache_type_version: std::option::Option<std::string::String>,
    /// <p>The lifecycle status of the cache. The following are the possible values and what they mean:</p>
    /// <ul>
    /// <li> <p> <code>AVAILABLE</code> - The cache is in a healthy state, and is reachable and available for use.</p> </li>
    /// <li> <p> <code>CREATING</code> - The new cache is being created.</p> </li>
    /// <li> <p> <code>DELETING</code> - An existing cache is being deleted.</p> </li>
    /// <li> <p> <code>UPDATING</code> - The cache is undergoing a customer-initiated update.</p> </li>
    /// <li> <p> <code>FAILED</code> - An existing cache has experienced an unrecoverable failure. When creating a new cache, the cache was unable to be created.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub lifecycle: std::option::Option<crate::model::FileCacheLifecycle>,
    /// <p>A structure providing details of any failures that occurred.</p>
    #[doc(hidden)]
    pub failure_details: std::option::Option<crate::model::FileCacheFailureDetails>,
    /// <p>The storage capacity of the cache in gibibytes (GiB).</p>
    #[doc(hidden)]
    pub storage_capacity: std::option::Option<i32>,
    /// <p>The ID of your virtual private cloud (VPC). For more information, see <a href="https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html">VPC and subnets</a> in the <i>Amazon VPC User Guide</i>.</p>
    #[doc(hidden)]
    pub vpc_id: std::option::Option<std::string::String>,
    /// <p>A list of subnet IDs that the cache will be accessible from. You can specify only one subnet ID in a call to the <code>CreateFileCache</code> operation.</p>
    #[doc(hidden)]
    pub subnet_ids: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>A list of network interface IDs.</p>
    #[doc(hidden)]
    pub network_interface_ids: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>The Domain Name System (DNS) name for the cache.</p>
    #[doc(hidden)]
    pub dns_name: std::option::Option<std::string::String>,
    /// <p>Specifies the ID of the Key Management Service (KMS) key to use for encrypting data on an Amazon File Cache. If a <code>KmsKeyId</code> isn't specified, the Amazon FSx-managed KMS key for your account is used. For more information, see <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_Encrypt.html">Encrypt</a> in the <i>Key Management Service API Reference</i>.</p>
    #[doc(hidden)]
    pub kms_key_id: std::option::Option<std::string::String>,
    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    #[doc(hidden)]
    pub resource_arn: std::option::Option<std::string::String>,
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    #[doc(hidden)]
    pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    /// <p>A boolean flag indicating whether tags for the cache should be copied to data repository associations.</p>
    #[doc(hidden)]
    pub copy_tags_to_data_repository_associations: std::option::Option<bool>,
    /// <p>The configuration for the Amazon File Cache resource.</p>
    #[doc(hidden)]
    pub lustre_configuration: std::option::Option<crate::model::FileCacheLustreConfiguration>,
    /// <p>A list of IDs of data repository associations that are associated with this cache.</p>
    #[doc(hidden)]
    pub data_repository_association_ids: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl FileCacheCreating {
    /// <p>An Amazon Web Services account ID. This ID is a 12-digit number that you use to construct Amazon Resource Names (ARNs) for resources.</p>
    pub fn owner_id(&self) -> std::option::Option<&str> {
        self.owner_id.as_ref().map(|s| s.as_str())
    }
    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
    pub fn creation_time(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        match &self.creation_time {
            Some(time) => Some(time),
            None => None,
        }
    }
    /// <p>The system-generated, unique ID of the cache.</p>
    pub fn file_cache_id(&self) -> std::option::Option<&str> {
        self.file_cache_id.as_ref().map(|s| s.as_str())
    }
    /// <p>The type of cache, which must be <code>LUSTRE</code>.</p>
    pub fn file_cache_type(&self) -> std::option::Option<&crate::model::FileCacheType> {
        match &self.file_cache_type {
            Some(value) => Some(value),
            None => None,
        }
    }
    /// <p>The Lustre version of the cache, which must be <code>2.12</code>.</p>
    pub fn file_cache_type_version(&self) -> std::option::Option<&str> {
        self.file_cache_type_version.as_ref().map(|s| s.as_str())
    }
    /// <p>The lifecycle status of the cache. The following are the possible values and what they mean:</p>
    /// <ul>
    /// <li> <p> <code>AVAILABLE</code> - The cache is in a healthy state, and is reachable and available for use.</p> </li>
    /// <li> <p> <code>CREATING</code> - The new cache is being created.</p> </li>
    /// <li> <p> <code>DELETING</code> - An existing cache is being deleted.</p> </li>
    /// <li> <p> <code>UPDATING</code> - The cache is undergoing a customer-initiated update.</p> </li>
    /// <li> <p> <code>FAILED</code> - An existing cache has experienced an unrecoverable failure. When creating a new cache, the cache was unable to be created.</p> </li>
    /// </ul>
    pub fn lifecycle(&self) -> std::option::Option<&crate::model::FileCacheLifecycle> {
        match &self.lifecycle {
            Some(state) => Some(state),
            None => None,
        }
    }
    /// <p>A structure providing details of any failures that occurred.</p>
    pub fn failure_details(&self) -> std::option::Option<&crate::model::FileCacheFailureDetails> {
        match &self.failure_details {
            Some(details) => Some(details),
            None => None,
        }
    }
    /// <p>The storage capacity of the cache in gibibytes (GiB).</p>
    pub fn storage_capacity(&self) -> std::option::Option<i32> {
        self.storage_capacity
    }
    /// <p>The ID of your virtual private cloud (VPC). For more information, see <a href="https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html">VPC and subnets</a> in the <i>Amazon VPC User Guide</i>.</p>
    pub fn vpc_id(&self) -> std::option::Option<&str> {
        self.vpc_id.as_ref().map(|s| s.as_str())
    }
    /// <p>A list of subnet IDs that the cache will be accessible from. You can specify only one subnet ID in a call to the <code>CreateFileCache</code> operation.</p>
    pub fn subnet_ids(&self) -> std::option::Option<&[std::string::String]> {
        self.subnet_ids.as_ref().map(|ids| ids.as_slice())
    }
    /// <p>A list of network interface IDs.</p>
    pub fn network_interface_ids(&self) -> std::option::Option<&[std::string::String]> {
        self.network_interface_ids.as_ref().map(|ids| ids.as_slice())
    }
    /// <p>The Domain Name System (DNS) name for the cache.</p>
    pub fn dns_name(&self) -> std::option::Option<&str> {
        self.dns_name.as_ref().map(|s| s.as_str())
    }
    /// <p>Specifies the ID of the Key Management Service (KMS) key to use for encrypting data on an Amazon File Cache. If a <code>KmsKeyId</code> isn't specified, the Amazon FSx-managed KMS key for your account is used. For more information, see <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_Encrypt.html">Encrypt</a> in the <i>Key Management Service API Reference</i>.</p>
    pub fn kms_key_id(&self) -> std::option::Option<&str> {
        self.kms_key_id.as_ref().map(|s| s.as_str())
    }
    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
    pub fn resource_arn(&self) -> std::option::Option<&str> {
        self.resource_arn.as_ref().map(|s| s.as_str())
    }
    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
    pub fn tags(&self) -> std::option::Option<&[crate::model::Tag]> {
        self.tags.as_ref().map(|tags| tags.as_slice())
    }
    /// <p>A boolean flag indicating whether tags for the cache should be copied to data repository associations.</p>
    pub fn copy_tags_to_data_repository_associations(&self) -> std::option::Option<bool> {
        self.copy_tags_to_data_repository_associations
    }
    /// <p>The configuration for the Amazon File Cache resource.</p>
    pub fn lustre_configuration(
        &self,
    ) -> std::option::Option<&crate::model::FileCacheLustreConfiguration> {
        match &self.lustre_configuration {
            Some(config) => Some(config),
            None => None,
        }
    }
    /// <p>A list of IDs of data repository associations that are associated with this cache.</p>
    pub fn data_repository_association_ids(&self) -> std::option::Option<&[std::string::String]> {
        self.data_repository_association_ids
            .as_ref()
            .map(|ids| ids.as_slice())
    }
}
/// See [`FileCacheCreating`](crate::model::FileCacheCreating).
pub mod file_cache_creating {

    /// A builder for [`FileCacheCreating`](crate::model::FileCacheCreating).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) owner_id: std::option::Option<std::string::String>,
        pub(crate) creation_time: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) file_cache_id: std::option::Option<std::string::String>,
        pub(crate) file_cache_type: std::option::Option<crate::model::FileCacheType>,
        pub(crate) file_cache_type_version: std::option::Option<std::string::String>,
        pub(crate) lifecycle: std::option::Option<crate::model::FileCacheLifecycle>,
        pub(crate) failure_details: std::option::Option<crate::model::FileCacheFailureDetails>,
        pub(crate) storage_capacity: std::option::Option<i32>,
        pub(crate) vpc_id: std::option::Option<std::string::String>,
        pub(crate) subnet_ids: std::option::Option<std::vec::Vec<std::string::String>>,
        pub(crate) network_interface_ids: std::option::Option<std::vec::Vec<std::string::String>>,
        pub(crate) dns_name: std::option::Option<std::string::String>,
        pub(crate) kms_key_id: std::option::Option<std::string::String>,
        pub(crate) resource_arn: std::option::Option<std::string::String>,
        pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        pub(crate) copy_tags_to_data_repository_associations: std::option::Option<bool>,
        pub(crate) lustre_configuration:
            std::option::Option<crate::model::FileCacheLustreConfiguration>,
        pub(crate) data_repository_association_ids:
            std::option::Option<std::vec::Vec<std::string::String>>,
    }
    // Each field has a pair of setters: `name(...)` wraps the value in `Some`, while
    // `set_name(...)` accepts an `Option` so an absent value can be passed through as-is.
    // List fields additionally have an appending variant that pushes a single element.
    impl Builder {
        /// <p>An Amazon Web Services account ID. This ID is a 12-digit number that you use to construct Amazon Resource Names (ARNs) for resources.</p>
        pub fn owner_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.owner_id = Some(input.into());
            self
        }
        /// <p>An Amazon Web Services account ID. This ID is a 12-digit number that you use to construct Amazon Resource Names (ARNs) for resources.</p>
        pub fn set_owner_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.owner_id = input;
            self
        }
        /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
        pub fn creation_time(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.creation_time = Some(input);
            self
        }
        /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
        pub fn set_creation_time(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.creation_time = input;
            self
        }
        /// <p>The system-generated, unique ID of the cache.</p>
        pub fn file_cache_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.file_cache_id = Some(input.into());
            self
        }
        /// <p>The system-generated, unique ID of the cache.</p>
        pub fn set_file_cache_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.file_cache_id = input;
            self
        }
        /// <p>The type of cache, which must be <code>LUSTRE</code>.</p>
        pub fn file_cache_type(mut self, input: crate::model::FileCacheType) -> Self {
            self.file_cache_type = Some(input);
            self
        }
        /// <p>The type of cache, which must be <code>LUSTRE</code>.</p>
        pub fn set_file_cache_type(
            mut self,
            input: std::option::Option<crate::model::FileCacheType>,
        ) -> Self {
            self.file_cache_type = input;
            self
        }
        /// <p>The Lustre version of the cache, which must be <code>2.12</code>.</p>
        pub fn file_cache_type_version(mut self, input: impl Into<std::string::String>) -> Self {
            self.file_cache_type_version = Some(input.into());
            self
        }
        /// <p>The Lustre version of the cache, which must be <code>2.12</code>.</p>
        pub fn set_file_cache_type_version(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.file_cache_type_version = input;
            self
        }
        /// <p>The lifecycle status of the cache. The following are the possible values and what they mean:</p>
        /// <ul>
        /// <li> <p> <code>AVAILABLE</code> - The cache is in a healthy state, and is reachable and available for use.</p> </li>
        /// <li> <p> <code>CREATING</code> - The new cache is being created.</p> </li>
        /// <li> <p> <code>DELETING</code> - An existing cache is being deleted.</p> </li>
        /// <li> <p> <code>UPDATING</code> - The cache is undergoing a customer-initiated update.</p> </li>
        /// <li> <p> <code>FAILED</code> - An existing cache has experienced an unrecoverable failure. When creating a new cache, the cache was unable to be created.</p> </li>
        /// </ul>
        pub fn lifecycle(mut self, input: crate::model::FileCacheLifecycle) -> Self {
            self.lifecycle = Some(input);
            self
        }
        /// <p>The lifecycle status of the cache. The following are the possible values and what they mean:</p>
        /// <ul>
        /// <li> <p> <code>AVAILABLE</code> - The cache is in a healthy state, and is reachable and available for use.</p> </li>
        /// <li> <p> <code>CREATING</code> - The new cache is being created.</p> </li>
        /// <li> <p> <code>DELETING</code> - An existing cache is being deleted.</p> </li>
        /// <li> <p> <code>UPDATING</code> - The cache is undergoing a customer-initiated update.</p> </li>
        /// <li> <p> <code>FAILED</code> - An existing cache has experienced an unrecoverable failure. When creating a new cache, the cache was unable to be created.</p> </li>
        /// </ul>
        pub fn set_lifecycle(
            mut self,
            input: std::option::Option<crate::model::FileCacheLifecycle>,
        ) -> Self {
            self.lifecycle = input;
            self
        }
        /// <p>A structure providing details of any failures that occurred.</p>
        pub fn failure_details(mut self, input: crate::model::FileCacheFailureDetails) -> Self {
            self.failure_details = Some(input);
            self
        }
        /// <p>A structure providing details of any failures that occurred.</p>
        pub fn set_failure_details(
            mut self,
            input: std::option::Option<crate::model::FileCacheFailureDetails>,
        ) -> Self {
            self.failure_details = input;
            self
        }
        /// <p>The storage capacity of the cache in gibibytes (GiB).</p>
        pub fn storage_capacity(mut self, input: i32) -> Self {
            self.storage_capacity = Some(input);
            self
        }
        /// <p>The storage capacity of the cache in gibibytes (GiB).</p>
        pub fn set_storage_capacity(mut self, input: std::option::Option<i32>) -> Self {
            self.storage_capacity = input;
            self
        }
        /// <p>The ID of your virtual private cloud (VPC). For more information, see <a href="https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html">VPC and subnets</a> in the <i>Amazon VPC User Guide</i>.</p>
        pub fn vpc_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.vpc_id = Some(input.into());
            self
        }
        /// <p>The ID of your virtual private cloud (VPC). For more information, see <a href="https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html">VPC and subnets</a> in the <i>Amazon VPC User Guide</i>.</p>
        pub fn set_vpc_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.vpc_id = input;
            self
        }
        /// Appends an item to `subnet_ids`.
        ///
        /// To override the contents of this collection use [`set_subnet_ids`](Self::set_subnet_ids).
        ///
        /// <p>A list of subnet IDs that the cache will be accessible from. You can specify only one subnet ID in a call to the <code>CreateFileCache</code> operation.</p>
        pub fn subnet_ids(mut self, input: impl Into<std::string::String>) -> Self {
            let mut v = self.subnet_ids.unwrap_or_default();
            v.push(input.into());
            self.subnet_ids = Some(v);
            self
        }
        /// <p>A list of subnet IDs that the cache will be accessible from. You can specify only one subnet ID in a call to the <code>CreateFileCache</code> operation.</p>
        pub fn set_subnet_ids(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.subnet_ids = input;
            self
        }
        /// Appends an item to `network_interface_ids`.
        ///
        /// To override the contents of this collection use [`set_network_interface_ids`](Self::set_network_interface_ids).
        ///
        /// <p>A list of network interface IDs.</p>
        pub fn network_interface_ids(mut self, input: impl Into<std::string::String>) -> Self {
            let mut v = self.network_interface_ids.unwrap_or_default();
            v.push(input.into());
            self.network_interface_ids = Some(v);
            self
        }
        /// <p>A list of network interface IDs.</p>
        pub fn set_network_interface_ids(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.network_interface_ids = input;
            self
        }
        /// <p>The Domain Name System (DNS) name for the cache.</p>
        pub fn dns_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.dns_name = Some(input.into());
            self
        }
        /// <p>The Domain Name System (DNS) name for the cache.</p>
        pub fn set_dns_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.dns_name = input;
            self
        }
        /// <p>Specifies the ID of the Key Management Service (KMS) key to use for encrypting data on an Amazon File Cache. If a <code>KmsKeyId</code> isn't specified, the Amazon FSx-managed KMS key for your account is used. For more information, see <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_Encrypt.html">Encrypt</a> in the <i>Key Management Service API Reference</i>.</p>
        pub fn kms_key_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.kms_key_id = Some(input.into());
            self
        }
        /// <p>Specifies the ID of the Key Management Service (KMS) key to use for encrypting data on an Amazon File Cache. If a <code>KmsKeyId</code> isn't specified, the Amazon FSx-managed KMS key for your account is used. For more information, see <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_Encrypt.html">Encrypt</a> in the <i>Key Management Service API Reference</i>.</p>
        pub fn set_kms_key_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.kms_key_id = input;
            self
        }
        /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
        pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.resource_arn = Some(input.into());
            self
        }
        /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
        pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.resource_arn = input;
            self
        }
        /// Appends an item to `tags`.
        ///
        /// To override the contents of this collection use [`set_tags`](Self::set_tags).
        ///
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn tags(mut self, input: crate::model::Tag) -> Self {
            let mut v = self.tags.unwrap_or_default();
            v.push(input);
            self.tags = Some(v);
            self
        }
        /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
        pub fn set_tags(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
        ) -> Self {
            self.tags = input;
            self
        }
        /// <p>A boolean flag indicating whether tags for the cache should be copied to data repository associations.</p>
        pub fn copy_tags_to_data_repository_associations(mut self, input: bool) -> Self {
            self.copy_tags_to_data_repository_associations = Some(input);
            self
        }
        /// <p>A boolean flag indicating whether tags for the cache should be copied to data repository associations.</p>
        pub fn set_copy_tags_to_data_repository_associations(
            mut self,
            input: std::option::Option<bool>,
        ) -> Self {
            self.copy_tags_to_data_repository_associations = input;
            self
        }
        /// <p>The configuration for the Amazon File Cache resource.</p>
        pub fn lustre_configuration(
            mut self,
            input: crate::model::FileCacheLustreConfiguration,
        ) -> Self {
            self.lustre_configuration = Some(input);
            self
        }
        /// <p>The configuration for the Amazon File Cache resource.</p>
        pub fn set_lustre_configuration(
            mut self,
            input: std::option::Option<crate::model::FileCacheLustreConfiguration>,
        ) -> Self {
            self.lustre_configuration = input;
            self
        }
        /// Appends an item to `data_repository_association_ids`.
        ///
        /// To override the contents of this collection use [`set_data_repository_association_ids`](Self::set_data_repository_association_ids).
        ///
        /// <p>A list of IDs of data repository associations that are associated with this cache.</p>
        pub fn data_repository_association_ids(
            mut self,
            input: impl Into<std::string::String>,
        ) -> Self {
            let mut v = self.data_repository_association_ids.unwrap_or_default();
            v.push(input.into());
            self.data_repository_association_ids = Some(v);
            self
        }
        /// <p>A list of IDs of data repository associations that are associated with this cache.</p>
        pub fn set_data_repository_association_ids(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.data_repository_association_ids = input;
            self
        }
        // Infallible: no validation is performed; fields left unset are carried over as `None`.
        /// Consumes the builder and constructs a [`FileCacheCreating`](crate::model::FileCacheCreating).
        pub fn build(self) -> crate::model::FileCacheCreating {
            crate::model::FileCacheCreating {
                owner_id: self.owner_id,
                creation_time: self.creation_time,
                file_cache_id: self.file_cache_id,
                file_cache_type: self.file_cache_type,
                file_cache_type_version: self.file_cache_type_version,
                lifecycle: self.lifecycle,
                failure_details: self.failure_details,
                storage_capacity: self.storage_capacity,
                vpc_id: self.vpc_id,
                subnet_ids: self.subnet_ids,
                network_interface_ids: self.network_interface_ids,
                dns_name: self.dns_name,
                kms_key_id: self.kms_key_id,
                resource_arn: self.resource_arn,
                tags: self.tags,
                copy_tags_to_data_repository_associations: self
                    .copy_tags_to_data_repository_associations,
                lustre_configuration: self.lustre_configuration,
                data_repository_association_ids: self.data_repository_association_ids,
            }
        }
    }
}
impl FileCacheCreating {
    /// Creates a new builder-style object to manufacture [`FileCacheCreating`](crate::model::FileCacheCreating).
    pub fn builder() -> crate::model::file_cache_creating::Builder {
        // The builder derives `Default`, so an empty builder is just its default value.
        std::default::Default::default()
    }
}

/// <p>The configuration for a data repository association (DRA) to be created during the Amazon File Cache resource creation. The DRA links the cache to either an Amazon S3 bucket or prefix, or a Network File System (NFS) data repository that supports the NFSv3 protocol.</p>
/// <p>The DRA does not support automatic import or automatic export.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct FileCacheDataRepositoryAssociation {
    /// <p>A path on the cache that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the name is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path <code>/ns1/</code>, then you cannot link another data repository with cache path <code>/ns1/ns2</code>.</p>
    /// <p>This path specifies where in your cache files will be exported from. This cache directory can be linked to only one data repository, and no other data repository can be linked to the directory.</p> <note>
    /// <p>The cache path can only be set to root (/) on an NFS DRA when <code>DataRepositorySubdirectories</code> is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache.</p>
    /// <p>The cache path cannot be set to root (/) for an S3 DRA.</p>
    /// </note>
    #[doc(hidden)]
    pub file_cache_path: std::option::Option<std::string::String>,
    /// <p>The path to the S3 or NFS data repository that links to the cache. You must provide one of the following paths:</p>
    /// <ul>
    /// <li> <p>The path can be an NFS data repository that links to the cache. The path can be in one of two formats:</p>
    /// <ul>
    /// <li> <p>If you are not using the <code>DataRepositorySubdirectories</code> parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format <code>nfs://nfs-domain-name/exportpath</code>. You can therefore link a single NFS Export to a single data repository association.</p> </li>
    /// <li> <p>If you are using the <code>DataRepositorySubdirectories</code> parameter, the path is the domain name of the NFS file system in the format <code>nfs://filer-domain-name</code>, which indicates the root of the subdirectories specified with the <code>DataRepositorySubdirectories</code> parameter.</p> </li>
    /// </ul> </li>
    /// <li> <p>The path can be an S3 bucket or prefix in the format <code>s3://myBucket/myPrefix/</code>.</p> </li>
    /// </ul>
    #[doc(hidden)]
    pub data_repository_path: std::option::Option<std::string::String>,
    /// <p>A list of NFS Exports that will be linked with this data repository association. The Export paths are in the format <code>/exportpath1</code>. To use this parameter, you must configure <code>DataRepositoryPath</code> as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that <code>DataRepositorySubdirectories</code> is not supported for S3 data repositories.</p>
    #[doc(hidden)]
    pub data_repository_subdirectories: std::option::Option<std::vec::Vec<std::string::String>>,
    /// <p>The configuration for a data repository association that links an Amazon File Cache resource to an NFS data repository.</p>
    #[doc(hidden)]
    pub nfs: std::option::Option<crate::model::FileCacheNfsConfiguration>,
}
impl FileCacheDataRepositoryAssociation {
    /// <p>A path on the cache that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the name is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path <code>/ns1/</code>, then you cannot link another data repository with cache path <code>/ns1/ns2</code>.</p>
    /// <p>This path specifies where in your cache files will be exported from. This cache directory can be linked to only one data repository, and no other data repository can be linked to the directory.</p> <note>
    /// <p>The cache path can only be set to root (/) on an NFS DRA when <code>DataRepositorySubdirectories</code> is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache.</p>
    /// <p>The cache path cannot be set to root (/) for an S3 DRA.</p>
    /// </note>
    pub fn file_cache_path(&self) -> std::option::Option<&str> {
        self.file_cache_path.as_deref()
    }
    /// <p>The path to the S3 or NFS data repository that links to the cache. You must provide one of the following paths:</p>
    /// <ul>
    /// <li> <p>The path can be an NFS data repository that links to the cache. The path can be in one of two formats:</p>
    /// <ul>
    /// <li> <p>If you are not using the <code>DataRepositorySubdirectories</code> parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format <code>nfs://nfs-domain-name/exportpath</code>. You can therefore link a single NFS Export to a single data repository association.</p> </li>
    /// <li> <p>If you are using the <code>DataRepositorySubdirectories</code> parameter, the path is the domain name of the NFS file system in the format <code>nfs://filer-domain-name</code>, which indicates the root of the subdirectories specified with the <code>DataRepositorySubdirectories</code> parameter.</p> </li>
    /// </ul> </li>
    /// <li> <p>The path can be an S3 bucket or prefix in the format <code>s3://myBucket/myPrefix/</code>.</p> </li>
    /// </ul>
    pub fn data_repository_path(&self) -> std::option::Option<&str> {
        self.data_repository_path.as_deref()
    }
    /// <p>A list of NFS Exports that will be linked with this data repository association. The Export paths are in the format <code>/exportpath1</code>. To use this parameter, you must configure <code>DataRepositoryPath</code> as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that <code>DataRepositorySubdirectories</code> is not supported for S3 data repositories.</p>
    pub fn data_repository_subdirectories(&self) -> std::option::Option<&[std::string::String]> {
        self.data_repository_subdirectories.as_deref()
    }
    /// <p>The configuration for a data repository association that links an Amazon File Cache resource to an NFS data repository.</p>
    pub fn nfs(&self) -> std::option::Option<&crate::model::FileCacheNfsConfiguration> {
        self.nfs.as_ref()
    }
}
/// See [`FileCacheDataRepositoryAssociation`](crate::model::FileCacheDataRepositoryAssociation).
pub mod file_cache_data_repository_association {

    /// A builder for [`FileCacheDataRepositoryAssociation`](crate::model::FileCacheDataRepositoryAssociation).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) file_cache_path: std::option::Option<std::string::String>,
        pub(crate) data_repository_path: std::option::Option<std::string::String>,
        pub(crate) data_repository_subdirectories:
            std::option::Option<std::vec::Vec<std::string::String>>,
        pub(crate) nfs: std::option::Option<crate::model::FileCacheNfsConfiguration>,
    }
    // Setter pairs follow the SDK convention: `name(...)` wraps the value in `Some`,
    // while `set_name(...)` accepts an `Option` so an absent value can be passed through.
    impl Builder {
        /// <p>A path on the cache that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the name is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path <code>/ns1/</code>, then you cannot link another data repository with cache path <code>/ns1/ns2</code>.</p>
        /// <p>This path specifies where in your cache files will be exported from. This cache directory can be linked to only one data repository, and no other data repository can be linked to the directory.</p> <note>
        /// <p>The cache path can only be set to root (/) on an NFS DRA when <code>DataRepositorySubdirectories</code> is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache.</p>
        /// <p>The cache path cannot be set to root (/) for an S3 DRA.</p>
        /// </note>
        pub fn file_cache_path(mut self, input: impl Into<std::string::String>) -> Self {
            self.file_cache_path = Some(input.into());
            self
        }
        /// <p>A path on the cache that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the name is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path <code>/ns1/</code>, then you cannot link another data repository with cache path <code>/ns1/ns2</code>.</p>
        /// <p>This path specifies where in your cache files will be exported from. This cache directory can be linked to only one data repository, and no other data repository can be linked to the directory.</p> <note>
        /// <p>The cache path can only be set to root (/) on an NFS DRA when <code>DataRepositorySubdirectories</code> is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache.</p>
        /// <p>The cache path cannot be set to root (/) for an S3 DRA.</p>
        /// </note>
        pub fn set_file_cache_path(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.file_cache_path = input;
            self
        }
        /// <p>The path to the S3 or NFS data repository that links to the cache. You must provide one of the following paths:</p>
        /// <ul>
        /// <li> <p>The path can be an NFS data repository that links to the cache. The path can be in one of two formats:</p>
        /// <ul>
        /// <li> <p>If you are not using the <code>DataRepositorySubdirectories</code> parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format <code>nfs://nfs-domain-name/exportpath</code>. You can therefore link a single NFS Export to a single data repository association.</p> </li>
        /// <li> <p>If you are using the <code>DataRepositorySubdirectories</code> parameter, the path is the domain name of the NFS file system in the format <code>nfs://filer-domain-name</code>, which indicates the root of the subdirectories specified with the <code>DataRepositorySubdirectories</code> parameter.</p> </li>
        /// </ul> </li>
        /// <li> <p>The path can be an S3 bucket or prefix in the format <code>s3://myBucket/myPrefix/</code>.</p> </li>
        /// </ul>
        pub fn data_repository_path(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_repository_path = Some(input.into());
            self
        }
        /// <p>The path to the S3 or NFS data repository that links to the cache. You must provide one of the following paths:</p>
        /// <ul>
        /// <li> <p>The path can be an NFS data repository that links to the cache. The path can be in one of two formats:</p>
        /// <ul>
        /// <li> <p>If you are not using the <code>DataRepositorySubdirectories</code> parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format <code>nfs://nfs-domain-name/exportpath</code>. You can therefore link a single NFS Export to a single data repository association.</p> </li>
        /// <li> <p>If you are using the <code>DataRepositorySubdirectories</code> parameter, the path is the domain name of the NFS file system in the format <code>nfs://filer-domain-name</code>, which indicates the root of the subdirectories specified with the <code>DataRepositorySubdirectories</code> parameter.</p> </li>
        /// </ul> </li>
        /// <li> <p>The path can be an S3 bucket or prefix in the format <code>s3://myBucket/myPrefix/</code>.</p> </li>
        /// </ul>
        pub fn set_data_repository_path(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.data_repository_path = input;
            self
        }
        /// Appends an item to `data_repository_subdirectories`.
        ///
        /// To override the contents of this collection use [`set_data_repository_subdirectories`](Self::set_data_repository_subdirectories).
        ///
        /// <p>A list of NFS Exports that will be linked with this data repository association. The Export paths are in the format <code>/exportpath1</code>. To use this parameter, you must configure <code>DataRepositoryPath</code> as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that <code>DataRepositorySubdirectories</code> is not supported for S3 data repositories.</p>
        pub fn data_repository_subdirectories(
            mut self,
            input: impl Into<std::string::String>,
        ) -> Self {
            let mut v = self.data_repository_subdirectories.unwrap_or_default();
            v.push(input.into());
            self.data_repository_subdirectories = Some(v);
            self
        }
        /// <p>A list of NFS Exports that will be linked with this data repository association. The Export paths are in the format <code>/exportpath1</code>. To use this parameter, you must configure <code>DataRepositoryPath</code> as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that <code>DataRepositorySubdirectories</code> is not supported for S3 data repositories.</p>
        pub fn set_data_repository_subdirectories(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.data_repository_subdirectories = input;
            self
        }
        /// <p>The configuration for a data repository association that links an Amazon File Cache resource to an NFS data repository.</p>
        pub fn nfs(mut self, input: crate::model::FileCacheNfsConfiguration) -> Self {
            self.nfs = Some(input);
            self
        }
        /// <p>The configuration for a data repository association that links an Amazon File Cache resource to an NFS data repository.</p>
        pub fn set_nfs(
            mut self,
            input: std::option::Option<crate::model::FileCacheNfsConfiguration>,
        ) -> Self {
            self.nfs = input;
            self
        }
        // Infallible: no validation is performed; fields left unset are carried over as `None`.
        /// Consumes the builder and constructs a [`FileCacheDataRepositoryAssociation`](crate::model::FileCacheDataRepositoryAssociation).
        pub fn build(self) -> crate::model::FileCacheDataRepositoryAssociation {
            crate::model::FileCacheDataRepositoryAssociation {
                file_cache_path: self.file_cache_path,
                data_repository_path: self.data_repository_path,
                data_repository_subdirectories: self.data_repository_subdirectories,
                nfs: self.nfs,
            }
        }
    }
}
impl FileCacheDataRepositoryAssociation {
    /// Creates a new builder-style object to manufacture [`FileCacheDataRepositoryAssociation`](crate::model::FileCacheDataRepositoryAssociation).
    pub fn builder() -> crate::model::file_cache_data_repository_association::Builder {
        // The builder derives `Default`; the return type selects the impl.
        Default::default()
    }
}

/// <p>The configuration for an NFS data repository association (DRA) created during the creation of the Amazon File Cache resource.</p>
/// <p>Instances are assembled with [`FileCacheNfsConfiguration::builder()`](crate::model::FileCacheNfsConfiguration::builder); both fields are optional at the type level.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct FileCacheNfsConfiguration {
    /// <p>The version of the NFS (Network File System) protocol of the NFS data repository. The only supported value is <code>NFS3</code>, which indicates that the data repository must support the NFSv3 protocol.</p>
    #[doc(hidden)]
    pub version: std::option::Option<crate::model::NfsVersion>,
    /// <p>A list of up to 2 IP addresses of DNS servers used to resolve the NFS file system domain name. The provided IP addresses can either be the IP addresses of a DNS forwarder or resolver that the customer manages and runs inside the customer VPC, or the IP addresses of the on-premises DNS servers.</p>
    #[doc(hidden)]
    pub dns_ips: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl FileCacheNfsConfiguration {
    /// <p>The version of the NFS (Network File System) protocol of the NFS data repository. The only supported value is <code>NFS3</code>, which indicates that the data repository must support the NFSv3 protocol.</p>
    pub fn version(&self) -> std::option::Option<&crate::model::NfsVersion> {
        Option::as_ref(&self.version)
    }
    /// <p>A list of up to 2 IP addresses of DNS servers used to resolve the NFS file system domain name. The provided IP addresses can either be the IP addresses of a DNS forwarder or resolver that the customer manages and runs inside the customer VPC, or the IP addresses of the on-premises DNS servers.</p>
    pub fn dns_ips(&self) -> std::option::Option<&[std::string::String]> {
        // Borrow the owned Vec and expose it as a slice (same as `as_deref`).
        self.dns_ips.as_ref().map(|ips| ips.as_slice())
    }
}
/// See [`FileCacheNfsConfiguration`](crate::model::FileCacheNfsConfiguration).
pub mod file_cache_nfs_configuration {

    /// A builder for [`FileCacheNfsConfiguration`](crate::model::FileCacheNfsConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) version: std::option::Option<crate::model::NfsVersion>,
        pub(crate) dns_ips: std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// <p>The version of the NFS (Network File System) protocol of the NFS data repository. The only supported value is <code>NFS3</code>, which indicates that the data repository must support the NFSv3 protocol.</p>
        pub fn version(self, input: crate::model::NfsVersion) -> Self {
            Self {
                version: Some(input),
                ..self
            }
        }
        /// <p>The version of the NFS (Network File System) protocol of the NFS data repository. The only supported value is <code>NFS3</code>, which indicates that the data repository must support the NFSv3 protocol.</p>
        pub fn set_version(self, input: std::option::Option<crate::model::NfsVersion>) -> Self {
            Self {
                version: input,
                ..self
            }
        }
        /// Appends an item to `dns_ips`.
        ///
        /// To override the contents of this collection use [`set_dns_ips`](Self::set_dns_ips).
        ///
        /// <p>A list of up to 2 IP addresses of DNS servers used to resolve the NFS file system domain name. The provided IP addresses can either be the IP addresses of a DNS forwarder or resolver that the customer manages and runs inside the customer VPC, or the IP addresses of the on-premises DNS servers.</p>
        pub fn dns_ips(mut self, input: impl Into<std::string::String>) -> Self {
            // Lazily create the Vec on first append, then push in place.
            self.dns_ips
                .get_or_insert_with(std::vec::Vec::new)
                .push(input.into());
            self
        }
        /// <p>A list of up to 2 IP addresses of DNS servers used to resolve the NFS file system domain name. The provided IP addresses can either be the IP addresses of a DNS forwarder or resolver that the customer manages and runs inside the customer VPC, or the IP addresses of the on-premises DNS servers.</p>
        pub fn set_dns_ips(
            self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            Self {
                dns_ips: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`FileCacheNfsConfiguration`](crate::model::FileCacheNfsConfiguration).
        pub fn build(self) -> crate::model::FileCacheNfsConfiguration {
            let Self { version, dns_ips } = self;
            crate::model::FileCacheNfsConfiguration { version, dns_ips }
        }
    }
}
impl FileCacheNfsConfiguration {
    /// Creates a new builder-style object to manufacture [`FileCacheNfsConfiguration`](crate::model::FileCacheNfsConfiguration).
    pub fn builder() -> crate::model::file_cache_nfs_configuration::Builder {
        // The builder derives `Default`; the return type selects the impl.
        Default::default()
    }
}

/// <p>The Amazon File Cache configuration for the cache that you are creating.</p>
/// <p>Instances are assembled with [`CreateFileCacheLustreConfiguration::builder()`](crate::model::CreateFileCacheLustreConfiguration::builder); all fields are optional at the type level.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct CreateFileCacheLustreConfiguration {
    /// <p>Provisions the amount of read and write throughput for each 1 tebibyte (TiB) of cache storage capacity, in MB/s/TiB. The only supported value is <code>1000</code>.</p>
    #[doc(hidden)]
    pub per_unit_storage_throughput: std::option::Option<i32>,
    /// <p>Specifies the cache deployment type, which must be <code>CACHE_1</code>.</p>
    #[doc(hidden)]
    pub deployment_type: std::option::Option<crate::model::FileCacheLustreDeploymentType>,
    /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
    /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
    /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
    /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
    #[doc(hidden)]
    pub weekly_maintenance_start_time: std::option::Option<std::string::String>,
    /// <p>The configuration for a Lustre MDT (Metadata Target) storage volume.</p>
    #[doc(hidden)]
    pub metadata_configuration:
        std::option::Option<crate::model::FileCacheLustreMetadataConfiguration>,
}
impl CreateFileCacheLustreConfiguration {
    /// <p>Provisions the amount of read and write throughput for each 1 tebibyte (TiB) of cache storage capacity, in MB/s/TiB. The only supported value is <code>1000</code>.</p>
    pub fn per_unit_storage_throughput(&self) -> std::option::Option<i32> {
        // `Option<i32>` is `Copy`, so this returns by value.
        self.per_unit_storage_throughput
    }
    /// <p>Specifies the cache deployment type, which must be <code>CACHE_1</code>.</p>
    pub fn deployment_type(
        &self,
    ) -> std::option::Option<&crate::model::FileCacheLustreDeploymentType> {
        Option::as_ref(&self.deployment_type)
    }
    /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
    /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
    /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
    /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
    pub fn weekly_maintenance_start_time(&self) -> std::option::Option<&str> {
        // Borrow the owned String and expose it as `&str` (same as `as_deref`).
        self.weekly_maintenance_start_time
            .as_ref()
            .map(|t| t.as_str())
    }
    /// <p>The configuration for a Lustre MDT (Metadata Target) storage volume.</p>
    pub fn metadata_configuration(
        &self,
    ) -> std::option::Option<&crate::model::FileCacheLustreMetadataConfiguration> {
        Option::as_ref(&self.metadata_configuration)
    }
}
/// See [`CreateFileCacheLustreConfiguration`](crate::model::CreateFileCacheLustreConfiguration).
pub mod create_file_cache_lustre_configuration {

    /// A builder for [`CreateFileCacheLustreConfiguration`](crate::model::CreateFileCacheLustreConfiguration).
    #[derive(std::clone::Clone, std::cmp::PartialEq, std::default::Default, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) per_unit_storage_throughput: std::option::Option<i32>,
        pub(crate) deployment_type:
            std::option::Option<crate::model::FileCacheLustreDeploymentType>,
        pub(crate) weekly_maintenance_start_time: std::option::Option<std::string::String>,
        pub(crate) metadata_configuration:
            std::option::Option<crate::model::FileCacheLustreMetadataConfiguration>,
    }
    impl Builder {
        /// <p>Provisions the amount of read and write throughput for each 1 tebibyte (TiB) of cache storage capacity, in MB/s/TiB. The only supported value is <code>1000</code>.</p>
        pub fn per_unit_storage_throughput(self, input: i32) -> Self {
            Self {
                per_unit_storage_throughput: Some(input),
                ..self
            }
        }
        /// <p>Provisions the amount of read and write throughput for each 1 tebibyte (TiB) of cache storage capacity, in MB/s/TiB. The only supported value is <code>1000</code>.</p>
        pub fn set_per_unit_storage_throughput(self, input: std::option::Option<i32>) -> Self {
            Self {
                per_unit_storage_throughput: input,
                ..self
            }
        }
        /// <p>Specifies the cache deployment type, which must be <code>CACHE_1</code>.</p>
        pub fn deployment_type(
            self,
            input: crate::model::FileCacheLustreDeploymentType,
        ) -> Self {
            Self {
                deployment_type: Some(input),
                ..self
            }
        }
        /// <p>Specifies the cache deployment type, which must be <code>CACHE_1</code>.</p>
        pub fn set_deployment_type(
            self,
            input: std::option::Option<crate::model::FileCacheLustreDeploymentType>,
        ) -> Self {
            Self {
                deployment_type: input,
                ..self
            }
        }
        /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
        /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
        /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
        /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
        pub fn weekly_maintenance_start_time(
            self,
            input: impl Into<std::string::String>,
        ) -> Self {
            Self {
                weekly_maintenance_start_time: Some(input.into()),
                ..self
            }
        }
        /// <p>A recurring weekly time, in the format <code>D:HH:MM</code>. </p>
        /// <p> <code>D</code> is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see <a href="https://en.wikipedia.org/wiki/ISO_week_date">the ISO-8601 spec as described on Wikipedia</a>.</p>
        /// <p> <code>HH</code> is the zero-padded hour of the day (0-23), and <code>MM</code> is the zero-padded minute of the hour. </p>
        /// <p>For example, <code>1:05:00</code> specifies maintenance at 5 AM Monday.</p>
        pub fn set_weekly_maintenance_start_time(
            self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            Self {
                weekly_maintenance_start_time: input,
                ..self
            }
        }
        /// <p>The configuration for a Lustre MDT (Metadata Target) storage volume.</p>
        pub fn metadata_configuration(
            self,
            input: crate::model::FileCacheLustreMetadataConfiguration,
        ) -> Self {
            Self {
                metadata_configuration: Some(input),
                ..self
            }
        }
        /// <p>The configuration for a Lustre MDT (Metadata Target) storage volume.</p>
        pub fn set_metadata_configuration(
            self,
            input: std::option::Option<crate::model::FileCacheLustreMetadataConfiguration>,
        ) -> Self {
            Self {
                metadata_configuration: input,
                ..self
            }
        }
        /// Consumes the builder and constructs a [`CreateFileCacheLustreConfiguration`](crate::model::CreateFileCacheLustreConfiguration).
        pub fn build(self) -> crate::model::CreateFileCacheLustreConfiguration {
            // Move every accumulated field out of the builder in one
            // destructuring step, then assemble the model type.
            let Self {
                per_unit_storage_throughput,
                deployment_type,
                weekly_maintenance_start_time,
                metadata_configuration,
            } = self;
            crate::model::CreateFileCacheLustreConfiguration {
                per_unit_storage_throughput,
                deployment_type,
                weekly_maintenance_start_time,
                metadata_configuration,
            }
        }
    }
}
impl CreateFileCacheLustreConfiguration {
    /// Creates a new builder-style object to manufacture [`CreateFileCacheLustreConfiguration`](crate::model::CreateFileCacheLustreConfiguration).
    pub fn builder() -> crate::model::create_file_cache_lustre_configuration::Builder {
        // The builder derives `Default`; the return type selects the impl.
        Default::default()
    }
}