// aws_sdk_fsx/types/_data_repository_association.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
3/// <p>The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:</p>
4/// <ul>
5/// <li>
6/// <p><code>CreateDataRepositoryAssociation</code></p></li>
7/// <li>
8/// <p><code>UpdateDataRepositoryAssociation</code></p></li>
9/// <li>
10/// <p><code>DescribeDataRepositoryAssociations</code></p></li>
11/// </ul>
12/// <p>Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and 2.15 file systems, excluding Intelligent-Tiering and <code>scratch_1</code> file systems.</p>
13#[non_exhaustive]
14#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
15pub struct DataRepositoryAssociation {
16    /// <p>The system-generated, unique ID of the data repository association.</p>
17    pub association_id: ::std::option::Option<::std::string::String>,
18    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
19    pub resource_arn: ::std::option::Option<::std::string::String>,
20    /// <p>The globally unique ID of the file system, assigned by Amazon FSx.</p>
21    pub file_system_id: ::std::option::Option<::std::string::String>,
22    /// <p>Describes the state of a data repository association. The lifecycle can have the following values:</p>
23    /// <ul>
24    /// <li>
25    /// <p><code>CREATING</code> - The data repository association between the file system or cache and the data repository is being created. The data repository is unavailable.</p></li>
26    /// <li>
27    /// <p><code>AVAILABLE</code> - The data repository association is available for use.</p></li>
28    /// <li>
29    /// <p><code>MISCONFIGURED</code> - The data repository association is misconfigured. Until the configuration is corrected, automatic import and automatic export will not work (only for Amazon FSx for Lustre).</p></li>
30    /// <li>
31    /// <p><code>UPDATING</code> - The data repository association is undergoing a customer initiated update that might affect its availability.</p></li>
32    /// <li>
33    /// <p><code>DELETING</code> - The data repository association is undergoing a customer initiated deletion.</p></li>
34    /// <li>
35    /// <p><code>FAILED</code> - The data repository association is in a terminal state that cannot be recovered.</p></li>
36    /// </ul>
37    pub lifecycle: ::std::option::Option<crate::types::DataRepositoryLifecycle>,
38    /// <p>Provides detailed information about the data repository if its <code>Lifecycle</code> is set to <code>MISCONFIGURED</code> or <code>FAILED</code>.</p>
39    pub failure_details: ::std::option::Option<crate::types::DataRepositoryFailureDetails>,
40    /// <p>A path on the Amazon FSx for Lustre file system that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path <code>/ns1/</code>, then you cannot link another data repository with file system path <code>/ns1/ns2</code>.</p>
41    /// <p>This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.</p><note>
42    /// <p>If you specify only a forward slash (<code>/</code>) as the file system path, you can link only one data repository to the file system. You can only specify "/" as the file system path for the first data repository associated with a file system.</p>
43    /// </note>
44    pub file_system_path: ::std::option::Option<::std::string::String>,
45    /// <p>The path to the data repository that will be linked to the cache or file system.</p>
46    /// <ul>
47    /// <li>
48    /// <p>For Amazon File Cache, the path can be an NFS data repository that will be linked to the cache. The path can be in one of two formats:</p>
49    /// <ul>
50    /// <li>
51    /// <p>If you are not using the <code>DataRepositorySubdirectories</code> parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format <code>nsf://nfs-domain-name/exportpath</code>. You can therefore link a single NFS Export to a single data repository association.</p></li>
52    /// <li>
53    /// <p>If you are using the <code>DataRepositorySubdirectories</code> parameter, the path is the domain name of the NFS file system in the format <code>nfs://filer-domain-name</code>, which indicates the root of the subdirectories specified with the <code>DataRepositorySubdirectories</code> parameter.</p></li>
54    /// </ul></li>
55    /// <li>
56    /// <p>For Amazon File Cache, the path can be an S3 bucket or prefix in the format <code>s3://bucket-name/prefix/</code> (where <code>prefix</code> is optional).</p></li>
57    /// <li>
58    /// <p>For Amazon FSx for Lustre, the path can be an S3 bucket or prefix in the format <code>s3://bucket-name/prefix/</code> (where <code>prefix</code> is optional).</p></li>
59    /// </ul>
60    pub data_repository_path: ::std::option::Option<::std::string::String>,
61    /// <p>A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to <code>true</code>.</p><note>
62    /// <p><code>BatchImportMetaDataOnCreate</code> is not supported for data repositories linked to an Amazon File Cache resource.</p>
63    /// </note>
64    pub batch_import_meta_data_on_create: ::std::option::Option<bool>,
65    /// <p>For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system or cache.</p>
66    /// <p>The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.</p>
67    pub imported_file_chunk_size: ::std::option::Option<i32>,
68    /// <p>The configuration for an Amazon S3 data repository linked to an Amazon FSx for Lustre file system with a data repository association.</p>
69    pub s3: ::std::option::Option<crate::types::S3DataRepositoryConfiguration>,
70    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
71    pub tags: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>,
72    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
73    pub creation_time: ::std::option::Option<::aws_smithy_types::DateTime>,
74    /// <p>The globally unique ID of the Amazon File Cache resource.</p>
75    pub file_cache_id: ::std::option::Option<::std::string::String>,
76    /// <p>A path on the Amazon File Cache that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the path is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path <code>/ns1/</code>, then you cannot link another data repository with cache path <code>/ns1/ns2</code>.</p>
77    /// <p>This path specifies the directory in your cache where files will be exported from. This cache directory can be linked to only one data repository (S3 or NFS) and no other data repository can be linked to the directory.</p><note>
78    /// <p>The cache path can only be set to root (/) on an NFS DRA when <code>DataRepositorySubdirectories</code> is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache.</p>
79    /// <p>The cache path cannot be set to root (/) for an S3 DRA.</p>
80    /// </note>
81    pub file_cache_path: ::std::option::Option<::std::string::String>,
82    /// <p>For Amazon File Cache, a list of NFS Exports that will be linked with an NFS data repository association. All the subdirectories must be on a single NFS file system. The Export paths are in the format <code>/exportpath1</code>. To use this parameter, you must configure <code>DataRepositoryPath</code> as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that <code>DataRepositorySubdirectories</code> is not supported for S3 data repositories.</p>
83    pub data_repository_subdirectories: ::std::option::Option<::std::vec::Vec<::std::string::String>>,
84    /// <p>The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association.</p>
85    pub nfs: ::std::option::Option<crate::types::NfsDataRepositoryConfiguration>,
86}
87impl DataRepositoryAssociation {
88    /// <p>The system-generated, unique ID of the data repository association.</p>
89    pub fn association_id(&self) -> ::std::option::Option<&str> {
90        self.association_id.as_deref()
91    }
92    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
93    pub fn resource_arn(&self) -> ::std::option::Option<&str> {
94        self.resource_arn.as_deref()
95    }
96    /// <p>The globally unique ID of the file system, assigned by Amazon FSx.</p>
97    pub fn file_system_id(&self) -> ::std::option::Option<&str> {
98        self.file_system_id.as_deref()
99    }
100    /// <p>Describes the state of a data repository association. The lifecycle can have the following values:</p>
101    /// <ul>
102    /// <li>
103    /// <p><code>CREATING</code> - The data repository association between the file system or cache and the data repository is being created. The data repository is unavailable.</p></li>
104    /// <li>
105    /// <p><code>AVAILABLE</code> - The data repository association is available for use.</p></li>
106    /// <li>
107    /// <p><code>MISCONFIGURED</code> - The data repository association is misconfigured. Until the configuration is corrected, automatic import and automatic export will not work (only for Amazon FSx for Lustre).</p></li>
108    /// <li>
109    /// <p><code>UPDATING</code> - The data repository association is undergoing a customer initiated update that might affect its availability.</p></li>
110    /// <li>
111    /// <p><code>DELETING</code> - The data repository association is undergoing a customer initiated deletion.</p></li>
112    /// <li>
113    /// <p><code>FAILED</code> - The data repository association is in a terminal state that cannot be recovered.</p></li>
114    /// </ul>
115    pub fn lifecycle(&self) -> ::std::option::Option<&crate::types::DataRepositoryLifecycle> {
116        self.lifecycle.as_ref()
117    }
118    /// <p>Provides detailed information about the data repository if its <code>Lifecycle</code> is set to <code>MISCONFIGURED</code> or <code>FAILED</code>.</p>
119    pub fn failure_details(&self) -> ::std::option::Option<&crate::types::DataRepositoryFailureDetails> {
120        self.failure_details.as_ref()
121    }
122    /// <p>A path on the Amazon FSx for Lustre file system that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path <code>/ns1/</code>, then you cannot link another data repository with file system path <code>/ns1/ns2</code>.</p>
123    /// <p>This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.</p><note>
124    /// <p>If you specify only a forward slash (<code>/</code>) as the file system path, you can link only one data repository to the file system. You can only specify "/" as the file system path for the first data repository associated with a file system.</p>
125    /// </note>
126    pub fn file_system_path(&self) -> ::std::option::Option<&str> {
127        self.file_system_path.as_deref()
128    }
129    /// <p>The path to the data repository that will be linked to the cache or file system.</p>
130    /// <ul>
131    /// <li>
132    /// <p>For Amazon File Cache, the path can be an NFS data repository that will be linked to the cache. The path can be in one of two formats:</p>
133    /// <ul>
134    /// <li>
135    /// <p>If you are not using the <code>DataRepositorySubdirectories</code> parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format <code>nsf://nfs-domain-name/exportpath</code>. You can therefore link a single NFS Export to a single data repository association.</p></li>
136    /// <li>
137    /// <p>If you are using the <code>DataRepositorySubdirectories</code> parameter, the path is the domain name of the NFS file system in the format <code>nfs://filer-domain-name</code>, which indicates the root of the subdirectories specified with the <code>DataRepositorySubdirectories</code> parameter.</p></li>
138    /// </ul></li>
139    /// <li>
140    /// <p>For Amazon File Cache, the path can be an S3 bucket or prefix in the format <code>s3://bucket-name/prefix/</code> (where <code>prefix</code> is optional).</p></li>
141    /// <li>
142    /// <p>For Amazon FSx for Lustre, the path can be an S3 bucket or prefix in the format <code>s3://bucket-name/prefix/</code> (where <code>prefix</code> is optional).</p></li>
143    /// </ul>
144    pub fn data_repository_path(&self) -> ::std::option::Option<&str> {
145        self.data_repository_path.as_deref()
146    }
147    /// <p>A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to <code>true</code>.</p><note>
148    /// <p><code>BatchImportMetaDataOnCreate</code> is not supported for data repositories linked to an Amazon File Cache resource.</p>
149    /// </note>
150    pub fn batch_import_meta_data_on_create(&self) -> ::std::option::Option<bool> {
151        self.batch_import_meta_data_on_create
152    }
153    /// <p>For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system or cache.</p>
154    /// <p>The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.</p>
155    pub fn imported_file_chunk_size(&self) -> ::std::option::Option<i32> {
156        self.imported_file_chunk_size
157    }
158    /// <p>The configuration for an Amazon S3 data repository linked to an Amazon FSx for Lustre file system with a data repository association.</p>
159    pub fn s3(&self) -> ::std::option::Option<&crate::types::S3DataRepositoryConfiguration> {
160        self.s3.as_ref()
161    }
162    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
163    ///
164    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.tags.is_none()`.
165    pub fn tags(&self) -> &[crate::types::Tag] {
166        self.tags.as_deref().unwrap_or_default()
167    }
168    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
169    pub fn creation_time(&self) -> ::std::option::Option<&::aws_smithy_types::DateTime> {
170        self.creation_time.as_ref()
171    }
172    /// <p>The globally unique ID of the Amazon File Cache resource.</p>
173    pub fn file_cache_id(&self) -> ::std::option::Option<&str> {
174        self.file_cache_id.as_deref()
175    }
176    /// <p>A path on the Amazon File Cache that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the path is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path <code>/ns1/</code>, then you cannot link another data repository with cache path <code>/ns1/ns2</code>.</p>
177    /// <p>This path specifies the directory in your cache where files will be exported from. This cache directory can be linked to only one data repository (S3 or NFS) and no other data repository can be linked to the directory.</p><note>
178    /// <p>The cache path can only be set to root (/) on an NFS DRA when <code>DataRepositorySubdirectories</code> is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache.</p>
179    /// <p>The cache path cannot be set to root (/) for an S3 DRA.</p>
180    /// </note>
181    pub fn file_cache_path(&self) -> ::std::option::Option<&str> {
182        self.file_cache_path.as_deref()
183    }
184    /// <p>For Amazon File Cache, a list of NFS Exports that will be linked with an NFS data repository association. All the subdirectories must be on a single NFS file system. The Export paths are in the format <code>/exportpath1</code>. To use this parameter, you must configure <code>DataRepositoryPath</code> as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that <code>DataRepositorySubdirectories</code> is not supported for S3 data repositories.</p>
185    ///
186    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.data_repository_subdirectories.is_none()`.
187    pub fn data_repository_subdirectories(&self) -> &[::std::string::String] {
188        self.data_repository_subdirectories.as_deref().unwrap_or_default()
189    }
190    /// <p>The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association.</p>
191    pub fn nfs(&self) -> ::std::option::Option<&crate::types::NfsDataRepositoryConfiguration> {
192        self.nfs.as_ref()
193    }
194}
195impl DataRepositoryAssociation {
196    /// Creates a new builder-style object to manufacture [`DataRepositoryAssociation`](crate::types::DataRepositoryAssociation).
197    pub fn builder() -> crate::types::builders::DataRepositoryAssociationBuilder {
198        crate::types::builders::DataRepositoryAssociationBuilder::default()
199    }
200}
201
202/// A builder for [`DataRepositoryAssociation`](crate::types::DataRepositoryAssociation).
203#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
204#[non_exhaustive]
205pub struct DataRepositoryAssociationBuilder {
206    pub(crate) association_id: ::std::option::Option<::std::string::String>,
207    pub(crate) resource_arn: ::std::option::Option<::std::string::String>,
208    pub(crate) file_system_id: ::std::option::Option<::std::string::String>,
209    pub(crate) lifecycle: ::std::option::Option<crate::types::DataRepositoryLifecycle>,
210    pub(crate) failure_details: ::std::option::Option<crate::types::DataRepositoryFailureDetails>,
211    pub(crate) file_system_path: ::std::option::Option<::std::string::String>,
212    pub(crate) data_repository_path: ::std::option::Option<::std::string::String>,
213    pub(crate) batch_import_meta_data_on_create: ::std::option::Option<bool>,
214    pub(crate) imported_file_chunk_size: ::std::option::Option<i32>,
215    pub(crate) s3: ::std::option::Option<crate::types::S3DataRepositoryConfiguration>,
216    pub(crate) tags: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>,
217    pub(crate) creation_time: ::std::option::Option<::aws_smithy_types::DateTime>,
218    pub(crate) file_cache_id: ::std::option::Option<::std::string::String>,
219    pub(crate) file_cache_path: ::std::option::Option<::std::string::String>,
220    pub(crate) data_repository_subdirectories: ::std::option::Option<::std::vec::Vec<::std::string::String>>,
221    pub(crate) nfs: ::std::option::Option<crate::types::NfsDataRepositoryConfiguration>,
222}
impl DataRepositoryAssociationBuilder {
    /// <p>The system-generated, unique ID of the data repository association.</p>
    pub fn association_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.association_id = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>The system-generated, unique ID of the data repository association.</p>
    pub fn set_association_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.association_id = input;
        self
    }
    /// <p>The system-generated, unique ID of the data repository association.</p>
    pub fn get_association_id(&self) -> &::std::option::Option<::std::string::String> {
        &self.association_id
    }
238    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
239    pub fn resource_arn(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
240        self.resource_arn = ::std::option::Option::Some(input.into());
241        self
242    }
243    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
244    pub fn set_resource_arn(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
245        self.resource_arn = input;
246        self
247    }
248    /// <p>The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services resources. We require an ARN when you need to specify a resource unambiguously across all of Amazon Web Services. For more information, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>Amazon Web Services General Reference</i>.</p>
249    pub fn get_resource_arn(&self) -> &::std::option::Option<::std::string::String> {
250        &self.resource_arn
251    }
252    /// <p>The globally unique ID of the file system, assigned by Amazon FSx.</p>
253    pub fn file_system_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
254        self.file_system_id = ::std::option::Option::Some(input.into());
255        self
256    }
257    /// <p>The globally unique ID of the file system, assigned by Amazon FSx.</p>
258    pub fn set_file_system_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
259        self.file_system_id = input;
260        self
261    }
262    /// <p>The globally unique ID of the file system, assigned by Amazon FSx.</p>
263    pub fn get_file_system_id(&self) -> &::std::option::Option<::std::string::String> {
264        &self.file_system_id
265    }
266    /// <p>Describes the state of a data repository association. The lifecycle can have the following values:</p>
267    /// <ul>
268    /// <li>
269    /// <p><code>CREATING</code> - The data repository association between the file system or cache and the data repository is being created. The data repository is unavailable.</p></li>
270    /// <li>
271    /// <p><code>AVAILABLE</code> - The data repository association is available for use.</p></li>
272    /// <li>
273    /// <p><code>MISCONFIGURED</code> - The data repository association is misconfigured. Until the configuration is corrected, automatic import and automatic export will not work (only for Amazon FSx for Lustre).</p></li>
274    /// <li>
275    /// <p><code>UPDATING</code> - The data repository association is undergoing a customer initiated update that might affect its availability.</p></li>
276    /// <li>
277    /// <p><code>DELETING</code> - The data repository association is undergoing a customer initiated deletion.</p></li>
278    /// <li>
279    /// <p><code>FAILED</code> - The data repository association is in a terminal state that cannot be recovered.</p></li>
280    /// </ul>
281    pub fn lifecycle(mut self, input: crate::types::DataRepositoryLifecycle) -> Self {
282        self.lifecycle = ::std::option::Option::Some(input);
283        self
284    }
285    /// <p>Describes the state of a data repository association. The lifecycle can have the following values:</p>
286    /// <ul>
287    /// <li>
288    /// <p><code>CREATING</code> - The data repository association between the file system or cache and the data repository is being created. The data repository is unavailable.</p></li>
289    /// <li>
290    /// <p><code>AVAILABLE</code> - The data repository association is available for use.</p></li>
291    /// <li>
292    /// <p><code>MISCONFIGURED</code> - The data repository association is misconfigured. Until the configuration is corrected, automatic import and automatic export will not work (only for Amazon FSx for Lustre).</p></li>
293    /// <li>
294    /// <p><code>UPDATING</code> - The data repository association is undergoing a customer initiated update that might affect its availability.</p></li>
295    /// <li>
296    /// <p><code>DELETING</code> - The data repository association is undergoing a customer initiated deletion.</p></li>
297    /// <li>
298    /// <p><code>FAILED</code> - The data repository association is in a terminal state that cannot be recovered.</p></li>
299    /// </ul>
300    pub fn set_lifecycle(mut self, input: ::std::option::Option<crate::types::DataRepositoryLifecycle>) -> Self {
301        self.lifecycle = input;
302        self
303    }
304    /// <p>Describes the state of a data repository association. The lifecycle can have the following values:</p>
305    /// <ul>
306    /// <li>
307    /// <p><code>CREATING</code> - The data repository association between the file system or cache and the data repository is being created. The data repository is unavailable.</p></li>
308    /// <li>
309    /// <p><code>AVAILABLE</code> - The data repository association is available for use.</p></li>
310    /// <li>
311    /// <p><code>MISCONFIGURED</code> - The data repository association is misconfigured. Until the configuration is corrected, automatic import and automatic export will not work (only for Amazon FSx for Lustre).</p></li>
312    /// <li>
313    /// <p><code>UPDATING</code> - The data repository association is undergoing a customer initiated update that might affect its availability.</p></li>
314    /// <li>
315    /// <p><code>DELETING</code> - The data repository association is undergoing a customer initiated deletion.</p></li>
316    /// <li>
317    /// <p><code>FAILED</code> - The data repository association is in a terminal state that cannot be recovered.</p></li>
318    /// </ul>
319    pub fn get_lifecycle(&self) -> &::std::option::Option<crate::types::DataRepositoryLifecycle> {
320        &self.lifecycle
321    }
322    /// <p>Provides detailed information about the data repository if its <code>Lifecycle</code> is set to <code>MISCONFIGURED</code> or <code>FAILED</code>.</p>
323    pub fn failure_details(mut self, input: crate::types::DataRepositoryFailureDetails) -> Self {
324        self.failure_details = ::std::option::Option::Some(input);
325        self
326    }
327    /// <p>Provides detailed information about the data repository if its <code>Lifecycle</code> is set to <code>MISCONFIGURED</code> or <code>FAILED</code>.</p>
328    pub fn set_failure_details(mut self, input: ::std::option::Option<crate::types::DataRepositoryFailureDetails>) -> Self {
329        self.failure_details = input;
330        self
331    }
332    /// <p>Provides detailed information about the data repository if its <code>Lifecycle</code> is set to <code>MISCONFIGURED</code> or <code>FAILED</code>.</p>
333    pub fn get_failure_details(&self) -> &::std::option::Option<crate::types::DataRepositoryFailureDetails> {
334        &self.failure_details
335    }
336    /// <p>A path on the Amazon FSx for Lustre file system that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path <code>/ns1/</code>, then you cannot link another data repository with file system path <code>/ns1/ns2</code>.</p>
337    /// <p>This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.</p><note>
338    /// <p>If you specify only a forward slash (<code>/</code>) as the file system path, you can link only one data repository to the file system. You can only specify "/" as the file system path for the first data repository associated with a file system.</p>
339    /// </note>
340    pub fn file_system_path(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
341        self.file_system_path = ::std::option::Option::Some(input.into());
342        self
343    }
344    /// <p>A path on the Amazon FSx for Lustre file system that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path <code>/ns1/</code>, then you cannot link another data repository with file system path <code>/ns1/ns2</code>.</p>
345    /// <p>This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.</p><note>
346    /// <p>If you specify only a forward slash (<code>/</code>) as the file system path, you can link only one data repository to the file system. You can only specify "/" as the file system path for the first data repository associated with a file system.</p>
347    /// </note>
348    pub fn set_file_system_path(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
349        self.file_system_path = input;
350        self
351    }
352    /// <p>A path on the Amazon FSx for Lustre file system that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path <code>/ns1/</code>, then you cannot link another data repository with file system path <code>/ns1/ns2</code>.</p>
353    /// <p>This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.</p><note>
354    /// <p>If you specify only a forward slash (<code>/</code>) as the file system path, you can link only one data repository to the file system. You can only specify "/" as the file system path for the first data repository associated with a file system.</p>
355    /// </note>
356    pub fn get_file_system_path(&self) -> &::std::option::Option<::std::string::String> {
357        &self.file_system_path
358    }
359    /// <p>The path to the data repository that will be linked to the cache or file system.</p>
360    /// <ul>
361    /// <li>
362    /// <p>For Amazon File Cache, the path can be an NFS data repository that will be linked to the cache. The path can be in one of two formats:</p>
363    /// <ul>
364    /// <li>
365    /// <p>If you are not using the <code>DataRepositorySubdirectories</code> parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format <code>nsf://nfs-domain-name/exportpath</code>. You can therefore link a single NFS Export to a single data repository association.</p></li>
366    /// <li>
367    /// <p>If you are using the <code>DataRepositorySubdirectories</code> parameter, the path is the domain name of the NFS file system in the format <code>nfs://filer-domain-name</code>, which indicates the root of the subdirectories specified with the <code>DataRepositorySubdirectories</code> parameter.</p></li>
368    /// </ul></li>
369    /// <li>
370    /// <p>For Amazon File Cache, the path can be an S3 bucket or prefix in the format <code>s3://bucket-name/prefix/</code> (where <code>prefix</code> is optional).</p></li>
371    /// <li>
372    /// <p>For Amazon FSx for Lustre, the path can be an S3 bucket or prefix in the format <code>s3://bucket-name/prefix/</code> (where <code>prefix</code> is optional).</p></li>
373    /// </ul>
374    pub fn data_repository_path(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
375        self.data_repository_path = ::std::option::Option::Some(input.into());
376        self
377    }
378    /// <p>The path to the data repository that will be linked to the cache or file system.</p>
379    /// <ul>
380    /// <li>
381    /// <p>For Amazon File Cache, the path can be an NFS data repository that will be linked to the cache. The path can be in one of two formats:</p>
382    /// <ul>
383    /// <li>
384    /// <p>If you are not using the <code>DataRepositorySubdirectories</code> parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format <code>nsf://nfs-domain-name/exportpath</code>. You can therefore link a single NFS Export to a single data repository association.</p></li>
385    /// <li>
386    /// <p>If you are using the <code>DataRepositorySubdirectories</code> parameter, the path is the domain name of the NFS file system in the format <code>nfs://filer-domain-name</code>, which indicates the root of the subdirectories specified with the <code>DataRepositorySubdirectories</code> parameter.</p></li>
387    /// </ul></li>
388    /// <li>
389    /// <p>For Amazon File Cache, the path can be an S3 bucket or prefix in the format <code>s3://bucket-name/prefix/</code> (where <code>prefix</code> is optional).</p></li>
390    /// <li>
391    /// <p>For Amazon FSx for Lustre, the path can be an S3 bucket or prefix in the format <code>s3://bucket-name/prefix/</code> (where <code>prefix</code> is optional).</p></li>
392    /// </ul>
393    pub fn set_data_repository_path(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
394        self.data_repository_path = input;
395        self
396    }
397    /// <p>The path to the data repository that will be linked to the cache or file system.</p>
398    /// <ul>
399    /// <li>
400    /// <p>For Amazon File Cache, the path can be an NFS data repository that will be linked to the cache. The path can be in one of two formats:</p>
401    /// <ul>
402    /// <li>
403    /// <p>If you are not using the <code>DataRepositorySubdirectories</code> parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format <code>nsf://nfs-domain-name/exportpath</code>. You can therefore link a single NFS Export to a single data repository association.</p></li>
404    /// <li>
405    /// <p>If you are using the <code>DataRepositorySubdirectories</code> parameter, the path is the domain name of the NFS file system in the format <code>nfs://filer-domain-name</code>, which indicates the root of the subdirectories specified with the <code>DataRepositorySubdirectories</code> parameter.</p></li>
406    /// </ul></li>
407    /// <li>
408    /// <p>For Amazon File Cache, the path can be an S3 bucket or prefix in the format <code>s3://bucket-name/prefix/</code> (where <code>prefix</code> is optional).</p></li>
409    /// <li>
410    /// <p>For Amazon FSx for Lustre, the path can be an S3 bucket or prefix in the format <code>s3://bucket-name/prefix/</code> (where <code>prefix</code> is optional).</p></li>
411    /// </ul>
412    pub fn get_data_repository_path(&self) -> &::std::option::Option<::std::string::String> {
413        &self.data_repository_path
414    }
415    /// <p>A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to <code>true</code>.</p><note>
416    /// <p><code>BatchImportMetaDataOnCreate</code> is not supported for data repositories linked to an Amazon File Cache resource.</p>
417    /// </note>
418    pub fn batch_import_meta_data_on_create(mut self, input: bool) -> Self {
419        self.batch_import_meta_data_on_create = ::std::option::Option::Some(input);
420        self
421    }
422    /// <p>A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to <code>true</code>.</p><note>
423    /// <p><code>BatchImportMetaDataOnCreate</code> is not supported for data repositories linked to an Amazon File Cache resource.</p>
424    /// </note>
425    pub fn set_batch_import_meta_data_on_create(mut self, input: ::std::option::Option<bool>) -> Self {
426        self.batch_import_meta_data_on_create = input;
427        self
428    }
429    /// <p>A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to <code>true</code>.</p><note>
430    /// <p><code>BatchImportMetaDataOnCreate</code> is not supported for data repositories linked to an Amazon File Cache resource.</p>
431    /// </note>
432    pub fn get_batch_import_meta_data_on_create(&self) -> &::std::option::Option<bool> {
433        &self.batch_import_meta_data_on_create
434    }
435    /// <p>For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system or cache.</p>
436    /// <p>The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.</p>
437    pub fn imported_file_chunk_size(mut self, input: i32) -> Self {
438        self.imported_file_chunk_size = ::std::option::Option::Some(input);
439        self
440    }
441    /// <p>For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system or cache.</p>
442    /// <p>The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.</p>
443    pub fn set_imported_file_chunk_size(mut self, input: ::std::option::Option<i32>) -> Self {
444        self.imported_file_chunk_size = input;
445        self
446    }
447    /// <p>For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system or cache.</p>
448    /// <p>The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.</p>
449    pub fn get_imported_file_chunk_size(&self) -> &::std::option::Option<i32> {
450        &self.imported_file_chunk_size
451    }
452    /// <p>The configuration for an Amazon S3 data repository linked to an Amazon FSx for Lustre file system with a data repository association.</p>
453    pub fn s3(mut self, input: crate::types::S3DataRepositoryConfiguration) -> Self {
454        self.s3 = ::std::option::Option::Some(input);
455        self
456    }
457    /// <p>The configuration for an Amazon S3 data repository linked to an Amazon FSx for Lustre file system with a data repository association.</p>
458    pub fn set_s3(mut self, input: ::std::option::Option<crate::types::S3DataRepositoryConfiguration>) -> Self {
459        self.s3 = input;
460        self
461    }
462    /// <p>The configuration for an Amazon S3 data repository linked to an Amazon FSx for Lustre file system with a data repository association.</p>
463    pub fn get_s3(&self) -> &::std::option::Option<crate::types::S3DataRepositoryConfiguration> {
464        &self.s3
465    }
466    /// Appends an item to `tags`.
467    ///
468    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
469    ///
470    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
471    pub fn tags(mut self, input: crate::types::Tag) -> Self {
472        let mut v = self.tags.unwrap_or_default();
473        v.push(input);
474        self.tags = ::std::option::Option::Some(v);
475        self
476    }
477    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
478    pub fn set_tags(mut self, input: ::std::option::Option<::std::vec::Vec<crate::types::Tag>>) -> Self {
479        self.tags = input;
480        self
481    }
482    /// <p>A list of <code>Tag</code> values, with a maximum of 50 elements.</p>
483    pub fn get_tags(&self) -> &::std::option::Option<::std::vec::Vec<crate::types::Tag>> {
484        &self.tags
485    }
486    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
487    pub fn creation_time(mut self, input: ::aws_smithy_types::DateTime) -> Self {
488        self.creation_time = ::std::option::Option::Some(input);
489        self
490    }
491    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
492    pub fn set_creation_time(mut self, input: ::std::option::Option<::aws_smithy_types::DateTime>) -> Self {
493        self.creation_time = input;
494        self
495    }
496    /// <p>The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.</p>
497    pub fn get_creation_time(&self) -> &::std::option::Option<::aws_smithy_types::DateTime> {
498        &self.creation_time
499    }
500    /// <p>The globally unique ID of the Amazon File Cache resource.</p>
501    pub fn file_cache_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
502        self.file_cache_id = ::std::option::Option::Some(input.into());
503        self
504    }
505    /// <p>The globally unique ID of the Amazon File Cache resource.</p>
506    pub fn set_file_cache_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
507        self.file_cache_id = input;
508        self
509    }
510    /// <p>The globally unique ID of the Amazon File Cache resource.</p>
511    pub fn get_file_cache_id(&self) -> &::std::option::Option<::std::string::String> {
512        &self.file_cache_id
513    }
514    /// <p>A path on the Amazon File Cache that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the path is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path <code>/ns1/</code>, then you cannot link another data repository with cache path <code>/ns1/ns2</code>.</p>
515    /// <p>This path specifies the directory in your cache where files will be exported from. This cache directory can be linked to only one data repository (S3 or NFS) and no other data repository can be linked to the directory.</p><note>
516    /// <p>The cache path can only be set to root (/) on an NFS DRA when <code>DataRepositorySubdirectories</code> is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache.</p>
517    /// <p>The cache path cannot be set to root (/) for an S3 DRA.</p>
518    /// </note>
519    pub fn file_cache_path(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
520        self.file_cache_path = ::std::option::Option::Some(input.into());
521        self
522    }
523    /// <p>A path on the Amazon File Cache that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the path is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path <code>/ns1/</code>, then you cannot link another data repository with cache path <code>/ns1/ns2</code>.</p>
524    /// <p>This path specifies the directory in your cache where files will be exported from. This cache directory can be linked to only one data repository (S3 or NFS) and no other data repository can be linked to the directory.</p><note>
525    /// <p>The cache path can only be set to root (/) on an NFS DRA when <code>DataRepositorySubdirectories</code> is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache.</p>
526    /// <p>The cache path cannot be set to root (/) for an S3 DRA.</p>
527    /// </note>
528    pub fn set_file_cache_path(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
529        self.file_cache_path = input;
530        self
531    }
532    /// <p>A path on the Amazon File Cache that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the path is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path <code>/ns1/</code>, then you cannot link another data repository with cache path <code>/ns1/ns2</code>.</p>
533    /// <p>This path specifies the directory in your cache where files will be exported from. This cache directory can be linked to only one data repository (S3 or NFS) and no other data repository can be linked to the directory.</p><note>
534    /// <p>The cache path can only be set to root (/) on an NFS DRA when <code>DataRepositorySubdirectories</code> is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache.</p>
535    /// <p>The cache path cannot be set to root (/) for an S3 DRA.</p>
536    /// </note>
537    pub fn get_file_cache_path(&self) -> &::std::option::Option<::std::string::String> {
538        &self.file_cache_path
539    }
540    /// Appends an item to `data_repository_subdirectories`.
541    ///
542    /// To override the contents of this collection use [`set_data_repository_subdirectories`](Self::set_data_repository_subdirectories).
543    ///
544    /// <p>For Amazon File Cache, a list of NFS Exports that will be linked with an NFS data repository association. All the subdirectories must be on a single NFS file system. The Export paths are in the format <code>/exportpath1</code>. To use this parameter, you must configure <code>DataRepositoryPath</code> as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that <code>DataRepositorySubdirectories</code> is not supported for S3 data repositories.</p>
545    pub fn data_repository_subdirectories(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
546        let mut v = self.data_repository_subdirectories.unwrap_or_default();
547        v.push(input.into());
548        self.data_repository_subdirectories = ::std::option::Option::Some(v);
549        self
550    }
551    /// <p>For Amazon File Cache, a list of NFS Exports that will be linked with an NFS data repository association. All the subdirectories must be on a single NFS file system. The Export paths are in the format <code>/exportpath1</code>. To use this parameter, you must configure <code>DataRepositoryPath</code> as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that <code>DataRepositorySubdirectories</code> is not supported for S3 data repositories.</p>
552    pub fn set_data_repository_subdirectories(mut self, input: ::std::option::Option<::std::vec::Vec<::std::string::String>>) -> Self {
553        self.data_repository_subdirectories = input;
554        self
555    }
556    /// <p>For Amazon File Cache, a list of NFS Exports that will be linked with an NFS data repository association. All the subdirectories must be on a single NFS file system. The Export paths are in the format <code>/exportpath1</code>. To use this parameter, you must configure <code>DataRepositoryPath</code> as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that <code>DataRepositorySubdirectories</code> is not supported for S3 data repositories.</p>
557    pub fn get_data_repository_subdirectories(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> {
558        &self.data_repository_subdirectories
559    }
560    /// <p>The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association.</p>
561    pub fn nfs(mut self, input: crate::types::NfsDataRepositoryConfiguration) -> Self {
562        self.nfs = ::std::option::Option::Some(input);
563        self
564    }
565    /// <p>The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association.</p>
566    pub fn set_nfs(mut self, input: ::std::option::Option<crate::types::NfsDataRepositoryConfiguration>) -> Self {
567        self.nfs = input;
568        self
569    }
570    /// <p>The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association.</p>
571    pub fn get_nfs(&self) -> &::std::option::Option<crate::types::NfsDataRepositoryConfiguration> {
572        &self.nfs
573    }
    /// Consumes the builder and constructs a [`DataRepositoryAssociation`](crate::types::DataRepositoryAssociation).
    ///
    /// This cannot fail: every member of `DataRepositoryAssociation` is
    /// `Option`-typed, so each builder field is moved across verbatim with no
    /// validation or defaulting. Fields never set on the builder end up as
    /// `None` on the output.
    pub fn build(self) -> crate::types::DataRepositoryAssociation {
        crate::types::DataRepositoryAssociation {
            association_id: self.association_id,
            resource_arn: self.resource_arn,
            file_system_id: self.file_system_id,
            lifecycle: self.lifecycle,
            failure_details: self.failure_details,
            file_system_path: self.file_system_path,
            data_repository_path: self.data_repository_path,
            batch_import_meta_data_on_create: self.batch_import_meta_data_on_create,
            imported_file_chunk_size: self.imported_file_chunk_size,
            s3: self.s3,
            tags: self.tags,
            creation_time: self.creation_time,
            file_cache_id: self.file_cache_id,
            file_cache_path: self.file_cache_path,
            data_repository_subdirectories: self.data_repository_subdirectories,
            nfs: self.nfs,
        }
    }
595}