aws_sdk_glue/operation/create_crawler/_create_crawler_input.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
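/// A minimal, hedged sketch of building this input through its builder; the crawler
/// name, role ARN, and database name below are illustrative placeholders rather than
/// values taken from the service model:
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use aws_sdk_glue::operation::create_crawler::CreateCrawlerInput;
///
/// // `name`, `role`, and `targets` are the members marked required by the model;
/// // this sketch only sets the string-valued ones.
/// let input = CreateCrawlerInput::builder()
///     .name("my-crawler")
///     .role("arn:aws:iam::123456789012:role/MyGlueCrawlerRole")
///     .database_name("my_database")
///     .build()?;
/// assert_eq!(input.name(), Some("my-crawler"));
/// # Ok(())
/// # }
/// ```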
pub struct CreateCrawlerInput {
    /// <p>Name of the new crawler.</p>
    pub name: ::std::option::Option<::std::string::String>,
    /// <p>The IAM role or Amazon Resource Name (ARN) of an IAM role used by the new crawler to access customer resources.</p>
    pub role: ::std::option::Option<::std::string::String>,
    /// <p>The Glue database where results are written, such as: <code>arn:aws:daylight:us-east-1::database/sometable/*</code>.</p>
    pub database_name: ::std::option::Option<::std::string::String>,
    /// <p>A description of the new crawler.</p>
    pub description: ::std::option::Option<::std::string::String>,
    /// <p>A list of collections of targets to crawl.</p>
    pub targets: ::std::option::Option<crate::types::CrawlerTargets>,
    /// <p>A <code>cron</code> expression used to specify the schedule (see <a href="https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html">Time-Based Schedules for Jobs and Crawlers</a>). For example, to run something every day at 12:15 UTC, you would specify: <code>cron(15 12 * * ? *)</code>.</p>
    pub schedule: ::std::option::Option<::std::string::String>,
    /// <p>A list of custom classifiers that the user has registered. By default, all built-in classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.</p>
    pub classifiers: ::std::option::Option<::std::vec::Vec<::std::string::String>>,
    /// <p>The table prefix used for catalog tables that are created.</p>
    pub table_prefix: ::std::option::Option<::std::string::String>,
    /// <p>The policy for the crawler's update and deletion behavior.</p>
    pub schema_change_policy: ::std::option::Option<crate::types::SchemaChangePolicy>,
    /// <p>A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.</p>
    pub recrawl_policy: ::std::option::Option<crate::types::RecrawlPolicy>,
    /// <p>Specifies data lineage configuration settings for the crawler.</p>
    pub lineage_configuration: ::std::option::Option<crate::types::LineageConfiguration>,
    /// <p>Specifies Lake Formation configuration settings for the crawler.</p>
    pub lake_formation_configuration: ::std::option::Option<crate::types::LakeFormationConfiguration>,
    /// <p>Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see <a href="https://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html">Setting crawler configuration options</a>.</p>
    pub configuration: ::std::option::Option<::std::string::String>,
    /// <p>The name of the <code>SecurityConfiguration</code> structure to be used by this crawler.</p>
    pub crawler_security_configuration: ::std::option::Option<::std::string::String>,
    /// <p>The tags to use with this crawler request. You may use tags to limit access to the crawler. For more information about tags in Glue, see <a href="https://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html">Amazon Web Services Tags in Glue</a> in the developer guide.</p>
    pub tags: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>,
}
impl CreateCrawlerInput {
    /// <p>Name of the new crawler.</p>
    pub fn name(&self) -> ::std::option::Option<&str> {
        self.name.as_deref()
    }
    /// <p>The IAM role or Amazon Resource Name (ARN) of an IAM role used by the new crawler to access customer resources.</p>
    pub fn role(&self) -> ::std::option::Option<&str> {
        self.role.as_deref()
    }
    /// <p>The Glue database where results are written, such as: <code>arn:aws:daylight:us-east-1::database/sometable/*</code>.</p>
    pub fn database_name(&self) -> ::std::option::Option<&str> {
        self.database_name.as_deref()
    }
    /// <p>A description of the new crawler.</p>
    pub fn description(&self) -> ::std::option::Option<&str> {
        self.description.as_deref()
    }
    /// <p>A list of collections of targets to crawl.</p>
    pub fn targets(&self) -> ::std::option::Option<&crate::types::CrawlerTargets> {
        self.targets.as_ref()
    }
    /// <p>A <code>cron</code> expression used to specify the schedule (see <a href="https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html">Time-Based Schedules for Jobs and Crawlers</a>). For example, to run something every day at 12:15 UTC, you would specify: <code>cron(15 12 * * ? *)</code>.</p>
    pub fn schedule(&self) -> ::std::option::Option<&str> {
        self.schedule.as_deref()
    }
    /// <p>A list of custom classifiers that the user has registered. By default, all built-in classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.</p>
    ///
    /// If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use `.classifiers.is_none()`.
    pub fn classifiers(&self) -> &[::std::string::String] {
        self.classifiers.as_deref().unwrap_or_default()
    }
    /// <p>The table prefix used for catalog tables that are created.</p>
    pub fn table_prefix(&self) -> ::std::option::Option<&str> {
        self.table_prefix.as_deref()
    }
    /// <p>The policy for the crawler's update and deletion behavior.</p>
    pub fn schema_change_policy(&self) -> ::std::option::Option<&crate::types::SchemaChangePolicy> {
        self.schema_change_policy.as_ref()
    }
    /// <p>A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.</p>
    pub fn recrawl_policy(&self) -> ::std::option::Option<&crate::types::RecrawlPolicy> {
        self.recrawl_policy.as_ref()
    }
    /// <p>Specifies data lineage configuration settings for the crawler.</p>
    pub fn lineage_configuration(&self) -> ::std::option::Option<&crate::types::LineageConfiguration> {
        self.lineage_configuration.as_ref()
    }
    /// <p>Specifies Lake Formation configuration settings for the crawler.</p>
    pub fn lake_formation_configuration(&self) -> ::std::option::Option<&crate::types::LakeFormationConfiguration> {
        self.lake_formation_configuration.as_ref()
    }
    /// <p>Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see <a href="https://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html">Setting crawler configuration options</a>.</p>
    pub fn configuration(&self) -> ::std::option::Option<&str> {
        self.configuration.as_deref()
    }
    /// <p>The name of the <code>SecurityConfiguration</code> structure to be used by this crawler.</p>
    pub fn crawler_security_configuration(&self) -> ::std::option::Option<&str> {
        self.crawler_security_configuration.as_deref()
    }
    /// <p>The tags to use with this crawler request. You may use tags to limit access to the crawler. For more information about tags in Glue, see <a href="https://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html">Amazon Web Services Tags in Glue</a> in the developer guide.</p>
    pub fn tags(&self) -> ::std::option::Option<&::std::collections::HashMap<::std::string::String, ::std::string::String>> {
        self.tags.as_ref()
    }
}
impl CreateCrawlerInput {
    /// Creates a new builder-style object to manufacture [`CreateCrawlerInput`](crate::operation::create_crawler::CreateCrawlerInput).
    pub fn builder() -> crate::operation::create_crawler::builders::CreateCrawlerInputBuilder {
        crate::operation::create_crawler::builders::CreateCrawlerInputBuilder::default()
    }
}

/// A builder for [`CreateCrawlerInput`](crate::operation::create_crawler::CreateCrawlerInput).
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
#[non_exhaustive]
pub struct CreateCrawlerInputBuilder {
    pub(crate) name: ::std::option::Option<::std::string::String>,
    pub(crate) role: ::std::option::Option<::std::string::String>,
    pub(crate) database_name: ::std::option::Option<::std::string::String>,
    pub(crate) description: ::std::option::Option<::std::string::String>,
    pub(crate) targets: ::std::option::Option<crate::types::CrawlerTargets>,
    pub(crate) schedule: ::std::option::Option<::std::string::String>,
    pub(crate) classifiers: ::std::option::Option<::std::vec::Vec<::std::string::String>>,
    pub(crate) table_prefix: ::std::option::Option<::std::string::String>,
    pub(crate) schema_change_policy: ::std::option::Option<crate::types::SchemaChangePolicy>,
    pub(crate) recrawl_policy: ::std::option::Option<crate::types::RecrawlPolicy>,
    pub(crate) lineage_configuration: ::std::option::Option<crate::types::LineageConfiguration>,
    pub(crate) lake_formation_configuration: ::std::option::Option<crate::types::LakeFormationConfiguration>,
    pub(crate) configuration: ::std::option::Option<::std::string::String>,
    pub(crate) crawler_security_configuration: ::std::option::Option<::std::string::String>,
    pub(crate) tags: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>,
}
impl CreateCrawlerInputBuilder {
    /// <p>Name of the new crawler.</p>
    /// This field is required.
    pub fn name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.name = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>Name of the new crawler.</p>
    pub fn set_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.name = input;
        self
    }
    /// <p>Name of the new crawler.</p>
    pub fn get_name(&self) -> &::std::option::Option<::std::string::String> {
        &self.name
    }
    /// <p>The IAM role or Amazon Resource Name (ARN) of an IAM role used by the new crawler to access customer resources.</p>
    /// This field is required.
    pub fn role(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.role = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>The IAM role or Amazon Resource Name (ARN) of an IAM role used by the new crawler to access customer resources.</p>
    pub fn set_role(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.role = input;
        self
    }
    /// <p>The IAM role or Amazon Resource Name (ARN) of an IAM role used by the new crawler to access customer resources.</p>
    pub fn get_role(&self) -> &::std::option::Option<::std::string::String> {
        &self.role
    }
    /// <p>The Glue database where results are written, such as: <code>arn:aws:daylight:us-east-1::database/sometable/*</code>.</p>
    pub fn database_name(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.database_name = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>The Glue database where results are written, such as: <code>arn:aws:daylight:us-east-1::database/sometable/*</code>.</p>
    pub fn set_database_name(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.database_name = input;
        self
    }
    /// <p>The Glue database where results are written, such as: <code>arn:aws:daylight:us-east-1::database/sometable/*</code>.</p>
    pub fn get_database_name(&self) -> &::std::option::Option<::std::string::String> {
        &self.database_name
    }
    /// <p>A description of the new crawler.</p>
    pub fn description(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.description = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>A description of the new crawler.</p>
    pub fn set_description(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.description = input;
        self
    }
    /// <p>A description of the new crawler.</p>
    pub fn get_description(&self) -> &::std::option::Option<::std::string::String> {
        &self.description
    }
    /// <p>A list of collections of targets to crawl.</p>
    /// This field is required.
    pub fn targets(mut self, input: crate::types::CrawlerTargets) -> Self {
        self.targets = ::std::option::Option::Some(input);
        self
    }
    /// <p>A list of collections of targets to crawl.</p>
    pub fn set_targets(mut self, input: ::std::option::Option<crate::types::CrawlerTargets>) -> Self {
        self.targets = input;
        self
    }
    /// <p>A list of collections of targets to crawl.</p>
    pub fn get_targets(&self) -> &::std::option::Option<crate::types::CrawlerTargets> {
        &self.targets
    }
    /// <p>A <code>cron</code> expression used to specify the schedule (see <a href="https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html">Time-Based Schedules for Jobs and Crawlers</a>). For example, to run something every day at 12:15 UTC, you would specify: <code>cron(15 12 * * ? *)</code>.</p>
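    ///
    /// A small illustrative sketch that sets the example expression from the docs above
    /// (the surrounding builder construction is only for illustration):
    ///
    /// ```
    /// let builder = aws_sdk_glue::operation::create_crawler::CreateCrawlerInput::builder()
    ///     .schedule("cron(15 12 * * ? *)");
    /// ```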
    pub fn schedule(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.schedule = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>A <code>cron</code> expression used to specify the schedule (see <a href="https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html">Time-Based Schedules for Jobs and Crawlers</a>). For example, to run something every day at 12:15 UTC, you would specify: <code>cron(15 12 * * ? *)</code>.</p>
    pub fn set_schedule(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.schedule = input;
        self
    }
    /// <p>A <code>cron</code> expression used to specify the schedule (see <a href="https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html">Time-Based Schedules for Jobs and Crawlers</a>). For example, to run something every day at 12:15 UTC, you would specify: <code>cron(15 12 * * ? *)</code>.</p>
    pub fn get_schedule(&self) -> &::std::option::Option<::std::string::String> {
        &self.schedule
    }
    /// Appends an item to `classifiers`.
    ///
    /// To override the contents of this collection use [`set_classifiers`](Self::set_classifiers).
    ///
    /// <p>A list of custom classifiers that the user has registered. By default, all built-in classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.</p>
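    ///
    /// An illustrative sketch of appending two custom classifier names; both names are
    /// placeholders, not classifiers from the model:
    ///
    /// ```
    /// let builder = aws_sdk_glue::operation::create_crawler::CreateCrawlerInput::builder()
    ///     .classifiers("my-grok-classifier")
    ///     .classifiers("my-json-classifier");
    /// ```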
    pub fn classifiers(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        let mut v = self.classifiers.unwrap_or_default();
        v.push(input.into());
        self.classifiers = ::std::option::Option::Some(v);
        self
    }
    /// <p>A list of custom classifiers that the user has registered. By default, all built-in classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.</p>
    pub fn set_classifiers(mut self, input: ::std::option::Option<::std::vec::Vec<::std::string::String>>) -> Self {
        self.classifiers = input;
        self
    }
    /// <p>A list of custom classifiers that the user has registered. By default, all built-in classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.</p>
    pub fn get_classifiers(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> {
        &self.classifiers
    }
    /// <p>The table prefix used for catalog tables that are created.</p>
    pub fn table_prefix(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.table_prefix = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>The table prefix used for catalog tables that are created.</p>
    pub fn set_table_prefix(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.table_prefix = input;
        self
    }
    /// <p>The table prefix used for catalog tables that are created.</p>
    pub fn get_table_prefix(&self) -> &::std::option::Option<::std::string::String> {
        &self.table_prefix
    }
    /// <p>The policy for the crawler's update and deletion behavior.</p>
    pub fn schema_change_policy(mut self, input: crate::types::SchemaChangePolicy) -> Self {
        self.schema_change_policy = ::std::option::Option::Some(input);
        self
    }
    /// <p>The policy for the crawler's update and deletion behavior.</p>
    pub fn set_schema_change_policy(mut self, input: ::std::option::Option<crate::types::SchemaChangePolicy>) -> Self {
        self.schema_change_policy = input;
        self
    }
    /// <p>The policy for the crawler's update and deletion behavior.</p>
    pub fn get_schema_change_policy(&self) -> &::std::option::Option<crate::types::SchemaChangePolicy> {
        &self.schema_change_policy
    }
    /// <p>A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.</p>
    pub fn recrawl_policy(mut self, input: crate::types::RecrawlPolicy) -> Self {
        self.recrawl_policy = ::std::option::Option::Some(input);
        self
    }
    /// <p>A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.</p>
    pub fn set_recrawl_policy(mut self, input: ::std::option::Option<crate::types::RecrawlPolicy>) -> Self {
        self.recrawl_policy = input;
        self
    }
    /// <p>A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.</p>
    pub fn get_recrawl_policy(&self) -> &::std::option::Option<crate::types::RecrawlPolicy> {
        &self.recrawl_policy
    }
    /// <p>Specifies data lineage configuration settings for the crawler.</p>
    pub fn lineage_configuration(mut self, input: crate::types::LineageConfiguration) -> Self {
        self.lineage_configuration = ::std::option::Option::Some(input);
        self
    }
    /// <p>Specifies data lineage configuration settings for the crawler.</p>
    pub fn set_lineage_configuration(mut self, input: ::std::option::Option<crate::types::LineageConfiguration>) -> Self {
        self.lineage_configuration = input;
        self
    }
    /// <p>Specifies data lineage configuration settings for the crawler.</p>
    pub fn get_lineage_configuration(&self) -> &::std::option::Option<crate::types::LineageConfiguration> {
        &self.lineage_configuration
    }
    /// <p>Specifies Lake Formation configuration settings for the crawler.</p>
    pub fn lake_formation_configuration(mut self, input: crate::types::LakeFormationConfiguration) -> Self {
        self.lake_formation_configuration = ::std::option::Option::Some(input);
        self
    }
    /// <p>Specifies Lake Formation configuration settings for the crawler.</p>
    pub fn set_lake_formation_configuration(mut self, input: ::std::option::Option<crate::types::LakeFormationConfiguration>) -> Self {
        self.lake_formation_configuration = input;
        self
    }
    /// <p>Specifies Lake Formation configuration settings for the crawler.</p>
    pub fn get_lake_formation_configuration(&self) -> &::std::option::Option<crate::types::LakeFormationConfiguration> {
        &self.lake_formation_configuration
    }
    /// <p>Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see <a href="https://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html">Setting crawler configuration options</a>.</p>
    pub fn configuration(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.configuration = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see <a href="https://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html">Setting crawler configuration options</a>.</p>
    pub fn set_configuration(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.configuration = input;
        self
    }
    /// <p>Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see <a href="https://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html">Setting crawler configuration options</a>.</p>
    pub fn get_configuration(&self) -> &::std::option::Option<::std::string::String> {
        &self.configuration
    }
    /// <p>The name of the <code>SecurityConfiguration</code> structure to be used by this crawler.</p>
    pub fn crawler_security_configuration(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.crawler_security_configuration = ::std::option::Option::Some(input.into());
        self
    }
    /// <p>The name of the <code>SecurityConfiguration</code> structure to be used by this crawler.</p>
    pub fn set_crawler_security_configuration(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.crawler_security_configuration = input;
        self
    }
    /// <p>The name of the <code>SecurityConfiguration</code> structure to be used by this crawler.</p>
    pub fn get_crawler_security_configuration(&self) -> &::std::option::Option<::std::string::String> {
        &self.crawler_security_configuration
    }
    /// Adds a key-value pair to `tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p>The tags to use with this crawler request. You may use tags to limit access to the crawler. For more information about tags in Glue, see <a href="https://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html">Amazon Web Services Tags in Glue</a> in the developer guide.</p>
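    ///
    /// An illustrative sketch of adding a single tag; the key and value are placeholders:
    ///
    /// ```
    /// let builder = aws_sdk_glue::operation::create_crawler::CreateCrawlerInput::builder()
    ///     .tags("team", "data-platform");
    /// ```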
    pub fn tags(mut self, k: impl ::std::convert::Into<::std::string::String>, v: impl ::std::convert::Into<::std::string::String>) -> Self {
        let mut hash_map = self.tags.unwrap_or_default();
        hash_map.insert(k.into(), v.into());
        self.tags = ::std::option::Option::Some(hash_map);
        self
    }
    /// <p>The tags to use with this crawler request. You may use tags to limit access to the crawler. For more information about tags in Glue, see <a href="https://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html">Amazon Web Services Tags in Glue</a> in the developer guide.</p>
    pub fn set_tags(mut self, input: ::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>>) -> Self {
        self.tags = input;
        self
    }
    /// <p>The tags to use with this crawler request. You may use tags to limit access to the crawler. For more information about tags in Glue, see <a href="https://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html">Amazon Web Services Tags in Glue</a> in the developer guide.</p>
    pub fn get_tags(&self) -> &::std::option::Option<::std::collections::HashMap<::std::string::String, ::std::string::String>> {
        &self.tags
    }
    /// Consumes the builder and constructs a [`CreateCrawlerInput`](crate::operation::create_crawler::CreateCrawlerInput).
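    ///
    /// A hedged sketch of finishing the builder; the field values are placeholders, and a
    /// real <code>CreateCrawler</code> request would also set the required <code>targets</code>
    /// member:
    ///
    /// ```
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// let input = aws_sdk_glue::operation::create_crawler::CreateCrawlerInput::builder()
    ///     .name("my-crawler")
    ///     .role("arn:aws:iam::123456789012:role/MyGlueCrawlerRole")
    ///     .set_description(Some("Nightly crawl of the raw data bucket".to_string()))
    ///     .build()?;
    /// assert_eq!(input.description(), Some("Nightly crawl of the raw data bucket"));
    /// # Ok(())
    /// # }
    /// ```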
    pub fn build(
        self,
    ) -> ::std::result::Result<crate::operation::create_crawler::CreateCrawlerInput, ::aws_smithy_types::error::operation::BuildError> {
        ::std::result::Result::Ok(crate::operation::create_crawler::CreateCrawlerInput {
            name: self.name,
            role: self.role,
            database_name: self.database_name,
            description: self.description,
            targets: self.targets,
            schedule: self.schedule,
            classifiers: self.classifiers,
            table_prefix: self.table_prefix,
            schema_change_policy: self.schema_change_policy,
            recrawl_policy: self.recrawl_policy,
            lineage_configuration: self.lineage_configuration,
            lake_formation_configuration: self.lake_formation_configuration,
            configuration: self.configuration,
            crawler_security_configuration: self.crawler_security_configuration,
            tags: self.tags,
        })
    }
}