// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// <p>Additional connection options for the connector.</p>
///
/// Construct this type via [`JdbcConnectorOptions::builder`]; the struct is
/// `#[non_exhaustive]`, so it cannot be built with a struct literal outside this crate.
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
pub struct JdbcConnectorOptions {
    /// <p>Extra condition clause to filter data from source. For example:</p>
    /// <p><code>BillingCity='Mountain View'</code></p>
    /// <p>When using a query instead of a table name, you should validate that the query works with the specified <code>filterPredicate</code>.</p>
    pub filter_predicate: ::std::option::Option<::std::string::String>,
    /// <p>The name of an integer column that is used for partitioning. This option works only when it's included with <code>lowerBound</code>, <code>upperBound</code>, and <code>numPartitions</code>. This option works the same way as in the Spark SQL JDBC reader.</p>
    pub partition_column: ::std::option::Option<::std::string::String>,
    /// <p>The minimum value of <code>partitionColumn</code> that is used to decide partition stride.</p>
    pub lower_bound: ::std::option::Option<i64>,
    /// <p>The maximum value of <code>partitionColumn</code> that is used to decide partition stride.</p>
    pub upper_bound: ::std::option::Option<i64>,
    /// <p>The number of partitions. This value, along with <code>lowerBound</code> (inclusive) and <code>upperBound</code> (exclusive), form partition strides for generated <code>WHERE</code> clause expressions that are used to split the <code>partitionColumn</code>.</p>
    pub num_partitions: ::std::option::Option<i64>,
    /// <p>The name of the job bookmark keys on which to sort.</p>
    pub job_bookmark_keys: ::std::option::Option<::std::vec::Vec<::std::string::String>>,
    /// <p>Specifies an ascending or descending sort order.</p>
    pub job_bookmark_keys_sort_order: ::std::option::Option<::std::string::String>,
    /// <p>Custom data type mapping that builds a mapping from a JDBC data type to a Glue data type. For example, the option <code>"dataTypeMapping":{"FLOAT":"STRING"}</code> maps data fields of JDBC type <code>FLOAT</code> into the Java <code>String</code> type by calling the <code>ResultSet.getString()</code> method of the driver, and uses it to build the Glue record. The <code>ResultSet</code> object is implemented by each driver, so the behavior is specific to the driver you use. Refer to the documentation for your JDBC driver to understand how the driver performs the conversions.</p>
    pub data_type_mapping: ::std::option::Option<::std::collections::HashMap<crate::types::JdbcDataType, crate::types::GlueRecordType>>,
}
impl JdbcConnectorOptions {
    /// Extra condition clause used to filter data from the source, for example
    /// `BillingCity='Mountain View'`. When using a query instead of a table name,
    /// validate that the query works with the specified `filterPredicate`.
    pub fn filter_predicate(&self) -> ::std::option::Option<&str> {
        self.filter_predicate.as_ref().map(|s| s.as_str())
    }
    /// Name of an integer column used for partitioning. This option works only
    /// when it's included with `lowerBound`, `upperBound`, and `numPartitions`,
    /// and works the same way as in the Spark SQL JDBC reader.
    pub fn partition_column(&self) -> ::std::option::Option<&str> {
        self.partition_column.as_ref().map(|s| s.as_str())
    }
    /// Minimum value of `partitionColumn`, used to decide the partition stride.
    pub fn lower_bound(&self) -> ::std::option::Option<i64> {
        self.lower_bound
    }
    /// Maximum value of `partitionColumn`, used to decide the partition stride.
    pub fn upper_bound(&self) -> ::std::option::Option<i64> {
        self.upper_bound
    }
    /// Number of partitions. Together with `lowerBound` (inclusive) and
    /// `upperBound` (exclusive), this forms the partition strides for generated
    /// `WHERE` clause expressions used to split the `partitionColumn`.
    pub fn num_partitions(&self) -> ::std::option::Option<i64> {
        self.num_partitions
    }
    /// Names of the job bookmark keys on which to sort.
    ///
    /// Returns an empty slice when the field was never set; to distinguish
    /// "unset" from "empty", check `.job_bookmark_keys.is_none()` directly.
    pub fn job_bookmark_keys(&self) -> &[::std::string::String] {
        match &self.job_bookmark_keys {
            ::std::option::Option::Some(keys) => keys.as_slice(),
            ::std::option::Option::None => &[],
        }
    }
    /// Ascending or descending sort order for the job bookmark keys.
    pub fn job_bookmark_keys_sort_order(&self) -> ::std::option::Option<&str> {
        self.job_bookmark_keys_sort_order.as_ref().map(|s| s.as_str())
    }
    /// Custom mapping from JDBC data types to Glue data types. For example,
    /// `"dataTypeMapping":{"FLOAT":"STRING"}` maps JDBC `FLOAT` fields into the
    /// Java `String` type via the driver's `ResultSet.getString()` method; the
    /// exact conversion behavior is specific to the JDBC driver in use.
    pub fn data_type_mapping(&self) -> ::std::option::Option<&::std::collections::HashMap<crate::types::JdbcDataType, crate::types::GlueRecordType>> {
        match &self.data_type_mapping {
            ::std::option::Option::Some(map) => ::std::option::Option::Some(map),
            ::std::option::Option::None => ::std::option::Option::None,
        }
    }
}
impl JdbcConnectorOptions {
    /// Creates a new builder-style object to manufacture [`JdbcConnectorOptions`](crate::types::JdbcConnectorOptions).
    ///
    /// Because the struct is `#[non_exhaustive]`, this builder is the supported way
    /// to construct a `JdbcConnectorOptions` from outside the defining crate.
    pub fn builder() -> crate::types::builders::JdbcConnectorOptionsBuilder {
        crate::types::builders::JdbcConnectorOptionsBuilder::default()
    }
}
/// A builder for [`JdbcConnectorOptions`](crate::types::JdbcConnectorOptions).
///
/// All fields start as `None` (via `Default`) and are populated through the
/// setter methods; `build()` consumes the builder and produces the final struct.
#[non_exhaustive]
#[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::default::Default, ::std::fmt::Debug)]
pub struct JdbcConnectorOptionsBuilder {
    /// Extra condition clause used to filter data from the source.
    pub(crate) filter_predicate: ::std::option::Option<::std::string::String>,
    /// Name of an integer column used for partitioning.
    pub(crate) partition_column: ::std::option::Option<::std::string::String>,
    /// Minimum value of the partition column, used to decide partition stride.
    pub(crate) lower_bound: ::std::option::Option<i64>,
    /// Maximum value of the partition column, used to decide partition stride.
    pub(crate) upper_bound: ::std::option::Option<i64>,
    /// Number of partitions.
    pub(crate) num_partitions: ::std::option::Option<i64>,
    /// Names of the job bookmark keys on which to sort.
    pub(crate) job_bookmark_keys: ::std::option::Option<::std::vec::Vec<::std::string::String>>,
    /// Ascending or descending sort order for the job bookmark keys.
    pub(crate) job_bookmark_keys_sort_order: ::std::option::Option<::std::string::String>,
    /// Custom mapping from JDBC data types to Glue data types.
    pub(crate) data_type_mapping: ::std::option::Option<::std::collections::HashMap<crate::types::JdbcDataType, crate::types::GlueRecordType>>,
}
impl JdbcConnectorOptionsBuilder {
    /// Sets an extra condition clause used to filter data from the source, for
    /// example `BillingCity='Mountain View'`. When using a query instead of a
    /// table name, validate that the query works with the specified `filterPredicate`.
    pub fn filter_predicate(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.filter_predicate.replace(input.into());
        self
    }
    /// Sets or clears the extra filter condition clause
    /// (see [`filter_predicate`](Self::filter_predicate)).
    pub fn set_filter_predicate(self, input: ::std::option::Option<::std::string::String>) -> Self {
        Self { filter_predicate: input, ..self }
    }
    /// Returns the currently configured filter predicate, if any.
    pub fn get_filter_predicate(&self) -> &::std::option::Option<::std::string::String> {
        &self.filter_predicate
    }
    /// Sets the name of an integer column used for partitioning. This option works
    /// only when it's included with `lowerBound`, `upperBound`, and `numPartitions`,
    /// and works the same way as in the Spark SQL JDBC reader.
    pub fn partition_column(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.partition_column.replace(input.into());
        self
    }
    /// Sets or clears the partition column
    /// (see [`partition_column`](Self::partition_column)).
    pub fn set_partition_column(self, input: ::std::option::Option<::std::string::String>) -> Self {
        Self { partition_column: input, ..self }
    }
    /// Returns the currently configured partition column, if any.
    pub fn get_partition_column(&self) -> &::std::option::Option<::std::string::String> {
        &self.partition_column
    }
    /// Sets the minimum value of `partitionColumn`, used to decide the partition stride.
    pub fn lower_bound(mut self, input: i64) -> Self {
        self.lower_bound.replace(input);
        self
    }
    /// Sets or clears the minimum value of `partitionColumn`.
    pub fn set_lower_bound(self, input: ::std::option::Option<i64>) -> Self {
        Self { lower_bound: input, ..self }
    }
    /// Returns the currently configured lower bound, if any.
    pub fn get_lower_bound(&self) -> &::std::option::Option<i64> {
        &self.lower_bound
    }
    /// Sets the maximum value of `partitionColumn`, used to decide the partition stride.
    pub fn upper_bound(mut self, input: i64) -> Self {
        self.upper_bound.replace(input);
        self
    }
    /// Sets or clears the maximum value of `partitionColumn`.
    pub fn set_upper_bound(self, input: ::std::option::Option<i64>) -> Self {
        Self { upper_bound: input, ..self }
    }
    /// Returns the currently configured upper bound, if any.
    pub fn get_upper_bound(&self) -> &::std::option::Option<i64> {
        &self.upper_bound
    }
    /// Sets the number of partitions. Together with `lowerBound` (inclusive) and
    /// `upperBound` (exclusive), this forms the partition strides for generated
    /// `WHERE` clause expressions used to split the `partitionColumn`.
    pub fn num_partitions(mut self, input: i64) -> Self {
        self.num_partitions.replace(input);
        self
    }
    /// Sets or clears the number of partitions.
    pub fn set_num_partitions(self, input: ::std::option::Option<i64>) -> Self {
        Self { num_partitions: input, ..self }
    }
    /// Returns the currently configured number of partitions, if any.
    pub fn get_num_partitions(&self) -> &::std::option::Option<i64> {
        &self.num_partitions
    }
    /// Appends one job bookmark key (a name on which to sort) to
    /// `job_bookmark_keys`, creating the list on first use.
    ///
    /// To override the contents of this collection use
    /// [`set_job_bookmark_keys`](Self::set_job_bookmark_keys).
    pub fn job_bookmark_keys(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.job_bookmark_keys
            .get_or_insert_with(::std::vec::Vec::new)
            .push(input.into());
        self
    }
    /// Replaces (or clears) the whole list of job bookmark keys.
    pub fn set_job_bookmark_keys(self, input: ::std::option::Option<::std::vec::Vec<::std::string::String>>) -> Self {
        Self { job_bookmark_keys: input, ..self }
    }
    /// Returns the currently configured job bookmark keys, if any.
    pub fn get_job_bookmark_keys(&self) -> &::std::option::Option<::std::vec::Vec<::std::string::String>> {
        &self.job_bookmark_keys
    }
    /// Sets the sort order (ascending or descending) for the job bookmark keys.
    pub fn job_bookmark_keys_sort_order(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.job_bookmark_keys_sort_order.replace(input.into());
        self
    }
    /// Sets or clears the sort order for the job bookmark keys.
    pub fn set_job_bookmark_keys_sort_order(self, input: ::std::option::Option<::std::string::String>) -> Self {
        Self { job_bookmark_keys_sort_order: input, ..self }
    }
    /// Returns the currently configured sort order, if any.
    pub fn get_job_bookmark_keys_sort_order(&self) -> &::std::option::Option<::std::string::String> {
        &self.job_bookmark_keys_sort_order
    }
    /// Adds one key-value pair to the custom JDBC-to-Glue data type mapping,
    /// creating the map on first use. For example, `"dataTypeMapping":{"FLOAT":"STRING"}`
    /// maps JDBC `FLOAT` fields into the Java `String` type via the driver's
    /// `ResultSet.getString()` method; the exact conversion behavior is specific
    /// to the JDBC driver in use.
    ///
    /// To override the contents of this collection use
    /// [`set_data_type_mapping`](Self::set_data_type_mapping).
    pub fn data_type_mapping(mut self, k: crate::types::JdbcDataType, v: crate::types::GlueRecordType) -> Self {
        self.data_type_mapping
            .get_or_insert_with(::std::collections::HashMap::new)
            .insert(k, v);
        self
    }
    /// Replaces (or clears) the whole custom JDBC-to-Glue data type mapping.
    pub fn set_data_type_mapping(
        self,
        input: ::std::option::Option<::std::collections::HashMap<crate::types::JdbcDataType, crate::types::GlueRecordType>>,
    ) -> Self {
        Self { data_type_mapping: input, ..self }
    }
    /// Returns the currently configured data type mapping, if any.
    pub fn get_data_type_mapping(
        &self,
    ) -> &::std::option::Option<::std::collections::HashMap<crate::types::JdbcDataType, crate::types::GlueRecordType>> {
        &self.data_type_mapping
    }
    /// Consumes the builder and constructs a [`JdbcConnectorOptions`](crate::types::JdbcConnectorOptions).
    pub fn build(self) -> crate::types::JdbcConnectorOptions {
        // Move every field out of the builder in one destructuring step, then
        // assemble the final struct with field shorthand.
        let Self {
            filter_predicate,
            partition_column,
            lower_bound,
            upper_bound,
            num_partitions,
            job_bookmark_keys,
            job_bookmark_keys_sort_order,
            data_type_mapping,
        } = self;
        crate::types::JdbcConnectorOptions {
            filter_predicate,
            partition_column,
            lower_bound,
            upper_bound,
            num_partitions,
            job_bookmark_keys,
            job_bookmark_keys_sort_order,
            data_type_mapping,
        }
    }
}