Struct aws_sdk_glue::types::CodeGenConfigurationNode
source · #[non_exhaustive]pub struct CodeGenConfigurationNode {Show 71 fields
pub athena_connector_source: Option<AthenaConnectorSource>,
pub jdbc_connector_source: Option<JdbcConnectorSource>,
pub spark_connector_source: Option<SparkConnectorSource>,
pub catalog_source: Option<CatalogSource>,
pub redshift_source: Option<RedshiftSource>,
pub s3_catalog_source: Option<S3CatalogSource>,
pub s3_csv_source: Option<S3CsvSource>,
pub s3_json_source: Option<S3JsonSource>,
pub s3_parquet_source: Option<S3ParquetSource>,
pub relational_catalog_source: Option<RelationalCatalogSource>,
pub dynamo_db_catalog_source: Option<DynamoDbCatalogSource>,
pub jdbc_connector_target: Option<JdbcConnectorTarget>,
pub spark_connector_target: Option<SparkConnectorTarget>,
pub catalog_target: Option<BasicCatalogTarget>,
pub redshift_target: Option<RedshiftTarget>,
pub s3_catalog_target: Option<S3CatalogTarget>,
pub s3_glue_parquet_target: Option<S3GlueParquetTarget>,
pub s3_direct_target: Option<S3DirectTarget>,
pub apply_mapping: Option<ApplyMapping>,
pub select_fields: Option<SelectFields>,
pub drop_fields: Option<DropFields>,
pub rename_field: Option<RenameField>,
pub spigot: Option<Spigot>,
pub join: Option<Join>,
pub split_fields: Option<SplitFields>,
pub select_from_collection: Option<SelectFromCollection>,
pub fill_missing_values: Option<FillMissingValues>,
pub filter: Option<Filter>,
pub custom_code: Option<CustomCode>,
pub spark_sql: Option<SparkSql>,
pub direct_kinesis_source: Option<DirectKinesisSource>,
pub direct_kafka_source: Option<DirectKafkaSource>,
pub catalog_kinesis_source: Option<CatalogKinesisSource>,
pub catalog_kafka_source: Option<CatalogKafkaSource>,
pub drop_null_fields: Option<DropNullFields>,
pub merge: Option<Merge>,
pub union: Option<Union>,
pub pii_detection: Option<PiiDetection>,
pub aggregate: Option<Aggregate>,
pub drop_duplicates: Option<DropDuplicates>,
pub governed_catalog_target: Option<GovernedCatalogTarget>,
pub governed_catalog_source: Option<GovernedCatalogSource>,
pub microsoft_sql_server_catalog_source: Option<MicrosoftSqlServerCatalogSource>,
pub my_sql_catalog_source: Option<MySqlCatalogSource>,
pub oracle_sql_catalog_source: Option<OracleSqlCatalogSource>,
pub postgre_sql_catalog_source: Option<PostgreSqlCatalogSource>,
pub microsoft_sql_server_catalog_target: Option<MicrosoftSqlServerCatalogTarget>,
pub my_sql_catalog_target: Option<MySqlCatalogTarget>,
pub oracle_sql_catalog_target: Option<OracleSqlCatalogTarget>,
pub postgre_sql_catalog_target: Option<PostgreSqlCatalogTarget>,
pub dynamic_transform: Option<DynamicTransform>,
pub evaluate_data_quality: Option<EvaluateDataQuality>,
pub s3_catalog_hudi_source: Option<S3CatalogHudiSource>,
pub catalog_hudi_source: Option<CatalogHudiSource>,
pub s3_hudi_source: Option<S3HudiSource>,
pub s3_hudi_catalog_target: Option<S3HudiCatalogTarget>,
pub s3_hudi_direct_target: Option<S3HudiDirectTarget>,
pub direct_jdbc_source: Option<DirectJdbcSource>,
pub s3_catalog_delta_source: Option<S3CatalogDeltaSource>,
pub catalog_delta_source: Option<CatalogDeltaSource>,
pub s3_delta_source: Option<S3DeltaSource>,
pub s3_delta_catalog_target: Option<S3DeltaCatalogTarget>,
pub s3_delta_direct_target: Option<S3DeltaDirectTarget>,
pub amazon_redshift_source: Option<AmazonRedshiftSource>,
pub amazon_redshift_target: Option<AmazonRedshiftTarget>,
pub evaluate_data_quality_multi_frame: Option<EvaluateDataQualityMultiFrame>,
pub recipe: Option<Recipe>,
pub snowflake_source: Option<SnowflakeSource>,
pub snowflake_target: Option<SnowflakeTarget>,
pub connector_data_source: Option<ConnectorDataSource>,
pub connector_data_target: Option<ConnectorDataTarget>,
}
Expand description
CodeGenConfigurationNode
enumerates all valid Node types. One and only one of its member variables can be populated.
Fields (Non-exhaustive)§
This struct is marked as non-exhaustive
Struct { .. }
syntax; cannot be matched against without a wildcard ..
; and struct update syntax will not work.
athena_connector_source: Option<AthenaConnectorSource>
Specifies a connector to an Amazon Athena data source.
jdbc_connector_source: Option<JdbcConnectorSource>
Specifies a connector to a JDBC data source.
spark_connector_source: Option<SparkConnectorSource>
Specifies a connector to an Apache Spark data source.
catalog_source: Option<CatalogSource>
Specifies a data store in the Glue Data Catalog.
redshift_source: Option<RedshiftSource>
Specifies an Amazon Redshift data store.
s3_catalog_source: Option<S3CatalogSource>
Specifies an Amazon S3 data store in the Glue Data Catalog.
s3_csv_source: Option<S3CsvSource>
Specifies a comma-separated values (CSV) data store stored in Amazon S3.
s3_json_source: Option<S3JsonSource>
Specifies a JSON data store stored in Amazon S3.
s3_parquet_source: Option<S3ParquetSource>
Specifies an Apache Parquet data store stored in Amazon S3.
relational_catalog_source: Option<RelationalCatalogSource>
Specifies a relational catalog data store in the Glue Data Catalog.
dynamo_db_catalog_source: Option<DynamoDbCatalogSource>
Specifies a DynamoDB Catalog data store in the Glue Data Catalog.
jdbc_connector_target: Option<JdbcConnectorTarget>
Specifies a data target that writes to Amazon S3 in Apache Parquet columnar storage.
spark_connector_target: Option<SparkConnectorTarget>
Specifies a target that uses an Apache Spark connector.
catalog_target: Option<BasicCatalogTarget>
Specifies a target that uses a Glue Data Catalog table.
redshift_target: Option<RedshiftTarget>
Specifies a target that uses Amazon Redshift.
s3_catalog_target: Option<S3CatalogTarget>
Specifies a data target that writes to Amazon S3 using the Glue Data Catalog.
s3_glue_parquet_target: Option<S3GlueParquetTarget>
Specifies a data target that writes to Amazon S3 in Apache Parquet columnar storage.
s3_direct_target: Option<S3DirectTarget>
Specifies a data target that writes to Amazon S3.
apply_mapping: Option<ApplyMapping>
Specifies a transform that maps data property keys in the data source to data property keys in the data target. You can rename keys, modify the data types for keys, and choose which keys to drop from the dataset.
select_fields: Option<SelectFields>
Specifies a transform that chooses the data property keys that you want to keep.
drop_fields: Option<DropFields>
Specifies a transform that chooses the data property keys that you want to drop.
rename_field: Option<RenameField>
Specifies a transform that renames a single data property key.
spigot: Option<Spigot>
Specifies a transform that writes samples of the data to an Amazon S3 bucket.
join: Option<Join>
Specifies a transform that joins two datasets into one dataset using a comparison phrase on the specified data property keys. You can use inner, outer, left, right, left semi, and left anti joins.
split_fields: Option<SplitFields>
Specifies a transform that splits data property keys into two DynamicFrames
. The output is a collection of DynamicFrames
: one with selected data property keys, and one with the remaining data property keys.
select_from_collection: Option<SelectFromCollection>
Specifies a transform that chooses one DynamicFrame
from a collection of DynamicFrames
. The output is the selected DynamicFrame
fill_missing_values: Option<FillMissingValues>
Specifies a transform that locates records in the dataset that have missing values and adds a new field with a value determined by imputation. The input data set is used to train the machine learning model that determines what the missing value should be.
filter: Option<Filter>
Specifies a transform that splits a dataset into two, based on a filter condition.
custom_code: Option<CustomCode>
Specifies a transform that uses custom code you provide to perform the data transformation. The output is a collection of DynamicFrames.
spark_sql: Option<SparkSql>
Specifies a transform where you enter a SQL query using Spark SQL syntax to transform the data. The output is a single DynamicFrame
.
direct_kinesis_source: Option<DirectKinesisSource>
Specifies a direct Amazon Kinesis data source.
direct_kafka_source: Option<DirectKafkaSource>
Specifies an Apache Kafka data store.
catalog_kinesis_source: Option<CatalogKinesisSource>
Specifies a Kinesis data source in the Glue Data Catalog.
catalog_kafka_source: Option<CatalogKafkaSource>
Specifies an Apache Kafka data store in the Data Catalog.
drop_null_fields: Option<DropNullFields>
Specifies a transform that removes columns from the dataset if all values in the column are 'null'. By default, Glue Studio will recognize null objects, but some values such as empty strings, strings that are "null", -1 integers or other placeholders such as zeros, are not automatically recognized as nulls.
merge: Option<Merge>
Specifies a transform that merges a DynamicFrame
with a staging DynamicFrame
based on the specified primary keys to identify records. Duplicate records (records with the same primary keys) are not de-duplicated.
union: Option<Union>
Specifies a transform that combines the rows from two or more datasets into a single result.
pii_detection: Option<PiiDetection>
Specifies a transform that identifies, removes or masks PII data.
aggregate: Option<Aggregate>
Specifies a transform that groups rows by chosen fields and computes the aggregated value by specified function.
drop_duplicates: Option<DropDuplicates>
Specifies a transform that removes rows of repeating data from a data set.
governed_catalog_target: Option<GovernedCatalogTarget>
Specifies a data target that writes to a governed catalog.
governed_catalog_source: Option<GovernedCatalogSource>
Specifies a data source in a governed Data Catalog.
microsoft_sql_server_catalog_source: Option<MicrosoftSqlServerCatalogSource>
Specifies a Microsoft SQL server data source in the Glue Data Catalog.
my_sql_catalog_source: Option<MySqlCatalogSource>
Specifies a MySQL data source in the Glue Data Catalog.
oracle_sql_catalog_source: Option<OracleSqlCatalogSource>
Specifies an Oracle data source in the Glue Data Catalog.
postgre_sql_catalog_source: Option<PostgreSqlCatalogSource>
Specifies a PostgreSQL data source in the Glue Data Catalog.
microsoft_sql_server_catalog_target: Option<MicrosoftSqlServerCatalogTarget>
Specifies a target that uses Microsoft SQL.
my_sql_catalog_target: Option<MySqlCatalogTarget>
Specifies a target that uses MySQL.
oracle_sql_catalog_target: Option<OracleSqlCatalogTarget>
Specifies a target that uses Oracle SQL.
postgre_sql_catalog_target: Option<PostgreSqlCatalogTarget>
Specifies a target that uses PostgreSQL.
dynamic_transform: Option<DynamicTransform>
Specifies a custom visual transform created by a user.
evaluate_data_quality: Option<EvaluateDataQuality>
Specifies your data quality evaluation criteria.
s3_catalog_hudi_source: Option<S3CatalogHudiSource>
Specifies a Hudi data source that is registered in the Glue Data Catalog. The data source must be stored in Amazon S3.
catalog_hudi_source: Option<CatalogHudiSource>
Specifies a Hudi data source that is registered in the Glue Data Catalog.
s3_hudi_source: Option<S3HudiSource>
Specifies a Hudi data source stored in Amazon S3.
s3_hudi_catalog_target: Option<S3HudiCatalogTarget>
Specifies a target that writes to a Hudi data source in the Glue Data Catalog.
s3_hudi_direct_target: Option<S3HudiDirectTarget>
Specifies a target that writes to a Hudi data source in Amazon S3.
direct_jdbc_source: Option<DirectJdbcSource>
Specifies the direct JDBC source connection.
s3_catalog_delta_source: Option<S3CatalogDeltaSource>
Specifies a Delta Lake data source that is registered in the Glue Data Catalog. The data source must be stored in Amazon S3.
catalog_delta_source: Option<CatalogDeltaSource>
Specifies a Delta Lake data source that is registered in the Glue Data Catalog.
s3_delta_source: Option<S3DeltaSource>
Specifies a Delta Lake data source stored in Amazon S3.
s3_delta_catalog_target: Option<S3DeltaCatalogTarget>
Specifies a target that writes to a Delta Lake data source in the Glue Data Catalog.
s3_delta_direct_target: Option<S3DeltaDirectTarget>
Specifies a target that writes to a Delta Lake data source in Amazon S3.
amazon_redshift_source: Option<AmazonRedshiftSource>
Specifies a source that reads data from Amazon Redshift.
amazon_redshift_target: Option<AmazonRedshiftTarget>
Specifies a target that writes to a data target in Amazon Redshift.
evaluate_data_quality_multi_frame: Option<EvaluateDataQualityMultiFrame>
Specifies your data quality evaluation criteria. Allows multiple input data and returns a collection of Dynamic Frames.
recipe: Option<Recipe>
Specifies a Glue DataBrew recipe node.
snowflake_source: Option<SnowflakeSource>
Specifies a Snowflake data source.
snowflake_target: Option<SnowflakeTarget>
Specifies a target that writes to a Snowflake data source.
connector_data_source: Option<ConnectorDataSource>
Specifies a source generated with standard connection options.
connector_data_target: Option<ConnectorDataTarget>
Specifies a target generated with standard connection options.
Implementations§
source§impl CodeGenConfigurationNode
impl CodeGenConfigurationNode
sourcepub fn athena_connector_source(&self) -> Option<&AthenaConnectorSource>
pub fn athena_connector_source(&self) -> Option<&AthenaConnectorSource>
Specifies a connector to an Amazon Athena data source.
sourcepub fn jdbc_connector_source(&self) -> Option<&JdbcConnectorSource>
pub fn jdbc_connector_source(&self) -> Option<&JdbcConnectorSource>
Specifies a connector to a JDBC data source.
sourcepub fn spark_connector_source(&self) -> Option<&SparkConnectorSource>
pub fn spark_connector_source(&self) -> Option<&SparkConnectorSource>
Specifies a connector to an Apache Spark data source.
sourcepub fn catalog_source(&self) -> Option<&CatalogSource>
pub fn catalog_source(&self) -> Option<&CatalogSource>
Specifies a data store in the Glue Data Catalog.
sourcepub fn redshift_source(&self) -> Option<&RedshiftSource>
pub fn redshift_source(&self) -> Option<&RedshiftSource>
Specifies an Amazon Redshift data store.
sourcepub fn s3_catalog_source(&self) -> Option<&S3CatalogSource>
pub fn s3_catalog_source(&self) -> Option<&S3CatalogSource>
Specifies an Amazon S3 data store in the Glue Data Catalog.
sourcepub fn s3_csv_source(&self) -> Option<&S3CsvSource>
pub fn s3_csv_source(&self) -> Option<&S3CsvSource>
Specifies a comma-separated values (CSV) data store stored in Amazon S3.
sourcepub fn s3_json_source(&self) -> Option<&S3JsonSource>
pub fn s3_json_source(&self) -> Option<&S3JsonSource>
Specifies a JSON data store stored in Amazon S3.
sourcepub fn s3_parquet_source(&self) -> Option<&S3ParquetSource>
pub fn s3_parquet_source(&self) -> Option<&S3ParquetSource>
Specifies an Apache Parquet data store stored in Amazon S3.
sourcepub fn relational_catalog_source(&self) -> Option<&RelationalCatalogSource>
pub fn relational_catalog_source(&self) -> Option<&RelationalCatalogSource>
Specifies a relational catalog data store in the Glue Data Catalog.
sourcepub fn dynamo_db_catalog_source(&self) -> Option<&DynamoDbCatalogSource>
pub fn dynamo_db_catalog_source(&self) -> Option<&DynamoDbCatalogSource>
Specifies a DynamoDB Catalog data store in the Glue Data Catalog.
sourcepub fn jdbc_connector_target(&self) -> Option<&JdbcConnectorTarget>
pub fn jdbc_connector_target(&self) -> Option<&JdbcConnectorTarget>
Specifies a data target that writes to Amazon S3 in Apache Parquet columnar storage.
sourcepub fn spark_connector_target(&self) -> Option<&SparkConnectorTarget>
pub fn spark_connector_target(&self) -> Option<&SparkConnectorTarget>
Specifies a target that uses an Apache Spark connector.
sourcepub fn catalog_target(&self) -> Option<&BasicCatalogTarget>
pub fn catalog_target(&self) -> Option<&BasicCatalogTarget>
Specifies a target that uses a Glue Data Catalog table.
sourcepub fn redshift_target(&self) -> Option<&RedshiftTarget>
pub fn redshift_target(&self) -> Option<&RedshiftTarget>
Specifies a target that uses Amazon Redshift.
sourcepub fn s3_catalog_target(&self) -> Option<&S3CatalogTarget>
pub fn s3_catalog_target(&self) -> Option<&S3CatalogTarget>
Specifies a data target that writes to Amazon S3 using the Glue Data Catalog.
sourcepub fn s3_glue_parquet_target(&self) -> Option<&S3GlueParquetTarget>
pub fn s3_glue_parquet_target(&self) -> Option<&S3GlueParquetTarget>
Specifies a data target that writes to Amazon S3 in Apache Parquet columnar storage.
sourcepub fn s3_direct_target(&self) -> Option<&S3DirectTarget>
pub fn s3_direct_target(&self) -> Option<&S3DirectTarget>
Specifies a data target that writes to Amazon S3.
sourcepub fn apply_mapping(&self) -> Option<&ApplyMapping>
pub fn apply_mapping(&self) -> Option<&ApplyMapping>
Specifies a transform that maps data property keys in the data source to data property keys in the data target. You can rename keys, modify the data types for keys, and choose which keys to drop from the dataset.
sourcepub fn select_fields(&self) -> Option<&SelectFields>
pub fn select_fields(&self) -> Option<&SelectFields>
Specifies a transform that chooses the data property keys that you want to keep.
sourcepub fn drop_fields(&self) -> Option<&DropFields>
pub fn drop_fields(&self) -> Option<&DropFields>
Specifies a transform that chooses the data property keys that you want to drop.
sourcepub fn rename_field(&self) -> Option<&RenameField>
pub fn rename_field(&self) -> Option<&RenameField>
Specifies a transform that renames a single data property key.
sourcepub fn spigot(&self) -> Option<&Spigot>
pub fn spigot(&self) -> Option<&Spigot>
Specifies a transform that writes samples of the data to an Amazon S3 bucket.
sourcepub fn join(&self) -> Option<&Join>
pub fn join(&self) -> Option<&Join>
Specifies a transform that joins two datasets into one dataset using a comparison phrase on the specified data property keys. You can use inner, outer, left, right, left semi, and left anti joins.
sourcepub fn split_fields(&self) -> Option<&SplitFields>
pub fn split_fields(&self) -> Option<&SplitFields>
Specifies a transform that splits data property keys into two DynamicFrames
. The output is a collection of DynamicFrames
: one with selected data property keys, and one with the remaining data property keys.
sourcepub fn select_from_collection(&self) -> Option<&SelectFromCollection>
pub fn select_from_collection(&self) -> Option<&SelectFromCollection>
Specifies a transform that chooses one DynamicFrame
from a collection of DynamicFrames
. The output is the selected DynamicFrame
sourcepub fn fill_missing_values(&self) -> Option<&FillMissingValues>
pub fn fill_missing_values(&self) -> Option<&FillMissingValues>
Specifies a transform that locates records in the dataset that have missing values and adds a new field with a value determined by imputation. The input data set is used to train the machine learning model that determines what the missing value should be.
sourcepub fn filter(&self) -> Option<&Filter>
pub fn filter(&self) -> Option<&Filter>
Specifies a transform that splits a dataset into two, based on a filter condition.
sourcepub fn custom_code(&self) -> Option<&CustomCode>
pub fn custom_code(&self) -> Option<&CustomCode>
Specifies a transform that uses custom code you provide to perform the data transformation. The output is a collection of DynamicFrames.
sourcepub fn spark_sql(&self) -> Option<&SparkSql>
pub fn spark_sql(&self) -> Option<&SparkSql>
Specifies a transform where you enter a SQL query using Spark SQL syntax to transform the data. The output is a single DynamicFrame
.
sourcepub fn direct_kinesis_source(&self) -> Option<&DirectKinesisSource>
pub fn direct_kinesis_source(&self) -> Option<&DirectKinesisSource>
Specifies a direct Amazon Kinesis data source.
sourcepub fn direct_kafka_source(&self) -> Option<&DirectKafkaSource>
pub fn direct_kafka_source(&self) -> Option<&DirectKafkaSource>
Specifies an Apache Kafka data store.
sourcepub fn catalog_kinesis_source(&self) -> Option<&CatalogKinesisSource>
pub fn catalog_kinesis_source(&self) -> Option<&CatalogKinesisSource>
Specifies a Kinesis data source in the Glue Data Catalog.
sourcepub fn catalog_kafka_source(&self) -> Option<&CatalogKafkaSource>
pub fn catalog_kafka_source(&self) -> Option<&CatalogKafkaSource>
Specifies an Apache Kafka data store in the Data Catalog.
sourcepub fn drop_null_fields(&self) -> Option<&DropNullFields>
pub fn drop_null_fields(&self) -> Option<&DropNullFields>
Specifies a transform that removes columns from the dataset if all values in the column are 'null'. By default, Glue Studio will recognize null objects, but some values such as empty strings, strings that are "null", -1 integers or other placeholders such as zeros, are not automatically recognized as nulls.
sourcepub fn merge(&self) -> Option<&Merge>
pub fn merge(&self) -> Option<&Merge>
Specifies a transform that merges a DynamicFrame
with a staging DynamicFrame
based on the specified primary keys to identify records. Duplicate records (records with the same primary keys) are not de-duplicated.
sourcepub fn union(&self) -> Option<&Union>
pub fn union(&self) -> Option<&Union>
Specifies a transform that combines the rows from two or more datasets into a single result.
sourcepub fn pii_detection(&self) -> Option<&PiiDetection>
pub fn pii_detection(&self) -> Option<&PiiDetection>
Specifies a transform that identifies, removes or masks PII data.
sourcepub fn aggregate(&self) -> Option<&Aggregate>
pub fn aggregate(&self) -> Option<&Aggregate>
Specifies a transform that groups rows by chosen fields and computes the aggregated value by specified function.
sourcepub fn drop_duplicates(&self) -> Option<&DropDuplicates>
pub fn drop_duplicates(&self) -> Option<&DropDuplicates>
Specifies a transform that removes rows of repeating data from a data set.
sourcepub fn governed_catalog_target(&self) -> Option<&GovernedCatalogTarget>
pub fn governed_catalog_target(&self) -> Option<&GovernedCatalogTarget>
Specifies a data target that writes to a governed catalog.
sourcepub fn governed_catalog_source(&self) -> Option<&GovernedCatalogSource>
pub fn governed_catalog_source(&self) -> Option<&GovernedCatalogSource>
Specifies a data source in a governed Data Catalog.
sourcepub fn microsoft_sql_server_catalog_source(
&self,
) -> Option<&MicrosoftSqlServerCatalogSource>
pub fn microsoft_sql_server_catalog_source( &self, ) -> Option<&MicrosoftSqlServerCatalogSource>
Specifies a Microsoft SQL server data source in the Glue Data Catalog.
sourcepub fn my_sql_catalog_source(&self) -> Option<&MySqlCatalogSource>
pub fn my_sql_catalog_source(&self) -> Option<&MySqlCatalogSource>
Specifies a MySQL data source in the Glue Data Catalog.
sourcepub fn oracle_sql_catalog_source(&self) -> Option<&OracleSqlCatalogSource>
pub fn oracle_sql_catalog_source(&self) -> Option<&OracleSqlCatalogSource>
Specifies an Oracle data source in the Glue Data Catalog.
sourcepub fn postgre_sql_catalog_source(&self) -> Option<&PostgreSqlCatalogSource>
pub fn postgre_sql_catalog_source(&self) -> Option<&PostgreSqlCatalogSource>
Specifies a PostgreSQL data source in the Glue Data Catalog.
sourcepub fn microsoft_sql_server_catalog_target(
&self,
) -> Option<&MicrosoftSqlServerCatalogTarget>
pub fn microsoft_sql_server_catalog_target( &self, ) -> Option<&MicrosoftSqlServerCatalogTarget>
Specifies a target that uses Microsoft SQL.
sourcepub fn my_sql_catalog_target(&self) -> Option<&MySqlCatalogTarget>
pub fn my_sql_catalog_target(&self) -> Option<&MySqlCatalogTarget>
Specifies a target that uses MySQL.
sourcepub fn oracle_sql_catalog_target(&self) -> Option<&OracleSqlCatalogTarget>
pub fn oracle_sql_catalog_target(&self) -> Option<&OracleSqlCatalogTarget>
Specifies a target that uses Oracle SQL.
sourcepub fn postgre_sql_catalog_target(&self) -> Option<&PostgreSqlCatalogTarget>
pub fn postgre_sql_catalog_target(&self) -> Option<&PostgreSqlCatalogTarget>
Specifies a target that uses PostgreSQL.
sourcepub fn dynamic_transform(&self) -> Option<&DynamicTransform>
pub fn dynamic_transform(&self) -> Option<&DynamicTransform>
Specifies a custom visual transform created by a user.
sourcepub fn evaluate_data_quality(&self) -> Option<&EvaluateDataQuality>
pub fn evaluate_data_quality(&self) -> Option<&EvaluateDataQuality>
Specifies your data quality evaluation criteria.
sourcepub fn s3_catalog_hudi_source(&self) -> Option<&S3CatalogHudiSource>
pub fn s3_catalog_hudi_source(&self) -> Option<&S3CatalogHudiSource>
Specifies a Hudi data source that is registered in the Glue Data Catalog. The data source must be stored in Amazon S3.
sourcepub fn catalog_hudi_source(&self) -> Option<&CatalogHudiSource>
pub fn catalog_hudi_source(&self) -> Option<&CatalogHudiSource>
Specifies a Hudi data source that is registered in the Glue Data Catalog.
sourcepub fn s3_hudi_source(&self) -> Option<&S3HudiSource>
pub fn s3_hudi_source(&self) -> Option<&S3HudiSource>
Specifies a Hudi data source stored in Amazon S3.
sourcepub fn s3_hudi_catalog_target(&self) -> Option<&S3HudiCatalogTarget>
pub fn s3_hudi_catalog_target(&self) -> Option<&S3HudiCatalogTarget>
Specifies a target that writes to a Hudi data source in the Glue Data Catalog.
sourcepub fn s3_hudi_direct_target(&self) -> Option<&S3HudiDirectTarget>
pub fn s3_hudi_direct_target(&self) -> Option<&S3HudiDirectTarget>
Specifies a target that writes to a Hudi data source in Amazon S3.
sourcepub fn direct_jdbc_source(&self) -> Option<&DirectJdbcSource>
pub fn direct_jdbc_source(&self) -> Option<&DirectJdbcSource>
Specifies the direct JDBC source connection.
sourcepub fn s3_catalog_delta_source(&self) -> Option<&S3CatalogDeltaSource>
pub fn s3_catalog_delta_source(&self) -> Option<&S3CatalogDeltaSource>
Specifies a Delta Lake data source that is registered in the Glue Data Catalog. The data source must be stored in Amazon S3.
sourcepub fn catalog_delta_source(&self) -> Option<&CatalogDeltaSource>
pub fn catalog_delta_source(&self) -> Option<&CatalogDeltaSource>
Specifies a Delta Lake data source that is registered in the Glue Data Catalog.
sourcepub fn s3_delta_source(&self) -> Option<&S3DeltaSource>
pub fn s3_delta_source(&self) -> Option<&S3DeltaSource>
Specifies a Delta Lake data source stored in Amazon S3.
sourcepub fn s3_delta_catalog_target(&self) -> Option<&S3DeltaCatalogTarget>
pub fn s3_delta_catalog_target(&self) -> Option<&S3DeltaCatalogTarget>
Specifies a target that writes to a Delta Lake data source in the Glue Data Catalog.
sourcepub fn s3_delta_direct_target(&self) -> Option<&S3DeltaDirectTarget>
pub fn s3_delta_direct_target(&self) -> Option<&S3DeltaDirectTarget>
Specifies a target that writes to a Delta Lake data source in Amazon S3.
sourcepub fn amazon_redshift_source(&self) -> Option<&AmazonRedshiftSource>
pub fn amazon_redshift_source(&self) -> Option<&AmazonRedshiftSource>
Specifies a source that reads data from Amazon Redshift.
sourcepub fn amazon_redshift_target(&self) -> Option<&AmazonRedshiftTarget>
pub fn amazon_redshift_target(&self) -> Option<&AmazonRedshiftTarget>
Specifies a target that writes to a data target in Amazon Redshift.
sourcepub fn evaluate_data_quality_multi_frame(
&self,
) -> Option<&EvaluateDataQualityMultiFrame>
pub fn evaluate_data_quality_multi_frame( &self, ) -> Option<&EvaluateDataQualityMultiFrame>
Specifies your data quality evaluation criteria. Allows multiple input data and returns a collection of Dynamic Frames.
sourcepub fn snowflake_source(&self) -> Option<&SnowflakeSource>
pub fn snowflake_source(&self) -> Option<&SnowflakeSource>
Specifies a Snowflake data source.
sourcepub fn snowflake_target(&self) -> Option<&SnowflakeTarget>
pub fn snowflake_target(&self) -> Option<&SnowflakeTarget>
Specifies a target that writes to a Snowflake data source.
sourcepub fn connector_data_source(&self) -> Option<&ConnectorDataSource>
pub fn connector_data_source(&self) -> Option<&ConnectorDataSource>
Specifies a source generated with standard connection options.
sourcepub fn connector_data_target(&self) -> Option<&ConnectorDataTarget>
pub fn connector_data_target(&self) -> Option<&ConnectorDataTarget>
Specifies a target generated with standard connection options.
source§impl CodeGenConfigurationNode
impl CodeGenConfigurationNode
sourcepub fn builder() -> CodeGenConfigurationNodeBuilder
pub fn builder() -> CodeGenConfigurationNodeBuilder
Creates a new builder-style object to manufacture CodeGenConfigurationNode
.
Trait Implementations§
source§impl Clone for CodeGenConfigurationNode
impl Clone for CodeGenConfigurationNode
source§fn clone(&self) -> CodeGenConfigurationNode
fn clone(&self) -> CodeGenConfigurationNode
1.0.0 · source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more
source§impl Debug for CodeGenConfigurationNode
impl Debug for CodeGenConfigurationNode
source§impl PartialEq for CodeGenConfigurationNode
impl PartialEq for CodeGenConfigurationNode
source§fn eq(&self, other: &CodeGenConfigurationNode) -> bool
fn eq(&self, other: &CodeGenConfigurationNode) -> bool
Tests for self and other values to be equal, and is used by ==.
impl StructuralPartialEq for CodeGenConfigurationNode
Auto Trait Implementations§
impl Freeze for CodeGenConfigurationNode
impl RefUnwindSafe for CodeGenConfigurationNode
impl Send for CodeGenConfigurationNode
impl Sync for CodeGenConfigurationNode
impl Unpin for CodeGenConfigurationNode
impl UnwindSafe for CodeGenConfigurationNode
Blanket Implementations§
source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
source§impl<T> Instrument for T
impl<T> Instrument for T
source§fn instrument(self, span: Span) -> Instrumented<Self>
fn instrument(self, span: Span) -> Instrumented<Self>
source§fn in_current_span(self) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
source§impl<T> IntoEither for T
impl<T> IntoEither for T
source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true.
Converts self into a Right variant of Either<Self, Self> otherwise. Read more
source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self> otherwise. Read more