#[non_exhaustive]
pub struct AmazonRedshiftNodeData {
Show 26 fields pub access_type: Option<String>, pub source_type: Option<String>, pub connection: Option<Option>, pub schema: Option<Option>, pub table: Option<Option>, pub catalog_database: Option<Option>, pub catalog_table: Option<Option>, pub catalog_redshift_schema: Option<String>, pub catalog_redshift_table: Option<String>, pub temp_dir: Option<String>, pub iam_role: Option<Option>, pub advanced_options: Option<Vec<AmazonRedshiftAdvancedOption>>, pub sample_query: Option<String>, pub pre_action: Option<String>, pub post_action: Option<String>, pub action: Option<String>, pub table_prefix: Option<String>, pub upsert: bool, pub merge_action: Option<String>, pub merge_when_matched: Option<String>, pub merge_when_not_matched: Option<String>, pub merge_clause: Option<String>, pub crawler_connection: Option<String>, pub table_schema: Option<Vec<Option>>, pub staging_table: Option<String>, pub selected_columns: Option<Vec<Option>>,
}
Expand description

Specifies an Amazon Redshift node.

Fields (Non-exhaustive)§

This struct is marked as non-exhaustive
Non-exhaustive structs could have additional fields added in future. Therefore, non-exhaustive structs cannot be constructed in external crates using the traditional Struct { .. } syntax; cannot be matched against without a wildcard ..; and struct update syntax will not work.
§access_type: Option<String>

The access type for the Redshift connection. Can be a direct connection or catalog connections.

§source_type: Option<String>

The source type to specify whether a specific table is the source or a custom query.

§connection: Option<Option>

The Glue connection to the Redshift cluster.

§schema: Option<Option>

The Redshift schema name when working with a direct connection.

§table: Option<Option>

The Redshift table name when working with a direct connection.

§catalog_database: Option<Option>

The name of the Glue Data Catalog database when working with a data catalog.

§catalog_table: Option<Option>

The Glue Data Catalog table name when working with a data catalog.

§catalog_redshift_schema: Option<String>

The Redshift schema name when working with a data catalog.

§catalog_redshift_table: Option<String>

The database table to read from.

§temp_dir: Option<String>

The Amazon S3 path where temporary data can be staged when copying out of the database.

§iam_role: Option<Option>

Optional. The role name used when connecting to S3. The IAM role will default to the role on the job when left blank.

§advanced_options: Option<Vec<AmazonRedshiftAdvancedOption>>

Optional values when connecting to the Redshift cluster.

§sample_query: Option<String>

The SQL used to fetch the data from a Redshift source when the SourceType is 'query'.

§pre_action: Option<String>

The SQL used before a MERGE or APPEND with upsert is run.

§post_action: Option<String>

The SQL used after a MERGE or APPEND with upsert is run.

§action: Option<String>

Specifies how writing to a Redshift cluster will occur.

§table_prefix: Option<String>

Specifies the prefix to a table.

§upsert: bool

The action used on Redshift sinks when doing an APPEND.

§merge_action: Option<String>

The action used to determine how a MERGE in a Redshift sink will be handled.

§merge_when_matched: Option<String>

The action used to determine how a MERGE in a Redshift sink will be handled when an existing record matches a new record.

§merge_when_not_matched: Option<String>

The action used to determine how a MERGE in a Redshift sink will be handled when an existing record doesn't match a new record.

§merge_clause: Option<String>

The SQL used in a custom merge to deal with matching records.

§crawler_connection: Option<String>

Specifies the name of the connection that is associated with the catalog table used.

§table_schema: Option<Vec<Option>>

The array of schema output for a given node.

§staging_table: Option<String>

The name of the temporary staging table that is used when doing a MERGE or APPEND with upsert.

§selected_columns: Option<Vec<Option>>

The list of column names used to determine a matching record when doing a MERGE or APPEND with upsert.

Implementations§

source§

impl AmazonRedshiftNodeData

source

pub fn access_type(&self) -> Option<&str>

The access type for the Redshift connection. Can be a direct connection or catalog connections.

source

pub fn source_type(&self) -> Option<&str>

The source type to specify whether a specific table is the source or a custom query.

source

pub fn connection(&self) -> Option<&Option>

The Glue connection to the Redshift cluster.

source

pub fn schema(&self) -> Option<&Option>

The Redshift schema name when working with a direct connection.

source

pub fn table(&self) -> Option<&Option>

The Redshift table name when working with a direct connection.

source

pub fn catalog_database(&self) -> Option<&Option>

The name of the Glue Data Catalog database when working with a data catalog.

source

pub fn catalog_table(&self) -> Option<&Option>

The Glue Data Catalog table name when working with a data catalog.

source

pub fn catalog_redshift_schema(&self) -> Option<&str>

The Redshift schema name when working with a data catalog.

source

pub fn catalog_redshift_table(&self) -> Option<&str>

The database table to read from.

source

pub fn temp_dir(&self) -> Option<&str>

The Amazon S3 path where temporary data can be staged when copying out of the database.

source

pub fn iam_role(&self) -> Option<&Option>

Optional. The role name used when connecting to S3. The IAM role will default to the role on the job when left blank.

source

pub fn advanced_options(&self) -> &[AmazonRedshiftAdvancedOption]

Optional values when connecting to the Redshift cluster.

If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use .advanced_options.is_none().

source

pub fn sample_query(&self) -> Option<&str>

The SQL used to fetch the data from a Redshift source when the SourceType is 'query'.

source

pub fn pre_action(&self) -> Option<&str>

The SQL used before a MERGE or APPEND with upsert is run.

source

pub fn post_action(&self) -> Option<&str>

The SQL used after a MERGE or APPEND with upsert is run.

source

pub fn action(&self) -> Option<&str>

Specifies how writing to a Redshift cluster will occur.

source

pub fn table_prefix(&self) -> Option<&str>

Specifies the prefix to a table.

source

pub fn upsert(&self) -> bool

The action used on Redshift sinks when doing an APPEND.

source

pub fn merge_action(&self) -> Option<&str>

The action used to determine how a MERGE in a Redshift sink will be handled.

source

pub fn merge_when_matched(&self) -> Option<&str>

The action used to determine how a MERGE in a Redshift sink will be handled when an existing record matches a new record.

source

pub fn merge_when_not_matched(&self) -> Option<&str>

The action used to determine how a MERGE in a Redshift sink will be handled when an existing record doesn't match a new record.

source

pub fn merge_clause(&self) -> Option<&str>

The SQL used in a custom merge to deal with matching records.

source

pub fn crawler_connection(&self) -> Option<&str>

Specifies the name of the connection that is associated with the catalog table used.

source

pub fn table_schema(&self) -> &[Option]

The array of schema output for a given node.

If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use .table_schema.is_none().

source

pub fn staging_table(&self) -> Option<&str>

The name of the temporary staging table that is used when doing a MERGE or APPEND with upsert.

source

pub fn selected_columns(&self) -> &[Option]

The list of column names used to determine a matching record when doing a MERGE or APPEND with upsert.

If no value was sent for this field, a default will be set. If you want to determine if no value was sent, use .selected_columns.is_none().

source§

impl AmazonRedshiftNodeData

source

pub fn builder() -> AmazonRedshiftNodeDataBuilder

Creates a new builder-style object to manufacture AmazonRedshiftNodeData.

Trait Implementations§

source§

impl Clone for AmazonRedshiftNodeData

source§

fn clone(&self) -> AmazonRedshiftNodeData

Returns a copy of the value. Read more
1.0.0 · source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
source§

impl Debug for AmazonRedshiftNodeData

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
source§

impl PartialEq for AmazonRedshiftNodeData

source§

fn eq(&self, other: &AmazonRedshiftNodeData) -> bool

This method tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

This method tests for !=. The default implementation is almost always sufficient, and should not be overridden without very good reason.
source§

impl StructuralPartialEq for AmazonRedshiftNodeData

Auto Trait Implementations§

Blanket Implementations§

source§

impl<T> Any for Twhere T: 'static + ?Sized,

source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
source§

impl<T> Borrow<T> for Twhere T: ?Sized,

source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
source§

impl<T> BorrowMut<T> for Twhere T: ?Sized,

source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
source§

impl<T> From<T> for T

source§

fn from(t: T) -> T

Returns the argument unchanged.

source§

impl<T> Instrument for T

source§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper. Read more
source§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
source§

impl<T, U> Into<U> for Twhere U: From<T>,

source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

source§

impl<Unshared, Shared> IntoShared<Shared> for Unsharedwhere Shared: FromUnshared<Unshared>,

source§

fn into_shared(self) -> Shared

Creates a shared type from an unshared type.
source§

impl<T> Same for T

§

type Output = T

Should always be Self
source§

impl<T> ToOwned for Twhere T: Clone,

§

type Owned = T

The resulting type after obtaining ownership.
source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
source§

impl<T, U> TryFrom<U> for Twhere U: Into<T>,

§

type Error = Infallible

The type returned in the event of a conversion error.
source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
source§

impl<T, U> TryInto<U> for Twhere U: TryFrom<T>,

§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
source§

impl<T> WithSubscriber for T

source§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper. Read more
source§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper. Read more