#[non_exhaustive]
pub struct KafkaStreamingSourceOptionsBuilder { /* private fields */ }
Expand description

Implementations§

source§

impl KafkaStreamingSourceOptionsBuilder

source

pub fn bootstrap_servers(self, input: impl Into<String>) -> Self

A list of bootstrap server URLs, for example: b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094. This option must be specified in the API call or defined in the table metadata in the Data Catalog.

source

pub fn set_bootstrap_servers(self, input: Option<String>) -> Self

A list of bootstrap server URLs, for example: b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094. This option must be specified in the API call or defined in the table metadata in the Data Catalog.

source

pub fn get_bootstrap_servers(&self) -> &Option<String>

A list of bootstrap server URLs, for example: b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094. This option must be specified in the API call or defined in the table metadata in the Data Catalog.

source

pub fn security_protocol(self, input: impl Into<String>) -> Self

The protocol used to communicate with brokers. The possible values are "SSL" or "PLAINTEXT".

source

pub fn set_security_protocol(self, input: Option<String>) -> Self

The protocol used to communicate with brokers. The possible values are "SSL" or "PLAINTEXT".

source

pub fn get_security_protocol(&self) -> &Option<String>

The protocol used to communicate with brokers. The possible values are "SSL" or "PLAINTEXT".

source

pub fn connection_name(self, input: impl Into<String>) -> Self

The name of the connection.

source

pub fn set_connection_name(self, input: Option<String>) -> Self

The name of the connection.

source

pub fn get_connection_name(&self) -> &Option<String>

The name of the connection.

source

pub fn topic_name(self, input: impl Into<String>) -> Self

The topic name as specified in Apache Kafka. You must specify at least one of "topicName", "assign" or "subscribePattern".

source

pub fn set_topic_name(self, input: Option<String>) -> Self

The topic name as specified in Apache Kafka. You must specify at least one of "topicName", "assign" or "subscribePattern".

source

pub fn get_topic_name(&self) -> &Option<String>

The topic name as specified in Apache Kafka. You must specify at least one of "topicName", "assign" or "subscribePattern".

source

pub fn assign(self, input: impl Into<String>) -> Self

The specific TopicPartitions to consume. You must specify at least one of "topicName", "assign" or "subscribePattern".

source

pub fn set_assign(self, input: Option<String>) -> Self

The specific TopicPartitions to consume. You must specify at least one of "topicName", "assign" or "subscribePattern".

source

pub fn get_assign(&self) -> &Option<String>

The specific TopicPartitions to consume. You must specify at least one of "topicName", "assign" or "subscribePattern".

source

pub fn subscribe_pattern(self, input: impl Into<String>) -> Self

A Java regex string that identifies the topic list to subscribe to. You must specify at least one of "topicName", "assign" or "subscribePattern".

source

pub fn set_subscribe_pattern(self, input: Option<String>) -> Self

A Java regex string that identifies the topic list to subscribe to. You must specify at least one of "topicName", "assign" or "subscribePattern".

source

pub fn get_subscribe_pattern(&self) -> &Option<String>

A Java regex string that identifies the topic list to subscribe to. You must specify at least one of "topicName", "assign" or "subscribePattern".

source

pub fn classification(self, input: impl Into<String>) -> Self

An optional classification.

source

pub fn set_classification(self, input: Option<String>) -> Self

An optional classification.

source

pub fn get_classification(&self) -> &Option<String>

An optional classification.

source

pub fn delimiter(self, input: impl Into<String>) -> Self

Specifies the delimiter character.

source

pub fn set_delimiter(self, input: Option<String>) -> Self

Specifies the delimiter character.

source

pub fn get_delimiter(&self) -> &Option<String>

Specifies the delimiter character.

source

pub fn starting_offsets(self, input: impl Into<String>) -> Self

The starting position in the Kafka topic to read data from. The possible values are "earliest" or "latest". The default value is "latest".

source

pub fn set_starting_offsets(self, input: Option<String>) -> Self

The starting position in the Kafka topic to read data from. The possible values are "earliest" or "latest". The default value is "latest".

source

pub fn get_starting_offsets(&self) -> &Option<String>

The starting position in the Kafka topic to read data from. The possible values are "earliest" or "latest". The default value is "latest".

source

pub fn ending_offsets(self, input: impl Into<String>) -> Self

The end point when a batch query is ended. Possible values are either "latest" or a JSON string that specifies an ending offset for each TopicPartition.

source

pub fn set_ending_offsets(self, input: Option<String>) -> Self

The end point when a batch query is ended. Possible values are either "latest" or a JSON string that specifies an ending offset for each TopicPartition.

source

pub fn get_ending_offsets(&self) -> &Option<String>

The end point when a batch query is ended. Possible values are either "latest" or a JSON string that specifies an ending offset for each TopicPartition.

source

pub fn poll_timeout_ms(self, input: i64) -> Self

The timeout in milliseconds to poll data from Kafka in Spark job executors. The default value is 512.

source

pub fn set_poll_timeout_ms(self, input: Option<i64>) -> Self

The timeout in milliseconds to poll data from Kafka in Spark job executors. The default value is 512.

source

pub fn get_poll_timeout_ms(&self) -> &Option<i64>

The timeout in milliseconds to poll data from Kafka in Spark job executors. The default value is 512.

source

pub fn num_retries(self, input: i32) -> Self

The number of times to retry before failing to fetch Kafka offsets. The default value is 3.

source

pub fn set_num_retries(self, input: Option<i32>) -> Self

The number of times to retry before failing to fetch Kafka offsets. The default value is 3.

source

pub fn get_num_retries(&self) -> &Option<i32>

The number of times to retry before failing to fetch Kafka offsets. The default value is 3.

source

pub fn retry_interval_ms(self, input: i64) -> Self

The time in milliseconds to wait before retrying to fetch Kafka offsets. The default value is 10.

source

pub fn set_retry_interval_ms(self, input: Option<i64>) -> Self

The time in milliseconds to wait before retrying to fetch Kafka offsets. The default value is 10.

source

pub fn get_retry_interval_ms(&self) -> &Option<i64>

The time in milliseconds to wait before retrying to fetch Kafka offsets. The default value is 10.

source

pub fn max_offsets_per_trigger(self, input: i64) -> Self

The rate limit on the maximum number of offsets that are processed per trigger interval. The specified total number of offsets is proportionally split across topicPartitions of different volumes. The default value is null, which means that the consumer reads all offsets until the known latest offset.

source

pub fn set_max_offsets_per_trigger(self, input: Option<i64>) -> Self

The rate limit on the maximum number of offsets that are processed per trigger interval. The specified total number of offsets is proportionally split across topicPartitions of different volumes. The default value is null, which means that the consumer reads all offsets until the known latest offset.

source

pub fn get_max_offsets_per_trigger(&self) -> &Option<i64>

The rate limit on the maximum number of offsets that are processed per trigger interval. The specified total number of offsets is proportionally split across topicPartitions of different volumes. The default value is null, which means that the consumer reads all offsets until the known latest offset.

source

pub fn min_partitions(self, input: i32) -> Self

The desired minimum number of partitions to read from Kafka. The default value is null, which means that the number of spark partitions is equal to the number of Kafka partitions.

source

pub fn set_min_partitions(self, input: Option<i32>) -> Self

The desired minimum number of partitions to read from Kafka. The default value is null, which means that the number of spark partitions is equal to the number of Kafka partitions.

source

pub fn get_min_partitions(&self) -> &Option<i32>

The desired minimum number of partitions to read from Kafka. The default value is null, which means that the number of spark partitions is equal to the number of Kafka partitions.

source

pub fn include_headers(self, input: bool) -> Self

Whether to include the Kafka headers. When the option is set to "true", the data output will contain an additional column named "glue_streaming_kafka_headers" with type Array[Struct(key: String, value: String)]. The default value is "false". This option is available in Glue version 3.0 or later only.

source

pub fn set_include_headers(self, input: Option<bool>) -> Self

Whether to include the Kafka headers. When the option is set to "true", the data output will contain an additional column named "glue_streaming_kafka_headers" with type Array[Struct(key: String, value: String)]. The default value is "false". This option is available in Glue version 3.0 or later only.

source

pub fn get_include_headers(&self) -> &Option<bool>

Whether to include the Kafka headers. When the option is set to "true", the data output will contain an additional column named "glue_streaming_kafka_headers" with type Array[Struct(key: String, value: String)]. The default value is "false". This option is available in Glue version 3.0 or later only.

source

pub fn add_record_timestamp(self, input: impl Into<String>) -> Self

When this option is set to 'true', the data output will contain an additional column named "__src_timestamp" that indicates the time when the corresponding record was received by the topic. The default value is 'false'. This option is supported in Glue version 4.0 or later.

source

pub fn set_add_record_timestamp(self, input: Option<String>) -> Self

When this option is set to 'true', the data output will contain an additional column named "__src_timestamp" that indicates the time when the corresponding record was received by the topic. The default value is 'false'. This option is supported in Glue version 4.0 or later.

source

pub fn get_add_record_timestamp(&self) -> &Option<String>

When this option is set to 'true', the data output will contain an additional column named "__src_timestamp" that indicates the time when the corresponding record was received by the topic. The default value is 'false'. This option is supported in Glue version 4.0 or later.

source

pub fn emit_consumer_lag_metrics(self, input: impl Into<String>) -> Self

When this option is set to 'true', for each batch, it will emit to CloudWatch the metrics for the duration between the time the oldest record was received by the topic and the time it arrives in Glue. The metric's name is "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This option is supported in Glue version 4.0 or later.

source

pub fn set_emit_consumer_lag_metrics(self, input: Option<String>) -> Self

When this option is set to 'true', for each batch, it will emit to CloudWatch the metrics for the duration between the time the oldest record was received by the topic and the time it arrives in Glue. The metric's name is "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This option is supported in Glue version 4.0 or later.

source

pub fn get_emit_consumer_lag_metrics(&self) -> &Option<String>

When this option is set to 'true', for each batch, it will emit to CloudWatch the metrics for the duration between the time the oldest record was received by the topic and the time it arrives in Glue. The metric's name is "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This option is supported in Glue version 4.0 or later.

source

pub fn starting_timestamp(self, input: DateTime) -> Self

The timestamp of the record in the Kafka topic to start reading data from. The possible values are a timestamp string in UTC format of the pattern yyyy-mm-ddTHH:MM:SSZ (where Z represents a UTC timezone offset with a +/-. For example: "2023-04-04T08:00:00+08:00").

Only one of StartingTimestamp or StartingOffsets must be set.

source

pub fn set_starting_timestamp(self, input: Option<DateTime>) -> Self

The timestamp of the record in the Kafka topic to start reading data from. The possible values are a timestamp string in UTC format of the pattern yyyy-mm-ddTHH:MM:SSZ (where Z represents a UTC timezone offset with a +/-. For example: "2023-04-04T08:00:00+08:00").

Only one of StartingTimestamp or StartingOffsets must be set.

source

pub fn get_starting_timestamp(&self) -> &Option<DateTime>

The timestamp of the record in the Kafka topic to start reading data from. The possible values are a timestamp string in UTC format of the pattern yyyy-mm-ddTHH:MM:SSZ (where Z represents a UTC timezone offset with a +/-. For example: "2023-04-04T08:00:00+08:00").

Only one of StartingTimestamp or StartingOffsets must be set.

source

pub fn build(self) -> KafkaStreamingSourceOptions

Consumes the builder and constructs a KafkaStreamingSourceOptions.

Trait Implementations§

source§

impl Clone for KafkaStreamingSourceOptionsBuilder

source§

fn clone(&self) -> KafkaStreamingSourceOptionsBuilder

Returns a copy of the value. Read more
1.0.0 · source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
source§

impl Debug for KafkaStreamingSourceOptionsBuilder

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
source§

impl Default for KafkaStreamingSourceOptionsBuilder

source§

fn default() -> KafkaStreamingSourceOptionsBuilder

Returns the “default value” for a type. Read more
source§

impl PartialEq for KafkaStreamingSourceOptionsBuilder

source§

fn eq(&self, other: &KafkaStreamingSourceOptionsBuilder) -> bool

This method tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

This method tests for !=. The default implementation is almost always sufficient, and should not be overridden without very good reason.
source§

impl StructuralPartialEq for KafkaStreamingSourceOptionsBuilder

Auto Trait Implementations§

Blanket Implementations§

source§

impl<T> Any for T
where T: 'static + ?Sized,

source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
source§

impl<T> Borrow<T> for T
where T: ?Sized,

source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
source§

impl<T> From<T> for T

source§

fn from(t: T) -> T

Returns the argument unchanged.

source§

impl<T> Instrument for T

source§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper. Read more
source§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
source§

impl<T, U> Into<U> for T
where U: From<T>,

source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

source§

impl<T> IntoEither for T

source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
source§

impl<Unshared, Shared> IntoShared<Shared> for Unshared
where Shared: FromUnshared<Unshared>,

source§

fn into_shared(self) -> Shared

Creates a shared type from an unshared type.
source§

impl<T> Same for T

§

type Output = T

Should always be Self
source§

impl<T> ToOwned for T
where T: Clone,

§

type Owned = T

The resulting type after obtaining ownership.
source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

§

type Error = Infallible

The type returned in the event of a conversion error.
source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
source§

impl<T> WithSubscriber for T

source§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper. Read more
source§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper. Read more