Trait rdkafka::consumer::Consumer

pub trait Consumer<C: ConsumerContext = DefaultConsumerContext> {
    fn get_base_consumer(&self) -> &BaseConsumer<C>;

    fn subscribe(&self, topics: &[&str]) -> KafkaResult<()> { ... }
    fn unsubscribe(&self) { ... }
    fn assign(&self, assignment: &TopicPartitionList) -> KafkaResult<()> { ... }
    fn seek<T: Into<Timeout>>(
        &self,
        topic: &str,
        partition: i32,
        offset: Offset,
        timeout: T
    ) -> KafkaResult<()> { ... }
    fn commit(
        &self,
        topic_partition_list: &TopicPartitionList,
        mode: CommitMode
    ) -> KafkaResult<()> { ... }
    fn commit_consumer_state(&self, mode: CommitMode) -> KafkaResult<()> { ... }
    fn commit_message(
        &self,
        message: &BorrowedMessage,
        mode: CommitMode
    ) -> KafkaResult<()> { ... }
    fn store_offset(&self, message: &BorrowedMessage) -> KafkaResult<()> { ... }
    fn store_offsets(&self, tpl: &TopicPartitionList) -> KafkaResult<()> { ... }
    fn subscription(&self) -> KafkaResult<TopicPartitionList> { ... }
    fn assignment(&self) -> KafkaResult<TopicPartitionList> { ... }
    fn committed<T>(&self, timeout: T) -> KafkaResult<TopicPartitionList>
    where
        T: Into<Timeout>,
        Self: Sized,
    { ... }
    fn committed_offsets<T>(
        &self,
        tpl: TopicPartitionList,
        timeout: T
    ) -> KafkaResult<TopicPartitionList>
    where
        T: Into<Timeout>,
    { ... }
    fn offsets_for_timestamp<T>(
        &self,
        timestamp: i64,
        timeout: T
    ) -> KafkaResult<TopicPartitionList>
    where
        T: Into<Timeout>,
        Self: Sized,
    { ... }
    fn offsets_for_times<T>(
        &self,
        timestamps: TopicPartitionList,
        timeout: T
    ) -> KafkaResult<TopicPartitionList>
    where
        T: Into<Timeout>,
        Self: Sized,
    { ... }
    fn position(&self) -> KafkaResult<TopicPartitionList> { ... }
    fn fetch_metadata<T>(
        &self,
        topic: Option<&str>,
        timeout: T
    ) -> KafkaResult<Metadata>
    where
        T: Into<Timeout>,
        Self: Sized,
    { ... }
    fn fetch_watermarks<T>(
        &self,
        topic: &str,
        partition: i32,
        timeout: T
    ) -> KafkaResult<(i64, i64)>
    where
        T: Into<Timeout>,
        Self: Sized,
    { ... }
    fn fetch_group_list<T>(
        &self,
        group: Option<&str>,
        timeout: T
    ) -> KafkaResult<GroupList>
    where
        T: Into<Timeout>,
        Self: Sized,
    { ... }
    fn pause(&self, partitions: &TopicPartitionList) -> KafkaResult<()> { ... }
    fn resume(&self, partitions: &TopicPartitionList) -> KafkaResult<()> { ... }
    fn client(&self) -> &Client<C> { ... }
}

Common trait for all consumers.

Note about object safety

Doing type erasure on consumers (e.g. Box<dyn Consumer>) is expected to be rare. Therefore, the API is optimised for the case where a concrete type is available. As a result, some methods are not available on trait objects, since they are generic.

If type erasure is still needed, the generic methods can be reached through the get_base_consumer method.

Required methods

fn get_base_consumer(&self) -> &BaseConsumer<C>

Returns a reference to the BaseConsumer.


Provided methods

fn subscribe(&self, topics: &[&str]) -> KafkaResult<()>

Subscribe the consumer to a list of topics.
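As a minimal sketch, assuming a local broker and illustrative topic names, a BaseConsumer can be created from a ClientConfig and subscribed like this (later sketches on this page reuse this consumer variable):

    use rdkafka::config::ClientConfig;
    use rdkafka::consumer::{BaseConsumer, Consumer};

    // Broker address, group id, and topic names below are placeholders.
    let consumer: BaseConsumer = ClientConfig::new()
        .set("bootstrap.servers", "localhost:9092")
        .set("group.id", "example-group")
        .create()
        .expect("consumer creation failed");

    consumer
        .subscribe(&["topic-a", "topic-b"])
        .expect("subscription failed");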

fn unsubscribe(&self)

Unsubscribe the current subscription list.

fn assign(&self, assignment: &TopicPartitionList) -> KafkaResult<()>

Manually assign topics and partitions to the consumer. If used, automatic consumer rebalance won't be activated.
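For example, a sketch of manually assigning two partitions of a hypothetical topic:

    use rdkafka::TopicPartitionList;

    // Hypothetical topic and partitions; rebalance callbacks will not fire
    // for partitions assigned this way.
    let mut tpl = TopicPartitionList::new();
    tpl.add_partition("events", 0);
    tpl.add_partition("events", 1);
    consumer.assign(&tpl).expect("assignment failed");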

fn seek<T: Into<Timeout>>(
    &self,
    topic: &str,
    partition: i32,
    offset: Offset,
    timeout: T
) -> KafkaResult<()>

Seek to offset for the specified topic and partition. After a successful call to seek, the next poll of the consumer will return the message with offset.
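A sketch of seeking an assigned partition back to a fixed offset; the topic, partition, offset, and timeout are illustrative:

    use rdkafka::Offset;
    use std::time::Duration;

    consumer
        .seek("events", 0, Offset::Offset(42), Duration::from_secs(5))
        .expect("seek failed");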

fn commit(
    &self,
    topic_partition_list: &TopicPartitionList,
    mode: CommitMode
) -> KafkaResult<()>

Commits the offsets in the provided topic-partition list. The commit can be sync (blocking) or async. Notice that when a specific offset is committed, all the previous offsets in the same partition are considered committed as well. Use this method only if you are processing messages in order.

fn commit_consumer_state(&self, mode: CommitMode) -> KafkaResult<()>

Commit the current consumer state. Notice that if the consumer fails after a message has been received, but before the message has been processed by the user code, this might lead to data loss. Check the "at-least-once delivery" section in the readme for more information.

fn commit_message(
    &self,
    message: &BorrowedMessage,
    mode: CommitMode
) -> KafkaResult<()>

Commit the provided message. Note that this will also automatically commit every message with lower offset within the same partition.
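As a sketch of at-least-once processing with a BaseConsumer (topic handling and error reporting are illustrative), each message is committed only after it has been handled:

    use rdkafka::consumer::CommitMode;
    use rdkafka::Message;

    for message in consumer.iter() {
        match message {
            Ok(m) => {
                // Handle the payload first, then commit; committing m also
                // marks every lower offset in the same partition as committed.
                println!("received: {:?}", m.payload_view::<str>());
                consumer
                    .commit_message(&m, CommitMode::Async)
                    .expect("commit failed");
            }
            Err(e) => eprintln!("Kafka error: {}", e),
        }
    }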

fn store_offset(&self, message: &BorrowedMessage) -> KafkaResult<()>

Stores the offset of this message, to be used on the next (auto)commit. When using this, enable.auto.offset.store should be set to false in the config.
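A sketch of the offset-store pattern, assuming a consumer configured with enable.auto.offset.store set to false and automatic commits left enabled:

    use rdkafka::config::ClientConfig;
    use rdkafka::consumer::{BaseConsumer, Consumer};
    use std::time::Duration;

    let consumer: BaseConsumer = ClientConfig::new()
        .set("bootstrap.servers", "localhost:9092")
        .set("group.id", "example-group")
        .set("enable.auto.commit", "true")
        .set("enable.auto.offset.store", "false")
        .create()
        .expect("consumer creation failed");
    consumer.subscribe(&["events"]).expect("subscription failed");

    // Once a message has been fully processed, mark its offset as ready to
    // be picked up by the next automatic commit.
    if let Some(Ok(m)) = consumer.poll(Duration::from_millis(100)) {
        consumer.store_offset(&m).expect("failed to store offset");
    }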

fn store_offsets(&self, tpl: &TopicPartitionList) -> KafkaResult<()>

Stores offsets to be used on the next (auto)commit. When using this, enable.auto.offset.store should be set to false in the config.

fn subscription(&self) -> KafkaResult<TopicPartitionList>

Returns the current topic subscription.

fn assignment(&self) -> KafkaResult<TopicPartitionList>

Returns the current partition assignment.

fn committed<T>(&self, timeout: T) -> KafkaResult<TopicPartitionList> where
    T: Into<Timeout>,
    Self: Sized

Retrieve committed offsets for topics and partitions.

fn committed_offsets<T>(
    &self,
    tpl: TopicPartitionList,
    timeout: T
) -> KafkaResult<TopicPartitionList> where
    T: Into<Timeout>, 

Retrieve committed offsets for specified topics and partitions.

fn offsets_for_timestamp<T>(
    &self,
    timestamp: i64,
    timeout: T
) -> KafkaResult<TopicPartitionList> where
    T: Into<Timeout>,
    Self: Sized

Look up the offsets for this consumer's partitions by timestamp.

fn offsets_for_times<T>(
    &self,
    timestamps: TopicPartitionList,
    timeout: T
) -> KafkaResult<TopicPartitionList> where
    T: Into<Timeout>,
    Self: Sized

Look up the offsets for the specified partitions by timestamp.
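For example, a sketch that finds, for one hypothetical partition, the first offset whose timestamp is at or after a given time. In the input list the offset field carries a millisecond timestamp rather than an offset:

    use rdkafka::{Offset, TopicPartitionList};
    use std::time::Duration;

    // The Offset field holds a timestamp in milliseconds since the Unix epoch.
    // The return value of add_partition_offset (if any, depending on the
    // rdkafka version) is intentionally ignored in this sketch.
    let mut timestamps = TopicPartitionList::new();
    let _ = timestamps.add_partition_offset("events", 0, Offset::Offset(1_600_000_000_000));

    let offsets = consumer
        .offsets_for_times(timestamps, Duration::from_secs(5))
        .expect("offsets_for_times failed");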

fn position(&self) -> KafkaResult<TopicPartitionList>

Retrieve current positions (offsets) for topics and partitions.

fn fetch_metadata<T>(
    &self,
    topic: Option<&str>,
    timeout: T
) -> KafkaResult<Metadata> where
    T: Into<Timeout>,
    Self: Sized

Returns the metadata information for the specified topic, or for all topics in the cluster if no topic is specified.
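A sketch that lists every topic in the cluster along with its partition count (the timeout is arbitrary):

    use std::time::Duration;

    let metadata = consumer
        .fetch_metadata(None, Duration::from_secs(5))
        .expect("metadata fetch failed");

    for topic in metadata.topics() {
        println!("{}: {} partitions", topic.name(), topic.partitions().len());
    }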

fn fetch_watermarks<T>(
    &self,
    topic: &str,
    partition: i32,
    timeout: T
) -> KafkaResult<(i64, i64)> where
    T: Into<Timeout>,
    Self: Sized

Returns the low and high watermarks (the oldest and the latest offsets) for the specified topic and partition.
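For example, a sketch that reports how many messages a hypothetical partition currently holds:

    use std::time::Duration;

    let (low, high) = consumer
        .fetch_watermarks("events", 0, Duration::from_secs(5))
        .expect("failed to fetch watermarks");
    println!("partition 0 spans offsets {}..{} ({} messages)", low, high, high - low);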

fn fetch_group_list<T>(
    &self,
    group: Option<&str>,
    timeout: T
) -> KafkaResult<GroupList> where
    T: Into<Timeout>,
    Self: Sized

Returns the group membership information for the given group. If no group is specified, all groups will be returned.
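A sketch of listing all consumer groups known to the cluster (passing None requests every group):

    use std::time::Duration;

    let group_list = consumer
        .fetch_group_list(None, Duration::from_secs(5))
        .expect("failed to fetch group list");

    for group in group_list.groups() {
        println!("group {} ({} members)", group.name(), group.members().len());
    }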

fn pause(&self, partitions: &TopicPartitionList) -> KafkaResult<()>

Pause consumption for the provided list of partitions.

fn resume(&self, partitions: &TopicPartitionList) -> KafkaResult<()>

Resume consumption for the provided list of partitions.
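As a sketch, everything currently assigned can be paused and later resumed, for example to apply backpressure:

    // Pause every partition in the current assignment...
    let assignment = consumer.assignment().expect("failed to get assignment");
    consumer.pause(&assignment).expect("pause failed");

    // ...do other work, then pick up where consumption left off.
    consumer.resume(&assignment).expect("resume failed");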

fn client(&self) -> &Client<C>

Returns the Client underlying this consumer.


Implementors

impl<C: ConsumerContext> Consumer<StreamConsumerContext<C>> for StreamConsumer<C>

impl<C: ConsumerContext> Consumer<C> for BaseConsumer<C>

fn offsets_for_times<T: Into<Timeout>>(
    &self,
    timestamps: TopicPartitionList,
    timeout: T
) -> KafkaResult<TopicPartitionList>

timestamps is a TopicPartitionList with timestamps instead of offsets.
