Struct rdkafka::consumer::base_consumer::BaseConsumer
Low level wrapper around the librdkafka consumer. This consumer requires to be periodically polled to make progress on rebalance, callbacks and to receive messages.
Methods
impl<C: ConsumerContext> BaseConsumer<C>
[src]
pub fn context(&self) -> &C
[src]
Returns the context used to create this consumer.
pub fn poll<T: Into<Timeout>>(
    &self,
    timeout: T
) -> Option<KafkaResult<BorrowedMessage>>
[src]
Polls the consumer for new messages.
It won't block for longer than the specified timeout. Use a zero Duration for a non-blocking call. With no timeout, it blocks until an event is received.
This method should be called at regular intervals, even if no message is expected, to serve any queued callbacks waiting to be called. This is especially important for automatic consumer rebalance, as the rebalance function will be executed by the thread calling the poll() function.
Lifetime
The returned message lives in the memory of the consumer and cannot outlive it.
pub fn iter(&self) -> Iter<C>
[src]
Returns an iterator over the available messages.
It repeatedly calls poll with no timeout.
Note that it's also possible to iterate over the consumer directly.
Examples
All these are equivalent and will receive messages without timing out.
loop { let message = consumer.poll(None); // Handle the message }
for message in consumer.iter() { // Handle the message }
for message in &consumer { // Handle the message }
Trait Implementations
impl<C: ConsumerContext> Consumer<C> for BaseConsumer<C>
[src]
fn get_base_consumer(&self) -> &BaseConsumer<C>
[src]
fn subscribe(&self, topics: &[&str]) -> KafkaResult<()>
[src]
fn unsubscribe(&self)
[src]
fn assign(&self, assignment: &TopicPartitionList) -> KafkaResult<()>
[src]
fn seek<T: Into<Timeout>>(
    &self,
    topic: &str,
    partition: i32,
    offset: Offset,
    timeout: T
) -> KafkaResult<()>
[src]
fn commit(
    &self,
    topic_partition_list: &TopicPartitionList,
    mode: CommitMode
) -> KafkaResult<()>
[src]
fn commit_consumer_state(&self, mode: CommitMode) -> KafkaResult<()>
[src]
fn commit_message(
    &self,
    message: &BorrowedMessage,
    mode: CommitMode
) -> KafkaResult<()>
[src]
fn store_offset(&self, message: &BorrowedMessage) -> KafkaResult<()>
[src]
fn store_offsets(&self, tpl: &TopicPartitionList) -> KafkaResult<()>
[src]
fn subscription(&self) -> KafkaResult<TopicPartitionList>
[src]
fn assignment(&self) -> KafkaResult<TopicPartitionList>
[src]
fn committed<T: Into<Timeout>>(
    &self,
    timeout: T
) -> KafkaResult<TopicPartitionList>
[src]
fn committed_offsets<T: Into<Timeout>>(
    &self,
    tpl: TopicPartitionList,
    timeout: T
) -> KafkaResult<TopicPartitionList>
[src]
fn offsets_for_timestamp<T: Into<Timeout>>(
    &self,
    timestamp: i64,
    timeout: T
) -> KafkaResult<TopicPartitionList>
[src]
fn offsets_for_times<T: Into<Timeout>>(
    &self,
    timestamps: TopicPartitionList,
    timeout: T
) -> KafkaResult<TopicPartitionList>
[src]
timestamps is a TopicPartitionList with timestamps instead of offsets.
fn position(&self) -> KafkaResult<TopicPartitionList>
[src]
fn fetch_metadata<T: Into<Timeout>>(
    &self,
    topic: Option<&str>,
    timeout: T
) -> KafkaResult<Metadata>
[src]
fn fetch_watermarks<T: Into<Timeout>>(
    &self,
    topic: &str,
    partition: i32,
    timeout: T
) -> KafkaResult<(i64, i64)>
[src]
fn fetch_group_list<T: Into<Timeout>>(
    &self,
    group: Option<&str>,
    timeout: T
) -> KafkaResult<GroupList>
[src]
fn pause(&self, partitions: &TopicPartitionList) -> KafkaResult<()>
[src]
fn resume(&self, partitions: &TopicPartitionList) -> KafkaResult<()>
[src]
impl<C: ConsumerContext> Drop for BaseConsumer<C>
[src]
impl FromClientConfig for BaseConsumer
[src]
fn from_config(config: &ClientConfig) -> KafkaResult<BaseConsumer>
[src]
impl<C: ConsumerContext> FromClientConfigAndContext<C> for BaseConsumer<C>
[src]
Creates a new BaseConsumer starting from a ClientConfig.
fn from_config_and_context(
    config: &ClientConfig,
    context: C
) -> KafkaResult<BaseConsumer<C>>
[src]
impl<'a, C: ConsumerContext + 'a> IntoIterator for &'a BaseConsumer<C>
[src]
type Item = KafkaResult<BorrowedMessage<'a>>
The type of the elements being iterated over.
type IntoIter = Iter<'a, C>
Which kind of iterator are we turning this into?
fn into_iter(self) -> Self::IntoIter
[src]
Auto Trait Implementations
impl<C> RefUnwindSafe for BaseConsumer<C> where
    C: RefUnwindSafe,
impl<C> Send for BaseConsumer<C>
impl<C> Sync for BaseConsumer<C>
impl<C> Unpin for BaseConsumer<C>
impl<C> UnwindSafe for BaseConsumer<C> where
    C: UnwindSafe,
Blanket Implementations
impl<T> Any for T where
    T: 'static + ?Sized,
[src]
impl<T> Borrow<T> for T where
    T: ?Sized,
[src]
impl<T> BorrowMut<T> for T where
    T: ?Sized,
[src]
fn borrow_mut(&mut self) -> &mut T
[src]
impl<T> From<T> for T
[src]
impl<T, U> Into<U> for T where
    U: From<T>,
[src]
impl<T, U> TryFrom<U> for T where
    U: Into<T>,
[src]
type Error = Infallible
The type returned in the event of a conversion error.
fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>
[src]
impl<T, U> TryInto<U> for T where
    U: TryFrom<T>,
[src]