Struct rdkafka::consumer::base_consumer::BaseConsumer [−][src]
A low-level consumer that requires manual polling.
This consumer must be periodically polled to make progress on rebalancing, callbacks and to receive messages.
Implementations
impl<C> BaseConsumer<C> where
    C: ConsumerContext,
[src]
pub fn poll<T: Into<Timeout>>(
    &self,
    timeout: T
) -> Option<KafkaResult<BorrowedMessage<'_>>>
[src]
Polls the consumer for new messages.
It won’t block for more than the specified timeout. Use a zero Duration for a non-blocking call. With no timeout it blocks until an event is received.
This method should be called at regular intervals, even if no message is expected, to serve any queued callbacks waiting to be called. This is especially important for automatic consumer rebalance, as the rebalance function will be executed by the thread calling the poll() function.
Lifetime
The returned message lives in the memory of the consumer and cannot outlive it.
pub fn iter(&self) -> Iter<'_, C>
[src]
Notable traits for Iter<'a, C>:
impl<'a, C> Iterator for Iter<'a, C> where
    C: ConsumerContext, type Item = KafkaResult<BorrowedMessage<'a>>;
Returns an iterator over the available messages.
It repeatedly calls poll with no timeout.
Note that it’s also possible to iterate over the consumer directly.
Examples
All these are equivalent and will receive messages without timing out.
loop { let message = consumer.poll(None); // Handle the message }
for message in consumer.iter() { // Handle the message }
for message in &consumer { // Handle the message }
pub fn split_partition_queue(
    self: &Arc<Self>,
    topic: &str,
    partition: i32
) -> Option<PartitionQueue<C>>
[src]
Splits messages for the specified partition into their own queue.
If the topic or partition is invalid, returns None.
After calling this method, newly-fetched messages for the specified partition will be returned via PartitionQueue::poll rather than BaseConsumer::poll. Note that there may be buffered messages for the specified partition that will continue to be returned by BaseConsumer::poll. For best results, call split_partition_queue before the first call to BaseConsumer::poll.
You must continue to call BaseConsumer::poll, even if no messages are expected, to serve callbacks.
Note that calling Consumer::assign will deactivate any existing partition queues. You will need to call this method for every partition that should be split after every call to assign.
Beware that this method is implemented for &Arc<Self>, not &self. You will need to wrap your consumer in an Arc in order to call this method. This design permits moving the partition queue to another thread while ensuring the partition queue does not outlive the consumer.
Trait Implementations
impl<C> Consumer<C> for BaseConsumer<C> where
    C: ConsumerContext,
[src]
fn client(&self) -> &Client<C>
[src]
fn group_metadata(&self) -> Option<ConsumerGroupMetadata>
[src]
fn subscribe(&self, topics: &[&str]) -> KafkaResult<()>
[src]
fn unsubscribe(&self)
[src]
fn assign(&self, assignment: &TopicPartitionList) -> KafkaResult<()>
[src]
fn seek<T: Into<Timeout>>(
    &self,
    topic: &str,
    partition: i32,
    offset: Offset,
    timeout: T
) -> KafkaResult<()>
[src]
fn commit(
    &self,
    topic_partition_list: &TopicPartitionList,
    mode: CommitMode
) -> KafkaResult<()>
[src]
fn commit_consumer_state(&self, mode: CommitMode) -> KafkaResult<()>
[src]
fn commit_message(
    &self,
    message: &BorrowedMessage<'_>,
    mode: CommitMode
) -> KafkaResult<()>
[src]
fn store_offset(&self, message: &BorrowedMessage<'_>) -> KafkaResult<()>
[src]
fn store_offsets(&self, tpl: &TopicPartitionList) -> KafkaResult<()>
[src]
fn subscription(&self) -> KafkaResult<TopicPartitionList>
[src]
fn assignment(&self) -> KafkaResult<TopicPartitionList>
[src]
fn committed<T: Into<Timeout>>(
    &self,
    timeout: T
) -> KafkaResult<TopicPartitionList>
[src]
fn committed_offsets<T: Into<Timeout>>(
    &self,
    tpl: TopicPartitionList,
    timeout: T
) -> KafkaResult<TopicPartitionList>
[src]
fn offsets_for_timestamp<T: Into<Timeout>>(
    &self,
    timestamp: i64,
    timeout: T
) -> KafkaResult<TopicPartitionList>
[src]
fn offsets_for_times<T: Into<Timeout>>(
    &self,
    timestamps: TopicPartitionList,
    timeout: T
) -> KafkaResult<TopicPartitionList>
[src]
fn position(&self) -> KafkaResult<TopicPartitionList>
[src]
fn fetch_metadata<T: Into<Timeout>>(
    &self,
    topic: Option<&str>,
    timeout: T
) -> KafkaResult<Metadata>
[src]
fn fetch_watermarks<T: Into<Timeout>>(
    &self,
    topic: &str,
    partition: i32,
    timeout: T
) -> KafkaResult<(i64, i64)>
[src]
fn fetch_group_list<T: Into<Timeout>>(
    &self,
    group: Option<&str>,
    timeout: T
) -> KafkaResult<GroupList>
[src]
fn pause(&self, partitions: &TopicPartitionList) -> KafkaResult<()>
[src]
fn resume(&self, partitions: &TopicPartitionList) -> KafkaResult<()>
[src]
fn context(&self) -> &Arc<C>
[src]
impl<C> Drop for BaseConsumer<C> where
    C: ConsumerContext,
[src]
impl FromClientConfig for BaseConsumer
[src]
fn from_config(config: &ClientConfig) -> KafkaResult<BaseConsumer>
[src]
impl<C: ConsumerContext> FromClientConfigAndContext<C> for BaseConsumer<C>
[src]
Creates a new BaseConsumer starting from a ClientConfig.
fn from_config_and_context(
    config: &ClientConfig,
    context: C
) -> KafkaResult<BaseConsumer<C>>
[src]
impl<'a, C> IntoIterator for &'a BaseConsumer<C> where
    C: ConsumerContext,
[src]
type Item = KafkaResult<BorrowedMessage<'a>>
The type of the elements being iterated over.
type IntoIter = Iter<'a, C>
Which kind of iterator are we turning this into?
fn into_iter(self) -> Self::IntoIter
[src]
Auto Trait Implementations
impl<C> RefUnwindSafe for BaseConsumer<C> where
    C: RefUnwindSafe,
impl<C> Send for BaseConsumer<C>
impl<C> Sync for BaseConsumer<C>
impl<C> Unpin for BaseConsumer<C>
impl<C> UnwindSafe for BaseConsumer<C> where
    C: RefUnwindSafe,
Blanket Implementations
impl<T> Any for T where
    T: 'static + ?Sized,
[src]
impl<T> Borrow<T> for T where
    T: ?Sized,
[src]
impl<T> BorrowMut<T> for T where
    T: ?Sized,
[src]
pub fn borrow_mut(&mut self) -> &mut T
[src]
impl<T> From<T> for T
[src]
impl<T, U> Into<U> for T where
    U: From<T>,
[src]
impl<T, U> TryFrom<U> for T where
    U: Into<T>,
[src]
type Error = Infallible
The type returned in the event of a conversion error.
pub fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>
[src]
impl<T, U> TryInto<U> for T where
    U: TryFrom<T>,
[src]