Struct rdkafka::producer::BaseProducer [−][src]
Lowest level Kafka producer.
The BaseProducer
needs to be polled at regular intervals in order to serve
queued delivery report callbacks (for more information, refer to the
module-level documentation). This producer can be cheaply cloned to create a
new reference to the same underlying producer.
Example usage
This code will send a message to Kafka. No custom ProducerContext
is
specified, so the DefaultProducerContext
will be used. To see how to use
a producer context, refer to the examples in the examples
folder.
use rdkafka::config::ClientConfig;
use rdkafka::producer::{BaseProducer, BaseRecord, Producer};
use std::time::Duration;

let producer: BaseProducer = ClientConfig::new()
    .set("bootstrap.servers", "kafka:9092")
    .create()
    .expect("Producer creation error");

producer.send(
    BaseRecord::to("destination_topic")
        .payload("this is the payload")
        .key("and this is a key"),
).expect("Failed to enqueue");

// Poll at regular intervals to process all the asynchronous delivery events.
for _ in 0..10 {
    producer.poll(Duration::from_millis(100));
}

// And/or flush the producer before dropping it.
producer.flush(Duration::from_secs(1));
Implementations
impl<C> BaseProducer<C> where
    C: ProducerContext,
[src]
pub fn poll<T: Into<Timeout>>(&self, timeout: T) -> i32
[src]
Polls the producer, returning the number of events served.
Regular calls to poll
are required to process the events and execute
the message delivery callbacks.
pub fn send<'a, K: ?Sized, P: ?Sized>(
    &self,
    record: BaseRecord<'a, K, P, C::DeliveryOpaque>
) -> Result<(), (KafkaError, BaseRecord<'a, K, P, C::DeliveryOpaque>)> where
    K: ToBytes,
    P: ToBytes,
[src]
Sends a message to Kafka.
Message fields such as key, payload, partition, timestamp etc. are
provided to this method via a BaseRecord
. If the message is
correctly enqueued in the producer’s memory buffer, the method will take
ownership of the record and return immediately; in case of failure to
enqueue, the original record is returned, alongside an error code. If
the message fails to be produced after being enqueued in the buffer, the
ProducerContext::delivery
method will be called asynchronously, with
the provided ProducerContext::DeliveryOpaque
.
When no partition is specified the underlying Kafka library picks a partition based on a hash of the key. If no key is specified, a random partition will be used. To correctly handle errors, the delivery callback should be implemented.
Note that this method will never block.
Trait Implementations
impl<C> Clone for BaseProducer<C> where
    C: ProducerContext,
[src]
fn clone(&self) -> BaseProducer<C>
[src]
pub fn clone_from(&mut self, source: &Self)
1.0.0[src]
impl FromClientConfig for BaseProducer<DefaultProducerContext>
[src]
fn from_config(
    config: &ClientConfig
) -> KafkaResult<BaseProducer<DefaultProducerContext>>
[src]
Creates a new BaseProducer
starting from a configuration.
impl<C> FromClientConfigAndContext<C> for BaseProducer<C> where
    C: ProducerContext,
[src]
fn from_config_and_context(
    config: &ClientConfig,
    context: C
) -> KafkaResult<BaseProducer<C>>
[src]
Creates a new BaseProducer
starting from a configuration and a
context.
impl<C> Producer<C> for BaseProducer<C> where
    C: ProducerContext,
[src]
fn client(&self) -> &Client<C>
[src]
fn flush<T: Into<Timeout>>(&self, timeout: T)
[src]
fn in_flight_count(&self) -> i32
[src]
fn init_transactions<T: Into<Timeout>>(&self, timeout: T) -> KafkaResult<()>
[src]
fn begin_transaction(&self) -> KafkaResult<()>
[src]
fn send_offsets_to_transaction<T: Into<Timeout>>(
    &self,
    offsets: &TopicPartitionList,
    cgm: &ConsumerGroupMetadata,
    timeout: T
) -> KafkaResult<()>
[src]
fn commit_transaction<T: Into<Timeout>>(&self, timeout: T) -> KafkaResult<()>
[src]
fn abort_transaction<T: Into<Timeout>>(&self, timeout: T) -> KafkaResult<()>
[src]
fn context(&self) -> &Arc<C>
[src]
Auto Trait Implementations
impl<C> RefUnwindSafe for BaseProducer<C> where
    C: RefUnwindSafe,
impl<C> Send for BaseProducer<C>
impl<C> Sync for BaseProducer<C>
impl<C> Unpin for BaseProducer<C>
impl<C> UnwindSafe for BaseProducer<C> where
    C: RefUnwindSafe,
Blanket Implementations
impl<T> Any for T where
    T: 'static + ?Sized,
[src]
impl<T> Borrow<T> for T where
    T: ?Sized,
[src]
impl<T> BorrowMut<T> for T where
    T: ?Sized,
[src]
pub fn borrow_mut(&mut self) -> &mut T
[src]
impl<T> From<T> for T
[src]
impl<T, U> Into<U> for T where
    U: From<T>,
[src]
impl<T> ToOwned for T where
    T: Clone,
[src]
type Owned = T
The resulting type after obtaining ownership.
pub fn to_owned(&self) -> T
[src]
pub fn clone_into(&self, target: &mut T)
[src]
impl<T, U> TryFrom<U> for T where
    U: Into<T>,
[src]
type Error = Infallible
The type returned in the event of a conversion error.
pub fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>
[src]
impl<T, U> TryInto<U> for T where
    U: TryFrom<T>,
[src]