pub struct Consumer<'a, const MCACHE_DEPTH: usize, const CHUNK_COUNT: usize, const CHUNK_SIZE: usize> { /* private fields */ }

Implementations§
Source§impl<'a, const MCACHE_DEPTH: usize, const CHUNK_COUNT: usize, const CHUNK_SIZE: usize> Consumer<'a, MCACHE_DEPTH, CHUNK_COUNT, CHUNK_SIZE>
impl<'a, const MCACHE_DEPTH: usize, const CHUNK_COUNT: usize, const CHUNK_SIZE: usize> Consumer<'a, MCACHE_DEPTH, CHUNK_COUNT, CHUNK_SIZE>
Source
pub fn new(
mcache: &'a MCache<MCACHE_DEPTH>,
dcache: &'a DCache<CHUNK_COUNT, CHUNK_SIZE>,
initial_seq: u64,
) -> Self
pub fn new( mcache: &'a MCache<MCACHE_DEPTH>, dcache: &'a DCache<CHUNK_COUNT, CHUNK_SIZE>, initial_seq: u64, ) -> Self
Create a consumer starting at the given sequence number.
Source
pub fn with_flow_control(
mcache: &'a MCache<MCACHE_DEPTH>,
dcache: &'a DCache<CHUNK_COUNT, CHUNK_SIZE>,
fctl: &'a Fctl,
initial_seq: u64,
) -> Self
pub fn with_flow_control( mcache: &'a MCache<MCACHE_DEPTH>, dcache: &'a DCache<CHUNK_COUNT, CHUNK_SIZE>, fctl: &'a Fctl, initial_seq: u64, ) -> Self
Create a consumer with credit-based flow control.
The consumer will release a credit after consuming each fragment.
Use the same fctl instance as the producer.
Source
pub fn with_metrics(self, metrics: &'a Metrics) -> Self
pub fn with_metrics(self, metrics: &'a Metrics) -> Self
Attach metrics tracking to this consumer.
Returns a new consumer with the same configuration plus metrics.
Source
pub fn poll(&mut self) -> Result<Option<Fragment<'a, CHUNK_SIZE>>, TangoError>
pub fn poll(&mut self) -> Result<Option<Fragment<'a, CHUNK_SIZE>>, TangoError>
Poll for the next fragment without blocking.
Returns:
- Ok(Some(fragment)) if a fragment was available
- Ok(None) if the sequence is not ready yet
- Err(TangoError::Overrun) if the consumer was lapped
Source
pub fn wait(&mut self) -> Result<Option<Fragment<'a, CHUNK_SIZE>>, TangoError>
pub fn wait(&mut self) -> Result<Option<Fragment<'a, CHUNK_SIZE>>, TangoError>
Busy-wait for the next fragment.
Returns:
- Ok(Some(fragment)) if a fragment was received
- Ok(None) if the mcache was stopped
- Err(TangoError::Overrun) if the consumer was lapped
Source
pub fn release_credits(&self, count: u64)
pub fn release_credits(&self, count: u64)
Manually release credits (useful for batch processing).
Call this after you’re done processing a batch of fragments if you want to delay credit release for better throughput.
Source
pub fn poll_batch(
&mut self,
max_count: usize,
) -> Result<Vec<Fragment<'a, CHUNK_SIZE>>, TangoError>
pub fn poll_batch( &mut self, max_count: usize, ) -> Result<Vec<Fragment<'a, CHUNK_SIZE>>, TangoError>
Poll for multiple fragments at once, up to max_count.
Returns a vector of fragments (up to max_count) that were available.
Stops on the first NotReady or error.
This is more efficient than calling poll() in a loop when you expect
multiple messages to be available.
Only available with the std feature.
Trait Implementations§
Source§impl<'a, const MCACHE_DEPTH: usize, const CHUNK_COUNT: usize, const CHUNK_SIZE: usize> Debug for Consumer<'a, MCACHE_DEPTH, CHUNK_COUNT, CHUNK_SIZE>
impl<'a, const MCACHE_DEPTH: usize, const CHUNK_COUNT: usize, const CHUNK_SIZE: usize> Debug for Consumer<'a, MCACHE_DEPTH, CHUNK_COUNT, CHUNK_SIZE>
Source§impl<'a, const MCACHE_DEPTH: usize, const CHUNK_COUNT: usize, const CHUNK_SIZE: usize> IntoIterator for Consumer<'a, MCACHE_DEPTH, CHUNK_COUNT, CHUNK_SIZE>
impl<'a, const MCACHE_DEPTH: usize, const CHUNK_COUNT: usize, const CHUNK_SIZE: usize> IntoIterator for Consumer<'a, MCACHE_DEPTH, CHUNK_COUNT, CHUNK_SIZE>
Source§
fn into_iter(self) -> Self::IntoIter
fn into_iter(self) -> Self::IntoIter
Convert this consumer into an iterator that busy-waits for messages.
The iterator yields Result<Fragment, TangoError> and will:
- Yield Ok(fragment) for each successfully consumed message
- Yield Err(TangoError::Overrun) if the consumer was lapped
- Return None when the MCache is stopped
§Example
for result in consumer {
match result {
Ok(fragment) => println!("Got: {:?}", fragment.payload.as_slice()),
Err(e) => eprintln!("Error: {}", e),
}
}