var searchIndex = {}; searchIndex["kafka"] = {"doc":"Clients for communicating with a [Kafka](http://kafka.apache.org/)\ncluster.","items":[[0,"error","kafka","Error struct and methods",null,null],[4,"Error","kafka::error","The various errors this library can produce.",null,null],[13,"Io","","Input/Output error while communicating with Kafka",0,null],[13,"Kafka","","An error as reported by a remote Kafka server",0,null],[13,"InvalidInputSnappy","","Failure to decode a snappy compressed response from Kafka",0,null],[13,"UnexpectedEOF","","Failure to decode a response due to an insufficient number of bytes available",0,null],[13,"CodecError","","Failure to decode or encode a response or request respectively",0,null],[13,"StringDecodeError","","Failure to decode a byte sequence into a valid utf8 string",0,null],[13,"NoHostReachable","","Unable to reach any host",0,null],[4,"KafkaCode","","Various errors reported by a remote Kafka server.\nSee also [Kafka Errors](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes)",null,null],[13,"Unknown","","An unexpected server error",1,null],[13,"OffsetOutOfRange","","The requested offset is outside the range of offsets\nmaintained by the server for the given topic/partition",1,null],[13,"InvalidMessage","","This indicates that a message's contents do not match its CRC",1,null],[13,"UnknownTopicOrPartition","","This request is for a topic or partition that does not exist\non this broker.",1,null],[13,"InvalidMessageSize","","The message has a negative size",1,null],[13,"LeaderNotAvailable","","This error is thrown if we are in the middle of a leadership\nelection and there is currently no leader for this partition\nand hence it is unavailable for writes.",1,null],[13,"NotLeaderForPartition","","This error is thrown if the client attempts to send messages\nto a replica that is not the leader for some partition. It\nindicates that the client's metadata is out of date.",1,null],[13,"RequestTimedOut","","This error is thrown if the request exceeds the user-specified\ntime limit in the request.",1,null],[13,"BrokerNotAvailable","","This is not a client-facing error and is used mostly by tools\nwhen a broker is not alive.",1,null],[13,"ReplicaNotAvailable","","If a replica is expected on a broker, but is not (this can be\nsafely ignored).",1,null],[13,"MessageSizeTooLarge","","The server has a configurable maximum message size to avoid\nunbounded memory allocation. This error is thrown if the\nclient attempts to produce a message larger than this maximum.",1,null],
[13,"StaleControllerEpochCode","","Internal error code for broker-to-broker communication.",1,null],[13,"OffsetMetadataTooLargeCode","","If you specify a string larger than the configured maximum for\noffset metadata",1,null],[13,"OffsetsLoadInProgressCode","","The broker returns this error code for an offset fetch request\nif it is still loading offsets (after a leader change for that\noffsets topic partition), or in response to group membership\nrequests (such as heartbeats) when group metadata is being\nloaded by the coordinator.",1,null],[13,"ConsumerCoordinatorNotAvailableCode","","The broker returns this error code for group coordinator\nrequests, offset commits, and most group management requests\nif the offsets topic has not yet been created, or if the group\ncoordinator is not active.",1,null],[13,"NotCoordinatorForConsumerCode","","The broker returns this error code if it receives an offset\nfetch or commit request for a group that it is not a\ncoordinator for.",1,null],[13,"InvalidTopicCode","","For a request which attempts to access an invalid topic\n(e.g. one which has an illegal name), or if an attempt is made\nto write to an internal topic (such as the consumer offsets\ntopic).",1,null],[13,"RecordListTooLargeCode","","If a message batch in a produce request exceeds the maximum\nconfigured segment size.",1,null],[13,"NotEnoughReplicasCode","","Returned from a produce request when the number of in-sync\nreplicas is lower than the configured minimum and requiredAcks is\n-1.",1,null],[13,"NotEnoughReplicasAfterAppendCode","","Returned from a produce request when the message was written\nto the log, but with fewer in-sync replicas than required.",1,null],[13,"InvalidRequiredAcksCode","","Returned from a produce request if the requested requiredAcks is\ninvalid (anything other than -1, 1, or 0).",1,null],[13,"IllegalGenerationCode","","Returned from group membership requests (such as heartbeats) when\nthe generation id provided in the request is not the current\ngeneration.",1,null],[13,"InconsistentGroupProtocolCode","","Returned in join group when the member provides a protocol type or\nset of protocols which is not compatible with the current group.",1,null],[13,"InvalidGroupIdCode","","Returned in join group when the groupId is empty or null.",1,null],[13,"UnknownMemberIdCode","","Returned from group requests (offset commits/fetches, heartbeats,\netc.) when the memberId is not in the current generation.",1,null],[13,"InvalidSessionTimeoutCode","","Returned in join group when the requested session timeout is outside\nof the allowed range on the broker",1,null],[13,"RebalanceInProgressCode","","Returned in heartbeat requests when the coordinator has begun\nrebalancing the group. This indicates to the client that it\nshould rejoin the group.",1,null],
[13,"InvalidCommitOffsetSizeCode","","This error indicates that an offset commit was rejected because of\noversize metadata.",1,null],[13,"TopicAuthorizationFailedCode","","Returned by the broker when the client is not authorized to access\nthe requested topic.",1,null],[13,"GroupAuthorizationFailedCode","","Returned by the broker when the client is not authorized to access\na particular groupId.",1,null],[13,"ClusterAuthorizationFailedCode","","Returned by the broker when the client is not authorized to use an\ninter-broker or administrative API.",1,null],[6,"Result","","A type for results generated by this crate's functions where the `Err` type\nis hard-wired to `enums::Error`.",null,null],[11,"fmt","","",0,null],[11,"clone","","",1,null],[11,"fmt","","",1,null],[11,"from","","",0,{"inputs":[{"name":"error"}],"output":{"name":"error"}}],[11,"from","","",0,{"inputs":[{"name":"error"}],"output":{"name":"error"}}],[11,"clone","","",0,null],[11,"description","","",0,null],[11,"cause","","",0,null],[11,"fmt","","",0,null],[0,"client","kafka","Kafka Client - A mid-level abstraction for a kafka cluster\nallowing higher-level constructs to be built.",null,null],[3,"PartitionOffset","kafka::client","A retrieved offset for a particular partition in the context of an\nalready known topic.",null,null],[12,"offset","","",2,null],[12,"partition","","",2,null],[3,"TopicPartitionOffset","","A retrieved offset of a particular topic partition.",null,null],[12,"offset","","",3,null],[12,"topic","","",3,null],[12,"partition","","",3,null],[3,"KafkaClient","","Client struct keeping track of brokers and topic metadata.",null,null],[3,"FetchGroupOffset","","Data point identifying a topic partition to fetch a group's offset\nfor. See `KafkaClient::fetch_group_offsets`.",null,null],[12,"topic","","The topic to fetch the group offset for",4,null],[12,"partition","","The partition to fetch the group offset for",4,null],[3,"CommitOffset","","Data point identifying a particular topic partition offset to be\ncommitted.\nSee `KafkaClient::commit_offsets`.",null,null],[12,"offset","","The offset to be committed",5,null],[12,"topic","","The topic to commit the offset for",5,null],[12,"partition","","The partition to commit the offset for",5,null],[3,"ProduceMessage","","Message data to be sent/produced to a particular topic partition.\nSee `KafkaClient::produce_messages` and `Producer::send`.",null,null],[12,"key","","The \"key\" data of this message.",6,null],[12,"value","","The \"value\" data of this message.",6,null],[12,"topic","","The topic to produce this message to.",6,null],[12,"partition","","The partition (of the corresponding topic) to produce this\nmessage to.",6,null],[3,"FetchPartition","","Partition-related request data for fetching messages.\nSee `KafkaClient::fetch_messages`.",null,null],[12,"topic","","The topic to fetch messages from.",7,null],[12,"offset","","The offset as of which to fetch messages.",7,null],[12,"partition","","The partition to fetch messages from.",7,null],[12,"max_bytes","","Specifies the max. amount of data to fetch (for this\npartition). This implicitly defines the biggest message the\nclient can accept. If this value is too small, no messages\ncan be delivered. Setting this size should be in sync with\nthe producers to the partition.",7,null],[4,"Compression","","Compression types supported by kafka. The numeric values of this\nenumeration correspond to the compression encoding in the\nattributes of a Message in the protocol.",null,null],
[13,"NONE","","",8,null],[13,"GZIP","","",8,null],[13,"SNAPPY","","",8,null],[4,"FetchOffset","","Possible values when querying a topic's offset.\nSee `KafkaClient::fetch_offsets`.",null,null],[13,"Earliest","","Receive the earliest available offset.",9,null],[13,"Latest","","Receive the latest offset.",9,null],[13,"ByTime","","Used to ask for all messages before a certain time (ms); unix\ntimestamp in milliseconds. See also\nhttps://cwiki.apache.org/confluence/display/KAFKA/Writing+a+Driver+for+Kafka#WritingaDriverforKafka-Offsets",9,null],[0,"fetch","","A representation of fetched messages from Kafka.",null,null],[3,"Response","kafka::client::fetch","The result of a \"fetch messages\" request from a particular Kafka\nbroker. Such a response can contain messages for multiple topic\npartitions.",null,null],[3,"Topic","","The result of a \"fetch messages\" request from a particular Kafka\nbroker for a single topic only. Besides the name of the topic,\nthis structure provides an iterator over the topic partitions from\nwhich messages were requested.",null,null],[3,"Partition","","The result of a \"fetch messages\" request from a particular Kafka\nbroker for a single topic partition only. Besides the partition\nidentifier, this structure provides an iterator over the actually\nrequested message data.",null,null],[3,"Data","","The successfully fetched data payload for a particular partition.",null,null],[3,"Message","","A fetched message from a remote Kafka broker for a particular\ntopic partition.",null,null],[12,"offset","","The offset at which this message resides in the remote kafka\nbroker topic partition.",10,null],[12,"key","","The \"key\" data of this message. Empty if there is no such\ndata for this message.",10,null],[12,"value","","The value data of this message. Empty if there is no such\ndata for this message.",10,null],[11,"correlation_id","","Retrieves the id corresponding to the fetch messages request\n(provided for debugging purposes only).",11,null],[11,"topics","","Provides an iterator over all the topics and the fetched data\nrelative to these topics.",11,null],[11,"topic","","Retrieves the identifier/name of the represented topic.",12,null],[11,"partitions","","Provides an iterator over all the partitions of this topic for\nwhich messages were requested.",12,null],[11,"partition","","Retrieves the identifier of the represented partition.",13,null],[11,"data","","Retrieves the data payload for this partition.",13,null],[11,"highwatermark_offset","","Retrieves the so-called \"high water mark offset\" indicating\nthe \"latest\" offset for this partition at the remote broker.\nThis can be used by clients to find out how far behind the\nlatest available message they are.",14,null],[11,"messages","","Retrieves the fetched message data for this partition.",14,null],[11,"forget_before_offset","","*Mutates* this partition data in such a way that the next call\nto `messages()` will deliver a slice of messages with the\nproperty `msg.offset >= offset`. A convenient method to skip\npast a certain message offset in the retrieved data.",14,null],
[11,"fmt","","",10,null],[0,"metadata","kafka::client","Types related to topic metadata for introspection by clients.\nExample: `KafkaClient::topics()`.",null,null],[3,"Topics","kafka::client::metadata","A view on the loaded metadata about topics and their partitions.",null,null],[3,"TopicIter","","An iterator over topics.",null,null],[3,"TopicNames","","An iterator over the names of topics known to the originating\nkafka client.",null,null],[3,"Topic","","A view on the loaded metadata for a particular topic.",null,null],[3,"Partitions","","Metadata relevant to partitions of a particular topic.",null,null],[3,"PartitionIter","","An iterator over a topic's partitions.",null,null],[3,"Partition","","Metadata about a particular topic partition.",null,null],[11,"new","","Constructs a view of the currently loaded topic metadata from\nthe specified kafka client.",15,{"inputs":[{"name":"kafkaclient"}],"output":{"name":"topics"}}],[11,"len","","Retrieves the number of underlying topics.",15,null],[11,"iter","","Provides an iterator over the known topics.",15,null],[11,"names","","A convenience method to return an iterator over the topics' names.",15,null],[11,"contains","","A convenience method to determine whether the specified topic\nis known.",15,null],[11,"partitions","","Retrieves the partitions of a known topic.",15,null],[11,"partition_ids","","Retrieves a snapshot/copy of the partition ids available for\nthe specified topic. Note that the returned copy may get out\nof date if the underlying client's metadata gets refreshed.",15,null],[11,"fmt","","",15,null],[11,"into_iter","","",15,null],[11,"next","","",16,null],[11,"next","","",17,null],[11,"name","","Retrieves the name of this topic.",18,null],[11,"partitions","","Retrieves the list of known partitions for this topic.",18,null],[11,"partition_ids","","Retrieves a snapshot/copy of the partition ids available for\nthis topic. Note that the returned copy may get out of date\nif the underlying client's metadata gets refreshed.",18,null],
[11,"fmt","","",18,null],[11,"len","","Retrieves the number of underlying partitions.",19,null],[11,"is_empty","","Tests for `.len() == 0`.",19,null],[11,"iter","","Retrieves an iterator of the partitions for the underlying topic.",19,null],[11,"partition","","Finds a specified partition identified by its id.",19,null],[11,"fmt","","",19,null],[11,"into_iter","","",19,null],[11,"next","","",20,null],[11,"id","","Retrieves the identifier of this topic partition.",21,null],[11,"leader_id","","Retrieves the node_id of the current leader of this partition.",21,null],[11,"leader_host","","Retrieves the host of the current leader of this partition.",21,null],[11,"fmt","","",21,null],[17,"DEFAULT_COMPRESSION","kafka::client","The default value for `KafkaClient::set_compression(..)`",null,null],[17,"DEFAULT_FETCH_MAX_WAIT_TIME","","The default value for `KafkaClient::set_fetch_max_wait_time(..)`",null,null],[17,"DEFAULT_FETCH_MIN_BYTES","","The default value for `KafkaClient::set_fetch_min_bytes(..)`",null,null],[17,"DEFAULT_FETCH_MAX_BYTES_PER_PARTITION","","The default value for `KafkaClient::set_fetch_max_bytes_per_partition(..)`",null,null],[11,"fmt","","",22,null],[11,"clone","","",9,null],[11,"fmt","","",9,null],[11,"fmt","","",4,null],[11,"new","","",4,{"inputs":[{"name":"str"},{"name":"i32"}],"output":{"name":"self"}}],[11,"as_ref","","",4,null],[11,"fmt","","",5,null],[11,"new","","",5,{"inputs":[{"name":"str"},{"name":"i32"},{"name":"i64"}],"output":{"name":"self"}}],[11,"as_ref","","",5,null],[11,"fmt","","",6,null],[11,"as_ref","","",6,null],[11,"new","","A convenient constructor method to create a new produce\nmessage with all attributes specified.",6,{"inputs":[{"name":"str"},{"name":"i32"},{"name":"option"},{"name":"option"}],"output":{"name":"self"}}],[11,"fmt","","",7,null],[11,"new","","Creates a new \"fetch messages\" request structure with an\nunspecified `max_bytes`.",7,{"inputs":[{"name":"str"},{"name":"i32"},{"name":"i64"}],"output":{"name":"self"}}],[11,"with_max_bytes","","Sets the `max_bytes` value for the \"fetch messages\" request.",7,null],[11,"as_ref","","",7,null],[11,"new","","Creates a new instance of KafkaClient. Before being able to\nsuccessfully use the new client, you'll have to load metadata.",22,{"inputs":[{"name":"vec"}],"output":{"name":"kafkaclient"}}],[11,"hosts","","Exposes the hosts used for discovery of the target kafka\ncluster. This set of hosts corresponds to the values supplied\nto `KafkaClient::new`.",22,null],
[11,"set_compression","","Sets the compression algorithm to use when sending out messages.",22,null],[11,"compression","","Retrieves the current `KafkaClient::set_compression` setting.",22,null],[11,"set_fetch_max_wait_time","","Sets the maximum time in milliseconds to wait for insufficient\ndata to become available when fetching messages.",22,null],[11,"fetch_max_wait_time","","Retrieves the current `KafkaClient::set_fetch_max_wait_time`\nsetting.",22,null],[11,"set_fetch_min_bytes","","Sets the minimum number of bytes of available data to wait for\nas long as specified by `KafkaClient::set_fetch_max_wait_time`\nwhen fetching messages.",22,null],[11,"fetch_min_bytes","","Retrieves the current `KafkaClient::set_fetch_min_bytes`\nsetting.",22,null],[11,"set_fetch_max_bytes_per_partition","","Sets the default maximum number of bytes to obtain from _a\nsingle kafka partition_ when fetching messages.",22,null],[11,"fetch_max_bytes_per_partition","","Retrieves the current\n`KafkaClient::set_fetch_max_bytes_per_partition` setting.",22,null],[11,"topics","","Provides a view onto the currently loaded metadata of known topics.",22,null],[11,"load_metadata_all","","Resets and loads metadata for all topics from the underlying\nbrokers.",22,null],[11,"load_metadata","","Reloads metadata for a list of supplied topics.",22,null],[11,"reset_metadata","","Clears metadata stored in the client. You must load metadata\nafter this call if you want to use the client.",22,null],[11,"fetch_offsets","","Fetch offsets for a list of topics",22,null],[11,"fetch_topic_offsets","","Fetch offsets for a single topic.",22,null],[11,"fetch_messages","","Fetch messages from Kafka (multiple topics, partitions).",22,null],[11,"fetch_messages_for_partition","","Fetch messages from a single kafka partition.",22,null],[11,"produce_messages","","Send messages to Kafka",22,null],[11,"commit_offsets","","Commit offsets for topic partitions on behalf of a consumer group.",22,null],[11,"commit_offset","","Commit the offset of a particular topic partition on behalf of a\nconsumer group.",22,null],[11,"fetch_group_offsets","","Fetch offsets for a specified list of topic partitions of a consumer group",22,null],[11,"fetch_group_topic_offsets","","Fetch offsets for all partitions of a particular topic of a consumer group",22,null],[0,"consumer","kafka","Kafka Consumer - A higher-level API for consuming a kafka topic.",null,null],[3,"Message","kafka::consumer","A fetched message from a remote Kafka broker for a particular\ntopic partition.",null,null],[12,"offset","","The offset at which this message resides in the remote kafka\nbroker topic partition.",10,null],[12,"key","","The \"key\" data of this message. Empty if there is no such\ndata for this message.",10,null],[12,"value","","The value data of this message. Empty if there is no such\ndata for this message.",10,null],[3,"Consumer","","The Kafka Consumer",null,null],[3,"MessageSets","","Messages retrieved from kafka in one fetch request. This is a\nconcatenation of blocks of messages successfully retrieved from\nthe consumed topic partitions. Each such partition is guaranteed\nto be present at most once in this structure.",null,null],
[3,"MessageSet","","A set of messages successfully retrieved from a specific topic\npartition.",null,null],[3,"MessageSetsIter","","An iterator over the consumed topic partition message sets.",null,null],[3,"Builder","","A Kafka Consumer builder easing the process of setting up various\nconfiguration settings.",null,null],[17,"DEFAULT_RETRY_MAX_BYTES_LIMIT","","The default value for `Builder::with_retry_max_bytes_limit`.",null,null],[11,"from_client","","Starts building a consumer for the given topic on behalf of\nthe given group using the given kafka client.",23,{"inputs":[{"name":"kafkaclient"},{"name":"string"},{"name":"string"}],"output":{"name":"builder"}}],[11,"from_hosts","","Starts building a consumer for the given topic on behalf of\nthe given group, bootstrapping internally a new kafka client\nfrom the given kafka hosts.",23,{"inputs":[{"name":"vec"},{"name":"string"},{"name":"string"}],"output":{"name":"builder"}}],[11,"client","","Destroys this consumer, returning the underlying kafka client.",23,null],[11,"poll","","Polls for the next available message data.",23,null],[11,"single_partition_consumer","","Determines whether this consumer is set up to consume only a\nsingle partition.",23,null],[11,"last_consumed_message","","Retrieves the offset of the last \"consumed\" message in the\nspecified partition. Results in `None` if there is no such\n\"consumed\" message for this consumer's group in the underlying\ntopic.",23,null],[11,"consume_message","","Marks the message at the specified offset in the specified\npartition as consumed by the caller.",23,null],[11,"consume_messageset","","A convenience method to mark the given message set consumed as a\nwhole by the caller. This is equivalent to marking the last\nmessage of the given set as consumed.",23,null],[11,"commit_consumed","","Persists the so-far \"marked as consumed\" messages (on behalf\nof this consumer's group for the underlying topic).",23,null],[11,"is_empty","","Determines efficiently whether there are any consumable\nmessages in this data set.",24,null],[11,"iter","","Iterates over the message sets delivering the fetched message\ndata of consumed topic partitions.",24,null],[11,"topic","","",25,null],[11,"partition","","",25,null],[11,"messages","","",25,null],[11,"next","","",26,null],[11,"fmt","","",27,null],[11,"with_partitions","","Explicitly specifies the partitions to consume.",27,null],[11,"with_fallback_offset","","Specifies the offset to use when none was committed for the\nunderlying group yet.",27,null],[11,"with_fetch_max_wait_time","","See `KafkaClient::set_fetch_max_wait_time`",27,null],[11,"with_fetch_min_bytes","","See `KafkaClient::set_fetch_min_bytes`",27,null],[11,"with_fetch_max_bytes_per_partition","","See `KafkaClient::set_fetch_max_bytes_per_partition`",27,null],[11,"with_retry_max_bytes_limit","","Specifies the upper bound of data bytes to allow fetching from\na kafka partition when retrying a fetch request due to an\noversized message in the partition.",27,null],[11,"create","","Finally creates/builds a new consumer based on the settings\nsupplied so far.",27,null],[0,"producer","kafka","Kafka Producer - A higher-level API for sending messages to kafka topics.",null,null],[3,"Record","kafka::producer","A structure representing a message to be sent to Kafka through the\n`Producer` API. Such a message is basically a key/value pair\nspecifying the target topic and optionally the topic's partition.",null,null],
[12,"key","","Key data of this (message) record.",28,null],[12,"value","","Value data of this (message) record.",28,null],[12,"topic","","Name of the topic this message is supposed to be delivered to.",28,null],[12,"partition","","The partition id of the topic to deliver this message to.\nThis partition may be `< 0`, in which case it is considered\n\"unspecified\". A `Producer` will then typically try to derive\na partition on its own.",28,null],[3,"Producer","","The Kafka Producer",null,null],[3,"Builder","","A Kafka Producer builder easing the process of setting up various\nconfiguration settings.",null,null],[3,"Topics","","A description of available topics and their available partitions.",null,null],[3,"DefaultPartitioner","","As its name implies, `DefaultPartitioner` is the default\npartitioner for `Producer`.",null,null],[4,"Compression","","Compression types supported by kafka. The numeric values of this\nenumeration correspond to the compression encoding in the\nattributes of a Message in the protocol.",null,null],[13,"NONE","","",8,null],[13,"GZIP","","",8,null],[13,"SNAPPY","","",8,null],[17,"DEFAULT_ACK_TIMEOUT","","The default value for `Builder::with_ack_timeout`.",null,null],[17,"DEFAULT_REQUIRED_ACKS","","The default value for `Builder::with_required_acks`.",null,null],[8,"AsBytes","","A trait used by `Producer` to obtain the bytes `Record::key` and\n`Record::value` represent. This leaves the choice of the types\nfor `key` and `value` with the client.",null,null],[10,"as_bytes","","",29,null],[8,"Partitioner","","A partitioner is given a chance to choose/redefine a partition for\na message to be sent to Kafka. See also\n`Record#with_partition`.",null,null],[10,"partition","","Supposed to inspect the given message and, if desired, re-assign\nthe message's target partition.",30,null],[11,"from_key_value","","Convenience function to create a new key/value record with an\n\"unspecified\" partition - that is, a partition set to a negative\nvalue.",28,{"inputs":[{"name":"str"},{"name":"k"},{"name":"v"}],"output":{"name":"record"}}],[11,"with_partition","","Convenience method to set the partition.",28,null],[11,"from_value","","Convenience function to create a new value-only record with an\n\"unspecified\" partition - that is, a partition set to a negative\nvalue.",28,{"inputs":[{"name":"str"},{"name":"v"}],"output":{"name":"record"}}],[11,"fmt","","",28,null],[11,"from_client","","Starts building a new producer using the given Kafka client.",31,{"inputs":[{"name":"kafkaclient"}],"output":{"name":"builder"}}],[11,"from_hosts","","Starts building a producer, bootstrapping internally a new kafka\nclient from the given kafka hosts.",31,{"inputs":[{"name":"vec"}],"output":{"name":"builder"}}],[11,"client","","Destroys this producer, returning the underlying kafka client.",31,null],[11,"send","","Synchronously send the specified message to Kafka.",31,null],[11,"send_all","","Synchronously send all of the specified messages to Kafka.",31,null],[11,"with_compression","","Sets the compression algorithm to use when sending out data.",32,null],[11,"with_ack_timeout","","Sets the maximum time in milliseconds the kafka brokers can\nawait the receipt of required acknowledgements (which is\nspecified through `Builder::with_required_acks`). Note that\nKafka explicitly documents this not to be a hard limit.",32,null],
[11,"with_required_acks","","Sets how many acknowledgements the kafka brokers should\nreceive before responding to sent messages. If it is 0, the\nservers will not send any response. If it is 1, the server\nwill wait until the data is written to the local server log before\nsending a reply. If it is -1, the servers will block until\nthe messages are committed by all in-sync replicas before\nreplying. For any number `> 1`, the servers will block\nwaiting for this number of acknowledgements to occur (but the\nservers will never wait for more acknowledgements than there\nare in-sync replicas).",32,null],[11,"with_partitioner","","Sets the partitioner to dispatch when sending messages without\nan explicit partition assignment.",32,null],[11,"create","","Finally creates/builds a new producer based on the settings\nsupplied so far.",32,null],[11,"partition_ids","","Retrieves a list of the identifiers of available partitions\nfor the given topic.",33,null],[11,"partition","","",34,null],[11,"default","","",34,{"inputs":[],"output":{"name":"self"}}],[11,"fmt","kafka::client","",2,null],[11,"fmt","","",3,null],[11,"correlation_id","kafka::client::fetch","Retrieves the id corresponding to the fetch messages request\n(provided for debugging purposes only).",11,null],[11,"topics","","Provides an iterator over all the topics and the fetched data\nrelative to these topics.",11,null],[11,"topic","","Retrieves the identifier/name of the represented topic.",12,null],[11,"partitions","","Provides an iterator over all the partitions of this topic for\nwhich messages were requested.",12,null],[11,"partition","","Retrieves the identifier of the represented partition.",13,null],[11,"data","","Retrieves the data payload for this partition.",13,null],[11,"highwatermark_offset","","Retrieves the so-called \"high water mark offset\" indicating\nthe \"latest\" offset for this partition at the remote broker.\nThis can be used by clients to find out how far behind the\nlatest available message they are.",14,null],[11,"messages","","Retrieves the fetched message data for this partition.",14,null],[11,"forget_before_offset","","*Mutates* this partition data in such a way that the next call\nto `messages()` will deliver a slice of messages with the\nproperty `msg.offset >= offset`. A convenient method to skip\npast a certain message offset in the retrieved data.",14,null],[11,"fmt","","",10,null],[11,"clone","kafka::client","",8,null],[11,"fmt","","",8,null],[11,"default","","",8,{"inputs":[],"output":{"name":"self"}}]],"paths":[[4,"Error"],[4,"KafkaCode"],[3,"PartitionOffset"],[3,"TopicPartitionOffset"],[3,"FetchGroupOffset"],[3,"CommitOffset"],[3,"ProduceMessage"],[3,"FetchPartition"],[4,"Compression"],[4,"FetchOffset"],[3,"Message"],[3,"Response"],[3,"Topic"],[3,"Partition"],[3,"Data"],[3,"Topics"],[3,"TopicIter"],[3,"TopicNames"],[3,"Topic"],[3,"Partitions"],[3,"PartitionIter"],[3,"Partition"],[3,"KafkaClient"],[3,"Consumer"],[3,"MessageSets"],[3,"MessageSet"],[3,"MessageSetsIter"],[3,"Builder"],[3,"Record"],[8,"AsBytes"],[8,"Partitioner"],[3,"Producer"],[3,"Builder"],[3,"Topics"],[3,"DefaultPartitioner"]]}; initSearch(searchIndex);
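// The helper below is NOT part of the generated index; it is a minimal,
// hypothetical sketch of how the raw data above can be queried directly when
// rustdoc's own `initSearch` machinery is not at hand. Each entry in `items`
// is a tuple of [itemType, name, modulePath, docString, parentIndex,
// searchType]; a non-null parentIndex points into the `paths` table, which
// names the owning type. The name `findKafkaItems` is an assumption made
// purely for illustration.
function findKafkaItems(query) {
    var crate = searchIndex["kafka"];
    var needle = query.toLowerCase();
    var results = [];
    for (var i = 0; i < crate.items.length; i++) {
        var item = crate.items[i]; // [itemType, name, modulePath, docString, parentIndex, searchType]
        if (item[1].toLowerCase().indexOf(needle) !== -1) {
            // Resolve the owning type's name via the `paths` table, if any.
            var parent = item[4] === null ? null : crate.paths[item[4]][1];
            results.push({ name: item[1], parent: parent, docs: item[3] });
        }
    }
    return results;
}
// Example: findKafkaItems("commit") would surface, among others,
// `commit_offsets` on `KafkaClient` and `commit_consumed` on `Consumer`,
// together with their doc strings.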