//! This crate provides a fast mpmc multicast queue.
//! It's based on the queue design from the LMAX Disruptor, with a few improvements:
//!
//! * It acts as a futures stream/sink, so you can set up high-performance pipelines
//!
//! * It can dynamically add/remove senders, and each stream can have multiple receivers
//!
//! * It has fast runtime fallbacks for when there's a single consumer and/or a single producer
//!
//! * It works on 32 bit systems without any performance or capability penalty
//!
//! * In most cases, one can view data written directly into the queue without copying it
//!
//! In many cases, MultiQueue will be a good replacement for channels, and its broadcast
//! capabilities can replace more complex concurrency systems with a single queue.
//!
//! # Queue Model:
//! MultiQueue functions similarly to the LMAX Disruptor from a high level view.
//! There's an incoming FIFO data stream that is broadcast to a set of subscribers
//! as if there were multiple streams being written to.
//! There are two main differences:
//!
//! * MultiQueue transparently supports switching between single and multiple producers.
//!
//! * Each broadcast stream can be shared among multiple consumers.
//!
//! The last part makes the model a bit confusing, since there's a difference between a
//! stream of data and something consuming that stream. To make things worse, each consumer
//! may not actually see every value on the stream. Instead, multiple consumers may act on
//! a single stream, each getting unique access to certain elements.
//!
//! A helpful mental model is to think of each stream as its own mpmc queue being pushed
//! to, with the MultiQueue structure simply assembling a bunch of them behind the scenes.
//! That isn't how it actually works, but it's a useful way to think about it.
//!
//! A diagram representing a general use case of the queue, where each consumer has unique
//! access to a stream, is below - the # symbols stand in for producers and @ for the
//! consumer of each stream, each with a label. The lines show the data flow through the
//! queue.
//!
//! ```text
//! -> #        @-1
//!     \      /
//!      -> -> -> @-2
//!     /      \
//! -> #        @-3
//! ```
//!
//! This is a pretty standard broadcast queue setup -
//! each element sent in is seen on each stream by that stream's consumer.
//!
//! However, in MultiQueue, each logical consumer might actually be demultiplexed
//! across many actual consumers, like below.
//!
//! ```text
//! -> #        @-1
//!     \      /
//!      -> -> -> @-2' (really @+@+@, each competing for a spot)
//!     /      \
//! -> #        @-3
//! ```
//!
//! If this diagram is redrawn with each of the producers sending in a
//! sequenced element (time goes left to right):
//!
//! ```text
//!       t=1|t=2| t=3 | t=4|
//! 1 -> #                  @-1 (1, 2)
//!       \                /
//!        -> 2 -> 1 ->  ->  @-2' (really @ (1) + @ (2) + @ (nothing yet))
//!       /                \
//! 2 -> #                  @-3 (1, 2)
//! ```
//!
//! If one imagines this as a webserver, the streams for @-1 and @-3 might be doing
//! incidental work like logging or metrics gathering and can handle the workload
//! completely on one core, while @-2 is doing expensive work handling requests
//! and is split into multiple workers dealing with the data stream.
//!
//! # Usage:
//! From the receiving side, this behaves quite similarly to a channel receiver.
//! The .recv function will block until data is available and then return the data.
//!
//! For senders, there is only .try_send (except for the futures sink, which can park).
//! This is due to performance and API reasons - you should handle backlog explicitly
//! instead of just blocking.
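//!
//! As a rough sketch, one way to handle that backlog is a bounded retry loop that
//! yields between attempts. This is illustrative only: it assumes try_send fails on a
//! full queue and that recv mirrors std::sync::mpsc by returning an error once all
//! senders are gone.
//!
//! ```no_run
//! extern crate multiqueue;
//!
//! use std::thread;
//!
//! let (send, recv) = multiqueue::multiqueue(10);
//!
//! // Bounded backoff instead of a hot busy loop (a sketch, not a prescription)
//! let mut sent = false;
//! for _ in 0..1000 {
//!     if send.try_send(9).is_ok() {
//!         sent = true;
//!         break;
//!     }
//!     thread::yield_now(); // give consumers a chance to drain the queue
//! }
//! assert!(sent);
//!
//! // recv blocks until a value is available
//! println!("Got {}", recv.recv().unwrap());
//! ```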
//!
//! # Example: SPSC channel
//!
//! ```
//! extern crate multiqueue;
//!
//! use std::thread;
//!
//! let (send, recv) = multiqueue::multiqueue(10);
//!
//! let handle = thread::spawn(move || {
//!     for val in recv {
//!         println!("Got {}", val);
//!     }
//! });
//!
//! for i in 0..10 {
//!     send.try_send(i).unwrap();
//! }
//!
//! // Drop the sender to close the queue
//! drop(send);
//!
//! handle.join().unwrap();
//!
//! // prints
//! // Got 0
//! // Got 1
//! // Got 2
//! // etc
//! ```
//!
//! # Example: SPSC broadcasting
//!
//! ```
//! extern crate multiqueue;
//!
//! use std::thread;
//!
//! let (send, recv) = multiqueue::multiqueue(4);
//! let mut handles = vec![];
//! for i in 0..2 { // or n
//!     let cur_recv = recv.add_stream();
//!     handles.push(thread::spawn(move || {
//!         for val in cur_recv {
//!             println!("Stream {} got {}", i, val);
//!         }
//!     }));
//! }
//!
//! // Take notice that I unsubscribe the reader here - this removes it from
//! // the queue, meaning that the readers in the new threads
//! // won't get starved by the lack of progress from recv
//! recv.unsubscribe();
//!
//! for i in 0..10 {
//!     // Don't do this busy loop in real code unless you're really sure
//!     loop {
//!         if send.try_send(i).is_ok() {
//!             break;
//!         }
//!     }
//! }
//!
//! // Drop the sender to close the queue
//! drop(send);
//!
//! for t in handles {
//!     t.join().unwrap();
//! }
//!
//! // prints along the lines of
//! // Stream 0 got 0
//! // Stream 0 got 1
//! // Stream 1 got 0
//! // Stream 0 got 2
//! // Stream 1 got 1
//! // etc
//! ```
//!
//! # Example: SPMC broadcast
//!
//! ```
//! extern crate multiqueue;
//!
//! use std::thread;
//!
//! let (send, recv) = multiqueue::multiqueue(4);
//!
//! let mut handles = vec![];
//!
//! for i in 0..2 { // or n
//!     let cur_recv = recv.add_stream();
//!     for j in 0..2 {
//!         let stream_consumer = cur_recv.clone();
//!         handles.push(thread::spawn(move || {
//!             for val in stream_consumer {
//!                 println!("Stream {} consumer {} got {}", i, j, val);
//!             }
//!         }));
//!     }
//!     // cur_recv is dropped here
//! }
//!
//! // Take notice that I unsubscribe the reader here - this removes it from
//! // the queue, meaning that the readers in the new threads
//! // won't get starved by the lack of progress from recv
//! recv.unsubscribe();
//!
//! for i in 0..10 {
//!     // Don't do this busy loop in real code unless you're really sure
//!     loop {
//!         if send.try_send(i).is_ok() {
//!             break;
//!         }
//!     }
//! }
//! drop(send);
//!
//! for t in handles {
//!     t.join().unwrap();
//! }
//!
//! // prints along the lines of
//! // Stream 0 consumer 1 got 2
//! // Stream 0 consumer 0 got 0
//! // Stream 1 consumer 0 got 0
//! // Stream 0 consumer 1 got 1
//! // Stream 1 consumer 1 got 1
//! // Stream 1 consumer 0 got 2
//! // etc
//! ```
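//!
//! # Example: futures pipeline (sketch)
//!
//! The queue also acts as a futures stream/sink via futures_multiqueue. The sketch
//! below is illustrative rather than exact: it assumes FuturesSender implements the
//! futures 0.1 Sink trait and FuturesReceiver the Stream trait (as described in the
//! feature list above), and it elides the exact error types.
//!
//! ```ignore
//! extern crate futures;
//! extern crate multiqueue;
//!
//! use futures::{Future, Sink, Stream};
//!
//! // Assumed: FuturesSender is a Sink and FuturesReceiver is a Stream
//! let (send, recv) = multiqueue::futures_multiqueue(4);
//!
//! // Sink::send parks the task instead of failing when the queue is full
//! let producer = send.send(1).and_then(|s| s.send(2)).and_then(|s| s.send(3));
//!
//! // Consume the stream until the senders close it
//! let consumer = recv.for_each(|val| {
//!     println!("Got {}", val);
//!     Ok(())
//! });
//!
//! // Drive both futures on the event loop / executor of your choice
//! ```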
//!
//! # Example: Usage menagerie
//!
//! ```
//! extern crate multiqueue;
//!
//! use std::thread;
//!
//! let (send, recv) = multiqueue::multiqueue(4);
//! let mut handles = vec![];
//!
//! // start like before
//! for i in 0..2 { // or n
//!     let cur_recv = recv.add_stream();
//!     for j in 0..2 {
//!         let stream_consumer = cur_recv.clone();
//!         handles.push(thread::spawn(move ||
//!             for val in stream_consumer {
//!                 println!("Stream {} consumer {} got {}", i, j, val);
//!             }
//!         ));
//!     }
//!     // cur_recv is dropped here
//! }
//!
//! // On this stream, since there's only one consumer,
//! // the receiver can be made into a SingleReceiver
//! // which can view items inline in the queue
//! let single_recv = recv.add_stream().into_single().unwrap();
//!
//! handles.push(thread::spawn(move ||
//!     for val in single_recv.iter_with(|item_ref| 10 * *item_ref) {
//!         println!("{}", val);
//!     }
//! ));
//!
//! // Same as above, except this time we just want to iterate until the receiver is empty
//! let single_recv_2 = recv.add_stream().into_single().unwrap();
//!
//! handles.push(thread::spawn(move ||
//!     for val in single_recv_2.partial_iter_with(|item_ref| 10 * *item_ref) {
//!         println!("{}", val);
//!     }
//! ));
//!
//! // Take notice that I unsubscribe the reader here - this removes it from
//! // the queue, meaning that the readers in the new threads
//! // won't get starved by the lack of progress from recv
//! recv.unsubscribe();
//!
//! // Many senders to give all the receivers something
//! for _ in 0..3 {
//!     let cur_send = send.clone();
//!     handles.push(thread::spawn(move ||
//!         for i in 0..10 {
//!             loop {
//!                 if cur_send.try_send(i).is_ok() {
//!                     break;
//!                 }
//!             }
//!         }
//!     ));
//! }
//! drop(send);
//!
//! for t in handles {
//!     t.join().unwrap();
//! }
//! ```

mod alloc;
mod atomicsignal;
mod consume;
mod countedindex;
mod maybe_acquire;
mod memory;
mod multiqueue;
mod read_cursor;

pub mod wait;

pub use multiqueue::{multiqueue, multiqueue_with, Receiver, SingleReceiver, Sender,
                     FuturesReceiver, FuturesSender, futures_multiqueue};