use std::collections::BinaryHeap;
use std::sync::atomic::{self, AtomicBool, AtomicUsize};

use parking_lot::{Condvar, Mutex};

use crate::*;

/// A queue which orders messages by priority
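///
/// A minimal usage sketch (marked `ignore`, not compiled as a doctest). The `Stash`
/// constructor is defined outside this module, so the stash is taken by reference here:
///
/// ```ignore
/// // A producer pushes messages through its thread-local stash ...
/// fn produce(queue: &PriorityQueue<String, u64>, stash: &mut Stash<String, u64>) {
///     queue.send("low".to_string(), 1, stash);
///     queue.send("high".to_string(), 10, stash);
///     // Flush anything the non-blocking sends left in the stash.
///     queue.sync(stash);
/// }
///
/// // ... and a consumer pops them in priority order.
/// fn consume(queue: &PriorityQueue<String, u64>) {
///     match queue.recv() {
///         Message::Msg(msg, prio) => println!("got {:?} at prio {}", msg, prio),
///         Message::Drained => println!("all messages processed"),
///         _ => {} // other Message variants, if any
///     }
/// }
/// ```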
#[derive(Debug)]
pub struct PriorityQueue<M, P>
where
    M: Send,
    P: PartialOrd + Ord,
{
    heap:                   Mutex<BinaryHeap<Message<M, P>>>,
    pub(crate) in_progress: AtomicUsize,
    is_drained:             AtomicBool,
    notify:                 Condvar,
}

impl<M, P> Default for PriorityQueue<M, P>
where
    M: Send,
    P: PartialOrd + Ord,
{
    fn default() -> Self {
        Self::new()
    }
}

enum Notify {
    None,
    One,
    All,
}

impl<M, P> PriorityQueue<M, P>
where
    M: Send,
    P: PartialOrd + Ord,
{
    /// Create a new PriorityQueue
    pub fn new() -> PriorityQueue<M, P> {
        PriorityQueue {
            heap:        Mutex::new(BinaryHeap::new()),
            in_progress: AtomicUsize::new(0),
            is_drained:  AtomicBool::new(true),
            notify:      Condvar::new(),
        }
    }

    /// Inserts all elements from the stash into the PriorityQueue and empties the stash.
    /// This function waits until the lock on the queue is acquired.
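    ///
    /// A sketch: after a burst of non-blocking send() calls, a final sync() publishes
    /// anything that lock contention left behind in the stash:
    ///
    /// ```ignore
    /// fn produce_burst(queue: &PriorityQueue<u32, u8>, stash: &mut Stash<u32, u8>) {
    ///     for i in 0..100 {
    ///         queue.send(i, 0, stash); // may stash on contention
    ///     }
    ///     queue.sync(stash); // guarantee everything reaches the queue
    /// }
    /// ```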
    pub fn sync(&self, stash: &mut Stash<M, P>) {
        let mut notify = Notify::None;

        let msgs = &mut stash.msgs;

        if !msgs.is_empty() {
            if msgs.len() == 1 {
                notify = Notify::One;
            } else {
                notify = Notify::All;
            }

            let mut lock = self.heap.lock();
            msgs.drain(..).for_each(|e| {
                lock.push(e);
            });
        }

        self.notify(notify);
    }

    /// Pushes a message with prio onto the queue, using the Stash as temporary storage when
    /// the queue is contended. Drains the stash in the uncontended case.
    /// This function does not wait for the lock on the queue.
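    ///
    /// A sketch; if the lock is contended the message only lands in the stash and is
    /// published by a later send() or an explicit sync():
    ///
    /// ```ignore
    /// fn produce(queue: &PriorityQueue<String, u64>, stash: &mut Stash<String, u64>) {
    ///     queue.send("work item".to_string(), 5, stash);
    ///     // Guarantee delivery of anything still stashed before the thread goes idle.
    ///     queue.sync(stash);
    /// }
    /// ```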
    pub fn send(&self, message: M, prio: P, stash: &mut Stash<M, P>) {
        self.in_progress.fetch_add(1, atomic::Ordering::SeqCst);
        self.is_drained.store(false, atomic::Ordering::SeqCst);

        let mut notify = Notify::None;
        let msgs = &mut stash.msgs;

        if let Some(mut lock) = self.heap.try_lock() {
            if msgs.is_empty() {
                notify = Notify::One;
            } else {
                notify = Notify::All;
                msgs.drain(..).for_each(|e| {
                    lock.push(e);
                });
            }
            lock.push(Message::Msg(message, prio));
        } else {
            msgs.push(Message::Msg(message, prio));
        }

        self.notify(notify);
    }

    /// Pushes a message with prio onto the queue, draining the Stash first.
    /// This function waits until the lock on the queue is acquired.
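    ///
    /// A sketch; unlike send(), this blocks until the message (and any stashed backlog)
    /// is on the queue:
    ///
    /// ```ignore
    /// fn must_enqueue(queue: &PriorityQueue<String, u64>, stash: &mut Stash<String, u64>) {
    ///     queue.send_sync("urgent".to_string(), 100, stash);
    ///     // At this point the message has been pushed onto the queue.
    /// }
    /// ```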
    pub fn send_sync(&self, message: M, prio: P, stash: &mut Stash<M, P>) {
        self.in_progress.fetch_add(1, atomic::Ordering::SeqCst);
        self.is_drained.store(false, atomic::Ordering::SeqCst);

        let notify;
        let msgs = &mut stash.msgs;

        let mut lock = self.heap.lock();
        if msgs.is_empty() {
            notify = Notify::One;
        } else {
            notify = Notify::All;
            msgs.drain(..).for_each(|e| {
                lock.push(e);
            });
        }
        lock.push(Message::Msg(message, prio));

        self.notify(notify);
    }

    /// Pushes a message onto the Stash; it will not try to send data to the queue.
    /// Use this to combine several messages before calling sync() to send them.
    /// This function does not wait for the lock on the queue.
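    ///
    /// A sketch of deliberately collecting messages locally, then publishing them with a
    /// single lock acquisition:
    ///
    /// ```ignore
    /// fn collect(queue: &PriorityQueue<u32, u8>, stash: &mut Stash<u32, u8>) {
    ///     for i in 0..16 {
    ///         queue.send_stashed(i, 0, stash);
    ///     }
    ///     // Nothing is visible to receivers until the stash is synced.
    ///     queue.sync(stash);
    /// }
    /// ```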
    pub fn send_stashed(&self, message: M, prio: P, stash: &mut Stash<M, P>) {
        self.in_progress.fetch_add(1, atomic::Ordering::SeqCst);
        self.is_drained.store(false, atomic::Ordering::SeqCst);

        stash.msgs.push(Message::Msg(message, prio));
    }

    /// Combines the above: collects at least 'batch_size' messages in the stash before
    /// trying to send them out. Use this to batch some messages together before calling
    /// sync() to send them. This function does not wait for the lock on the queue.
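    ///
    /// A sketch; messages accumulate in the stash until it grows past 'batch_size', after
    /// which a send() is attempted (falling back to the stash again if the lock is contended):
    ///
    /// ```ignore
    /// fn produce_batched(queue: &PriorityQueue<u32, u8>, stash: &mut Stash<u32, u8>) {
    ///     for i in 0..1000 {
    ///         queue.send_batched(i, 0, 64, stash);
    ///     }
    ///     // Flush the final partial batch.
    ///     queue.sync(stash);
    /// }
    /// ```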
    pub fn send_batched(&self, message: M, prio: P, batch_size: usize, stash: &mut Stash<M, P>) {
        if stash.len() <= batch_size {
            // append to the stash
            self.send_stashed(message, prio, stash);
        } else {
            // try to send
            self.send(message, prio, stash);
        }
    }

    /// Pushes a message with prio onto the queue without using a stash. This function waits
    /// until the lock on the queue is acquired. Since no stash is involved, this should not
    /// be used by threads that have a stash, as that stash won't get drained first. It can
    /// be used to send a synchronous out-of-band message, bypassing the stash.
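    ///
    /// A sketch of sending an out-of-band message from a thread that owns no stash:
    ///
    /// ```ignore
    /// fn oob_signal(queue: &PriorityQueue<String, u64>) {
    ///     // No stash is drained here; stashed messages of other threads stay stashed.
    ///     queue.send_nostash("control message".to_string(), 0);
    /// }
    /// ```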
    pub fn send_nostash(&self, message: M, prio: P) {
        self.in_progress.fetch_add(1, atomic::Ordering::SeqCst);
        self.is_drained.store(false, atomic::Ordering::SeqCst);
        self.heap.lock().push(Message::Msg(message, prio));
        self.notify(Notify::One);
    }

    /// Send the 'Drained' message
    pub(crate) fn send_drained(&self) {
        if self
            .is_drained
            .compare_exchange(
                false,
                true,
                atomic::Ordering::SeqCst,
                atomic::Ordering::SeqCst,
            )
            .is_ok()
        {
            self.in_progress.fetch_add(1, atomic::Ordering::SeqCst);
            self.heap.lock().push(Message::Drained);
            self.notify(Notify::One);
        }
    }

    /// Like send_drained(), but with the heap lock already held.
    pub(crate) fn send_drained_with_lock(&self, lock: &mut BinaryHeap<Message<M, P>>) {
        if self
            .is_drained
            .compare_exchange(
                false,
                true,
                atomic::Ordering::SeqCst,
                atomic::Ordering::SeqCst,
            )
            .is_ok()
        {
            self.in_progress.fetch_add(1, atomic::Ordering::SeqCst);
            lock.push(Message::Drained);
            self.notify(Notify::One);
        }
    }

    /// Returns the smallest message from the queue, wrapped in a ReceiveGuard.
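    ///
    /// A sketch; the guard keeps the message counted as in progress until it is dropped
    /// (accessing the contained message goes through ReceiveGuard's own API, not shown here):
    ///
    /// ```ignore
    /// fn handle_one(queue: &PriorityQueue<String, u64>) {
    ///     let guard = queue.recv_guard(); // blocks until a message is available
    ///     // ... process the message through the guard ...
    ///     drop(guard); // dropping the guard ends the in-progress accounting
    /// }
    /// ```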
    pub fn recv_guard(&self) -> ReceiveGuard<M, P> {
        let mut lock = self.heap.lock();
        while lock.is_empty() {
            self.notify.wait(&mut lock);
        }

        let message = lock.pop().unwrap();

        ReceiveGuard::new(message, self)
    }

    /// Try to get the smallest message from the queue. Will return Some(ReceiveGuard) when a
    /// message is available. This will not wait on the queue lock.
    pub fn try_recv_guard(&self) -> Option<ReceiveGuard<M, P>> {
        match self.heap.try_lock() {
            Some(mut queue) => queue.pop().map(|message| ReceiveGuard::new(message, self)),
            None => None,
        }
    }

    /// Try to get the smallest message from the queue. Will return Some(ReceiveGuard) when a
    /// message is available. This will wait on the queue lock but return None when the queue
    /// is empty.
    pub fn maybe_recv_guard(&self) -> Option<ReceiveGuard<M, P>> {
        self.heap
            .lock()
            .pop()
            .map(|message| ReceiveGuard::new(message, self))
    }

    /// Returns the smallest message from a queue.
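    ///
    /// A sketch of a consumer loop that exits once the queue reports it has drained:
    ///
    /// ```ignore
    /// fn consumer(queue: &PriorityQueue<String, u64>) {
    ///     loop {
    ///         match queue.recv() {
    ///             Message::Msg(msg, _prio) => println!("processing {}", msg),
    ///             Message::Drained => break,
    ///             _ => {} // other Message variants, if any
    ///         }
    ///     }
    /// }
    /// ```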
    pub fn recv(&self) -> Message<M, P> {
        let mut lock = self.heap.lock();
        while lock.is_empty() {
            self.notify.wait(&mut lock);
        }

        let msg = lock.pop().unwrap();
        if self.in_progress.fetch_sub(1, atomic::Ordering::SeqCst) == 1 {
            self.send_drained_with_lock(&mut lock);
        }

        msg
    }

    /// Try to get the smallest message from the queue. Will return Some(Message) when a
    /// message is available. This will not wait on the queue lock.
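    ///
    /// A sketch of opportunistic, non-blocking polling:
    ///
    /// ```ignore
    /// fn poll(queue: &PriorityQueue<String, u64>) {
    ///     // None can mean "queue empty" or merely "lock currently contended".
    ///     if let Some(Message::Msg(msg, _prio)) = queue.try_recv() {
    ///         println!("got {}", msg);
    ///     }
    /// }
    /// ```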
    pub fn try_recv(&self) -> Option<Message<M, P>> {
        self.heap.try_lock().and_then(|mut lock| {
            lock.pop().map(|msg| {
                if self.in_progress.fetch_sub(1, atomic::Ordering::SeqCst) == 1 {
                    self.send_drained_with_lock(&mut lock);
                }
                msg
            })
        })
    }

    /// Try to get the smallest message from the queue. Will return Some(Message) when a
    /// message is available. This will wait on the queue lock but return None when the queue
    /// is empty.
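    ///
    /// A sketch of draining whatever is currently queued without blocking on an empty queue:
    ///
    /// ```ignore
    /// fn drain_now(queue: &PriorityQueue<String, u64>) {
    ///     while let Some(message) = queue.maybe_recv() {
    ///         if let Some = message {} // placeholder removed below
    ///     }
    /// }
    /// ```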
    pub fn maybe_recv(&self) -> Option<Message<M, P>> {
        self.heap.lock().pop().map(|message| {
            if self.in_progress.fetch_sub(1, atomic::Ordering::SeqCst) == 1 {
                self.send_drained();
            }
            message
        })
    }

    /// Returns the number of messages in flight. This is the .len() plus any receiver that
    /// still holds a guard. Note: informational only, this method will be racy when other
    /// threads modify the PriorityQueue.
    pub fn in_progress(&self) -> usize {
        self.in_progress.load(atomic::Ordering::Relaxed)
    }

    /// Returns true when the PriorityQueue contains no messages. Note: informational only,
    /// this method will be racy when other threads modify the PriorityQueue.
    pub fn is_empty(&self) -> bool {
        self.heap.lock().is_empty()
    }

    /// Returns the number of messages in the PriorityQueue. Note: informational only, this
    /// method will be racy when other threads modify the PriorityQueue.
    pub fn len(&self) -> usize {
        self.heap.lock().len()
    }

    // Note: no capacity(), future versions may use another heap implementation

    /// Reserves capacity for at least `additional` more elements to be inserted in the
    /// PriorityQueue.
    pub fn reserve(&self, additional: usize) {
        self.heap.lock().reserve(additional);
    }

    /// Wakes waiting threads.
    fn notify(&self, notify: Notify) {
        match notify {
            Notify::None => {}
            Notify::One => {
                self.notify.notify_one();
            }
            Notify::All => {
                self.notify.notify_all();
            }
        }
    }
}