//! Ring buffer implementation that supports immutable reads.

use std::cell::UnsafeCell;
use std::marker;
use std::ops::{Index, IndexMut};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

/// The reader id is used by readers to tell the storage where the last read ended.
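///
/// A reader id is obtained from the storage it will read from and must only ever be used
/// with that storage. A minimal sketch (marked `ignore`, so it is not run as a doc test;
/// the `u32` payload is an arbitrary choice for illustration):
///
/// ```ignore
/// let mut storage = RingBufferStorage::<u32>::new(4);
/// let mut reader = storage.new_reader_id();
/// storage.single_write(7);
/// assert_eq!(vec![7], storage.read(&mut reader).cloned().collect::<Vec<_>>());
/// ```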
#[derive(Debug)]
pub struct ReaderId<T> {
    reader_id: usize,
    buffer_id: usize,
    alive: Arc<AtomicBool>,
    m: marker::PhantomData<T>,
}

impl<T> ReaderId<T> {
    /// Create a new reader id
    pub fn new(reader_id: usize, buffer_id: usize, alive: Arc<AtomicBool>) -> ReaderId<T> {
        ReaderId {
            reader_id,
            buffer_id,
            alive,
            m: marker::PhantomData,
        }
    }
}

impl<T> Drop for ReaderId<T> {
    fn drop(&mut self) {
        self.alive.store(false, Ordering::Relaxed);
    }
}

#[derive(Debug)]
struct InternalReaderId {
    written: usize,
    index: usize,
    alive: Arc<AtomicBool>,
}

/// This static value assigns a unique id to every storage, which is then propagated to
/// registered reader ids, preventing a reader id from being used with the wrong storage.
/// Preventing this is essential because the unsafe code used for reading could otherwise
/// cause memory corruption.
static RING_BUFFER_ID: AtomicUsize = AtomicUsize::new(0);

/// Ring buffer, holding data of type `T`
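///
/// Writes require `&mut self`, while `read` only takes `&self` plus the reader's own
/// `ReaderId`, so multiple readers can consume events through shared references. A usage
/// sketch (marked `ignore`, so it is not run as a doc test):
///
/// ```ignore
/// let mut storage = RingBufferStorage::<u32>::new(8);
/// let mut reader = storage.new_reader_id();
/// storage.iter_write(vec![1, 2, 3]);
/// // Each reader sees every event exactly once.
/// assert_eq!(
///     vec![1, 2, 3],
///     storage.read(&mut reader).cloned().collect::<Vec<_>>()
/// );
/// ```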
#[derive(Debug)]
pub struct RingBufferStorage<T> {
    pub(crate) data: Vec<T>,
    buffer_id: usize,
    reader_internal: Vec<UnsafeCell<InternalReaderId>>,
    write_index: usize,
    written: usize,
    reset_written: usize,
    /// Caches the index of the current longest (furthest-behind) reader so that growth
    /// checks usually only need to examine that one reader.
    current_longest_reader: usize,
}

unsafe impl<T> Sync for RingBufferStorage<T>
where
    T: Sync,
{
}

impl<T: 'static> RingBufferStorage<T> {
    /// Create a new ring buffer with the given max size.
    pub fn new(size: usize) -> Self {
        RingBufferStorage {
            data: Vec::with_capacity(size),
            buffer_id: RING_BUFFER_ID.fetch_add(1, Ordering::Relaxed),
            reader_internal: Vec::new(),
            write_index: 0,
            written: 0,
            reset_written: size * 1000,
            current_longest_reader: 0,
        }
    }

    /// Iterates over all elements of `iter` and pushes them to the buffer.
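    ///
    /// Anything that is `IntoIterator<Item = T>` works, e.g. a range (sketch, assuming a
    /// `RingBufferStorage<u32>` named `storage`):
    ///
    /// ```ignore
    /// storage.iter_write(0..3); // writes 0, 1 and 2
    /// ```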
    pub fn iter_write<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = T>,
    {
        for d in iter {
            self.single_write(d);
        }
    }

    /// Removes all elements from a `Vec` and pushes them to the ringbuffer.
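    ///
    /// This is shorthand for `self.iter_write(data.drain(..))`, so the `Vec` is left
    /// empty afterwards (sketch, assuming a `RingBufferStorage<u32>` named `storage`):
    ///
    /// ```ignore
    /// let mut pending = vec![1, 2, 3];
    /// storage.drain_vec_write(&mut pending);
    /// assert!(pending.is_empty());
    /// ```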
    pub fn drain_vec_write(&mut self, data: &mut Vec<T>) {
        self.iter_write(data.drain(..));
    }

    fn needs_growth(&mut self) -> bool {
        let mut cached_num_written = 0;
        let cache_valid = if self.reader_internal.len() > self.current_longest_reader {
            let cached_reader =
                unsafe { &*self.reader_internal[self.current_longest_reader].get() };
            cached_num_written = if self.written < cached_reader.written {
                self.written + (self.reset_written - cached_reader.written)
            } else {
                self.written - cached_reader.written
            };
            cached_reader.alive.load(Ordering::Relaxed) && cached_num_written > 1
        } else {
            false
        };
        if cache_valid {
            cached_num_written > self.data.len()
        } else {
            let (longest_reader, num_written) = self.reader_internal
                .iter()
                .map(|internal| unsafe { &*internal.get() })
                .enumerate()
                .filter_map(|(i, internal)| {
                    if internal.alive.load(Ordering::Relaxed) {
                        Some((i, &internal.written))
                    } else {
                        None
                    }
                })
                .fold((0, 0), |(index, max_written), (i, &written)| {
                    let num_written = if self.written < written {
                        self.written + (self.reset_written - written)
                    } else {
                        self.written - written
                    };
                    if num_written > max_written {
                        (i, num_written)
                    } else {
                        (index, max_written)
                    }
                });
            self.current_longest_reader = longest_reader;
            num_written > self.data.len()
        }
    }

    /// Write a single data point into the ringbuffer.
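    ///
    /// If the furthest-behind live reader would otherwise have more unread events than
    /// the buffer can hold, the buffer grows by inserting at the write index; otherwise
    /// the oldest slot is overwritten in place. A sketch of the growth case (marked
    /// `ignore`, so it is not run as a doc test):
    ///
    /// ```ignore
    /// let mut storage = RingBufferStorage::<u32>::new(2);
    /// let mut reader = storage.new_reader_id();
    /// storage.single_write(1);
    /// storage.single_write(2);
    /// storage.single_write(3); // unread events force the buffer to grow
    /// assert_eq!(
    ///     vec![1, 2, 3],
    ///     storage.read(&mut reader).cloned().collect::<Vec<_>>()
    /// );
    /// ```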
    pub fn single_write(&mut self, data: T) {
        self.written += 1;
        if self.written > self.reset_written {
            self.written = 0;
        }
        let need_growth = self.needs_growth();
        if need_growth {
            self.data.insert(self.write_index, data);
            // In order to avoid pushing events that have already been read back into a readable
            // range we're also going to push any readers that are ahead of us in the buffer
            // forward one as well.
            for i in 0..self.reader_internal.len() {
                unsafe {
                    if (*self.reader_internal[i].get()).index > self.write_index {
                        (*self.reader_internal[i].get()).index += 1;
                    }
                }
            }
            self.write_index += 1;
        } else if !self.data.is_empty() {
            // Check if we need to loop the write index.
            if self.write_index == self.data.len() {
                // If we're looping the write index then the meaning of any read indices at the end
                // has changed, so we need to loop them too.
                for i in 0..self.reader_internal.len() {
                    unsafe {
                        if (*self.reader_internal[i].get()).index == self.write_index {
                            (*self.reader_internal[i].get()).index = 0;
                        }
                    }
                }
                // Loop the write index
                self.write_index = 0;
            }
            self.data[self.write_index] = data;
            self.write_index += 1;
        }
    }

    /// Create a new reader id for this ringbuffer.
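    ///
    /// Slots belonging to dropped readers are reused, so repeatedly creating and dropping
    /// readers does not grow the internal tracking list (sketch, marked `ignore`):
    ///
    /// ```ignore
    /// let mut storage = RingBufferStorage::<u32>::new(4);
    /// drop(storage.new_reader_id());
    /// let _reader = storage.new_reader_id(); // occupies the recycled slot
    /// ```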
    pub fn new_reader_id(&mut self) -> ReaderId<T> {
        let alive;
        // Attempt to re-use positions from dropped readers.
        let new_id = self.reader_internal
            .iter()
            .map(|internal| unsafe { &*internal.get() })
            .position(|internal| !internal.alive.load(Ordering::Relaxed));
        match new_id {
            Some(new_id) => {
                let reader = unsafe { &mut *self.reader_internal[new_id].get() };
                reader.written = self.written;
                reader.index = self.write_index;
                reader.alive.store(true, Ordering::Relaxed);
                alive = reader.alive.clone();
            }
            None => {
                alive = Arc::new(AtomicBool::new(true));
                self.reader_internal.push(UnsafeCell::new(InternalReaderId {
                    written: self.written,
                    index: self.write_index,
                    alive: alive.clone(),
                }));
            }
        }
        ReaderId::new(
            new_id.unwrap_or(self.reader_internal.len() - 1),
            self.buffer_id,
            alive,
        )
    }

    /// Read data from the ringbuffer, starting where the last read ended, and up to where the last
    /// data was written.
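    ///
    /// Panics if `reader_id` was not created by this storage. Each call only yields
    /// events written since this reader's previous read (sketch, marked `ignore`):
    ///
    /// ```ignore
    /// let mut storage = RingBufferStorage::<u32>::new(4);
    /// let mut reader = storage.new_reader_id();
    /// storage.single_write(1);
    /// let first: Vec<_> = storage.read(&mut reader).cloned().collect();
    /// let second: Vec<_> = storage.read(&mut reader).cloned().collect();
    /// assert_eq!(first, vec![1]);
    /// assert!(second.is_empty());
    /// ```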
    pub fn read(&self, reader_id: &mut ReaderId<T>) -> StorageIterator<T> {
        assert!(
            reader_id.buffer_id == self.buffer_id,
            "ReaderID used with an event buffer it's not registered to.  Not permitted!"
        );
        let written = unsafe { (*self.reader_internal[reader_id.reader_id].get()).written };
        let num_written = if self.written < written {
            self.written + (self.reset_written - written)
        } else {
            self.written - written
        };

        // The read index is sometimes kept at the maximum (one past the end) in case the
        // buffer grows; when the buffer hasn't grown, the iterator treats that index as 0.
        let read_index = unsafe { (*self.reader_internal[reader_id.reader_id].get()).index };

        // Update the reader index inside the storage.  This is safe because the only time this
        // value can be updated is when there is both a mutable reference to the reader ID
        // and an immutable reference to the storage.  We also guaranteed above that this reader id
        // was created by this storage.
        unsafe {
            let pointer: *mut InternalReaderId = self.reader_internal[reader_id.reader_id].get();
            (*pointer).written = self.written;
            (*pointer).index = self.write_index;
        }
        StorageIterator {
            storage: self,
            current: read_index,
            end: self.write_index,
            started: num_written == 0,
        }
    }
}

/// Iterator over a slice of data in `RingBufferStorage`.
#[derive(Debug)]
pub struct StorageIterator<'a, T: 'a> {
    storage: &'a RingBufferStorage<T>,
    current: usize,
    end: usize,
    // Distinguishes "read the whole buffer" from "nothing to read": in both cases
    // `current == end` at the start, so this is forced to `true` when the iterator is empty.
    started: bool,
}

impl<'a, T> Iterator for StorageIterator<'a, T> {
    type Item = &'a T;

    fn next(&mut self) -> Option<&'a T> {
        if self.started && self.current == self.end {
            None
        } else {
            self.started = true;
            if self.current == self.storage.data.len() && self.end != self.storage.data.len() {
                self.current = 0;
            }
            let item = &self.storage[self.current];
            self.current += 1;
            if self.current == self.storage.data.len() && self.end != self.storage.data.len() {
                self.current = 0;
            }
            Some(item)
        }
    }
}

impl<T> Index<usize> for RingBufferStorage<T> {
    type Output = T;

    fn index(&self, index: usize) -> &Self::Output {
        &self.data[index]
    }
}

impl<T> IndexMut<usize> for RingBufferStorage<T> {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.data[index]
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[derive(Debug, Clone, PartialEq)]
    struct Test {
        pub id: u32,
    }

    #[derive(Debug, Clone, PartialEq)]
    struct Test2 {
        pub id: u32,
    }

    #[test]
    fn test_empty_write() {
        let mut buffer = RingBufferStorage::<Test>::new(10);
        buffer.drain_vec_write(&mut vec![]);
        assert_eq!(buffer.data.len(), 0);
    }

    #[test]
    fn test_too_large_write() {
        let mut buffer = RingBufferStorage::<Test>::new(10);
        // Without a registered reader, events would just go off into the void, so register one.
        let _reader = buffer.new_reader_id();
        buffer.drain_vec_write(&mut events(15));
        assert_eq!(buffer.data.len(), 15);
    }

    #[test]
    fn test_empty_read() {
        let mut buffer = RingBufferStorage::<Test>::new(10);
        let mut reader_id = buffer.new_reader_id();
        let data = buffer.read(&mut reader_id);
        assert_eq!(Vec::<Test>::default(), data.cloned().collect::<Vec<_>>())
    }

    #[test]
    fn test_empty_read_write_before_id() {
        let mut buffer = RingBufferStorage::<Test>::new(10);
        buffer.drain_vec_write(&mut events(2));
        let mut reader_id = buffer.new_reader_id();
        let data = buffer.read(&mut reader_id);
        assert_eq!(Vec::<Test>::default(), data.cloned().collect::<Vec<_>>())
    }

    #[test]
    fn test_read() {
        let mut buffer = RingBufferStorage::<Test>::new(10);
        let mut reader_id = buffer.new_reader_id();
        buffer.drain_vec_write(&mut events(2));
        assert_eq!(
            vec![Test { id: 0 }, Test { id: 1 }],
            buffer.read(&mut reader_id).cloned().collect::<Vec<_>>()
        );

        assert_eq!(
            Vec::<Test>::new(),
            buffer.read(&mut reader_id).cloned().collect::<Vec<_>>()
        );
    }

    #[test]
    fn test_write_overflow() {
        let mut buffer = RingBufferStorage::<Test>::new(3);
        let mut reader_id = buffer.new_reader_id();
        buffer.drain_vec_write(&mut events(4));
        let data = buffer.read(&mut reader_id);
        assert_eq!(
            vec![
                Test { id: 0 },
                Test { id: 1 },
                Test { id: 2 },
                Test { id: 3 },
            ],
            data.cloned().collect::<Vec<_>>()
        );
    }

    /// If you're getting a compilation error here, this test has failed!
    #[test]
    fn test_send_sync() {
        trait SendSync: Send + Sync {
            fn is_send_sync() -> bool;
        }

        impl<T> SendSync for T
        where
            T: Send + Sync,
        {
            fn is_send_sync() -> bool {
                true
            }
        }

        assert!(RingBufferStorage::<Test>::is_send_sync());
        assert!(ReaderId::<Test>::is_send_sync());
    }

    #[test]
    fn test_reader_reuse() {
        let mut buffer = RingBufferStorage::<Test>::new(3);
        {
            let _reader_id = buffer.new_reader_id();
        }
        let _reader_id = buffer.new_reader_id();
        assert_eq!(buffer.reader_internal.len(), 1);
    }

    #[test]
    fn test_prevent_excess_growth() {
        let mut buffer = RingBufferStorage::<Test>::new(3);
        let mut reader_id = buffer.new_reader_id();
        buffer.drain_vec_write(&mut events(2));
        buffer.drain_vec_write(&mut events(2));
        // We wrote ids 0, 1, 0, 1; if the buffer grew correctly we'll get all of them back.
        assert_eq!(
            vec![
                Test { id: 0 },
                Test { id: 1 },
                Test { id: 0 },
                Test { id: 1 },
            ],
            buffer.read(&mut reader_id).cloned().collect::<Vec<_>>()
        );

        buffer.drain_vec_write(&mut events(4));
        // After writing 4 more events the buffer should have no reason to grow beyond 4 slots.
        assert_eq!(buffer.data.len(), 4);
        assert_eq!(
            vec![
                Test { id: 0 },
                Test { id: 1 },
                Test { id: 2 },
                Test { id: 3 },
            ],
            buffer.read(&mut reader_id).cloned().collect::<Vec<_>>()
        );
    }

    #[test]
    fn test_write_slice() {
        let mut buffer = RingBufferStorage::<Test>::new(10);
        let mut reader_id = buffer.new_reader_id();
        buffer.iter_write(events(2));
        let data = buffer.read(&mut reader_id);
        assert_eq!(
            vec![Test { id: 0 }, Test { id: 1 }],
            data.cloned().collect::<Vec<_>>()
        );
    }

    fn events(n: u32) -> Vec<Test> {
        (0..n).map(|i| Test { id: i }).collect::<Vec<_>>()
    }
}