quicklog/serialize/buffer.rs

use crate::constants::MAX_SERIALIZE_BUFFER_CAPACITY;
use once_cell::unsync::OnceCell;

static mut BYTE_BUFFER: [u8; MAX_SERIALIZE_BUFFER_CAPACITY] = [0_u8; MAX_SERIALIZE_BUFFER_CAPACITY];

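/// Global handle to the serialize buffer, initialized lazily.
///
/// A minimal usage sketch (hypothetical, not part of this crate), assuming
/// single-threaded access since the cell is `unsync` and the statics are
/// `static mut`:
///
/// ```ignore
/// unsafe {
///     let _ = BUFFER.get_or_init(Buffer::new);
///     let chunk = BUFFER.get_mut().unwrap().get_chunk_as_mut(8);
///     chunk.copy_from_slice(&42_u64.to_le_bytes());
/// }
/// ```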
pub static mut BUFFER: OnceCell<Buffer> = OnceCell::new();

cfg_if::cfg_if! {
    if #[cfg(debug_assertions)] {
        use std::sync::atomic::{AtomicUsize, Ordering};

        /// In debug, `Buffer` has atomic read and write indices which track whether the
        /// write index overruns the read index, i.e. whether logging happens faster than
        /// flushing.
        ///
        /// It panics when logging happens faster than flushing, which is an indication
        /// that a larger buffer size is needed.
        pub struct Buffer {
            write_idx: AtomicUsize,
            read_idx: AtomicUsize,
        }

        impl Buffer {
            pub fn new() -> Buffer {
                Buffer {
                    write_idx: AtomicUsize::new(0),
                    read_idx: AtomicUsize::new(0),
                }
            }

            /// Updates the read index and checks whether read has overrun write.
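            ///
            /// A sketch of the expected alloc/dealloc pairing (hypothetical usage, not
            /// from this crate):
            ///
            /// ```ignore
            /// let mut buffer = Buffer::new();
            /// let chunk = buffer.get_chunk_as_mut(16);
            /// // ... flush/read the 16 bytes behind `chunk` ...
            /// buffer.dealloc(16); // marks those 16 bytes as reusable
            /// ```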
            pub fn dealloc(&mut self, chunk_size: usize) {
                let prev_val = self.read_idx.fetch_add(chunk_size, Ordering::Release);
                if prev_val + chunk_size > MAX_SERIALIZE_BUFFER_CAPACITY {
                    // since an allocation that would overrun the end wraps around to the start,
                    // the read index is reset to the start (plus chunk_size) instead of the end
                    self.read_idx.store(chunk_size, Ordering::Release);
                }

                // in debug mode, assert the invariant that read is always less than or
                // equal to write; otherwise the ring has been broken
                let curr_write = self.write_idx.load(Ordering::Acquire);
                let curr_read = self.read_idx.load(Ordering::Acquire);
                assert!(
                    curr_read <= curr_write,
                    // TODO: State which env var to change to amend the buffer capacity
                    "read index is greater than write index; this means logging is happening faster than flushing, you might want to increase the buffer capacity"
                );
            }

            pub fn get_chunk_as_mut(&mut self, chunk_size: usize) -> &'static mut [u8] {
                let curr_idx = self.write_idx.load(Ordering::Acquire);

                if chunk_size > MAX_SERIALIZE_BUFFER_CAPACITY {
                    panic!(
                        "BUFFER size insufficient to support chunk_size: {}, please increase MAX_SERIALIZE_BUFFER_CAPACITY",
                        chunk_size
                    );
                }

                // wrap around to the start if there is insufficient space before the end
                if curr_idx + chunk_size > MAX_SERIALIZE_BUFFER_CAPACITY {
                    // two behaviours are possible here: in debug we panic if the wrap would
                    // overwrite data that has not been read yet, while release simply overwrites;
                    // in debug, check how far the reader has progressed
                    let curr_end = self.read_idx.load(Ordering::Acquire);
                    if curr_end > chunk_size {
                        self.write_idx.store(chunk_size, Ordering::Release);
                        // safe: everything before the read index has been flushed and we require less
                        unsafe { &mut BYTE_BUFFER[0..chunk_size] }
                    } else {
                        // unsafe: we would overwrite items that have not been read yet, panic!
                        panic!("Write index will overwrite the read index! You might need a larger buffer capacity.")
                    }
                } else {
                    // sufficient space before the end
                    self.write_idx
                        .store(curr_idx + chunk_size, Ordering::Release);
                    unsafe { &mut BYTE_BUFFER[curr_idx..curr_idx + chunk_size] }
                }
            }
        }
    } else {
        /// In release, `Buffer` only has a `write_idx` and there is no overhead
        /// from using atomics.
        pub struct Buffer {
            write_idx: usize,
        }

        impl Buffer {
            pub fn new() -> Buffer {
                Buffer {
                    write_idx: 0
                }
            }

            pub fn get_chunk_as_mut(&mut self, chunk_size: usize) -> &'static mut [u8] {
                let curr_idx = self.write_idx;

                if chunk_size > MAX_SERIALIZE_BUFFER_CAPACITY {
                    panic!(
                        "BUFFER size insufficient to support chunk_size: {}, please increase MAX_SERIALIZE_BUFFER_CAPACITY",
                        chunk_size
                    );
                }

                // wrap around to the start if there is insufficient space before the end
                if curr_idx + chunk_size > MAX_SERIALIZE_BUFFER_CAPACITY {
                    self.write_idx = chunk_size;
                    // in release, overwrite existing items without panicking
                    unsafe { &mut BYTE_BUFFER[0..chunk_size] }
                } else {
                    self.write_idx += chunk_size;
                    unsafe { &mut BYTE_BUFFER[curr_idx..curr_idx + chunk_size] }
                }
            }
        }
    }
}

impl Default for Buffer {
    fn default() -> Self {
        Self::new()
    }
}
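
// A minimal test sketch (not from the original crate) exercising the
// allocate/flush/dealloc flow above. It assumes MAX_SERIALIZE_BUFFER_CAPACITY
// is at least 8 bytes and that these tests are the only writers to the global
// BYTE_BUFFER while they run.
#[cfg(test)]
mod tests {
    use crate::constants::MAX_SERIALIZE_BUFFER_CAPACITY;

    use super::*;

    #[test]
    fn chunk_roundtrip() {
        let mut buffer = Buffer::new();
        let chunk = buffer.get_chunk_as_mut(8);
        assert_eq!(chunk.len(), 8);

        // write a value into the chunk and read it back out
        chunk.copy_from_slice(&42_u64.to_le_bytes());
        let mut round = [0_u8; 8];
        round.copy_from_slice(chunk);
        assert_eq!(u64::from_le_bytes(round), 42);

        // in debug builds, mark the chunk as read so the read index keeps up
        #[cfg(debug_assertions)]
        buffer.dealloc(8);
    }

    #[test]
    #[should_panic]
    fn oversized_chunk_panics() {
        let mut buffer = Buffer::new();
        // a request larger than the whole buffer panics before touching it
        let _ = buffer.get_chunk_as_mut(MAX_SERIALIZE_BUFFER_CAPACITY + 1);
    }
}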