//! Stack-based buffer

use core::{cmp, fmt, slice, mem, ptr, ops};
use crate::{Buf, ContBuf, ReadBuf, WriteBuf};

///Static buffer of raw bytes.
///
///The size of the storage must be known at compile time, therefore it is suitable only for
///non-dynamic storage.
///
///While write semantics are fairly obvious, read behaviour is more complicated because this is a
///static buffer.
///
///`ReadBuf::read` always reads memory from the beginning, so, as with any other implementation,
///a read leads to consumption.
///But because this buffer is a single chunk of static memory, consuming requires shifting the
///remaining written bytes to the beginning (meaning each `ReadBuf::consume` involves a `memmove`,
///unless the consumed `len` equals the current `len`).
///
///In general it is more efficient to access the memory as a slice and then consume it, if needed.
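///
///## Usage
///
///A minimal sketch of the write-then-read flow. It assumes the crate is named `buffer` and that
///`WriteBuf` provides a `write_slice` helper returning the number of bytes written (the
///`std::io::Write` impl below relies on such a method); adjust to the actual API if it differs.
///
///```ignore
///use buffer::{ReadBuf, WriteBuf};
///
/////8 bytes of stack storage.
///let mut buf = buffer::Buffer::<[u8; 8]>::new();
///assert_eq!(buf.write_slice(b"hello"), 5);
///assert_eq!(buf.as_slice(), b"hello");
///
/////Reading consumes from the front; the leftover bytes are shifted down via `memmove`.
///let mut out = [0u8; 2];
///unsafe {
///    buf.read(out.as_mut_ptr(), out.len());
///}
///assert_eq!(&out, b"he");
///assert_eq!(buf.as_slice(), b"llo");
///```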
pub struct Buffer<T: Sized> {
    inner: mem::MaybeUninit<T>,
    cursor: usize, //number of bytes written
}

impl<S: Sized> Buffer<S> {
    #[inline]
    ///Creates new instance
    pub const fn new() -> Self {
        Self {
            inner: mem::MaybeUninit::uninit(),
            cursor: 0,
        }
    }

    #[inline]
    ///Transforms buffer into ring buffer.
    pub const fn into_circular(self) -> Ring<S> {
        unsafe {
            Ring::from_parts(self, 0)
        }
    }

    #[inline]
    ///Creates new instance from parts.
    ///
    ///`cursor` - number of bytes written. It is the user's responsibility to ensure it does not
    ///exceed the actual capacity.
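    ///
    ///A hedged sketch of restoring a buffer from previously obtained parts:
    ///
    ///```ignore
    ///let (storage, cursor) = buffer::Buffer::<[u8; 8]>::new().into_parts();
    /////Sound here because `cursor` was produced by `into_parts` for this very storage.
    ///let buf = unsafe { buffer::Buffer::from_parts(storage, cursor) };
    ///```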
    pub const unsafe fn from_parts(inner: mem::MaybeUninit<S>, cursor: usize) -> Self {
        Self {
            inner,
            cursor,
        }
    }

    #[inline]
    ///Splits buffer into parts.
    pub const fn into_parts(self) -> (mem::MaybeUninit<S>, usize) {
        (self.inner, self.cursor)
    }

    #[inline]
    ///Returns pointer to the beginning of the underlying buffer.
    pub const fn as_ptr(&self) -> *const u8 {
        &self.inner as *const _ as *const u8
    }

    #[inline]
    ///Returns number of bytes left (not written yet)
    pub const fn remaining(&self) -> usize {
        Self::capacity() - self.cursor
    }

    #[inline]
    ///Returns slice to already written data.
    pub fn as_slice(&self) -> &[u8] {
        unsafe {
            slice::from_raw_parts(self.as_ptr(), self.cursor)
        }
    }

    #[inline]
    ///Returns mutable slice to already written data.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe {
            slice::from_raw_parts_mut(self.as_ptr() as *mut u8, self.cursor)
        }
    }

    #[inline]
    ///Shortens the buffer.
    ///
    ///Does nothing if the new `cursor` is at or past the current position.
    pub fn truncate(&mut self, cursor: usize) {
        if cursor < self.cursor {
            self.cursor = cursor
        }
    }

    #[inline]
    ///Changes the written length without writing anything.
    ///
    ///When used, the user must guarantee that these bytes have actually been initialized.
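    ///
    ///A hedged sketch of the usual pattern: fill the uninitialized tail through
    ///`ContBuf::as_write_slice`, then commit the length with `set_len`.
    ///
    ///```ignore
    ///use buffer::ContBuf;
    ///
    ///let mut buf = buffer::Buffer::<[u8; 8]>::new();
    ///buf.as_write_slice()[0] = core::mem::MaybeUninit::new(42);
    /////Sound: exactly one byte was initialized above.
    ///unsafe { buf.set_len(1) };
    ///assert_eq!(buf.as_slice(), &[42]);
    ///```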
    pub unsafe fn set_len(&mut self, cursor: usize) {
        debug_assert!(cursor <= Self::capacity());
        self.cursor = cursor
    }

    #[inline]
    ///Returns buffer overall capacity.
    pub const fn capacity() -> usize {
        mem::size_of::<S>()
    }

    #[inline]
    ///Returns number of bytes written.
    pub const fn len(&self) -> usize {
        self.cursor
    }
}

impl<S: Sized> ops::Index<usize> for Buffer<S> {
    type Output = u8;

    #[inline(always)]
    fn index(&self, index: usize) -> &Self::Output {
        debug_assert!(index < self.len());
        unsafe {
            &*self.as_ptr().offset(index as isize)
        }
    }
}

impl<S: Sized> ops::IndexMut<usize> for Buffer<S> {
    #[inline(always)]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        debug_assert!(index < self.len());
        unsafe {
            &mut *(self.as_ptr().offset(index as isize) as *mut _)
        }
    }
}

impl<S: Sized> AsRef<[u8]> for Buffer<S> {
    #[inline(always)]
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl<S: Sized> fmt::Debug for Buffer<S> {
    #[inline(always)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_list().entries(self.as_slice().iter()).finish()
    }
}

impl<S: Sized> Buf for Buffer<S> {
    #[inline(always)]
    fn capacity(&self) -> usize {
        Self::capacity()
    }

    #[inline(always)]
    fn len(&self) -> usize {
        self.cursor
    }
}

impl<S: Sized> WriteBuf for Buffer<S> {
    #[inline(always)]
    fn remaining(&self) -> usize {
        Self::remaining(self)
    }

    #[inline(always)]
    unsafe fn advance(&mut self, step: usize) {
        self.set_len(self.cursor + step);
    }

    unsafe fn write(&mut self, ptr: *const u8, size: usize) {
        debug_assert!(!ptr.is_null());
        debug_assert!(size <= Self::remaining(self));

        ptr::copy_nonoverlapping(ptr, self.as_ptr().offset(self.cursor as isize) as *mut u8, size);
        self.advance(size);
    }
}

impl<S: Sized> ReadBuf for Buffer<S> {
    unsafe fn consume(&mut self, step: usize) {
        debug_assert!(step <= self.cursor);

        if step == 0 {
            return
        }

        let remaining = self.cursor.saturating_sub(step);

        if remaining != 0 {
            ptr::copy(self.as_ptr().offset(step as isize), self.as_ptr() as *mut u8, remaining);
        }

        self.set_len(remaining)
    }

    unsafe fn read(&mut self, ptr: *mut u8, size: usize) {
        debug_assert!(!ptr.is_null());
        debug_assert!(size <= self.cursor);

        ptr::copy_nonoverlapping(self.as_ptr(), ptr, size);
        self.consume(size);
    }
}

impl<S: Sized> ContBuf for Buffer<S> {
    #[inline(always)]
    fn as_read_slice(&self) -> &[u8] {
        self.as_slice()
    }

    #[inline(always)]
    fn as_read_slice_mut(&mut self) -> &mut [u8] {
        self.as_mut_slice()
    }

    #[inline(always)]
    fn as_write_slice(&mut self) -> &mut [mem::MaybeUninit<u8>] {
        unsafe {
            slice::from_raw_parts_mut(self.as_ptr().offset(self.cursor as isize) as *mut mem::MaybeUninit<u8>, Self::capacity() - self.cursor)
        }
    }
}

#[cfg(feature = "std")]
impl<S: Sized> std::io::Write for Buffer<S> {
    #[inline(always)]
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        Ok(self.write_slice(buf))
    }

    #[inline(always)]
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}

///Circular version of `Buffer`.
///
///The storage capacity (i.e. `mem::size_of::<T>()`) must be a power of 2, because indices are
///mapped onto the storage by masking.
///
///Because `Ring` is circular, it always has remaining bytes to write.
///But care must be taken: without consuming already written bytes it is easy to overwrite them,
///as the ring buffer always reports spare write capacity.
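///
///## Usage
///
///A hedged sketch of the overwrite behaviour (same assumptions as the `Buffer` example: crate
///name `buffer` and a `write_slice` helper on `WriteBuf`):
///
///```ignore
///use buffer::{ReadBuf, WriteBuf};
///
///let mut ring = buffer::Buffer::<[u8; 4]>::new().into_circular();
///ring.write_slice(b"abcd"); //buffer is now full
///ring.write_slice(b"e"); //wraps around, silently overwriting `a`
///
///let mut out = [0u8; 4];
///unsafe {
///    ring.read(out.as_mut_ptr(), out.len());
///}
///assert_eq!(&out, b"bcde");
///```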
pub struct Ring<T: Sized> {
    buffer: Buffer<T>,
    //Read position. Both `read` and `buffer.cursor` are free-running counters that wrap on
    //overflow and are masked onto the storage on access.
    read: usize
}

impl<S: Sized> Ring<S> {
    #[inline]
    ///Creates new instance
    pub const fn new() -> Self {
        unsafe {
            Self::from_parts(Buffer::new(), 0)
        }
    }

    #[inline]
    ///Creates new instance from parts
    pub const unsafe fn from_parts(buffer: Buffer<S>, read: usize) -> Self {
        Self {
            buffer,
            read
        }
    }

    #[inline]
    ///Splits buffer into parts.
    pub const fn into_parts(self) -> (Buffer<S>, usize) {
        (self.buffer, self.read)
    }

    #[inline]
    //Maps a free-running index onto the storage; correct only when the capacity is a power of 2.
    const fn mask_idx(idx: usize) -> usize {
        idx & (Buffer::<S>::capacity() - 1)
    }

    ///Returns number of available bytes (written but not yet consumed).
    pub const fn len(&self) -> usize {
        self.buffer.cursor.wrapping_sub(self.read)
    }

    ///Returns whether buffer is empty.
    pub const fn is_empty(&self) -> bool {
        self.buffer.cursor == self.read
    }

    ///Returns whether buffer is full.
    pub const fn is_full(&self) -> bool {
        Buffer::<S>::capacity() == self.len()
    }
}

impl<S: Sized> ops::Index<usize> for Ring<S> {
    type Output = u8;

    #[inline(always)]
    fn index(&self, mut index: usize) -> &Self::Output {
        debug_assert!(index < self.len());
        index = Self::mask_idx(self.read.wrapping_add(index));
        unsafe {
            &*self.buffer.as_ptr().offset(index as isize)
        }
    }
}

impl<S: Sized> ops::IndexMut<usize> for Ring<S> {
    #[inline(always)]
    fn index_mut(&mut self, mut index: usize) -> &mut Self::Output {
        debug_assert!(index < self.len());
        index = Self::mask_idx(self.read.wrapping_add(index));
        unsafe {
            &mut *(self.buffer.as_ptr().offset(index as isize) as *mut _)
        }
    }
}

impl<S: Sized> Buf for Ring<S> {
    #[inline(always)]
    fn capacity(&self) -> usize {
        Buffer::<S>::capacity()
    }

    #[inline(always)]
    fn len(&self) -> usize {
        Self::len(self)
    }
}

impl<S: Sized> ReadBuf for Ring<S> {
    #[inline(always)]
    fn available(&self) -> usize {
        Self::len(self)
    }

    #[inline]
    unsafe fn consume(&mut self, step: usize) {
        self.read = self.read.wrapping_add(step);
    }

    unsafe fn read(&mut self, ptr: *mut u8, mut size: usize) {
        debug_assert!(!ptr.is_null());
        debug_assert!((Buffer::<S>::capacity() & (Buffer::<S>::capacity() - 1)) == 0, "Capacity is not power of 2");

        //First copy the span from the masked read position up to the end of the storage.
        let idx = Self::mask_idx(self.read);
        let read_span = cmp::min(Buffer::<S>::capacity() - idx, size);

        ptr::copy_nonoverlapping(self.buffer.as_ptr().offset(idx as isize), ptr, read_span);
        self.consume(read_span);
        size -= read_span;

        //If the request wraps around, copy the rest from the beginning of the storage,
        //clamped to what is actually available.
        if size > 0 {
            let avail_size = cmp::min(size, self.available());
            if avail_size > 0 {
                ptr::copy_nonoverlapping(self.buffer.as_ptr(), ptr.offset(read_span as isize), avail_size);
                self.consume(avail_size);
            }
        }
    }
}

impl<S: Sized> WriteBuf for Ring<S> {
    #[inline(always)]
    fn remaining(&self) -> usize {
        Buffer::<S>::capacity()
    }

    #[inline(always)]
    unsafe fn advance(&mut self, step: usize) {
        self.buffer.cursor = self.buffer.cursor.wrapping_add(step);

        let read_span = self.buffer.cursor.wrapping_sub(self.read);
        if read_span > Buffer::<S>::capacity() {
            //consume over-written bytes
            self.consume(read_span - Buffer::<S>::capacity());
        }
    }

    unsafe fn write(&mut self, ptr: *const u8, mut size: usize) {
        debug_assert!(!ptr.is_null());
        debug_assert!((Buffer::<S>::capacity() & (Buffer::<S>::capacity() - 1)) == 0, "Capacity is not power of 2");

        //First fill the span from the masked write position up to the end of the storage.
        let cursor = Self::mask_idx(self.buffer.cursor);
        let mut write_span = cmp::min(Buffer::<S>::capacity() - cursor, size);

        ptr::copy_nonoverlapping(ptr, self.buffer.as_ptr().offset(cursor as isize) as *mut u8, write_span);
        size -= write_span;

        //Wrap around to the beginning of the storage; inputs larger than the capacity keep
        //overwriting it, so only the tail of the input survives.
        while size > 0 {
            let avail_size = cmp::min(size, Buffer::<S>::capacity());

            ptr::copy_nonoverlapping(ptr.offset(write_span as isize), self.buffer.as_ptr() as *mut u8, avail_size);
            size -= avail_size;
            write_span += avail_size;
        }

        self.advance(write_span);
    }
}
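
#[cfg(test)]
mod tests {
    //Hedged sanity checks mirroring the doc sketches above. `write_slice` is assumed to be a
    //provided method of `WriteBuf` returning the number of bytes written (the `std::io::Write`
    //impl relies on it); everything else is defined in this module.
    use super::*;

    #[test]
    fn buffer_read_shifts_leftovers() {
        let mut buf = Buffer::<[u8; 8]>::new();
        assert_eq!(buf.write_slice(b"hello"), 5);

        let mut out = [0u8; 2];
        unsafe {
            buf.read(out.as_mut_ptr(), out.len());
        }
        assert_eq!(&out, b"he");
        //Leftover bytes were shifted to the front of the storage.
        assert_eq!(buf.as_slice(), b"llo");
    }

    #[test]
    fn ring_overwrites_oldest_bytes() {
        let mut ring = Buffer::<[u8; 4]>::new().into_circular();
        ring.write_slice(b"abcd");
        //One more byte wraps around and silently consumes the oldest one.
        ring.write_slice(b"e");

        let mut out = [0u8; 4];
        unsafe {
            ring.read(out.as_mut_ptr(), out.len());
        }
        assert_eq!(&out, b"bcde");
    }
}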