appendbuf/lib.rs

#![cfg_attr(test, deny(warnings))]
#![deny(missing_docs)]

//! # appendbuf
//!
//! A Sync append-only buffer with Send views.
//!
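//! ## Example
//!
//! A small end-to-end sketch, mirroring the crate's own tests:
//!
//! ```
//! use appendbuf::AppendBuf;
//!
//! let mut buf = AppendBuf::new(10);
//! assert_eq!(buf.fill(&[1, 2, 3]), 3);
//!
//! // Slices are reference-counted, read-only views that can be
//! // sent to and shared with other threads.
//! let slice = buf.slice();
//! assert_eq!(&*slice, &[1, 2, 3]);
//! assert_eq!(&*buf, &[1, 2, 3]);
//! ```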

extern crate memalloc;

use std::sync::atomic::{self, AtomicUsize, Ordering};
use std::ops::Deref;
use std::io::Read;
use std::{io, mem, fmt};

/// An append-only, atomically reference counted buffer.
pub struct AppendBuf {
    alloc: *mut AllocInfo,
    position: usize
}

unsafe impl Send for AppendBuf {}
unsafe impl Sync for AppendBuf {}

struct AllocInfo {
    refcount: AtomicUsize,
    buf: [u8]
}

unsafe impl Send for AllocInfo {}
unsafe impl Sync for AllocInfo {}

/// A read-only view into an AppendBuf.
pub struct Slice {
    alloc: *mut AllocInfo,
    offset: usize,
    len: usize
}

unsafe impl Send for Slice {}
unsafe impl Sync for Slice {}

impl Slice {
    /// Get a subslice starting from the passed offset.
    pub fn slice_from(&self, offset: usize) -> Slice {
        if self.len < offset {
            panic!("Sliced past the end of an appendbuf::Slice, \
                    the length was {:?} and the desired offset was {:?}",
                   self.len, offset);
        }

        self.allocinfo().increment();

        Slice {
            alloc: self.alloc,
            offset: self.offset + offset,
            len: self.len - offset
        }
    }

    /// Get a subslice of the first len elements.
    pub fn slice_to(&self, len: usize) -> Slice {
        if self.len < len {
            panic!("Sliced past the end of an appendbuf::Slice, \
                    the length was {:?} and the desired length was {:?}",
                   self.len, len);
        }

        self.allocinfo().increment();

        Slice {
            alloc: self.alloc,
            offset: self.offset,
            len: len
        }
    }

    /// Get a subslice starting at the passed `start` offset and ending at
    /// the passed `end` offset.
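    ///
    /// A usage sketch, mirroring the in-file `test_slice_slicing` test:
    ///
    /// ```
    /// use appendbuf::AppendBuf;
    ///
    /// let mut buf = AppendBuf::new(10);
    /// buf.fill(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
    ///
    /// let slice = buf.slice();
    /// assert_eq!(&*slice.slice(2, 7), &[3, 4, 5, 6, 7]);
    /// assert_eq!(&*slice.slice_from(6), &[7, 8, 9, 10]);
    /// assert_eq!(&*slice.slice_to(5), &[1, 2, 3, 4, 5]);
    /// ```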
    pub fn slice(&self, start: usize, end: usize) -> Slice {
        let slice = self.slice_from(start);
        slice.slice_to(end - start)
    }

    fn allocinfo(&self) -> &AllocInfo {
        unsafe { &*self.alloc }
    }
}

impl AppendBuf {
    /// Create a new, empty AppendBuf with the given capacity.
    pub fn new(len: usize) -> AppendBuf {
        AppendBuf {
            alloc: unsafe { AllocInfo::allocate(len) },
            position: 0
        }
    }

    /// Create an AppendBuf from an existing Vec.
    ///
    /// The capacity of the AppendBuf is the capacity of the Vec, with space reclaimed
    /// at the beginning for the reference count. The reference count occupies the space
    /// of a usize, so the Vec must be at least that long.
    ///
    /// If the Vec is too short, it is returned in the error value.
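    ///
    /// A sketch of converting a Vec (the leading `size_of::<usize>()` bytes are
    /// overwritten with the reference count, and the rest become already-written data):
    ///
    /// ```
    /// use std::mem;
    /// use appendbuf::AppendBuf;
    ///
    /// let mut vec = vec![0u8; mem::size_of::<usize>()]; // space for the refcount
    /// vec.extend_from_slice(&[1, 2, 3, 4]);             // already-written data
    ///
    /// let buf = AppendBuf::from_buf(vec).unwrap();
    /// assert_eq!(&*buf, &[1, 2, 3, 4]);
    ///
    /// // A Vec that is too short is handed back in the error.
    /// assert!(AppendBuf::from_buf(vec![0u8, 0]).is_err());
    /// ```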
    pub fn from_buf(vec: Vec<u8>) -> Result<Self, Vec<u8>> {
        // The leading bytes of the Vec are reused for the reference count,
        // so the Vec must contain at least that many initialized bytes.
        if vec.len() < mem::size_of::<AtomicUsize>() {
            return Err(vec)
        }

        let vec_len = vec.len();
        let alloc_info = unsafe { AllocInfo::from_buf(vec) };

        Ok(AppendBuf {
            alloc: alloc_info,
            position: vec_len - mem::size_of::<AtomicUsize>()
        })
    }

    /// Create a new Slice of the entire AppendBuf so far.
    pub fn slice(&self) -> Slice {
        self.allocinfo().increment();

        Slice {
            alloc: self.alloc,
            offset: 0,
            len: self.position
        }
    }

    /// Retrieve the amount of remaining space in the AppendBuf.
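    ///
    /// A small sketch of the bookkeeping (capacity minus bytes written so far):
    ///
    /// ```
    /// use appendbuf::AppendBuf;
    ///
    /// let mut buf = AppendBuf::new(10);
    /// assert_eq!(buf.remaining(), 10);
    ///
    /// buf.fill(&[1, 2, 3]);
    /// assert_eq!(buf.remaining(), 7);
    /// ```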
    pub fn remaining(&self) -> usize {
        self.allocinfo().buf.len() - self.position
    }

    /// Write the data in the passed buffer onto the AppendBuf.
    ///
    /// This is an alternative to using the implementation of `std::io::Write`
    /// which does not unnecessarily use `Result`.
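    ///
    /// A usage sketch, mirroring the in-file `test_overlong_write` test: only as
    /// many bytes as fit in the remaining capacity are copied.
    ///
    /// ```
    /// use appendbuf::AppendBuf;
    ///
    /// let mut buf = AppendBuf::new(5);
    /// assert_eq!(buf.fill(&[1, 2, 3, 4, 5, 6]), 5);
    /// assert_eq!(&*buf, &[1, 2, 3, 4, 5]);
    /// ```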
    pub fn fill(&mut self, buf: &[u8]) -> usize {
        use std::io::Write;

        // FIXME: Use std::slice::bytes::copy_memory when it is stabilized.
        // Writing into a `&mut [u8]` never fails; it copies as many bytes as fit.
        let amount = self.get_write_buf().write(buf).unwrap();
        self.position += amount;

        amount
    }

    /// Get the remaining space in the AppendBuf for writing.
    ///
    /// If you wish to see the data written in subsequent Slices,
    /// you must also call `advance` with the amount written.
    ///
    /// Reads from this buffer are reads of uninitialized memory,
    /// and so should be carefully avoided.
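    ///
    /// A sketch of the write-then-advance pattern (the same pattern `read_from` uses):
    ///
    /// ```
    /// use appendbuf::AppendBuf;
    ///
    /// let mut buf = AppendBuf::new(8);
    /// let written = {
    ///     let out = buf.get_write_buf();
    ///     out[0] = 7;
    ///     out[1] = 9;
    ///     2
    /// };
    /// // Only advance over bytes that were actually written.
    /// unsafe { buf.advance(written) };
    /// assert_eq!(&*buf, &[7, 9]);
    /// ```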
    pub fn get_write_buf(&mut self) -> &mut [u8] {
        let position = self.position;
        &mut self.allocinfo_mut().buf[position..]
    }

    /// Advance the position of the AppendBuf.
    ///
    /// You should only advance the buffer if you have written to a
    /// buffer returned by `get_write_buf`; advancing past the data actually
    /// written exposes uninitialized memory through the buffer and its Slices.
    pub unsafe fn advance(&mut self, amount: usize) {
        self.position += amount;
    }

    /// Read from the given io::Read into the AppendBuf.
    ///
    /// Safety note: it is possible to read uninitialized memory if the
    /// passed io::Read incorrectly reports the number of bytes written to
    /// buffers passed to it.
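    ///
    /// A usage sketch; `&[u8]` implements `std::io::Read`, so it serves as a
    /// simple reader here:
    ///
    /// ```
    /// use appendbuf::AppendBuf;
    ///
    /// let mut buf = AppendBuf::new(16);
    /// let mut reader: &[u8] = &[1, 2, 3, 4];
    ///
    /// assert_eq!(buf.read_from(&mut reader).unwrap(), 4);
    /// assert_eq!(&*buf, &[1, 2, 3, 4]);
    /// ```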
    pub fn read_from<R: Read>(&mut self, reader: &mut R) -> io::Result<usize> {
        reader.read(self.get_write_buf()).map(|n| {
            unsafe { self.advance(n) };
            n
        })
    }

    fn allocinfo(&self) -> &AllocInfo {
        unsafe { &*self.alloc }
    }

    fn allocinfo_mut(&mut self) -> &mut AllocInfo {
        unsafe { &mut *self.alloc }
    }
}

impl fmt::Debug for AppendBuf {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl fmt::Debug for Slice {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl Deref for AppendBuf {
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        &self.allocinfo().buf[..self.position]
    }
}

impl AsRef<[u8]> for AppendBuf {
    fn as_ref(&self) -> &[u8] { self }
}

impl io::Write for AppendBuf {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        Ok(self.fill(buf))
    }

    fn flush(&mut self) -> io::Result<()> { Ok(()) }
}

impl Deref for Slice {
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        unsafe { &(*self.alloc).buf[self.offset..self.offset + self.len] }
    }
}

impl AsRef<[u8]> for Slice {
    fn as_ref(&self) -> &[u8] { self }
}

impl Clone for Slice {
    fn clone(&self) -> Slice {
        self.allocinfo().increment();

        Slice {
            alloc: self.alloc,
            offset: self.offset,
            len: self.len
        }
    }
}

impl AllocInfo {
    unsafe fn allocate(size: usize) -> *mut Self {
        let alloc = memalloc::allocate(size + mem::size_of::<AtomicUsize>());
        AllocInfo::from_raw_buf(alloc, size)
    }

    /// Creates an AllocInfo from a Vec.
    ///
    /// The Vec *must* have a capacity of *at least* `mem::size_of::<usize>()`.
    unsafe fn from_buf(mut buf: Vec<u8>) -> *mut Self {
        let refcount_size = mem::size_of::<AtomicUsize>();
        let this = AllocInfo::from_raw_buf(buf.as_mut_ptr(), buf.capacity() - refcount_size);
        mem::forget(buf);
        this
    }

    /// Create an AllocInfo from a raw pointer.
    ///
    /// The pointer must point to an allocation of size `buf_cap + mem::size_of::<usize>()`.
    unsafe fn from_raw_buf(buf: *mut u8, buf_cap: usize) -> *mut Self {
        // Build a fat pointer to the unsized AllocInfo: the data pointer is the
        // start of the allocation and the slice metadata is `buf_cap`, the length
        // of the trailing `buf: [u8]` field.
        let this = mem::transmute::<_, *mut Self>((buf, buf_cap));
        (*this).refcount = AtomicUsize::new(1);
        this
    }

    #[inline(always)]
    fn increment(&self) {
        self.refcount.fetch_add(1, Ordering::Relaxed);
    }

    #[inline(always)]
    unsafe fn decrement(&self) {
        // Adapted from the implementation of Drop for std::sync::Arc.

        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to deallocate the buffer.
        if self.refcount.fetch_sub(1, Ordering::Release) != 1 { return }

        // This fence is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` fence. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this fence, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        atomic::fence(Ordering::Acquire);

        // Reconstruct the owning Box and drop it, deallocating the refcount
        // and buffer in a single allocation.
        drop(mem::transmute::<&AllocInfo, Box<AllocInfo>>(self))
    }
}

impl Drop for Slice {
    fn drop(&mut self) {
        unsafe { (*self.alloc).decrement() }
    }
}

impl Drop for AppendBuf {
    fn drop(&mut self) {
        unsafe { (*self.alloc).decrement() }
    }
}

fn _compile_test() {
    fn _is_send_sync<T: Send + Sync>() {}
    _is_send_sync::<AppendBuf>();
    _is_send_sync::<Slice>();
}

#[test]
fn test_write_and_slice() {
    let mut buf = AppendBuf::new(10);
    assert_eq!(buf.fill(&[1, 2, 3]), 3);
    let slice = buf.slice();
    assert_eq!(&*slice, &[1, 2, 3]);

    assert_eq!(&*buf, &[1, 2, 3]);
}

#[test]
fn test_overlong_write() {
    let mut buf = AppendBuf::new(5);
    assert_eq!(buf.fill(&[1, 2, 3, 4, 5, 6]), 5);
    let slice = buf.slice();
    assert_eq!(&*slice, &[1, 2, 3, 4, 5]);
}

#[test]
fn test_slice_slicing() {
    let data = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10];

    let mut buf = AppendBuf::new(10);
    assert_eq!(buf.fill(data), 10);

    assert_eq!(&*buf.slice(), data);
    assert_eq!(&*buf.slice().slice_to(5), &data[..5]);
    assert_eq!(&*buf.slice().slice_from(6), &data[6..]);
    assert_eq!(&*buf.slice().slice(2, 7), &data[2..7]);
}

#[test]
fn test_many_writes() {
    let mut buf = AppendBuf::new(100);

    assert_eq!(buf.fill(&[1, 2, 3, 4]), 4);
    assert_eq!(buf.fill(&[10, 12, 13, 14, 15]), 5);
    assert_eq!(buf.fill(&[34, 35]), 2);

    assert_eq!(&*buf.slice(), &[1, 2, 3, 4, 10, 12, 13, 14, 15, 34, 35]);
}

#[test]
fn test_slice_then_write() {
    let mut buf = AppendBuf::new(20);
    let empty = buf.slice();
    assert_eq!(&*empty, &[]);

    assert_eq!(buf.fill(&[5, 6, 7, 8]), 4);

    let not_empty = buf.slice();
    assert_eq!(&*empty, &[]);
    assert_eq!(&*not_empty, &[5, 6, 7, 8]);

    assert_eq!(buf.fill(&[9, 10, 11, 12, 13]), 5);
    assert_eq!(&*empty, &[]);
    assert_eq!(&*not_empty, &[5, 6, 7, 8]);
    assert_eq!(&*buf.slice(), &[5, 6, 7, 8, 9, 10, 11, 12, 13]);
}

#[test]
fn test_slice_bounds_edge_cases() {
    let data = &[1, 2, 3, 4, 5, 6];

    let mut buf = AppendBuf::new(data.len());
    assert_eq!(buf.fill(data), data.len());

    let slice = buf.slice().slice_to(data.len());
    assert_eq!(&*slice, data);

    let slice = buf.slice().slice_from(0);
    assert_eq!(&*slice, data);
}

#[test]
#[should_panic = "the desired offset"]
fn test_slice_from_bounds_checks() {
    let data = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10];

    let mut buf = AppendBuf::new(10);
    assert_eq!(buf.fill(data), 10);

    buf.slice().slice_from(100);
}

#[test]
#[should_panic = "the desired length"]
fn test_slice_to_bounds_checks() {
    let data = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10];

    let mut buf = AppendBuf::new(10);
    assert_eq!(buf.fill(data), 10);

    buf.slice().slice_to(100);
}

#[test]
fn test_convert_from_vec() {
    let buf = vec![0, 0, 0, 0, 0, 0, 0, 0, // refcount
                   1, 2, 3, 4, 5, 6, 7, 8]; // data
    let append_buf = AppendBuf::from_buf(buf.clone()).unwrap();
    assert_eq!(&*append_buf, &buf[8..]);

    let buf = vec![0, 0, 0, 0]; // too short
    assert_eq!(AppendBuf::from_buf(buf.clone()).unwrap_err(), buf);
}