// generic_arraydeque/io.rs

use core::str::from_utf8;
use std::io::{self, BufRead, IoSlice, Read, Write};

use super::{ArrayLength, GenericArrayDeque};
5
6impl<N: ArrayLength> GenericArrayDeque<u8, N> {
7  #[cfg_attr(not(tarpaulin), inline(always))]
8  fn extend_bytes(&mut self, buf: &[u8]) {
9    let written = unsafe {
10      self.write_iter_wrapping(
11        self.to_physical_idx(self.len),
12        buf.iter().copied(),
13        buf.len(),
14      )
15    };
16
17    debug_assert_eq!(
18      buf.len(),
19      written,
20      "The number of items written to VecDeque doesn't match the TrustedLen size hint"
21    );
22  }
23}
24
25/// Read is implemented for `GenericArrayDeque<u8>` by consuming bytes from the front of the `GenericArrayDeque`.
26impl<N: ArrayLength> Read for GenericArrayDeque<u8, N> {
27  /// Fill `buf` with the contents of the "front" slice as returned by
28  /// [`as_slices`][`GenericArrayDeque::as_slices`]. If the contained byte slices of the `GenericArrayDeque` are
29  /// discontiguous, multiple calls to `read` will be needed to read the entire content.
30  #[inline]
31  fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
32    let (ref mut front, _) = self.as_slices();
33    let n = Read::read(front, buf)?;
34    self.drain(..n);
35    Ok(n)
36  }
37
38  #[inline]
39  fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
40    let (front, back) = self.as_slices();
41
42    // Use only the front buffer if it is big enough to fill `buf`, else use
43    // the back buffer too.
44    match SplitAtMut::split_at_mut_checked(buf, front.len()) {
45      None => buf.copy_from_slice(&front[..buf.len()]),
46      Some((buf_front, buf_back)) => match SplitAt::split_at_checked(back, buf_back.len()) {
47        Some((back, _)) => {
48          buf_front.copy_from_slice(front);
49          buf_back.copy_from_slice(back);
50        }
51        None => {
52          // Leave the buffered data in place — matches `VecDeque`'s
53          // behavior and lets the caller retry or fall back to `read`.
54          return Err(io::Error::new(
55            io::ErrorKind::UnexpectedEof,
56            "failed to fill whole buffer",
57          ));
58        }
59      },
60    }
61
62    self.drain(..buf.len());
63    Ok(())
64  }
65
66  #[inline]
67  fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
68    // The total len is known upfront so we can reserve it in a single call.
69    let len = self.len();
70    buf
71      .try_reserve(len)
72      .map_err(|_| io::ErrorKind::OutOfMemory)?;
73
74    let (front, back) = self.as_slices();
75    buf.extend_from_slice(front);
76    buf.extend_from_slice(back);
77    self.clear();
78    Ok(len)
79  }
80
81  #[inline]
82  fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
83    // A single UTF-8 codepoint may straddle the ring's split point, so
84    // validate the concatenated byte stream rather than each half.
85    // `make_contiguous` reorganizes the physical buffer in place; after it,
86    // `as_slices` returns everything in the front slice.
87    let bytes = self.make_contiguous();
88    let s = match from_utf8(bytes) {
89      Ok(s) => s,
90      Err(_) => {
91        return Err(io::Error::new(
92          io::ErrorKind::InvalidData,
93          "stream did not contain valid UTF-8",
94        ));
95      }
96    };
97
98    buf
99      .try_reserve(s.len())
100      .map_err(|_| io::ErrorKind::OutOfMemory)?;
101
102    let len = s.len();
103    buf.push_str(s);
104    // Match the `Read::read_to_string` contract: the source is consumed.
105    self.clear();
106    Ok(len)
107  }
108}
109
110/// BufRead is implemented for `GenericArrayDeque<u8>` by reading bytes from the front of the `GenericArrayDeque`.
111impl<N: ArrayLength> BufRead for GenericArrayDeque<u8, N> {
112  /// Returns the contents of the "front" slice as returned by
113  /// [`as_slices`][`GenericArrayDeque::as_slices`]. If the contained byte slices of the `GenericArrayDeque` are
114  /// discontiguous, multiple calls to `fill_buf` will be needed to read the entire content.
115  #[inline]
116  fn fill_buf(&mut self) -> io::Result<&[u8]> {
117    let (front, _) = self.as_slices();
118    Ok(front)
119  }
120
121  #[inline]
122  fn consume(&mut self, amt: usize) {
123    self.drain(..amt);
124  }
125}
126
127/// Write is implemented for `GenericArrayDeque<u8>` by appending to the `GenericArrayDeque`, growing it as needed.
128impl<N: ArrayLength> Write for GenericArrayDeque<u8, N> {
129  #[inline]
130  fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
131    let remaining = self.remaining_capacity();
132    if remaining == 0 || buf.is_empty() {
133      return Ok(0);
134    }
135
136    let n = remaining.min(buf.len());
137    self.extend_bytes(&buf[..n]);
138    Ok(n)
139  }
140
141  #[inline]
142  fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
143    // Behave like `write` concatenated over the buffers: write as much as
144    // fits and report the actual byte count. Returning `WriteZero` when
145    // the combined length exceeds capacity would be inconsistent with the
146    // scalar `write`, which performs partial writes.
147    let mut written = 0;
148    for buf in bufs {
149      let remaining = self.remaining_capacity();
150      if remaining == 0 {
151        break;
152      }
153      let n = remaining.min(buf.len());
154      if n == 0 {
155        continue;
156      }
157      self.extend_bytes(&buf[..n]);
158      written += n;
159      if n < buf.len() {
160        // Ran out of room mid-buffer; a further `write` would return 0.
161        break;
162      }
163    }
164    Ok(written)
165  }
166
167  #[inline]
168  fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
169    if buf.len() > self.remaining_capacity() {
170      return Err(io::Error::new(
171        io::ErrorKind::WriteZero,
172        "not enough capacity to write buffer",
173      ));
174    }
175    self.extend_bytes(buf);
176    Ok(())
177  }
178
179  #[inline]
180  fn flush(&mut self) -> io::Result<()> {
181    Ok(())
182  }
183}
184
/// Polyfill trait for [`slice::split_at_checked`], which was only stabilized
/// in Rust 1.80; lets `read_exact` stay version-agnostic.
trait SplitAt {
  #[allow(unstable_name_collisions)]
  fn split_at_checked(&self, mid: usize) -> Option<(&Self, &Self)>;
}
189
/// Polyfill trait for [`slice::split_at_mut_checked`], which was only
/// stabilized in Rust 1.80; lets `read_exact` stay version-agnostic.
trait SplitAtMut {
  #[allow(unstable_name_collisions)]
  fn split_at_mut_checked(&mut self, mid: usize) -> Option<(&mut Self, &mut Self)>;
}
194
// On Rust >= 1.80 the method is stable in std: delegate directly.
#[rustversion::since(1.80)]
impl<T> SplitAt for [T] {
  #[allow(unstable_name_collisions)]
  #[cfg_attr(not(tarpaulin), inline(always))]
  fn split_at_checked(&self, mid: usize) -> Option<(&Self, &Self)> {
    // Fully qualified so this resolves to the inherent slice method and not
    // to this trait method recursively.
    <[T]>::split_at_checked(self, mid)
  }
}
203
204#[rustversion::before(1.80)]
205impl<T> SplitAt for [T] {
206  #[allow(unstable_name_collisions)]
207  #[cfg_attr(not(tarpaulin), inline(always))]
208  fn split_at_checked(&self, mid: usize) -> Option<(&Self, &Self)> {
209    use core::slice::from_raw_parts;
210
211    let len = self.len();
212    if mid <= len {
213      // SAFETY: `0 <= mid <= self.len()`
214      Some(unsafe {
215        (
216          from_raw_parts(self.as_ptr(), mid),
217          from_raw_parts(self.as_ptr().add(mid), len - mid),
218        )
219      })
220    } else {
221      None
222    }
223  }
224}
225
// On Rust >= 1.80 the method is stable in std: delegate directly.
#[rustversion::since(1.80)]
impl<T> SplitAtMut for [T] {
  #[allow(unstable_name_collisions)]
  #[cfg_attr(not(tarpaulin), inline(always))]
  fn split_at_mut_checked(&mut self, mid: usize) -> Option<(&mut Self, &mut Self)> {
    // Fully qualified so this resolves to the inherent slice method and not
    // to this trait method recursively.
    <[T]>::split_at_mut_checked(self, mid)
  }
}
234
235#[rustversion::before(1.80)]
236impl<T> SplitAtMut for [T] {
237  #[allow(unstable_name_collisions)]
238  #[cfg_attr(not(tarpaulin), inline(always))]
239  fn split_at_mut_checked(&mut self, mid: usize) -> Option<(&mut Self, &mut Self)> {
240    use core::slice::from_raw_parts_mut;
241    let len = self.len();
242    if mid <= len {
243      let len = self.len();
244      // SAFETY: `0 <= mid <= self.len()`, so the two slices do not overlap.
245      Some(unsafe {
246        (
247          from_raw_parts_mut(self.as_mut_ptr(), mid),
248          from_raw_parts_mut(self.as_mut_ptr().add(mid), len - mid),
249        )
250      })
251    } else {
252      None
253    }
254  }
255}
256
// Unit tests for the `Read`/`BufRead`/`Write` implementations above. Each
// test builds a small deque via `push_back` (which returns `None` on success)
// and checks the io-trait behavior, including the wrap-around cases.
#[cfg(test)]
mod tests {
  use crate::{
    typenum::{U2, U4, U6, U8},
    GenericArrayDeque,
  };
  use std::{
    io::{self, BufRead, IoSlice, Read, Write},
    string::String,
    vec::Vec,
  };

  #[test]
  fn read_consumes_front_slice() {
    let mut deque = GenericArrayDeque::<u8, U8>::new();
    for byte in b"hello" {
      assert!(deque.push_back(*byte).is_none());
    }

    // `read` drains exactly the number of bytes it reports.
    let mut buf = [0u8; 3];
    let read = Read::read(&mut deque, &mut buf).unwrap();
    assert_eq!(read, 3);
    assert_eq!(&buf[..read], b"hel");
    assert_eq!(deque.into_iter().collect::<Vec<_>>(), b"lo".to_vec());
  }

  #[test]
  fn read_exact_handles_wrapped_storage() {
    // Fill, pop, and push again so the logical contents wrap around the
    // physical end of the U4 buffer.
    let mut deque = GenericArrayDeque::<u8, U4>::new();
    for byte in b"abcd" {
      assert!(deque.push_back(*byte).is_none());
    }
    assert_eq!(deque.pop_front(), Some(b'a'));
    assert!(deque.push_back(b'e').is_none());

    let mut buf = [0u8; 3];
    deque.read_exact(&mut buf).unwrap();
    assert_eq!(&buf, b"bcd");
    assert_eq!(deque.into_iter().collect::<Vec<_>>(), vec![b'e']);
  }

  #[test]
  fn read_exact_reports_eof() {
    let mut deque = GenericArrayDeque::<u8, U4>::new();
    assert!(deque.push_back(b'x').is_none());

    // Requesting more bytes than are buffered fails without consuming.
    let mut buf = [0u8; 2];
    let err = Read::read_exact(&mut deque, &mut buf).unwrap_err();
    assert_eq!(err.kind(), io::ErrorKind::UnexpectedEof);
    // The buffered byte must remain in the deque — matches `VecDeque`.
    assert_eq!(deque.len(), 1);
    assert_eq!(deque.pop_front(), Some(b'x'));
  }

  #[test]
  fn read_to_end_and_string_clear_buffer() {
    let mut deque = GenericArrayDeque::<u8, U6>::new();
    for byte in b"abc" {
      assert!(deque.push_back(*byte).is_none());
    }
    let mut buf = Vec::new();
    deque.read_to_end(&mut buf).unwrap();
    assert_eq!(buf, b"abc");
    assert!(deque.is_empty());

    for byte in b"de" {
      assert!(deque.push_back(*byte).is_none());
    }
    let mut string = String::new();
    deque.read_to_string(&mut string).unwrap();
    assert_eq!(string, "de");
    // `read_to_string` consumes the source, like the `Read` contract demands.
    assert!(deque.is_empty());

    // 0xFF is never valid in UTF-8, so `read_to_string` must reject it.
    deque.clear();
    deque.push_back(0xFF);
    let mut invalid = String::new();
    let err = deque.read_to_string(&mut invalid).unwrap_err();
    assert_eq!(err.kind(), io::ErrorKind::InvalidData);
  }

  #[test]
  fn bufread_fill_and_consume() {
    let mut deque = GenericArrayDeque::<u8, U4>::new();
    for byte in b"abcd" {
      assert!(deque.push_back(*byte).is_none());
    }

    let buf = BufRead::fill_buf(&mut deque).unwrap();
    assert_eq!(buf, b"abcd");
    BufRead::consume(&mut deque, 3);
    assert_eq!(deque.into_iter().collect::<Vec<_>>(), vec![b'd']);
  }

  #[test]
  fn write_variants_respect_capacity() {
    // Scalar `write` truncates to the remaining capacity.
    let mut deque = GenericArrayDeque::<u8, U4>::new();
    let written = Write::write(&mut deque, b"abcdef").unwrap();
    assert_eq!(written, 4);
    assert_eq!(deque.len(), 4);

    let mut deque = GenericArrayDeque::<u8, U8>::new();
    let slices = [IoSlice::new(b"ab"), IoSlice::new(b"cd")];
    assert_eq!(Write::write_vectored(&mut deque, &slices).unwrap(), 4);
    assert_eq!(deque.len(), 4);
    // When the combined length exceeds remaining capacity, `write_vectored`
    // performs a partial write (matching scalar `write`) rather than erroring.
    let overflow = [IoSlice::new(b"1234"), IoSlice::new(b"5678")];
    let written = Write::write_vectored(&mut deque, &overflow).unwrap();
    assert_eq!(written, 4);
    assert_eq!(deque.len(), 8);

    // `write_all` is all-or-nothing and surfaces `WriteZero` on overflow.
    let mut deque = GenericArrayDeque::<u8, U4>::new();
    Write::write_all(&mut deque, b"wxyz").unwrap();
    let err = Write::write_all(&mut deque, b"overflow").unwrap_err();
    assert_eq!(err.kind(), io::ErrorKind::WriteZero);

    let mut deque = GenericArrayDeque::<u8, U2>::new();
    Write::flush(&mut deque).unwrap();
  }

  // Regression: previously, `read_to_string` validated front and back slices
  // independently, so a codepoint whose bytes straddled the ring boundary
  // was rejected as InvalidData.
  #[test]
  fn read_to_string_accepts_utf8_across_ring_boundary() {
    let mut deque = GenericArrayDeque::<u8, U4>::new();
    // Push 3 padding bytes then `é`'s leading byte; rotate head so `é`
    // ends up split across the physical buffer boundary.
    for _ in 0..3 {
      assert!(deque.push_back(b'x').is_none());
    }
    assert!(deque.push_back(0xC3).is_none());
    for _ in 0..3 {
      deque.pop_front();
    }
    assert!(deque.push_back(0xA9).is_none());
    // Confirm the bytes are actually split (each half is invalid UTF-8 alone).
    let (front, back) = deque.as_slices();
    assert_eq!(front, &[0xC3]);
    assert_eq!(back, &[0xA9]);

    let mut s = String::new();
    let n = deque.read_to_string(&mut s).unwrap();
    assert_eq!(n, 2);
    assert_eq!(s, "é");
    assert!(deque.is_empty());
  }

  // Regression: `write_vectored` used to return `WriteZero` when the combined
  // buffers exceeded remaining capacity, while `write` did partial writes.
  #[test]
  fn write_vectored_does_partial_writes() {
    let mut deque = GenericArrayDeque::<u8, U4>::new();
    let slices = [IoSlice::new(b"12"), IoSlice::new(b"345")];
    let n = Write::write_vectored(&mut deque, &slices).unwrap();
    assert_eq!(n, 4);
    assert_eq!(deque.len(), 4);
    assert_eq!(deque.iter().copied().collect::<Vec<_>>(), b"1234");
  }

  #[test]
  fn read_exact_from_front_slice_only() {
    // Exercise the `split_at_mut_checked` → None arm where `buf` fits entirely
    // within the front slice.
    let mut deque = GenericArrayDeque::<u8, U4>::new();
    for byte in b"abcd" {
      assert!(deque.push_back(*byte).is_none());
    }
    let mut buf = [0u8; 2];
    deque.read_exact(&mut buf).unwrap();
    assert_eq!(&buf, b"ab");
    assert_eq!(deque.iter().copied().collect::<Vec<_>>(), b"cd");
  }

  #[test]
  fn write_on_full_and_empty_buf_returns_zero() {
    // `remaining == 0` branch.
    let mut deque = GenericArrayDeque::<u8, U4>::new();
    for byte in b"abcd" {
      assert!(deque.push_back(*byte).is_none());
    }
    assert_eq!(Write::write(&mut deque, b"xx").unwrap(), 0);

    // `buf.is_empty()` branch.
    let mut deque = GenericArrayDeque::<u8, U4>::new();
    assert_eq!(Write::write(&mut deque, b"").unwrap(), 0);
  }

  #[test]
  fn write_vectored_skips_empty_slices() {
    // Exercise the `n == 0 { continue }` arm without hitting the full-capacity
    // break first.
    let mut deque = GenericArrayDeque::<u8, U4>::new();
    let slices = [IoSlice::new(b""), IoSlice::new(b"ab")];
    let n = Write::write_vectored(&mut deque, &slices).unwrap();
    assert_eq!(n, 2);
    assert_eq!(deque.iter().copied().collect::<Vec<_>>(), b"ab");
  }
}