// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use crate::{
    assume,
    buffer::{
        reader::{storage::Chunk, Storage},
        writer,
    },
    ensure,
};
use core::cmp::Ordering;

/// Implementation of [`Storage`] that delegates to a type implementing [`bytes::Buf`].
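///
/// The wrapper defers advancing the underlying [`bytes::Buf`] until the next read
/// operation (or drop), so the chunks it returns can stay borrowed from the
/// current chunk.
///
/// An illustrative sketch (not taken from the crate's tests; it relies on `&[u8]`
/// implementing [`bytes::Buf`]):
///
/// ```ignore
/// let mut src: &[u8] = b"hello world";
/// let mut reader = Buf::new(&mut src);
///
/// // borrows up to 5 bytes from the current chunk (here, b"hello")
/// let chunk = reader.read_chunk(5).unwrap();
///
/// // the underlying buf is only advanced on the next call or on drop
/// drop(chunk);
/// drop(reader);
/// assert_eq!(src, b" world");
/// ```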
pub struct Buf<'a, B: bytes::Buf> {
    buf: &'a mut B,
    /// Tracks the number of bytes that have already been handed out to the caller
    /// but not yet advanced in the underlying [`bytes::Buf`]
    pending: usize,
}

impl<'a, B> Buf<'a, B>
where
    B: bytes::Buf,
{
    #[inline]
    pub fn new(buf: &'a mut B) -> Self {
        Self { buf, pending: 0 }
    }

    /// Advances the underlying [`bytes::Buf`] past any pending bytes that have already been read
    #[inline]
    fn commit_pending(&mut self) {
        ensure!(self.pending > 0);
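        // SAFETY: `pending` is only ever set to a length taken from the current
        // chunk (see `read_chunk` and `partial_copy_into`), so it can exceed
        // neither `chunk().len()` nor `remaining()`.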
        unsafe {
            assume!(self.buf.remaining() >= self.pending);
            assume!(self.buf.chunk().len() >= self.pending);
        }
        self.buf.advance(self.pending);
        self.pending = 0;
    }
}

impl<'a, B> Storage for Buf<'a, B>
where
    B: bytes::Buf,
{
    type Error = core::convert::Infallible;

    #[inline]
    fn buffered_len(&self) -> usize {
        unsafe {
            assume!(self.buf.remaining() >= self.pending);
            assume!(self.buf.chunk().len() >= self.pending);
        }
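        // report only the bytes that have not yet been handed out as pending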
        self.buf.remaining() - self.pending
    }

    #[inline]
    fn read_chunk(&mut self, watermark: usize) -> Result<Chunk, Self::Error> {
        self.commit_pending();
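        // borrow up to `watermark` bytes from the current chunk and record the
        // length as pending; the underlying buf is only advanced on the next
        // operation (or on drop), which keeps the returned chunk valid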
        let chunk = self.buf.chunk();
        let len = chunk.len().min(watermark);
        self.pending = len;
        Ok(chunk[..len].into())
    }

    #[inline]
    fn partial_copy_into<Dest>(&mut self, dest: &mut Dest) -> Result<Chunk, Self::Error>
    where
        Dest: writer::Storage + ?Sized,
    {
        self.commit_pending();

        ensure!(dest.has_remaining_capacity(), Ok(Chunk::empty()));

        loop {
            let chunk_len = self.buf.chunk().len();

            if chunk_len == 0 {
                debug_assert_eq!(
                    self.buf.remaining(),
                    0,
                    "buf returned empty chunk with remaining bytes"
                );
                return Ok(Chunk::empty());
            }

            match chunk_len.cmp(&dest.remaining_capacity()) {
                // if there's more chunks left, then copy this one out and keep going
                Ordering::Less if self.buf.remaining() > chunk_len => {
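                    // if the destination accepts `Bytes` directly, convert via
                    // `copy_to_bytes` (zero-copy for `Bytes`-backed buffers);
                    // otherwise copy the slice and advance manually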
                    if Dest::SPECIALIZES_BYTES {
                        let chunk = self.buf.copy_to_bytes(chunk_len);
                        dest.put_bytes(chunk);
                    } else {
                        dest.put_slice(self.buf.chunk());
                        self.buf.advance(chunk_len);
                    }
                    continue;
                }
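                // this is either the last chunk or it exactly fills `dest`:
                // return it as a borrowed chunk and defer the advance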
                Ordering::Less | Ordering::Equal => {
                    let chunk = self.buf.chunk();
                    self.pending = chunk.len();
                    return Ok(chunk.into());
                }
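                // the chunk is larger than `dest`'s remaining capacity: return
                // only the part that fits and mark it as pending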
                Ordering::Greater => {
                    let len = dest.remaining_capacity();
                    let chunk = &self.buf.chunk()[..len];
                    self.pending = len;
                    return Ok(chunk.into());
                }
            }
        }
    }

    #[inline]
    fn copy_into<Dest>(&mut self, dest: &mut Dest) -> Result<(), Self::Error>
    where
        Dest: writer::Storage + ?Sized,
    {
        self.commit_pending();

        loop {
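            // copy as much of each chunk as fits into `dest`, until either the
            // source or the destination is exhausted; nothing is returned
            // borrowed here, so the underlying buf can be advanced eagerly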
            let chunk = self.buf.chunk();
            let len = chunk.len().min(dest.remaining_capacity());

            ensure!(len > 0, Ok(()));

            if Dest::SPECIALIZES_BYTES {
                let chunk = self.buf.copy_to_bytes(len);
                dest.put_bytes(chunk);
            } else {
                dest.put_slice(&chunk[..len]);
                self.buf.advance(len);
            }
        }
    }
}

impl<'a, B> Drop for Buf<'a, B>
where
    B: bytes::Buf,
{
    #[inline]
    fn drop(&mut self) {
        // make sure we advance the consumed bytes on drop
        self.commit_pending();
    }
}