use core::{
    fmt,
    ops::{Deref, DerefMut},
};

use std::io;

use tracing::trace;
use xitca_io::bytes::{Buf, BytesMut};
use xitca_unsafe_collection::{
    bytes::{read_buf, BufList, ChunkVectoredUninit},
    uninit::uninit_array,
};

pub use xitca_io::bytes::{BufInterest, BufRead, BufWrite};

/// a read buffer with const generic guarded max size limit. bytes read from io are collected
/// here until LIMIT is reached.
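///
/// a minimal usage sketch, assuming a blocking reader and that the `BufRead` trait
/// re-exported by this module is in scope:
/// ```ignore
/// use std::io::Cursor;
///
/// // read buffer capped at 4096 bytes.
/// let mut buf = ReadBuf::<4096>::new();
/// let mut io = Cursor::new(b"hello".to_vec());
/// // reads from io until it would block, hits EOF or LIMIT is reached.
/// buf.do_io(&mut io).unwrap();
/// assert_eq!(&buf[..], b"hello");
/// ```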
#[derive(Debug)]
pub struct ReadBuf<const LIMIT: usize>(BytesMut);

impl<const LIMIT: usize> ReadBuf<LIMIT> {
    #[inline(always)]
    pub fn new() -> Self {
        Self(BytesMut::new())
    }

    #[inline(always)]
    pub fn into_inner(self) -> BytesMut {
        self.0
    }
}

impl<const LIMIT: usize> From<BytesMut> for ReadBuf<LIMIT> {
    fn from(bytes: BytesMut) -> Self {
        Self(bytes)
    }
}

impl<const LIMIT: usize> Default for ReadBuf<LIMIT> {
    fn default() -> Self {
        Self::new()
    }
}

impl<const LIMIT: usize> Deref for ReadBuf<LIMIT> {
    type Target = BytesMut;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<const LIMIT: usize> DerefMut for ReadBuf<LIMIT> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

impl<const LIMIT: usize> BufInterest for ReadBuf<LIMIT> {
    #[inline]
    fn want_write_buf(&self) -> bool {
        self.0.remaining() < LIMIT
    }

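    // ReadBuf only covers the read side of io so write io interest is never
    // queried on it. this method is intentionally left unimplemented.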
    fn want_write_io(&self) -> bool {
        unimplemented!()
    }
}

impl<const LIMIT: usize> BufRead for ReadBuf<LIMIT> {
    fn do_io<Io>(&mut self, io: &mut Io) -> io::Result<()>
    where
        Io: io::Read,
    {
        let len = self.0.len();
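        // remember the starting length so an EOF or error that made no progress
        // can be surfaced to the caller while partial progress is kept.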
        loop {
            match read_buf(io, &mut self.0) {
                Ok(0) => {
                    if self.0.len() == len {
                        return Err(io::ErrorKind::UnexpectedEof.into());
                    }
                    break;
                }
                Ok(_) => {
                    if !self.want_write_buf() {
                        trace!(
                            "READ_BUF_LIMIT: {LIMIT} bytes reached. Entering backpressure(no log event for recovery)."
                        );
                        break;
                    }
                }
                Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => break,
                Err(e) => {
                    if self.0.len() == len {
                        return Err(e);
                    }
                    break;
                }
            }
        }
        Ok(())
    }
}

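/// a writable buffer with const generic guarded max size limit, delegating the
/// actual buffering to [xitca_io::bytes::WriteBuf].
///
/// a minimal usage sketch of the closure based write API (the error type is
/// chosen for illustration only):
/// ```ignore
/// let mut buf = WriteBuf::<4096>::new();
/// // the closure receives the inner BytesMut and appends encoded bytes to it.
/// buf.write_buf(|b| {
///     b.extend_from_slice(b"HTTP/1.1 200 OK\r\n\r\n");
///     Ok::<_, std::convert::Infallible>(())
/// })
/// .unwrap();
/// ```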
#[derive(Default)]
pub struct WriteBuf<const LIMIT: usize>(xitca_io::bytes::WriteBuf);

impl<const LIMIT: usize> WriteBuf<LIMIT> {
    #[inline]
    pub fn new() -> Self {
        Self(xitca_io::bytes::WriteBuf::new())
    }

    #[cfg(test)]
    pub fn buf(&self) -> &[u8] {
        self.0.buf()
    }
}

impl<const LIMIT: usize> BufInterest for WriteBuf<LIMIT> {
    #[inline]
    fn want_write_buf(&self) -> bool {
        self.0.len() < LIMIT
    }

    #[inline]
    fn want_write_io(&self) -> bool {
        self.0.want_write_io()
    }
}

impl<const LIMIT: usize> BufWrite for WriteBuf<LIMIT> {
    #[inline]
    fn write_buf<F, T, E>(&mut self, func: F) -> Result<T, E>
    where
        F: FnOnce(&mut BytesMut) -> Result<T, E>,
    {
        self.0.write_buf(func)
    }

    #[inline]
    fn do_io<Io: io::Write>(&mut self, io: &mut Io) -> io::Result<()> {
        self.0.do_io(io)
    }
}

// an internal buffer to collect writes before flushes
pub struct ListWriteBuf<B, const LIMIT: usize> {
    // re-usable buffer that holds the response head.
    // after the head has been written it's split off and pushed to the list.
    buf: BytesMut,
    // deque of user buffers when the queue write strategy is used.
    list: BufList<B, BUF_LIST_CNT>,
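    // set when the list has been fully drained so the io gets flushed before
    // write interest is given up.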
    want_flush: bool,
}

impl<B: Buf, const LIMIT: usize> Default for ListWriteBuf<B, LIMIT> {
    fn default() -> Self {
        Self {
            buf: BytesMut::new(),
            list: BufList::new(),
            want_flush: false,
        }
    }
}

impl<B: Buf, const LIMIT: usize> ListWriteBuf<B, LIMIT> {
    /// split the buf field off from Self.
    /// this is often coupled with the [BufWrite::write_buf] method to obtain what has been
    /// written to the buf.
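    ///
    /// a minimal sketch of the intended call pattern (`encode_head` and the
    /// conversion of BytesMut into the list item type are assumptions for
    /// illustration):
    /// ```ignore
    /// // encode the response head into the re-usable BytesMut.
    /// list_buf.write_buf(|buf| encode_head(buf))?;
    /// // split the encoded head off and queue it behind earlier buffers.
    /// let head = list_buf.split_buf();
    /// list_buf.buffer(head);
    /// ```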
    pub fn split_buf(&mut self) -> BytesMut {
        self.buf.split()
    }

    /// add new buf to list.
    ///
    /// # Panics
    /// when more items are pushed to the list than its capacity. ListWriteBuf is strictly bounded.
    pub fn buffer<BB: Buf + Into<B>>(&mut self, buf: BB) {
        self.list.push(buf.into());
        // cross reference with the <Self as BufWrite>::write_buf method.
        self.want_flush = false;
    }
}

impl<B: Buf, const LIMIT: usize> fmt::Debug for ListWriteBuf<B, LIMIT> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ListBuf")
            .field("remaining", &self.list.remaining())
            .finish()
    }
}

// buf list is forced into backpressure when it reaches this length.
// 32 is chosen to allow a max of 16 pipelined http requests, each with a head and a single body item.
const BUF_LIST_CNT: usize = 32;

impl<B, const LIMIT: usize> BufInterest for ListWriteBuf<B, LIMIT>
where
    B: Buf + ChunkVectoredUninit,
{
    #[inline]
    fn want_write_buf(&self) -> bool {
        self.list.remaining() < LIMIT && !self.list.is_full()
    }

    #[inline]
    fn want_write_io(&self) -> bool {
        self.list.remaining() != 0 || self.want_flush
    }
}

impl<B, const LIMIT: usize> BufWrite for ListWriteBuf<B, LIMIT>
where
    B: Buf + ChunkVectoredUninit,
{
    fn write_buf<F, T, E>(&mut self, func: F) -> Result<T, E>
    where
        F: FnOnce(&mut BytesMut) -> Result<T, E>,
    {
        // in ListWriteBuf the BytesMut is only used as temporary storage for the buffer.
        // self.want_flush is only set to false when ListWriteBuf::buffer is called.
        func(&mut self.buf).map_err(|e| {
            self.buf.clear();
            e
        })
    }

    fn do_io<Io: io::Write>(&mut self, io: &mut Io) -> io::Result<()> {
        let queue = &mut self.list;
        loop {
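            // a flush is only wanted after the queue has been fully drained.
            // once it succeeds (or would block) this round of io is done.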
            if self.want_flush {
                match io::Write::flush(io) {
                    Ok(_) => self.want_flush = false,
                    Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {}
                    Err(e) => return Err(e),
                }
                break;
            }

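            // gather the queued chunks into an uninitialized io slice array and
            // write them out with a single vectored call.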
            let mut buf = uninit_array::<_, BUF_LIST_CNT>();
            let slice = queue.chunks_vectored_uninit_into_init(&mut buf);
            match io.write_vectored(slice) {
                Ok(0) => return write_zero(self.want_write_io()),
                Ok(n) => {
                    queue.advance(n);
                    if queue.is_empty() {
                        self.want_flush = true;
                    }
                }
                Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => break,
                Err(e) => return Err(e),
            }
        }

        Ok(())
    }
}

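// a zero length write is treated as an error. reaching here while the caller
// had no write interest is a usage bug, hence the assert.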
#[cold]
#[inline(never)]
fn write_zero(want_write: bool) -> io::Result<()> {
    assert!(
        want_write,
        "BufWrite::write must be called after BufInterest::want_write return true."
    );
    Err(io::ErrorKind::WriteZero.into())
}