protosocket 1.0.0

Message-oriented, non-blocking TCP stream
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
use std::{
    collections::VecDeque,
    future::Future,
    io::IoSlice,
    pin::{pin, Pin},
    task::{Context, Poll},
};

use bytes::Buf;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};

use crate::{
    encoding::Codec,
    interrupted,
    message_reactor::{MessageReactor, ReactorStatus},
    would_block, Decoder, DeserializeError, Encoder,
};

/// A bidirectional, message-oriented AsyncRead/AsyncWrite stream wrapper.
///
/// Connections are Futures that you spawn.
/// To send messages, you push them into the outbound message stream.
/// To receive messages, you implement a `MessageReactor`.
///
/// Inbound messages are not wrapped in a Stream, in order to avoid an
/// extra layer of async buffering. If you need to buffer messages or
/// forward them to a Stream, you can do so in the reactor. If you can
/// process them very quickly, you can handle them inline in the reactor
/// callback `on_messages`, which will let you reply as soon as possible.
pub struct Connection<
    // Bidirectional Stream type to use for this connection. Like `tokio::net::TcpStream`.
    TStream: tokio::io::AsyncRead + tokio::io::AsyncWrite + 'static,
    // The wire / message codec for this connection.
    TCodec: Codec,
    // The message reactor for this connection.
    TReactor: MessageReactor<Inbound = <TCodec as Decoder>::Message, Outbound = <TCodec as Encoder>::Message>,
> {
    // The underlying bidirectional byte stream.
    stream: TStream,
    // Queue of caller-submitted outbound messages awaiting serialization.
    outbound_messages: spillway::Receiver<TReactor::LogicalOutbound>,
    // Serialized buffers queued for vectored writes to `stream`.
    send_buffer: VecDeque<<TCodec as Encoder>::Serialized>,
    // One past the last unconsumed inbound byte in `receive_buffer`.
    receive_buffer_unread_index: usize,
    // Raw inbound bytes; grown in `buffer_allocation_increment` steps,
    // bounded by `max_buffer_length`.
    receive_buffer: Vec<u8>,
    // Upper bound on `receive_buffer` growth; messages longer than this
    // reset the connection (see read_inbound_messages_and_react).
    max_buffer_length: usize,
    // Maximum serialized messages held in `send_buffer` before the
    // serializer stops pulling from `outbound_messages` (backpressure).
    max_queued_send_messages: usize,
    // Step size by which `receive_buffer` is enlarged when it runs low.
    buffer_allocation_increment: usize,
    // Encoder/decoder for the wire format.
    codec: TCodec,
    // User callback object for inbound/outbound message handling.
    reactor: TReactor,
}

impl<
        TStream: tokio::io::AsyncRead + tokio::io::AsyncWrite + 'static,
        TCodec: Codec,
        TReactor: MessageReactor<
            Inbound = <TCodec as Decoder>::Message,
            Outbound = <TCodec as Encoder>::Message,
        >,
    > std::fmt::Display for Connection<TStream, TCodec, TReactor>
{
    /// Render a one-line summary of buffer occupancy, used by the log
    /// statements throughout this module.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Total unsent bytes across all queued send buffers.
        let write_length: usize = self
            .send_buffer
            .iter()
            .map(|buffer| buffer.remaining())
            .sum();
        let write_queue = self.send_buffer.len();
        let read_capacity = self.receive_buffer.len();
        let read_end = self.receive_buffer_unread_index;
        write!(f, "Connection: {{read{{end: {read_end}, capacity: {read_capacity}}}, write{{queue: {write_queue}, length: {write_length}}} }}")
    }
}

// Connection is declared Unpin unconditionally (regardless of whether
// TStream or TReactor are Unpin).
// NOTE(review): `Future::poll` structurally pins `reactor` via
// `map_unchecked_mut`, while this impl lets callers move the Connection
// between polls. That is only sound if no TReactor implementation relies
// on address stability across polls — confirm against MessageReactor's
// contract.
impl<
        TStream: tokio::io::AsyncRead + tokio::io::AsyncWrite + 'static,
        TCodec: Codec,
        TReactor: MessageReactor<
            Inbound = <TCodec as Decoder>::Message,
            Outbound = <TCodec as Encoder>::Message,
        >,
    > Unpin for Connection<TStream, TCodec, TReactor>
{
}

impl<
        TStream: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + 'static,
        TCodec: Codec,
        TReactor: MessageReactor<
            Inbound = <TCodec as Decoder>::Message,
            Outbound = <TCodec as Encoder>::Message,
        >,
    > Future for Connection<TStream, TCodec, TReactor>
{
    type Output = ();

    /// 1. Check for read readiness and read into the receive_buffer (up to max_buffer_length).
    /// 2. Dispatch messages as they are deserialized using the user-provided MessageReactor.
    /// 3. Poll the MessageReactor - do it here so we can respond to trivial messages _in this poll_.
    /// 4. Serialize messages from outbound_messages queue, up to max_queued_send_messages.
    /// 5. Send serialized messages.
    ///
    /// The future completes (returns Ready) when the stream closes, an I/O
    /// error occurs, or the reactor requests disconnection.
    // #[tracing::instrument(skip_all, fields(self.name = %self.name))]
    #[cfg_attr(feature = "tracing", tracing::instrument(skip_all))]
    fn poll(mut self: Pin<&mut Self>, context: &mut Context<'_>) -> Poll<Self::Output> {
        // Step 1-3: Receive messages and react to them.
        // Ready here means the read side closed or errored: end the future.
        log::trace!("polling receive");
        if self.as_mut().poll_receive(context).is_ready() {
            return Poll::Ready(());
        }

        // SAFETY: This is a structural pin. If I'm not moved then neither is this future.
        let structurally_pinned_reactor =
            unsafe { self.as_mut().map_unchecked_mut(|me| &mut me.reactor) };
        // The reactor gets its own poll so it can do periodic/background work;
        // a Break return terminates the connection.
        if structurally_pinned_reactor.poll(context).is_break() {
            log::debug!("reactor requested disconnect");
            return Poll::Ready(());
        }

        // Steps 4-5: serialize queued outbound messages and flush to the stream.
        log::trace!("polling write");
        match self.poll_writev_buffers(context) {
            Ok(false) => {
                // Not done, but a waker is registered with the stream or queue.
                log::trace!("write stream is empty or registered for wake when writable");
            }
            Ok(true) => {
                log::debug!("write stream closed");
                return Poll::Ready(());
            }
            Err(e) => {
                log::warn!("error while writing to tcp stream: {e:?}");
                return Poll::Ready(());
            }
        }

        // Still live: wakeups come from the stream and the outbound channel.
        Poll::Pending
    }
}

impl<
        TStream: tokio::io::AsyncRead + tokio::io::AsyncWrite + 'static,
        TCodec: Codec,
        TReactor: MessageReactor<
            Inbound = <TCodec as Decoder>::Message,
            Outbound = <TCodec as Encoder>::Message,
        >,
    > Drop for Connection<TStream, TCodec, TReactor>
{
    /// Log at debug level when the connection future is dropped, so
    /// connection teardown is visible in traces.
    fn drop(&mut self) {
        log::debug!("connection dropped");
    }
}

/// Outcome of one pass over the inbound side: either a stream read
/// (`poll_read_from_stream`) or a decode-and-dispatch pass
/// (`read_inbound_messages_and_react`).
#[derive(Debug)]
enum ReadBufferState {
    /// Done consuming until external liveness is signaled
    Pending,
    /// Need to eagerly wake up again
    MoreToRead,
    /// Connection is disconnected or is to be disconnected
    Disconnected,
    /// Disconnected with an io error
    Error(std::io::Error),
}

impl<
        TStream: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + 'static,
        TCodec: Codec,
        TReactor: MessageReactor<
            Inbound = <TCodec as Decoder>::Message,
            Outbound = <TCodec as Encoder>::Message,
        >,
    > Connection<TStream, TCodec, TReactor>
{
    /// Create a new protosocket Connection with the given stream and reactor.
    ///
    /// Probably you are interested in the `protosocket-server` or `protosocket-prost` crates.
    #[allow(clippy::type_complexity, clippy::too_many_arguments)]
    pub fn new(
        stream: TStream,
        codec: TCodec,
        max_buffer_length: usize,
        buffer_allocation_increment: usize,
        max_queued_send_messages: usize,
        outbound_messages: spillway::Receiver<TReactor::LogicalOutbound>,
        reactor: TReactor,
    ) -> Self {
        // outbound must be queued so it can be called from any context
        Self {
            stream,
            outbound_messages,
            send_buffer: Default::default(),
            receive_buffer: Vec::new(),
            max_buffer_length,
            max_queued_send_messages,
            receive_buffer_unread_index: 0,
            buffer_allocation_increment,
            codec,
            reactor,
        }
    }

    /// ensure buffer state and read from the inbound stream
    #[cfg_attr(feature = "tracing", tracing::instrument(skip_all))]
    fn poll_read_inbound(&mut self, context: &mut Context<'_>) -> ReadBufferState {
        if self.receive_buffer.len() < self.max_buffer_length
            && self.receive_buffer.len() - self.receive_buffer_unread_index
                < self.buffer_allocation_increment
        {
            self.receive_buffer.resize(
                self.receive_buffer.len() + self.buffer_allocation_increment,
                0,
            );
        }

        if 0 < self.receive_buffer.len() - self.receive_buffer_unread_index {
            // We can (maybe) read from the connection.
            self.poll_read_from_stream(context)
        } else {
            log::debug!("receive is full {self}");
            ReadBufferState::MoreToRead
        }
    }

    /// process the receive buffer, deserializing bytes into messages
    #[cfg_attr(feature = "tracing", tracing::instrument(skip_all))]
    fn read_inbound_messages_and_react(&mut self) -> ReadBufferState {
        let mut buffer_cursor = 0;
        let state = loop {
            if buffer_cursor == self.receive_buffer_unread_index {
                break ReadBufferState::Pending;
            } else if self.receive_buffer_unread_index < buffer_cursor {
                break ReadBufferState::Error(std::io::Error::new(
                    std::io::ErrorKind::InvalidData,
                    "buffer cursor is beyond the end of the receive buffer. Deserializer must not consume more than the buffer length",
                ));
            }

            let buffer = &self.receive_buffer[buffer_cursor..self.receive_buffer_unread_index];
            log::trace!("decode {buffer:?}");
            match self.codec.decode(buffer) {
                Ok((length, message)) => {
                    buffer_cursor += length;
                    if self.reactor.on_inbound_message(message) == ReactorStatus::Disconnect {
                        log::debug!("reactor requested disconnect");
                        return ReadBufferState::Disconnected;
                    }
                }
                Err(e) => match e {
                    DeserializeError::IncompleteBuffer { next_message_size } => {
                        if self.max_buffer_length < next_message_size {
                            log::error!("tried to receive message that is too long. Resetting connection - max: {}, requested: {}", self.max_buffer_length, next_message_size);
                            return ReadBufferState::Disconnected;
                        }
                        log::debug!("waiting for the next message of length {next_message_size}");
                        break ReadBufferState::Pending;
                    }
                    DeserializeError::InvalidBuffer => {
                        log::error!("message was invalid - broken stream");
                        return ReadBufferState::Disconnected;
                    }
                    DeserializeError::SkipMessage { distance } => {
                        if self.receive_buffer_unread_index - buffer_cursor < distance {
                            log::trace!("cannot skip yet, need to read more. Skipping: {distance}, remaining:{}", self.receive_buffer_unread_index - buffer_cursor);
                            break ReadBufferState::Pending;
                        }
                        log::debug!("skipping message of length {distance}");
                        buffer_cursor += distance;
                    }
                },
            };
        };
        if buffer_cursor != 0 && buffer_cursor == self.receive_buffer_unread_index {
            log::trace!("read buffer complete - resetting: {self}");
            self.receive_buffer_unread_index = 0;
        } else if buffer_cursor != 0 {
            log::trace!("read buffer partially consumed - shifting: {self}");
            self.receive_buffer
                .copy_within(buffer_cursor..self.receive_buffer_unread_index, 0);
            self.receive_buffer_unread_index -= buffer_cursor;
        }
        state
    }

    /// read from the TcpStream
    #[cfg_attr(feature = "tracing", tracing::instrument(skip_all))]
    fn poll_read_from_stream(&mut self, context: &mut Context<'_>) -> ReadBufferState {
        let mut buffer = ReadBuf::new(&mut self.receive_buffer[self.receive_buffer_unread_index..]);
        match pin!(&mut self.stream).poll_read(context, &mut buffer) {
            Poll::Ready(Ok(_)) => {
                let distance = buffer.filled().len();
                if distance == 0 {
                    log::debug!("read 0 bytes, stream is closed");
                    ReadBufferState::Disconnected
                } else {
                    self.receive_buffer_unread_index += distance;
                    log::trace!(
                        "read from stream: {distance}b, total: {}b",
                        self.receive_buffer_unread_index
                    );
                    ReadBufferState::MoreToRead
                }
            }
            // Would block "errors" are the OS's way of saying that the
            // connection is not actually ready to perform this I/O operation.
            Poll::Ready(Err(ref err)) if would_block(err) => {
                log::debug!("read everything. No longer readable");
                ReadBufferState::Pending
            }
            Poll::Ready(Err(ref err)) if interrupted(err) => {
                log::trace!("interrupted, so try again later");
                ReadBufferState::MoreToRead
            }
            Poll::Ready(Err(err)) => {
                log::warn!("error while reading from tcp stream: {err:?}");
                ReadBufferState::Error(err)
            }
            Poll::Pending => {
                log::debug!("pending on read stream");
                ReadBufferState::Pending
            }
        }
    }

    /// This serializes work-in-progress messages and moves them over into the write queue
    #[cfg_attr(feature = "tracing", tracing::instrument(skip_all))]
    fn poll_serialize_outbound_messages(&mut self, context: &mut Context<'_>) -> Poll<()> {
        let max_outbound = self.max_queued_send_messages - self.send_buffer.len();
        if max_outbound == 0 {
            log::debug!("send is full: {self}");
            // pending on a network status event
            return Poll::Pending;
        }

        let start_len = self.send_buffer.len();
        for _ in 0..max_outbound {
            let message = match self.outbound_messages.poll_next(context) {
                Poll::Pending => {
                    log::debug!("no more messages to serialize, and we are pending for more");
                    break;
                }
                Poll::Ready(None) => {
                    log::info!("outbound message channel was closed");
                    return Poll::Ready(());
                }
                Poll::Ready(Some(next)) => next,
            };
            let message = self.reactor.on_outbound_message(message);
            let buffer = self.codec.encode(message);
            log::trace!(
                "serialized message and enqueueing outbound buffer: {}b",
                buffer.remaining()
            );
            // queue up a writev
            self.send_buffer.push_back(buffer);
        }
        let new_len = self.send_buffer.len();
        if start_len != new_len {
            log::debug!("serialized {} messages", new_len - start_len);
        }
        // This portion of poll is either pending for more messages, or it is the network's turn to be pending.
        // If the network is ready, it will push buffers and re-notify serialization.
        Poll::Pending
    }

    /// Send buffers to the tcp stream, and recycle them if they are fully written
    #[cfg_attr(feature = "tracing", tracing::instrument(skip_all))]
    fn poll_writev_buffers(
        &mut self,
        context: &mut Context<'_>,
    ) -> std::result::Result<bool, std::io::Error> {
        loop {
            if self.poll_serialize_outbound_messages(context).is_ready() {
                log::debug!("outbound channel closed");
                return Ok(true);
            }
            break if self.send_buffer.is_empty() {
                log::debug!("send buffer is empty");
                Ok(false)
            } else {
                // I need to figure out how to get this from the os rather than hardcoding.
                // 16 is the lowest I've seen mention of, and I've seen 1024 more commonly.
                const UIO_MAXIOV: usize = 256;

                // Use chunks_vectored rather than chunk() to correctly handle
                // Buf implementations that aren't backed by a single contiguous slice.
                let mut stack_buffers = [IoSlice::new(&[]); UIO_MAXIOV];
                let mut filled = 0;
                for buf in self.send_buffer.iter() {
                    if UIO_MAXIOV <= filled {
                        break;
                    }
                    let n = buf.chunks_vectored(&mut stack_buffers[filled..]);
                    if n == 0 {
                        break;
                    }
                    filled += n;
                }
                let buffers = &stack_buffers[..filled];

                #[cfg(feature = "tracing")]
                let span = tracing::span!(tracing::Level::INFO, "writing", buffers = buffers.len());
                #[cfg(feature = "tracing")]
                let span_guard = span.enter();
                let poll = pin!(&mut self.stream).poll_write_vectored(context, buffers);
                #[cfg(feature = "tracing")]
                drop(span_guard);
                match poll {
                    Poll::Pending => {
                        log::debug!("writev not ready - waiting for wake");
                        Ok(false)
                    }
                    Poll::Ready(Ok(0)) => {
                        log::info!("write stream was closed");
                        Ok(true)
                    }
                    Poll::Ready(Ok(written)) => {
                        log::debug!("writev sent {written}");
                        self.advance_send_buffers(written);
                        // we need to go around again to make sure we're either done writing or pending
                        continue;
                    }
                    // Would block "errors" are the OS's way of saying that the
                    // connection is not actually ready to perform this I/O operation.
                    Poll::Ready(Err(ref err)) if would_block(err) => {
                        log::debug!("would block - no longer writable");
                        continue;
                    }
                    Poll::Ready(Err(ref err)) if interrupted(err) => {
                        log::debug!("write interrupted - try again later");
                        continue;
                    }
                    // other errors terminate the stream
                    Poll::Ready(Err(err)) => {
                        log::warn!(
                            "error while writing to tcp stream: {err:?}, buffers: {}, {}b: {:?}",
                            buffers.len(),
                            buffers.iter().map(|b| b.len()).sum::<usize>(),
                            buffers.iter().map(|b| b.len()).collect::<Vec<_>>()
                        );
                        Err(err)
                    }
                }
            };
        }
    }

    /// Discard all written buffers
    #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
    fn advance_send_buffers(&mut self, total_written: usize) {
        let mut written = total_written;
        while 0 < written {
            if let Some(mut front) = self.send_buffer.pop_front() {
                let remaining = front.remaining();
                if remaining <= written {
                    written -= remaining;
                    log::trace!("returning consumed buffer after sending final {remaining}b");
                    self.codec.return_buffer(front);
                } else {
                    // Walk the buffer forward. It needs to be the next bytes on the wire, so we'll put it back in front.
                    // Partial buffer consumption is relatively uncommon, but it definitely happens.
                    log::debug!("after writing {total_written}b, advancing partially written buffer of {remaining}b by {written}b");
                    front.advance(written);
                    self.send_buffer.push_front(front);
                    break;
                }
            } else {
                log::error!("rotated all buffers but {written} bytes unaccounted for");
                break;
            }
        }
    }

    #[cfg_attr(feature = "tracing", tracing::instrument(skip_all))]
    fn poll_receive(mut self: Pin<&mut Self>, context: &mut Context<'_>) -> Poll<()> {
        loop {
            match self.poll_read_inbound(context) {
                ReadBufferState::Pending => {
                    log::debug!("consumed all that I can from the read stream for now {self}");
                    return Poll::Pending;
                }
                ReadBufferState::MoreToRead => {
                    log::debug!("more to read");
                    self.read_inbound_messages_and_react();
                    continue;
                }
                ReadBufferState::Disconnected => {
                    log::info!("read connection closed");
                    return Poll::Ready(());
                }
                ReadBufferState::Error(e) => {
                    log::warn!("error while reading from tcp stream: {e:?}");
                    return Poll::Ready(());
                }
            }
        }
    }
}