s2-api 0.27.17

API types for S2, the durable streams API.

Documentation
#[cfg(feature = "axum")]
pub mod extract;

pub mod json;
pub mod proto;
pub mod s2s;
pub mod sse;

use std::time::Duration;

use futures::stream::BoxStream;
use itertools::Itertools as _;
use s2_common::{
    encryption::EncryptionSpec,
    record,
    types::{
        self,
        stream::{StreamName, StreamNamePrefix, StreamNameStartAfter},
    },
};
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;

use super::config::StreamConfig;
use crate::{data::Format, mime::JsonOrProto};

#[rustfmt::skip]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))]
/// Summary of a stream, as surfaced by the listing API.
pub struct StreamInfo {
    /// Stream name.
    pub name: StreamName,
    /// Creation time in RFC 3339 format.
    #[serde(with = "time::serde::rfc3339")]
    pub created_at: OffsetDateTime,
    /// Deletion time in RFC 3339 format, if the stream is being deleted.
    #[serde(with = "time::serde::rfc3339::option")]
    pub deleted_at: Option<OffsetDateTime>,
}

impl From<types::stream::StreamInfo> for StreamInfo {
    fn from(value: types::stream::StreamInfo) -> Self {
        Self {
            name: value.name,
            created_at: value.created_at,
            deleted_at: value.deleted_at,
        }
    }
}

#[rustfmt::skip]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "utoipa", derive(utoipa::IntoParams))]
#[cfg_attr(feature = "utoipa", into_params(parameter_in = Query))]
/// Query parameters for listing streams in a basin.
pub struct ListStreamsRequest {
    /// Filter to streams whose names begin with this prefix.
    #[cfg_attr(feature = "utoipa", param(value_type = String, default = "", required = false))]
    pub prefix: Option<StreamNamePrefix>,
    /// Filter to streams whose names lexicographically start after this string.
    /// It must be greater than or equal to the `prefix` if specified.
    #[cfg_attr(feature = "utoipa", param(value_type = String, default = "", required = false))]
    pub start_after: Option<StreamNameStartAfter>,
    /// Number of results, up to a maximum of 1000.
    #[cfg_attr(feature = "utoipa", param(value_type = usize, maximum = 1000, default = 1000, required = false))]
    pub limit: Option<usize>,
}

// Generates conversions between `ListStreamsRequest` and the internal
// list-request types — see `impl_list_request_conversions!` in the parent
// module for the exact impls produced.
super::impl_list_request_conversions!(
    ListStreamsRequest,
    types::stream::StreamNamePrefix,
    types::stream::StreamNameStartAfter
);

#[rustfmt::skip]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))]
/// Response to a `list streams` request.
pub struct ListStreamsResponse {
    /// Matching streams.
    #[cfg_attr(feature = "utoipa", schema(max_items = 1000))]
    pub streams: Vec<StreamInfo>,
    /// Indicates that there are more results that match the criteria.
    pub has_more: bool,
}

#[rustfmt::skip]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))]
/// Payload of a `create stream` request.
pub struct CreateStreamRequest {
    /// Stream name that is unique to the basin.
    /// It can be between 1 and 512 bytes in length.
    pub stream: StreamName,
    /// Stream configuration.
    /// When absent, the service applies its defaults.
    pub config: Option<StreamConfig>,
}

#[rustfmt::skip]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))]
/// Position of a record in a stream.
///
/// API mirror of `record::StreamPosition`; see the `From` impls below.
pub struct StreamPosition {
    /// Sequence number assigned by the service.
    pub seq_num: record::SeqNum,
    /// Timestamp, which may be client-specified or assigned by the service.
    /// If it is assigned by the service, it will represent milliseconds since Unix epoch.
    pub timestamp: record::Timestamp,
}

impl From<record::StreamPosition> for StreamPosition {
    /// Convert the internal position into its API representation.
    fn from(record::StreamPosition { seq_num, timestamp }: record::StreamPosition) -> Self {
        Self { seq_num, timestamp }
    }
}

impl From<StreamPosition> for record::StreamPosition {
    fn from(pos: StreamPosition) -> Self {
        Self {
            seq_num: pos.seq_num,
            timestamp: pos.timestamp,
        }
    }
}

#[rustfmt::skip]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))]
/// Response to a `check tail` request.
pub struct TailResponse {
    /// Sequence number that will be assigned to the next record on the stream, and timestamp of the last record.
    pub tail: StreamPosition,
}

#[rustfmt::skip]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "utoipa", derive(utoipa::IntoParams))]
#[cfg_attr(feature = "utoipa", into_params(parameter_in = Query))]
/// Query parameters selecting where a read should start.
///
/// At most one of `seq_num`, `timestamp`, and `tail_offset` may be set
/// (enforced by the `TryFrom` conversion below); when none is set, the
/// read starts at the tail.
pub struct ReadStart {
    /// Start from a sequence number.
    #[cfg_attr(feature = "utoipa", param(value_type = record::SeqNum, required = false))]
    pub seq_num: Option<record::SeqNum>,
    /// Start from a timestamp.
    #[cfg_attr(feature = "utoipa", param(value_type = record::Timestamp, required = false))]
    pub timestamp: Option<record::Timestamp>,
    /// Start from number of records before the next sequence number.
    #[cfg_attr(feature = "utoipa", param(value_type = u64, required = false))]
    pub tail_offset: Option<u64>,
    /// Start reading from the tail if the requested position is beyond it.
    /// Otherwise, a `416 Range Not Satisfiable` response is returned.
    #[cfg_attr(feature = "utoipa", param(value_type = bool, required = false))]
    pub clamp: Option<bool>,
}

impl TryFrom<ReadStart> for types::stream::ReadStart {
    type Error = types::ValidationError;

    fn try_from(value: ReadStart) -> Result<Self, Self::Error> {
        let from = match (value.seq_num, value.timestamp, value.tail_offset) {
            (Some(seq_num), None, None) => types::stream::ReadFrom::SeqNum(seq_num),
            (None, Some(timestamp), None) => types::stream::ReadFrom::Timestamp(timestamp),
            (None, None, Some(tail_offset)) => types::stream::ReadFrom::TailOffset(tail_offset),
            (None, None, None) => types::stream::ReadFrom::TailOffset(0),
            _ => {
                return Err(types::ValidationError(
                    "only one of seq_num, timestamp, or tail_offset can be provided".to_owned(),
                ));
            }
        };
        let clamp = value.clamp.unwrap_or(false);
        Ok(Self { from, clamp })
    }
}

#[rustfmt::skip]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "utoipa", derive(utoipa::IntoParams))]
#[cfg_attr(feature = "utoipa", into_params(parameter_in = Query))]
/// Query parameters bounding how far a read may proceed.
pub struct ReadEnd {
    /// Record count limit.
    /// Non-streaming reads are capped by the default limit of 1000 records.
    // NOTE(review): schema advertises `u64` while the field is `usize` — confirm intended.
    #[cfg_attr(feature = "utoipa", param(value_type = u64, required = false))]
    pub count: Option<usize>,
    /// Metered bytes limit.
    /// Non-streaming reads are capped by the default limit of 1 MiB.
    #[cfg_attr(feature = "utoipa", param(value_type = usize, required = false))]
    pub bytes: Option<usize>,
    /// Exclusive timestamp to read until.
    #[cfg_attr(feature = "utoipa", param(value_type = record::Timestamp, required = false))]
    pub until: Option<record::Timestamp>,
    /// Duration in seconds to wait for new records.
    /// The default duration is 0 if there is a bound on `count`, `bytes`, or `until`, and otherwise infinite.
    /// Non-streaming reads are always bounded on `count` and `bytes`, so you can achieve long poll semantics by specifying a non-zero duration up to 60 seconds.
    /// In the context of an SSE or S2S streaming read, the duration will bound how much time can elapse between records throughout the lifetime of the session.
    #[cfg_attr(feature = "utoipa", param(value_type = u32, required = false))]
    pub wait: Option<u32>,
}

impl From<ReadEnd> for types::stream::ReadEnd {
    fn from(value: ReadEnd) -> Self {
        Self {
            limit: s2_common::read_extent::ReadLimit::from_count_and_bytes(
                value.count,
                value.bytes,
            ),
            until: value.until.into(),
            wait: value.wait.map(|w| Duration::from_secs(w as u64)),
        }
    }
}

/// A read request, in one of the three supported protocol flavors.
#[derive(Debug, Clone)]
pub enum ReadRequest {
    /// Unary
    Unary {
        encryption: EncryptionSpec,
        format: Format,
        // Whether the single response body is JSON or protobuf.
        response_mime: JsonOrProto,
    },
    /// Server-Sent Events streaming response
    EventStream {
        encryption: EncryptionSpec,
        format: Format,
        // `Last-Event-ID` supplied by a reconnecting SSE client, if any.
        last_event_id: Option<sse::LastEventId>,
    },
    /// S2S streaming response
    S2s {
        encryption: EncryptionSpec,
        response_compression: s2s::CompressionAlgorithm,
    },
}

/// An append request, either unary or as an S2S bi-directional stream.
///
/// Not `derive(Debug)`: the `inputs` stream is not debuggable, so `Debug`
/// is implemented manually below.
pub enum AppendRequest {
    /// Unary
    Unary {
        encryption: EncryptionSpec,
        input: types::stream::AppendInput,
        // Whether the response body is JSON or protobuf.
        response_mime: JsonOrProto,
    },
    /// S2S bi-directional streaming
    S2s {
        encryption: EncryptionSpec,
        // Lazily-produced sequence of append inputs decoded from the wire.
        inputs: BoxStream<'static, Result<types::stream::AppendInput, AppendInputStreamError>>,
        response_compression: s2s::CompressionAlgorithm,
    },
}

impl std::fmt::Debug for AppendRequest {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            AppendRequest::Unary {
                encryption,
                input,
                response_mime: response,
            } => f
                .debug_struct("AppendRequest::Unary")
                .field("encryption", encryption)
                .field("input", input)
                .field("response", response)
                .finish(),
            AppendRequest::S2s {
                encryption,
                response_compression,
                ..
            } => f
                .debug_struct("AppendRequest::S2s")
                .field("encryption", encryption)
                .field("response_compression", response_compression)
                .finish(),
        }
    }
}

/// Failure while producing the next `AppendInput` from an S2S input stream.
#[derive(Debug, thiserror::Error)]
pub enum AppendInputStreamError {
    /// The wire frame could not be decoded.
    #[error("Failed to decode S2S frame: {0}")]
    FrameDecode(#[from] std::io::Error),
    /// The decoded input failed validation.
    #[error(transparent)]
    Validation(#[from] types::ValidationError),
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))]
/// A record header: a `(name, value)` pair, serialized as a two-element array.
pub struct Header(pub String, pub String);

#[rustfmt::skip]
/// Record that is durably sequenced on a stream.
///
/// Header names/values and the body are strings produced by the request's
/// `Format` (see [`SequencedRecord::encode`]).
#[derive(Debug, Clone, Serialize)]
#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))]
pub struct SequencedRecord {
    /// Sequence number assigned by the service.
    pub seq_num: record::SeqNum,
    /// Timestamp for this record.
    pub timestamp: record::Timestamp,
    /// Series of name-value pairs for this record.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    #[cfg_attr(feature = "utoipa", schema(required = false))]
    pub headers: Vec<Header>,
    /// Body of the record.
    #[serde(default, skip_serializing_if = "String::is_empty")]
    #[cfg_attr(feature = "utoipa", schema(required = false))]
    pub body: String,
}

impl SequencedRecord {
    /// Encode an internal sequenced record for the wire, applying `format`
    /// to each header name/value and to the body.
    pub fn encode(format: Format, record: record::SequencedRecord) -> Self {
        let (position, record) = record.into_parts();
        let (headers, body) = record.into_parts();
        let headers = headers
            .into_iter()
            .map(|header| Header(format.encode(&header.name), format.encode(&header.value)))
            .collect();
        Self {
            seq_num: position.seq_num,
            timestamp: position.timestamp,
            headers,
            body: format.encode(&body),
        }
    }
}

#[rustfmt::skip]
/// Record to be appended to a stream.
///
/// Header names/values and the body are strings interpreted per the request's
/// `Format` (see [`AppendRecord::decode`]).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))]
pub struct AppendRecord {
    /// Timestamp for this record.
    /// The service will always ensure monotonicity by adjusting it up if necessary to the maximum observed timestamp.
    /// Refer to stream timestamping configuration for the finer semantics around whether a client-specified timestamp is required, and whether it will be capped at the arrival time.
    pub timestamp: Option<record::Timestamp>,
    /// Series of name-value pairs for this record.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    #[cfg_attr(feature = "utoipa", schema(required = false))]
    pub headers: Vec<Header>,
    /// Body of the record.
    #[serde(default, skip_serializing_if = "String::is_empty")]
    #[cfg_attr(feature = "utoipa", schema(required = false))]
    pub body: String,
}

impl AppendRecord {
    pub fn decode(
        self,
        format: Format,
    ) -> Result<types::stream::AppendRecord, types::ValidationError> {
        let headers = self
            .headers
            .into_iter()
            .map(|Header(name, value)| {
                Ok::<record::Header, types::ValidationError>(record::Header {
                    name: format.decode(name)?,
                    value: format.decode(value)?,
                })
            })
            .try_collect()?;

        let body = format.decode(self.body)?;

        let record = record::Record::try_from_parts(headers, body)
            .map_err(|e| e.to_string())?
            .into();

        let parts = types::stream::AppendRecordParts {
            timestamp: self.timestamp,
            record,
        };

        types::stream::AppendRecord::try_from(parts)
            .map_err(|e| types::ValidationError(e.to_string()))
    }
}

#[rustfmt::skip]
/// Payload of an `append` request.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))]
pub struct AppendInput {
    /// Batch of records to append atomically, which must contain at least one record, and no more than 1000.
    /// The total size of a batch of records may not exceed 1 MiB of metered bytes.
    pub records: Vec<AppendRecord>,
    /// Enforce that the sequence number assigned to the first record matches.
    /// `None` disables the check.
    pub match_seq_num: Option<record::SeqNum>,
    /// Enforce a fencing token, which starts out as an empty string that can be overridden by a `fence` command record.
    /// `None` disables the check.
    pub fencing_token: Option<record::FencingToken>,
}

impl AppendInput {
    /// Decode every record in the batch per `format` and validate the batch
    /// as a whole, producing the internal append input.
    pub fn decode(
        self,
        format: Format,
    ) -> Result<types::stream::AppendInput, types::ValidationError> {
        let mut records = Vec::with_capacity(self.records.len());
        for record in self.records {
            records.push(record.decode(format)?);
        }

        Ok(types::stream::AppendInput {
            records: types::stream::AppendRecordBatch::try_from(records)?,
            match_seq_num: self.match_seq_num,
            fencing_token: self.fencing_token,
        })
    }
}

#[rustfmt::skip]
/// Success response to an `append` request.
#[derive(Debug, Clone, Serialize)]
#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))]
pub struct AppendAck {
    /// Sequence number and timestamp of the first record that was appended.
    pub start: StreamPosition,
    /// Sequence number of the last record that was appended `+ 1`, and timestamp of the last record that was appended.
    /// The difference between `end.seq_num` and `start.seq_num` will be the number of records appended.
    pub end: StreamPosition,
    /// Sequence number that will be assigned to the next record on the stream, and timestamp of the last record on the stream.
    /// This can be greater than the `end` position in case of concurrent appends.
    pub tail: StreamPosition,
}

impl From<types::stream::AppendAck> for AppendAck {
    fn from(ack: types::stream::AppendAck) -> Self {
        Self {
            start: ack.start.into(),
            end: ack.end.into(),
            tail: ack.tail.into(),
        }
    }
}

#[rustfmt::skip]
/// Aborted due to a failed condition.
///
/// Each variant carries the value the caller would need to retry successfully.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))]
#[serde(rename_all = "snake_case")]
pub enum AppendConditionFailed {
    /// Fencing token did not match.
    /// The expected fencing token is returned.
    #[cfg_attr(feature = "utoipa", schema(title = "fencing token"))]
    FencingTokenMismatch(record::FencingToken),
    /// Sequence number did not match the tail of the stream.
    /// The expected next sequence number is returned.
    #[cfg_attr(feature = "utoipa", schema(title = "seq num"))]
    SeqNumMismatch(record::SeqNum),
}

#[rustfmt::skip]
#[derive(Debug, Clone, Serialize)]
#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))]
/// A batch of records returned by a read.
pub struct ReadBatch {
    /// Records that are durably sequenced on the stream, retrieved based on the requested criteria.
    /// This can only be empty in response to a unary read (i.e. not SSE), if the request cannot be satisfied without violating an explicit bound (`count`, `bytes`, or `until`).
    pub records: Vec<SequencedRecord>,
    /// Sequence number that will be assigned to the next record on the stream, and timestamp of the last record.
    /// This will only be present when reading recent records.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tail: Option<StreamPosition>,
}

impl ReadBatch {
    pub fn encode(format: Format, batch: types::stream::ReadBatch) -> Self {
        Self {
            records: batch
                .records
                .into_iter()
                .map(|record| SequencedRecord::encode(format, record))
                .collect(),
            tail: batch.tail.map(Into::into),
        }
    }
}