// log/model.rs
//! Core data types for OpenData Log.
//!
//! This module defines the fundamental data structures used throughout the
//! log API, including records for writing and entries for reading.

use bytes::Bytes;
7
/// Unique identifier for a segment.
///
/// Segment IDs are monotonically increasing integers assigned when segments
/// are created. Use with [`LogRead::list_keys`](crate::LogRead::list_keys)
/// to query keys within specific segments.
///
/// This is a plain `u32` alias, so ordinary integer comparison and
/// arithmetic apply; higher IDs correspond to later-created segments.
pub type SegmentId = u32;
14
/// Global sequence number for log entries.
///
/// Sequence numbers are monotonically increasing integers assigned to each
/// entry at append time. They provide a total ordering across all keys in
/// the log.
///
/// This is a plain `u64` alias; comparing two sequence numbers compares
/// their position in the global append order.
pub type Sequence = u64;
21
22/// A segment of the log.
23///
24/// Segments partition the log into coarse-grained chunks based on time or
25/// other policies. Each segment has a unique identifier and tracks the
26/// starting sequence number for entries it contains.
27///
28/// Segments are the natural boundary for attaching metadata such as key
29/// listings. See [`LogRead::list_segments`](crate::LogRead::list_segments)
30/// for querying segments.
31///
32/// # Example
33///
34/// ```no_run
35/// # use log::{LogDb, LogRead, Config};
36/// # use common::StorageConfig;
37/// # #[tokio::main]
38/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
39/// # let config = Config { storage: StorageConfig::InMemory, ..Default::default() };
40/// # let log = LogDb::open(config).await?;
41/// let segments = log.list_segments(..).await?;
42/// for segment in segments {
43/// println!(
44/// "segment {}: start_seq={}, created at {}",
45/// segment.id, segment.start_seq, segment.start_time_ms
46/// );
47/// }
48/// # Ok(())
49/// # }
50/// ```
51#[derive(Debug, Clone, PartialEq, Eq)]
52pub struct Segment {
53 /// Unique segment identifier (monotonically increasing).
54 pub id: SegmentId,
55 /// First sequence number in this segment.
56 pub start_seq: Sequence,
57 /// Wall-clock time when this segment was created (ms since epoch).
58 pub start_time_ms: i64,
59}
60
/// A record to be appended to the log.
///
/// Records are the unit of data written to the log. Each record consists of
/// a key identifying the log stream and a value containing the payload.
///
/// Both fields are [`Bytes`], so cloning a `Record` is cheap (reference-count
/// bump, no data copy).
///
/// # Key Selection
///
/// Keys determine how data is organized in the log. Each unique key represents
/// an independent log stream with its own sequence of entries. Choose keys based
/// on your access patterns:
///
/// - Use a single key for a simple append-only log
/// - Use entity IDs as keys for per-entity event streams
/// - Use composite keys (e.g., `tenant:entity`) for multi-tenant scenarios
///
/// # Example
///
/// ```
/// use bytes::Bytes;
/// use log::Record;
///
/// let record = Record {
///     key: Bytes::from("orders"),
///     value: Bytes::from(r#"{"id": "123", "amount": 99.99}"#),
/// };
/// ```
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Record {
    /// The key identifying the log stream.
    ///
    /// All records with the same key form a single ordered log. Keys can be
    /// any byte sequence but are typically human-readable identifiers.
    pub key: Bytes,

    /// The record payload.
    ///
    /// Values can contain any byte sequence. The log does not interpret
    /// or validate the contents.
    pub value: Bytes,
}
101
102/// Output of an append operation.
103///
104/// Contains metadata about the appended records, including the starting
105/// sequence number assigned to the first record in the batch.
106///
107/// # Example
108///
109/// ```no_run
110/// # use log::{LogDb, Config, Record};
111/// # use bytes::Bytes;
112/// # use common::StorageConfig;
113/// # #[tokio::main]
114/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
115/// # let config = Config { storage: StorageConfig::InMemory, ..Default::default() };
116/// # let log = LogDb::open(config).await?;
117/// # let records = vec![Record { key: Bytes::from("k"), value: Bytes::from("v") }];
118/// let result = log.try_append(records).await?;
119/// println!("Appended starting at sequence {}", result.start_sequence);
120/// # Ok(())
121/// # }
122/// ```
123#[derive(Debug, Clone, PartialEq, Eq)]
124pub struct AppendOutput {
125 /// Sequence number assigned to the first record in the batch.
126 pub start_sequence: Sequence,
127}
128
/// An entry read from the log.
///
/// Log entries are returned by [`LogIterator`](crate::LogIterator) and contain
/// the original record data along with metadata assigned at append time.
///
/// Like [`Record`], the key and value are [`Bytes`], so cloning an entry is
/// cheap (reference-count bump, no data copy).
///
/// # Sequence Numbers
///
/// Each entry has a globally unique sequence number assigned when it was
/// appended. Within a single key's log, entries are ordered by sequence
/// number, but the numbers are not contiguous—other keys' appends are
/// interleaved in the global sequence.
///
/// # Example
///
/// ```no_run
/// # use log::{LogDb, LogRead, Config};
/// # use bytes::Bytes;
/// # use common::StorageConfig;
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let config = Config { storage: StorageConfig::InMemory, ..Default::default() };
/// # let log = LogDb::open(config).await?;
/// # let key = Bytes::from("orders");
/// let mut iter = log.scan(key, ..).await?;
/// while let Some(entry) = iter.next().await? {
///     println!(
///         "key={:?}, seq={}, value={:?}",
///         entry.key, entry.sequence, entry.value
///     );
/// }
/// # Ok(())
/// # }
/// ```
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct LogEntry {
    /// The key of the log stream this entry belongs to.
    pub key: Bytes,

    /// The sequence number assigned to this entry.
    ///
    /// Sequence numbers are monotonically increasing within a key's log
    /// and globally unique across all keys.
    pub sequence: Sequence,

    /// The record value.
    pub value: Bytes,
}
175}