// timeseries_table_core/table/error.rs
//! Error types and SNAFU context selectors for the table layer.
//!
//! This module centralizes the `TableError` enum used by the public API and
//! exposes context selectors (via `#[snafu(visibility(pub(crate)))]`) so
//! implementation details in sibling modules can attach error context without
//! re-exporting everything at the crate root. Keep new variants here to ensure
//! consistent user-facing messages and to avoid scattering selectors.

9use std::collections::BTreeMap;
10
11use arrow::{datatypes::DataType, error::ArrowError};
12use chrono::{DateTime, Utc};
13use parquet::errors::ParquetError;
14use snafu::prelude::*;
15
16use crate::{
17    coverage::io::CoverageError,
18    formats::parquet::{SegmentCoverageError, SegmentEntityIdentityError},
19    metadata::schema_compat::SchemaCompatibilityError,
20    storage::StorageError,
21    transaction_log::{CommitError, SegmentId, TableKind, TimeBucket, segments::SegmentError},
22};
23
/// Errors from high-level time-series table operations.
///
/// Each variant carries enough context for callers to surface actionable
/// messages to users or implement retries where appropriate (for example,
/// conflicts on optimistic concurrency control).
///
/// Context selectors derived by SNAFU are `pub(crate)` (see the
/// `visibility` attribute below), so sibling modules can attach context
/// with `.context(...)` without the selectors leaking into the public API.
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum TableError {
    /// Any error coming from the transaction log / commit machinery
    /// (for example, OCC conflicts, storage failures, or corrupt commits).
    #[snafu(display("Transaction log error: {source}"))]
    TransactionLog {
        /// Underlying transaction log / commit error.
        /// `backtrace` delegates backtrace capture to the source error.
        #[snafu(source, backtrace)]
        source: CommitError,
    },

    /// Attempting to open a table that has no commits at all (CURRENT == 0).
    #[snafu(display("Cannot open table with no commits (CURRENT version is 0)"))]
    EmptyTable,

    /// The underlying table is not a time-series table (TableKind mismatch).
    #[snafu(display("Table kind is {kind:?}, expected TableKind::TimeSeries"))]
    NotTimeSeries {
        /// The actual kind of the underlying table that was discovered.
        kind: TableKind,
    },

    /// Attempt to create a table where commits already exist (idempotency guard for create).
    #[snafu(display("Table already exists; current transaction log version is {current_version}"))]
    AlreadyExists {
        /// Current transaction log version that indicates the table already exists.
        current_version: u64,
    },

    /// Segment-level metadata / Parquet error during append (for example, missing time column, unsupported type, corrupt stats).
    #[snafu(display("Segment metadata error while appending: {source}"))]
    SegmentMeta {
        /// Underlying segment metadata error.
        #[snafu(source, backtrace)]
        source: SegmentError,
    },

    /// Schema compatibility error when appending a segment with incompatible schema (no evolution allowed in v0.1).
    #[snafu(display("Schema compatibility error: {source}"))]
    SchemaCompatibility {
        /// Underlying schema compatibility error.
        // NOTE(review): unlike the other wrapper variants, no `backtrace` is
        // delegated here — confirm whether `SchemaCompatibilityError` carries
        // one, and add the attribute for consistency if it does.
        #[snafu(source)]
        source: SchemaCompatibilityError,
    },

    /// Table has progressed past the initial metadata commit but still lacks
    /// a canonical logical schema (invariant violation for v0.1).
    #[snafu(display("Table has no logical_schema at version {version}; cannot append in v0.1"))]
    MissingCanonicalSchema {
        /// The transaction log version missing a canonical logical schema.
        version: u64,
    },

    /// Storage error while accessing table data (read/write failure at the storage layer).
    #[snafu(display("Storage error while accessing table data: {source}"))]
    Storage {
        /// Underlying storage error while reading or writing table data.
        /// A field named `source` is treated as the error source by SNAFU
        /// even without an explicit `#[snafu(source)]` attribute.
        source: StorageError,
    },

    /// Start and end timestamps must satisfy start < end when scanning.
    #[snafu(display("Invalid scan range: start={start}, end={end} (expect start < end)"))]
    InvalidRange {
        /// Inclusive/lower timestamp bound supplied by caller.
        start: DateTime<Utc>,
        /// Exclusive/upper timestamp bound supplied by caller.
        end: DateTime<Utc>,
    },

    /// Parquet read/IO error during scanning or schema extraction.
    #[snafu(display("Parquet read error: {source}"))]
    ParquetRead {
        /// Underlying Parquet error raised during read or schema extraction.
        /// Implicit SNAFU source (field named `source`).
        source: ParquetError,
    },

    /// Arrow compute or conversion error while materializing or filtering batches.
    #[snafu(display("Arrow error while filtering batch: {source}"))]
    Arrow {
        /// Underlying Arrow error raised during batch conversion or filtering.
        /// Implicit SNAFU source (field named `source`).
        source: ArrowError,
    },

    /// Segment is missing the configured time column required for scans.
    #[snafu(display("Missing time column {column} in segment"))]
    MissingTimeColumn {
        /// Name of the expected time column that was not found in the segment.
        column: String,
    },

    /// Time column exists but has an unsupported Arrow type for scanning.
    #[snafu(display("Unsupported time column {column} with type {datatype:?}"))]
    UnsupportedTimeType {
        /// Name of the time column with an unsupported type.
        column: String,
        /// Arrow data type encountered for the time column.
        datatype: DataType,
    },

    /// Converting a timestamp to the requested unit would overflow `i64`.
    #[snafu(display("Timestamp conversion overflow for column {column} (value: {timestamp})"))]
    TimeConversionOverflow {
        /// Name of the time column being converted.
        column: String,
        /// The timestamp value that could not be represented as i64 nanos.
        timestamp: DateTime<Utc>,
    },

    /// Segment Coverage error.
    #[snafu(display("Segment coverage error: {source}"))]
    SegmentCoverage {
        /// Underlying coverage error.
        #[snafu(source, backtrace)]
        source: SegmentCoverageError,
    },

    /// Table coverage pointer uses a bucket spec that doesn't match the table's index bucket.
    #[snafu(display(
        "Table coverage bucket spec mismatch: expected {expected:?}, found {actual:?} (from coverage version {pointer_version})"
    ))]
    TableCoverageBucketMismatch {
        /// Bucket spec defined by the table's time index.
        expected: TimeBucket,
        /// Bucket spec recorded in the table coverage pointer.
        actual: TimeBucket,
        /// Log version where the mismatching coverage pointer was recorded.
        pointer_version: u64,
    },

    /// Coverage sidecar read/write or computation error.
    #[snafu(display("Coverage sidecar error: {source}"))]
    CoverageSidecar {
        /// Underlying Coverage error.
        #[snafu(source, backtrace)]
        source: CoverageError,
    },

    /// Appending would overlap existing coverage for the same segment path.
    #[snafu(display(
        "Coverage overlap while appending {segment_path}: {overlap_count} overlapping buckets (example={example_bucket:?})"
    ))]
    CoverageOverlap {
        /// Relative path of the segment being appended.
        segment_path: String,
        /// Number of overlapping buckets detected.
        overlap_count: u64,
        /// Example overlapping bucket (if available) to aid debugging.
        /// Bucket ids fit in u32 (see `BucketDomainOverflow` below).
        example_bucket: Option<u32>,
    },

    /// Existing segment lacks a coverage_path when coverage is required.
    #[snafu(display(
        "Cannot append because existing segment {segment_id} is missing coverage_path (required for coverage tracking)"
    ))]
    ExistingSegmentMissingCoverage {
        /// Segment ID missing a coverage_path entry.
        segment_id: SegmentId,
    },

    /// Reading the per-segment coverage sidecar failed while rebuilding coverage.
    #[snafu(display(
        "Cannot recover table coverage: failed to read segment coverage sidecar for {segment_id} at {coverage_path}: {source}"
    ))]
    SegmentCoverageSidecarRead {
        /// Segment whose coverage sidecar could not be read.
        segment_id: SegmentId,
        /// Path to the coverage sidecar file that failed to read.
        coverage_path: String,
        /// Underlying coverage error (boxed to keep the variant size small).
        /// `source(from(...))` lets call sites pass a bare `CoverageError`;
        /// SNAFU applies `Box::new` during context conversion.
        #[snafu(source(from(CoverageError, Box::new)), backtrace)]
        source: Box<CoverageError>,
    },

    /// Building an expected bucket bitmap would exceed the u32 bucket domain.
    ///
    /// In v0.1 we store bucket ids in RoaringBitmap (u32). If the requested
    /// time range maps to bucket ids > u32::MAX, we must fail instead of
    /// truncating in release builds.
    #[snafu(display(
        "Expected bucket domain overflows u32: last_bucket_id={last_bucket_id} (max={max})"
    ))]
    BucketDomainOverflow {
        /// The last bucket id in the requested range (inclusive).
        last_bucket_id: u64,
        /// Always u32::MAX, included to make the error self-describing.
        max: u32,
    },

    /// Table state is missing a coverage snapshot pointer when required.
    #[snafu(display(
        "Cannot append because table has segments but no table coverage snapshot pointer in state"
    ))]
    MissingTableCoveragePointer,

    /// Failed to read or validate the entity identity stored in a segment.
    #[snafu(display("Segment entity identity error: {source}"))]
    SegmentEntityIdentity {
        /// Underlying entity identity extraction error.
        #[snafu(source, backtrace)]
        source: SegmentEntityIdentityError,
    },

    /// Segment entity identity does not match the expected table identity.
    #[snafu(display(
        "Entity mismatch while appending {segment_path}: expected={expected:?}, found={found:?}"
    ))]
    EntityMismatch {
        /// Relative path of the segment being appended.
        segment_path: String,
        /// Expected entity identity derived from table metadata or state.
        expected: BTreeMap<String, String>,
        /// Entity identity observed in the segment.
        found: BTreeMap<String, String>,
    },
}
244}