
scouter_dataframe/parquet/tracing/engine.rs

1use crate::error::TraceEngineError;
2use crate::parquet::control::{get_pod_id, ControlTableEngine};
3use crate::parquet::tracing::traits::arrow_schema_to_delta;
4use crate::parquet::tracing::traits::attribute_field;
5use crate::parquet::tracing::traits::TraceSchemaExt;
6use crate::parquet::utils::{create_attr_match_udf, register_cloud_logstore_factories};
7use crate::storage::ObjectStore;
8use arrow::array::*;
9use arrow::datatypes::*;
10use arrow_array::RecordBatch;
11use chrono::{Datelike, Utc};
12use datafusion::prelude::SessionContext;
13use deltalake::datafusion::parquet::basic::{Compression, Encoding, ZstdLevel};
14use deltalake::datafusion::parquet::file::properties::{EnabledStatistics, WriterProperties};
15use deltalake::datafusion::parquet::schema::types::ColumnPath;
16use deltalake::operations::optimize::OptimizeType;
17use deltalake::{DeltaTable, DeltaTableBuilder, TableProperty};
18use scouter_settings::ObjectStorageSettings;
19use scouter_types::SpanId;
20use scouter_types::TraceId;
21use scouter_types::TraceSpanRecord;
22use scouter_types::{Attribute, SpanEvent, SpanLink};
23use serde_json::Value;
24use std::sync::Arc;
25use tokio::sync::oneshot;
26use tokio::sync::{mpsc, RwLock as AsyncRwLock};
27use tokio::time::{interval, Duration};
28use tracing::{debug, error, info, instrument};
29use url::Url;
30
31const TRACE_SPAN_TABLE_NAME: &str = "trace_spans";
32
33/// Control table task names for distributed coordination.
34const TASK_OPTIMIZE: &str = "trace_optimize";
35const TASK_RETENTION: &str = "trace_retention";
36
37/// Days from year-0001 to Unix epoch (1970-01-01), used to convert chrono → Arrow Date32.
38/// Equivalent to `NaiveDate::from_ymd_opt(1970, 1, 1).unwrap().num_days_from_ce()`.
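/// For example, 2024-01-01 has `num_days_from_ce()` == 738_886, so its Date32 value is 738_886 - 719_163 = 19_723.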
39const UNIX_EPOCH_DAYS: i32 = 719_163;
40
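/// Commands accepted by the single-writer actor spawned in `start_actor`.
///
/// A minimal usage sketch, assuming `tx` is the channel handle returned by `start_actor`
/// and `spans: Vec<TraceSpanRecord>` is already in scope:
///
/// ```ignore
/// let (resp_tx, resp_rx) = tokio::sync::oneshot::channel();
/// tx.send(TableCommand::Write { spans, respond_to: resp_tx }).await?;
/// resp_rx.await??; // first ? = actor dropped the sender, second ? = TraceEngineError from the write
/// ```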
41pub enum TableCommand {
42    Write {
43        spans: Vec<TraceSpanRecord>,
44        respond_to: oneshot::Sender<Result<(), TraceEngineError>>,
45    },
46    Optimize {
47        respond_to: oneshot::Sender<Result<(), TraceEngineError>>,
48    },
49    Vacuum {
50        retention_hours: u64,
51        respond_to: oneshot::Sender<Result<(), TraceEngineError>>,
52    },
53    Expire {
54        cutoff_date: chrono::NaiveDate,
55        respond_to: oneshot::Sender<Result<(), TraceEngineError>>,
56    },
57    Shutdown,
58}
59
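/// Resolve the Delta table location by appending the table name to the store's base URL.
/// For example (illustrative), a base of "s3://bucket/scouter" becomes "s3://bucket/scouter/trace_spans".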
60async fn build_url(object_store: &ObjectStore) -> Result<Url, TraceEngineError> {
61    let mut base = object_store.get_base_url()?;
62    let mut path = base.path().to_string();
63    if !path.ends_with('/') {
64        path.push('/');
65    }
66    path.push_str(TRACE_SPAN_TABLE_NAME);
67    base.set_path(&path);
68    Ok(base)
69}
70
71#[instrument(skip_all)]
72async fn create_table(
73    object_store: &ObjectStore,
74    table_url: Url,
75    schema: SchemaRef,
76) -> Result<DeltaTable, TraceEngineError> {
77    info!(
78        "Creating trace span table [{}://.../{} ]",
79        table_url.scheme(),
80        table_url
81            .path_segments()
82            .and_then(|mut s| s.next_back())
83            .unwrap_or(TRACE_SPAN_TABLE_NAME)
84    );
85
86    let store = object_store.as_dyn_object_store();
87    let table = DeltaTableBuilder::from_url(table_url.clone())?
88        .with_storage_backend(store, table_url)
89        .build()?;
90
91    let delta_fields = arrow_schema_to_delta(&schema);
92
93    table
94        .create()
95        .with_table_name(TRACE_SPAN_TABLE_NAME)
96        .with_columns(delta_fields)
97        .with_partition_columns(vec!["partition_date".to_string()])
98        .with_configuration_property(TableProperty::CheckpointInterval, Some("5"))
99        // Only collect min/max statistics for columns that benefit from data skipping.
100        .with_configuration_property(
101            TableProperty::DataSkippingStatsColumns,
102            Some("start_time,end_time,service_name,duration_ms,status_code,partition_date"),
103        )
104        .await
105        .map_err(Into::into)
106}
107
108#[instrument(skip_all)]
109async fn build_or_create_table(
110    object_store: &ObjectStore,
111    schema: SchemaRef,
112) -> Result<DeltaTable, TraceEngineError> {
113    register_cloud_logstore_factories();
114    let table_url = build_url(object_store).await?;
115    info!(
116        "Attempting to load trace span table [{}://.../{} ]",
117        table_url.scheme(),
118        table_url
119            .path_segments()
120            .and_then(|mut s| s.next_back())
121            .unwrap_or(TRACE_SPAN_TABLE_NAME)
122    );
123
124    // Check whether an existing Delta table is already present before deciding to create one.
125    // Local tables can be checked cheaply via the filesystem; remote tables require
126    // an actual load attempt against the object store.
127    let is_delta_table = if table_url.scheme() == "file" {
128        if let Ok(path) = table_url.to_file_path() {
129            if !path.exists() {
130                info!("Creating directory for local table: {:?}", path);
131                std::fs::create_dir_all(&path)?;
132            }
133            path.join("_delta_log").exists()
134        } else {
135            false
136        }
137    } else {
138        let store = object_store.as_dyn_object_store();
139        match DeltaTableBuilder::from_url(table_url.clone()) {
140            Ok(builder) => builder
141                .with_storage_backend(store, table_url.clone())
142                .load()
143                .await
144                .is_ok(),
145            Err(_) => false,
146        }
147    };
148
149    if is_delta_table {
150        info!(
151            "Loaded existing trace span table [{}://.../{} ]",
152            table_url.scheme(),
153            table_url
154                .path_segments()
155                .and_then(|mut s| s.next_back())
156                .unwrap_or(TRACE_SPAN_TABLE_NAME)
157        );
158        let store = object_store.as_dyn_object_store();
159        let table = DeltaTableBuilder::from_url(table_url.clone())?
160            .with_storage_backend(store, table_url)
161            .load()
162            .await?;
163        Ok(table)
164    } else {
165        info!("Table does not exist, creating new table");
166        create_table(object_store, table_url, schema).await
167    }
168}
169
170/// Core trace span engine for high-throughput observability workloads.
171///
172/// Hierarchy fields (depth, span_order, path, root_span_id) are NOT stored — they are
173/// computed at query time via Rust DFS traversal. This matches how Jaeger/Zipkin operate and
174/// avoids ordering dependencies during ingest (spans may arrive out-of-order within a batch).
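///
/// One way such a derivation could look at read time (a hedged sketch, not the engine's
/// actual query path; `parents` maps each span id to its optional parent id):
///
/// ```ignore
/// use std::collections::HashMap;
///
/// fn depth_of(span_id: &[u8], parents: &HashMap<Vec<u8>, Option<Vec<u8>>>) -> usize {
///     // Follow parent links upward until a root span (no parent) is reached.
///     let mut depth = 0;
///     let mut current = parents.get(span_id).cloned().flatten();
///     while let Some(parent_id) = current {
///         depth += 1;
///         current = parents.get(&parent_id).cloned().flatten();
///     }
///     depth
/// }
/// ```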
175pub struct TraceSpanDBEngine {
176    schema: Arc<Schema>,
177    pub object_store: ObjectStore,
178    table: Arc<AsyncRwLock<DeltaTable>>,
179    pub ctx: Arc<SessionContext>,
180    control: ControlTableEngine,
181}
182
183impl TraceSchemaExt for TraceSpanDBEngine {}
184
185impl TraceSpanDBEngine {
186    pub async fn new(storage_settings: &ObjectStorageSettings) -> Result<Self, TraceEngineError> {
187        let object_store = ObjectStore::new(storage_settings)?;
188        let schema = Arc::new(Self::create_schema());
189        let delta_table = build_or_create_table(&object_store, schema.clone()).await?;
190        let ctx = object_store.get_session()?;
191
192        // Register the match_attr UDF so DataFusion plans can use it for search_blob filtering.
193        // This must happen before any query is planned — UDFs live on the SessionContext.
194        ctx.register_udf(create_attr_match_udf());
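        // A hypothetical query sketch (the real signature lives in create_attr_match_udf):
        //   SELECT * FROM trace_spans WHERE match_attr(search_blob, '|user_id=42|')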
195
196        // A freshly-created table has no committed Parquet files yet, so table_provider()
197        // can fail. Defer registration until the first write populates the log.
198        if let Ok(provider) = delta_table.table_provider().await {
199            ctx.register_table(TRACE_SPAN_TABLE_NAME, provider)?;
200        } else {
201            info!("Empty table at init — deferring SessionContext registration until first write");
202        }
203        let control = ControlTableEngine::new(storage_settings, get_pod_id()).await?;
204
205        Ok(TraceSpanDBEngine {
206            schema,
207            object_store,
208            table: Arc::new(AsyncRwLock::new(delta_table)),
209            ctx: Arc::new(ctx),
210            control,
211        })
212    }
213
214    /// Build a RecordBatch from a vector of TraceSpanRecord (raw ingest type, no hierarchy).
215    pub fn build_batch(
216        &self,
217        spans: Vec<TraceSpanRecord>,
218    ) -> Result<RecordBatch, TraceEngineError> {
219        let start_time = std::time::Instant::now();
220        let mut builder = TraceSpanBatchBuilder::new(self.schema.clone());
221
222        for span in spans {
223            builder.append(&span)?;
224        }
225
226        let record_batch = builder
227            .finish()
228            .inspect_err(|e| error!("Failed to build RecordBatch: {}", e))?;
229
230        let duration = start_time.elapsed();
231        debug!(
232            "Built RecordBatch with {} rows in {:?}",
233            record_batch.num_rows(),
234            duration
235        );
236        Ok(record_batch)
237    }
238
239    /// Build the shared `WriterProperties` used for both ingest writes and Z-ORDER compaction.
240    fn build_writer_props() -> WriterProperties {
241        WriterProperties::builder()
242            // Row group size: creates ~4 groups per 128MB file so bloom + page stats
243            // prune within files, not just across files.
244            .set_max_row_group_size(32_768)
245            // Bloom filter on trace_id: skips ~99% of row groups for trace_id equality lookups.
246            .set_column_bloom_filter_enabled(ColumnPath::new(vec!["trace_id".to_string()]), true)
247            .set_column_bloom_filter_fpp(ColumnPath::new(vec!["trace_id".to_string()]), 0.01)
248            .set_column_bloom_filter_ndv(ColumnPath::new(vec!["trace_id".to_string()]), 32_768)
249            // service_name: low cardinality but hot lookup path — bloom skips row groups fast
250            .set_column_bloom_filter_enabled(
251                ColumnPath::new(vec!["service_name".to_string()]),
252                true,
253            )
254            .set_column_bloom_filter_fpp(ColumnPath::new(vec!["service_name".to_string()]), 0.01)
255            .set_column_bloom_filter_ndv(ColumnPath::new(vec!["service_name".to_string()]), 256)
256            // span_name: high cardinality equality queries (e.g. "grpc.unary/method")
257            .set_column_bloom_filter_enabled(ColumnPath::new(vec!["span_name".to_string()]), true)
258            .set_column_bloom_filter_fpp(ColumnPath::new(vec!["span_name".to_string()]), 0.01)
259            .set_column_bloom_filter_ndv(ColumnPath::new(vec!["span_name".to_string()]), 32_768)
260            // Page-level stats on start_time: finest-grained time pruning within row groups.
261            .set_column_statistics_enabled(
262                ColumnPath::new(vec!["start_time".to_string()]),
263                EnabledStatistics::Page,
264            )
265            // status_code: page-level min/max prunes pages for error-only queries.
266            // Do NOT use bloom filter: only 3 possible values (0/1/2), overhead > benefit.
267            .set_column_statistics_enabled(
268                ColumnPath::new(vec!["status_code".to_string()]),
269                EnabledStatistics::Page,
270            )
271            // Delta encoding on near-sorted integer columns: 4-8x compression on timestamps
272            // after Z-ORDER compaction; 2-4x on durations within a service.
273            .set_column_encoding(
274                ColumnPath::new(vec!["start_time".to_string()]),
275                Encoding::DELTA_BINARY_PACKED,
276            )
277            .set_column_encoding(
278                ColumnPath::new(vec!["duration_ms".to_string()]),
279                Encoding::DELTA_BINARY_PACKED,
280            )
281            // ZSTD level 3: ~40% better compression than SNAPPY on text columns;
282            // marginal decompression overhead is offset by reduced I/O.
283            .set_compression(Compression::ZSTD(ZstdLevel::try_new(3).unwrap()))
284            // Dictionary hint on span_name: high repetition similar to service_name.
285            .set_column_dictionary_enabled(ColumnPath::new(vec!["span_name".to_string()]), true)
286            .build()
287    }
288
289    /// Write spans to the Delta table (single-writer invariant via actor channel).
290    async fn write_spans(&self, spans: Vec<TraceSpanRecord>) -> Result<(), TraceEngineError> {
291        info!("Engine received write request for {} spans", spans.len());
292
293        let batch = self
294            .build_batch(spans)
295            .inspect_err(|e| error!("failed to build batch: {:?}", e))?;
296        info!("Built batch with {} rows", batch.num_rows());
297
298        let mut table_guard = self.table.write().await;
299        info!("Acquired table write lock");
300
301        // update_incremental is intentionally omitted here.
302        //
303        // This engine runs as a single-writer actor — no other process commits to this
304        // Delta table, so the in-memory state is always current. Calling update_incremental
305        // on a freshly-created empty table (version 0, no data files) causes the Delta
306        // Kernel to emit "Not a Delta table: No files in log segment", which mutates
307        // table_guard into a corrupted intermediate state before the error propagates.
308        // That corrupted clone then has no partition column metadata, producing unpartitioned
309        // flat Parquet files instead of partition_date=YYYY-MM-DD/ hive directories.
310
311        let current_table = table_guard.clone();
312
313        let updated_table = current_table
314            .write(vec![batch])
315            .with_save_mode(deltalake::protocol::SaveMode::Append)
316            .with_writer_properties(Self::build_writer_props())
317            // Always declare partition columns explicitly — do not rely solely on the
318            // in-memory snapshot, which can be stale after a failed update_incremental.
319            .with_partition_columns(vec!["partition_date".to_string()])
320            .await?;
321
322        info!("Successfully wrote batch to Delta Lake");
323
324        self.ctx.deregister_table(TRACE_SPAN_TABLE_NAME)?;
325        self.ctx
326            .register_table(TRACE_SPAN_TABLE_NAME, updated_table.table_provider().await?)?;
327
328        *table_guard = updated_table;
329
330        Ok(())
331    }
332
333    async fn optimize_table(&self) -> Result<(), TraceEngineError> {
334        let mut table_guard = self.table.write().await;
335
336        let current_table = table_guard.clone();
337
338        let (updated_table, _metrics) = current_table
339            .optimize()
340            .with_target_size(128 * 1024 * 1024)
341            .with_type(OptimizeType::ZOrder(vec![
342                "start_time".to_string(),
343                "service_name".to_string(),
344            ]))
345            // Bloom filters must be re-specified here — compaction rewrites all Parquet files
346            // from scratch using these properties. Without this, every compaction cycle
347            // silently discards all bloom filters on the rewritten files.
348            .with_writer_properties(Self::build_writer_props())
349            .await?;
350
351        self.ctx.deregister_table(TRACE_SPAN_TABLE_NAME)?;
352        self.ctx
353            .register_table(TRACE_SPAN_TABLE_NAME, updated_table.table_provider().await?)?;
354
355        *table_guard = updated_table;
356
357        Ok(())
358    }
359
360    async fn vacuum_table(&self, retention_hours: u64) -> Result<(), TraceEngineError> {
361        let mut table_guard = self.table.write().await;
362
363        let (updated_table, _metrics) = table_guard
364            .clone()
365            .vacuum()
366            .with_retention_period(chrono::Duration::hours(retention_hours as i64))
367            .with_enforce_retention_duration(false)
368            .await?;
369
370        self.ctx.deregister_table(TRACE_SPAN_TABLE_NAME)?;
371        self.ctx
372            .register_table(TRACE_SPAN_TABLE_NAME, updated_table.table_provider().await?)?;
373
374        *table_guard = updated_table;
375
376        Ok(())
377    }
378
379    /// Delete all rows with `partition_date` older than `cutoff_date`.
380    ///
381    /// This is a logical delete — it writes a new Delta log entry marking the rows as removed.
382    /// Physical disk space is not reclaimed until `vacuum_table()` runs afterwards.
383    async fn expire_table(&self, cutoff_date: chrono::NaiveDate) -> Result<(), TraceEngineError> {
384        let mut table_guard = self.table.write().await;
385
386        // CAST('YYYY-MM-DD' AS DATE) produces a Date32 that matches the partition column type,
387        // which allows Delta Lake to translate this into a partition directory filter.
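        // For example, a cutoff_date of 2024-06-01 yields: partition_date < CAST('2024-06-01' AS DATE)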
388        let predicate = format!(
389            "partition_date < CAST('{}' AS DATE)",
390            cutoff_date.format("%Y-%m-%d")
391        );
392
393        let (updated_table, metrics) = table_guard
394            .clone()
395            .delete()
396            .with_predicate(predicate)
397            .await?;
398
399        info!(
400            "Expired {} rows older than {}",
401            metrics.num_deleted_rows, cutoff_date
402        );
403
404        self.ctx.deregister_table(TRACE_SPAN_TABLE_NAME)?;
405        self.ctx
406            .register_table(TRACE_SPAN_TABLE_NAME, updated_table.table_provider().await?)?;
407
408        *table_guard = updated_table;
409
410        Ok(())
411    }
412
413    /// Try to claim and run the optimize task via the control table.
414    ///
415    /// The control table's OCC ensures only one pod runs this at a time across the deployment.
416    async fn try_run_optimize(&self, interval_hours: u64) {
417        match self.control.try_claim_task(TASK_OPTIMIZE).await {
418            Ok(true) => match self.optimize_table().await {
419                Ok(()) => {
420                    // Vacuum tombstoned files left behind by compaction.
421                    // retention_hours=0 is safe here because the single-writer invariant
422                    // guarantees no concurrent reader is using an older table version.
423                    if let Err(e) = self.vacuum_table(0).await {
424                        error!("Post-optimize vacuum failed: {}", e);
425                    }
426                    let _ = self
427                        .control
428                        .release_task(
429                            TASK_OPTIMIZE,
430                            chrono::Duration::hours(interval_hours as i64),
431                        )
432                        .await;
433                }
434                Err(e) => {
435                    error!("Optimize failed: {}", e);
436                    let _ = self.control.release_task_on_failure(TASK_OPTIMIZE).await;
437                }
438            },
439            Ok(false) => { /* not due or another pod owns it */ }
440            Err(e) => error!("Optimize claim check failed: {}", e),
441        }
442    }
443
444    /// Try to claim and run the retention task via the control table.
445    async fn try_run_retention(&self, retention_days: u32) {
446        match self.control.try_claim_task(TASK_RETENTION).await {
447            Ok(true) => {
448                let cutoff =
449                    (Utc::now() - chrono::Duration::days(retention_days as i64)).date_naive();
450                match self.expire_table(cutoff).await {
451                    Ok(()) => {
452                        // Reclaim disk space after logical delete
453                        let _ = self.vacuum_table(0).await;
454                        let _ = self
455                            .control
456                            .release_task(TASK_RETENTION, chrono::Duration::hours(24))
457                            .await;
458                    }
459                    Err(e) => {
460                        error!("Retention failed: {}", e);
461                        let _ = self.control.release_task_on_failure(TASK_RETENTION).await;
462                    }
463                }
464            }
465            Ok(false) => {}
466            Err(e) => error!("Retention claim check failed: {}", e),
467        }
468    }
469
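    /// Spawn the single-writer actor loop, returning its command channel and join handle.
    ///
    /// A minimal wiring sketch, assuming `settings: ObjectStorageSettings` is in scope
    /// (the 6-hour compaction interval and 30-day retention are illustrative values):
    ///
    /// ```ignore
    /// let engine = TraceSpanDBEngine::new(&settings).await?;
    /// let (tx, handle) = engine.start_actor(6, Some(30));
    /// // ... send TableCommand values over `tx`, finishing with TableCommand::Shutdown ...
    /// handle.await?;
    /// ```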
470    #[instrument(skip_all, name = "trace_engine_actor")]
471    pub fn start_actor(
472        self,
473        compaction_interval_hours: u64,
474        retention_days: Option<u32>,
475    ) -> (mpsc::Sender<TableCommand>, tokio::task::JoinHandle<()>) {
476        let (tx, mut rx) = mpsc::channel::<TableCommand>(100);
477
478        let handle = tokio::spawn(async move {
479            // Poll every 5 minutes — the actual schedule is persisted in the
480            // control table's `next_run_at` and survives pod restarts.
481            let mut scheduler_ticker = interval(Duration::from_secs(5 * 60));
482            scheduler_ticker.tick().await; // skip immediate tick
483
484            loop {
485                tokio::select! {
486                    Some(cmd) = rx.recv() => {
487                        match cmd {
488                            TableCommand::Write { spans, respond_to } => {
489                                match self.write_spans(spans).await {
490                                    Ok(_) => { let _ = respond_to.send(Ok(())); }
491                                    Err(e) => {
492                                        tracing::error!("Write failed: {}", e);
493                                        let _ = respond_to.send(Err(e));
494                                    }
495                                }
496                            }
497                            TableCommand::Optimize { respond_to } => {
498                                let _ = respond_to.send(self.optimize_table().await);
499                                if let Err(e) = self.vacuum_table(0).await {
500                                    error!("Post-optimize vacuum failed: {}", e);
501                                }
502                            }
503                            TableCommand::Vacuum { retention_hours, respond_to } => {
504                                let _ = respond_to.send(self.vacuum_table(retention_hours).await);
505                            }
506                            TableCommand::Expire { cutoff_date, respond_to } => {
507                                let _ = respond_to.send(self.expire_table(cutoff_date).await);
508                            }
509                            TableCommand::Shutdown => {
510                                tracing::info!("Shutting down table engine");
511                                break;
512                            }
513                        }
514                    }
515                    _ = scheduler_ticker.tick() => {
516                        self.try_run_optimize(compaction_interval_hours).await;
517                        if let Some(days) = retention_days {
518                            self.try_run_retention(days).await;
519                        }
520                    }
521                }
522            }
523        });
524
525        (tx, handle)
526    }
527}
528
529/// Efficient builder for converting `TraceSpanRecord` (ingest type) into Arrow `RecordBatch`.
530///
531/// Hierarchy fields (depth, span_order, path, root_span_id) are NOT included — they are
532/// computed at query time from the flat span data stored here.
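///
/// A minimal sketch of the intended flow (this mirrors `TraceSpanDBEngine::build_batch`),
/// assuming `schema: SchemaRef` and `spans: Vec<TraceSpanRecord>` are in scope:
///
/// ```ignore
/// let mut builder = TraceSpanBatchBuilder::new(schema);
/// for span in &spans {
///     builder.append(span)?;
/// }
/// let batch = builder.finish()?;
/// ```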
533pub struct TraceSpanBatchBuilder {
534    schema: SchemaRef,
535
536    // ID builders
537    trace_id: FixedSizeBinaryBuilder,
538    span_id: FixedSizeBinaryBuilder,
539    parent_span_id: FixedSizeBinaryBuilder,
540
541    // W3C Trace Context
542    flags: Int32Builder,
543    trace_state: StringBuilder,
544
545    // Instrumentation scope
546    scope_name: StringBuilder,
547    scope_version: StringBuilder,
548
549    // Metadata builders
550    service_name: StringDictionaryBuilder<Int32Type>,
551    span_name: StringBuilder,
552    span_kind: StringDictionaryBuilder<Int8Type>,
553
554    // Time builders
555    start_time: TimestampMicrosecondBuilder,
556    end_time: TimestampMicrosecondBuilder,
557    duration_ms: Int64Builder,
558
559    // Status builders
560    status_code: Int32Builder,
561    status_message: StringBuilder,
562
563    // Scouter-specific
564    label: StringBuilder,
565
566    // Attribute builders
567    attributes: MapBuilder<StringBuilder, StringViewBuilder>,
568    resource_attributes: MapBuilder<StringBuilder, StringViewBuilder>,
569
570    // Nested structure builders
571    events: ListBuilder<StructBuilder>,
572    links: ListBuilder<StructBuilder>,
573
574    // Payload builders
575    input: StringViewBuilder,
576    output: StringViewBuilder,
577
578    // Search optimizer
579    search_blob: StringViewBuilder,
580
581    // Partition key (days since Unix epoch)
582    partition_date: Date32Builder,
583}
584
585impl TraceSpanBatchBuilder {
586    pub fn new(schema: SchemaRef) -> Self {
587        let trace_id = FixedSizeBinaryBuilder::new(16);
588        let span_id = FixedSizeBinaryBuilder::new(8);
589        let parent_span_id = FixedSizeBinaryBuilder::new(8);
590
591        let flags = Int32Builder::new();
592        let trace_state = StringBuilder::new();
593
594        let scope_name = StringBuilder::new();
595        let scope_version = StringBuilder::new();
596
597        let service_name = StringDictionaryBuilder::<Int32Type>::new();
598        let span_name = StringBuilder::new();
599        let span_kind = StringDictionaryBuilder::<Int8Type>::new();
600
601        let start_time = TimestampMicrosecondBuilder::new().with_timezone("UTC");
602        let end_time = TimestampMicrosecondBuilder::new().with_timezone("UTC");
603        let duration_ms = Int64Builder::new();
604
605        let status_code = Int32Builder::new();
606        let status_message = StringBuilder::new();
607
608        let label = StringBuilder::new();
609
610        let map_field_name = MapFieldNames {
611            entry: "key_value".to_string(),
612            key: "key".to_string(),
613            value: "value".to_string(),
614        };
615        let attributes = MapBuilder::new(
616            Some(map_field_name.clone()),
617            StringBuilder::new(),
618            StringViewBuilder::new(),
619        );
620        let resource_attributes = MapBuilder::new(
621            Some(map_field_name.clone()),
622            StringBuilder::new(),
623            StringViewBuilder::new(),
624        );
625
626        let event_fields = vec![
627            Field::new("name", DataType::Utf8, false),
628            Field::new(
629                "timestamp",
630                DataType::Timestamp(TimeUnit::Microsecond, Some("UTC".into())),
631                false,
632            ),
633            attribute_field(),
634            Field::new("dropped_attributes_count", DataType::UInt32, false),
635        ];
636
637        let event_struct_builders = vec![
638            Box::new(StringBuilder::new()) as Box<dyn ArrayBuilder>,
639            Box::new(TimestampMicrosecondBuilder::new().with_timezone("UTC"))
640                as Box<dyn ArrayBuilder>,
641            Box::new(MapBuilder::new(
642                Some(map_field_name.clone()),
643                StringBuilder::new(),
644                StringViewBuilder::new(),
645            )) as Box<dyn ArrayBuilder>,
646            Box::new(UInt32Builder::new()) as Box<dyn ArrayBuilder>,
647        ];
648
649        let event_struct_builder = StructBuilder::new(event_fields, event_struct_builders);
650        let events = ListBuilder::new(event_struct_builder);
651
652        let link_fields = vec![
653            Field::new("trace_id", DataType::FixedSizeBinary(16), false),
654            Field::new("span_id", DataType::FixedSizeBinary(8), false),
655            Field::new("trace_state", DataType::Utf8, false),
656            attribute_field(),
657            Field::new("dropped_attributes_count", DataType::UInt32, false),
658        ];
659
660        let link_struct_builders = vec![
661            Box::new(FixedSizeBinaryBuilder::new(16)) as Box<dyn ArrayBuilder>,
662            Box::new(FixedSizeBinaryBuilder::new(8)) as Box<dyn ArrayBuilder>,
663            Box::new(StringBuilder::new()) as Box<dyn ArrayBuilder>,
664            Box::new(MapBuilder::new(
665                Some(map_field_name.clone()),
666                StringBuilder::new(),
667                StringViewBuilder::new(),
668            )) as Box<dyn ArrayBuilder>,
669            Box::new(UInt32Builder::new()) as Box<dyn ArrayBuilder>,
670        ];
671
672        let link_struct_builder = StructBuilder::new(link_fields, link_struct_builders);
673        let links = ListBuilder::new(link_struct_builder);
674
675        let input = StringViewBuilder::new();
676        let output = StringViewBuilder::new();
677        let search_blob = StringViewBuilder::new();
678        let partition_date = Date32Builder::new();
679
680        Self {
681            schema,
682            trace_id,
683            span_id,
684            parent_span_id,
685            flags,
686            trace_state,
687            scope_name,
688            scope_version,
689            service_name,
690            span_name,
691            span_kind,
692            start_time,
693            end_time,
694            duration_ms,
695            status_code,
696            status_message,
697            label,
698            attributes,
699            resource_attributes,
700            events,
701            links,
702            input,
703            output,
704            search_blob,
705            partition_date,
706        }
707    }
708
709    /// Append a single `TraceSpanRecord` to the batch.
710    pub fn append(&mut self, span: &TraceSpanRecord) -> Result<(), TraceEngineError> {
711        // IDs
712        let trace_bytes = span.trace_id.as_bytes();
713        self.trace_id
714            .append_value(trace_bytes)
715            .map_err(TraceEngineError::ArrowError)?;
716
717        let span_bytes = span.span_id.as_bytes();
718        self.span_id
719            .append_value(span_bytes)
720            .map_err(TraceEngineError::ArrowError)?;
721
722        match &span.parent_span_id {
723            Some(pid) => {
724                self.parent_span_id
725                    .append_value(pid.as_bytes())
726                    .map_err(TraceEngineError::ArrowError)?;
727            }
728            None => self.parent_span_id.append_null(),
729        }
730
731        // W3C Trace Context
732        self.flags.append_value(span.flags);
733        self.trace_state.append_value(&span.trace_state);
734
735        // Instrumentation scope
736        self.scope_name.append_value(&span.scope_name);
737        match &span.scope_version {
738            Some(v) => self.scope_version.append_value(v),
739            None => self.scope_version.append_null(),
740        }
741
742        // Metadata
743        self.service_name.append_value(&span.service_name);
744        self.span_name.append_value(&span.span_name);
745        // span_kind should be non-empty in TraceSpanRecord; store an empty value as null defensively
746        if span.span_kind.is_empty() {
747            self.span_kind.append_null();
748        } else {
749            self.span_kind.append_value(&span.span_kind);
750        }
751
752        // Timestamps
753        self.start_time
754            .append_value(span.start_time.timestamp_micros());
755        self.end_time.append_value(span.end_time.timestamp_micros());
756        self.duration_ms.append_value(span.duration_ms);
757
758        // Status
759        self.status_code.append_value(span.status_code);
760        if span.status_message.is_empty() {
761            self.status_message.append_null();
762        } else {
763            self.status_message.append_value(&span.status_message);
764        }
765
766        // Scouter-specific
767        match &span.label {
768            Some(l) => self.label.append_value(l),
769            None => self.label.append_null(),
770        }
771
772        // Attributes
773        self.append_attributes(&span.attributes).inspect_err(|e| {
774            error!(
775                "Failed to append attributes for span {}: {}",
776                span.span_id, e
777            )
778        })?;
779
780        // Resource attributes
781        self.append_resource_attributes(&span.resource_attributes)
782            .inspect_err(|e| {
783                error!(
784                    "Failed to append resource_attributes for span {}: {}",
785                    span.span_id, e
786                )
787            })?;
788
789        // Events
790        self.append_events(&span.events)
791            .inspect_err(|e| error!("Failed to append events for span {}: {}", span.span_id, e))?;
792
793        // Links
794        self.append_links(&span.links)
795            .inspect_err(|e| error!("Failed to append links for span {}: {}", span.span_id, e))?;
796
797        // Payloads
798        self.input.append_value(
799            serde_json::to_string(&span.input).unwrap_or_else(|_| "null".to_string()),
800        );
801
802        self.output.append_value(
803            serde_json::to_string(&span.output).unwrap_or_else(|_| "null".to_string()),
804        );
805
806        // Search blob
807        let search_text = Self::build_search_blob(span);
808        self.search_blob.append_value(search_text);
809
810        // Partition key — days since Unix epoch, derived from span start date
811        let days = span.start_time.date_naive().num_days_from_ce() - UNIX_EPOCH_DAYS;
812        self.partition_date.append_value(days);
813
814        Ok(())
815    }
816
817    fn append_attributes(&mut self, attributes: &[Attribute]) -> Result<(), TraceEngineError> {
818        for attr in attributes {
819            self.attributes.keys().append_value(&attr.key);
820            let value_str =
821                serde_json::to_string(&attr.value).unwrap_or_else(|_| "null".to_string());
822            self.attributes.values().append_value(value_str);
823        }
824        self.attributes.append(true)?;
825        Ok(())
826    }
827
828    fn append_resource_attributes(
829        &mut self,
830        attributes: &[Attribute],
831    ) -> Result<(), TraceEngineError> {
832        if attributes.is_empty() {
833            self.resource_attributes.append(false)?; // null map
834        } else {
835            for attr in attributes {
836                self.resource_attributes.keys().append_value(&attr.key);
837                let value_str =
838                    serde_json::to_string(&attr.value).unwrap_or_else(|_| "null".to_string());
839                self.resource_attributes.values().append_value(value_str);
840            }
841            self.resource_attributes.append(true)?;
842        }
843        Ok(())
844    }
845
846    fn append_events(&mut self, events: &[SpanEvent]) -> Result<(), TraceEngineError> {
847        let event_struct = self.events.values();
848        for event in events {
849            let name_builder = event_struct
850                .field_builder::<StringBuilder>(0)
851                .ok_or_else(|| TraceEngineError::DowncastError("event name builder"))?;
852            name_builder.append_value(&event.name);
853
854            let time_builder = event_struct
855                .field_builder::<TimestampMicrosecondBuilder>(1)
856                .ok_or_else(|| TraceEngineError::DowncastError("event timestamp builder"))?;
857            time_builder.append_value(event.timestamp.timestamp_micros());
858
859            let attr_builder = event_struct
860                .field_builder::<MapBuilder<StringBuilder, StringViewBuilder>>(2)
861                .ok_or_else(|| TraceEngineError::DowncastError("event attributes builder"))?;
862
863            for attr in &event.attributes {
864                attr_builder.keys().append_value(&attr.key);
865                let value_str =
866                    serde_json::to_string(&attr.value).unwrap_or_else(|_| "null".to_string());
867                attr_builder.values().append_value(value_str);
868            }
869            attr_builder.append(true)?;
870
871            let dropped_builder =
872                event_struct
873                    .field_builder::<UInt32Builder>(3)
874                    .ok_or_else(|| {
875                        TraceEngineError::DowncastError("dropped attributes count builder")
876                    })?;
877            dropped_builder.append_value(event.dropped_attributes_count);
878
879            event_struct.append(true);
880        }
881
882        self.events.append(true);
883        Ok(())
884    }
885
886    fn append_links(&mut self, links: &[SpanLink]) -> Result<(), TraceEngineError> {
887        let link_struct = self.links.values();
888
889        for link in links {
890            let trace_builder = link_struct
891                .field_builder::<FixedSizeBinaryBuilder>(0)
892                .ok_or_else(|| TraceEngineError::DowncastError("link trace_id builder"))?;
893
894            let trace_bytes = TraceId::hex_to_bytes(&link.trace_id).map_err(|e| {
895                TraceEngineError::InvalidHexId(link.trace_id.clone(), e.to_string())
896            })?;
897            trace_builder.append_value(&trace_bytes)?;
898
899            let span_builder = link_struct
900                .field_builder::<FixedSizeBinaryBuilder>(1)
901                .ok_or_else(|| TraceEngineError::DowncastError("link span_id builder"))?;
902
903            let span_bytes = SpanId::hex_to_bytes(&link.span_id)
904                .map_err(|e| TraceEngineError::InvalidHexId(link.span_id.clone(), e.to_string()))?;
905            span_builder.append_value(&span_bytes)?;
906
907            let state_builder = link_struct
908                .field_builder::<StringBuilder>(2)
909                .ok_or_else(|| TraceEngineError::DowncastError("link trace_state builder"))?;
910            state_builder.append_value(&link.trace_state);
911
912            let attr_builder = link_struct
913                .field_builder::<MapBuilder<StringBuilder, StringViewBuilder>>(3)
914                .ok_or_else(|| TraceEngineError::DowncastError("link attributes builder"))?;
915
916            for attr in &link.attributes {
917                attr_builder.keys().append_value(&attr.key);
918                let value_str =
919                    serde_json::to_string(&attr.value).unwrap_or_else(|_| "null".to_string());
920                attr_builder.values().append_value(value_str);
921            }
922            attr_builder.append(true)?;
923
924            let dropped_builder =
925                link_struct
926                    .field_builder::<UInt32Builder>(4)
927                    .ok_or_else(|| {
928                        TraceEngineError::DowncastError("link dropped attributes count builder")
929                    })?;
930            dropped_builder.append_value(link.dropped_attributes_count);
931
932            link_struct.append(true);
933        }
934
935        self.links.append(true);
936        Ok(())
937    }
938
939    /// Build a concatenated search string from `TraceSpanRecord` for full-text queries.
940    ///
941    /// Uses pipe-bounded tokens (`|key=value|`) to prevent false-positive substring matches
942    /// where a value contains something that looks like a different attribute key or value.
943    /// Queries use `%key=value%` patterns, which match these pipe-bounded tokens; note that
944    /// older archive data written with the `key:value` separator is not matched by that pattern.
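    ///
    /// For example, a span from service "checkout" named "POST /cart" with one attribute
    /// `user_id=42` produces something like: `|checkout| |POST /cart| |my.scope| |user_id=42|`.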
945    fn build_search_blob(span: &TraceSpanRecord) -> String {
946        let mut search = String::with_capacity(512);
947
948        // Pipe-bounded bare tokens for full-text (service, span, scope)
949        search.push('|');
950        search.push_str(&span.service_name);
951        search.push_str("| |");
952        search.push_str(&span.span_name);
953        search.push_str("| |");
954        search.push_str(&span.scope_name);
955        search.push('|');
956
957        if !span.status_message.is_empty() {
958            search.push_str(" |");
959            search.push_str(&span.status_message);
960            search.push('|');
961        }
962
963        // Pipe-bounded key=value tokens — standardize on `=` separator
964        for attr in &span.attributes {
965            search.push_str(" |");
966            search.push_str(&attr.key);
967            search.push('=');
968            match &attr.value {
969                Value::String(s) => search.push_str(s),
970                Value::Number(n) => search.push_str(&n.to_string()),
971                Value::Bool(b) => search.push_str(&b.to_string()),
972                Value::Null => {}
973                other => search.push_str(&other.to_string()),
974            }
975            search.push('|');
976        }
977
978        for event in &span.events {
979            search.push_str(" |");
980            search.push_str(&event.name);
981            search.push('|');
982        }
983
984        search
985    }
986
987    /// Finalize and build the RecordBatch. Column order must match `create_schema()`.
988    pub fn finish(mut self) -> Result<RecordBatch, TraceEngineError> {
989        let batch = RecordBatch::try_new(
990            self.schema.clone(),
991            vec![
992                Arc::new(self.trace_id.finish()),
993                Arc::new(self.span_id.finish()),
994                Arc::new(self.parent_span_id.finish()),
995                Arc::new(self.flags.finish()),
996                Arc::new(self.trace_state.finish()),
997                Arc::new(self.scope_name.finish()),
998                Arc::new(self.scope_version.finish()),
999                Arc::new(self.service_name.finish()),
1000                Arc::new(self.span_name.finish()),
1001                Arc::new(self.span_kind.finish()),
1002                Arc::new(self.start_time.finish()),
1003                Arc::new(self.end_time.finish()),
1004                Arc::new(self.duration_ms.finish()),
1005                Arc::new(self.status_code.finish()),
1006                Arc::new(self.status_message.finish()),
1007                Arc::new(self.label.finish()),
1008                Arc::new(self.attributes.finish()),
1009                Arc::new(self.resource_attributes.finish()),
1010                Arc::new(self.events.finish()),
1011                Arc::new(self.links.finish()),
1012                Arc::new(self.input.finish()),
1013                Arc::new(self.output.finish()),
1014                Arc::new(self.search_blob.finish()),
1015                Arc::new(self.partition_date.finish()),
1016            ],
1017        )?;
1018
1019        Ok(batch)
1020    }
1021}