scouter_dataframe/parquet/tracing/engine.rs

use crate::error::TraceEngineError;
use crate::parquet::control::{get_pod_id, ControlTableEngine};
use crate::parquet::tracing::traits::arrow_schema_to_delta;
use crate::parquet::tracing::traits::attribute_field;
use crate::parquet::tracing::traits::TraceSchemaExt;
use crate::parquet::utils::{create_attr_match_udf, register_cloud_logstore_factories};
use crate::storage::ObjectStore;
use arrow::array::*;
use arrow::datatypes::*;
use arrow_array::RecordBatch;
use chrono::{Datelike, Utc};
use datafusion::prelude::SessionContext;
use deltalake::datafusion::parquet::basic::{Compression, Encoding, ZstdLevel};
use deltalake::datafusion::parquet::file::properties::{EnabledStatistics, WriterProperties};
use deltalake::datafusion::parquet::schema::types::ColumnPath;
use deltalake::operations::optimize::OptimizeType;
use deltalake::{DeltaTable, DeltaTableBuilder, TableProperty};
use scouter_settings::ObjectStorageSettings;
use scouter_types::SpanId;
use scouter_types::TraceId;
use scouter_types::TraceSpanRecord;
use scouter_types::{Attribute, SpanEvent, SpanLink};
use serde_json::Value;
use std::sync::Arc;
use tokio::sync::oneshot;
use tokio::sync::{mpsc, RwLock as AsyncRwLock};
use tokio::time::{interval, Duration};
use tracing::{debug, error, info, instrument};
use url::Url;

const TRACE_SPAN_TABLE_NAME: &str = "trace_spans";

/// Control table task names for distributed coordination.
const TASK_OPTIMIZE: &str = "trace_optimize";
const TASK_RETENTION: &str = "trace_retention";

/// Days from year-0001 to the Unix epoch (1970-01-01), used to convert chrono → Arrow Date32.
/// Equivalent to `NaiveDate::from_ymd_opt(1970, 1, 1).unwrap().num_days_from_ce()`.
const UNIX_EPOCH_DAYS: i32 = 719_163;
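// A worked example of the conversion (illustrative only, not used by the engine):
// `num_days_from_ce` counts days from 0001-01-01, so subtracting UNIX_EPOCH_DAYS
// rebases the count onto the Unix epoch, which is what Arrow's Date32 stores.
//
//   use chrono::{Datelike, NaiveDate};
//   let d = NaiveDate::from_ymd_opt(2024, 1, 1).unwrap();
//   let date32 = d.num_days_from_ce() - UNIX_EPOCH_DAYS;
//   assert_eq!(date32, 19_723); // 738_886 - 719_163: 2024-01-01 is day 19_723 since epoch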

pub enum TableCommand {
    Write {
        spans: Vec<TraceSpanRecord>,
        respond_to: oneshot::Sender<Result<(), TraceEngineError>>,
    },
    Optimize {
        respond_to: oneshot::Sender<Result<(), TraceEngineError>>,
    },
    Vacuum {
        retention_hours: u64,
        respond_to: oneshot::Sender<Result<(), TraceEngineError>>,
    },
    Expire {
        cutoff_date: chrono::NaiveDate,
        respond_to: oneshot::Sender<Result<(), TraceEngineError>>,
    },
    Shutdown,
}

async fn build_url(object_store: &ObjectStore) -> Result<Url, TraceEngineError> {
    let mut base = object_store.get_base_url()?;
    let mut path = base.path().to_string();
    if !path.ends_with('/') {
        path.push('/');
    }
    path.push_str(TRACE_SPAN_TABLE_NAME);
    base.set_path(&path);
    Ok(base)
}

#[instrument(skip_all)]
async fn create_table(
    object_store: &ObjectStore,
    table_url: Url,
    schema: SchemaRef,
) -> Result<DeltaTable, TraceEngineError> {
    info!(
        "Creating trace span table [{}://.../{}]",
        table_url.scheme(),
        table_url
            .path_segments()
            .and_then(|mut s| s.next_back())
            .unwrap_or(TRACE_SPAN_TABLE_NAME)
    );

    let store = object_store.as_dyn_object_store();
    let table = DeltaTableBuilder::from_url(table_url.clone())?
        .with_storage_backend(store, table_url)
        .build()?;

    let delta_fields = arrow_schema_to_delta(&schema);

    table
        .create()
        .with_table_name(TRACE_SPAN_TABLE_NAME)
        .with_columns(delta_fields)
        .with_partition_columns(vec!["partition_date".to_string()])
        .with_configuration_property(TableProperty::CheckpointInterval, Some("5"))
        // Only collect min/max statistics for columns that benefit from data skipping.
        .with_configuration_property(
            TableProperty::DataSkippingStatsColumns,
            Some("start_time,end_time,service_name,duration_ms,status_code,partition_date"),
        )
        .await
        .map_err(Into::into)
}
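// For reference, partitioning on `partition_date` yields hive-style directories in the
// object store (an illustrative layout; actual file names are generated by the writer):
//
//   trace_spans/_delta_log/00000000000000000000.json
//   trace_spans/partition_date=2024-01-01/part-00001-<uuid>.parquet
//   trace_spans/partition_date=2024-01-02/part-00001-<uuid>.parquet
//
// Queries that filter on partition_date prune whole directories before any Parquet I/O.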

#[instrument(skip_all)]
async fn build_or_create_table(
    object_store: &ObjectStore,
    schema: SchemaRef,
) -> Result<DeltaTable, TraceEngineError> {
    register_cloud_logstore_factories();
    let table_url = build_url(object_store).await?;
    info!(
        "Attempting to load trace span table [{}://.../{}]",
        table_url.scheme(),
        table_url
            .path_segments()
            .and_then(|mut s| s.next_back())
            .unwrap_or(TRACE_SPAN_TABLE_NAME)
    );

    // Check whether a Delta table already exists before deciding to create one.
    // Local tables can be checked cheaply via the filesystem; remote tables require
    // an actual load attempt against the object store.
    let is_delta_table = if table_url.scheme() == "file" {
        if let Ok(path) = table_url.to_file_path() {
            if !path.exists() {
                info!("Creating directory for local table: {:?}", path);
                std::fs::create_dir_all(&path)?;
            }
            path.join("_delta_log").exists()
        } else {
            false
        }
    } else {
        let store = object_store.as_dyn_object_store();
        match DeltaTableBuilder::from_url(table_url.clone()) {
            Ok(builder) => builder
                .with_storage_backend(store, table_url.clone())
                .load()
                .await
                .is_ok(),
            Err(_) => false,
        }
    };

    if is_delta_table {
        info!(
            "Loading existing trace span table [{}://.../{}]",
            table_url.scheme(),
            table_url
                .path_segments()
                .and_then(|mut s| s.next_back())
                .unwrap_or(TRACE_SPAN_TABLE_NAME)
        );
        let store = object_store.as_dyn_object_store();
        let table = DeltaTableBuilder::from_url(table_url.clone())?
            .with_storage_backend(store, table_url)
            .load()
            .await?;
        Ok(table)
    } else {
        info!("Table does not exist, creating new table");
        create_table(object_store, table_url, schema).await
    }
}

/// Core trace span engine for high-throughput observability workloads.
///
/// Hierarchy fields (depth, span_order, path, root_span_id) are NOT stored — they are
/// computed at query time via Rust DFS traversal. This matches how Jaeger/Zipkin operate and
/// avoids ordering dependencies during ingest (spans may arrive out-of-order within a batch).
pub struct TraceSpanDBEngine {
    schema: Arc<Schema>,
    pub object_store: ObjectStore,
    table: Arc<AsyncRwLock<DeltaTable>>,
    pub ctx: Arc<SessionContext>,
    control: ControlTableEngine,
}

impl TraceSchemaExt for TraceSpanDBEngine {}

impl TraceSpanDBEngine {
    pub async fn new(storage_settings: &ObjectStorageSettings) -> Result<Self, TraceEngineError> {
        let object_store = ObjectStore::new(storage_settings)?;
        let schema = Arc::new(Self::create_schema());
        let delta_table = build_or_create_table(&object_store, schema.clone()).await?;
        let ctx = object_store.get_session()?;

        // Register the match_attr UDF so DataFusion plans can use it for search_blob filtering.
        // This must happen before any query is planned — UDFs live on the SessionContext.
        ctx.register_udf(create_attr_match_udf());

        // A freshly-created table has no committed Parquet files yet, so table_provider()
        // may fail. Defer registration until the first write populates the log.
        if let Ok(provider) = delta_table.table_provider().await {
            ctx.register_table(TRACE_SPAN_TABLE_NAME, provider)?;
        } else {
            info!("Empty table at init — deferring SessionContext registration until first write");
        }
        let control = ControlTableEngine::new(storage_settings, get_pod_id()).await?;

        Ok(TraceSpanDBEngine {
            schema,
            object_store,
            table: Arc::new(AsyncRwLock::new(delta_table)),
            ctx: Arc::new(ctx),
            control,
        })
    }

    /// Build a RecordBatch from a vector of TraceSpanRecord (raw ingest type, no hierarchy).
    pub fn build_batch(
        &self,
        spans: Vec<TraceSpanRecord>,
    ) -> Result<RecordBatch, TraceEngineError> {
        let start_time = std::time::Instant::now();
        let mut builder = TraceSpanBatchBuilder::new(self.schema.clone());

        for span in spans {
            builder.append(&span)?;
        }

        let record_batch = builder
            .finish()
            .inspect_err(|e| error!("Failed to build RecordBatch: {}", e))?;

        let duration = start_time.elapsed();
        debug!(
            "Built RecordBatch with {} rows in {:?}",
            record_batch.num_rows(),
            duration
        );
        Ok(record_batch)
    }

    /// Build the shared `WriterProperties` used for both ingest writes and Z-ORDER compaction.
    fn build_writer_props() -> WriterProperties {
        WriterProperties::builder()
            // Row group size: creates ~4 groups per 128MB file so bloom + page stats
            // prune within files, not just across files.
            .set_max_row_group_size(32_768)
            // Bloom filter on trace_id: skips ~99% of row groups for trace_id equality lookups.
            .set_column_bloom_filter_enabled(ColumnPath::new(vec!["trace_id".to_string()]), true)
            .set_column_bloom_filter_fpp(ColumnPath::new(vec!["trace_id".to_string()]), 0.01)
            .set_column_bloom_filter_ndv(ColumnPath::new(vec!["trace_id".to_string()]), 32_768)
            // service_name: low cardinality but hot lookup path — bloom skips row groups fast
            .set_column_bloom_filter_enabled(
                ColumnPath::new(vec!["service_name".to_string()]),
                true,
            )
            .set_column_bloom_filter_fpp(ColumnPath::new(vec!["service_name".to_string()]), 0.01)
            .set_column_bloom_filter_ndv(ColumnPath::new(vec!["service_name".to_string()]), 256)
            // span_name: high-cardinality equality queries (e.g. "grpc.unary/method")
            .set_column_bloom_filter_enabled(ColumnPath::new(vec!["span_name".to_string()]), true)
            .set_column_bloom_filter_fpp(ColumnPath::new(vec!["span_name".to_string()]), 0.01)
            .set_column_bloom_filter_ndv(ColumnPath::new(vec!["span_name".to_string()]), 32_768)
            // Page-level stats on start_time: finest-grained time pruning within row groups.
            .set_column_statistics_enabled(
                ColumnPath::new(vec!["start_time".to_string()]),
                EnabledStatistics::Page,
            )
            // status_code: page-level min/max prunes pages for error-only queries.
            // Do NOT use a bloom filter: only 3 possible values (0/1/2), overhead > benefit.
            .set_column_statistics_enabled(
                ColumnPath::new(vec!["status_code".to_string()]),
                EnabledStatistics::Page,
            )
            // Delta encoding on near-sorted integer columns: 4-8x compression on timestamps
            // after Z-ORDER compaction; 2-4x on durations within a service.
            .set_column_encoding(
                ColumnPath::new(vec!["start_time".to_string()]),
                Encoding::DELTA_BINARY_PACKED,
            )
            .set_column_encoding(
                ColumnPath::new(vec!["duration_ms".to_string()]),
                Encoding::DELTA_BINARY_PACKED,
            )
            // ZSTD level 3: ~40% better compression than SNAPPY on text columns;
            // marginal decompression overhead is offset by reduced I/O.
            .set_compression(Compression::ZSTD(ZstdLevel::try_new(3).unwrap()))
            // Dictionary hint on span_name: high repetition, similar to service_name.
            .set_column_dictionary_enabled(ColumnPath::new(vec!["span_name".to_string()]), true)
            .build()
    }
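
    // A sketch of the query shapes these properties are tuned for (illustrative SQL only;
    // exact literal syntax depends on the caller's query layer):
    //
    //   -- trace_id bloom filter: row groups that cannot contain the id are skipped outright
    //   SELECT * FROM trace_spans WHERE trace_id = <16-byte id>;
    //
    //   -- start_time page statistics: a narrow time window reads only a few pages per group
    //   SELECT count(*) FROM trace_spans
    //   WHERE start_time >= '2024-01-01T00:00:00Z'
    //     AND start_time <  '2024-01-01T01:00:00Z';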

    /// Write spans to the Delta table (single-writer invariant via actor channel).
    async fn write_spans(&self, spans: Vec<TraceSpanRecord>) -> Result<(), TraceEngineError> {
        info!("Engine received write request for {} spans", spans.len());

        let batch = self
            .build_batch(spans)
            .inspect_err(|e| error!("failed to build batch: {:?}", e))?;
        info!("Built batch with {} rows", batch.num_rows());

        let mut table_guard = self.table.write().await;
        info!("Acquired table write lock");

        // update_incremental is intentionally omitted here.
        //
        // This engine runs as a single-writer actor — no other process commits to this
        // Delta table, so the in-memory state is always current. Calling update_incremental
        // on a freshly-created empty table (version 0, no data files) causes the Delta
        // Kernel to emit "Not a Delta table: No files in log segment", which mutates
        // table_guard into a corrupted intermediate state before the error propagates.
        // That corrupted clone then has no partition column metadata, producing unpartitioned
        // flat Parquet files instead of partition_date=YYYY-MM-DD/ hive directories.

        let current_table = table_guard.clone();

        let updated_table = current_table
            .write(vec![batch])
            .with_save_mode(deltalake::protocol::SaveMode::Append)
            .with_writer_properties(Self::build_writer_props())
            // Always declare partition columns explicitly — do not rely solely on the
            // in-memory snapshot, which can be stale after a failed update_incremental.
            .with_partition_columns(vec!["partition_date".to_string()])
            .await?;

        info!("Successfully wrote batch to Delta Lake");

        self.ctx.deregister_table(TRACE_SPAN_TABLE_NAME)?;
        self.ctx
            .register_table(TRACE_SPAN_TABLE_NAME, updated_table.table_provider().await?)?;

        *table_guard = updated_table;

        Ok(())
    }

    async fn optimize_table(&self) -> Result<(), TraceEngineError> {
        let mut table_guard = self.table.write().await;

        let current_table = table_guard.clone();

        let (updated_table, _metrics) = current_table
            .optimize()
            .with_target_size(128 * 1024 * 1024)
            .with_type(OptimizeType::ZOrder(vec![
                "start_time".to_string(),
                "service_name".to_string(),
            ]))
            // Bloom filters must be re-specified here — compaction rewrites all Parquet files
            // from scratch using these properties. Without this, every compaction cycle
            // silently discards all bloom filters on the rewritten files.
            .with_writer_properties(Self::build_writer_props())
            .await?;

        self.ctx.deregister_table(TRACE_SPAN_TABLE_NAME)?;
        self.ctx
            .register_table(TRACE_SPAN_TABLE_NAME, updated_table.table_provider().await?)?;

        *table_guard = updated_table;

        Ok(())
    }

    async fn vacuum_table(&self, retention_hours: u64) -> Result<(), TraceEngineError> {
        let mut table_guard = self.table.write().await;

        let (updated_table, _metrics) = table_guard
            .clone()
            .vacuum()
            .with_retention_period(chrono::Duration::hours(retention_hours as i64))
            .with_enforce_retention_duration(false)
            .await?;

        self.ctx.deregister_table(TRACE_SPAN_TABLE_NAME)?;
        self.ctx
            .register_table(TRACE_SPAN_TABLE_NAME, updated_table.table_provider().await?)?;

        *table_guard = updated_table;

        Ok(())
    }

    /// Delete all rows with `partition_date` older than `cutoff_date`.
    ///
    /// This is a logical delete — it writes a new Delta log entry marking the rows as removed.
    /// Physical disk space is not reclaimed until `vacuum_table()` runs afterwards.
    async fn expire_table(&self, cutoff_date: chrono::NaiveDate) -> Result<(), TraceEngineError> {
        let mut table_guard = self.table.write().await;

        // CAST('YYYY-MM-DD' AS DATE) produces a Date32 that matches the partition column type,
        // which allows Delta Lake to translate this into a partition directory filter.
        let predicate = format!(
            "partition_date < CAST('{}' AS DATE)",
            cutoff_date.format("%Y-%m-%d")
        );

        let (updated_table, metrics) = table_guard
            .clone()
            .delete()
            .with_predicate(predicate)
            .await?;

        info!(
            "Expired {} rows older than {}",
            metrics.num_deleted_rows, cutoff_date
        );

        self.ctx.deregister_table(TRACE_SPAN_TABLE_NAME)?;
        self.ctx
            .register_table(TRACE_SPAN_TABLE_NAME, updated_table.table_provider().await?)?;

        *table_guard = updated_table;

        Ok(())
    }

    /// Try to claim and run the optimize task via the control table.
    ///
    /// The control table's OCC ensures only one pod runs this at a time across
    /// the entire K8s deployment.
    async fn try_run_optimize(&self, interval_hours: u64) {
        match self.control.try_claim_task(TASK_OPTIMIZE).await {
            Ok(true) => match self.optimize_table().await {
                Ok(()) => {
                    let _ = self
                        .control
                        .release_task(
                            TASK_OPTIMIZE,
                            chrono::Duration::hours(interval_hours as i64),
                        )
                        .await;
                }
                Err(e) => {
                    error!("Optimize failed: {}", e);
                    let _ = self.control.release_task_on_failure(TASK_OPTIMIZE).await;
                }
            },
            Ok(false) => { /* not due or another pod owns it */ }
            Err(e) => error!("Optimize claim check failed: {}", e),
        }
    }

    /// Try to claim and run the retention task via the control table.
    async fn try_run_retention(&self, retention_days: u32) {
        match self.control.try_claim_task(TASK_RETENTION).await {
            Ok(true) => {
                let cutoff =
                    (Utc::now() - chrono::Duration::days(retention_days as i64)).date_naive();
                match self.expire_table(cutoff).await {
                    Ok(()) => {
                        // Reclaim disk space after the logical delete
                        let _ = self.vacuum_table(0).await;
                        let _ = self
                            .control
                            .release_task(TASK_RETENTION, chrono::Duration::hours(24))
                            .await;
                    }
                    Err(e) => {
                        error!("Retention failed: {}", e);
                        let _ = self.control.release_task_on_failure(TASK_RETENTION).await;
                    }
                }
            }
            Ok(false) => {}
            Err(e) => error!("Retention claim check failed: {}", e),
        }
    }

    #[instrument(skip_all, name = "trace_engine_actor")]
    pub fn start_actor(
        self,
        compaction_interval_hours: u64,
        retention_days: Option<u32>,
    ) -> (mpsc::Sender<TableCommand>, tokio::task::JoinHandle<()>) {
        let (tx, mut rx) = mpsc::channel::<TableCommand>(100);

        let handle = tokio::spawn(async move {
            // Poll every 5 minutes — the actual schedule is persisted in the
            // control table's `next_run_at` and survives pod restarts.
            let mut scheduler_ticker = interval(Duration::from_secs(5 * 60));
            scheduler_ticker.tick().await; // skip the immediate first tick

            loop {
                tokio::select! {
                    Some(cmd) = rx.recv() => {
                        match cmd {
                            TableCommand::Write { spans, respond_to } => {
                                match self.write_spans(spans).await {
                                    Ok(_) => { let _ = respond_to.send(Ok(())); }
                                    Err(e) => {
                                        tracing::error!("Write failed: {}", e);
                                        let _ = respond_to.send(Err(e));
                                    }
                                }
                            }
                            TableCommand::Optimize { respond_to } => {
                                // Direct admin request — bypasses the control table
                                let _ = respond_to.send(self.optimize_table().await);
                            }
                            TableCommand::Vacuum { retention_hours, respond_to } => {
                                let _ = respond_to.send(self.vacuum_table(retention_hours).await);
                            }
                            TableCommand::Expire { cutoff_date, respond_to } => {
                                let _ = respond_to.send(self.expire_table(cutoff_date).await);
                            }
                            TableCommand::Shutdown => {
                                tracing::info!("Shutting down table engine");
                                break;
                            }
                        }
                    }
                    _ = scheduler_ticker.tick() => {
                        self.try_run_optimize(compaction_interval_hours).await;
                        if let Some(days) = retention_days {
                            self.try_run_retention(days).await;
                        }
                    }
                }
            }
        });

        (tx, handle)
    }
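
    // A minimal usage sketch for the actor protocol (illustrative only; `engine` and
    // `spans` are assumed to exist in the caller's scope):
    //
    //   let (tx, _handle) = engine.start_actor(6, Some(30));
    //   let (respond_to, rx) = oneshot::channel();
    //   tx.send(TableCommand::Write { spans, respond_to }).await?;
    //   rx.await??; // outer ?: actor dropped the responder; inner ?: TraceEngineError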
}

/// Efficient builder for converting `TraceSpanRecord` (ingest type) into Arrow `RecordBatch`.
///
/// Hierarchy fields (depth, span_order, path, root_span_id) are NOT included — they are
/// computed at query time from the flat span data stored here.
pub struct TraceSpanBatchBuilder {
    schema: SchemaRef,

    // ID builders
    trace_id: FixedSizeBinaryBuilder,
    span_id: FixedSizeBinaryBuilder,
    parent_span_id: FixedSizeBinaryBuilder,

    // W3C Trace Context
    flags: Int32Builder,
    trace_state: StringBuilder,

    // Instrumentation scope
    scope_name: StringBuilder,
    scope_version: StringBuilder,

    // Metadata builders
    service_name: StringDictionaryBuilder<Int32Type>,
    span_name: StringBuilder,
    span_kind: StringDictionaryBuilder<Int8Type>,

    // Time builders
    start_time: TimestampMicrosecondBuilder,
    end_time: TimestampMicrosecondBuilder,
    duration_ms: Int64Builder,

    // Status builders
    status_code: Int32Builder,
    status_message: StringBuilder,

    // Scouter-specific
    label: StringBuilder,

    // Attribute builders
    attributes: MapBuilder<StringBuilder, StringViewBuilder>,
    resource_attributes: MapBuilder<StringBuilder, StringViewBuilder>,

    // Nested structure builders
    events: ListBuilder<StructBuilder>,
    links: ListBuilder<StructBuilder>,

    // Payload builders
    input: StringViewBuilder,
    output: StringViewBuilder,

    // Search optimizer
    search_blob: StringViewBuilder,

    // Partition key (days since Unix epoch)
    partition_date: Date32Builder,
}

impl TraceSpanBatchBuilder {
    pub fn new(schema: SchemaRef) -> Self {
        let trace_id = FixedSizeBinaryBuilder::new(16);
        let span_id = FixedSizeBinaryBuilder::new(8);
        let parent_span_id = FixedSizeBinaryBuilder::new(8);

        let flags = Int32Builder::new();
        let trace_state = StringBuilder::new();

        let scope_name = StringBuilder::new();
        let scope_version = StringBuilder::new();

        let service_name = StringDictionaryBuilder::<Int32Type>::new();
        let span_name = StringBuilder::new();
        let span_kind = StringDictionaryBuilder::<Int8Type>::new();

        let start_time = TimestampMicrosecondBuilder::new().with_timezone("UTC");
        let end_time = TimestampMicrosecondBuilder::new().with_timezone("UTC");
        let duration_ms = Int64Builder::new();

        let status_code = Int32Builder::new();
        let status_message = StringBuilder::new();

        let label = StringBuilder::new();

        let map_field_name = MapFieldNames {
            entry: "key_value".to_string(),
            key: "key".to_string(),
            value: "value".to_string(),
        };
        let attributes = MapBuilder::new(
            Some(map_field_name.clone()),
            StringBuilder::new(),
            StringViewBuilder::new(),
        );
        let resource_attributes = MapBuilder::new(
            Some(map_field_name.clone()),
            StringBuilder::new(),
            StringViewBuilder::new(),
        );

        let event_fields = vec![
            Field::new("name", DataType::Utf8, false),
            Field::new(
                "timestamp",
                DataType::Timestamp(TimeUnit::Microsecond, Some("UTC".into())),
                false,
            ),
            attribute_field(),
            Field::new("dropped_attributes_count", DataType::UInt32, false),
        ];

        let event_struct_builders = vec![
            Box::new(StringBuilder::new()) as Box<dyn ArrayBuilder>,
            Box::new(TimestampMicrosecondBuilder::new().with_timezone("UTC"))
                as Box<dyn ArrayBuilder>,
            Box::new(MapBuilder::new(
                Some(map_field_name.clone()),
                StringBuilder::new(),
                StringViewBuilder::new(),
            )) as Box<dyn ArrayBuilder>,
            Box::new(UInt32Builder::new()) as Box<dyn ArrayBuilder>,
        ];

        let event_struct_builder = StructBuilder::new(event_fields, event_struct_builders);
        let events = ListBuilder::new(event_struct_builder);

        let link_fields = vec![
            Field::new("trace_id", DataType::FixedSizeBinary(16), false),
            Field::new("span_id", DataType::FixedSizeBinary(8), false),
            Field::new("trace_state", DataType::Utf8, false),
            attribute_field(),
            Field::new("dropped_attributes_count", DataType::UInt32, false),
        ];

        let link_struct_builders = vec![
            Box::new(FixedSizeBinaryBuilder::new(16)) as Box<dyn ArrayBuilder>,
            Box::new(FixedSizeBinaryBuilder::new(8)) as Box<dyn ArrayBuilder>,
            Box::new(StringBuilder::new()) as Box<dyn ArrayBuilder>,
            Box::new(MapBuilder::new(
                Some(map_field_name.clone()),
                StringBuilder::new(),
                StringViewBuilder::new(),
            )) as Box<dyn ArrayBuilder>,
            Box::new(UInt32Builder::new()) as Box<dyn ArrayBuilder>,
        ];

        let link_struct_builder = StructBuilder::new(link_fields, link_struct_builders);
        let links = ListBuilder::new(link_struct_builder);

        let input = StringViewBuilder::new();
        let output = StringViewBuilder::new();
        let search_blob = StringViewBuilder::new();
        let partition_date = Date32Builder::new();

        Self {
            schema,
            trace_id,
            span_id,
            parent_span_id,
            flags,
            trace_state,
            scope_name,
            scope_version,
            service_name,
            span_name,
            span_kind,
            start_time,
            end_time,
            duration_ms,
            status_code,
            status_message,
            label,
            attributes,
            resource_attributes,
            events,
            links,
            input,
            output,
            search_blob,
            partition_date,
        }
    }

    /// Append a single `TraceSpanRecord` to the batch.
    pub fn append(&mut self, span: &TraceSpanRecord) -> Result<(), TraceEngineError> {
        // IDs
        let trace_bytes = span.trace_id.as_bytes();
        self.trace_id
            .append_value(trace_bytes)
            .map_err(TraceEngineError::ArrowError)?;

        let span_bytes = span.span_id.as_bytes();
        self.span_id
            .append_value(span_bytes)
            .map_err(TraceEngineError::ArrowError)?;

        match &span.parent_span_id {
            Some(pid) => {
                self.parent_span_id
                    .append_value(pid.as_bytes())
                    .map_err(TraceEngineError::ArrowError)?;
            }
            None => self.parent_span_id.append_null(),
        }

        // W3C Trace Context
        self.flags.append_value(span.flags);
        self.trace_state.append_value(&span.trace_state);

        // Instrumentation scope
        self.scope_name.append_value(&span.scope_name);
        match &span.scope_version {
            Some(v) => self.scope_version.append_value(v),
            None => self.scope_version.append_null(),
        }

        // Metadata
        self.service_name.append_value(&span.service_name);
        self.span_name.append_value(&span.span_name);
        // span_kind should always be non-empty in TraceSpanRecord, but guard against
        // empty values by storing null instead of an empty string.
        if span.span_kind.is_empty() {
            self.span_kind.append_null();
        } else {
            self.span_kind.append_value(&span.span_kind);
        }

        // Timestamps
        self.start_time
            .append_value(span.start_time.timestamp_micros());
        self.end_time.append_value(span.end_time.timestamp_micros());
        self.duration_ms.append_value(span.duration_ms);

        // Status
        self.status_code.append_value(span.status_code);
        if span.status_message.is_empty() {
            self.status_message.append_null();
        } else {
            self.status_message.append_value(&span.status_message);
        }

        // Scouter-specific
        match &span.label {
            Some(l) => self.label.append_value(l),
            None => self.label.append_null(),
        }

        // Attributes
        self.append_attributes(&span.attributes).inspect_err(|e| {
            error!(
                "Failed to append attributes for span {}: {}",
                span.span_id, e
            )
        })?;

        // Resource attributes
        self.append_resource_attributes(&span.resource_attributes)
            .inspect_err(|e| {
                error!(
                    "Failed to append resource_attributes for span {}: {}",
                    span.span_id, e
                )
            })?;

        // Events
        self.append_events(&span.events)
            .inspect_err(|e| error!("Failed to append events for span {}: {}", span.span_id, e))?;

        // Links
        self.append_links(&span.links)
            .inspect_err(|e| error!("Failed to append links for span {}: {}", span.span_id, e))?;

        // Payloads
        self.input.append_value(
            serde_json::to_string(&span.input).unwrap_or_else(|_| "null".to_string()),
        );

        self.output.append_value(
            serde_json::to_string(&span.output).unwrap_or_else(|_| "null".to_string()),
        );

        // Search blob
        let search_text = Self::build_search_blob(span);
        self.search_blob.append_value(search_text);

        // Partition key — days since Unix epoch, derived from the span start date
        let days = span.start_time.date_naive().num_days_from_ce() - UNIX_EPOCH_DAYS;
        self.partition_date.append_value(days);

        Ok(())
    }

    fn append_attributes(&mut self, attributes: &[Attribute]) -> Result<(), TraceEngineError> {
        for attr in attributes {
            self.attributes.keys().append_value(&attr.key);
            let value_str =
                serde_json::to_string(&attr.value).unwrap_or_else(|_| "null".to_string());
            self.attributes.values().append_value(value_str);
        }
        self.attributes.append(true)?;
        Ok(())
    }

    fn append_resource_attributes(
        &mut self,
        attributes: &[Attribute],
    ) -> Result<(), TraceEngineError> {
        if attributes.is_empty() {
            self.resource_attributes.append(false)?; // null map
        } else {
            for attr in attributes {
                self.resource_attributes.keys().append_value(&attr.key);
                let value_str =
                    serde_json::to_string(&attr.value).unwrap_or_else(|_| "null".to_string());
                self.resource_attributes.values().append_value(value_str);
            }
            self.resource_attributes.append(true)?;
        }
        Ok(())
    }

    fn append_events(&mut self, events: &[SpanEvent]) -> Result<(), TraceEngineError> {
        let event_struct = self.events.values();
        for event in events {
            let name_builder = event_struct
                .field_builder::<StringBuilder>(0)
                .ok_or_else(|| TraceEngineError::DowncastError("event name builder"))?;
            name_builder.append_value(&event.name);

            let time_builder = event_struct
                .field_builder::<TimestampMicrosecondBuilder>(1)
                .ok_or_else(|| TraceEngineError::DowncastError("event timestamp builder"))?;
            time_builder.append_value(event.timestamp.timestamp_micros());

            let attr_builder = event_struct
                .field_builder::<MapBuilder<StringBuilder, StringViewBuilder>>(2)
                .ok_or_else(|| TraceEngineError::DowncastError("event attributes builder"))?;

            for attr in &event.attributes {
                attr_builder.keys().append_value(&attr.key);
                let value_str =
                    serde_json::to_string(&attr.value).unwrap_or_else(|_| "null".to_string());
                attr_builder.values().append_value(value_str);
            }
            attr_builder.append(true)?;

            let dropped_builder =
                event_struct
                    .field_builder::<UInt32Builder>(3)
                    .ok_or_else(|| {
                        TraceEngineError::DowncastError("dropped attributes count builder")
                    })?;
            dropped_builder.append_value(event.dropped_attributes_count);

            event_struct.append(true);
        }

        self.events.append(true);
        Ok(())
    }

    fn append_links(&mut self, links: &[SpanLink]) -> Result<(), TraceEngineError> {
        let link_struct = self.links.values();

        for link in links {
            let trace_builder = link_struct
                .field_builder::<FixedSizeBinaryBuilder>(0)
                .ok_or_else(|| TraceEngineError::DowncastError("link trace_id builder"))?;

            let trace_bytes = TraceId::hex_to_bytes(&link.trace_id).map_err(|e| {
                TraceEngineError::InvalidHexId(link.trace_id.clone(), e.to_string())
            })?;
            trace_builder.append_value(&trace_bytes)?;

            let span_builder = link_struct
                .field_builder::<FixedSizeBinaryBuilder>(1)
                .ok_or_else(|| TraceEngineError::DowncastError("link span_id builder"))?;

            let span_bytes = SpanId::hex_to_bytes(&link.span_id)
                .map_err(|e| TraceEngineError::InvalidHexId(link.span_id.clone(), e.to_string()))?;
            span_builder.append_value(&span_bytes)?;

            let state_builder = link_struct
                .field_builder::<StringBuilder>(2)
                .ok_or_else(|| TraceEngineError::DowncastError("link trace_state builder"))?;
            state_builder.append_value(&link.trace_state);

            let attr_builder = link_struct
                .field_builder::<MapBuilder<StringBuilder, StringViewBuilder>>(3)
                .ok_or_else(|| TraceEngineError::DowncastError("link attributes builder"))?;

            for attr in &link.attributes {
                attr_builder.keys().append_value(&attr.key);
                let value_str =
                    serde_json::to_string(&attr.value).unwrap_or_else(|_| "null".to_string());
                attr_builder.values().append_value(value_str);
            }
            attr_builder.append(true)?;

            let dropped_builder =
                link_struct
                    .field_builder::<UInt32Builder>(4)
                    .ok_or_else(|| {
                        TraceEngineError::DowncastError("link dropped attributes count builder")
                    })?;
            dropped_builder.append_value(link.dropped_attributes_count);

            link_struct.append(true);
        }

        self.links.append(true);
        Ok(())
    }

    /// Build a concatenated search string from `TraceSpanRecord` for full-text queries.
    ///
    /// Uses pipe-bounded tokens (`|key=value|`) to prevent false-positive substring matches
    /// where a value contains something that looks like a different attribute key or value.
    /// Queries use `%key=value%` patterns, which match the `|key=value|` tokens written
    /// here; older archive data used a `key:value` separator instead.
    fn build_search_blob(span: &TraceSpanRecord) -> String {
        let mut search = String::with_capacity(512);

        // Pipe-bounded bare tokens for full-text (service, span, scope)
        search.push('|');
        search.push_str(&span.service_name);
        search.push_str("| |");
        search.push_str(&span.span_name);
        search.push_str("| |");
        search.push_str(&span.scope_name);
        search.push('|');

        if !span.status_message.is_empty() {
            search.push_str(" |");
            search.push_str(&span.status_message);
            search.push('|');
        }

        // Pipe-bounded key=value tokens — standardize on the `=` separator
        for attr in &span.attributes {
            search.push_str(" |");
            search.push_str(&attr.key);
            search.push('=');
            match &attr.value {
                Value::String(s) => search.push_str(s),
                Value::Number(n) => search.push_str(&n.to_string()),
                Value::Bool(b) => search.push_str(&b.to_string()),
                Value::Null => {}
                other => search.push_str(&other.to_string()),
            }
            search.push('|');
        }

        for event in &span.events {
            search.push_str(" |");
            search.push_str(&event.name);
            search.push('|');
        }

        search
    }
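
    // An illustrative blob for a span from service "cart-api", span "checkout", scope
    // "scouter-sdk", one attribute http.status_code=500, and one event "exception"
    // (all values here are assumptions for the example):
    //
    //   |cart-api| |checkout| |scouter-sdk| |http.status_code=500| |exception|
    //
    // Filters can anchor on the pipes (e.g. LIKE '%|http.status_code=500|%') so a match
    // is a whole token rather than an arbitrary substring of some other value.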

    /// Finalize and build the RecordBatch. Column order must match `create_schema()`.
    pub fn finish(mut self) -> Result<RecordBatch, TraceEngineError> {
        let batch = RecordBatch::try_new(
            self.schema.clone(),
            vec![
                Arc::new(self.trace_id.finish()),
                Arc::new(self.span_id.finish()),
                Arc::new(self.parent_span_id.finish()),
                Arc::new(self.flags.finish()),
                Arc::new(self.trace_state.finish()),
                Arc::new(self.scope_name.finish()),
                Arc::new(self.scope_version.finish()),
                Arc::new(self.service_name.finish()),
                Arc::new(self.span_name.finish()),
                Arc::new(self.span_kind.finish()),
                Arc::new(self.start_time.finish()),
                Arc::new(self.end_time.finish()),
                Arc::new(self.duration_ms.finish()),
                Arc::new(self.status_code.finish()),
                Arc::new(self.status_message.finish()),
                Arc::new(self.label.finish()),
                Arc::new(self.attributes.finish()),
                Arc::new(self.resource_attributes.finish()),
                Arc::new(self.events.finish()),
                Arc::new(self.links.finish()),
                Arc::new(self.input.finish()),
                Arc::new(self.output.finish()),
                Arc::new(self.search_blob.finish()),
                Arc::new(self.partition_date.finish()),
            ],
        )?;

        Ok(batch)
    }
}