//! scouter_dataframe/storage.rs
//!
//! Object-store abstraction over Google Cloud Storage, Amazon S3, Azure Blob
//! Storage, and the local filesystem, plus a tuned DataFusion session factory.
1use crate::caching_store::CachingStore;
2use crate::error::StorageError;
3use base64::prelude::*;
4use datafusion::prelude::{SessionConfig, SessionContext};
5use futures::TryStreamExt;
6use object_store::aws::{AmazonS3, AmazonS3Builder};
7use object_store::azure::{MicrosoftAzure, MicrosoftAzureBuilder};
8use object_store::gcp::{GoogleCloudStorage, GoogleCloudStorageBuilder};
9use object_store::local::LocalFileSystem;
10use object_store::path::Path;
11use object_store::ClientOptions;
12use object_store::ObjectStore as ObjStore;
13use scouter_settings::ObjectStorageSettings;
14use scouter_types::StorageType;
15use std::sync::Arc;
16use tracing::debug;
17use url::Url;
18
19/// HTTP client options for cloud object stores.
20///
21/// Enables TCP+TLS connection pooling so repeat queries reuse existing
22/// connections. Sized for high-concurrency GCS/S3 workloads where many
23/// parallel readers share the same host.
24fn cloud_client_options() -> ClientOptions {
25    ClientOptions::new()
26        .with_pool_idle_timeout(std::time::Duration::from_secs(120))
27        .with_pool_max_idle_per_host(64)
28        .with_timeout(std::time::Duration::from_secs(30))
29        .with_connect_timeout(std::time::Duration::from_secs(5))
30}
31
32/// Helper function to decode base64 encoded string
33fn decode_base64_str(service_base64_creds: &str) -> Result<String, StorageError> {
34    let decoded = BASE64_STANDARD.decode(service_base64_creds)?;
35
36    Ok(String::from_utf8(decoded)?)
37}
38
39/// Storage provider enum for common object stores
/// Storage provider enum for common object stores.
///
/// Each variant wraps its concrete `object_store` backend in a
/// `CachingStore` shared via `Arc`, so clones are cheap refcount bumps and
/// all readers share one cache.
#[derive(Debug, Clone)]
enum StorageProvider {
    // Google Cloud Storage backend
    Google(Arc<CachingStore<GoogleCloudStorage>>),
    // Amazon S3 backend
    Aws(Arc<CachingStore<AmazonS3>>),
    // Local filesystem backend (paths resolved from the filesystem root)
    Local(Arc<CachingStore<LocalFileSystem>>),
    // Azure Blob Storage backend
    Azure(Arc<CachingStore<MicrosoftAzure>>),
}
47
48impl StorageProvider {
49    /// Return the inner object store as a type-erased `Arc<dyn ObjectStore>`.
50    ///
51    /// Used by the Delta Lake engine to bypass the storage factory via
52    /// `DeltaTableBuilder::with_storage_backend` — necessary for cloud stores
53    /// (GCS, S3, Azure) whose schemes are not registered in the default factory.
54    pub fn as_dyn_object_store(&self) -> Arc<dyn ObjStore> {
55        match self {
56            StorageProvider::Google(s) => s.clone() as Arc<dyn ObjStore>,
57            StorageProvider::Aws(s) => s.clone() as Arc<dyn ObjStore>,
58            StorageProvider::Local(s) => s.clone() as Arc<dyn ObjStore>,
59            StorageProvider::Azure(s) => s.clone() as Arc<dyn ObjStore>,
60        }
61    }
62
63    pub fn new(storage_settings: &ObjectStorageSettings) -> Result<Self, StorageError> {
64        let cache_bytes = storage_settings.object_cache_mb() * 1024 * 1024;
65
66        let store = match storage_settings.storage_type {
67            StorageType::Google => {
68                let mut builder = GoogleCloudStorageBuilder::from_env();
69
70                // Try to use base64 credentials if available
71                if let Ok(base64_creds) = std::env::var("GOOGLE_ACCOUNT_JSON_BASE64") {
72                    let key = decode_base64_str(&base64_creds)?;
73                    builder = builder.with_service_account_key(&key);
74                    debug!("Using base64 encoded service account key for Google Cloud Storage");
75                }
76
77                // Add bucket name and build
78                let storage = builder
79                    .with_bucket_name(storage_settings.storage_root())
80                    .with_client_options(cloud_client_options())
81                    .build()?;
82
83                StorageProvider::Google(Arc::new(CachingStore::new(storage, cache_bytes)))
84            }
85            StorageType::Aws => {
86                let storage = AmazonS3Builder::from_env()
87                    .with_bucket_name(storage_settings.storage_root())
88                    .with_region(storage_settings.region.clone())
89                    .with_client_options(cloud_client_options())
90                    .build()?;
91                StorageProvider::Aws(Arc::new(CachingStore::new(storage, cache_bytes)))
92            }
93            StorageType::Local => {
94                let storage = LocalFileSystem::new();
95                StorageProvider::Local(Arc::new(CachingStore::new(storage, cache_bytes)))
96            }
97            StorageType::Azure => {
98                // MicrosoftAzureBuilder::from_env() reads AZURE_STORAGE_ACCOUNT_NAME
99                // and AZURE_STORAGE_ACCOUNT_KEY specifically.  Many Azure tools
100                // (az CLI, Terraform, GitHub Actions) emit AZURE_STORAGE_ACCOUNT and
101                // AZURE_STORAGE_KEY instead.  Accept both so callers don't need to
102                // know which naming convention object_store expects.
103                let mut builder = MicrosoftAzureBuilder::from_env();
104
105                if std::env::var("AZURE_STORAGE_ACCOUNT_NAME").is_err() {
106                    if let Ok(account) = std::env::var("AZURE_STORAGE_ACCOUNT") {
107                        builder = builder.with_account(account);
108                    }
109                }
110                if std::env::var("AZURE_STORAGE_ACCOUNT_KEY").is_err() {
111                    if let Ok(key) = std::env::var("AZURE_STORAGE_KEY") {
112                        builder = builder.with_access_key(key);
113                    }
114                }
115
116                let storage = builder
117                    .with_container_name(storage_settings.storage_root())
118                    .with_client_options(cloud_client_options())
119                    .build()?;
120
121                StorageProvider::Azure(Arc::new(CachingStore::new(storage, cache_bytes)))
122            }
123        };
124
125        Ok(store)
126    }
127
128    pub fn get_base_url(
129        &self,
130        storage_settings: &ObjectStorageSettings,
131    ) -> Result<Url, StorageError> {
132        match self {
133            StorageProvider::Google(_) => Ok(Url::parse(&storage_settings.storage_uri)?),
134            StorageProvider::Aws(_) => Ok(Url::parse(&storage_settings.storage_uri)?),
135            StorageProvider::Local(_) => {
136                // Convert relative path to absolute path for local filesystem
137                let storage_path = std::path::PathBuf::from(storage_settings.storage_root());
138                let absolute_path = if storage_path.is_absolute() {
139                    storage_path
140                } else {
141                    std::env::current_dir()?.join(storage_path)
142                };
143
144                // Create file:// URL with absolute path
145                let url = Url::from_file_path(&absolute_path).map_err(|_| {
146                    StorageError::InvalidUrl(format!(
147                        "Failed to create file URL from path: {:?}",
148                        absolute_path
149                    ))
150                })?;
151                Ok(url)
152            }
153            StorageProvider::Azure(_) => Ok(Url::parse(&storage_settings.storage_uri)?),
154        }
155    }
156
157    pub fn get_session(
158        &self,
159        storage_settings: &ObjectStorageSettings,
160    ) -> Result<SessionContext, StorageError> {
161        let mut config = SessionConfig::new()
162            .with_target_partitions(
163                std::thread::available_parallelism()
164                    .map(|n| n.get())
165                    .unwrap_or(4),
166            )
167            .with_batch_size(8192)
168            .with_prefer_existing_sort(true)
169            .with_parquet_pruning(true)
170            .with_collect_statistics(true);
171
172        // Push filter predicates into the Parquet reader so only matching rows are decoded,
173        // and reorder predicates by selectivity so bloom filters (trace_id, entity_id) are
174        // evaluated before range checks (start_time), short-circuiting row evaluation early.
175        config.options_mut().execution.parquet.pushdown_filters = true;
176        config.options_mut().execution.parquet.reorder_filters = true;
177
178        // ── Parquet read-path tuning (GCS latency reduction) ──────────────
179        //
180        // Read at least 1MB from the end of each Parquet file in a single request.
181        // Default is 512KB. Our files have bloom filters on trace_id + entity_id
182        // and page-level statistics on start_time + status_code, so footers are
183        // larger than average. 1MB captures footer + column/offset indexes in one
184        // GCS round-trip instead of the default multi-step chain, saving 1-2
185        // round-trips (~30-60ms each) per file.
186        config.options_mut().execution.parquet.metadata_size_hint = Some(1024 * 1024);
187
188        // Bloom filters are written on trace_id and entity_id — ensure the reader
189        // consults them before decoding row groups. (Default is true in DF 52, but
190        // we're explicit to guard against version changes.)
191        config.options_mut().execution.parquet.bloom_filter_on_read = true;
192
193        // Read Utf8 columns as Utf8View and Binary as BinaryView for zero-copy.
194        // Our schema already uses Utf8View/BinaryView — this ensures DataFusion
195        // doesn't downgrade them when reading back from Parquet.
196        config
197            .options_mut()
198            .execution
199            .parquet
200            .schema_force_view_types = true;
201
202        // ── Listing / metadata concurrency ───────────────────────────────
203        //
204        // Number of files to stat in parallel when inferring schema or listing
205        // a Delta table's backing Parquet files. Default is 32. On GCS each
206        // stat is a separate HTTP HEAD; higher concurrency hides the per-file
207        // latency behind parallelism. 64 matches our pool_max_idle_per_host.
208        config.options_mut().execution.meta_fetch_concurrency = 64;
209
210        // ── Write-path tuning ────────────────────────────────────────────
211        //
212        // Increase write-side parallelism so compaction and flush can encode
213        // multiple row groups concurrently, reducing wall-clock write latency.
214        config
215            .options_mut()
216            .execution
217            .parquet
218            .maximum_parallel_row_group_writers = 4;
219
220        // Buffer more decoded record batches per stream before back-pressure
221        // kicks in, smoothing out bursty reads from GCS.
222        config
223            .options_mut()
224            .execution
225            .parquet
226            .maximum_buffered_record_batches_per_stream = 8;
227
228        let ctx = SessionContext::new_with_config(config);
229        let base_url = self.get_base_url(storage_settings)?;
230
231        match self {
232            StorageProvider::Google(store) => {
233                ctx.register_object_store(&base_url, store.clone());
234            }
235            StorageProvider::Aws(store) => {
236                ctx.register_object_store(&base_url, store.clone());
237            }
238            StorageProvider::Local(store) => {
239                ctx.register_object_store(&base_url, store.clone());
240            }
241            StorageProvider::Azure(store) => {
242                ctx.register_object_store(&base_url, store.clone());
243            }
244        }
245
246        Ok(ctx)
247    }
248
249    /// List files in the object store
250    ///
251    /// # Arguments
252    /// * `path` - The path to list files from. If None, lists all files in the root.
253    ///
254    /// # Returns
255    /// * `Result<Vec<String>, StorageError>` - A result containing a vector of file paths or an error.
256    pub async fn list(&self, path: Option<&Path>) -> Result<Vec<String>, StorageError> {
257        let stream = match self {
258            StorageProvider::Local(store) => store.list(path),
259            StorageProvider::Google(store) => store.list(path),
260            StorageProvider::Aws(store) => store.list(path),
261            StorageProvider::Azure(store) => store.list(path),
262        };
263
264        // Process each item in the stream
265        stream
266            .try_fold(Vec::new(), |mut files, meta| async move {
267                files.push(meta.location.to_string());
268                Ok(files)
269            })
270            .await
271            .map_err(Into::into)
272    }
273
274    pub async fn delete(&self, path: &Path) -> Result<(), StorageError> {
275        match self {
276            StorageProvider::Local(store) => {
277                store.delete(path).await?;
278                Ok(())
279            }
280            StorageProvider::Google(store) => {
281                store.delete(path).await?;
282                Ok(())
283            }
284            StorageProvider::Aws(store) => {
285                store.delete(path).await?;
286                Ok(())
287            }
288            StorageProvider::Azure(store) => {
289                store.delete(path).await?;
290                Ok(())
291            }
292        }
293    }
294}
295
/// Public handle pairing a storage backend with the settings that built it.
#[derive(Debug, Clone)]
pub struct ObjectStore {
    // Concrete backend (GCS, S3, Azure, or local filesystem) behind a cache.
    provider: StorageProvider,
    // Settings used to construct the provider; retained so sessions and base
    // URLs can be derived without re-passing configuration.
    pub storage_settings: ObjectStorageSettings,
}
301
302impl ObjectStore {
303    /// Creates a new ObjectStore instance.
304    ///
305    /// # Arguments
306    /// * `storage_settings` - The settings for the object storage.
307    ///
308    /// # Returns
309    /// * `Result<ObjectStore, StorageError>` - A result containing the ObjectStore instance or an error.
310    pub fn new(storage_settings: &ObjectStorageSettings) -> Result<Self, StorageError> {
311        let store = StorageProvider::new(storage_settings)?;
312        Ok(ObjectStore {
313            provider: store,
314            storage_settings: storage_settings.clone(),
315        })
316    }
317
318    pub fn get_session(&self) -> Result<SessionContext, StorageError> {
319        let ctx = self.provider.get_session(&self.storage_settings)?;
320        Ok(ctx)
321    }
322
323    /// Return the inner object store as a type-erased `Arc<dyn ObjectStore>`.
324    ///
325    /// Pass this to `DeltaTableBuilder::with_storage_backend` to bypass the Delta Lake
326    /// storage factory (required for GCS, S3, and Azure).
327    pub fn as_dyn_object_store(&self) -> Arc<dyn ObjStore> {
328        self.provider.as_dyn_object_store()
329    }
330
331    /// Get the base URL for datafusion to use
332    pub fn get_base_url(&self) -> Result<Url, StorageError> {
333        self.provider.get_base_url(&self.storage_settings)
334    }
335
336    /// List files in the object store
337    ///
338    /// When path is None, lists from the root.
339    /// When path is provided, lists from that path.
340    ///
341    /// Note: The path parameter should NOT include the storage root - it's a relative path
342    /// that will be automatically combined with the storage root.
343    pub async fn list(&self, path: Option<&Path>) -> Result<Vec<String>, StorageError> {
344        self.provider.list(path).await
345    }
346
347    pub async fn delete(&self, path: &Path) -> Result<(), StorageError> {
348        self.provider.delete(path).await
349    }
350}