//! Object-store abstraction for `scouter_dataframe` (storage.rs): wraps GCS,
//! S3, Azure, and local-filesystem backends behind a single provider type.

1use crate::caching_store::CachingStore;
2use crate::error::StorageError;
3use base64::prelude::*;
4use datafusion::prelude::{SessionConfig, SessionContext};
5use futures::TryStreamExt;
6use object_store::aws::{AmazonS3, AmazonS3Builder};
7use object_store::azure::{MicrosoftAzure, MicrosoftAzureBuilder};
8use object_store::gcp::{GoogleCloudStorage, GoogleCloudStorageBuilder};
9use object_store::local::LocalFileSystem;
10use object_store::path::Path;
11use object_store::ClientOptions;
12use object_store::ObjectStore as ObjStore;
13use scouter_settings::ObjectStorageSettings;
14use scouter_types::StorageType;
15use std::sync::Arc;
16use tracing::debug;
17use url::Url;
18
19/// HTTP client options for cloud object stores.
20///
21/// Enables TCP+TLS connection pooling so repeat queries reuse existing
22/// connections. Sized for high-concurrency GCS/S3 workloads where many
23/// parallel readers share the same host.
24fn cloud_client_options() -> ClientOptions {
25    ClientOptions::new()
26        .with_pool_idle_timeout(std::time::Duration::from_secs(120))
27        .with_pool_max_idle_per_host(64)
28        .with_timeout(std::time::Duration::from_secs(30))
29        .with_connect_timeout(std::time::Duration::from_secs(5))
30}
31
32/// Helper function to decode base64 encoded string
33fn decode_base64_str(service_base64_creds: &str) -> Result<String, StorageError> {
34    let decoded = BASE64_STANDARD.decode(service_base64_creds)?;
35
36    Ok(String::from_utf8(decoded)?)
37}
38
/// Storage provider enum for common object stores.
///
/// Each variant wraps its concrete backend in an `Arc<CachingStore<_>>` so the
/// same store can be cheaply cloned and shared (e.g. with DataFusion sessions
/// and the Delta Lake engine).
#[derive(Debug, Clone)]
enum StorageProvider {
    // Google Cloud Storage backend.
    Google(Arc<CachingStore<GoogleCloudStorage>>),
    // Amazon S3 backend.
    Aws(Arc<CachingStore<AmazonS3>>),
    // Local filesystem backend (used for dev/testing paths).
    Local(Arc<CachingStore<LocalFileSystem>>),
    // Azure Blob Storage backend.
    Azure(Arc<CachingStore<MicrosoftAzure>>),
}
47
48impl StorageProvider {
49    /// Return the inner object store as a type-erased `Arc<dyn ObjectStore>`.
50    ///
51    /// Used by the Delta Lake engine to bypass the storage factory via
52    /// `DeltaTableBuilder::with_storage_backend` — necessary for cloud stores
53    /// (GCS, S3, Azure) whose schemes are not registered in the default factory.
54    pub fn as_dyn_object_store(&self) -> Arc<dyn ObjStore> {
55        match self {
56            StorageProvider::Google(s) => s.clone() as Arc<dyn ObjStore>,
57            StorageProvider::Aws(s) => s.clone() as Arc<dyn ObjStore>,
58            StorageProvider::Local(s) => s.clone() as Arc<dyn ObjStore>,
59            StorageProvider::Azure(s) => s.clone() as Arc<dyn ObjStore>,
60        }
61    }
62
63    pub fn new(storage_settings: &ObjectStorageSettings) -> Result<Self, StorageError> {
64        let cache_bytes = storage_settings.object_cache_mb() * 1024 * 1024;
65
66        let store = match storage_settings.storage_type {
67            StorageType::Google => {
68                let mut builder = GoogleCloudStorageBuilder::from_env();
69
70                // Try to use base64 credentials if available
71                if let Ok(base64_creds) = std::env::var("GOOGLE_ACCOUNT_JSON_BASE64") {
72                    let key = decode_base64_str(&base64_creds)?;
73                    builder = builder.with_service_account_key(&key);
74                    debug!("Using base64 encoded service account key for Google Cloud Storage");
75                }
76
77                // Add bucket name and build
78                let storage = builder
79                    .with_bucket_name(storage_settings.storage_root())
80                    .with_client_options(cloud_client_options())
81                    .build()?;
82
83                StorageProvider::Google(Arc::new(CachingStore::new(storage, cache_bytes)))
84            }
85            StorageType::Aws => {
86                let storage = AmazonS3Builder::from_env()
87                    .with_bucket_name(storage_settings.storage_root())
88                    .with_region(storage_settings.region.clone())
89                    .with_client_options(cloud_client_options())
90                    .build()?;
91                StorageProvider::Aws(Arc::new(CachingStore::new(storage, cache_bytes)))
92            }
93            StorageType::Local => {
94                let storage = LocalFileSystem::new();
95                StorageProvider::Local(Arc::new(CachingStore::new(storage, cache_bytes)))
96            }
97            StorageType::Azure => {
98                // MicrosoftAzureBuilder::from_env() reads AZURE_STORAGE_ACCOUNT_NAME
99                // and AZURE_STORAGE_ACCOUNT_KEY specifically.  Many Azure tools
100                // (az CLI, Terraform, GitHub Actions) emit AZURE_STORAGE_ACCOUNT and
101                // AZURE_STORAGE_KEY instead.  Accept both so callers don't need to
102                // know which naming convention object_store expects.
103                let mut builder = MicrosoftAzureBuilder::from_env();
104
105                if std::env::var("AZURE_STORAGE_ACCOUNT_NAME").is_err() {
106                    if let Ok(account) = std::env::var("AZURE_STORAGE_ACCOUNT") {
107                        builder = builder.with_account(account);
108                    }
109                }
110                if std::env::var("AZURE_STORAGE_ACCOUNT_KEY").is_err() {
111                    if let Ok(key) = std::env::var("AZURE_STORAGE_KEY") {
112                        builder = builder.with_access_key(key);
113                    }
114                }
115
116                let storage = builder
117                    .with_container_name(storage_settings.storage_root())
118                    .with_client_options(cloud_client_options())
119                    .build()?;
120
121                StorageProvider::Azure(Arc::new(CachingStore::new(storage, cache_bytes)))
122            }
123        };
124
125        Ok(store)
126    }
127
128    pub fn get_base_url(
129        &self,
130        storage_settings: &ObjectStorageSettings,
131    ) -> Result<Url, StorageError> {
132        match self {
133            StorageProvider::Google(_) => Ok(Url::parse(&storage_settings.storage_uri)?),
134            StorageProvider::Aws(_) => Ok(Url::parse(&storage_settings.storage_uri)?),
135            StorageProvider::Local(_) => {
136                // Convert relative path to absolute path for local filesystem
137                let storage_path = std::path::PathBuf::from(storage_settings.storage_root());
138                let absolute_path = if storage_path.is_absolute() {
139                    storage_path
140                } else {
141                    std::env::current_dir()?.join(storage_path)
142                };
143
144                // Create file:// URL with absolute path
145                let url = Url::from_file_path(&absolute_path).map_err(|_| {
146                    StorageError::InvalidUrl(format!(
147                        "Failed to create file URL from path: {:?}",
148                        absolute_path
149                    ))
150                })?;
151                Ok(url)
152            }
153            StorageProvider::Azure(_) => Ok(Url::parse(&storage_settings.storage_uri)?),
154        }
155    }
156
157    pub fn get_session(
158        &self,
159        storage_settings: &ObjectStorageSettings,
160    ) -> Result<SessionContext, StorageError> {
161        let mut config = SessionConfig::new()
162            .with_target_partitions(
163                std::thread::available_parallelism()
164                    .map(|n| n.get())
165                    .unwrap_or(4),
166            )
167            .with_batch_size(8192)
168            .with_prefer_existing_sort(true)
169            .with_parquet_pruning(true)
170            .with_collect_statistics(true);
171
172        // Push filter predicates into the Parquet reader so only matching rows are decoded,
173        // and reorder predicates by selectivity so bloom filters (trace_id, entity_id) are
174        // evaluated before range checks (start_time), short-circuiting row evaluation early.
175        config.options_mut().execution.parquet.pushdown_filters = true;
176        config.options_mut().execution.parquet.reorder_filters = true;
177
178        // ── Parquet read-path tuning (GCS latency reduction) ──────────────
179        //
180        // Read at least 1MB from the end of each Parquet file in a single request.
181        // Default is 512KB. Our files have bloom filters on trace_id + entity_id
182        // and page-level statistics on start_time + status_code, so footers are
183        // larger than average. 1MB captures footer + column/offset indexes in one
184        // GCS round-trip instead of the default multi-step chain, saving 1-2
185        // round-trips (~30-60ms each) per file.
186        config
187            .options_mut()
188            .execution
189            .parquet
190            .metadata_size_hint = Some(1024 * 1024);
191
192        // Bloom filters are written on trace_id and entity_id — ensure the reader
193        // consults them before decoding row groups. (Default is true in DF 52, but
194        // we're explicit to guard against version changes.)
195        config
196            .options_mut()
197            .execution
198            .parquet
199            .bloom_filter_on_read = true;
200
201        // Read Utf8 columns as Utf8View and Binary as BinaryView for zero-copy.
202        // Our schema already uses Utf8View/BinaryView — this ensures DataFusion
203        // doesn't downgrade them when reading back from Parquet.
204        config
205            .options_mut()
206            .execution
207            .parquet
208            .schema_force_view_types = true;
209
210        // ── Listing / metadata concurrency ───────────────────────────────
211        //
212        // Number of files to stat in parallel when inferring schema or listing
213        // a Delta table's backing Parquet files. Default is 32. On GCS each
214        // stat is a separate HTTP HEAD; higher concurrency hides the per-file
215        // latency behind parallelism. 64 matches our pool_max_idle_per_host.
216        config
217            .options_mut()
218            .execution
219            .meta_fetch_concurrency = 64;
220
221        // ── Write-path tuning ────────────────────────────────────────────
222        //
223        // Increase write-side parallelism so compaction and flush can encode
224        // multiple row groups concurrently, reducing wall-clock write latency.
225        config
226            .options_mut()
227            .execution
228            .parquet
229            .maximum_parallel_row_group_writers = 4;
230
231        // Buffer more decoded record batches per stream before back-pressure
232        // kicks in, smoothing out bursty reads from GCS.
233        config
234            .options_mut()
235            .execution
236            .parquet
237            .maximum_buffered_record_batches_per_stream = 8;
238
239        let ctx = SessionContext::new_with_config(config);
240        let base_url = self.get_base_url(storage_settings)?;
241
242        match self {
243            StorageProvider::Google(store) => {
244                ctx.register_object_store(&base_url, store.clone());
245            }
246            StorageProvider::Aws(store) => {
247                ctx.register_object_store(&base_url, store.clone());
248            }
249            StorageProvider::Local(store) => {
250                ctx.register_object_store(&base_url, store.clone());
251            }
252            StorageProvider::Azure(store) => {
253                ctx.register_object_store(&base_url, store.clone());
254            }
255        }
256
257        Ok(ctx)
258    }
259
260    /// List files in the object store
261    ///
262    /// # Arguments
263    /// * `path` - The path to list files from. If None, lists all files in the root.
264    ///
265    /// # Returns
266    /// * `Result<Vec<String>, StorageError>` - A result containing a vector of file paths or an error.
267    pub async fn list(&self, path: Option<&Path>) -> Result<Vec<String>, StorageError> {
268        let stream = match self {
269            StorageProvider::Local(store) => store.list(path),
270            StorageProvider::Google(store) => store.list(path),
271            StorageProvider::Aws(store) => store.list(path),
272            StorageProvider::Azure(store) => store.list(path),
273        };
274
275        // Process each item in the stream
276        stream
277            .try_fold(Vec::new(), |mut files, meta| async move {
278                files.push(meta.location.to_string());
279                Ok(files)
280            })
281            .await
282            .map_err(Into::into)
283    }
284
285    pub async fn delete(&self, path: &Path) -> Result<(), StorageError> {
286        match self {
287            StorageProvider::Local(store) => {
288                store.delete(path).await?;
289                Ok(())
290            }
291            StorageProvider::Google(store) => {
292                store.delete(path).await?;
293                Ok(())
294            }
295            StorageProvider::Aws(store) => {
296                store.delete(path).await?;
297                Ok(())
298            }
299            StorageProvider::Azure(store) => {
300                store.delete(path).await?;
301                Ok(())
302            }
303        }
304    }
305}
306
/// Public handle pairing a concrete `StorageProvider` with the settings it was
/// built from, so sessions and base URLs can be derived on demand.
#[derive(Debug, Clone)]
pub struct ObjectStore {
    // Backend (GCS, S3, Azure, or local FS) wrapped in a caching layer.
    provider: StorageProvider,
    // Retained settings; public so callers can inspect the active configuration.
    pub storage_settings: ObjectStorageSettings,
}
312
313impl ObjectStore {
314    /// Creates a new ObjectStore instance.
315    ///
316    /// # Arguments
317    /// * `storage_settings` - The settings for the object storage.
318    ///
319    /// # Returns
320    /// * `Result<ObjectStore, StorageError>` - A result containing the ObjectStore instance or an error.
321    pub fn new(storage_settings: &ObjectStorageSettings) -> Result<Self, StorageError> {
322        let store = StorageProvider::new(storage_settings)?;
323        Ok(ObjectStore {
324            provider: store,
325            storage_settings: storage_settings.clone(),
326        })
327    }
328
329    pub fn get_session(&self) -> Result<SessionContext, StorageError> {
330        let ctx = self.provider.get_session(&self.storage_settings)?;
331        Ok(ctx)
332    }
333
334    /// Return the inner object store as a type-erased `Arc<dyn ObjectStore>`.
335    ///
336    /// Pass this to `DeltaTableBuilder::with_storage_backend` to bypass the Delta Lake
337    /// storage factory (required for GCS, S3, and Azure).
338    pub fn as_dyn_object_store(&self) -> Arc<dyn ObjStore> {
339        self.provider.as_dyn_object_store()
340    }
341
342    /// Get the base URL for datafusion to use
343    pub fn get_base_url(&self) -> Result<Url, StorageError> {
344        self.provider.get_base_url(&self.storage_settings)
345    }
346
347    /// List files in the object store
348    ///
349    /// When path is None, lists from the root.
350    /// When path is provided, lists from that path.
351    ///
352    /// Note: The path parameter should NOT include the storage root - it's a relative path
353    /// that will be automatically combined with the storage root.
354    pub async fn list(&self, path: Option<&Path>) -> Result<Vec<String>, StorageError> {
355        self.provider.list(path).await
356    }
357
358    pub async fn delete(&self, path: &Path) -> Result<(), StorageError> {
359        self.provider.delete(path).await
360    }
361}