datafusion_execution/runtime_env.rs

// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

//! Execution [`RuntimeEnv`] environment that manages access to the
//! object store registry, memory pool, and disk manager.

#[allow(deprecated)]
use crate::disk_manager::DiskManagerConfig;
use crate::{
    disk_manager::{DiskManager, DiskManagerBuilder, DiskManagerMode},
    memory_pool::{
        GreedyMemoryPool, MemoryPool, TrackConsumersPool, UnboundedMemoryPool,
    },
    object_store::{DefaultObjectStoreRegistry, ObjectStoreRegistry},
};

use crate::cache::cache_manager::{CacheManager, CacheManagerConfig};
#[cfg(feature = "parquet_encryption")]
use crate::parquet_encryption::{EncryptionFactory, EncryptionFactoryRegistry};
use datafusion_common::{config::ConfigEntry, Result};
use object_store::ObjectStore;
use std::path::PathBuf;
use std::sync::Arc;
use std::{
    fmt::{Debug, Formatter},
    num::NonZeroUsize,
};
use url::Url;

#[derive(Clone)]
/// Execution runtime environment that manages system resources such
/// as memory, disk, cache and storage.
///
/// A [`RuntimeEnv`] can be created using [`RuntimeEnvBuilder`] and has the
/// following resource management functionality:
///
/// * [`MemoryPool`]: Manage memory
/// * [`DiskManager`]: Manage temporary files on local disk
/// * [`CacheManager`]: Manage temporary cache data during the session lifetime
/// * [`ObjectStoreRegistry`]: Manage mapping URLs to object store instances
///
/// # Example: Create default `RuntimeEnv`
/// ```
/// # use datafusion_execution::runtime_env::RuntimeEnv;
/// let runtime_env = RuntimeEnv::default();
/// ```
///
/// # Example: Create a `RuntimeEnv` from [`RuntimeEnvBuilder`] with a new memory pool
/// ```
/// # use std::sync::Arc;
/// # use datafusion_execution::memory_pool::GreedyMemoryPool;
/// # use datafusion_execution::runtime_env::{RuntimeEnv, RuntimeEnvBuilder};
/// // restrict to using at most 100MB of memory
/// let pool_size = 100 * 1024 * 1024;
/// let runtime_env = RuntimeEnvBuilder::new()
///   .with_memory_pool(Arc::new(GreedyMemoryPool::new(pool_size)))
///   .build()
///   .unwrap();
/// ```
pub struct RuntimeEnv {
    /// Runtime memory management
    pub memory_pool: Arc<dyn MemoryPool>,
    /// Manage temporary files during query execution
    pub disk_manager: Arc<DiskManager>,
    /// Manage temporary cache during query execution
    pub cache_manager: Arc<CacheManager>,
    /// Object Store Registry
    pub object_store_registry: Arc<dyn ObjectStoreRegistry>,
    /// Parquet encryption factory registry
    #[cfg(feature = "parquet_encryption")]
    pub parquet_encryption_factory_registry: Arc<EncryptionFactoryRegistry>,
}

impl Debug for RuntimeEnv {
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        write!(f, "RuntimeEnv")
    }
}

impl RuntimeEnv {
    /// Registers a custom `ObjectStore` to be used with a specific URL.
    /// This allows DataFusion to create external tables from URLs that do not have
    /// built-in support such as `hdfs://namenode:port/...`.
    ///
    /// Returns the [`ObjectStore`] previously registered for this
    /// scheme, if any.
    ///
    /// See [`ObjectStoreRegistry`] for more details
    ///
    /// # Example: Register local file system object store
    /// ```
    /// # use std::sync::Arc;
    /// # use url::Url;
    /// # use datafusion_execution::runtime_env::RuntimeEnv;
    /// # let runtime_env = RuntimeEnv::default();
    /// let url = Url::try_from("file://").unwrap();
    /// let object_store = object_store::local::LocalFileSystem::new();
    /// // register the object store with the runtime environment
    /// runtime_env.register_object_store(&url, Arc::new(object_store));
    /// ```
    ///
    /// # Example: Register remote URL object store like [GitHub](https://github.com)
    ///
    /// ```
    /// # use std::sync::Arc;
    /// # use url::Url;
    /// # use datafusion_execution::runtime_env::RuntimeEnv;
    /// # let runtime_env = RuntimeEnv::default();
    /// # // use local store for example as http feature is not enabled
    /// # let http_store = object_store::local::LocalFileSystem::new();
    /// // create a new object store via object_store::http::HttpBuilder;
    /// let base_url = Url::parse("https://github.com").unwrap();
    /// // (note this example can't depend on the http feature)
    /// // let http_store = HttpBuilder::new()
    /// //    .with_url(base_url.clone())
    /// //    .build()
    /// //    .unwrap();
    /// // register the object store with the runtime environment
    /// runtime_env.register_object_store(&base_url, Arc::new(http_store));
    /// ```
    pub fn register_object_store(
        &self,
        url: &Url,
        object_store: Arc<dyn ObjectStore>,
    ) -> Option<Arc<dyn ObjectStore>> {
        self.object_store_registry.register_store(url, object_store)
    }

    /// Retrieves an `ObjectStore` instance for a URL by consulting the
    /// registry. See [`ObjectStoreRegistry::get_store`] for more
    /// details.
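    ///
    /// # Example: Look up a registered object store
    ///
    /// A minimal sketch; it uses [`ObjectStoreUrl`](crate::object_store::ObjectStoreUrl)
    /// from this crate as the `AsRef<Url>` argument, and registers a local store for
    /// the `file://` scheme first so the lookup is guaranteed to succeed.
    /// ```
    /// # use std::sync::Arc;
    /// # use url::Url;
    /// # use datafusion_execution::object_store::ObjectStoreUrl;
    /// # use datafusion_execution::runtime_env::RuntimeEnv;
    /// # let runtime_env = RuntimeEnv::default();
    /// # // make sure a store is registered for the `file://` scheme
    /// # runtime_env.register_object_store(
    /// #     &Url::parse("file://").unwrap(),
    /// #     Arc::new(object_store::local::LocalFileSystem::new()),
    /// # );
    /// let url = ObjectStoreUrl::parse("file://").unwrap();
    /// let store = runtime_env.object_store(&url).unwrap();
    /// ```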
    pub fn object_store(&self, url: impl AsRef<Url>) -> Result<Arc<dyn ObjectStore>> {
        self.object_store_registry.get_store(url.as_ref())
    }

    /// Register an [`EncryptionFactory`] with an associated identifier that can later be
    /// used to configure encryption when reading or writing Parquet.
    /// If an encryption factory with the same identifier was already registered, it is replaced and returned.
    #[cfg(feature = "parquet_encryption")]
    pub fn register_parquet_encryption_factory(
        &self,
        id: &str,
        encryption_factory: Arc<dyn EncryptionFactory>,
    ) -> Option<Arc<dyn EncryptionFactory>> {
        self.parquet_encryption_factory_registry
            .register_factory(id, encryption_factory)
    }

    /// Retrieve an [`EncryptionFactory`] by its identifier
    #[cfg(feature = "parquet_encryption")]
    pub fn parquet_encryption_factory(
        &self,
        id: &str,
    ) -> Result<Arc<dyn EncryptionFactory>> {
        self.parquet_encryption_factory_registry.get_factory(id)
    }
}

impl Default for RuntimeEnv {
    fn default() -> Self {
        RuntimeEnvBuilder::new().build().unwrap()
    }
}

/// Execution runtime configuration builder.
///
/// See example on [`RuntimeEnv`]
#[derive(Clone)]
pub struct RuntimeEnvBuilder {
    #[allow(deprecated)]
    /// DiskManager to manage temporary disk file usage
    pub disk_manager: DiskManagerConfig,
    /// DiskManager builder to manage temporary disk file usage
    pub disk_manager_builder: Option<DiskManagerBuilder>,
    /// [`MemoryPool`] from which to allocate memory
    ///
    /// Defaults to using an [`UnboundedMemoryPool`] if `None`
    pub memory_pool: Option<Arc<dyn MemoryPool>>,
    /// CacheManager to manage cache data
    pub cache_manager: CacheManagerConfig,
    /// ObjectStoreRegistry to get object store based on url
    pub object_store_registry: Arc<dyn ObjectStoreRegistry>,
    /// Parquet encryption factory registry
    #[cfg(feature = "parquet_encryption")]
    pub parquet_encryption_factory_registry: Arc<EncryptionFactoryRegistry>,
}

impl Default for RuntimeEnvBuilder {
    fn default() -> Self {
        Self::new()
    }
}

impl RuntimeEnvBuilder {
    /// New with default values
    pub fn new() -> Self {
        Self {
            disk_manager: Default::default(),
            disk_manager_builder: Default::default(),
            memory_pool: Default::default(),
            cache_manager: Default::default(),
            object_store_registry: Arc::new(DefaultObjectStoreRegistry::default()),
            #[cfg(feature = "parquet_encryption")]
            parquet_encryption_factory_registry: Default::default(),
        }
    }

    #[allow(deprecated)]
    #[deprecated(since = "48.0.0", note = "Use with_disk_manager_builder instead")]
    /// Customize disk manager
    pub fn with_disk_manager(mut self, disk_manager: DiskManagerConfig) -> Self {
        self.disk_manager = disk_manager;
        self
    }

    /// Customize the disk manager builder
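    ///
    /// # Example: Configure spilling through a `DiskManagerBuilder`
    ///
    /// A minimal sketch; the OS temporary directory is used purely for illustration.
    /// ```
    /// # use datafusion_execution::disk_manager::{DiskManagerBuilder, DiskManagerMode};
    /// # use datafusion_execution::runtime_env::RuntimeEnvBuilder;
    /// // spill temporary files into the OS temporary directory
    /// let builder = DiskManagerBuilder::default()
    ///     .with_mode(DiskManagerMode::Directories(vec![std::env::temp_dir()]));
    /// let runtime_env = RuntimeEnvBuilder::new()
    ///     .with_disk_manager_builder(builder)
    ///     .build()
    ///     .unwrap();
    /// ```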
    pub fn with_disk_manager_builder(mut self, disk_manager: DiskManagerBuilder) -> Self {
        self.disk_manager_builder = Some(disk_manager);
        self
    }

    /// Customize memory policy
    pub fn with_memory_pool(mut self, memory_pool: Arc<dyn MemoryPool>) -> Self {
        self.memory_pool = Some(memory_pool);
        self
    }

    /// Customize cache policy
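    ///
    /// # Example: Customize the cache configuration
    ///
    /// A minimal sketch; the 64 MB metadata cache limit is an illustrative value, not a default.
    /// ```
    /// # use datafusion_execution::cache::cache_manager::CacheManagerConfig;
    /// # use datafusion_execution::runtime_env::RuntimeEnvBuilder;
    /// let cache_config =
    ///     CacheManagerConfig::default().with_metadata_cache_limit(64 * 1024 * 1024);
    /// let runtime_env = RuntimeEnvBuilder::new()
    ///     .with_cache_manager(cache_config)
    ///     .build()
    ///     .unwrap();
    /// ```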
    pub fn with_cache_manager(mut self, cache_manager: CacheManagerConfig) -> Self {
        self.cache_manager = cache_manager;
        self
    }

    /// Customize object store registry
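    ///
    /// # Example: Provide an object store registry
    ///
    /// A minimal sketch; the default registry is used only to illustrate the call,
    /// and a custom [`ObjectStoreRegistry`] implementation is passed the same way.
    /// ```
    /// # use std::sync::Arc;
    /// # use datafusion_execution::object_store::DefaultObjectStoreRegistry;
    /// # use datafusion_execution::runtime_env::RuntimeEnvBuilder;
    /// let runtime_env = RuntimeEnvBuilder::new()
    ///     .with_object_store_registry(Arc::new(DefaultObjectStoreRegistry::default()))
    ///     .build()
    ///     .unwrap();
    /// ```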
    pub fn with_object_store_registry(
        mut self,
        object_store_registry: Arc<dyn ObjectStoreRegistry>,
    ) -> Self {
        self.object_store_registry = object_store_registry;
        self
    }

    /// Limit the total memory available while running the DataFusion
    /// plan to `max_memory * memory_fraction` bytes.
    ///
    /// This installs a [`GreedyMemoryPool`] wrapped in a
    /// [`TrackConsumersPool`] that reports the top 5 memory consumers
    /// when the limit is exceeded.
    ///
    /// Note that DataFusion does not yet respect this limit in all cases.
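    ///
    /// # Example: Limit execution memory to 80% of 512 MB
    ///
    /// A minimal sketch; the limit and fraction are illustrative values only.
    /// ```
    /// # use datafusion_execution::runtime_env::RuntimeEnvBuilder;
    /// let runtime_env = RuntimeEnvBuilder::new()
    ///     .with_memory_limit(512 * 1024 * 1024, 0.8)
    ///     .build()
    ///     .unwrap();
    /// ```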
    pub fn with_memory_limit(self, max_memory: usize, memory_fraction: f64) -> Self {
        let pool_size = (max_memory as f64 * memory_fraction) as usize;
        self.with_memory_pool(Arc::new(TrackConsumersPool::new(
            GreedyMemoryPool::new(pool_size),
            NonZeroUsize::new(5).unwrap(),
        )))
    }

    /// Use the specified path to create any needed temporary files
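    ///
    /// # Example: Spill to a specific directory
    ///
    /// A minimal sketch; the OS temporary directory stands in for any writable path.
    /// ```
    /// # use datafusion_execution::runtime_env::RuntimeEnvBuilder;
    /// let runtime_env = RuntimeEnvBuilder::new()
    ///     .with_temp_file_path(std::env::temp_dir())
    ///     .build()
    ///     .unwrap();
    /// ```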
    pub fn with_temp_file_path(mut self, path: impl Into<PathBuf>) -> Self {
        let builder = self.disk_manager_builder.take().unwrap_or_default();
        self.with_disk_manager_builder(
            builder.with_mode(DiskManagerMode::Directories(vec![path.into()])),
        )
    }

    /// Specify a limit on the size of the temporary file directory in bytes
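    ///
    /// # Example: Cap temporary file usage at 10 GB
    ///
    /// A minimal sketch; the 10 GB cap is an illustrative value, not the default.
    /// ```
    /// # use datafusion_execution::runtime_env::RuntimeEnvBuilder;
    /// let runtime_env = RuntimeEnvBuilder::new()
    ///     .with_max_temp_directory_size(10 * 1024 * 1024 * 1024)
    ///     .build()
    ///     .unwrap();
    /// ```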
    pub fn with_max_temp_directory_size(mut self, size: u64) -> Self {
        let builder = self.disk_manager_builder.take().unwrap_or_default();
        self.with_disk_manager_builder(builder.with_max_temp_directory_size(size))
    }

    /// Specify the limit of the file-embedded metadata cache, in bytes.
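    ///
    /// # Example: Allow ~64 MB of cached file metadata
    ///
    /// A minimal sketch; the 64 MB limit is an illustrative value, not the default.
    /// ```
    /// # use datafusion_execution::runtime_env::RuntimeEnvBuilder;
    /// // cache at most ~64 MB of file metadata such as Parquet metadata
    /// let runtime_env = RuntimeEnvBuilder::new()
    ///     .with_metadata_cache_limit(64 * 1024 * 1024)
    ///     .build()
    ///     .unwrap();
    /// ```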
    pub fn with_metadata_cache_limit(mut self, limit: usize) -> Self {
        self.cache_manager = self.cache_manager.with_metadata_cache_limit(limit);
        self
    }

    /// Build a RuntimeEnv
    pub fn build(self) -> Result<RuntimeEnv> {
        let Self {
            disk_manager,
            disk_manager_builder,
            memory_pool,
            cache_manager,
            object_store_registry,
            #[cfg(feature = "parquet_encryption")]
            parquet_encryption_factory_registry,
        } = self;
        let memory_pool =
            memory_pool.unwrap_or_else(|| Arc::new(UnboundedMemoryPool::default()));

        Ok(RuntimeEnv {
            memory_pool,
            disk_manager: if let Some(builder) = disk_manager_builder {
                Arc::new(builder.build()?)
            } else {
                #[allow(deprecated)]
                DiskManager::try_new(disk_manager)?
            },
            cache_manager: CacheManager::try_new(&cache_manager)?,
            object_store_registry,
            #[cfg(feature = "parquet_encryption")]
            parquet_encryption_factory_registry,
        })
    }

    /// Convenience method to create a new `Arc<RuntimeEnv>`
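    ///
    /// # Example
    /// ```
    /// # use datafusion_execution::runtime_env::RuntimeEnvBuilder;
    /// let runtime_env = RuntimeEnvBuilder::new().build_arc().unwrap();
    /// ```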
    pub fn build_arc(self) -> Result<Arc<RuntimeEnv>> {
        self.build().map(Arc::new)
    }

    /// Create a new RuntimeEnvBuilder from an existing RuntimeEnv
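    ///
    /// The returned builder reuses the existing memory pool, cache configuration,
    /// object store registry and disk manager.
    ///
    /// # Example
    ///
    /// A minimal sketch:
    /// ```
    /// # use datafusion_execution::runtime_env::{RuntimeEnv, RuntimeEnvBuilder};
    /// # let existing = RuntimeEnv::default();
    /// // derive a new environment that shares the resources of `existing`
    /// let runtime_env = RuntimeEnvBuilder::from_runtime_env(&existing)
    ///     .build()
    ///     .unwrap();
    /// ```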
    pub fn from_runtime_env(runtime_env: &RuntimeEnv) -> Self {
        let cache_config = CacheManagerConfig {
            table_files_statistics_cache: runtime_env
                .cache_manager
                .get_file_statistic_cache(),
            list_files_cache: runtime_env.cache_manager.get_list_files_cache(),
            file_metadata_cache: Some(
                runtime_env.cache_manager.get_file_metadata_cache(),
            ),
            metadata_cache_limit: runtime_env.cache_manager.get_metadata_cache_limit(),
        };

        Self {
            #[allow(deprecated)]
            disk_manager: DiskManagerConfig::Existing(Arc::clone(
                &runtime_env.disk_manager,
            )),
            disk_manager_builder: None,
            memory_pool: Some(Arc::clone(&runtime_env.memory_pool)),
            cache_manager: cache_config,
            object_store_registry: Arc::clone(&runtime_env.object_store_registry),
            #[cfg(feature = "parquet_encryption")]
            parquet_encryption_factory_registry: Arc::clone(
                &runtime_env.parquet_encryption_factory_registry,
            ),
        }
    }

    /// Returns a list of all available runtime configurations with their current values and descriptions
    pub fn entries(&self) -> Vec<ConfigEntry> {
        vec![
            ConfigEntry {
                key: "datafusion.runtime.memory_limit".to_string(),
                value: None, // Default is system-dependent
                description: "Maximum memory limit for query execution. Supports suffixes K (kilobytes), M (megabytes), and G (gigabytes). Example: '2G' for 2 gigabytes.",
            },
            ConfigEntry {
                key: "datafusion.runtime.max_temp_directory_size".to_string(),
                value: Some("100G".to_string()),
                description: "Maximum temporary file directory size. Supports suffixes K (kilobytes), M (megabytes), and G (gigabytes). Example: '2G' for 2 gigabytes.",
            },
            ConfigEntry {
                key: "datafusion.runtime.temp_directory".to_string(),
                value: None, // Default is system-dependent
                description: "The path to the temporary file directory.",
            },
            ConfigEntry {
                key: "datafusion.runtime.metadata_cache_limit".to_string(),
                value: Some("50M".to_owned()),
                description: "Maximum memory to use for file metadata cache such as Parquet metadata. Supports suffixes K (kilobytes), M (megabytes), and G (gigabytes). Example: '2G' for 2 gigabytes.",
            }
        ]
    }

    /// Generate documentation that can be included in the user guide
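    ///
    /// # Example
    ///
    /// A minimal sketch:
    /// ```
    /// # use datafusion_execution::runtime_env::RuntimeEnvBuilder;
    /// let markdown = RuntimeEnvBuilder::generate_config_markdown();
    /// assert!(markdown.starts_with("| key | default | description |"));
    /// ```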
    pub fn generate_config_markdown() -> String {
        use std::fmt::Write as _;

        let s = Self::default();

        let mut docs = "| key | default | description |\n".to_string();
        docs += "|-----|---------|-------------|\n";
        let mut entries = s.entries();
        entries.sort_unstable_by(|a, b| a.key.cmp(&b.key));

        for entry in &entries {
            let _ = writeln!(
                &mut docs,
                "| {} | {} | {} |",
                entry.key,
                entry.value.as_deref().unwrap_or("NULL"),
                entry.description
            );
        }
        docs
    }
}