datafusion_execution/config.rs
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use std::{
    any::{Any, TypeId},
    collections::HashMap,
    hash::{BuildHasherDefault, Hasher},
    sync::Arc,
};

use datafusion_common::{
    config::{ConfigExtension, ConfigOptions, SpillCompression},
    Result, ScalarValue,
};

/// Configuration options for [`SessionContext`].
///
/// Can be passed to [`SessionContext::new_with_config`] to customize the configuration of DataFusion.
///
/// Options can be set using namespaced keys with `.` as the separator, where the
/// namespace determines which configuration struct the value is routed to. All
/// built-in options are under the `datafusion` namespace.
///
/// For example, the key `datafusion.execution.batch_size` will set [ExecutionOptions::batch_size][datafusion_common::config::ExecutionOptions::batch_size],
/// because [ConfigOptions::execution] is [ExecutionOptions][datafusion_common::config::ExecutionOptions]. Similarly, the key
/// `datafusion.execution.parquet.pushdown_filters` will set [ParquetOptions::pushdown_filters][datafusion_common::config::ParquetOptions::pushdown_filters],
/// since [ExecutionOptions::parquet][datafusion_common::config::ExecutionOptions::parquet] is [ParquetOptions][datafusion_common::config::ParquetOptions].
///
/// Some options have convenience methods. For example [SessionConfig::with_batch_size] is
/// shorthand for setting `datafusion.execution.batch_size`.
///
/// ```
/// use datafusion_execution::config::SessionConfig;
/// use datafusion_common::ScalarValue;
///
/// let config = SessionConfig::new()
///     .set("datafusion.execution.batch_size", &ScalarValue::UInt64(Some(1234)))
///     .set_bool("datafusion.execution.parquet.pushdown_filters", true);
///
/// assert_eq!(config.batch_size(), 1234);
/// assert_eq!(config.options().execution.batch_size, 1234);
/// assert_eq!(config.options().execution.parquet.pushdown_filters, true);
/// ```
///
/// You can also directly mutate the options via [SessionConfig::options_mut].
/// So the following is equivalent to the above:
///
/// ```
/// # use datafusion_execution::config::SessionConfig;
/// # use datafusion_common::ScalarValue;
/// #
/// let mut config = SessionConfig::new();
/// config.options_mut().execution.batch_size = 1234;
/// config.options_mut().execution.parquet.pushdown_filters = true;
/// #
/// # assert_eq!(config.batch_size(), 1234);
/// # assert_eq!(config.options().execution.batch_size, 1234);
/// # assert_eq!(config.options().execution.parquet.pushdown_filters, true);
/// ```
///
/// ## Built-in options
///
/// | Namespace | Config struct |
/// | --------- | ------------- |
/// | `datafusion.catalog` | [CatalogOptions][datafusion_common::config::CatalogOptions] |
/// | `datafusion.execution` | [ExecutionOptions][datafusion_common::config::ExecutionOptions] |
/// | `datafusion.execution.parquet` | [ParquetOptions][datafusion_common::config::ParquetOptions] |
/// | `datafusion.optimizer` | [OptimizerOptions][datafusion_common::config::OptimizerOptions] |
/// | `datafusion.sql_parser` | [SqlParserOptions][datafusion_common::config::SqlParserOptions] |
/// | `datafusion.explain` | [ExplainOptions][datafusion_common::config::ExplainOptions] |
///
/// ## Custom configuration
///
/// Configuration options can be extended. See [SessionConfig::with_extension] for details.
///
/// [`SessionContext`]: https://docs.rs/datafusion/latest/datafusion/execution/context/struct.SessionContext.html
/// [`SessionContext::new_with_config`]: https://docs.rs/datafusion/latest/datafusion/execution/context/struct.SessionContext.html#method.new_with_config
#[derive(Clone, Debug)]
pub struct SessionConfig {
    /// Configuration options
    options: ConfigOptions,
    /// Opaque extensions.
    extensions: AnyMap,
}

impl Default for SessionConfig {
    fn default() -> Self {
        Self {
            options: ConfigOptions::new(),
            // Assume no extensions by default.
            extensions: HashMap::with_capacity_and_hasher(
                0,
                BuildHasherDefault::default(),
            ),
        }
    }
}

impl SessionConfig {
    /// Create an execution config with default settings
    pub fn new() -> Self {
        Default::default()
    }

    /// Create an execution config with config options read from the environment
    pub fn from_env() -> Result<Self> {
        Ok(ConfigOptions::from_env()?.into())
    }

    /// Create a new [`SessionConfig`], taking config values from a string hash map.
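    ///
    /// A minimal sketch; the `datafusion.execution.batch_size` key follows the
    /// namespacing scheme described on [`SessionConfig`], and `4096` is an
    /// arbitrary example value:
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use datafusion_execution::config::SessionConfig;
    ///
    /// let mut settings = HashMap::new();
    /// settings.insert(
    ///     "datafusion.execution.batch_size".to_string(),
    ///     "4096".to_string(),
    /// );
    ///
    /// let config = SessionConfig::from_string_hash_map(&settings).unwrap();
    /// assert_eq!(config.batch_size(), 4096);
    /// ```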
    pub fn from_string_hash_map(settings: &HashMap<String, String>) -> Result<Self> {
        Ok(ConfigOptions::from_string_hash_map(settings)?.into())
    }

    /// Return a handle to the configuration options.
    ///
    /// Can be used to read the current configuration.
    ///
    /// ```
    /// use datafusion_execution::config::SessionConfig;
    ///
    /// let config = SessionConfig::new();
    /// assert!(config.options().execution.batch_size > 0);
    /// ```
    pub fn options(&self) -> &ConfigOptions {
        &self.options
    }

    /// Return a mutable handle to the configuration options.
    ///
    /// Can be used to set configuration options.
    ///
    /// ```
    /// use datafusion_execution::config::SessionConfig;
    ///
    /// let mut config = SessionConfig::new();
    /// config.options_mut().execution.batch_size = 1024;
    /// assert_eq!(config.options().execution.batch_size, 1024);
    /// ```
    pub fn options_mut(&mut self) -> &mut ConfigOptions {
        &mut self.options
    }

    /// Set a configuration option
    pub fn set(self, key: &str, value: &ScalarValue) -> Self {
        self.set_str(key, &value.to_string())
    }

    /// Set a boolean configuration option
    pub fn set_bool(self, key: &str, value: bool) -> Self {
        self.set_str(key, &value.to_string())
    }

    /// Set a generic `u64` configuration option
    pub fn set_u64(self, key: &str, value: u64) -> Self {
        self.set_str(key, &value.to_string())
    }

    /// Set a generic `usize` configuration option
    pub fn set_usize(self, key: &str, value: usize) -> Self {
        self.set_str(key, &value.to_string())
    }

    /// Set a generic `str` configuration option
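    ///
    /// Note that this method (and the other `set_*` helpers that delegate to it)
    /// will panic if `key` is not a valid configuration option, because the
    /// underlying `ConfigOptions::set` result is unwrapped. A short sketch,
    /// using the `datafusion.catalog.default_catalog` key implied by the
    /// namespace table on [`SessionConfig`]:
    ///
    /// ```
    /// use datafusion_execution::config::SessionConfig;
    ///
    /// // key derived from the `datafusion.catalog` namespace (see the table above)
    /// let config = SessionConfig::new()
    ///     .set_str("datafusion.catalog.default_catalog", "my_catalog");
    /// assert_eq!(config.options().catalog.default_catalog, "my_catalog");
    /// ```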
    pub fn set_str(mut self, key: &str, value: &str) -> Self {
        self.options.set(key, value).unwrap();
        self
    }

    /// Customize batch size
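    ///
    /// A minimal builder-style sketch (`4096` is an arbitrary example value):
    ///
    /// ```
    /// use datafusion_execution::config::SessionConfig;
    ///
    /// let config = SessionConfig::new().with_batch_size(4096);
    /// assert_eq!(config.batch_size(), 4096);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `n` is zero.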
    pub fn with_batch_size(mut self, n: usize) -> Self {
        // batch size must be greater than zero
        assert!(n > 0);
        self.options.execution.batch_size = n;
        self
    }

    /// Customize [`target_partitions`]
    ///
    /// [`target_partitions`]: datafusion_common::config::ExecutionOptions::target_partitions
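    ///
    /// Passing `0` resets the value to the default. A short sketch (`8` is an
    /// arbitrary example value):
    ///
    /// ```
    /// use datafusion_execution::config::SessionConfig;
    ///
    /// let config = SessionConfig::new().with_target_partitions(8);
    /// assert_eq!(config.target_partitions(), 8);
    /// ```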
    pub fn with_target_partitions(mut self, n: usize) -> Self {
        self.options.execution.target_partitions = if n == 0 {
            datafusion_common::config::ExecutionOptions::default().target_partitions
        } else {
            n
        };
        self
    }

    /// Insert a new [ConfigExtension]
    pub fn with_option_extension<T: ConfigExtension>(mut self, extension: T) -> Self {
        self.options_mut().extensions.insert(extension);
        self
    }

    /// Get [`target_partitions`]
    ///
    /// [`target_partitions`]: datafusion_common::config::ExecutionOptions::target_partitions
    pub fn target_partitions(&self) -> usize {
        self.options.execution.target_partitions
    }

    /// Is the information schema enabled?
    pub fn information_schema(&self) -> bool {
        self.options.catalog.information_schema
    }

    /// Should the context create the default catalog and schema?
    pub fn create_default_catalog_and_schema(&self) -> bool {
        self.options.catalog.create_default_catalog_and_schema
    }

    /// Are joins repartitioned during execution?
    pub fn repartition_joins(&self) -> bool {
        self.options.optimizer.repartition_joins
    }

    /// Are aggregates repartitioned during execution?
    pub fn repartition_aggregations(&self) -> bool {
        self.options.optimizer.repartition_aggregations
    }

    /// Are window functions repartitioned during execution?
    pub fn repartition_window_functions(&self) -> bool {
        self.options.optimizer.repartition_windows
    }

    /// Do we execute sorts in a per-partition fashion and merge afterwards,
    /// or do we coalesce partitions first and sort globally?
    pub fn repartition_sorts(&self) -> bool {
        self.options.optimizer.repartition_sorts
    }

    /// Prefer existing sort (true) or maximize parallelism (false). See
    /// [prefer_existing_sort] for more details
    ///
    /// [prefer_existing_sort]: datafusion_common::config::OptimizerOptions::prefer_existing_sort
    pub fn prefer_existing_sort(&self) -> bool {
        self.options.optimizer.prefer_existing_sort
    }

    /// Are statistics collected during execution?
    pub fn collect_statistics(&self) -> bool {
        self.options.execution.collect_statistics
    }

    /// Compression codec used for spill files
    pub fn spill_compression(&self) -> SpillCompression {
        self.options.execution.spill_compression
    }

    /// Selects a name for the default catalog and schema
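    ///
    /// A minimal sketch; the catalog and schema names below are arbitrary examples:
    ///
    /// ```
    /// use datafusion_execution::config::SessionConfig;
    ///
    /// let config = SessionConfig::new()
    ///     .with_default_catalog_and_schema("my_catalog", "my_schema");
    /// assert_eq!(config.options().catalog.default_catalog, "my_catalog");
    /// assert_eq!(config.options().catalog.default_schema, "my_schema");
    /// ```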
    pub fn with_default_catalog_and_schema(
        mut self,
        catalog: impl Into<String>,
        schema: impl Into<String>,
    ) -> Self {
        self.options.catalog.default_catalog = catalog.into();
        self.options.catalog.default_schema = schema.into();
        self
    }

    /// Controls whether the default catalog and schema will be automatically created
    pub fn with_create_default_catalog_and_schema(mut self, create: bool) -> Self {
        self.options.catalog.create_default_catalog_and_schema = create;
        self
    }

    /// Enables or disables the inclusion of `information_schema` virtual tables
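    ///
    /// A short sketch pairing this builder with the [`Self::information_schema`] getter:
    ///
    /// ```
    /// use datafusion_execution::config::SessionConfig;
    ///
    /// let config = SessionConfig::new().with_information_schema(true);
    /// assert!(config.information_schema());
    /// ```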
    pub fn with_information_schema(mut self, enabled: bool) -> Self {
        self.options.catalog.information_schema = enabled;
        self
    }

    /// Enables or disables the use of repartitioning for joins to improve parallelism
    pub fn with_repartition_joins(mut self, enabled: bool) -> Self {
        self.options.optimizer.repartition_joins = enabled;
        self
    }

    /// Enables or disables the use of repartitioning for aggregations to improve parallelism
    pub fn with_repartition_aggregations(mut self, enabled: bool) -> Self {
        self.options.optimizer.repartition_aggregations = enabled;
        self
    }

    /// Sets the minimum file range size for repartitioning scans
    pub fn with_repartition_file_min_size(mut self, size: usize) -> Self {
        self.options.optimizer.repartition_file_min_size = size;
        self
    }

    /// Enables or disables allowing unordered symmetric hash joins without pruning
    pub fn with_allow_symmetric_joins_without_pruning(mut self, enabled: bool) -> Self {
        self.options.optimizer.allow_symmetric_joins_without_pruning = enabled;
        self
    }

    /// Enables or disables the use of repartitioning for file scans
    pub fn with_repartition_file_scans(mut self, enabled: bool) -> Self {
        self.options.optimizer.repartition_file_scans = enabled;
        self
    }

    /// Enables or disables the use of repartitioning for window functions to improve parallelism
    pub fn with_repartition_windows(mut self, enabled: bool) -> Self {
        self.options.optimizer.repartition_windows = enabled;
        self
    }

    /// Enables or disables the use of per-partition sorting to improve parallelism
    pub fn with_repartition_sorts(mut self, enabled: bool) -> Self {
        self.options.optimizer.repartition_sorts = enabled;
        self
    }

    /// Prefer existing sort (true) or maximize parallelism (false). See
    /// [prefer_existing_sort] for more details
    ///
    /// [prefer_existing_sort]: datafusion_common::config::OptimizerOptions::prefer_existing_sort
    pub fn with_prefer_existing_sort(mut self, enabled: bool) -> Self {
        self.options.optimizer.prefer_existing_sort = enabled;
        self
    }

    /// Prefer existing union (true). See [prefer_existing_union] for more details
    ///
    /// [prefer_existing_union]: datafusion_common::config::OptimizerOptions::prefer_existing_union
    pub fn with_prefer_existing_union(mut self, enabled: bool) -> Self {
        self.options.optimizer.prefer_existing_union = enabled;
        self
    }

    /// Enables or disables the use of pruning predicates for parquet readers to skip row groups
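    ///
    /// A short sketch pairing this builder with the [`Self::parquet_pruning`] getter:
    ///
    /// ```
    /// use datafusion_execution::config::SessionConfig;
    ///
    /// let config = SessionConfig::new().with_parquet_pruning(false);
    /// assert!(!config.parquet_pruning());
    /// ```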
    pub fn with_parquet_pruning(mut self, enabled: bool) -> Self {
        self.options.execution.parquet.pruning = enabled;
        self
    }

    /// Returns true if pruning predicates should be used to skip parquet row groups
    pub fn parquet_pruning(&self) -> bool {
        self.options.execution.parquet.pruning
    }

    /// Returns true if bloom filters should be used to skip parquet row groups
    pub fn parquet_bloom_filter_pruning(&self) -> bool {
        self.options.execution.parquet.bloom_filter_on_read
    }

    /// Enables or disables the use of bloom filters for parquet readers to skip row groups
    pub fn with_parquet_bloom_filter_pruning(mut self, enabled: bool) -> Self {
        self.options.execution.parquet.bloom_filter_on_read = enabled;
        self
    }

    /// Returns true if the page index should be used to skip parquet data pages
    pub fn parquet_page_index_pruning(&self) -> bool {
        self.options.execution.parquet.enable_page_index
    }

    /// Enables or disables the use of the page index for parquet readers to skip parquet data pages
    pub fn with_parquet_page_index_pruning(mut self, enabled: bool) -> Self {
        self.options.execution.parquet.enable_page_index = enabled;
        self
    }

    /// Enables or disables the collection of statistics after listing files
    pub fn with_collect_statistics(mut self, enabled: bool) -> Self {
        self.options.execution.collect_statistics = enabled;
        self
    }

    /// Get the currently configured batch size
    pub fn batch_size(&self) -> usize {
        self.options.execution.batch_size
    }

    /// Enables or disables the coalescence of small batches into larger batches
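    ///
    /// A short sketch pairing this builder with the [`Self::coalesce_batches`] getter:
    ///
    /// ```
    /// use datafusion_execution::config::SessionConfig;
    ///
    /// let config = SessionConfig::new().with_coalesce_batches(false);
    /// assert!(!config.coalesce_batches());
    /// ```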
    pub fn with_coalesce_batches(mut self, enabled: bool) -> Self {
        self.options.execution.coalesce_batches = enabled;
        self
    }

    /// Returns true if record batches will be examined between each operator
    /// and small batches will be coalesced into larger batches.
    pub fn coalesce_batches(&self) -> bool {
        self.options.execution.coalesce_batches
    }

    /// Enables or disables the round robin repartition for increasing parallelism
    pub fn with_round_robin_repartition(mut self, enabled: bool) -> Self {
        self.options.optimizer.enable_round_robin_repartition = enabled;
        self
    }

    /// Returns true if the physical plan optimizer will try to
    /// add round robin repartition to increase parallelism to leverage more CPU cores.
    pub fn round_robin_repartition(&self) -> bool {
        self.options.optimizer.enable_round_robin_repartition
    }

    /// Set the size of [`sort_spill_reservation_bytes`] to control
    /// memory pre-reservation
    ///
    /// [`sort_spill_reservation_bytes`]: datafusion_common::config::ExecutionOptions::sort_spill_reservation_bytes
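    ///
    /// A minimal sketch; the 64 MB value below is an arbitrary example, not a recommendation:
    ///
    /// ```
    /// use datafusion_execution::config::SessionConfig;
    ///
    /// let config = SessionConfig::new().with_sort_spill_reservation_bytes(64 * 1024 * 1024);
    /// assert_eq!(
    ///     config.options().execution.sort_spill_reservation_bytes,
    ///     64 * 1024 * 1024
    /// );
    /// ```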
    pub fn with_sort_spill_reservation_bytes(
        mut self,
        sort_spill_reservation_bytes: usize,
    ) -> Self {
        self.options.execution.sort_spill_reservation_bytes =
            sort_spill_reservation_bytes;
        self
    }

    /// Set the compression codec [`spill_compression`] used when spilling data to disk.
    ///
    /// [`spill_compression`]: datafusion_common::config::ExecutionOptions::spill_compression
    pub fn with_spill_compression(mut self, spill_compression: SpillCompression) -> Self {
        self.options.execution.spill_compression = spill_compression;
        self
    }

    /// Set the size of [`sort_in_place_threshold_bytes`] to control the
    /// threshold below which data is sorted in place rather than in batches.
    ///
    /// [`sort_in_place_threshold_bytes`]: datafusion_common::config::ExecutionOptions::sort_in_place_threshold_bytes
    pub fn with_sort_in_place_threshold_bytes(
        mut self,
        sort_in_place_threshold_bytes: usize,
    ) -> Self {
        self.options.execution.sort_in_place_threshold_bytes =
            sort_in_place_threshold_bytes;
        self
    }

    /// Enables or disables the enforcement of batch size in joins
    pub fn with_enforce_batch_size_in_joins(
        mut self,
        enforce_batch_size_in_joins: bool,
    ) -> Self {
        self.options.execution.enforce_batch_size_in_joins = enforce_batch_size_in_joins;
        self
    }

    /// Returns true if joins will be enforced to output batches of the configured size
    pub fn enforce_batch_size_in_joins(&self) -> bool {
        self.options.execution.enforce_batch_size_in_joins
    }

    /// Convert configuration options to name-value pairs, with values
    /// converted to strings.
    ///
    /// Note that this method will eventually be deprecated and
    /// replaced by [`options`].
    ///
    /// [`options`]: Self::options
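    ///
    /// For example (the `datafusion.execution.batch_size` key is documented on [`SessionConfig`]):
    ///
    /// ```
    /// use datafusion_execution::config::SessionConfig;
    ///
    /// let props = SessionConfig::new().to_props();
    /// assert!(props.contains_key("datafusion.execution.batch_size"));
    /// ```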
    pub fn to_props(&self) -> HashMap<String, String> {
        let mut map = HashMap::new();
        // copy configs from config_options
        for entry in self.options.entries() {
            map.insert(entry.key, entry.value.unwrap_or_default());
        }

        map
    }

    /// Add extensions.
    ///
    /// Extensions can be used to attach extra data to the session config -- e.g. tracing information or caches.
    /// Extensions are opaque and the types are unknown to DataFusion itself, which makes them extremely flexible. [^1]
    ///
    /// Extensions are stored within an [`Arc`] so they do NOT require [`Clone`]. They are immutable. If you need to
    /// modify their state over their lifetime -- e.g. for caches -- you need to establish some form of interior mutability.
    ///
    /// Extensions are indexed by their type `T`. If multiple values of the same type are provided, only the last one
    /// will be kept.
    ///
    /// You may use [`get_extension`](Self::get_extension) to retrieve extensions.
    ///
    /// # Example
    /// ```
    /// use std::sync::Arc;
    /// use datafusion_execution::config::SessionConfig;
    ///
    /// // application-specific extension types
    /// struct Ext1(u8);
    /// struct Ext2(u8);
    /// struct Ext3(u8);
    ///
    /// let ext1a = Arc::new(Ext1(10));
    /// let ext1b = Arc::new(Ext1(11));
    /// let ext2 = Arc::new(Ext2(2));
    ///
    /// let cfg = SessionConfig::default()
    ///     // will only remember the last Ext1
    ///     .with_extension(Arc::clone(&ext1a))
    ///     .with_extension(Arc::clone(&ext1b))
    ///     .with_extension(Arc::clone(&ext2));
    ///
    /// let ext1_received = cfg.get_extension::<Ext1>().unwrap();
    /// assert!(!Arc::ptr_eq(&ext1_received, &ext1a));
    /// assert!(Arc::ptr_eq(&ext1_received, &ext1b));
    ///
    /// let ext2_received = cfg.get_extension::<Ext2>().unwrap();
    /// assert!(Arc::ptr_eq(&ext2_received, &ext2));
    ///
    /// assert!(cfg.get_extension::<Ext3>().is_none());
    /// ```
    ///
    /// [^1]: Compare that to [`ConfigOptions`] which only supports [`ScalarValue`] payloads.
    pub fn with_extension<T>(mut self, ext: Arc<T>) -> Self
    where
        T: Send + Sync + 'static,
    {
        self.set_extension(ext);
        self
    }

    /// Set an extension. Pretty much the same as [`with_extension`](Self::with_extension), but takes
    /// a mutable reference instead of consuming `self`. Useful if you want to add another extension after
    /// the [`SessionConfig`] is created.
    ///
    /// # Example
    /// ```
    /// use std::sync::Arc;
    /// use datafusion_execution::config::SessionConfig;
    ///
    /// // application-specific extension types
    /// struct Ext1(u8);
    /// struct Ext2(u8);
    /// struct Ext3(u8);
    ///
    /// let ext1a = Arc::new(Ext1(10));
    /// let ext1b = Arc::new(Ext1(11));
    /// let ext2 = Arc::new(Ext2(2));
    ///
    /// let mut cfg = SessionConfig::default();
    ///
    /// // will only remember the last Ext1
    /// cfg.set_extension(Arc::clone(&ext1a));
    /// cfg.set_extension(Arc::clone(&ext1b));
    /// cfg.set_extension(Arc::clone(&ext2));
    ///
    /// let ext1_received = cfg.get_extension::<Ext1>().unwrap();
    /// assert!(!Arc::ptr_eq(&ext1_received, &ext1a));
    /// assert!(Arc::ptr_eq(&ext1_received, &ext1b));
    ///
    /// let ext2_received = cfg.get_extension::<Ext2>().unwrap();
    /// assert!(Arc::ptr_eq(&ext2_received, &ext2));
    ///
    /// assert!(cfg.get_extension::<Ext3>().is_none());
    /// ```
    pub fn set_extension<T>(&mut self, ext: Arc<T>)
    where
        T: Send + Sync + 'static,
    {
        let ext = ext as Arc<dyn Any + Send + Sync + 'static>;
        let id = TypeId::of::<T>();
        self.extensions.insert(id, ext);
    }

    /// Get the extension, if any, for the specified type `T`.
    ///
    /// See [`with_extension`](Self::with_extension) on how to attach extensions.
    pub fn get_extension<T>(&self) -> Option<Arc<T>>
    where
        T: Send + Sync + 'static,
    {
        let id = TypeId::of::<T>();
        self.extensions
            .get(&id)
            .cloned()
            .map(|ext| Arc::downcast(ext).expect("TypeId unique"))
    }
}

impl From<ConfigOptions> for SessionConfig {
    fn from(options: ConfigOptions) -> Self {
        Self {
            options,
            ..Default::default()
        }
    }
}

/// Map that holds opaque objects indexed by their type.
///
/// Data is wrapped into an [`Arc`] to enable [`Clone`] while still being [object safe].
///
/// [object safe]: https://doc.rust-lang.org/reference/items/traits.html#object-safety
type AnyMap =
    HashMap<TypeId, Arc<dyn Any + Send + Sync + 'static>, BuildHasherDefault<IdHasher>>;

/// Hasher for [`AnyMap`].
///
/// With [`TypeId`]s as keys, there's no need to hash them. They are already hashes themselves, coming from the compiler.
/// The [`IdHasher`] just holds the [`u64`] of the [`TypeId`], and then returns it, instead of doing any bit fiddling.
#[derive(Default)]
struct IdHasher(u64);

impl Hasher for IdHasher {
    fn write(&mut self, _: &[u8]) {
        unreachable!("TypeId calls write_u64");
    }

    #[inline]
    fn write_u64(&mut self, id: u64) {
        self.0 = id;
    }

    #[inline]
    fn finish(&self) -> u64 {
        self.0
    }
}