1use std::{
7 collections::HashMap,
8 fmt::{self, Formatter},
9 sync::{Arc, Mutex, OnceLock},
10 time::Duration,
11};
12
13use chrono::{DateTime, Utc};
14
15use arrow_array::RecordBatch;
16use arrow_schema::Schema as ArrowSchema;
17use datafusion::physical_plan::metrics::MetricType;
18use datafusion::{
19 catalog::streaming::StreamingTable,
20 dataframe::DataFrame,
21 execution::{
22 TaskContext,
23 context::{SessionConfig, SessionContext},
24 disk_manager::DiskManagerBuilder,
25 memory_pool::FairSpillPool,
26 runtime_env::RuntimeEnvBuilder,
27 },
28 physical_plan::{
29 DisplayAs, DisplayFormatType, ExecutionPlan, PlanProperties, SendableRecordBatchStream,
30 analyze::AnalyzeExec,
31 display::DisplayableExecutionPlan,
32 execution_plan::{Boundedness, CardinalityEffect, EmissionType},
33 metrics::MetricValue,
34 stream::RecordBatchStreamAdapter,
35 streaming::PartitionStream,
36 },
37};
38use datafusion_common::{DataFusionError, Statistics};
39use datafusion_physical_expr::{EquivalenceProperties, Partitioning};
40
41use futures::{StreamExt, stream};
42use lance_arrow::SchemaExt;
43use lance_core::{
44 Error, Result,
45 utils::{
46 futures::FinallyStreamExt,
47 tracing::{EXECUTION_PLAN_RUN, StreamTracingExt, TRACE_EXECUTION},
48 },
49};
50use log::{debug, info, warn};
51use tracing::Span;
52
53use crate::udf::register_functions;
54use crate::{
55 chunker::StrictBatchSizeStream,
56 utils::{
57 BYTES_READ_METRIC, INDEX_COMPARISONS_METRIC, INDICES_LOADED_METRIC, IOPS_METRIC,
58 MetricsExt, PARTS_LOADED_METRIC, REQUESTS_METRIC,
59 },
60};
61
/// An execution node that wraps a single, already-constructed record batch
/// stream.  The stream can be consumed at most once: `execute` takes it out
/// of the mutex, leaving `None` behind (the "exhausted" state).
pub struct OneShotExec {
    // The wrapped stream; `None` once `execute` has consumed it.
    stream: Mutex<Option<SendableRecordBatchStream>>,
    // Schema captured from the stream at construction time.
    schema: Arc<ArrowSchema>,
    // Cached plan properties (single round-robin partition, bounded).
    properties: Arc<PlanProperties>,
}
76
77impl OneShotExec {
78 pub fn new(stream: SendableRecordBatchStream) -> Self {
80 let schema = stream.schema();
81 Self {
82 stream: Mutex::new(Some(stream)),
83 schema: schema.clone(),
84 properties: Arc::new(PlanProperties::new(
85 EquivalenceProperties::new(schema),
86 Partitioning::RoundRobinBatch(1),
87 EmissionType::Incremental,
88 Boundedness::Bounded,
89 )),
90 }
91 }
92
93 pub fn from_batch(batch: RecordBatch) -> Self {
94 let schema = batch.schema();
95 let stream = Box::pin(RecordBatchStreamAdapter::new(
96 schema,
97 stream::iter(vec![Ok(batch)]),
98 ));
99 Self::new(stream)
100 }
101}
102
103impl std::fmt::Debug for OneShotExec {
104 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
105 let stream = self.stream.lock().unwrap();
106 f.debug_struct("OneShotExec")
107 .field("exhausted", &stream.is_none())
108 .field("schema", self.schema.as_ref())
109 .finish()
110 }
111}
112
113impl DisplayAs for OneShotExec {
114 fn fmt_as(
115 &self,
116 t: datafusion::physical_plan::DisplayFormatType,
117 f: &mut std::fmt::Formatter,
118 ) -> std::fmt::Result {
119 let stream = self.stream.lock().unwrap();
120 let exhausted = if stream.is_some() { "" } else { "EXHAUSTED" };
121 let columns = self
122 .schema
123 .field_names()
124 .iter()
125 .cloned()
126 .cloned()
127 .collect::<Vec<_>>();
128 match t {
129 DisplayFormatType::Default | DisplayFormatType::Verbose => {
130 write!(
131 f,
132 "OneShotStream: {}columns=[{}]",
133 exhausted,
134 columns.join(",")
135 )
136 }
137 DisplayFormatType::TreeRender => {
138 write!(
139 f,
140 "OneShotStream\nexhausted={}\ncolumns=[{}]",
141 exhausted,
142 columns.join(",")
143 )
144 }
145 }
146 }
147}
148
impl ExecutionPlan for OneShotExec {
    fn name(&self) -> &str {
        "OneShotExec"
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

    fn schema(&self) -> arrow_schema::SchemaRef {
        self.schema.clone()
    }

    // Leaf node: data comes from the wrapped stream, not from children.
    fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
        vec![]
    }

    fn with_new_children(
        self: Arc<Self>,
        children: Vec<Arc<dyn ExecutionPlan>>,
    ) -> datafusion_common::Result<Arc<dyn ExecutionPlan>> {
        // A leaf node can only be "rewritten" with zero children.
        if !children.is_empty() {
            return Err(datafusion_common::DataFusionError::Internal(
                "OneShotExec does not support children".to_string(),
            ));
        }
        Ok(self)
    }

    /// Hands out the wrapped stream.  Succeeds at most once: later calls
    /// (or a poisoned lock) yield an execution error.
    fn execute(
        &self,
        _partition: usize,
        _context: Arc<datafusion::execution::TaskContext>,
    ) -> datafusion_common::Result<SendableRecordBatchStream> {
        let stream = self
            .stream
            .lock()
            .map_err(|err| DataFusionError::Execution(err.to_string()))?
            .take();
        if let Some(stream) = stream {
            Ok(stream)
        } else {
            Err(DataFusionError::Execution(
                "OneShotExec has already been executed".to_string(),
            ))
        }
    }

    fn properties(&self) -> &Arc<datafusion::physical_plan::PlanProperties> {
        &self.properties
    }
}
202
/// A transparent wrapper node that executes its child inside a tracing span,
/// so work done while polling the resulting stream is attributed to `span`.
struct TracedExec {
    input: Arc<dyn ExecutionPlan>,
    // Mirrors the child's properties; this node changes nothing about the data.
    properties: Arc<PlanProperties>,
    span: Span,
}
208
209impl TracedExec {
210 pub fn new(input: Arc<dyn ExecutionPlan>, span: Span) -> Self {
211 Self {
212 properties: input.properties().clone(),
213 input,
214 span,
215 }
216 }
217}
218
219impl DisplayAs for TracedExec {
220 fn fmt_as(
221 &self,
222 t: datafusion::physical_plan::DisplayFormatType,
223 f: &mut std::fmt::Formatter,
224 ) -> std::fmt::Result {
225 match t {
226 DisplayFormatType::Default
227 | DisplayFormatType::Verbose
228 | DisplayFormatType::TreeRender => {
229 write!(f, "TracedExec")
230 }
231 }
232 }
233}
234
235impl std::fmt::Debug for TracedExec {
236 fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
237 write!(f, "TracedExec")
238 }
239}
240impl ExecutionPlan for TracedExec {
241 fn name(&self) -> &str {
242 "TracedExec"
243 }
244
245 fn as_any(&self) -> &dyn std::any::Any {
246 self
247 }
248
249 fn properties(&self) -> &Arc<PlanProperties> {
250 &self.properties
251 }
252
253 fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
254 vec![&self.input]
255 }
256
257 fn with_new_children(
258 self: Arc<Self>,
259 children: Vec<Arc<dyn ExecutionPlan>>,
260 ) -> datafusion_common::Result<Arc<dyn ExecutionPlan>> {
261 Ok(Arc::new(Self {
262 input: children[0].clone(),
263 properties: self.properties.clone(),
264 span: self.span.clone(),
265 }))
266 }
267
268 fn execute(
269 &self,
270 partition: usize,
271 context: Arc<TaskContext>,
272 ) -> datafusion_common::Result<SendableRecordBatchStream> {
273 let _guard = self.span.enter();
274 let stream = self.input.execute(partition, context)?;
275 let schema = stream.schema();
276 let stream = stream.stream_in_span(self.span.clone());
277 Ok(Box::pin(RecordBatchStreamAdapter::new(schema, stream)))
278 }
279}
280
/// Callback invoked with the aggregated metric counts after a plan's stream
/// has been fully consumed.
pub type ExecutionStatsCallback = Arc<dyn Fn(&ExecutionSummaryCounts) + Send + Sync>;
283
/// Tuning knobs for executing a DataFusion plan through Lance.
#[derive(Default, Clone)]
pub struct LanceExecutionOptions {
    // Enables disk spilling (still bypassable via LANCE_BYPASS_SPILLING).
    pub use_spilling: bool,
    // Memory pool size in bytes; when `None`, falls back to the
    // LANCE_MEM_POOL_SIZE env var or a per-partition default.
    pub mem_pool_size: Option<u64>,
    // Spill directory cap in bytes; when `None`, falls back to the
    // LANCE_MAX_TEMP_DIRECTORY_SIZE env var or a fixed default.
    pub max_temp_directory_size: Option<u64>,
    // Overrides the session's execution batch size when set.
    pub batch_size: Option<usize>,
    // Target partition count for the session config when set.
    pub target_partition: Option<usize>,
    // Invoked with the summary counts after execution completes.
    pub execution_stats_callback: Option<ExecutionStatsCallback>,
    // Suppresses plan logging and summary-metric reporting.
    pub skip_logging: bool,
}
294
295impl std::fmt::Debug for LanceExecutionOptions {
296 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
297 f.debug_struct("LanceExecutionOptions")
298 .field("use_spilling", &self.use_spilling)
299 .field("mem_pool_size", &self.mem_pool_size)
300 .field("max_temp_directory_size", &self.max_temp_directory_size)
301 .field("batch_size", &self.batch_size)
302 .field("target_partition", &self.target_partition)
303 .field("skip_logging", &self.skip_logging)
304 .field(
305 "execution_stats_callback",
306 &self.execution_stats_callback.is_some(),
307 )
308 .finish()
309 }
310}
311
312const DEFAULT_LANCE_MEM_POOL_SIZE_PER_PARTITION: u64 = 100 * 1024 * 1024;
313const DEFAULT_LANCE_MAX_TEMP_DIRECTORY_SIZE: u64 = 100 * 1024 * 1024 * 1024; impl LanceExecutionOptions {
316 pub fn mem_pool_size(&self) -> u64 {
317 let num_partitions = self.target_partition.unwrap_or(1) as u64;
318 self.mem_pool_size.unwrap_or_else(|| {
319 std::env::var("LANCE_MEM_POOL_SIZE")
320 .map(|s| match s.parse::<u64>() {
321 Ok(v) => v,
322 Err(e) => {
323 warn!("Failed to parse LANCE_MEM_POOL_SIZE: {}, using default", e);
324 DEFAULT_LANCE_MEM_POOL_SIZE_PER_PARTITION * num_partitions
325 }
326 })
327 .unwrap_or(DEFAULT_LANCE_MEM_POOL_SIZE_PER_PARTITION * num_partitions)
328 })
329 }
330
331 pub fn max_temp_directory_size(&self) -> u64 {
332 self.max_temp_directory_size.unwrap_or_else(|| {
333 std::env::var("LANCE_MAX_TEMP_DIRECTORY_SIZE")
334 .map(|s| match s.parse::<u64>() {
335 Ok(v) => v,
336 Err(e) => {
337 warn!(
338 "Failed to parse LANCE_MAX_TEMP_DIRECTORY_SIZE: {}, using default",
339 e
340 );
341 DEFAULT_LANCE_MAX_TEMP_DIRECTORY_SIZE
342 }
343 })
344 .unwrap_or(DEFAULT_LANCE_MAX_TEMP_DIRECTORY_SIZE)
345 })
346 }
347
348 pub fn use_spilling(&self) -> bool {
349 if !self.use_spilling {
350 return false;
351 }
352 std::env::var("LANCE_BYPASS_SPILLING")
353 .map(|_| {
354 info!("Bypassing spilling because LANCE_BYPASS_SPILLING is set");
355 false
356 })
357 .unwrap_or(true)
358 }
359}
360
/// Builds a fresh `SessionContext` configured from `options`.
///
/// Applies the target partition count when set, and — when spilling is
/// enabled — installs a disk manager capped at the configured temp directory
/// size plus a fair spill memory pool.  Lance's UDFs are registered on the
/// new context before it is returned.
pub fn new_session_context(options: &LanceExecutionOptions) -> SessionContext {
    let mut session_config = SessionConfig::new();
    let mut runtime_env_builder = RuntimeEnvBuilder::new();
    if let Some(target_partition) = options.target_partition {
        session_config = session_config.with_target_partitions(target_partition);
    }
    if options.use_spilling() {
        let disk_manager_builder = DiskManagerBuilder::default()
            .with_max_temp_directory_size(options.max_temp_directory_size());
        runtime_env_builder = runtime_env_builder
            .with_disk_manager_builder(disk_manager_builder)
            .with_memory_pool(Arc::new(FairSpillPool::new(
                options.mem_pool_size() as usize
            )));
    }
    // NOTE(review): unwrap assumes building the runtime env with these
    // settings cannot fail — confirm against RuntimeEnvBuilder's contract.
    let runtime_env = runtime_env_builder.build_arc().unwrap();

    let ctx = SessionContext::new_with_config_rt(session_config, runtime_env);
    register_functions(&ctx);

    ctx
}
383
/// Cache key for reusing session contexts: two option sets that resolve to
/// the same effective values share one cached context.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct SessionContextCacheKey {
    mem_pool_size: u64,
    max_temp_directory_size: u64,
    target_partition: Option<usize>,
    use_spilling: bool,
}
392
393impl SessionContextCacheKey {
394 fn from_options(options: &LanceExecutionOptions) -> Self {
395 Self {
396 mem_pool_size: options.mem_pool_size(),
397 max_temp_directory_size: options.max_temp_directory_size(),
398 target_partition: options.target_partition,
399 use_spilling: options.use_spilling(),
400 }
401 }
402}
403
/// A cached session context together with its last-access time, used for
/// LRU eviction in the process-wide session cache.
struct CachedSessionContext {
    context: SessionContext,
    last_access: std::time::Instant,
}
408
409fn get_session_cache() -> &'static Mutex<HashMap<SessionContextCacheKey, CachedSessionContext>> {
410 static SESSION_CACHE: OnceLock<Mutex<HashMap<SessionContextCacheKey, CachedSessionContext>>> =
411 OnceLock::new();
412 SESSION_CACHE.get_or_init(|| Mutex::new(HashMap::new()))
413}
414
/// Maximum number of session contexts kept in the cache.
///
/// Read once from the LANCE_SESSION_CACHE_SIZE env var (default 4); an
/// absent or unparsable value yields the default.
fn get_max_cache_size() -> usize {
    const DEFAULT_CACHE_SIZE: usize = 4;
    static MAX_CACHE_SIZE: OnceLock<usize> = OnceLock::new();
    *MAX_CACHE_SIZE.get_or_init(|| match std::env::var("LANCE_SESSION_CACHE_SIZE") {
        Ok(raw) => raw.parse().unwrap_or(DEFAULT_CACHE_SIZE),
        Err(_) => DEFAULT_CACHE_SIZE,
    })
}
425
426pub fn get_session_context(options: &LanceExecutionOptions) -> SessionContext {
427 let key = SessionContextCacheKey::from_options(options);
428 let mut cache = get_session_cache()
429 .lock()
430 .unwrap_or_else(|e| e.into_inner());
431
432 if let Some(entry) = cache.get_mut(&key) {
434 entry.last_access = std::time::Instant::now();
435 return entry.context.clone();
436 }
437
438 if cache.len() >= get_max_cache_size()
440 && let Some(lru_key) = cache
441 .iter()
442 .min_by_key(|(_, v)| v.last_access)
443 .map(|(k, _)| k.clone())
444 {
445 cache.remove(&lru_key);
446 }
447
448 let context = new_session_context(options);
449 cache.insert(
450 key,
451 CachedSessionContext {
452 context: context.clone(),
453 last_access: std::time::Instant::now(),
454 },
455 );
456 context
457}
458
459fn get_task_context(
460 session_ctx: &SessionContext,
461 options: &LanceExecutionOptions,
462) -> Arc<TaskContext> {
463 let mut state = session_ctx.state();
464 if let Some(batch_size) = options.batch_size.as_ref() {
465 state.config_mut().options_mut().execution.batch_size = *batch_size;
466 }
467
468 state.task_ctx()
469}
470
/// Aggregated metric counters gathered from every node of an executed plan.
#[derive(Default, Clone, Debug, PartialEq, Eq)]
pub struct ExecutionSummaryCounts {
    // Sum of the IOPS metric across the plan tree.
    pub iops: usize,
    // Sum of the requests metric.
    pub requests: usize,
    // Sum of the bytes-read metric.
    pub bytes_read: usize,
    // Sum of the indices-loaded metric.
    pub indices_loaded: usize,
    // Sum of the parts-loaded metric.
    pub parts_loaded: usize,
    // Sum of the index-comparisons metric.
    pub index_comparisons: usize,
    // Every other counter metric, keyed by metric name.
    pub all_counts: HashMap<String, usize>,
    // All timing metrics, keyed by metric name.
    pub all_times: HashMap<String, usize>,
}
493
494pub fn collect_execution_metrics(node: &dyn ExecutionPlan, counts: &mut ExecutionSummaryCounts) {
495 if let Some(metrics) = node.metrics() {
496 for (metric_name, count) in metrics.iter_counts() {
497 match metric_name.as_ref() {
498 IOPS_METRIC => counts.iops += count.value(),
499 REQUESTS_METRIC => counts.requests += count.value(),
500 BYTES_READ_METRIC => counts.bytes_read += count.value(),
501 INDICES_LOADED_METRIC => counts.indices_loaded += count.value(),
502 PARTS_LOADED_METRIC => counts.parts_loaded += count.value(),
503 INDEX_COMPARISONS_METRIC => counts.index_comparisons += count.value(),
504 _ => {
505 let existing = counts
506 .all_counts
507 .entry(metric_name.as_ref().to_string())
508 .or_insert(0);
509 *existing += count.value();
510 }
511 }
512 }
513 for (metric_name, time) in metrics.iter_times() {
514 let existing = counts
515 .all_times
516 .entry(metric_name.as_ref().to_string())
517 .or_insert(0);
518 *existing += time.value();
519 }
520 for (metric_name, gauge) in metrics.iter_gauges() {
522 match metric_name.as_ref() {
523 IOPS_METRIC => counts.iops += gauge.value(),
524 REQUESTS_METRIC => counts.requests += gauge.value(),
525 BYTES_READ_METRIC => counts.bytes_read += gauge.value(),
526 _ => {}
527 }
528 }
529 }
530 for child in node.children() {
531 collect_execution_metrics(child.as_ref(), counts);
532 }
533}
534
/// Emits a one-line tracing event summarizing an executed plan's metrics and
/// forwards the aggregated counts to the configured stats callback, if any.
fn report_plan_summary_metrics(plan: &dyn ExecutionPlan, options: &LanceExecutionOptions) {
    // Rows produced by the root node; 0 when metrics are unavailable.
    let output_rows = plan
        .metrics()
        .map(|m| m.output_rows().unwrap_or(0))
        .unwrap_or(0);
    let mut counts = ExecutionSummaryCounts::default();
    collect_execution_metrics(plan, &mut counts);
    tracing::info!(
        target: TRACE_EXECUTION,
        r#type = EXECUTION_PLAN_RUN,
        plan_summary = display_plan_one_liner(plan),
        output_rows,
        iops = counts.iops,
        requests = counts.requests,
        bytes_read = counts.bytes_read,
        indices_loaded = counts.indices_loaded,
        parts_loaded = counts.parts_loaded,
        index_comparisons = counts.index_comparisons,
    );
    if let Some(callback) = options.execution_stats_callback.as_ref() {
        callback(&counts);
    }
}
558
559fn display_plan_one_liner(plan: &dyn ExecutionPlan) -> String {
566 let mut output = String::new();
567
568 display_plan_one_liner_impl(plan, &mut output);
569
570 output
571}
572
573fn display_plan_one_liner_impl(plan: &dyn ExecutionPlan, output: &mut String) {
574 let name = plan.name().trim_end_matches("Exec");
576 output.push_str(name);
577
578 let children = plan.children();
579 if !children.is_empty() {
580 output.push('(');
581 for (i, child) in children.iter().enumerate() {
582 if i > 0 {
583 output.push(',');
584 }
585 display_plan_one_liner_impl(child.as_ref(), output);
586 }
587 output.push(')');
588 }
589}
590
/// Executes a single-partition plan and returns its record batch stream.
///
/// Unless `skip_logging` is set, the plan is logged before execution and
/// its summary metrics are reported (and the optional stats callback
/// invoked) after the returned stream terminates.
///
/// Panics if the plan has more than one output partition.
pub fn execute_plan(
    plan: Arc<dyn ExecutionPlan>,
    options: LanceExecutionOptions,
) -> Result<SendableRecordBatchStream> {
    if !options.skip_logging {
        debug!(
            "Executing plan:\n{}",
            DisplayableExecutionPlan::new(plan.as_ref()).indent(true)
        );
    }

    let session_ctx = get_session_context(&options);

    // Plans routed through this helper must produce exactly one partition;
    // a multi-partition plan here is a programming error.
    assert_eq!(plan.properties().partitioning.partition_count(), 1);
    let stream = plan.execute(0, get_task_context(&session_ctx, &options))?;

    let schema = stream.schema();
    // `finally` defers the metric report until the stream is done, so the
    // counters reflect the whole execution.
    let stream = stream.finally(move || {
        if !options.skip_logging {
            report_plan_summary_metrics(plan.as_ref(), &options);
        }
    });
    Ok(Box::pin(RecordBatchStreamAdapter::new(schema, stream)))
}
620
/// Runs `plan` to completion under an `AnalyzeExec` wrapper and returns the
/// plan rendered with its runtime metrics (see [`format_plan`]).
///
/// Panics if the wrapped plan has more than one output partition.
pub async fn analyze_plan(
    plan: Arc<dyn ExecutionPlan>,
    options: LanceExecutionOptions,
) -> Result<String> {
    // Trace execution under the caller's current span.
    let plan = Arc::new(TracedExec::new(plan, Span::current()));

    let schema = plan.schema();
    let analyze = Arc::new(AnalyzeExec::new(
        true,
        true,
        vec![MetricType::SUMMARY],
        plan,
        schema,
    ));

    let session_ctx = get_session_context(&options);
    assert_eq!(analyze.properties().partitioning.partition_count(), 1);
    let mut stream = analyze
        .execute(0, get_task_context(&session_ctx, &options))
        .map_err(|err| Error::io(format!("Failed to execute analyze plan: {}", err)))?;

    // Drain the stream: only the metrics matter here, not the batches.
    while (stream.next().await).is_some() {}

    let result = format_plan(analyze);
    Ok(result)
}
651
/// Renders an executed plan tree as an indented, one-node-per-line string,
/// annotating each node with its elapsed wall time (derived from start/end
/// timestamp metrics) and its aggregated metrics.
pub fn format_plan(plan: Arc<dyn ExecutionPlan>) -> String {
    // First pass: walk the tree in pre-order, numbering each node and
    // computing its elapsed time from its own and its children's timestamps.
    struct CalculateVisitor {
        // Pre-order index of the most recently visited node.
        highest_index: usize,
        // Node index -> elapsed wall time; absent when timestamps are missing.
        index_to_elapsed: HashMap<usize, Duration>,
    }

    // Earliest start / latest end seen anywhere within a subtree.
    struct SubtreeMetrics {
        min_start: Option<DateTime<Utc>>,
        max_end: Option<DateTime<Utc>>,
    }

    impl CalculateVisitor {
        fn calculate_metrics(&mut self, plan: &Arc<dyn ExecutionPlan>) -> SubtreeMetrics {
            self.highest_index += 1;
            let plan_index = self.highest_index;

            let (mut min_start, mut max_end) = Self::node_timerange(plan);

            // A node's elapsed range covers its children too, since children
            // run as part of the parent's execution.
            for child in plan.children() {
                let child_metrics = self.calculate_metrics(child);
                min_start = Self::min_option(min_start, child_metrics.min_start);
                max_end = Self::max_option(max_end, child_metrics.max_end);
            }

            let elapsed = match (min_start, max_end) {
                (Some(start), Some(end)) => Some((end - start).to_std().unwrap_or_default()),
                _ => None,
            };

            if let Some(e) = elapsed {
                self.index_to_elapsed.insert(plan_index, e);
            }

            SubtreeMetrics { min_start, max_end }
        }

        // The min start / max end timestamps recorded by this node itself.
        fn node_timerange(
            plan: &Arc<dyn ExecutionPlan>,
        ) -> (Option<DateTime<Utc>>, Option<DateTime<Utc>>) {
            let Some(metrics) = plan.metrics() else {
                return (None, None);
            };
            let min_start = metrics
                .iter()
                .filter_map(|m| match m.value() {
                    MetricValue::StartTimestamp(ts) => ts.value(),
                    _ => None,
                })
                .min();
            let max_end = metrics
                .iter()
                .filter_map(|m| match m.value() {
                    MetricValue::EndTimestamp(ts) => ts.value(),
                    _ => None,
                })
                .max();
            (min_start, max_end)
        }

        fn min_option(a: Option<DateTime<Utc>>, b: Option<DateTime<Utc>>) -> Option<DateTime<Utc>> {
            [a, b].into_iter().flatten().min()
        }

        fn max_option(a: Option<DateTime<Utc>>, b: Option<DateTime<Utc>>) -> Option<DateTime<Utc>> {
            [a, b].into_iter().flatten().max()
        }
    }

    // Second pass: print each node, looking up its elapsed time by the same
    // pre-order index the first pass assigned.
    struct PrintVisitor {
        // Must advance in lockstep with CalculateVisitor's numbering.
        highest_index: usize,
        indent: usize,
    }
    impl PrintVisitor {
        fn write_output(
            &mut self,
            plan: &Arc<dyn ExecutionPlan>,
            f: &mut Formatter,
            calcs: &CalculateVisitor,
        ) -> std::fmt::Result {
            self.highest_index += 1;
            write!(f, "{:indent$}", "", indent = self.indent * 2)?;

            let displayable =
                datafusion::physical_plan::display::DisplayableExecutionPlan::new(plan.as_ref());
            let plan_str = displayable.one_line().to_string();
            let plan_str = plan_str.trim();

            match calcs.index_to_elapsed.get(&self.highest_index) {
                // Splice `elapsed=` right after the node name (before the
                // first ": ") when the one-liner has parameters; otherwise
                // append it at the end.
                Some(elapsed) => match plan_str.find(": ") {
                    Some(i) => write!(
                        f,
                        "{}: elapsed={elapsed:?}, {}",
                        &plan_str[..i],
                        &plan_str[i + 2..]
                    )?,
                    None => write!(f, "{plan_str}, elapsed={elapsed:?}")?,
                },
                None => write!(f, "{plan_str}")?,
            }

            if let Some(metrics) = plan.metrics() {
                // Timestamps were already folded into `elapsed`, so drop them
                // from the per-node metric listing.
                let metrics = metrics
                    .aggregate_by_name()
                    .sorted_for_display()
                    .timestamps_removed();

                write!(f, ", metrics=[{metrics}]")?;
            } else {
                write!(f, ", metrics=[]")?;
            }
            writeln!(f)?;
            self.indent += 1;
            for child in plan.children() {
                self.write_output(child, f, calcs)?;
            }
            self.indent -= 1;
            std::fmt::Result::Ok(())
        }
    }
    // Adapter so the two visitors can drive a `Display` implementation.
    struct PrintWrapper {
        plan: Arc<dyn ExecutionPlan>,
    }
    impl fmt::Display for PrintWrapper {
        fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
            let mut calcs = CalculateVisitor {
                highest_index: 0,
                index_to_elapsed: HashMap::new(),
            };
            calcs.calculate_metrics(&self.plan);
            let mut prints = PrintVisitor {
                highest_index: 0,
                indent: 0,
            };
            prints.write_output(&self.plan, f, &calcs)
        }
    }
    let wrapper = PrintWrapper { plan };
    format!("{}", wrapper)
}
800
/// Lance extensions to DataFusion's `SessionContext`.
pub trait SessionContextExt {
    /// Creates a DataFrame backed by a stream that can be scanned only once.
    fn read_one_shot(
        &self,
        data: SendableRecordBatchStream,
    ) -> datafusion::common::Result<DataFrame>;
}
810
/// A `PartitionStream` wrapping a single stream that can be executed once;
/// the stream is taken out of the mutex on first `execute`.
pub struct OneShotPartitionStream {
    // `None` once the stream has been consumed.
    data: Arc<Mutex<Option<SendableRecordBatchStream>>>,
    schema: Arc<ArrowSchema>,
}
815
816impl std::fmt::Debug for OneShotPartitionStream {
817 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
818 let data = self.data.lock().unwrap();
819 f.debug_struct("OneShotPartitionStream")
820 .field("exhausted", &data.is_none())
821 .field("schema", self.schema.as_ref())
822 .finish()
823 }
824}
825
826impl OneShotPartitionStream {
827 pub fn new(data: SendableRecordBatchStream) -> Self {
828 let schema = data.schema();
829 Self {
830 data: Arc::new(Mutex::new(Some(data))),
831 schema,
832 }
833 }
834}
835
impl PartitionStream for OneShotPartitionStream {
    fn schema(&self) -> &arrow_schema::SchemaRef {
        &self.schema
    }

    /// Takes the wrapped stream out of the mutex.
    ///
    /// Panics if called a second time — the underlying data can only be
    /// read once by design.
    fn execute(&self, _ctx: Arc<TaskContext>) -> SendableRecordBatchStream {
        let mut stream = self.data.lock().unwrap();
        stream
            .take()
            .expect("Attempt to consume a one shot dataframe multiple times")
    }
}
848
impl SessionContextExt for SessionContext {
    fn read_one_shot(
        &self,
        data: SendableRecordBatchStream,
    ) -> datafusion::common::Result<DataFrame> {
        let schema = data.schema();
        // Expose the stream as a single-partition streaming table so the
        // standard read_table path can build a DataFrame over it.
        let part_stream = Arc::new(OneShotPartitionStream::new(data));
        let provider = StreamingTable::try_new(schema, vec![part_stream])?;
        self.read_table(Arc::new(provider))
    }
}
860
/// Wraps an input plan and re-chunks its output through
/// `StrictBatchSizeStream` to enforce a fixed batch size.
#[derive(Clone, Debug)]
pub struct StrictBatchSizeExec {
    input: Arc<dyn ExecutionPlan>,
    // Target number of rows per output batch.
    batch_size: usize,
}

impl StrictBatchSizeExec {
    pub fn new(input: Arc<dyn ExecutionPlan>, batch_size: usize) -> Self {
        Self { input, batch_size }
    }
}
872
873impl DisplayAs for StrictBatchSizeExec {
874 fn fmt_as(
875 &self,
876 _t: datafusion::physical_plan::DisplayFormatType,
877 f: &mut std::fmt::Formatter,
878 ) -> std::fmt::Result {
879 write!(f, "StrictBatchSizeExec")
880 }
881}
882
883impl ExecutionPlan for StrictBatchSizeExec {
884 fn name(&self) -> &str {
885 "StrictBatchSizeExec"
886 }
887
888 fn as_any(&self) -> &dyn std::any::Any {
889 self
890 }
891
892 fn properties(&self) -> &Arc<PlanProperties> {
893 self.input.properties()
894 }
895
896 fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
897 vec![&self.input]
898 }
899
900 fn with_new_children(
901 self: Arc<Self>,
902 children: Vec<Arc<dyn ExecutionPlan>>,
903 ) -> datafusion_common::Result<Arc<dyn ExecutionPlan>> {
904 Ok(Arc::new(Self {
905 input: children[0].clone(),
906 batch_size: self.batch_size,
907 }))
908 }
909
910 fn execute(
911 &self,
912 partition: usize,
913 context: Arc<TaskContext>,
914 ) -> datafusion_common::Result<SendableRecordBatchStream> {
915 let stream = self.input.execute(partition, context)?;
916 let schema = stream.schema();
917 let stream = StrictBatchSizeStream::new(stream, self.batch_size);
918 Ok(Box::pin(RecordBatchStreamAdapter::new(schema, stream)))
919 }
920
921 fn maintains_input_order(&self) -> Vec<bool> {
922 vec![true]
923 }
924
925 fn benefits_from_input_partitioning(&self) -> Vec<bool> {
926 vec![false]
927 }
928
929 fn partition_statistics(
930 &self,
931 partition: Option<usize>,
932 ) -> datafusion_common::Result<Statistics> {
933 self.input.partition_statistics(partition)
934 }
935
936 fn cardinality_effect(&self) -> CardinalityEffect {
937 CardinalityEffect::Equal
938 }
939
940 fn supports_limit_pushdown(&self) -> bool {
941 true
942 }
943}
944
/// Wraps an input plan and re-chunks its batches so that no emitted batch
/// exceeds `max_bytes` of array memory.
#[derive(Clone, Debug)]
pub struct HardCapBatchSizeExec {
    input: Arc<dyn ExecutionPlan>,
    // Upper bound, in bytes, on each output batch's array memory size.
    max_bytes: usize,
}

impl HardCapBatchSizeExec {
    pub fn new(input: Arc<dyn ExecutionPlan>, max_bytes: usize) -> Self {
        Self { input, max_bytes }
    }
}
978
979impl DisplayAs for HardCapBatchSizeExec {
980 fn fmt_as(
981 &self,
982 _t: datafusion::physical_plan::DisplayFormatType,
983 f: &mut std::fmt::Formatter,
984 ) -> std::fmt::Result {
985 write!(f, "HardCapBatchSizeExec(max_bytes={})", self.max_bytes)
986 }
987}
988
989impl ExecutionPlan for HardCapBatchSizeExec {
990 fn name(&self) -> &str {
991 "HardCapBatchSizeExec"
992 }
993
994 fn as_any(&self) -> &dyn std::any::Any {
995 self
996 }
997
998 fn properties(&self) -> &Arc<PlanProperties> {
999 self.input.properties()
1000 }
1001
1002 fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
1003 vec![&self.input]
1004 }
1005
1006 fn with_new_children(
1007 self: Arc<Self>,
1008 children: Vec<Arc<dyn ExecutionPlan>>,
1009 ) -> datafusion_common::Result<Arc<dyn ExecutionPlan>> {
1010 Ok(Arc::new(Self {
1011 input: children[0].clone(),
1012 max_bytes: self.max_bytes,
1013 }))
1014 }
1015
1016 fn execute(
1017 &self,
1018 partition: usize,
1019 context: Arc<TaskContext>,
1020 ) -> datafusion_common::Result<SendableRecordBatchStream> {
1021 let stream = self.input.execute(partition, context)?;
1022 let schema = stream.schema();
1023 let max_bytes = self.max_bytes;
1024 let rechunked = lance_arrow::stream::rechunk_stream_by_size_deep_copy(
1025 stream,
1026 schema.clone(),
1027 0,
1028 max_bytes,
1029 );
1030 let validated = rechunked.map(move |result| {
1032 let batch = result?;
1033 if batch.num_rows() == 1 && batch.get_array_memory_size() > max_bytes {
1034 return Err(DataFusionError::External(Box::new(Error::invalid_input(
1035 format!(
1036 "a single row is {} bytes which exceeds the maximum allowed batch \
1037 size of {} bytes",
1038 batch.get_array_memory_size(),
1039 max_bytes,
1040 ),
1041 ))));
1042 }
1043 Ok(batch)
1044 });
1045 Ok(Box::pin(RecordBatchStreamAdapter::new(schema, validated)))
1046 }
1047
1048 fn maintains_input_order(&self) -> Vec<bool> {
1049 vec![true]
1050 }
1051
1052 fn benefits_from_input_partitioning(&self) -> Vec<bool> {
1053 vec![false]
1054 }
1055
1056 fn partition_statistics(
1057 &self,
1058 partition: Option<usize>,
1059 ) -> datafusion_common::Result<Statistics> {
1060 self.input.partition_statistics(partition)
1061 }
1062
1063 fn cardinality_effect(&self) -> CardinalityEffect {
1064 CardinalityEffect::Equal
1065 }
1066
1067 fn supports_limit_pushdown(&self) -> bool {
1068 true
1069 }
1070}
1071
#[cfg(test)]
mod tests {
    use super::*;

    // Serializes tests that mutate the process-wide session cache, since
    // `get_session_cache` returns a shared static.
    static CACHE_TEST_LOCK: std::sync::Mutex<()> = std::sync::Mutex::new(());

    #[test]
    fn test_session_context_cache() {
        let _lock = CACHE_TEST_LOCK.lock().unwrap();
        let cache = get_session_cache();

        // Start from a clean slate; other tests share this global cache.
        cache.lock().unwrap().clear();

        let opts1 = LanceExecutionOptions::default();
        let _ctx1 = get_session_context(&opts1);

        {
            let cache_guard = cache.lock().unwrap();
            assert_eq!(cache_guard.len(), 1);
        }

        // Same options -> cache hit, no new entry.
        let _ctx1_again = get_session_context(&opts1);
        {
            let cache_guard = cache.lock().unwrap();
            assert_eq!(cache_guard.len(), 1);
        }

        // Different effective options -> a second cache entry.
        let opts2 = LanceExecutionOptions {
            use_spilling: true,
            ..Default::default()
        };
        let _ctx2 = get_session_context(&opts2);
        {
            let cache_guard = cache.lock().unwrap();
            assert_eq!(cache_guard.len(), 2);
        }
    }

    #[test]
    fn test_session_context_cache_lru_eviction() {
        let _lock = CACHE_TEST_LOCK.lock().unwrap();
        let cache = get_session_cache();

        cache.lock().unwrap().clear();

        // Fill the cache to its default capacity (4) with distinct configs.
        let configs: Vec<LanceExecutionOptions> = (0..4)
            .map(|i| LanceExecutionOptions {
                mem_pool_size: Some((i + 1) as u64 * 1024 * 1024),
                ..Default::default()
            })
            .collect();

        for config in &configs {
            let _ctx = get_session_context(config);
        }

        {
            let cache_guard = cache.lock().unwrap();
            assert_eq!(cache_guard.len(), 4);
        }

        // Sleep so the refreshed access time is measurably newer, then touch
        // config[0] to make config[1] the LRU entry.
        std::thread::sleep(std::time::Duration::from_millis(1));
        let _ctx = get_session_context(&configs[0]);

        // Inserting a fifth config must evict exactly one entry (the LRU).
        let opts5 = LanceExecutionOptions {
            mem_pool_size: Some(5 * 1024 * 1024),
            ..Default::default()
        };
        let _ctx5 = get_session_context(&opts5);

        {
            let cache_guard = cache.lock().unwrap();
            assert_eq!(cache_guard.len(), 4);

            let key0 = SessionContextCacheKey::from_options(&configs[0]);
            assert!(
                cache_guard.contains_key(&key0),
                "config[0] should still be cached after recent access"
            );

            let key1 = SessionContextCacheKey::from_options(&configs[1]);
            assert!(
                !cache_guard.contains_key(&key1),
                "config[1] should have been evicted"
            );

            let key5 = SessionContextCacheKey::from_options(&opts5);
            assert!(
                cache_guard.contains_key(&key5),
                "new config should be cached"
            );
        }
    }

    #[test]
    fn test_mem_pool_size_scales_with_partitions() {
        let default_per_partition = DEFAULT_LANCE_MEM_POOL_SIZE_PER_PARTITION;

        // No target partitions -> one partition's worth of memory.
        let opts = LanceExecutionOptions::default();
        assert_eq!(opts.mem_pool_size(), default_per_partition);

        // The default pool size scales linearly with target partitions.
        let opts = LanceExecutionOptions {
            target_partition: Some(4),
            ..Default::default()
        };
        assert_eq!(opts.mem_pool_size(), default_per_partition * 4);

        let opts = LanceExecutionOptions {
            target_partition: Some(8),
            ..Default::default()
        };
        assert_eq!(opts.mem_pool_size(), default_per_partition * 8);

        // An explicit pool size wins over partition-based scaling.
        let opts = LanceExecutionOptions {
            mem_pool_size: Some(50 * 1024 * 1024),
            target_partition: Some(8),
            ..Default::default()
        };
        assert_eq!(opts.mem_pool_size(), 50 * 1024 * 1024);
    }
}