// qail_pg/driver/auto_mode.rs

//! Auto strategy planner for count-oriented query execution.
/// Chosen execution path for auto count planning.
///
/// Variant order is load-bearing only for readers; no explicit
/// discriminants are relied upon anywhere in this module.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AutoCountPath {
    /// Single command path using cached prepared execution.
    SingleCached,
    /// Pipeline path that parses each query in-batch (no template cache).
    PipelineOneShot,
    /// Pipeline path that reuses cached prepared templates.
    PipelineCached,
    /// Parallel pool path fanning out over multiple connections.
    PoolParallel,
}
15
16/// Auto execution plan.
17#[derive(Debug, Clone, Copy, PartialEq, Eq)]
18pub struct AutoCountPlan {
19    pub path: AutoCountPath,
20    pub workers: usize,
21    pub chunk_size: usize,
22}
23
24impl AutoCountPlan {
25    /// Batch-size threshold where cached pipeline becomes preferable.
26    pub const CACHE_MIN_BATCH: usize = 8;
27    /// Batch-size threshold where pooled parallel execution becomes worthwhile.
28    pub const POOL_MIN_BATCH: usize = 4_096;
29    /// Keep enough work per worker to avoid coordination overhead.
30    pub const POOL_MIN_CHUNK_PER_WORKER: usize = 1_024;
31    /// Hard cap to avoid over-fragmenting a single request.
32    pub const POOL_MAX_WORKERS: usize = 16;
33
34    #[inline]
35    pub fn for_driver(batch_len: usize) -> Self {
36        if batch_len <= 1 {
37            return Self {
38                path: AutoCountPath::SingleCached,
39                workers: 1,
40                chunk_size: batch_len.max(1),
41            };
42        }
43
44        if batch_len < Self::CACHE_MIN_BATCH {
45            return Self {
46                path: AutoCountPath::PipelineOneShot,
47                workers: 1,
48                chunk_size: batch_len,
49            };
50        }
51
52        Self {
53            path: AutoCountPath::PipelineCached,
54            workers: 1,
55            chunk_size: batch_len,
56        }
57    }
58
59    #[inline]
60    pub fn for_pool(batch_len: usize, max_connections: usize, available_slots: usize) -> Self {
61        let driver_plan = Self::for_driver(batch_len);
62
63        if batch_len < Self::POOL_MIN_BATCH || max_connections < 2 || available_slots < 2 {
64            return driver_plan;
65        }
66
67        let by_chunk = batch_len / Self::POOL_MIN_CHUNK_PER_WORKER;
68        if by_chunk < 2 {
69            return driver_plan;
70        }
71
72        let workers = max_connections
73            .min(available_slots)
74            .min(Self::POOL_MAX_WORKERS)
75            .min(by_chunk);
76
77        if workers < 2 {
78            return driver_plan;
79        }
80
81        let chunk_size = batch_len.div_ceil(workers);
82        Self {
83            path: AutoCountPath::PoolParallel,
84            workers,
85            chunk_size,
86        }
87    }
88}
89
#[cfg(test)]
mod tests {
    use super::{AutoCountPath, AutoCountPlan};

    /// Single-connection planning picks the expected path per batch size.
    #[test]
    fn driver_auto_plan_resolves_expected_paths() {
        // Degenerate batches still produce a usable single-command plan.
        let p0 = AutoCountPlan::for_driver(0);
        assert_eq!(p0.path, AutoCountPath::SingleCached);
        assert_eq!(p0.workers, 1);

        let p1 = AutoCountPlan::for_driver(1);
        assert_eq!(p1.path, AutoCountPath::SingleCached);
        assert_eq!(p1.chunk_size, 1);

        // Below the cache threshold: one-shot pipeline.
        let p2 = AutoCountPlan::for_driver(2);
        assert_eq!(p2.path, AutoCountPath::PipelineOneShot);

        // At the cache threshold: cached pipeline.
        let p8 = AutoCountPlan::for_driver(8);
        assert_eq!(p8.path, AutoCountPath::PipelineCached);
    }

    /// Small batches never trigger parallel fan-out, even with a roomy pool.
    #[test]
    fn pool_auto_plan_falls_back_when_parallel_not_worth_it() {
        let p = AutoCountPlan::for_pool(512, 10, 10);
        assert_eq!(p.path, AutoCountPath::PipelineCached);
        assert_eq!(p.workers, 1);
    }

    /// Large batches with available slots go parallel with sane chunking.
    #[test]
    fn pool_auto_plan_uses_parallel_when_thresholds_met() {
        let p = AutoCountPlan::for_pool(16_384, 10, 9);
        assert_eq!(p.path, AutoCountPath::PoolParallel);
        assert!(p.workers >= 2);
        assert!(p.chunk_size >= AutoCountPlan::POOL_MIN_CHUNK_PER_WORKER);
    }
}