// cbtop/grammar/strategy.rs

use super::resources::ResourceMapping;
use super::workload::WorkloadSpec;
/// SIMD instruction-set selection for [`ExecutionStrategy::Simd`].
///
/// `Auto` leaves the choice to the executing backend; the remaining
/// variants pin a specific instruction set (x86 SSE2/AVX2/AVX-512,
/// ARM NEON, or WebAssembly SIMD).
// Hash added alongside the existing derives so strategies keyed on a
// width can live in HashMap/HashSet; all variants are unit, so every
// standard derive is free here.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum SimdWidth {
    /// Let the backend pick the widest width it supports.
    Auto,
    Sse2,
    Avx2,
    Avx512,
    Neon,
    Wasm,
}
22
/// Selects which GPU device an [`ExecutionStrategy::Gpu`] targets.
// Every variant carries at most a u32, so Copy/Eq/Hash are free;
// the original only derived Clone + PartialEq, forcing needless
// clones and ruling out use as a map key. Adding derives is
// backward compatible for all existing callers.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum GpuDevice {
    /// Let the backend choose a device.
    Auto,
    /// Backend-agnostic device index.
    Id(u32),
    /// CUDA device, by index.
    Cuda(u32),
    /// wgpu adapter, by index.
    Wgpu(u32),
}
35
/// Explicit GPU kernel launch configuration, optionally attached to an
/// [`ExecutionStrategy::Gpu`].
// All fields (String, u32 tuples, Option, usize) are Eq + Hash, so the
// full derives are free; the original stopped at PartialEq.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct KernelSpec {
    /// Kernel entry-point name.
    pub name: String,
    /// Threads per block, as (x, y, z).
    pub block_size: (u32, u32, u32),
    /// Blocks per grid, as (x, y, z); `None` if unspecified.
    pub grid_size: Option<(u32, u32, u32)>,
    /// Shared memory per block, in bytes.
    pub shared_mem: usize,
}
48
/// How a workload should be executed.
///
/// NOTE(review): `PartialEq` only — `Hybrid` carries an `f64`, which has
/// no total equality, so `Eq` cannot be derived for this enum.
#[derive(Debug, Clone, PartialEq)]
pub enum ExecutionStrategy {
    /// Plain single-threaded execution.
    Sequential,
    /// Vectorized execution at the given SIMD width.
    Simd { width: SimdWidth },
    /// Multi-threaded execution with `threads` workers; work is split at
    /// `chunk_size` granularity (the `parallel` constructor defaults this
    /// to 1024).
    Parallel { threads: usize, chunk_size: usize },
    /// GPU execution on `device`, optionally with an explicit launch
    /// configuration.
    Gpu {
        device: GpuDevice,
        kernel: Option<KernelSpec>,
    },
    /// Execution spread across the named nodes.
    Distributed { nodes: Vec<String> },
    /// Mixed CPU/GPU execution; `cpu_fraction` is the CPU's share of the
    /// work — presumably in `[0, 1]`, but not validated here (TODO confirm
    /// with the consumer of this type).
    Hybrid { cpu_fraction: f64 },
}
68
69impl ExecutionStrategy {
70 pub fn simd_auto() -> Self {
72 ExecutionStrategy::Simd {
73 width: SimdWidth::Auto,
74 }
75 }
76
77 pub fn simd(width: SimdWidth) -> Self {
79 ExecutionStrategy::Simd { width }
80 }
81
82 pub fn parallel(threads: usize) -> Self {
84 ExecutionStrategy::Parallel {
85 threads,
86 chunk_size: 1024,
87 }
88 }
89
90 pub fn gpu_auto() -> Self {
92 ExecutionStrategy::Gpu {
93 device: GpuDevice::Auto,
94 kernel: None,
95 }
96 }
97
98 pub fn gpu(device: GpuDevice) -> Self {
100 ExecutionStrategy::Gpu {
101 device,
102 kernel: None,
103 }
104 }
105}
106
/// One layer of an execution plan: a strategy together with its optional
/// workload description, resource mapping, and priority.
#[derive(Debug, Clone, PartialEq)]
pub struct StrategyLayer {
    // The execution strategy this layer applies.
    pub strategy: ExecutionStrategy,
    // Optional workload description; `new` initializes this to None.
    pub workload: Option<WorkloadSpec>,
    // Resource assignment; `new` initializes this via ResourceMapping::default().
    pub resources: ResourceMapping,
    // Priority; `new` initializes this to 0. Whether higher or lower wins
    // is decided by whatever consumes layers — not visible in this file.
    pub priority: i32,
}
119
120impl StrategyLayer {
121 pub fn new(strategy: ExecutionStrategy) -> Self {
123 Self {
124 strategy,
125 workload: None,
126 resources: ResourceMapping::default(),
127 priority: 0,
128 }
129 }
130
131 pub fn priority(mut self, priority: i32) -> Self {
133 self.priority = priority;
134 self
135 }
136}