#![allow(non_camel_case_types)]
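//! SIMD comparison kernels used during predicate evaluation.
//!
//! The 8-lane helpers below produce a `u8` lane mask (bit `j` set when lane
//! `j` satisfies the comparison). They are used both for per-row filtering
//! (`mask8_rows_*` / `apply_rows_mask_*`) and for chunk-level min/max pruning
//! (`mask8_ranges_*` / `apply_chunk_mask_ranges_*_bits`).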

use bitvec::prelude::BitVec;
use wide::*;

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum DataType {
    Int32,
    Int64,
    Float32,
    Float64,
    String,
    DateTime,
}

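/// 8-lane `i64` vector assembled from two `wide::i64x4` halves.
///
/// Comparison methods return a `u8` lane mask: bit `j` is set when lane `j`
/// satisfies the predicate (low half in bits 0-3, high half in bits 4-7).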
#[derive(Debug, Clone, Copy)]
pub struct i64x8 {
    low: i64x4,
    high: i64x4,
}

impl i64x8 {
    #[inline]
    pub fn splat(value: i64) -> Self {
        Self {
            low: i64x4::splat(value),
            high: i64x4::splat(value),
        }
    }

    #[inline]
    pub fn from_slice(slice: &[i64]) -> Self {
        debug_assert_eq!(slice.len(), 8);
        Self {
            low: i64x4::from(&slice[0..4]),
            high: i64x4::from(&slice[4..8]),
        }
    }

    #[inline]
    pub fn cmp_eq(self, other: Self) -> u8 {
        let low_mask = self.low.cmp_eq(other.low).move_mask();
        let high_mask = self.high.cmp_eq(other.high).move_mask();
        (low_mask as u8) | ((high_mask as u8) << 4)
    }

    #[inline]
    pub fn cmp_gt(self, other: Self) -> u8 {
        let low_mask = self.low.cmp_gt(other.low).move_mask();
        let high_mask = self.high.cmp_gt(other.high).move_mask();
        (low_mask as u8) | ((high_mask as u8) << 4)
    }

    #[inline]
    pub fn cmp_gte(self, other: Self) -> u8 {
        // `a >= b` is the lane-wise negation of `a < b`; `!` on the u8 mask
        // flips exactly the eight lane bits.
        !self.cmp_lt(other)
    }

    #[inline]
    pub fn cmp_lt(self, other: Self) -> u8 {
        let low_mask = self.low.cmp_lt(other.low).move_mask();
        let high_mask = self.high.cmp_lt(other.high).move_mask();
        (low_mask as u8) | ((high_mask as u8) << 4)
    }

    #[inline]
    pub fn cmp_lte(self, other: Self) -> u8 {
        !self.cmp_gt(other)
    }

    #[inline]
    pub fn min(self, other: Self) -> Self {
        // Where `self > other`, blend in `other`; otherwise keep `self`.
        let low_mask = self.low.cmp_gt(other.low);
        let high_mask = self.high.cmp_gt(other.high);
        Self {
            low: low_mask.blend(other.low, self.low),
            high: high_mask.blend(other.high, self.high),
        }
    }

    #[inline]
    pub fn max(self, other: Self) -> Self {
        // Where `self < other`, blend in `other`; otherwise keep `self`.
        let low_mask = self.low.cmp_lt(other.low);
        let high_mask = self.high.cmp_lt(other.high);
        Self {
            low: low_mask.blend(other.low, self.low),
            high: high_mask.blend(other.high, self.high),
        }
    }
}

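/// 8-lane `f64` vector assembled from two `wide::f64x4` halves, with the
/// same `u8` lane-mask convention as [`i64x8`].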
#[derive(Debug, Clone, Copy)]
pub struct f64x8 {
    low: f64x4,
    high: f64x4,
}

impl f64x8 {
    #[inline]
    pub fn splat(value: f64) -> Self {
        Self {
            low: f64x4::splat(value),
            high: f64x4::splat(value),
        }
    }

    #[inline]
    pub fn from_slice(slice: &[f64]) -> Self {
        debug_assert_eq!(slice.len(), 8);
        Self {
            low: f64x4::from(&slice[0..4]),
            high: f64x4::from(&slice[4..8]),
        }
    }

    #[inline]
    pub fn cmp_eq(self, other: Self) -> u8 {
        let low_mask = self.low.cmp_eq(other.low).move_mask();
        let high_mask = self.high.cmp_eq(other.high).move_mask();
        (low_mask as u8) | ((high_mask as u8) << 4)
    }

    #[inline]
    pub fn cmp_gt(self, other: Self) -> u8 {
        let low_mask = self.low.cmp_gt(other.low).move_mask();
        let high_mask = self.high.cmp_gt(other.high).move_mask();
        (low_mask as u8) | ((high_mask as u8) << 4)
    }

    #[inline]
    pub fn cmp_ge(self, other: Self) -> u8 {
        let low_mask = self.low.cmp_ge(other.low).move_mask();
        let high_mask = self.high.cmp_ge(other.high).move_mask();
        (low_mask as u8) | ((high_mask as u8) << 4)
    }

    #[inline]
    pub fn cmp_lt(self, other: Self) -> u8 {
        let low_mask = self.low.cmp_lt(other.low).move_mask();
        let high_mask = self.high.cmp_lt(other.high).move_mask();
        (low_mask as u8) | ((high_mask as u8) << 4)
    }

    #[inline]
    pub fn cmp_le(self, other: Self) -> u8 {
        let low_mask = self.low.cmp_le(other.low).move_mask();
        let high_mask = self.high.cmp_le(other.high).move_mask();
        (low_mask as u8) | ((high_mask as u8) << 4)
    }

    #[inline]
    pub fn min(self, other: Self) -> Self {
        Self {
            low: self.low.min(other.low),
            high: self.high.min(other.high),
        }
    }

    #[inline]
    pub fn max(self, other: Self) -> Self {
        Self {
            low: self.low.max(other.low),
            high: self.high.max(other.high),
        }
    }
}

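/// 8-lane `u64` vector assembled from two `wide::u64x4` halves.
///
/// Equality reuses the signed lane comparison (the bit patterns are the
/// same), while the ordered comparisons and min/max use scalar loops, since a
/// signed lane compare would order values with the top bit set incorrectly.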
#[derive(Debug, Clone, Copy)]
pub struct u64x8 {
    low: u64x4,
    high: u64x4,
}

impl u64x8 {
    #[inline]
    pub fn splat(value: u64) -> Self {
        Self {
            low: u64x4::splat(value),
            high: u64x4::splat(value),
        }
    }

    #[inline]
    pub fn from_slice(slice: &[u64]) -> Self {
        debug_assert_eq!(slice.len(), 8);
        Self {
            low: u64x4::from(&slice[0..4]),
            high: u64x4::from(&slice[4..8]),
        }
    }

    #[inline]
    pub fn cmp_eq(self, other: Self) -> u8 {
        use std::mem::transmute;
        // Lane-wise equality gives the same answer for u64 and i64 bit
        // patterns, so the signed comparison can be reused.
        let self_i64 = i64x8 {
            low: unsafe { transmute::<u64x4, i64x4>(self.low) },
            high: unsafe { transmute::<u64x4, i64x4>(self.high) },
        };
        let other_i64 = i64x8 {
            low: unsafe { transmute::<u64x4, i64x4>(other.low) },
            high: unsafe { transmute::<u64x4, i64x4>(other.high) },
        };
        self_i64.cmp_eq(other_i64)
    }

    #[inline]
    pub fn cmp_gt(self, other: Self) -> u8 {
        let mut result = 0u8;
        let self_low: [u64; 4] = unsafe { std::mem::transmute::<u64x4, [u64; 4]>(self.low) };
        let other_low: [u64; 4] = unsafe { std::mem::transmute::<u64x4, [u64; 4]>(other.low) };
        let self_high: [u64; 4] = unsafe { std::mem::transmute::<u64x4, [u64; 4]>(self.high) };
        let other_high: [u64; 4] = unsafe { std::mem::transmute::<u64x4, [u64; 4]>(other.high) };
        for i in 0..4 {
            if self_low[i] > other_low[i] {
                result |= 1 << i;
            }
        }
        for i in 0..4 {
            if self_high[i] > other_high[i] {
                result |= 1 << (i + 4);
            }
        }
        result
    }

    #[inline]
    pub fn cmp_gte(self, other: Self) -> u8 {
        !self.cmp_lt(other)
    }

    #[inline]
    pub fn cmp_lt(self, other: Self) -> u8 {
        let mut result = 0u8;
        let self_low: [u64; 4] = unsafe { std::mem::transmute::<u64x4, [u64; 4]>(self.low) };
        let other_low: [u64; 4] = unsafe { std::mem::transmute::<u64x4, [u64; 4]>(other.low) };
        let self_high: [u64; 4] = unsafe { std::mem::transmute::<u64x4, [u64; 4]>(self.high) };
        let other_high: [u64; 4] = unsafe { std::mem::transmute::<u64x4, [u64; 4]>(other.high) };
        for i in 0..4 {
            if self_low[i] < other_low[i] {
                result |= 1 << i;
            }
        }
        for i in 0..4 {
            if self_high[i] < other_high[i] {
                result |= 1 << (i + 4);
            }
        }
        result
    }

    #[inline]
    pub fn cmp_lte(self, other: Self) -> u8 {
        !self.cmp_gt(other)
    }

    #[inline]
    pub fn min(self, other: Self) -> Self {
        let self_low: [u64; 4] = unsafe { std::mem::transmute::<u64x4, [u64; 4]>(self.low) };
        let other_low: [u64; 4] = unsafe { std::mem::transmute::<u64x4, [u64; 4]>(other.low) };
        let self_high: [u64; 4] = unsafe { std::mem::transmute::<u64x4, [u64; 4]>(self.high) };
        let other_high: [u64; 4] = unsafe { std::mem::transmute::<u64x4, [u64; 4]>(other.high) };
        let mut result_low = [0u64; 4];
        let mut result_high = [0u64; 4];
        for i in 0..4 {
            result_low[i] = self_low[i].min(other_low[i]);
        }
        for i in 0..4 {
            result_high[i] = self_high[i].min(other_high[i]);
        }
        Self {
            low: unsafe { std::mem::transmute::<[u64; 4], u64x4>(result_low) },
            high: unsafe { std::mem::transmute::<[u64; 4], u64x4>(result_high) },
        }
    }

    #[inline]
    pub fn max(self, other: Self) -> Self {
        let self_low: [u64; 4] = unsafe { std::mem::transmute::<u64x4, [u64; 4]>(self.low) };
        let other_low: [u64; 4] = unsafe { std::mem::transmute::<u64x4, [u64; 4]>(other.low) };
        let self_high: [u64; 4] = unsafe { std::mem::transmute::<u64x4, [u64; 4]>(self.high) };
        let other_high: [u64; 4] = unsafe { std::mem::transmute::<u64x4, [u64; 4]>(other.high) };
        let mut result_low = [0u64; 4];
        let mut result_high = [0u64; 4];
        for i in 0..4 {
            result_low[i] = self_low[i].max(other_low[i]);
        }
        for i in 0..4 {
            result_high[i] = self_high[i].max(other_high[i]);
        }
        Self {
            low: unsafe { std::mem::transmute::<[u64; 4], u64x4>(result_low) },
            high: unsafe { std::mem::transmute::<[u64; 4], u64x4>(result_high) },
        }
    }
}
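
/// Evaluate `cmp(vals[base + off + j], thr)` for 8 consecutive `f32` rows and
/// return a bitmask of the rows that satisfy the predicate and are not null.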
#[inline]
pub fn mask8_rows_f32(
    vals: &[f32],
    nulls: &BitVec,
    base: usize,
    off: usize,
    cmp: crate::expr::CmpOp,
    thr: f32,
) -> u8 {
    let start = base + off;
    let v = f32x8::from(&vals[start..start + 8]);
    let t = f32x8::splat(thr);
    let m = match cmp {
        crate::expr::CmpOp::Eq => v.cmp_eq(t),
        crate::expr::CmpOp::Neq => v.cmp_ne(t),
        crate::expr::CmpOp::Lt => v.cmp_lt(t),
        crate::expr::CmpOp::Lte => v.cmp_le(t),
        crate::expr::CmpOp::Gt => v.cmp_gt(t),
        crate::expr::CmpOp::Gte => v.cmp_ge(t),
    };
    let arr = m.to_array();
    let mut bits: u8 = 0;
    for (j, &val) in arr.iter().enumerate() {
        // A true float-comparison lane is an all-ones bit pattern (NaN), so
        // `!= 0.0` distinguishes it from the all-zero false lane.
        let ok = val != 0.0;
        let not_null = !nulls.get(start + j).map(|b| *b).unwrap_or(false);
        if ok && not_null {
            bits |= 1 << j;
        }
    }
    bits
}

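/// Evaluate `cmp(vals[base + off + j], thr)` for 8 consecutive `i32` rows.
/// `Lte`, `Gte`, and `Neq` are derived by negating the opposite comparison
/// lane by lane.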
#[inline]
pub fn mask8_rows_i32(
    vals: &[i32],
    nulls: &BitVec,
    base: usize,
    off: usize,
    cmp: crate::expr::CmpOp,
    thr: i32,
) -> u8 {
    let start = base + off;
    let v = i32x8::from(&vals[start..start + 8]);
    let t = i32x8::splat(thr);
    let arr: [i32; 8] = match cmp {
        crate::expr::CmpOp::Eq => v.cmp_eq(t).to_array(),
        crate::expr::CmpOp::Lt => v.cmp_lt(t).to_array(),
        crate::expr::CmpOp::Gt => v.cmp_gt(t).to_array(),
        crate::expr::CmpOp::Lte => {
            // `v <= t` is the lane-wise negation of `v > t`.
            let gt = v.cmp_gt(t).to_array();
            let mut out = [0; 8];
            for j in 0..8 {
                out[j] = if gt[j] != 0 { 0 } else { 1 };
            }
            out
        }
        crate::expr::CmpOp::Gte => {
            // `v >= t` is the lane-wise negation of `v < t`.
            let lt = v.cmp_lt(t).to_array();
            let mut out = [0; 8];
            for j in 0..8 {
                out[j] = if lt[j] != 0 { 0 } else { 1 };
            }
            out
        }
        crate::expr::CmpOp::Neq => {
            let eq = v.cmp_eq(t).to_array();
            let mut out = [0; 8];
            for j in 0..8 {
                out[j] = if eq[j] != 0 { 0 } else { 1 };
            }
            out
        }
    };
    let mut bits: u8 = 0;
    for (j, &val) in arr.iter().enumerate() {
        let ok = val != 0;
        let not_null = !nulls.get(start + j).map(|b| *b).unwrap_or(false);
        if ok && not_null {
            bits |= 1 << j;
        }
    }
    bits
}

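/// `f64` variant of [`mask8_rows_f32`]; the comparison already yields a `u8`
/// lane mask, which is then ANDed with a not-null mask built from `nulls`.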
#[inline]
pub fn mask8_rows_f64(
    vals: &[f64],
    nulls: &BitVec,
    base: usize,
    off: usize,
    cmp: crate::expr::CmpOp,
    thr: f64,
) -> u8 {
    let start = base + off;
    let v = f64x8::from_slice(&vals[start..start + 8]);
    let t = f64x8::splat(thr);
    let bits = match cmp {
        crate::expr::CmpOp::Eq => v.cmp_eq(t),
        crate::expr::CmpOp::Neq => !v.cmp_eq(t),
        crate::expr::CmpOp::Lt => v.cmp_lt(t),
        crate::expr::CmpOp::Lte => v.cmp_le(t),
        crate::expr::CmpOp::Gt => v.cmp_gt(t),
        crate::expr::CmpOp::Gte => v.cmp_ge(t),
    };
    let mut nn: u8 = 0;
    for j in 0..8 {
        if !nulls.get(start + j).map(|b| *b).unwrap_or(false) {
            nn |= 1 << j;
        }
    }
    bits & nn
}

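/// `i64` variant of [`mask8_rows_f64`], using the composed [`i64x8`] type.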
#[inline]
pub fn mask8_rows_i64(
    vals: &[i64],
    nulls: &BitVec,
    base: usize,
    off: usize,
    cmp: crate::expr::CmpOp,
    thr: i64,
) -> u8 {
    let start = base + off;
    let v = i64x8::from_slice(&vals[start..start + 8]);
    let t = i64x8::splat(thr);
    let bits = match cmp {
        crate::expr::CmpOp::Eq => v.cmp_eq(t),
        crate::expr::CmpOp::Neq => !v.cmp_eq(t),
        crate::expr::CmpOp::Lt => v.cmp_lt(t),
        crate::expr::CmpOp::Lte => v.cmp_lte(t),
        crate::expr::CmpOp::Gt => v.cmp_gt(t),
        crate::expr::CmpOp::Gte => v.cmp_gte(t),
    };
    let mut nn: u8 = 0;
    for j in 0..8 {
        if !nulls.get(start + j).map(|b| *b).unwrap_or(false) {
            nn |= 1 << j;
        }
    }
    bits & nn
}

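/// Chunk-pruning mask over 8 per-chunk `[min, max]` ranges: bit `j` is set
/// when chunk `off + j` might contain a row satisfying `cmp` against `thr`
/// and has at least one non-null row. `Neq` never prunes, so it passes every
/// chunk.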
#[inline]
pub fn mask8_ranges_f32(
    min: &[f32],
    max: &[f32],
    non_null: &[usize],
    off: usize,
    cmp: crate::expr::CmpOp,
    thr: f32,
) -> u8 {
    let minv = f32x8::from(&min[off..off + 8]);
    let maxv = f32x8::from(&max[off..off + 8]);
    let t = f32x8::splat(thr);
    let arr = match cmp {
        // `thr` can only occur in a chunk whose range satisfies min <= thr <= max.
        crate::expr::CmpOp::Eq => (minv.cmp_le(t) & maxv.cmp_ge(t)).to_array(),
        crate::expr::CmpOp::Lt => minv.cmp_lt(t).to_array(),
        crate::expr::CmpOp::Lte => minv.cmp_le(t).to_array(),
        crate::expr::CmpOp::Gt => maxv.cmp_gt(t).to_array(),
        crate::expr::CmpOp::Gte => maxv.cmp_ge(t).to_array(),
        // Min/max ranges cannot prune a != predicate, so keep every chunk.
        crate::expr::CmpOp::Neq => [1.0; 8],
    };
    let mut bits: u8 = 0;
    for j in 0..8 {
        if arr[j] != 0.0 && non_null[off + j] > 0 {
            bits |= 1 << j;
        }
    }
    bits
}

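/// `f64` variant of [`mask8_ranges_f32`].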
#[inline]
pub fn mask8_ranges_f64(
    min: &[f64],
    max: &[f64],
    non_null: &[usize],
    off: usize,
    cmp: crate::expr::CmpOp,
    thr: f64,
) -> u8 {
    let minv = f64x8::from_slice(&min[off..off + 8]);
    let maxv = f64x8::from_slice(&max[off..off + 8]);
    let t = f64x8::splat(thr);
    let bits = match cmp {
        crate::expr::CmpOp::Eq => minv.cmp_le(t) & maxv.cmp_ge(t),
        crate::expr::CmpOp::Lt => minv.cmp_lt(t),
        crate::expr::CmpOp::Lte => minv.cmp_le(t),
        crate::expr::CmpOp::Gt => maxv.cmp_gt(t),
        crate::expr::CmpOp::Gte => maxv.cmp_ge(t),
        crate::expr::CmpOp::Neq => 0xFF,
    };
    let mut nn: u8 = 0;
    for j in 0..8 {
        if non_null[off + j] > 0 {
            nn |= 1 << j;
        }
    }
    bits & nn
}

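/// `i32` variant of [`mask8_ranges_f32`]; `Lte`, `Gte`, and the `Eq` bounds
/// check are derived by negating `cmp_gt` / `cmp_lt` lane by lane.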
#[inline]
pub fn mask8_ranges_i32(
    min: &[i32],
    max: &[i32],
    non_null: &[usize],
    off: usize,
    cmp: crate::expr::CmpOp,
    thr: i32,
) -> u8 {
    let minv = i32x8::from(&min[off..off + 8]);
    let maxv = i32x8::from(&max[off..off + 8]);
    let t = i32x8::splat(thr);
    let arr: [i32; 8] = match cmp {
        crate::expr::CmpOp::Eq => {
            let gt = minv.cmp_gt(t).to_array();
            let lt = maxv.cmp_lt(t).to_array();
            let mut out = [0; 8];
            for j in 0..8 {
                let lte = if gt[j] != 0 { 0 } else { 1 };
                let gte = if lt[j] != 0 { 0 } else { 1 };
                out[j] = if lte != 0 && gte != 0 { 1 } else { 0 };
            }
            out
        }
        crate::expr::CmpOp::Lt => minv.cmp_lt(t).to_array(),
        crate::expr::CmpOp::Lte => {
            let gt = minv.cmp_gt(t).to_array();
            let mut out = [0; 8];
            for j in 0..8 {
                out[j] = if gt[j] != 0 { 0 } else { 1 };
            }
            out
        }
        crate::expr::CmpOp::Gt => maxv.cmp_gt(t).to_array(),
        crate::expr::CmpOp::Gte => {
            let lt = maxv.cmp_lt(t).to_array();
            let mut out = [0; 8];
            for j in 0..8 {
                out[j] = if lt[j] != 0 { 0 } else { 1 };
            }
            out
        }
        crate::expr::CmpOp::Neq => [1; 8],
    };
    let mut bits: u8 = 0;
    for j in 0..8 {
        if arr[j] != 0 && non_null[off + j] > 0 {
            bits |= 1 << j;
        }
    }
    bits
}

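/// `i64` variant of [`mask8_ranges_f32`], using the composed [`i64x8`] type.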
#[inline]
pub fn mask8_ranges_i64(
    min: &[i64],
    max: &[i64],
    non_null: &[usize],
    off: usize,
    cmp: crate::expr::CmpOp,
    thr: i64,
) -> u8 {
    let minv = i64x8::from_slice(&min[off..off + 8]);
    let maxv = i64x8::from_slice(&max[off..off + 8]);
    let t = i64x8::splat(thr);
    let bits = match cmp {
        crate::expr::CmpOp::Eq => minv.cmp_lte(t) & maxv.cmp_gte(t),
        crate::expr::CmpOp::Lt => minv.cmp_lt(t),
        crate::expr::CmpOp::Lte => minv.cmp_lte(t),
        crate::expr::CmpOp::Gt => maxv.cmp_gt(t),
        crate::expr::CmpOp::Gte => maxv.cmp_gte(t),
        crate::expr::CmpOp::Neq => 0xFF,
    };
    let mut nn: u8 = 0;
    for j in 0..8 {
        if non_null[off + j] > 0 {
            nn |= 1 << j;
        }
    }
    bits & nn
}

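/// Apply `cmp` against `thr` to `len` rows starting at `base`, setting the
/// bit at each matching non-null row's offset (relative to `base`) in `out`.
/// Rows are processed 8 at a time via [`mask8_rows_f32`], with a scalar loop
/// for the tail.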
#[inline]
pub fn apply_rows_mask_f32(
    vals: &[f32],
    nulls: &BitVec,
    base: usize,
    len: usize,
    cmp: crate::expr::CmpOp,
    thr: f32,
    out: &mut BitVec,
) {
    let mut off = 0;
    while off + 8 <= len {
        let bits = mask8_rows_f32(vals, nulls, base, off, cmp, thr);
        for j in 0..8 {
            if (bits >> j) & 1 == 1 {
                out.set(off + j, true);
            }
        }
        off += 8;
    }
    while off < len {
        let v = vals[base + off];
        let is_null = nulls.get(base + off).map(|b| *b).unwrap_or(false);
        let sat = match cmp {
            crate::expr::CmpOp::Eq => v == thr,
            crate::expr::CmpOp::Neq => v != thr,
            crate::expr::CmpOp::Lt => v < thr,
            crate::expr::CmpOp::Lte => v <= thr,
            crate::expr::CmpOp::Gt => v > thr,
            crate::expr::CmpOp::Gte => v >= thr,
        };
        if sat && !is_null {
            out.set(off, true);
        }
        off += 1;
    }
}

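/// `i32` variant of [`apply_rows_mask_f32`].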
#[inline]
pub fn apply_rows_mask_i32(
    vals: &[i32],
    nulls: &BitVec,
    base: usize,
    len: usize,
    cmp: crate::expr::CmpOp,
    thr: i32,
    out: &mut BitVec,
) {
    let mut off = 0;
    while off + 8 <= len {
        let bits = mask8_rows_i32(vals, nulls, base, off, cmp, thr);
        for j in 0..8 {
            if (bits >> j) & 1 == 1 {
                out.set(off + j, true);
            }
        }
        off += 8;
    }
    while off < len {
        let v = vals[base + off];
        let is_null = nulls.get(base + off).map(|b| *b).unwrap_or(false);
        let sat = match cmp {
            crate::expr::CmpOp::Eq => v == thr,
            crate::expr::CmpOp::Neq => v != thr,
            crate::expr::CmpOp::Lt => v < thr,
            crate::expr::CmpOp::Lte => v <= thr,
            crate::expr::CmpOp::Gt => v > thr,
            crate::expr::CmpOp::Gte => v >= thr,
        };
        if sat && !is_null {
            out.set(off, true);
        }
        off += 1;
    }
}

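/// `f64` variant of [`apply_rows_mask_f32`].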
#[inline]
pub fn apply_rows_mask_f64(
    vals: &[f64],
    nulls: &BitVec,
    base: usize,
    len: usize,
    cmp: crate::expr::CmpOp,
    thr: f64,
    out: &mut BitVec,
) {
    let mut off = 0;
    while off + 8 <= len {
        let bits = mask8_rows_f64(vals, nulls, base, off, cmp, thr);
        for j in 0..8 {
            if (bits >> j) & 1 == 1 {
                out.set(off + j, true);
            }
        }
        off += 8;
    }
    while off < len {
        let is_null = nulls.get(base + off).map(|b| *b).unwrap_or(false);
        let v = vals[base + off];
        let sat = match cmp {
            crate::expr::CmpOp::Eq => v == thr,
            crate::expr::CmpOp::Neq => v != thr,
            crate::expr::CmpOp::Lt => v < thr,
            crate::expr::CmpOp::Lte => v <= thr,
            crate::expr::CmpOp::Gt => v > thr,
            crate::expr::CmpOp::Gte => v >= thr,
        };
        if sat && !is_null {
            out.set(off, true);
        }
        off += 1;
    }
}

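/// `i64` variant of [`apply_rows_mask_f32`].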
#[inline]
pub fn apply_rows_mask_i64(
    vals: &[i64],
    nulls: &BitVec,
    base: usize,
    len: usize,
    cmp: crate::expr::CmpOp,
    thr: i64,
    out: &mut BitVec,
) {
    let mut off = 0;
    while off + 8 <= len {
        let bits = mask8_rows_i64(vals, nulls, base, off, cmp, thr);
        for j in 0..8 {
            if (bits >> j) & 1 == 1 {
                out.set(off + j, true);
            }
        }
        off += 8;
    }
    while off < len {
        let is_null = nulls.get(base + off).map(|b| *b).unwrap_or(false);
        let v = vals[base + off];
        let sat = match cmp {
            crate::expr::CmpOp::Eq => v == thr,
            crate::expr::CmpOp::Neq => v != thr,
            crate::expr::CmpOp::Lt => v < thr,
            crate::expr::CmpOp::Lte => v <= thr,
            crate::expr::CmpOp::Gt => v > thr,
            crate::expr::CmpOp::Gte => v >= thr,
        };
        if sat && !is_null {
            out.set(off, true);
        }
        off += 1;
    }
}

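/// Chunk-level pruning pass: for each of `n_chunks` chunks, set the chunk's
/// bit in `out` when its `[min, max]` range (and non-null count) cannot rule
/// out a match for `cmp` against `thr`. Chunks are processed 8 at a time via
/// [`mask8_ranges_f32`], with a scalar loop for the tail.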
#[inline]
pub fn apply_chunk_mask_ranges_f32_bits(
    min: &[f32],
    max: &[f32],
    non_null: &[usize],
    n_chunks: usize,
    cmp: crate::expr::CmpOp,
    thr: f32,
    out: &mut BitVec,
) {
    let mut i = 0;
    while i + 8 <= n_chunks {
        let bits = mask8_ranges_f32(min, max, non_null, i, cmp, thr);
        for j in 0..8 {
            if (bits >> j) & 1 == 1 {
                out.set(i + j, true);
            }
        }
        i += 8;
    }
    while i < n_chunks {
        let mn = min[i];
        let mx = max[i];
        let sat = match cmp {
            crate::expr::CmpOp::Eq => mn <= thr && thr <= mx,
            crate::expr::CmpOp::Lt => mn < thr,
            crate::expr::CmpOp::Lte => mn <= thr,
            crate::expr::CmpOp::Gt => mx > thr,
            crate::expr::CmpOp::Gte => mx >= thr,
            crate::expr::CmpOp::Neq => true,
        } && non_null[i] > 0;
        if sat {
            out.set(i, true);
        }
        i += 1;
    }
}

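/// `f64` variant of [`apply_chunk_mask_ranges_f32_bits`].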
#[inline]
pub fn apply_chunk_mask_ranges_f64_bits(
    min: &[f64],
    max: &[f64],
    non_null: &[usize],
    n_chunks: usize,
    cmp: crate::expr::CmpOp,
    thr: f64,
    out: &mut BitVec,
) {
    let mut i = 0;
    while i + 8 <= n_chunks {
        let bits = mask8_ranges_f64(min, max, non_null, i, cmp, thr);
        for j in 0..8 {
            if (bits >> j) & 1 == 1 {
                out.set(i + j, true);
            }
        }
        i += 8;
    }
    while i < n_chunks {
        let mn = min[i];
        let mx = max[i];
        let sat = match cmp {
            crate::expr::CmpOp::Eq => mn <= thr && thr <= mx,
            crate::expr::CmpOp::Lt => mn < thr,
            crate::expr::CmpOp::Lte => mn <= thr,
            crate::expr::CmpOp::Gt => mx > thr,
            crate::expr::CmpOp::Gte => mx >= thr,
            crate::expr::CmpOp::Neq => true,
        } && non_null[i] > 0;
        if sat {
            out.set(i, true);
        }
        i += 1;
    }
}

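/// `i32` variant of [`apply_chunk_mask_ranges_f32_bits`].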
#[inline]
pub fn apply_chunk_mask_ranges_i32_bits(
    min: &[i32],
    max: &[i32],
    non_null: &[usize],
    n_chunks: usize,
    cmp: crate::expr::CmpOp,
    thr: i32,
    out: &mut BitVec,
) {
    let mut i = 0;
    while i + 8 <= n_chunks {
        let bits = mask8_ranges_i32(min, max, non_null, i, cmp, thr);
        for j in 0..8 {
            if (bits >> j) & 1 == 1 {
                out.set(i + j, true);
            }
        }
        i += 8;
    }
    while i < n_chunks {
        let mn = min[i];
        let mx = max[i];
        let sat = match cmp {
            crate::expr::CmpOp::Eq => mn <= thr && thr <= mx,
            crate::expr::CmpOp::Lt => mn < thr,
            crate::expr::CmpOp::Lte => mn <= thr,
            crate::expr::CmpOp::Gt => mx > thr,
            crate::expr::CmpOp::Gte => mx >= thr,
            crate::expr::CmpOp::Neq => true,
        } && non_null[i] > 0;
        if sat {
            out.set(i, true);
        }
        i += 1;
    }
}

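/// `i64` variant of [`apply_chunk_mask_ranges_f32_bits`].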
#[inline]
pub fn apply_chunk_mask_ranges_i64_bits(
    min: &[i64],
    max: &[i64],
    non_null: &[usize],
    n_chunks: usize,
    cmp: crate::expr::CmpOp,
    thr: i64,
    out: &mut BitVec,
) {
    let mut i = 0;
    while i + 8 <= n_chunks {
        let bits = mask8_ranges_i64(min, max, non_null, i, cmp, thr);
        for j in 0..8 {
            if (bits >> j) & 1 == 1 {
                out.set(i + j, true);
            }
        }
        i += 8;
    }
    while i < n_chunks {
        let mn = min[i];
        let mx = max[i];
        let sat = match cmp {
            crate::expr::CmpOp::Eq => mn <= thr && thr <= mx,
            crate::expr::CmpOp::Lt => mn < thr,
            crate::expr::CmpOp::Lte => mn <= thr,
            crate::expr::CmpOp::Gt => mx > thr,
            crate::expr::CmpOp::Gte => mx >= thr,
            crate::expr::CmpOp::Neq => true,
        } && non_null[i] > 0;
        if sat {
            out.set(i, true);
        }
        i += 1;
    }
}
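
#[cfg(test)]
mod tests {
    use super::*;

    // A minimal usage sketch rather than a full test suite. It assumes only
    // the `crate::expr::CmpOp` variants already referenced above (Eq, Neq,
    // Lt, Lte, Gt, Gte) and checks the lane-mask convention: bit `j` of the
    // returned `u8` corresponds to row/chunk `j` within the group of eight.

    #[test]
    fn row_mask_i64_matches_scalar_comparison() {
        let vals: Vec<i64> = (0..8).collect();
        let nulls = BitVec::repeat(false, 8);
        // Rows 4..=7 hold the values 4..=7, the only ones greater than 3.
        let bits = mask8_rows_i64(&vals, &nulls, 0, 0, crate::expr::CmpOp::Gt, 3);
        assert_eq!(bits, 0b1111_0000);
    }

    #[test]
    fn range_mask_i64_prunes_non_overlapping_chunks() {
        let min: Vec<i64> = (0..8).map(|c| c * 10).collect();
        let max: Vec<i64> = (0..8).map(|c| c * 10 + 9).collect();
        let non_null = vec![1usize; 8];
        // Only chunk 3 ([30, 39]) can contain the value 35.
        let bits = mask8_ranges_i64(&min, &max, &non_null, 0, crate::expr::CmpOp::Eq, 35);
        assert_eq!(bits, 0b0000_1000);
    }
}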