memx/mem/
mem_nechr_qpl.rs
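//! `memnechr` for a quadruple needle: scans `buf` for the first byte that is
//! NOT equal to any of `needle.v1`..`needle.v4` and returns its index, or
//! `None` when every byte matches one of the four. For example, scanning
//! b"aaabXa" for the quadruple (b'a', b'b', b'c', b'd') yields `Some(4)`,
//! the position of b'X'. The reference semantics are spelled out in the
//! block comment at the bottom of this file.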

use crate::utils::*;

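// Entry point. The cfg blocks pick the word width: under the `test` feature a
// `test_pointer_width_*` feature can force either path; otherwise the real
// `target_pointer_width` decides.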
#[inline(never)]
pub fn _memnechr_qpl_impl(buf: &[u8], needle: B1Qpl) -> Option<usize> {
    #[cfg(all(
        feature = "test",
        any(feature = "test_pointer_width_64", feature = "test_pointer_width_32")
    ))]
    {
        #[cfg(feature = "test_pointer_width_64")]
        let r = _start_nechr_64(buf, needle);
        #[cfg(feature = "test_pointer_width_32")]
        let r = _start_nechr_32(buf, needle);
        //
        r
    }
    #[cfg(not(all(
        feature = "test",
        any(feature = "test_pointer_width_64", feature = "test_pointer_width_32")
    )))]
    {
        #[cfg(target_pointer_width = "64")]
        let r = _start_nechr_64(buf, needle);
        #[cfg(target_pointer_width = "32")]
        let r = _start_nechr_32(buf, needle);
        //
        r
    }
}

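// The `_unroll_one_nechr_to_align_*` macros step toward the alignment
// boundary one byte at a time, `break`-ing out of the caller's `loop` at the
// boundary and returning `(None, Some(index))` as soon as a byte differing
// from all four needles is found.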
macro_rules! _unroll_one_nechr_to_align_x1 {
    ($buf_ptr_2:expr, $buf_ptr_end:expr, $c:expr, $start_ptr:expr) => {{
        if $buf_ptr_2 >= $buf_ptr_end {
            break;
        }
        let r = _nechr_c1_aa_x1($buf_ptr_2, $c, $start_ptr);
        if r.is_some() {
            return (None, r);
        }
        $buf_ptr_2 = unsafe { $buf_ptr_2.add(1) };
    }};
}

macro_rules! _unroll_one_nechr_to_align_x4 {
    ($buf_ptr_2:expr, $buf_ptr_end:expr, $c:expr, $start_ptr:expr) => {{
        _unroll_one_nechr_to_align_x1!($buf_ptr_2, $buf_ptr_end, $c, $start_ptr);
        _unroll_one_nechr_to_align_x1!($buf_ptr_2, $buf_ptr_end, $c, $start_ptr);
        _unroll_one_nechr_to_align_x1!($buf_ptr_2, $buf_ptr_end, $c, $start_ptr);
        _unroll_one_nechr_to_align_x1!($buf_ptr_2, $buf_ptr_end, $c, $start_ptr);
    }};
}

macro_rules! _unroll_one_nechr_to_align_x8 {
    ($buf_ptr_2:expr, $buf_ptr_end:expr, $c:expr, $start_ptr:expr) => {{
        _unroll_one_nechr_to_align_x4!($buf_ptr_2, $buf_ptr_end, $c, $start_ptr);
        _unroll_one_nechr_to_align_x4!($buf_ptr_2, $buf_ptr_end, $c, $start_ptr);
    }};
}

macro_rules! _unroll_one_nechr_to_align_x16 {
    ($buf_ptr_2:expr, $buf_ptr_end:expr, $c:expr, $start_ptr:expr) => {{
        _unroll_one_nechr_to_align_x8!($buf_ptr_2, $buf_ptr_end, $c, $start_ptr);
        _unroll_one_nechr_to_align_x8!($buf_ptr_2, $buf_ptr_end, $c, $start_ptr);
    }};
}

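// Each `_nechr_qpl_to_aligned_*` helper scans byte-wise up to the next
// 32/16/8/4-byte boundary; `0xN - (ptr & (0xN - 1))` is the byte count to
// that boundary. On reaching it the helper returns `(Some(aligned_ptr), None)`;
// if a non-matching byte turns up first, the macro above returns
// `(None, Some(index))` on its behalf.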
#[inline(always)]
pub(crate) fn _nechr_qpl_to_aligned_u256(
    buf_ptr: *const u8,
    c: B1Qpl,
    start_ptr: *const u8,
) -> (Option<*const u8>, Option<usize>) {
    let remaining_align = 0x20_usize - ((buf_ptr as usize) & 0x1F_usize);
    let buf_ptr_end = unsafe { buf_ptr.add(remaining_align) };
    let mut buf_ptr_2 = buf_ptr;
    loop {
        _unroll_one_nechr_to_align_x16!(buf_ptr_2, buf_ptr_end, c, start_ptr);
        _unroll_one_nechr_to_align_x16!(buf_ptr_2, buf_ptr_end, c, start_ptr);
    }
    (Some(buf_ptr_end), None)
}

#[inline(always)]
pub(crate) fn _nechr_qpl_to_aligned_u128(
    buf_ptr: *const u8,
    c: B1Qpl,
    start_ptr: *const u8,
) -> (Option<*const u8>, Option<usize>) {
    let remaining_align = 0x10_usize - ((buf_ptr as usize) & 0x0F_usize);
    let buf_ptr_end = unsafe { buf_ptr.add(remaining_align) };
    let mut buf_ptr_2 = buf_ptr;
    loop {
        _unroll_one_nechr_to_align_x16!(buf_ptr_2, buf_ptr_end, c, start_ptr);
    }
    (Some(buf_ptr_end), None)
}

#[inline(always)]
fn _nechr_qpl_to_aligned_u64(
    buf_ptr: *const u8,
    c: B1Qpl,
    start_ptr: *const u8,
) -> (Option<*const u8>, Option<usize>) {
    let remaining_align = 0x08_usize - ((buf_ptr as usize) & 0x07_usize);
    let buf_ptr_end = unsafe { buf_ptr.add(remaining_align) };
    let mut buf_ptr_2 = buf_ptr;
    loop {
        _unroll_one_nechr_to_align_x8!(buf_ptr_2, buf_ptr_end, c, start_ptr);
    }
    (Some(buf_ptr_end), None)
}

#[inline(always)]
fn _nechr_qpl_to_aligned_u32(
    buf_ptr: *const u8,
    c: B1Qpl,
    start_ptr: *const u8,
) -> (Option<*const u8>, Option<usize>) {
    let remaining_align = 0x04_usize - ((buf_ptr as usize) & 0x03_usize);
    let buf_ptr_end = unsafe { buf_ptr.add(remaining_align) };
    let mut buf_ptr_2 = buf_ptr;
    loop {
        _unroll_one_nechr_to_align_x4!(buf_ptr_2, buf_ptr_end, c, start_ptr);
    }
    (Some(buf_ptr_end), None)
}

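// 64-bit strategy: handle one (possibly unaligned) 8-byte word to reach an
// aligned pointer, run the main loop 4x-unrolled over aligned 8-byte words,
// finish single words, then hand at most 7 bytes to the tail helper. The
// commented-out 8x/2x variants are kept as tuning alternatives.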
128#[cfg(any(target_pointer_width = "64", feature = "test_pointer_width_64"))]
129#[inline(always)]
130fn _start_nechr_64(buf: &[u8], needle: B1Qpl) -> Option<usize> {
131    let buf_len = buf.len();
132    let mut buf_ptr = buf.as_ptr();
133    let start_ptr = buf_ptr;
134    let end_ptr = unsafe { buf_ptr.add(buf_len) };
135    let cc: B8Qpl = needle.into();
136    buf_ptr.prefetch_read_data();
137    //
138    if buf_len >= 8 {
        // to an aligned pointer
        {
            if !buf_ptr.is_aligned_u64() {
                #[cfg(not(feature = "test_alignment_check"))]
                {
                    let r = _nechr_c8_uu_x1(buf_ptr, cc, start_ptr);
                    if r.is_some() {
                        return r;
                    }
                    let remaining_align = 0x08_usize - ((buf_ptr as usize) & 0x07_usize);
                    buf_ptr = unsafe { buf_ptr.add(remaining_align) };
                }
                #[cfg(feature = "test_alignment_check")]
                {
                    let r = _nechr_qpl_to_aligned_u64(buf_ptr, needle, start_ptr);
                    if let Some(p) = r.0 {
                        buf_ptr = p;
                    } else if let Some(v) = r.1 {
                        return Some(v);
                    }
                }
            }
        }
        // the loop
        /*
        {
            let unroll = 8;
            let loop_size = 8;
            while buf_ptr.is_not_over(end_ptr, loop_size * unroll) {
                buf_ptr.prefetch_read_data();
                let r = _nechr_c8_aa_x8(buf_ptr, cc, start_ptr);
                if r.is_some() {
                    return r;
                }
                buf_ptr = unsafe { buf_ptr.add(loop_size * unroll) };
            }
        }
        */
        {
            let unroll = 4;
            let loop_size = 8;
            while buf_ptr.is_not_over(end_ptr, loop_size * unroll) {
                //buf_ptr.prefetch_read_data();
                let r = _nechr_c8_aa_x4(buf_ptr, cc, start_ptr);
                if r.is_some() {
                    return r;
                }
                buf_ptr = unsafe { buf_ptr.add(loop_size * unroll) };
            }
        }
        /*
        {
            let unroll = 2;
            let loop_size = 8;
            while buf_ptr.is_not_over(end_ptr, loop_size * unroll) {
                let r = _nechr_c8_aa_x2(buf_ptr, cc, start_ptr);
                if r.is_some() {
                    return r;
                }
                buf_ptr = unsafe { buf_ptr.add(loop_size * unroll) };
            }
        }
        */
        {
            let unroll = 1;
            let loop_size = 8;
            while buf_ptr.is_not_over(end_ptr, loop_size * unroll) {
                let r = _nechr_c8_aa_x1(buf_ptr, cc, start_ptr);
                if r.is_some() {
                    return r;
                }
                buf_ptr = unsafe { buf_ptr.add(loop_size) };
            }
        }
    }
    // the remaining data is at most 7 bytes.
    _memnechr_qpl_remaining_7_bytes_impl(buf_ptr, cc.into(), start_ptr, end_ptr)
}

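// 32-bit variant of `_start_nechr_64`: the same structure over 4-byte words,
// leaving at most 3 bytes for the tail helper.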
218#[cfg(any(target_pointer_width = "32", feature = "test_pointer_width_32"))]
219#[inline(always)]
220fn _start_nechr_32(buf: &[u8], needle: B1Qpl) -> Option<usize> {
221    let buf_len = buf.len();
222    let mut buf_ptr = buf.as_ptr();
223    let start_ptr = buf_ptr;
224    let end_ptr = unsafe { buf_ptr.add(buf_len) };
225    let cc: B4Qpl = needle.into();
226    buf_ptr.prefetch_read_data();
227    //
228    if buf_len >= 4 {
        // to an aligned pointer
        {
            if !buf_ptr.is_aligned_u32() {
                #[cfg(not(feature = "test_alignment_check"))]
                {
                    let r = _nechr_c4_uu_x1(buf_ptr, cc, start_ptr);
                    if r.is_some() {
                        return r;
                    }
                    let remaining_align = 0x04_usize - ((buf_ptr as usize) & 0x03_usize);
                    buf_ptr = unsafe { buf_ptr.add(remaining_align) };
                }
                #[cfg(feature = "test_alignment_check")]
                {
                    let r = _nechr_qpl_to_aligned_u32(buf_ptr, needle, start_ptr);
                    if let Some(p) = r.0 {
                        buf_ptr = p;
                    } else if let Some(v) = r.1 {
                        return Some(v);
                    }
                }
            }
        }
        // the loop
        /*
        {
            let unroll = 8;
            let loop_size = 4;
            while buf_ptr.is_not_over(end_ptr, loop_size * unroll) {
                buf_ptr.prefetch_read_data();
                let r = _nechr_c4_aa_x8(buf_ptr, cc, start_ptr);
                if r.is_some() {
                    return r;
                }
                buf_ptr = unsafe { buf_ptr.add(loop_size * unroll) };
            }
        }
        */
        {
            let unroll = 4;
            let loop_size = 4;
            while buf_ptr.is_not_over(end_ptr, loop_size * unroll) {
                let r = _nechr_c4_aa_x4(buf_ptr, cc, start_ptr);
                if r.is_some() {
                    return r;
                }
                buf_ptr = unsafe { buf_ptr.add(loop_size * unroll) };
            }
        }
        /*
        {
            let unroll = 2;
            let loop_size = 4;
            while buf_ptr.is_not_over(end_ptr, loop_size * unroll) {
                let r = _nechr_c4_aa_x2(buf_ptr, cc, start_ptr);
                if r.is_some() {
                    return r;
                }
                buf_ptr = unsafe { buf_ptr.add(loop_size * unroll) };
            }
        }
        */
        {
            let unroll = 1;
            let loop_size = 4;
            while buf_ptr.is_not_over(end_ptr, loop_size) {
                let r = _nechr_c4_aa_x1(buf_ptr, cc, start_ptr);
                if r.is_some() {
                    return r;
                }
                buf_ptr = unsafe { buf_ptr.add(loop_size) };
            }
        }
    }
    // the remaining data is at most 3 bytes.
    _memnechr_qpl_remaining_3_bytes_impl(buf_ptr, cc.into(), start_ptr, end_ptr)
}

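// The tail helpers funnel down: each consumes at most one aligned word of its
// size (8, then 4, then 2 bytes), then delegates to the next-smaller helper,
// ending in a byte-by-byte loop up to `end_ptr`.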
#[inline(always)]
pub(crate) fn _memnechr_qpl_remaining_15_bytes_impl(
    buf_ptr: *const u8,
    cc: B8Qpl,
    start_ptr: *const u8,
    end_ptr: *const u8,
) -> Option<usize> {
    let mut buf_ptr = buf_ptr;
    if buf_ptr.is_aligned_u64() {
        let loop_size = 8;
        if buf_ptr.is_not_over(end_ptr, loop_size) {
            let r = _nechr_c8_aa_x1(buf_ptr, cc, start_ptr);
            if r.is_some() {
                return r;
            }
            buf_ptr = unsafe { buf_ptr.add(loop_size) };
        }
    }
    // the remaining data is at most 7 bytes.
    _memnechr_qpl_remaining_7_bytes_impl(buf_ptr, cc.into(), start_ptr, end_ptr)
}

#[inline(always)]
pub(crate) fn _memnechr_qpl_remaining_7_bytes_impl(
    buf_ptr: *const u8,
    cc: B4Qpl,
    start_ptr: *const u8,
    end_ptr: *const u8,
) -> Option<usize> {
    let mut buf_ptr = buf_ptr;
    if buf_ptr.is_aligned_u32() {
        let loop_size = 4;
        if buf_ptr.is_not_over(end_ptr, loop_size) {
            let r = _nechr_c4_aa_x1(buf_ptr, cc, start_ptr);
            if r.is_some() {
                return r;
            }
            buf_ptr = unsafe { buf_ptr.add(loop_size) };
        }
    }
    // the remaining data is at most 3 bytes.
    _memnechr_qpl_remaining_3_bytes_impl(buf_ptr, cc.into(), start_ptr, end_ptr)
}

#[inline(always)]
fn _memnechr_qpl_remaining_3_bytes_impl(
    buf_ptr: *const u8,
    cc: B2Qpl,
    start_ptr: *const u8,
    end_ptr: *const u8,
) -> Option<usize> {
    let mut buf_ptr = buf_ptr;
    if buf_ptr.is_aligned_u16() {
        let loop_size = 2;
        if buf_ptr.is_not_over(end_ptr, loop_size) {
            let r = _nechr_c2_aa_x1(buf_ptr, cc, start_ptr);
            if r.is_some() {
                return r;
            }
            buf_ptr = unsafe { buf_ptr.add(loop_size) };
        }
    }
    {
        let loop_size = 1;
        while buf_ptr.is_not_over(end_ptr, loop_size) {
            let r = _nechr_c1_aa_x1(buf_ptr, cc.into(), start_ptr);
            if r.is_some() {
                return r;
            }
            buf_ptr = unsafe { buf_ptr.add(loop_size) };
        }
    }
    //
    None
}

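// `bits_ab` carries a per-byte flag (in each byte's high bit) for every byte
// that equals one of the four needles. If the flags are not all set
// (`is_highs`), some byte differs from all four: `propagate_a_high_bit`
// spreads each flag across its byte, so `trailing_ones() / 8` counts the run
// of matching low-address bytes (the reads are little-endian) and yields the
// offset of the first non-matching byte within the word.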
#[inline(always)]
fn _return_nechr_qpl<T, PU>(base: T, bits_ab: PU) -> Option<usize>
where
    T: core::ops::Add<usize, Output = usize>,
    PU: BitOrt + HighBitProp,
{
    if !bits_ab.is_highs() {
        let bits_ab = bits_ab.propagate_a_high_bit();
        let idx1 = (bits_ab.trailing_ones() / 8) as usize;
        Some(base + idx1)
    } else {
        None
    }
}

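// Naming: `cN` is the chunk width in bytes, `uu`/`aa` marks unaligned vs.
// aligned pointers (the `uu` variants simply forward here), and `xN` is the
// unroll factor.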
#[inline(always)]
fn _nechr_c16_uu_x1(buf_ptr: *const u8, c16: B16Qpl, st_ptr: *const u8) -> Option<usize> {
    _nechr_c16_aa_x1(buf_ptr, c16, st_ptr)
}

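// SWAR core: XOR the loaded word with each broadcast needle, so bytes equal
// to that needle become zero; `may_have_zero_byte` flags those zero bytes in
// their high bits. OR-ing the four masks flags every byte that equals ANY
// needle, and `_return_nechr_qpl` then looks for the first unflagged byte.
// The c8/c4/c2 variants below apply the same technique at smaller widths.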
#[inline(always)]
fn _nechr_c16_aa_x1(buf_ptr: *const u8, c16: B16Qpl, st_ptr: *const u8) -> Option<usize> {
    let v_0 = unsafe { _read_a_little_endian_from_ptr_u128(buf_ptr) };
    let v_0_a = v_0 ^ c16.v1;
    let v_0_b = v_0 ^ c16.v2;
    let v_0_c = v_0 ^ c16.v3;
    let v_0_d = v_0 ^ c16.v4;
    let bits_0_a = PackedU128::new(v_0_a).may_have_zero_byte();
    let bits_0_b = PackedU128::new(v_0_b).may_have_zero_byte();
    let bits_0_c = PackedU128::new(v_0_c).may_have_zero_byte();
    let bits_0_d = PackedU128::new(v_0_d).may_have_zero_byte();
    let bits_0_abcd = bits_0_a | bits_0_b | bits_0_c | bits_0_d;
    let base = buf_ptr.usz_offset_from(st_ptr);
    //
    _return_nechr_qpl(base, bits_0_abcd)
}

#[inline(always)]
fn _nechr_c16_aa_x2(buf_ptr: *const u8, c16: B16Qpl, st_ptr: *const u8) -> Option<usize> {
    let r = _nechr_c16_aa_x1(buf_ptr, c16, st_ptr);
    if r.is_some() {
        return r;
    }
    let r = _nechr_c16_aa_x1(unsafe { buf_ptr.add(16) }, c16, st_ptr);
    if r.is_some() {
        return r;
    }
    None
}

#[inline(always)]
fn _nechr_c16_aa_x4(buf_ptr: *const u8, c16: B16Qpl, st_ptr: *const u8) -> Option<usize> {
    let r = _nechr_c16_aa_x2(buf_ptr, c16, st_ptr);
    if r.is_some() {
        return r;
    }
    let r = _nechr_c16_aa_x2(unsafe { buf_ptr.add(16 * 2) }, c16, st_ptr);
    if r.is_some() {
        return r;
    }
    None
}

#[inline(always)]
fn _nechr_c16_aa_x8(buf_ptr: *const u8, c16: B16Qpl, st_ptr: *const u8) -> Option<usize> {
    let r = _nechr_c16_aa_x4(buf_ptr, c16, st_ptr);
    if r.is_some() {
        return r;
    }
    let r = _nechr_c16_aa_x4(unsafe { buf_ptr.add(16 * 4) }, c16, st_ptr);
    if r.is_some() {
        return r;
    }
    None
}

#[inline(always)]
fn _nechr_c8_uu_x1(buf_ptr: *const u8, c8: B8Qpl, st_ptr: *const u8) -> Option<usize> {
    _nechr_c8_aa_x1(buf_ptr, c8, st_ptr)
}

#[inline(always)]
fn _nechr_c8_aa_x1(buf_ptr: *const u8, c8: B8Qpl, st_ptr: *const u8) -> Option<usize> {
    let v_0 = unsafe { _read_a_little_endian_from_ptr_u64(buf_ptr) };
    let v_0_a = v_0 ^ c8.v1;
    let v_0_b = v_0 ^ c8.v2;
    let v_0_c = v_0 ^ c8.v3;
    let v_0_d = v_0 ^ c8.v4;
    let bits_0_a = PackedU64::new(v_0_a).may_have_zero_byte();
    let bits_0_b = PackedU64::new(v_0_b).may_have_zero_byte();
    let bits_0_c = PackedU64::new(v_0_c).may_have_zero_byte();
    let bits_0_d = PackedU64::new(v_0_d).may_have_zero_byte();
    let bits_0_abcd = bits_0_a | bits_0_b | bits_0_c | bits_0_d;
    let base = buf_ptr.usz_offset_from(st_ptr);
    //
    _return_nechr_qpl(base, bits_0_abcd)
}

#[inline(always)]
fn _nechr_c8_aa_x2(buf_ptr: *const u8, c8: B8Qpl, st_ptr: *const u8) -> Option<usize> {
    let r = _nechr_c8_aa_x1(buf_ptr, c8, st_ptr);
    if r.is_some() {
        return r;
    }
    let r = _nechr_c8_aa_x1(unsafe { buf_ptr.add(8) }, c8, st_ptr);
    if r.is_some() {
        return r;
    }
    None
}

#[inline(always)]
fn _nechr_c8_aa_x4(buf_ptr: *const u8, c8: B8Qpl, st_ptr: *const u8) -> Option<usize> {
    let r = _nechr_c8_aa_x2(buf_ptr, c8, st_ptr);
    if r.is_some() {
        return r;
    }
    let r = _nechr_c8_aa_x2(unsafe { buf_ptr.add(8 * 2) }, c8, st_ptr);
    if r.is_some() {
        return r;
    }
    None
}

#[inline(always)]
fn _nechr_c8_aa_x8(buf_ptr: *const u8, c8: B8Qpl, st_ptr: *const u8) -> Option<usize> {
    let r = _nechr_c8_aa_x4(buf_ptr, c8, st_ptr);
    if r.is_some() {
        return r;
    }
    let r = _nechr_c8_aa_x4(unsafe { buf_ptr.add(8 * 4) }, c8, st_ptr);
    if r.is_some() {
        return r;
    }
    None
}

#[inline(always)]
fn _nechr_c4_uu_x1(buf_ptr: *const u8, c4: B4Qpl, st_ptr: *const u8) -> Option<usize> {
    _nechr_c4_aa_x1(buf_ptr, c4, st_ptr)
}

#[inline(always)]
fn _nechr_c4_aa_x1(buf_ptr: *const u8, c4: B4Qpl, st_ptr: *const u8) -> Option<usize> {
    let v_0 = unsafe { _read_a_little_endian_from_ptr_u32(buf_ptr) };
    let v_0_a = v_0 ^ c4.v1;
    let v_0_b = v_0 ^ c4.v2;
    let v_0_c = v_0 ^ c4.v3;
    let v_0_d = v_0 ^ c4.v4;
    let bits_0_a = PackedU32::new(v_0_a).may_have_zero_byte();
    let bits_0_b = PackedU32::new(v_0_b).may_have_zero_byte();
    let bits_0_c = PackedU32::new(v_0_c).may_have_zero_byte();
    let bits_0_d = PackedU32::new(v_0_d).may_have_zero_byte();
    let bits_0_abcd = bits_0_a | bits_0_b | bits_0_c | bits_0_d;
    let base = buf_ptr.usz_offset_from(st_ptr);
    //
    _return_nechr_qpl(base, bits_0_abcd)
}

#[inline(always)]
fn _nechr_c4_aa_x2(buf_ptr: *const u8, c4: B4Qpl, st_ptr: *const u8) -> Option<usize> {
    let r = _nechr_c4_aa_x1(buf_ptr, c4, st_ptr);
    if r.is_some() {
        return r;
    }
    let r = _nechr_c4_aa_x1(unsafe { buf_ptr.add(4) }, c4, st_ptr);
    if r.is_some() {
        return r;
    }
    None
}

#[inline(always)]
fn _nechr_c4_aa_x4(buf_ptr: *const u8, c4: B4Qpl, st_ptr: *const u8) -> Option<usize> {
    let r = _nechr_c4_aa_x2(buf_ptr, c4, st_ptr);
    if r.is_some() {
        return r;
    }
    let r = _nechr_c4_aa_x2(unsafe { buf_ptr.add(4 * 2) }, c4, st_ptr);
    if r.is_some() {
        return r;
    }
    None
}

#[inline(always)]
fn _nechr_c4_aa_x8(buf_ptr: *const u8, c4: B4Qpl, st_ptr: *const u8) -> Option<usize> {
    let r = _nechr_c4_aa_x4(buf_ptr, c4, st_ptr);
    if r.is_some() {
        return r;
    }
    let r = _nechr_c4_aa_x4(unsafe { buf_ptr.add(4 * 4) }, c4, st_ptr);
    if r.is_some() {
        return r;
    }
    None
}

#[inline(always)]
fn _nechr_c2_aa_x1(buf_ptr: *const u8, c2: B2Qpl, st_ptr: *const u8) -> Option<usize> {
    let v_0 = unsafe { _read_a_little_endian_from_ptr_u16(buf_ptr) };
    let v_0_a = v_0 ^ c2.v1;
    let v_0_b = v_0 ^ c2.v2;
    let v_0_c = v_0 ^ c2.v3;
    let v_0_d = v_0 ^ c2.v4;
    let bits_0_a = PackedU16::new(v_0_a).may_have_zero_byte();
    let bits_0_b = PackedU16::new(v_0_b).may_have_zero_byte();
    let bits_0_c = PackedU16::new(v_0_c).may_have_zero_byte();
    let bits_0_d = PackedU16::new(v_0_d).may_have_zero_byte();
    let bits_0_abcd = bits_0_a | bits_0_b | bits_0_c | bits_0_d;
    let base = buf_ptr.usz_offset_from(st_ptr);
    //
    _return_nechr_qpl(base, bits_0_abcd)
}

#[inline(always)]
fn _nechr_c1_aa_x1(buf_ptr: *const u8, c1: B1Qpl, st_ptr: *const u8) -> Option<usize> {
    let aa_ptr = buf_ptr;
    let aac = unsafe { *aa_ptr };
    if aac != c1.v1 && aac != c1.v2 && aac != c1.v3 && aac != c1.v4 {
        Some(buf_ptr.usz_offset_from(st_ptr))
    } else {
        None
    }
}

/*
 * The simple implementation:

#[inline(always)]
pub fn _memnechr_qpl_impl(buf: &[u8], needle: B1Qpl) -> Option<usize> {
    for i in 0..buf.len() {
        if buf[i] != needle.v1 && buf[i] != needle.v2 && buf[i] != needle.v3 && buf[i] != needle.v4 {
            return Some(i);
        }
    }
    None
}
*/
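
/*
 * A minimal sanity check of those semantics (hypothetical: `B1Qpl` is
 * assumed here to be constructible from four bytes via `B1Qpl::new`):
 *
 * let needle = B1Qpl::new(b'a', b'b', b'c', b'd');
 * assert_eq!(_memnechr_qpl_impl(b"aabXcd", needle), Some(3));
 * assert_eq!(_memnechr_qpl_impl(b"abcd", needle), None);
 */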