// diskann_wide/emulated.rs

/*
 * Copyright (c) Microsoft Corporation.
 * Licensed under the MIT license.
 */

use half::f16;

use super::{
    SplitJoin, SupportedLaneCount,
    arch::{self, emulated::Scalar},
    bitmask::BitMask,
    constant::Const,
    reference::{ReferenceAbs, ReferenceCast, ReferenceScalarOps, ReferenceShifts, TreeReduce},
    traits::{
        ArrayType, SIMDAbs, SIMDCast, SIMDDotProduct, SIMDMask, SIMDMinMax, SIMDMulAdd,
        SIMDPartialEq, SIMDPartialOrd, SIMDReinterpret, SIMDSelect, SIMDSumTree, SIMDVector,
    },
};

/// An emulated SIMD vector.
///
/// The emulated implementation behaves just like an intrinsic, but the APIs are implemented
/// using loops over arrays rather than dispatching to platform-specific instructions.
///
/// The idea behind this type is that it can be used on architectures where explicit backend
/// support has not been added, or when an architecture does not support a given type/length
/// pair well.
///
/// Furthermore, it can be used when developing new back-ends to provide fallback
/// implementations. This allows new back-ends to be developed one piece at a time instead
/// of all at once.
///
/// NOTE: The alignment requirements of an emulated vector *will* be different from the
/// alignment requirements of an actual intrinsic.
///
/// Higher level code *must not* rely on alignments being compatible across architectures!
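///
/// # Example
///
/// A minimal illustrative sketch (not from the original source): every operation is a
/// plain per-lane loop over the backing array.
///
/// ```ignore
/// let a = Emulated::<f32, 4>::splat(Scalar, 2.0);
/// let b = Emulated::<f32, 4>::from_array(Scalar, [1.0, 2.0, 3.0, 4.0]);
/// assert_eq!((a * b).to_array(), [2.0, 4.0, 6.0, 8.0]);
/// ```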
#[derive(Debug, Clone, Copy)]
pub struct Emulated<T, const N: usize, A = Scalar>(pub(crate) [T; N], A);

impl<T, const N: usize, A> Emulated<T, N, A> {
    /// Construct a vector whose lane `i` holds `f(i)`, tagged with the architecture `arch`.
    pub fn from_arch_fn<F>(arch: A, f: F) -> Self
    where
        F: FnMut(usize) -> T,
    {
        Self(core::array::from_fn(f), arch)
    }
}

impl<T, const N: usize, A> SIMDVector for Emulated<T, N, A>
where
    T: Copy + std::fmt::Debug + Default,
    Const<N>: ArrayType<T, Type = [T; N]>,
    BitMask<N, A>: SIMDMask<Arch = A>,
    A: arch::Sealed,
{
    type Arch = A;
    type Scalar = T;
    type Underlying = [T; N];
    type ConstLanes = Const<N>;
    const LANES: usize = N;
    type Mask = BitMask<N, A>;

    /// The underlying behavior is emulated using loops and is not accelerated by back-end
    /// intrinsics.
    const EMULATED: bool = true;

    /// Return the architecture token carried by this vector.
    fn arch(self) -> A {
        self.1
    }

    fn default(arch: A) -> Self {
        Self([T::default(); N], arch)
    }

    /// Return the underlying array.
    fn to_underlying(self) -> Self::Underlying {
        self.0
    }

    /// Construct from the underlying array.
    fn from_underlying(arch: A, repr: [T; N]) -> Self {
        Self(repr, arch)
    }

    /// Return the underlying array.
    fn to_array(self) -> [T; N] {
        self.0
    }

    /// Construct from the underlying array.
    fn from_array(arch: A, x: [T; N]) -> Self {
        Self(x, arch)
    }

    /// Broadcast the provided scalar across all lanes.
    fn splat(arch: A, value: Self::Scalar) -> Self {
        Self([value; N], arch)
    }

    /// Load `N` contiguous values starting at `ptr`.
    #[inline(always)]
    unsafe fn load_simd(arch: A, ptr: *const T) -> Self {
        // SAFETY: The caller asserts that `ptr` is contiguously readable for `N` values.
        Self(
            unsafe { std::ptr::read_unaligned(ptr.cast::<[T; N]>()) },
            arch,
        )
    }

    /// Only load values when the corresponding mask lane is set.
    unsafe fn load_simd_masked_logical(arch: A, ptr: *const T, mask: Self::Mask) -> Self {
        Self::from_arch_fn(arch, |i| {
            if mask.get_unchecked(i) {
                // SAFETY: The caller ensures it's safe to access this offset from `ptr`
                // because the lane in `mask` is set.
                unsafe { std::ptr::read_unaligned(ptr.add(i)) }
            } else {
                T::default()
            }
        })
    }

    /// Only load the first `first` items. Set the rest to zero.
    #[inline(always)]
    unsafe fn load_simd_first(arch: A, ptr: *const T, first: usize) -> Self {
        Self::from_arch_fn(arch, |i| {
            if i < first {
                // SAFETY: The caller ensures it's safe to access the first `first` values
                // beginning at `ptr`.
                unsafe { std::ptr::read_unaligned(ptr.add(i)) }
            } else {
                T::default()
            }
        })
    }

    /// Store all `N` lanes contiguously to `ptr`.
    #[inline(always)]
    unsafe fn store_simd(self, ptr: *mut T) {
        // SAFETY: The caller asserts that it is safe to write `N` contiguous values to `ptr`.
        unsafe { ptr.cast::<[T; N]>().write_unaligned(self.0) }
    }

    /// Only store values when the corresponding mask lane is set.
    unsafe fn store_simd_masked_logical(self, ptr: *mut T, mask: Self::Mask) {
        for (i, v) in self.0.iter().enumerate() {
            if mask.get_unchecked(i) {
                // SAFETY: The caller asserts it is safe to write to offsets with the
                // corresponding bit mask set.
                unsafe { ptr.add(i).write_unaligned(*v) };
            }
        }
    }

    /// Only store the first `first` items. Memory beyond that point is left untouched.
    #[inline(always)]
    unsafe fn store_simd_first(self, ptr: *mut T, first: usize) {
        for (i, v) in self.0.iter().enumerate().take(first) {
            // SAFETY: The caller asserts it is safe to write to the first `first` offsets
            // beginning at `ptr`.
            unsafe { ptr.add(i).write_unaligned(*v) };
        }
    }
}

/// Binary Ops
impl<T, const N: usize, A> std::ops::Add for Emulated<T, N, A>
where
    T: ReferenceScalarOps + Copy + std::fmt::Debug + Default,
    Const<N>: ArrayType<T>,
{
    type Output = Self;
    fn add(self, rhs: Self) -> Self {
        Self::from_arch_fn(self.1, |i| self.0[i].expected_add_(rhs.0[i]))
    }
}

impl<T, const N: usize, A> std::ops::Sub for Emulated<T, N, A>
where
    T: ReferenceScalarOps,
{
    type Output = Self;

    #[inline(always)]
    fn sub(self, rhs: Self) -> Self {
        Self::from_arch_fn(self.1, |i| self.0[i].expected_sub_(rhs.0[i]))
    }
}

impl<T, const N: usize, A> std::ops::Mul for Emulated<T, N, A>
where
    T: ReferenceScalarOps,
{
    type Output = Self;
    fn mul(self, rhs: Self) -> Self {
        Self::from_arch_fn(self.1, |i| self.0[i].expected_mul_(rhs.0[i]))
    }
}

/// MulAdd
impl<T, const N: usize, A> SIMDMulAdd for Emulated<T, N, A>
where
    T: ReferenceScalarOps,
{
    #[inline(always)]
    fn mul_add_simd(self, rhs: Self, accumulator: Self) -> Self {
        Self::from_arch_fn(self.1, |i| {
            self.0[i].expected_fma_(rhs.0[i], accumulator.0[i])
        })
    }
}

/// MinMax
impl<T, const N: usize, A> SIMDMinMax for Emulated<T, N, A>
where
    T: ReferenceScalarOps,
{
    #[inline(always)]
    fn min_simd(self, rhs: Self) -> Self {
        Self::from_arch_fn(self.1, |i| self.0[i].expected_min_(rhs.0[i]))
    }
    #[inline(always)]
    fn max_simd(self, rhs: Self) -> Self {
        Self::from_arch_fn(self.1, |i| self.0[i].expected_max_(rhs.0[i]))
    }
}

/// Abs
impl<T, const N: usize, A> SIMDAbs for Emulated<T, N, A>
where
    T: ReferenceAbs,
{
    #[inline(always)]
    fn abs_simd(self) -> Self {
        Self::from_arch_fn(self.1, |i| self.0[i].expected_abs_())
    }
}

/// SIMDPartialEq
impl<T, const N: usize, A> SIMDPartialEq for Emulated<T, N, A>
where
    T: PartialEq,
    Self: SIMDVector,
{
    #[inline(always)]
    fn eq_simd(self, other: Self) -> Self::Mask {
        Self::Mask::from_fn(self.arch(), |i| self.0[i] == other.0[i])
    }

    #[inline(always)]
    fn ne_simd(self, other: Self) -> Self::Mask {
        Self::Mask::from_fn(self.arch(), |i| self.0[i] != other.0[i])
    }
}

/// SIMDPartialOrd
impl<T, const N: usize, A> SIMDPartialOrd for Emulated<T, N, A>
where
    T: PartialOrd,
    Self: SIMDVector,
{
    #[inline(always)]
    fn lt_simd(self, other: Self) -> Self::Mask {
        Self::Mask::from_fn(self.arch(), |i| self.0[i] < other.0[i])
    }

    #[inline(always)]
    fn le_simd(self, other: Self) -> Self::Mask {
        Self::Mask::from_fn(self.arch(), |i| self.0[i] <= other.0[i])
    }

    #[inline(always)]
    fn gt_simd(self, other: Self) -> Self::Mask {
        Self::Mask::from_fn(self.arch(), |i| self.0[i] > other.0[i])
    }

    #[inline(always)]
    fn ge_simd(self, other: Self) -> Self::Mask {
        Self::Mask::from_fn(self.arch(), |i| self.0[i] >= other.0[i])
    }
}

// Bit Ops
impl<T, const N: usize, A> std::ops::BitAnd for Emulated<T, N, A>
where
    T: std::ops::BitAnd<Output = T> + Copy,
{
    type Output = Self;
    #[inline(always)]
    fn bitand(self, other: Self) -> Self::Output {
        Self::from_arch_fn(self.1, |i| self.0[i] & other.0[i])
    }
}

impl<T, const N: usize, A> std::ops::BitOr for Emulated<T, N, A>
where
    T: std::ops::BitOr<Output = T> + Copy,
{
    type Output = Self;
    #[inline(always)]
    fn bitor(self, other: Self) -> Self::Output {
        Self::from_arch_fn(self.1, |i| self.0[i] | other.0[i])
    }
}

impl<T, const N: usize, A> std::ops::BitXor for Emulated<T, N, A>
where
    T: std::ops::BitXor<Output = T> + Copy,
{
    type Output = Self;
    #[inline(always)]
    fn bitxor(self, other: Self) -> Self::Output {
        Self::from_arch_fn(self.1, |i| self.0[i] ^ other.0[i])
    }
}

impl<T, const N: usize, A> std::ops::Not for Emulated<T, N, A>
where
    T: std::ops::Not<Output = T> + Copy,
{
    type Output = Self;
    #[inline(always)]
    fn not(self) -> Self::Output {
        Self::from_arch_fn(self.1, |i| !self.0[i])
    }
}

impl<T, const N: usize, A> std::ops::Shl for Emulated<T, N, A>
where
    T: ReferenceShifts,
{
    type Output = Self;
    #[inline(always)]
    fn shl(self, rhs: Self) -> Self::Output {
        Self::from_arch_fn(self.1, |i| self.0[i].expected_shl_(rhs.0[i]))
    }
}

impl<T, const N: usize, A> std::ops::Shl<T> for Emulated<T, N, A>
where
    T: ReferenceShifts,
{
    type Output = Self;
    #[inline(always)]
    fn shl(self, rhs: T) -> Self::Output {
        Self::from_arch_fn(self.1, |i| self.0[i].expected_shl_(rhs))
    }
}

impl<T, const N: usize, A> std::ops::Shr for Emulated<T, N, A>
where
    T: ReferenceShifts,
{
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: Self) -> Self::Output {
        Self::from_arch_fn(self.1, |i| self.0[i].expected_shr_(rhs.0[i]))
    }
}

impl<T, const N: usize, A> std::ops::Shr<T> for Emulated<T, N, A>
where
    T: ReferenceShifts,
{
    type Output = Self;
    #[inline(always)]
    fn shr(self, rhs: T) -> Self::Output {
        Self::from_arch_fn(self.1, |i| self.0[i].expected_shr_(rhs))
    }
}

//////////////////
// Dot Products //
//////////////////

// i16 to i32
macro_rules! impl_simd_dot_product_i16_to_i32 {
    ($N:literal, $TwoN:literal) => {
        /// Promote intermediate values to `i32` and then perform accumulation: output
        /// lane `i` accumulates `left[2i] * right[2i] + left[2i + 1] * right[2i + 1]`.
        impl<A> SIMDDotProduct<Emulated<i16, $TwoN, A>> for Emulated<i32, $N, A>
        where
            A: arch::Sealed,
        {
            fn dot_simd(
                self,
                left: Emulated<i16, $TwoN, A>,
                right: Emulated<i16, $TwoN, A>,
            ) -> Self {
                self + Self::from_arch_fn(self.1, |i| {
                    let l0: i32 = left.0[2 * i].into();
                    let l1: i32 = left.0[2 * i + 1].into();

                    let r0: i32 = right.0[2 * i].into();
                    let r1: i32 = right.0[2 * i + 1].into();
                    l0.expected_fma_(r0, l1.expected_mul_(r1))
                })
            }
        }
    };
}

// i8/u8 to i32
macro_rules! impl_simd_dot_product_iu8_to_i32 {
    ($N:literal, $TwoN:literal) => {
        /// Promote intermediate values to `i32` and then perform accumulation: output
        /// lane `i` accumulates the dot product of the four `u8`/`i8` pairs beginning
        /// at index `4i`.
        impl<A> SIMDDotProduct<Emulated<u8, $TwoN, A>, Emulated<i8, $TwoN, A>>
            for Emulated<i32, $N, A>
        where
            A: arch::Sealed,
        {
            fn dot_simd(self, left: Emulated<u8, $TwoN, A>, right: Emulated<i8, $TwoN, A>) -> Self {
                self + Self::from_arch_fn(self.1, |i| {
                    let l0: i32 = left.0[4 * i].into();
                    let l1: i32 = left.0[4 * i + 1].into();
                    let l2: i32 = left.0[4 * i + 2].into();
                    let l3: i32 = left.0[4 * i + 3].into();

                    let r0: i32 = right.0[4 * i].into();
                    let r1: i32 = right.0[4 * i + 1].into();
                    let r2: i32 = right.0[4 * i + 2].into();
                    let r3: i32 = right.0[4 * i + 3].into();

                    let a = l0.expected_fma_(r0, l1.expected_mul_(r1));
                    let b = l2.expected_fma_(r2, l3.expected_mul_(r3));
                    a + b
                })
            }
        }

        impl<A> SIMDDotProduct<Emulated<i8, $TwoN, A>, Emulated<u8, $TwoN, A>>
            for Emulated<i32, $N, A>
        where
            A: arch::Sealed,
        {
            fn dot_simd(self, left: Emulated<i8, $TwoN, A>, right: Emulated<u8, $TwoN, A>) -> Self {
                self.dot_simd(right, left)
            }
        }
    };
}

impl_simd_dot_product_i16_to_i32!(8, 16);
impl_simd_dot_product_i16_to_i32!(16, 32);

impl_simd_dot_product_iu8_to_i32!(8, 32);
impl_simd_dot_product_iu8_to_i32!(16, 64);

////////////
// Select //
////////////

impl<T, const N: usize, A> SIMDSelect<Emulated<T, N, A>> for BitMask<N, A>
where
    T: Copy,
    A: arch::Sealed,
    Const<N>: SupportedLaneCount,
    BitMask<N, A>: SIMDMask<Arch = A>,
    Emulated<T, N, A>: SIMDVector<Mask = BitMask<N, A>>,
{
    #[inline(always)]
    fn select(self, x: Emulated<T, N, A>, y: Emulated<T, N, A>) -> Emulated<T, N, A> {
        Emulated::from_arch_fn(self.arch(), |i| {
            if self.get_unchecked(i) {
                x.0[i]
            } else {
                y.0[i]
            }
        })
    }
}

/////////////
// SumTree //
/////////////

macro_rules! impl_sumtree {
    ($T:ty, $N:literal) => {
        impl<A> SIMDSumTree for Emulated<$T, $N, A>
        where
            A: arch::Sealed,
        {
            #[inline(always)]
            fn sum_tree(self) -> $T {
                self.0.tree_reduce(|x, y| x.expected_add_(y))
            }
        }
    };
    ($T:ty, $($N:literal),* $(,)?) => {
        $(impl_sumtree!($T, $N);)*
    };
}

impl_sumtree!(f32, 1, 2, 4, 8, 16);
impl_sumtree!(i32, 4, 8, 16);
impl_sumtree!(u32, 4, 8, 16);

////////////////
// Conversion //
////////////////

macro_rules! impl_from {
    (f16 => f32, $N:literal) => {
        impl<A> From<Emulated<f16, $N, A>> for Emulated<f32, $N, A> {
            #[inline(always)]
            fn from(value: Emulated<f16, $N, A>) -> Self {
                Emulated(value.0.map(|v| v.reference_cast()), value.1)
            }
        }
    };
    ($from:ty => $to:ty, $N:literal) => {
        impl<A> From<Emulated<$from, $N, A>> for Emulated<$to, $N, A> {
            #[inline(always)]
            fn from(value: Emulated<$from, $N, A>) -> Self {
                Emulated(value.0.map(|v| v.into()), value.1)
            }
        }
    };
}

impl_from!(f16 => f32, 1);
impl_from!(f16 => f32, 2);
impl_from!(f16 => f32, 4);
impl_from!(f16 => f32, 8);
impl_from!(f16 => f32, 16);

impl_from!(u8 => i16, 16);
impl_from!(u8 => i16, 32);

impl_from!(i8 => i16, 16);
impl_from!(i8 => i16, 32);

impl_from!(i8 => i32, 1);
impl_from!(i8 => i32, 4);

impl_from!(u8 => i32, 1);
impl_from!(u8 => i32, 4);

/////////////////
// Reinterpret //
/////////////////

macro_rules! impl_little_endian_transmute_cast {
    (<$from:ty, $Nfrom:literal> => <$to:ty, $Nto:literal>) => {
        #[cfg(target_endian = "little")]
        impl<A> SIMDReinterpret<Emulated<$to, $Nto, A>> for Emulated<$from, $Nfrom, A>
        where
            A: arch::Sealed,
        {
            fn reinterpret_simd(self) -> Emulated<$to, $Nto, A> {
                let array = self.0;
                // SAFETY: This is only ever instantiated with arrays of primitive
                // types that hold no resources, no padding, and are valid for all
                // possible bit-patterns.
                let casted = unsafe { std::mem::transmute::<[$from; $Nfrom], [$to; $Nto]>(array) };
                Emulated(casted, self.1)
            }
        }
    };
}

impl_little_endian_transmute_cast!(<u32, 8> => <i16, 16>);

impl_little_endian_transmute_cast!(<u32, 16> => <u8, 64>);
impl_little_endian_transmute_cast!(<u32, 16> => <i8, 64>);

impl_little_endian_transmute_cast!(<u8, 64> => <u32, 16>);
impl_little_endian_transmute_cast!(<i8, 64> => <u32, 16>);

/////////////
// Casting //
/////////////

macro_rules! impl_cast {
    ($from:ty => $to:ty, $N:literal) => {
        impl<A> SIMDCast<$to> for Emulated<$from, $N, A>
        where
            A: arch::Sealed,
        {
            type Cast = Emulated<$to, $N, A>;
            #[inline(always)]
            fn simd_cast(self) -> Self::Cast {
                Emulated::from_arch_fn(self.arch(), |i| self.0[i].reference_cast())
            }
        }
    };
}

impl_cast!(f16 => f32, 8);
impl_cast!(f16 => f32, 16);

impl_cast!(f32 => f16, 8);
impl_cast!(f32 => f16, 16);

impl_cast!(i32 => f32, 8);

///////////////
// SplitJoin //
///////////////

macro_rules! impl_splitjoin {
    ($type:ty, $N:literal => $N2:literal) => {
        impl<A> SplitJoin for Emulated<$type, $N, A>
        where
            A: Copy,
        {
            type Halved = Emulated<$type, $N2, A>;

            #[inline(always)]
            fn split(self) -> $crate::LoHi<Self::Halved> {
                let $crate::LoHi { lo, hi } = self.0.split();
                let arch = self.1;
                $crate::LoHi::new(Emulated(lo, arch), Emulated(hi, arch))
            }

            #[inline(always)]
            fn join(lohi: $crate::LoHi<Self::Halved>) -> Self {
                Self($crate::LoHi::new(lohi.lo.0, lohi.hi.0).join(), lohi.lo.1)
            }
        }
    };
}

impl_splitjoin!(i8, 32 => 16);
impl_splitjoin!(i8, 64 => 32);

impl_splitjoin!(i16, 16 => 8);
impl_splitjoin!(i16, 32 => 16);

impl_splitjoin!(i32, 8 => 4);
impl_splitjoin!(i32, 16 => 8);

impl_splitjoin!(u8, 32 => 16);
impl_splitjoin!(u8, 64 => 32);

impl_splitjoin!(u32, 8 => 4);
impl_splitjoin!(u32, 16 => 8);
impl_splitjoin!(u64, 4 => 2);

impl_splitjoin!(f32, 16 => 8);
impl_splitjoin!(f32, 8 => 4);

impl_splitjoin!(f16, 16 => 8);

///////////
// Tests //
///////////

#[cfg(test)]
mod test_emulated {
    use half::f16;

    use super::*;
    use crate::{reference::ReferenceScalarOps, test_utils};

    // Test loading logic - ensure that no out-of-bounds accesses are made.
    // In particular, this is meant to be run under `Miri` to ensure that our guarantees
    // regarding out-of-bounds accesses are honored.
    #[test]
    fn test_load() {
        // Floating Point
        #[cfg(not(miri))] // Miri does not have ph-to-ps conversion.
        test_utils::test_load_simd::<f16, 8, Emulated<f16, 8>>(Scalar);
        test_utils::test_load_simd::<f32, 4, Emulated<f32, 4>>(Scalar);
        test_utils::test_load_simd::<f32, 8, Emulated<f32, 8>>(Scalar);

        // Unsigned Integers
        test_utils::test_load_simd::<u8, 8, Emulated<u8, 8>>(Scalar);
        test_utils::test_load_simd::<u8, 16, Emulated<u8, 16>>(Scalar);

        test_utils::test_load_simd::<u16, 4, Emulated<u16, 4>>(Scalar);
        test_utils::test_load_simd::<u16, 8, Emulated<u16, 8>>(Scalar);
        test_utils::test_load_simd::<u16, 16, Emulated<u16, 16>>(Scalar);

        test_utils::test_load_simd::<u32, 2, Emulated<u32, 2>>(Scalar);
        test_utils::test_load_simd::<u32, 4, Emulated<u32, 4>>(Scalar);
        test_utils::test_load_simd::<u32, 8, Emulated<u32, 8>>(Scalar);

        // Signed Integers
        test_utils::test_load_simd::<i8, 8, Emulated<i8, 8>>(Scalar);
        test_utils::test_load_simd::<i8, 16, Emulated<i8, 16>>(Scalar);

        test_utils::test_load_simd::<i16, 4, Emulated<i16, 4>>(Scalar);
        test_utils::test_load_simd::<i16, 8, Emulated<i16, 8>>(Scalar);
        test_utils::test_load_simd::<i16, 16, Emulated<i16, 16>>(Scalar);

        test_utils::test_load_simd::<i32, 2, Emulated<i32, 2>>(Scalar);
        test_utils::test_load_simd::<i32, 4, Emulated<i32, 4>>(Scalar);
        test_utils::test_load_simd::<i32, 8, Emulated<i32, 8>>(Scalar);
    }

    #[test]
    fn test_store() {
        // Floating Point
        #[cfg(not(miri))] // Miri does not have ph-to-ps conversion.
        test_utils::test_store_simd::<f16, 8, Emulated<f16, 8>>(Scalar);
        test_utils::test_store_simd::<f32, 4, Emulated<f32, 4>>(Scalar);
        test_utils::test_store_simd::<f32, 8, Emulated<f32, 8>>(Scalar);

        // Unsigned Integers
        test_utils::test_store_simd::<u8, 8, Emulated<u8, 8>>(Scalar);
        test_utils::test_store_simd::<u8, 16, Emulated<u8, 16>>(Scalar);

        test_utils::test_store_simd::<u16, 4, Emulated<u16, 4>>(Scalar);
        test_utils::test_store_simd::<u16, 8, Emulated<u16, 8>>(Scalar);
        test_utils::test_store_simd::<u16, 16, Emulated<u16, 16>>(Scalar);

        test_utils::test_store_simd::<u32, 2, Emulated<u32, 2>>(Scalar);
        test_utils::test_store_simd::<u32, 4, Emulated<u32, 4>>(Scalar);
        test_utils::test_store_simd::<u32, 8, Emulated<u32, 8>>(Scalar);

        // Signed Integers
        test_utils::test_store_simd::<i8, 8, Emulated<i8, 8>>(Scalar);
        test_utils::test_store_simd::<i8, 16, Emulated<i8, 16>>(Scalar);

        test_utils::test_store_simd::<i16, 4, Emulated<i16, 4>>(Scalar);
        test_utils::test_store_simd::<i16, 8, Emulated<i16, 8>>(Scalar);
        test_utils::test_store_simd::<i16, 16, Emulated<i16, 16>>(Scalar);

        test_utils::test_store_simd::<i32, 2, Emulated<i32, 2>>(Scalar);
        test_utils::test_store_simd::<i32, 4, Emulated<i32, 4>>(Scalar);
        test_utils::test_store_simd::<i32, 8, Emulated<i32, 8>>(Scalar);
    }

    // Only test a subset of constructors as all `Emulated` have the same implementation.
    #[test]
    fn test_constructors() {
        test_utils::ops::test_splat::<u8, 64, Emulated<u8, 64>>(Scalar);
        let x = Emulated::<u32, 8>::default(Scalar);
        assert_eq!(x.to_underlying(), [0; 8]);

        let x = Emulated::<u32, 8>::from_underlying(Scalar, [1; 8]);
        assert_eq!(x.to_underlying(), [1; 8]);
    }
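
    // An added sketch (not part of the original suite) pinning down the documented
    // `load_simd_first` semantics: lanes at and beyond `first` are zeroed.
    #[test]
    fn test_load_first_zeroes_tail() {
        let data: [u32; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
        // SAFETY: `data` holds at least 3 readable values.
        let v = unsafe { Emulated::<u32, 8>::load_simd_first(Scalar, data.as_ptr(), 3) };
        assert_eq!(v.to_array(), [1, 2, 3, 0, 0, 0, 0, 0]);
    }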

    // Wrap inside `Some` for compatibility with optional tests.
    const SC: Option<Scalar> = Some(Scalar);

    macro_rules! test_emulated {
        ($type:ty, $N:literal) => {
            test_utils::ops::test_add!(Emulated<$type, $N>, 0xba37c3f2cf666f87, SC);
            test_utils::ops::test_sub!(Emulated<$type, $N>, 0xeb755abd230e5d80, SC);
            test_utils::ops::test_mul!(Emulated<$type, $N>, 0x0a24ed76a54c3561, SC);
            test_utils::ops::test_fma!(Emulated<$type, $N>, 0xa906c44505abe9ca, SC);
            test_utils::ops::test_minmax!(Emulated<$type, $N>, 0x959522be5234d492, SC);

            test_utils::ops::test_cmp!(Emulated<$type, $N>, 0x9b58e6cbd8330c2d, SC);
            test_utils::ops::test_select!(Emulated<$type, $N>, 0x610aca3aa4d77c0a, SC);
        };
        (unsigned, $type:ty, $N:literal) => {
            test_emulated!($type, $N);

            test_utils::ops::test_bitops!(Emulated<$type, $N>, 0x14fc7841e66bd162, SC);
        };
        (signed, $type:ty, $N:literal) => {
            test_emulated!($type, $N);

            test_utils::ops::test_bitops!(Emulated<$type, $N>, 0x850435f89f86f3b0, SC);
            test_utils::ops::test_abs!(Emulated<$type, $N>, 0x1842a2b86dfd9ecb, SC);
        };
    }

    // Emulated arithmetic.
    test_emulated!(f32, 1);
    test_emulated!(f32, 4);
    test_emulated!(f32, 8);
    test_emulated!(f32, 16);
    // test_emulated!(f64, 8);

    // unsigned integer
    test_emulated!(unsigned, u8, 16);

    test_emulated!(unsigned, u16, 16);
    test_emulated!(unsigned, u16, 32);

    test_emulated!(unsigned, u32, 1);
    test_emulated!(unsigned, u32, 4);
    test_emulated!(unsigned, u32, 8);
    test_emulated!(unsigned, u32, 16);

    test_emulated!(unsigned, u64, 2);
    test_emulated!(unsigned, u64, 4);
    test_emulated!(unsigned, u64, 8);
    test_emulated!(unsigned, u64, 16);

    // signed integer
    test_emulated!(signed, i8, 8);
    test_emulated!(signed, i8, 16);

    test_emulated!(signed, i16, 8);
    test_emulated!(signed, i16, 16);

    test_emulated!(signed, i32, 1);
    test_emulated!(signed, i32, 4);
    test_emulated!(signed, i32, 8);
    test_emulated!(signed, i32, 16);

    test_emulated!(signed, i64, 2);
    test_emulated!(signed, i64, 4);
    test_emulated!(signed, i64, 8);
    test_emulated!(signed, i64, 16);

    // Dot Products
    test_utils::dot_product::test_dot_product!(
        (Emulated<i16, 16>, Emulated<i16, 16>) => Emulated<i32, 8>, 0x3001f05604e96289, SC
    );
    test_utils::dot_product::test_dot_product!(
        (Emulated<i16, 32>, Emulated<i16, 32>) => Emulated<i32, 16>, 0x137ce7a540d9b1a2, SC
    );

    test_utils::dot_product::test_dot_product!(
        (Emulated<u8, 32>, Emulated<i8, 32>) => Emulated<i32, 8>, 0x3001f05604e96289, SC
    );
    test_utils::dot_product::test_dot_product!(
        (Emulated<i8, 32>, Emulated<u8, 32>) => Emulated<i32, 8>, 0x3001f05604e96289, SC
    );
    test_utils::dot_product::test_dot_product!(
        (Emulated<u8, 64>, Emulated<i8, 64>) => Emulated<i32, 16>, 0x3001f05604e96289, SC
    );
    test_utils::dot_product::test_dot_product!(
        (Emulated<i8, 64>, Emulated<u8, 64>) => Emulated<i32, 16>, 0x3001f05604e96289, SC
    );
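
    // An added sketch (not part of the original suite) pinning down the i16 -> i32
    // pairing documented above: output lane i accumulates
    // left[2i]*right[2i] + left[2i+1]*right[2i+1].
    #[test]
    fn test_dot_pairing_sketch() {
        let acc = Emulated::<i32, 8>::default(Scalar);
        let left = Emulated::<i16, 16>::from_array(Scalar, core::array::from_fn(|i| i as i16));
        let right = Emulated::<i16, 16>::splat(Scalar, 1);
        // With `right` all ones, lane i is (2i) + (2i + 1) = 4i + 1.
        assert_eq!(acc.dot_simd(left, right).to_array(), [1, 5, 9, 13, 17, 21, 25, 29]);
    }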

    // reductions
    test_utils::ops::test_sumtree!(Emulated<f32, 1>, 0x410bad8207a8ccfc, SC);
    test_utils::ops::test_sumtree!(Emulated<f32, 2>, 0xf2fc4e4bbd193493, SC);
    test_utils::ops::test_sumtree!(Emulated<f32, 4>, 0x8034d5a0cd2be14d, SC);
    test_utils::ops::test_sumtree!(Emulated<f32, 8>, 0x0f075940b7e3732c, SC);
    test_utils::ops::test_sumtree!(Emulated<f32, 16>, 0x5b3cb860e3f02d3c, SC);

    test_utils::ops::test_sumtree!(Emulated<i32, 4>, 0xf8c38f70a807e9d2, SC);
    test_utils::ops::test_sumtree!(Emulated<i32, 8>, 0xf8aa4a7e7a273e80, SC);
    test_utils::ops::test_sumtree!(Emulated<i32, 16>, 0x8d1a467fe835a9c5, SC);

    test_utils::ops::test_sumtree!(Emulated<u32, 4>, 0x5e4cffc86a21e90d, SC);
    test_utils::ops::test_sumtree!(Emulated<u32, 8>, 0xf43f19adb43bc611, SC);
    test_utils::ops::test_sumtree!(Emulated<u32, 16>, 0xa43dfe10aa9de860, SC);
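
    // An added sketch (not part of the original suite): `sum_tree` reduces pairwise,
    // e.g. (a + b) + (c + d) for four lanes, which for these values equals a plain sum.
    #[test]
    fn test_sum_tree_sketch() {
        let v = Emulated::<f32, 4>::from_array(Scalar, [1.0, 2.0, 3.0, 4.0]);
        assert_eq!(v.sum_tree(), 10.0);
    }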

    /////////////////
    // conversions //
    /////////////////

    test_utils::ops::test_lossless_convert!(
        Emulated<i8, 16> => Emulated<i16, 16>, 0x1b4f08a8b741d565, SC
    );
    test_utils::ops::test_lossless_convert!(
        Emulated<i8, 32> => Emulated<i16, 32>, 0xdf6f41eb836d4f46, SC
    );

    test_utils::ops::test_lossless_convert!(
        Emulated<i8, 1> => Emulated<i32, 1>, 0x318ceec0e9798353, SC
    );
    test_utils::ops::test_lossless_convert!(
        Emulated<i8, 4> => Emulated<i32, 4>, 0x9f5e1a437f7e7f3f, SC
    );

    test_utils::ops::test_lossless_convert!(
        Emulated<u8, 16> => Emulated<i16, 16>, 0x96611521fed02f98, SC
    );
    test_utils::ops::test_lossless_convert!(
        Emulated<u8, 32> => Emulated<i16, 32>, 0x6749d3aa94effa04, SC
    );

    test_utils::ops::test_lossless_convert!(
        Emulated<u8, 1> => Emulated<i32, 1>, 0x669cbd5c7bf6184e, SC
    );
    test_utils::ops::test_lossless_convert!(
        Emulated<u8, 4> => Emulated<i32, 4>, 0x75929494c5d333d0, SC
    );
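
    ///////////////
    // SplitJoin //
    ///////////////

    // An added sketch (not part of the original suite): `split` halves a vector into
    // `LoHi { lo, hi }` and `join` reassembles it.
    #[test]
    fn test_split_join_sketch() {
        let v = Emulated::<i32, 8>::from_array(Scalar, [0, 1, 2, 3, 4, 5, 6, 7]);
        let halves = v.split();
        assert_eq!(halves.lo.to_array(), [0, 1, 2, 3]);
        assert_eq!(halves.hi.to_array(), [4, 5, 6, 7]);
        assert_eq!(Emulated::<i32, 8>::join(halves).to_array(), [0, 1, 2, 3, 4, 5, 6, 7]);
    }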

    ///////////
    // Casts //
    ///////////

    test_utils::ops::test_cast!(Emulated<f16, 8> => Emulated<f32, 8>, 0x1e9e37b58fb3f1a8, SC);
    test_utils::ops::test_cast!(Emulated<f16, 16> => Emulated<f32, 16>, 0xd2b068a9bf3f9d24, SC);

    test_utils::ops::test_cast!(Emulated<f32, 8> => Emulated<f16, 8>, 0xe9d2dd426d89699d, SC);
    test_utils::ops::test_cast!(Emulated<f32, 16> => Emulated<f16, 16>, 0x2b637e21afd9ef6c, SC);

    test_utils::ops::test_cast!(Emulated<i32, 8> => Emulated<f32, 8>, 0x2b08e8ec7e49323b, SC);
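
    /////////////////
    // Reinterpret //
    /////////////////

    // An added sketch (not part of the original suite): `reinterpret_simd` is a
    // bit-level transmute, so on little-endian targets each u32 lane of 0x0302_0100
    // reads back as the bytes [0, 1, 2, 3].
    #[cfg(target_endian = "little")]
    #[test]
    fn test_reinterpret_sketch() {
        let v = Emulated::<u32, 16>::from_underlying(Scalar, [0x0302_0100_u32; 16]);
        let bytes: Emulated<u8, 64> = v.reinterpret_simd();
        let expected: [u8; 64] = core::array::from_fn(|i| (i % 4) as u8);
        assert_eq!(bytes.to_underlying(), expected);
    }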
}