1#![cfg_attr(not(feature = "std"), no_std)]
2#![allow(non_camel_case_types)]
3use bytemuck::Pod;
4use core::fmt::Debug;
5use num_complex::Complex;
6use pulp::Simd;
7use qd::Quad;
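// `Imply<T>` below is the standard type-equality helper: every `U` implements
// `Imply<T>` with `Is = T`, which lets later `where` clauses attach extra bounds
// to a type the trait system cannot otherwise name directly (see `RefOps` below).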
8pub mod imply {
10 pub trait Imply<T: ?Sized>: sealed::ImplyInner<T, Is = T> {}
11 impl<T: ?Sized, U: ?Sized> Imply<T> for U {}
12 mod sealed {
13 pub trait ImplyInner<T: ?Sized> {
14 type Is: ?Sized;
15 }
16 impl<T: ?Sized, U: ?Sized> ImplyInner<T> for U {
17 type Is = T;
18 }
19 }
20}
21use core::ops::*;
22use imply::Imply;
23use math_utils::*;
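/// Free-function wrappers over the `ComplexField`/`RealField` `*_impl` methods,
/// so call sites can write `abs(&x)` or `mul(&a, &b)` without naming the trait.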
24pub mod math_utils {
25 use crate::{ByRef, ComplexField, RealField, abs_impl};
26 #[inline(always)]
27 #[must_use]
28 pub fn eps<T: RealField>() -> T {
29 T::Real::epsilon_impl()
30 }
31 #[inline(always)]
32 #[must_use]
33 pub fn nbits<T: ComplexField>() -> usize {
34 T::Real::nbits_impl()
35 }
36 #[inline(always)]
37 #[must_use]
38 pub fn min_positive<T: RealField>() -> T {
39 T::min_positive_impl()
40 }
41 #[inline(always)]
42 #[must_use]
43 pub fn max_positive<T: RealField>() -> T {
44 T::max_positive_impl()
45 }
46 #[inline(always)]
47 #[must_use]
48 pub fn sqrt_min_positive<T: RealField>() -> T {
49 T::sqrt_min_positive_impl()
50 }
51 #[inline(always)]
52 #[must_use]
53 pub fn sqrt_max_positive<T: RealField>() -> T {
54 T::sqrt_max_positive_impl()
55 }
56 #[inline(always)]
57 #[must_use]
58 pub fn zero<T: ComplexField>() -> T {
59 T::zero_impl()
60 }
61 #[inline(always)]
62 #[must_use]
63 pub fn one<T: ComplexField>() -> T {
64 T::one_impl()
65 }
66 #[inline(always)]
67 #[must_use]
68 pub fn nan<T: ComplexField>() -> T {
69 T::nan_impl()
70 }
71 #[inline(always)]
72 #[must_use]
73 pub fn infinity<T: ComplexField>() -> T {
74 T::infinity_impl()
75 }
76 #[inline(always)]
77 #[must_use]
78 pub fn as_real<T: ComplexField>(value: &T) -> T {
79 T::from_real_impl(&T::real_part_impl((value).by_ref()))
80 }
81 #[inline(always)]
82 #[must_use]
83 pub fn real<T: ComplexField>(value: &T) -> T::Real {
84 T::real_part_impl((value).by_ref())
85 }
86 #[inline(always)]
87 #[must_use]
88 pub fn imag<T: ComplexField>(value: &T) -> T::Real {
89 T::imag_part_impl((value).by_ref())
90 }
91 #[inline(always)]
92 #[track_caller]
93 #[must_use]
94 pub fn neg<T: NegByRef>(value: &T) -> T::Output {
95 value.neg_by_ref()
96 }
97 #[inline(always)]
98 #[must_use]
99 pub fn copy<T: ComplexField>(value: &T) -> T {
100 T::copy_impl((value).by_ref())
101 }
102 #[inline(always)]
103 #[must_use]
104 pub fn conj<T: ComplexField>(value: &T) -> T {
105 T::conj_impl((value).by_ref())
106 }
107 #[inline(always)]
108 #[track_caller]
109 #[must_use]
110 pub fn add<T: AddByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
111 lhs.add_by_ref(rhs)
112 }
113 #[inline(always)]
114 #[track_caller]
115 #[must_use]
116 pub fn sub<T: SubByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
117 lhs.sub_by_ref(rhs)
118 }
119 #[inline(always)]
120 #[track_caller]
121 #[must_use]
122 pub fn mul<T: MulByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
123 lhs.mul_by_ref(rhs)
124 }
125 #[inline(always)]
126 #[track_caller]
127 #[must_use]
128 pub fn div<T: DivByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
129 lhs.div_by_ref(rhs)
130 }
131 #[inline(always)]
132 #[must_use]
133 pub fn mul_real<T: ComplexField>(lhs: &T, rhs: &T::Real) -> T {
134 T::mul_real_impl((lhs).by_ref(), (rhs).by_ref())
135 }
136 #[inline(always)]
137 #[must_use]
    pub fn mul_pow2<T: ComplexField>(lhs: &T, rhs: &T::Real) -> T {
        T::mul_pow2_impl((lhs).by_ref(), (rhs).by_ref())
    }
141 #[inline(always)]
142 #[must_use]
143 pub fn abs1<T: ComplexField>(value: &T) -> T::Real {
144 T::abs1_impl((value).by_ref())
145 }
146 #[inline(always)]
147 #[must_use]
148 pub fn absmax<T: ComplexField>(value: &T) -> T::Real {
149 if const { T::IS_REAL } {
150 T::abs1_impl(value)
151 } else {
152 &T::Real::abs1_impl(&real(value))
153 + &T::Real::abs1_impl(&imag(value))
154 }
155 }
156 #[inline(always)]
157 #[must_use]
158 pub fn abs<T: ComplexField>(value: &T) -> T::Real {
159 T::abs_impl((value).by_ref())
160 }
161 #[inline(always)]
162 #[must_use]
163 pub fn hypot<T: RealField>(lhs: &T, rhs: &T) -> T {
164 abs_impl::<T::Real>(lhs.clone(), rhs.clone())
165 }
166 #[inline(always)]
167 #[must_use]
168 pub fn abs2<T: ComplexField>(value: &T) -> T::Real {
169 T::abs2_impl((value).by_ref())
170 }
171 #[inline(always)]
172 #[must_use]
173 pub fn max<T: RealField>(lhs: &T, rhs: &T) -> T {
174 if lhs > rhs { copy(lhs) } else { copy(rhs) }
175 }
176 #[inline(always)]
177 #[must_use]
178 pub fn min<T: RealField>(lhs: &T, rhs: &T) -> T {
179 if lhs < rhs { copy(lhs) } else { copy(rhs) }
180 }
181 #[inline(always)]
182 #[must_use]
183 pub fn is_nan<T: ComplexField>(value: &T) -> bool {
184 T::is_nan_impl((value).by_ref())
185 }
186 #[inline(always)]
187 #[must_use]
188 pub fn is_finite<T: ComplexField>(value: &T) -> bool {
189 T::is_finite_impl((value).by_ref())
190 }
191 #[inline(always)]
192 #[must_use]
193 pub fn sqrt<T: ComplexField>(value: &T) -> T {
194 T::sqrt_impl((value).by_ref())
195 }
196 #[inline(always)]
197 #[must_use]
198 pub fn recip<T: ComplexField>(value: &T) -> T {
199 T::recip_impl((value).by_ref())
200 }
201 #[inline(always)]
202 #[must_use]
203 pub fn from_real<T: ComplexField>(value: &T::Real) -> T {
204 T::from_real_impl((value).by_ref())
205 }
206 #[inline(always)]
207 #[must_use]
208 pub fn from_f64<T: ComplexField>(value: f64) -> T {
209 T::from_f64_impl(value)
210 }
211 pub use crate::{AddByRef, DivByRef, MulByRef, NegByRef, SubByRef};
212}
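// A minimal usage sketch of the helpers above (not part of the original API
// surface). It assumes the `RealField`/`ComplexField` impls for `f64` provided
// later in this file, and that tests are built with the `std` feature enabled.
#[cfg(test)]
mod math_utils_usage {
    use crate::math_utils::*;

    #[test]
    fn scaled_hypot_stays_finite() {
        let big = 1.0e300_f64;
        // The naive formula overflows: `big * big` is already infinite.
        assert!(!(big * big + big * big).is_finite());
        // The rescaled `hypot` stays finite and is close to `sqrt(2) * big`.
        let h = hypot(&big, &big);
        assert!(h.is_finite());
        assert!(abs(&(h / big - core::f64::consts::SQRT_2)) < 1e-12);
    }
}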
213macro_rules! impl_op {
214 ({$(
215 impl$(<$T:ident: $bound:ident>)? $op:ident for $ty:ty {
216 fn $op_fn:ident($self:ident, $rhs:ident: Self) -> Self::Output $body:block
217 }
218 )*}) => {$(
219 impl$(<$T: $bound>)? $op for $ty {
220 type Output = $ty;
221 #[inline(always)]
222 fn $op_fn($self, $rhs: $ty) -> Self::Output $body
223 }
224 impl$(<$T: $bound>)? $op<&$ty> for $ty {
225 type Output = $ty;
226 #[inline(always)]
227 fn $op_fn($self, $rhs: &$ty) -> Self::Output $body
228 }
229 impl$(<$T: $bound>)? $op<$ty> for &$ty {
230 type Output = $ty;
231 #[inline(always)]
232 fn $op_fn($self, $rhs: $ty) -> Self::Output $body
233 }
234 impl$(<$T: $bound>)? $op<&$ty> for &$ty {
235 type Output = $ty;
236 #[inline(always)]
237 fn $op_fn($self, $rhs: &$ty) -> Self::Output $body
238 }
239 )*};
240}
241macro_rules! impl_assign_op {
242 ({$(
243 impl$(<$T:ident: $bound:ident>)? $op:ident for $ty:ty {
244 fn $op_fn:ident(&mut $self:ident, $rhs:ident: Self) $body:block
245 }
246 )*}) => {$(
247 impl$(<$T: $bound>)? $op for $ty {
248 #[inline(always)]
249 fn $op_fn(&mut $self, $rhs: Self) $body
250 }
251 impl$(<$T: $bound>)? $op<&$ty> for $ty {
252 #[inline(always)]
253 fn $op_fn(&mut $self, $rhs: &$ty) $body
254 }
255 )*};
256}
257pub trait AddByRef<Rhs = Self> {
258 type Output;
259 fn add_by_ref(&self, rhs: &Rhs) -> Self::Output;
260}
261pub trait SubByRef<Rhs = Self> {
262 type Output;
263 fn sub_by_ref(&self, rhs: &Rhs) -> Self::Output;
264}
265pub trait NegByRef {
266 type Output;
267 fn neg_by_ref(&self) -> Self::Output;
268}
269pub trait MulByRef<Rhs = Self> {
270 type Output;
271 fn mul_by_ref(&self, rhs: &Rhs) -> Self::Output;
272}
273pub trait DivByRef<Rhs = Self> {
274 type Output;
275 fn div_by_ref(&self, rhs: &Rhs) -> Self::Output;
276}
277impl<Rhs, Lhs, Output> AddByRef<Rhs> for Lhs
278where
279 for<'a> &'a Lhs: core::ops::Add<&'a Rhs, Output = Output>,
280{
281 type Output = Output;
282
283 #[inline]
284 #[track_caller]
285 fn add_by_ref(&self, rhs: &Rhs) -> Self::Output {
286 self + rhs
287 }
288}
289impl<Rhs, Lhs, Output> SubByRef<Rhs> for Lhs
290where
291 for<'a> &'a Lhs: core::ops::Sub<&'a Rhs, Output = Output>,
292{
293 type Output = Output;
294
295 #[inline]
296 #[track_caller]
297 fn sub_by_ref(&self, rhs: &Rhs) -> Self::Output {
298 self - rhs
299 }
300}
301impl<Rhs, Lhs, Output> MulByRef<Rhs> for Lhs
302where
303 for<'a> &'a Lhs: core::ops::Mul<&'a Rhs, Output = Output>,
304{
305 type Output = Output;
306
307 #[inline]
308 #[track_caller]
309 fn mul_by_ref(&self, rhs: &Rhs) -> Self::Output {
310 self * rhs
311 }
312}
313impl<Rhs, Lhs, Output> DivByRef<Rhs> for Lhs
314where
315 for<'a> &'a Lhs: core::ops::Div<&'a Rhs, Output = Output>,
316{
317 type Output = Output;
318
319 #[inline]
320 #[track_caller]
321 fn div_by_ref(&self, rhs: &Rhs) -> Self::Output {
322 self / rhs
323 }
324}
325impl<T, Output> NegByRef for T
326where
327 for<'a> &'a T: core::ops::Neg<Output = Output>,
328{
329 type Output = Output;
330
331 #[inline]
332 #[track_caller]
333 fn neg_by_ref(&self) -> Self::Output {
334 -self
335 }
336}
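// Overflow/underflow-safe hypot: rescale by `sqrt_min_positive`/`sqrt_max_positive`
// before squaring so that `re^2 + im^2` neither overflows nor flushes to zero, then
// undo the scaling after the square root.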
337fn abs_impl<T: RealField>(re: T, im: T) -> T {
338 use ext::*;
339 let (re, im) = &(re, im);
340 let small = &sqrt_min_positive();
341 let big = &sqrt_max_positive();
342 let one = &one();
343 let re_abs = &re.abs();
344 let im_abs = &im.abs();
345 if re_abs > big || im_abs > big {
346 ((re * small).abs2() + (im * small).abs2()).sqrt() * big
347 } else if re_abs > one || im_abs > one {
348 (re.abs2() + im.abs2()).sqrt()
349 } else {
350 ((re * big).abs2() + (im * big).abs2()).sqrt() * small
351 }
352}
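// Complex reciprocal: 1/(re + i*im) = (re - i*im) / (re^2 + im^2), using the same
// rescaling trick as `abs_impl`, with NaN, zero, and non-finite inputs handled up
// front.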
353fn recip_impl<T: RealField>(re: T, im: T) -> (T, T) {
354 use ext::*;
355 let (re, im) = &(re, im);
356 if is_nan(re) || is_nan(im) {
357 return (nan(), nan());
358 }
359 if *re == zero() && *im == zero() {
360 return (infinity(), infinity());
361 }
362 if !is_finite(re) || !is_finite(im) {
363 return (zero(), zero());
364 }
365 let small = &sqrt_min_positive();
366 let big = &sqrt_max_positive();
367 let one = &one();
368 let re_abs = &abs(re);
369 let im_abs = &abs(im);
370 if re_abs > big || im_abs > big {
371 let re = re * small;
372 let im = im * small;
373 let inv = &(re.abs2() + im.abs2()).recip();
374 (((re * inv) * small), ((-im * inv) * small))
375 } else if re_abs > one || im_abs > one {
376 let inv = &(re.abs2() + im.abs2()).recip();
377 ((re * inv), (-im * inv))
378 } else {
379 let re = re * big;
380 let im = im * big;
381 let inv = &(re.abs2() + im.abs2()).recip();
382 (((re * inv) * big), ((-im * inv) * big))
383 }
384}
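// Principal square root of `re + i*im`:
//   out_re = sqrt((|z| + re) / 2),   out_im = sign(im) * sqrt((|z| - re) / 2),
// with the radicands clamped at zero to absorb rounding error.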
385fn sqrt_impl<T: RealField>(re: T, im: T) -> (T, T) {
386 use ext::*;
387 let im_negative = im < zero();
388 let half = &from_f64(0.5);
389 let abs = &abs_impl(re.clone(), im);
390 let zero = &zero::<T>();
391 let re = &re;
392 let mut sum = re + abs;
393 if &sum < zero {
394 sum = zero.copy();
395 }
396 let out_re = sum.mul_pow2(half).sqrt();
397 let mut out_im = (abs - re).mul_pow2(half).fmax(zero).sqrt();
398 if im_negative {
399 out_im = -out_im;
400 }
401 (out_re, out_im)
402}
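/// Uniform borrowing for `T` and `&T`, so helpers like `SimdCtx::splat` can accept
/// either an owned value or a reference.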
403pub trait ByRef<T> {
404 fn by_ref(&self) -> &T;
405}
406impl<T> ByRef<T> for T {
407 #[inline]
408 fn by_ref(&self) -> &T {
409 self
410 }
411}
412impl<T> ByRef<T> for &T {
413 #[inline]
414 fn by_ref(&self) -> &T {
415 *self
416 }
417}
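/// Method-call wrapper around `T::SimdCtx<S>`: forwards to the `simd_*` functions of
/// `ComplexField`, transmuting between the `Pod` register types where needed.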
418#[repr(transparent)]
419pub struct SimdCtx<T: ComplexField, S: Simd>(pub T::SimdCtx<S>);
420impl<T: ComplexField, S: Simd> Copy for SimdCtx<T, S> {}
421impl<T: ComplexField, S: Simd> Clone for SimdCtx<T, S> {
422 #[inline]
423 fn clone(&self) -> Self {
424 *self
425 }
426}
427#[derive(Copy, Clone, Debug)]
428#[repr(transparent)]
429pub struct RealReg<T>(pub T);
430impl<T: ComplexField, S: Simd> SimdCtx<T, S> {
431 #[inline(always)]
432 pub fn new(ctx: &T::SimdCtx<S>) -> &Self {
433 unsafe { &*(ctx as *const T::SimdCtx<S> as *const Self) }
434 }
435
436 #[inline(always)]
437 pub fn zero(&self) -> T::SimdVec<S> {
438 unsafe { core::mem::zeroed() }
439 }
440
441 #[inline(always)]
442 pub fn splat(&self, value: impl ByRef<T>) -> T::SimdVec<S> {
443 unsafe {
444 core::mem::transmute_copy(&T::simd_splat(&self.0, value.by_ref()))
445 }
446 }
447
448 #[inline(always)]
449 pub fn splat_real(
450 &self,
451 value: impl ByRef<T::Real>,
452 ) -> RealReg<T::SimdVec<S>> {
453 RealReg(unsafe {
454 core::mem::transmute_copy(&T::simd_splat_real(
455 &self.0,
456 value.by_ref(),
457 ))
458 })
459 }
460
461 #[inline(always)]
462 pub fn add(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
463 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
464 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
465 unsafe { core::mem::transmute_copy(&T::simd_add(&self.0, lhs, rhs)) }
466 }
467
468 #[inline(always)]
469 pub fn sub(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
470 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
471 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
472 unsafe { core::mem::transmute_copy(&T::simd_sub(&self.0, lhs, rhs)) }
473 }
474
475 #[inline(always)]
476 pub fn neg(&self, value: T::SimdVec<S>) -> T::SimdVec<S> {
477 let value = unsafe { core::mem::transmute_copy(&value) };
478 unsafe { core::mem::transmute_copy(&T::simd_neg(&self.0, value)) }
479 }
480
481 #[inline(always)]
482 pub fn conj(&self, value: T::SimdVec<S>) -> T::SimdVec<S> {
483 let value = unsafe { core::mem::transmute_copy(&value) };
484 unsafe { core::mem::transmute_copy(&T::simd_conj(&self.0, value)) }
485 }
486
487 #[inline(always)]
488 pub fn abs1(&self, value: T::SimdVec<S>) -> RealReg<T::SimdVec<S>> {
489 let value = unsafe { core::mem::transmute_copy(&value) };
490 RealReg(unsafe {
491 core::mem::transmute_copy(&T::simd_abs1(&self.0, value))
492 })
493 }
494
495 #[inline(always)]
496 pub fn abs_max(&self, value: T::SimdVec<S>) -> RealReg<T::SimdVec<S>> {
497 let value = unsafe { core::mem::transmute_copy(&value) };
498 RealReg(unsafe {
499 core::mem::transmute_copy(&T::simd_abs_max(&self.0, value))
500 })
501 }
502
503 #[inline(always)]
504 pub fn mul_real(
505 &self,
506 lhs: T::SimdVec<S>,
507 rhs: RealReg<T::SimdVec<S>>,
508 ) -> T::SimdVec<S> {
509 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
510 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
511 unsafe {
512 core::mem::transmute_copy(&T::simd_mul_real(&self.0, lhs, rhs))
513 }
514 }
515
516 #[inline(always)]
517 pub fn mul_pow2(
518 &self,
519 lhs: T::SimdVec<S>,
520 rhs: RealReg<T::SimdVec<S>>,
521 ) -> T::SimdVec<S> {
522 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
523 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
524 unsafe {
525 core::mem::transmute_copy(&T::simd_mul_pow2(&self.0, lhs, rhs))
526 }
527 }
528
529 #[inline(always)]
530 pub fn mul(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
531 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
532 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
533 unsafe { core::mem::transmute_copy(&T::simd_mul(&self.0, lhs, rhs)) }
534 }
535
536 #[inline(always)]
537 pub fn conj_mul(
538 &self,
539 lhs: T::SimdVec<S>,
540 rhs: T::SimdVec<S>,
541 ) -> T::SimdVec<S> {
542 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
543 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
544 unsafe {
545 core::mem::transmute_copy(&T::simd_conj_mul(&self.0, lhs, rhs))
546 }
547 }
548
549 #[inline(always)]
550 pub fn mul_add(
551 &self,
552 lhs: T::SimdVec<S>,
553 rhs: T::SimdVec<S>,
554 acc: T::SimdVec<S>,
555 ) -> T::SimdVec<S> {
556 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
557 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
558 let acc = unsafe { core::mem::transmute_copy(&acc) };
559 unsafe {
560 core::mem::transmute_copy(&T::simd_mul_add(&self.0, lhs, rhs, acc))
561 }
562 }
563
564 #[inline(always)]
565 pub fn conj_mul_add(
566 &self,
567 lhs: T::SimdVec<S>,
568 rhs: T::SimdVec<S>,
569 acc: T::SimdVec<S>,
570 ) -> T::SimdVec<S> {
571 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
572 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
573 let acc = unsafe { core::mem::transmute_copy(&acc) };
574 unsafe {
575 core::mem::transmute_copy(&T::simd_conj_mul_add(
576 &self.0, lhs, rhs, acc,
577 ))
578 }
579 }
580
581 #[inline(always)]
582 pub fn maybe_conj_mul_add<const CONJ: bool>(
583 &self,
584 lhs: T::SimdVec<S>,
585 rhs: T::SimdVec<S>,
586 acc: T::SimdVec<S>,
587 ) -> T::SimdVec<S> {
588 if const { CONJ } {
589 self.conj_mul_add(lhs, rhs, acc)
590 } else {
591 self.mul_add(lhs, rhs, acc)
592 }
593 }
594
595 #[inline(always)]
596 pub fn abs2(&self, value: T::SimdVec<S>) -> RealReg<T::SimdVec<S>> {
597 let value = unsafe { core::mem::transmute_copy(&value) };
598 RealReg(unsafe {
599 core::mem::transmute_copy(&T::simd_abs2(&self.0, value))
600 })
601 }
602
603 #[inline(always)]
604 pub fn abs2_add(
605 &self,
606 value: T::SimdVec<S>,
607 acc: RealReg<T::SimdVec<S>>,
608 ) -> RealReg<T::SimdVec<S>> {
609 let value = unsafe { core::mem::transmute_copy(&value) };
610 let acc = unsafe { core::mem::transmute_copy(&acc) };
611 RealReg(unsafe {
612 core::mem::transmute_copy(&T::simd_abs2_add(&self.0, value, acc))
613 })
614 }
615
616 #[inline(always)]
617 pub fn reduce_sum(&self, value: T::SimdVec<S>) -> T {
618 let value = unsafe { core::mem::transmute_copy(&value) };
619 unsafe {
620 core::mem::transmute_copy(&T::simd_reduce_sum(&self.0, value))
621 }
622 }
623
624 #[inline(always)]
625 pub fn reduce_max(&self, value: RealReg<T::SimdVec<S>>) -> T {
626 let value = unsafe { core::mem::transmute_copy(&value) };
627 unsafe {
628 core::mem::transmute_copy(&T::simd_reduce_max(&self.0, value))
629 }
630 }
631
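    // When `SIMD_ABS_SPLIT_REAL_IMAG` is set (and the backend is not scalar), the
    // reduced scalar still carries separate real/imaginary partial results, so the
    // two parts are combined here: summed for `reduce_sum_real`, `fmax`ed for
    // `reduce_max_real`.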
632 #[inline(always)]
633 pub fn reduce_sum_real(&self, value: RealReg<T::SimdVec<S>>) -> Real<T> {
634 let value = T::simd_reduce_sum(&self.0, value.0);
635 if const { T::SIMD_ABS_SPLIT_REAL_IMAG && !S::IS_SCALAR } {
636 let r = real(&value);
637 let i = imag(&value);
638 &r + &i
639 } else {
640 real(&value)
641 }
642 }
643
644 #[inline(always)]
645 pub fn reduce_max_real(&self, value: RealReg<T::SimdVec<S>>) -> Real<T> {
646 use crate::ext::*;
647 let value = T::simd_reduce_max(&self.0, value.0);
648 if const { T::SIMD_ABS_SPLIT_REAL_IMAG && !S::IS_SCALAR } {
649 value.real().fmax(value.imag())
650 } else {
651 value.real()
652 }
653 }
654
655 #[inline(always)]
656 pub fn max(
657 &self,
658 lhs: RealReg<T::SimdVec<S>>,
659 rhs: RealReg<T::SimdVec<S>>,
660 ) -> RealReg<T::SimdVec<S>> {
661 let cmp = self.gt(lhs, rhs);
662 RealReg(self.select(cmp, lhs.0, rhs.0))
663 }
664
665 #[inline(always)]
666 pub fn eq(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdMask<S> {
667 T::simd_equal(&self.0, lhs, rhs)
668 }
669
670 #[inline(always)]
671 pub fn lt(
672 &self,
673 lhs: RealReg<T::SimdVec<S>>,
674 rhs: RealReg<T::SimdVec<S>>,
675 ) -> T::SimdMask<S> {
676 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
677 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
678 unsafe {
679 core::mem::transmute_copy(&T::simd_less_than(&self.0, lhs, rhs))
680 }
681 }
682
683 #[inline(always)]
684 pub fn gt(
685 &self,
686 lhs: RealReg<T::SimdVec<S>>,
687 rhs: RealReg<T::SimdVec<S>>,
688 ) -> T::SimdMask<S> {
689 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
690 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
691 unsafe {
692 core::mem::transmute_copy(&T::simd_greater_than(&self.0, lhs, rhs))
693 }
694 }
695
696 #[inline(always)]
697 pub fn le(
698 &self,
699 lhs: RealReg<T::SimdVec<S>>,
700 rhs: RealReg<T::SimdVec<S>>,
701 ) -> T::SimdMask<S> {
702 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
703 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
704 unsafe {
705 core::mem::transmute_copy(&T::simd_less_than_or_equal(
706 &self.0, lhs, rhs,
707 ))
708 }
709 }
710
711 #[inline(always)]
712 pub fn ge(
713 &self,
714 lhs: RealReg<T::SimdVec<S>>,
715 rhs: RealReg<T::SimdVec<S>>,
716 ) -> T::SimdMask<S> {
717 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
718 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
719 unsafe {
720 core::mem::transmute_copy(&T::simd_greater_than_or_equal(
721 &self.0, lhs, rhs,
722 ))
723 }
724 }
725
726 #[inline(always)]
727 pub fn select(
728 &self,
729 mask: T::SimdMask<S>,
730 lhs: T::SimdVec<S>,
731 rhs: T::SimdVec<S>,
732 ) -> T::SimdVec<S> {
733 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
734 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
735 unsafe {
736 core::mem::transmute_copy(&T::simd_select(&self.0, mask, lhs, rhs))
737 }
738 }
739
740 #[inline(always)]
741 pub fn iselect(
742 &self,
743 mask: T::SimdMask<S>,
744 lhs: T::SimdIndex<S>,
745 rhs: T::SimdIndex<S>,
746 ) -> T::SimdIndex<S> {
747 unsafe {
748 core::mem::transmute_copy(&T::simd_index_select(
749 &self.0, mask, lhs, rhs,
750 ))
751 }
752 }
753
754 #[inline(always)]
755 pub fn isplat(&self, value: T::Index) -> T::SimdIndex<S> {
756 unsafe {
757 core::mem::transmute_copy(&T::simd_index_splat(&self.0, value))
758 }
759 }
760
761 #[inline(always)]
762 pub fn izero(&self) -> T::SimdIndex<S> {
763 unsafe { core::mem::zeroed() }
764 }
765
766 #[inline(always)]
767 pub fn iadd(
768 &self,
769 lhs: T::SimdIndex<S>,
770 rhs: T::SimdIndex<S>,
771 ) -> T::SimdIndex<S> {
772 unsafe {
773 core::mem::transmute_copy(&T::simd_index_add(&self.0, lhs, rhs))
774 }
775 }
776
777 #[inline(always)]
778 pub fn or_mask(
779 &self,
780 lhs: T::SimdMask<S>,
781 rhs: T::SimdMask<S>,
782 ) -> T::SimdMask<S> {
783 T::simd_or_mask(&self.0, lhs, rhs)
784 }
785
786 #[inline(always)]
787 pub fn and_mask(
788 &self,
789 lhs: T::SimdMask<S>,
790 rhs: T::SimdMask<S>,
791 ) -> T::SimdMask<S> {
792 T::simd_and_mask(&self.0, lhs, rhs)
793 }
794
795 #[inline(always)]
796 pub fn not_mask(&self, mask: T::SimdMask<S>) -> T::SimdMask<S> {
797 T::simd_not_mask(&self.0, mask)
798 }
799
800 #[inline(always)]
801 pub fn first_true_mask(&self, value: T::SimdMask<S>) -> usize {
802 T::simd_first_true_mask(&self.0, value)
803 }
804
805 #[inline(always)]
806 pub unsafe fn mask_load(
807 &self,
808 mask: T::SimdMemMask<S>,
809 ptr: *const T::SimdVec<S>,
810 ) -> T::SimdVec<S> {
811 unsafe { T::simd_mask_load(&self.0, mask, ptr) }
812 }
813
814 #[inline(always)]
815 pub unsafe fn mask_store(
816 &self,
817 mask: T::SimdMemMask<S>,
818 ptr: *mut T::SimdVec<S>,
819 value: T::SimdVec<S>,
820 ) {
821 let value = unsafe { core::mem::transmute_copy(&value) };
822 unsafe { T::simd_mask_store(&self.0, mask, ptr, value) }
823 }
824
825 #[inline(always)]
826 pub fn load(&self, ptr: &T::SimdVec<S>) -> T::SimdVec<S> {
827 unsafe { core::mem::transmute_copy(&T::simd_load(&self.0, ptr)) }
828 }
829
830 #[inline(always)]
831 pub fn store(&self, ptr: &mut T::SimdVec<S>, value: T::SimdVec<S>) {
832 let value = unsafe { core::mem::transmute_copy(&value) };
833 unsafe {
834 core::mem::transmute_copy(&T::simd_store(&self.0, ptr, value))
835 }
836 }
837}
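/// Relates a scalar type to its canonical form and to its conjugated counterpart:
/// `Complex<T>` and `ComplexConj<T>` share `Canonical = Complex<T>`, so conjugation
/// can be tracked in the type system instead of by copying data.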
838pub unsafe trait Conjugate:
839 Send + Sync + core::fmt::Debug + Imply<Self::Canonical, Is: RefOps>
840{
841 const IS_CANONICAL: bool;
842 type Conj: Conjugate<Conj = Self, Canonical = Self::Canonical>;
843 type Canonical: Conjugate<Canonical = Self::Canonical> + ComplexField;
844}
845pub type Real<T> = <<T as Conjugate>::Canonical as ComplexField>::Real;
846#[derive(Copy, Clone, Debug, PartialEq, Eq)]
847pub struct ComplexConj<T> {
848 pub re: T,
849 pub im_neg: T,
850}
851#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
852pub enum SimdCapabilities {
853 None,
854 Copy,
855 Simd,
856}
857impl SimdCapabilities {
858 #[inline]
859 pub const fn is_copy(self) -> bool {
860 matches!(self, Self::Copy | Self::Simd)
861 }
862
863 #[inline]
864 pub const fn is_simd(self) -> bool {
865 matches!(self, Self::Simd)
866 }
867}
868mod seal {
869 pub trait Seal {}
870 impl Seal for u8 {}
871 impl Seal for u16 {}
872 impl Seal for u32 {}
873 impl Seal for u64 {}
874 impl Seal for u128 {}
875 impl Seal for usize {}
876 impl Seal for i32 {}
877 impl Seal for i64 {}
878 impl Seal for isize {}
879}
880pub trait Seal: seal::Seal {}
881impl<T: seal::Seal> Seal for T {}
882pub trait SignedIndex:
886 Seal
887 + core::fmt::Debug
888 + core::ops::Neg<Output = Self>
889 + core::ops::Add<Output = Self>
890 + core::ops::Sub<Output = Self>
891 + core::ops::AddAssign
892 + core::ops::SubAssign
893 + bytemuck::Pod
894 + Eq
895 + Ord
896 + Send
897 + Sync
898{
899 const MAX: Self;
901 #[must_use]
903 fn truncate(value: usize) -> Self;
904 #[must_use]
906 fn zx(self) -> usize;
907 #[must_use]
909 fn sx(self) -> usize;
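    /// Sums the (assumed nonnegative) entries, returning `None` if the running total
    /// would overflow; `Self::MAX - i < acc` is the overflow test for `acc + i`.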
910 fn sum_nonnegative(slice: &[Self]) -> Option<Self> {
912 let mut acc = Self::zeroed();
913 for &i in slice {
914 if Self::MAX - i < acc {
915 return None;
916 }
917 acc += i;
918 }
919 Some(acc)
920 }
921}
922impl SignedIndex for i32 {
923 const MAX: Self = Self::MAX;
924
925 #[inline(always)]
926 fn truncate(value: usize) -> Self {
927 #[allow(clippy::assertions_on_constants)]
928 const _: () = {
929 core::assert!(i32::BITS <= usize::BITS);
930 };
931 value as isize as Self
932 }
933
934 #[inline(always)]
935 fn zx(self) -> usize {
936 self as u32 as usize
937 }
938
939 #[inline(always)]
940 fn sx(self) -> usize {
941 self as isize as usize
942 }
943}
#[cfg(target_pointer_width = "64")]
945impl SignedIndex for i64 {
946 const MAX: Self = Self::MAX;
947
948 #[inline(always)]
949 fn truncate(value: usize) -> Self {
950 #[allow(clippy::assertions_on_constants)]
951 const _: () = {
952 core::assert!(i64::BITS <= usize::BITS);
953 };
954 value as isize as Self
955 }
956
957 #[inline(always)]
958 fn zx(self) -> usize {
959 self as u64 as usize
960 }
961
962 #[inline(always)]
963 fn sx(self) -> usize {
964 self as isize as usize
965 }
966}
967impl SignedIndex for isize {
968 const MAX: Self = Self::MAX;
969
970 #[inline(always)]
971 fn truncate(value: usize) -> Self {
972 value as isize
973 }
974
975 #[inline(always)]
976 fn zx(self) -> usize {
977 self as usize
978 }
979
980 #[inline(always)]
981 fn sx(self) -> usize {
982 self as usize
983 }
984}
985pub trait IndexCore:
986 Seal
987 + core::fmt::Debug
988 + core::ops::Not<Output = Self>
989 + core::ops::BitAnd<Output = Self>
990 + core::ops::BitOr<Output = Self>
991 + core::ops::BitXor<Output = Self>
992 + core::ops::Add<Output = Self>
993 + core::ops::Sub<Output = Self>
994 + core::ops::AddAssign
995 + core::ops::SubAssign
996 + bytemuck::Pod
997 + Eq
998 + Ord
999 + Send
1000 + Sync
1002{
1003 const MAX: Self;
1004 #[must_use]
1006 fn truncate(value: usize) -> Self;
1007 #[must_use]
1009 fn zx(self) -> usize;
1010}
1011pub trait Index: IndexCore {
1012 type FixedWidth: Index;
1014 type Signed: SignedIndex;
1016 const BITS: u32 = core::mem::size_of::<Self>() as u32 * 8;
1017 #[inline(always)]
1019 fn canonicalize(slice: &[Self]) -> &[Self::FixedWidth] {
1020 bytemuck::cast_slice(slice)
1021 }
1022 #[inline(always)]
1024 fn canonicalize_mut(slice: &mut [Self]) -> &mut [Self::FixedWidth] {
1025 bytemuck::cast_slice_mut(slice)
1026 }
1027 #[inline(always)]
1029 fn from_signed(value: Self::Signed) -> Self {
1030 bytemuck::cast(value)
1031 }
1032 #[inline(always)]
1034 fn to_signed(self) -> Self::Signed {
1035 bytemuck::cast(self)
1036 }
1037 #[inline]
1039 fn sum_nonnegative(slice: &[Self]) -> Option<Self> {
1040 Self::Signed::sum_nonnegative(bytemuck::cast_slice(slice))
1041 .map(Self::from_signed)
1042 }
1043}
1044impl IndexCore for u8 {
1045 const MAX: Self = Self::MAX;
1046
1047 #[inline(always)]
1048 fn truncate(value: usize) -> Self {
1049 value as _
1050 }
1051
1052 #[inline(always)]
1053 fn zx(self) -> usize {
1054 self as _
1055 }
1056}
1057impl IndexCore for u16 {
1058 const MAX: Self = Self::MAX;
1059
1060 #[inline(always)]
1061 fn truncate(value: usize) -> Self {
1062 value as _
1063 }
1064
1065 #[inline(always)]
1066 fn zx(self) -> usize {
1067 self as _
1068 }
1069}
1070impl IndexCore for u32 {
1071 const MAX: Self = Self::MAX;
1072
1073 #[inline(always)]
1074 fn truncate(value: usize) -> Self {
1075 value as _
1076 }
1077
1078 #[inline(always)]
1079 fn zx(self) -> usize {
1080 self as _
1081 }
1082}
1083impl IndexCore for u64 {
1084 const MAX: Self = Self::MAX;
1085
1086 #[inline(always)]
1087 fn truncate(value: usize) -> Self {
1088 value as _
1089 }
1090
1091 #[inline(always)]
1092 fn zx(self) -> usize {
1093 self as _
1094 }
1095}
1096impl IndexCore for u128 {
1097 const MAX: Self = Self::MAX;
1098
1099 #[inline(always)]
1100 fn truncate(value: usize) -> Self {
1101 value as _
1102 }
1103
1104 #[inline(always)]
1105 fn zx(self) -> usize {
1106 self as _
1107 }
1108}
1109impl IndexCore for usize {
1110 const MAX: Self = Self::MAX;
1111
1112 #[inline(always)]
1113 fn truncate(value: usize) -> Self {
1114 value
1115 }
1116
1117 #[inline(always)]
1118 fn zx(self) -> usize {
1119 self
1120 }
1121}
1122impl Index for u32 {
1123 type FixedWidth = u32;
1124 type Signed = i32;
1125}
#[cfg(target_pointer_width = "64")]
1127impl Index for u64 {
1128 type FixedWidth = u64;
1129 type Signed = i64;
1130}
1131impl Index for usize {
1132 #[cfg(target_pointer_width = "32")]
1133 type FixedWidth = u32;
1134 #[cfg(target_pointer_width = "64")]
1135 type FixedWidth = u64;
1136 type Signed = isize;
1137}
1138unsafe impl<T: RealField> Conjugate for T {
1139 type Canonical = T;
1140 type Conj = T;
1141
1142 const IS_CANONICAL: bool = true;
1143}
1144unsafe impl<T: RealField> Conjugate for Complex<T> {
1145 type Canonical = Complex<T>;
1146 type Conj = ComplexConj<T>;
1147
1148 const IS_CANONICAL: bool = true;
1149}
1150unsafe impl<T: RealField> Conjugate for ComplexConj<T> {
1151 type Canonical = Complex<T>;
1152 type Conj = Complex<T>;
1153
1154 const IS_CANONICAL: bool = false;
1155}
1156pub trait SimdArch: Copy + Default + Send + Sync {
1157 fn dispatch<R>(self, f: impl pulp::WithSimd<Output = R>) -> R;
1158}
1159impl SimdArch for pulp::Arch {
1160 #[inline]
1161 fn dispatch<R>(self, f: impl pulp::WithSimd<Output = R>) -> R {
1162 self.dispatch(f)
1163 }
1164}
1165impl SimdArch for pulp::Scalar {
1166 #[inline]
1167 fn dispatch<R>(self, f: impl pulp::WithSimd<Output = R>) -> R {
1168 f.with_simd(self)
1169 }
1170}
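/// Bundles the by-value and by-reference arithmetic operator bounds (`a + b`,
/// `a + &b`, `&a + b`, `&a + &b`, plus the `*Assign` forms) expected of scalars; the
/// `Imply<&'a Self, ...>` clause is what attaches the bounds on `&Self`.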
1171pub trait RefOps:
1172 Sized
1173 + core::ops::Neg<Output = Self>
1174 + core::ops::Add<Output = Self>
1175 + core::ops::Sub<Output = Self>
1176 + core::ops::Mul<Output = Self>
1177 + core::ops::Div<Output = Self>
1178 + core::ops::AddAssign
1179 + core::ops::SubAssign
1180 + core::ops::MulAssign
1181 + core::ops::DivAssign
1182 + for<'a> core::ops::Add<&'a Self, Output = Self>
1183 + for<'a> core::ops::Sub<&'a Self, Output = Self>
1184 + for<'a> core::ops::Mul<&'a Self, Output = Self>
1185 + for<'a> core::ops::Div<&'a Self, Output = Self>
1186 + for<'a> core::ops::AddAssign<&'a Self>
1187 + for<'a> core::ops::SubAssign<&'a Self>
1188 + for<'a> core::ops::MulAssign<&'a Self>
1189 + for<'a> core::ops::DivAssign<&'a Self>
1190 + for<'a> Imply<
1191 &'a Self,
1192 Is: core::ops::Neg<Output = Self>
1193 + core::ops::Add<Output = Self>
1194 + core::ops::Sub<Output = Self>
1195 + core::ops::Mul<Output = Self>
1196 + core::ops::Div<Output = Self>
1197 + core::ops::Add<Self, Output = Self>
1198 + core::ops::Sub<Self, Output = Self>
1199 + core::ops::Mul<Self, Output = Self>
1200 + core::ops::Div<Self, Output = Self>,
1201 >
1202{
1203}
1204impl<T> RefOps for T
1205where
1206 T: core::ops::Neg<Output = Self>
1207 + core::ops::Add<Output = Self>
1208 + core::ops::Sub<Output = Self>
1209 + core::ops::Mul<Output = Self>
1210 + core::ops::Div<Output = Self>
1211 + core::ops::AddAssign
1212 + core::ops::SubAssign
1213 + core::ops::MulAssign
1214 + core::ops::DivAssign
1215 + for<'a> core::ops::Add<&'a T, Output = Self>
1216 + for<'a> core::ops::Sub<&'a T, Output = Self>
1217 + for<'a> core::ops::Mul<&'a T, Output = Self>
1218 + for<'a> core::ops::Div<&'a T, Output = Self>
1219 + for<'a> core::ops::AddAssign<&'a T>
1220 + for<'a> core::ops::SubAssign<&'a T>
1221 + for<'a> core::ops::MulAssign<&'a T>
1222 + for<'a> core::ops::DivAssign<&'a T>,
1223 for<'a> &'a T: core::ops::Neg<Output = T>
1224 + core::ops::Add<Output = T>
1225 + core::ops::Sub<Output = T>
1226 + core::ops::Mul<Output = T>
1227 + core::ops::Div<Output = T>
1228 + core::ops::Add<T, Output = T>
1229 + core::ops::Sub<T, Output = T>
1230 + core::ops::Mul<T, Output = T>
1231 + core::ops::Div<T, Output = T>,
1232{
1233}
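/// Scalar usable as a (possibly complex) field element, exposing both scalar
/// (`*_impl`) and SIMD (`simd_*`) operations; `Real` is the associated real type used
/// for absolute values, norms, and comparisons.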
1234pub trait ComplexField:
1235 Debug
1236 + Clone
1237 + Conjugate<Canonical = Self>
1238 + PartialEq
1239 + RefOps
1240 + Imply<Self::Real, Is: RefOps>
1241{
1242 const IS_REAL: bool;
1243 const SIMD_ABS_SPLIT_REAL_IMAG: bool = false;
1244 type Arch: SimdArch;
1245 type Unit: ComplexField;
1246 type SimdCtx<S: Simd>: Copy;
1247 type Index: IndexCore;
1248 type Real: RealField;
1249 #[doc(hidden)]
1250 const IS_NATIVE_F32: bool = false;
1251 #[doc(hidden)]
1252 const IS_NATIVE_C32: bool = false;
1253 #[doc(hidden)]
1254 const IS_NATIVE_F64: bool = false;
1255 #[doc(hidden)]
1256 const IS_NATIVE_C64: bool = false;
1257 const SIMD_CAPABILITIES: SimdCapabilities;
1258 type SimdMask<S: Simd>: Copy + Debug;
1259 type SimdMemMask<S: Simd>: Copy + Debug;
1260 type SimdVec<S: Simd>: Pod + Debug;
1261 type SimdIndex<S: Simd>: Pod + Debug;
1262 fn zero_impl() -> Self;
1263 fn one_impl() -> Self;
1264 fn nan_impl() -> Self;
1265 fn infinity_impl() -> Self;
1266 fn from_real_impl(real: &Self::Real) -> Self;
1267 fn from_f64_impl(real: f64) -> Self;
1268 fn real_part_impl(value: &Self) -> Self::Real;
1269 fn imag_part_impl(value: &Self) -> Self::Real;
1270 fn copy_impl(value: &Self) -> Self;
1271 fn conj_impl(value: &Self) -> Self;
1272 fn recip_impl(value: &Self) -> Self;
1273 fn sqrt_impl(value: &Self) -> Self;
1274 fn abs_impl(value: &Self) -> Self::Real;
1275 fn abs1_impl(value: &Self) -> Self::Real;
1276 fn abs2_impl(value: &Self) -> Self::Real;
1277 fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self;
1278 fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self;
1279 fn is_finite_impl(value: &Self) -> bool;
1280 fn is_nan_impl(value: &Self) -> bool {
1281 value != value
1282 }
1283 fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S>;
1284 fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S;
1285 fn simd_mask_between<S: Simd>(
1286 ctx: &Self::SimdCtx<S>,
1287 start: Self::Index,
1288 end: Self::Index,
1289 ) -> Self::SimdMask<S>;
1290 fn simd_mem_mask_between<S: Simd>(
1291 ctx: &Self::SimdCtx<S>,
1292 start: Self::Index,
1293 end: Self::Index,
1294 ) -> Self::SimdMemMask<S>;
1295 unsafe fn simd_mask_load_raw<S: Simd>(
1296 ctx: &Self::SimdCtx<S>,
1297 mask: Self::SimdMemMask<S>,
1298 ptr: *const Self::SimdVec<S>,
1299 ) -> Self::SimdVec<S>;
1300 unsafe fn simd_mask_store_raw<S: Simd>(
1301 ctx: &Self::SimdCtx<S>,
1302 mask: Self::SimdMemMask<S>,
1303 ptr: *mut Self::SimdVec<S>,
1304 values: Self::SimdVec<S>,
1305 );
1306 fn simd_splat<S: Simd>(
1307 ctx: &Self::SimdCtx<S>,
1308 value: &Self,
1309 ) -> Self::SimdVec<S>;
1310 fn simd_splat_real<S: Simd>(
1311 ctx: &Self::SimdCtx<S>,
1312 value: &Self::Real,
1313 ) -> Self::SimdVec<S>;
1314 fn simd_add<S: Simd>(
1315 ctx: &Self::SimdCtx<S>,
1316 lhs: Self::SimdVec<S>,
1317 rhs: Self::SimdVec<S>,
1318 ) -> Self::SimdVec<S>;
1319 fn simd_sub<S: Simd>(
1320 ctx: &Self::SimdCtx<S>,
1321 lhs: Self::SimdVec<S>,
1322 rhs: Self::SimdVec<S>,
1323 ) -> Self::SimdVec<S>;
1324 fn simd_neg<S: Simd>(
1325 ctx: &Self::SimdCtx<S>,
1326 value: Self::SimdVec<S>,
1327 ) -> Self::SimdVec<S>;
1328 fn simd_conj<S: Simd>(
1329 ctx: &Self::SimdCtx<S>,
1330 value: Self::SimdVec<S>,
1331 ) -> Self::SimdVec<S>;
1332 fn simd_abs1<S: Simd>(
1333 ctx: &Self::SimdCtx<S>,
1334 value: Self::SimdVec<S>,
1335 ) -> Self::SimdVec<S>;
1336 fn simd_abs_max<S: Simd>(
1337 ctx: &Self::SimdCtx<S>,
1338 value: Self::SimdVec<S>,
1339 ) -> Self::SimdVec<S>;
1340 fn simd_mul_real<S: Simd>(
1341 ctx: &Self::SimdCtx<S>,
1342 lhs: Self::SimdVec<S>,
1343 real_rhs: Self::SimdVec<S>,
1344 ) -> Self::SimdVec<S>;
1345 fn simd_mul_pow2<S: Simd>(
1346 ctx: &Self::SimdCtx<S>,
1347 lhs: Self::SimdVec<S>,
1348 real_rhs: Self::SimdVec<S>,
1349 ) -> Self::SimdVec<S>;
1350 fn simd_mul<S: Simd>(
1351 ctx: &Self::SimdCtx<S>,
1352 lhs: Self::SimdVec<S>,
1353 rhs: Self::SimdVec<S>,
1354 ) -> Self::SimdVec<S>;
1355 fn simd_conj_mul<S: Simd>(
1356 ctx: &Self::SimdCtx<S>,
1357 lhs: Self::SimdVec<S>,
1358 rhs: Self::SimdVec<S>,
1359 ) -> Self::SimdVec<S>;
1360 fn simd_mul_add<S: Simd>(
1361 ctx: &Self::SimdCtx<S>,
1362 lhs: Self::SimdVec<S>,
1363 rhs: Self::SimdVec<S>,
1364 acc: Self::SimdVec<S>,
1365 ) -> Self::SimdVec<S>;
1366 fn simd_conj_mul_add<S: Simd>(
1367 ctx: &Self::SimdCtx<S>,
1368 lhs: Self::SimdVec<S>,
1369 rhs: Self::SimdVec<S>,
1370 acc: Self::SimdVec<S>,
1371 ) -> Self::SimdVec<S>;
1372 fn simd_abs2<S: Simd>(
1373 ctx: &Self::SimdCtx<S>,
1374 value: Self::SimdVec<S>,
1375 ) -> Self::SimdVec<S>;
1376 fn simd_abs2_add<S: Simd>(
1377 ctx: &Self::SimdCtx<S>,
1378 value: Self::SimdVec<S>,
1379 acc: Self::SimdVec<S>,
1380 ) -> Self::SimdVec<S>;
1381 fn simd_reduce_sum<S: Simd>(
1382 ctx: &Self::SimdCtx<S>,
1383 value: Self::SimdVec<S>,
1384 ) -> Self;
1385 fn simd_reduce_max<S: Simd>(
1386 ctx: &Self::SimdCtx<S>,
1387 value: Self::SimdVec<S>,
1388 ) -> Self;
1389 fn simd_equal<S: Simd>(
1390 ctx: &Self::SimdCtx<S>,
1391 real_lhs: Self::SimdVec<S>,
1392 real_rhs: Self::SimdVec<S>,
1393 ) -> Self::SimdMask<S>;
1394 fn simd_less_than<S: Simd>(
1395 ctx: &Self::SimdCtx<S>,
1396 real_lhs: Self::SimdVec<S>,
1397 real_rhs: Self::SimdVec<S>,
1398 ) -> Self::SimdMask<S>;
1399 fn simd_less_than_or_equal<S: Simd>(
1400 ctx: &Self::SimdCtx<S>,
1401 real_lhs: Self::SimdVec<S>,
1402 real_rhs: Self::SimdVec<S>,
1403 ) -> Self::SimdMask<S>;
1404 fn simd_greater_than<S: Simd>(
1405 ctx: &Self::SimdCtx<S>,
1406 real_lhs: Self::SimdVec<S>,
1407 real_rhs: Self::SimdVec<S>,
1408 ) -> Self::SimdMask<S>;
1409 fn simd_greater_than_or_equal<S: Simd>(
1410 ctx: &Self::SimdCtx<S>,
1411 real_lhs: Self::SimdVec<S>,
1412 real_rhs: Self::SimdVec<S>,
1413 ) -> Self::SimdMask<S>;
1414 fn simd_select<S: Simd>(
1415 ctx: &Self::SimdCtx<S>,
1416 mask: Self::SimdMask<S>,
1417 lhs: Self::SimdVec<S>,
1418 rhs: Self::SimdVec<S>,
1419 ) -> Self::SimdVec<S>;
1420 fn simd_index_select<S: Simd>(
1421 ctx: &Self::SimdCtx<S>,
1422 mask: Self::SimdMask<S>,
1423 lhs: Self::SimdIndex<S>,
1424 rhs: Self::SimdIndex<S>,
1425 ) -> Self::SimdIndex<S>;
1426 fn simd_index_splat<S: Simd>(
1427 ctx: &Self::SimdCtx<S>,
1428 value: Self::Index,
1429 ) -> Self::SimdIndex<S>;
1430 fn simd_index_add<S: Simd>(
1431 ctx: &Self::SimdCtx<S>,
1432 lhs: Self::SimdIndex<S>,
1433 rhs: Self::SimdIndex<S>,
1434 ) -> Self::SimdIndex<S>;
1435 fn simd_index_less_than<S: Simd>(
1436 ctx: &Self::SimdCtx<S>,
1437 lhs: Self::SimdIndex<S>,
1438 rhs: Self::SimdIndex<S>,
1439 ) -> Self::SimdMask<S>;
1440 #[inline(always)]
1441 fn simd_index_greater_than<S: Simd>(
1442 ctx: &Self::SimdCtx<S>,
1443 lhs: Self::SimdIndex<S>,
1444 rhs: Self::SimdIndex<S>,
1445 ) -> Self::SimdMask<S> {
1446 Self::simd_index_less_than(ctx, rhs, lhs)
1447 }
1448 #[inline(always)]
1449 fn simd_index_less_than_or_equal<S: Simd>(
1450 ctx: &Self::SimdCtx<S>,
1451 lhs: Self::SimdIndex<S>,
1452 rhs: Self::SimdIndex<S>,
1453 ) -> Self::SimdMask<S> {
1454 Self::simd_not_mask(ctx, Self::simd_index_less_than(ctx, rhs, lhs))
1455 }
1456 #[inline(always)]
1457 fn simd_index_greater_than_or_equal<S: Simd>(
1458 ctx: &Self::SimdCtx<S>,
1459 lhs: Self::SimdIndex<S>,
1460 rhs: Self::SimdIndex<S>,
1461 ) -> Self::SimdMask<S> {
1462 Self::simd_not_mask(ctx, Self::simd_index_greater_than(ctx, rhs, lhs))
1463 }
1464 fn simd_and_mask<S: Simd>(
1465 ctx: &Self::SimdCtx<S>,
1466 lhs: Self::SimdMask<S>,
1467 rhs: Self::SimdMask<S>,
1468 ) -> Self::SimdMask<S>;
1469 fn simd_or_mask<S: Simd>(
1470 ctx: &Self::SimdCtx<S>,
1471 lhs: Self::SimdMask<S>,
1472 rhs: Self::SimdMask<S>,
1473 ) -> Self::SimdMask<S>;
1474 fn simd_not_mask<S: Simd>(
1475 ctx: &Self::SimdCtx<S>,
1476 mask: Self::SimdMask<S>,
1477 ) -> Self::SimdMask<S>;
1478 fn simd_first_true_mask<S: Simd>(
1479 ctx: &Self::SimdCtx<S>,
1480 value: Self::SimdMask<S>,
1481 ) -> usize;
1482 #[inline(always)]
1483 fn simd_load<S: Simd>(
1484 ctx: &Self::SimdCtx<S>,
1485 ptr: &Self::SimdVec<S>,
1486 ) -> Self::SimdVec<S> {
1487 let simd = Self::ctx_from_simd(ctx);
1488 if const { Self::Unit::IS_NATIVE_F32 } {
1489 simd.deinterleave_shfl_f32s(*ptr)
1490 } else if const { Self::Unit::IS_NATIVE_F64 } {
1491 simd.deinterleave_shfl_f64s(*ptr)
1492 } else {
1493 panic!();
1494 }
1495 }
1496 #[inline(always)]
1497 fn simd_store<S: Simd>(
1498 ctx: &Self::SimdCtx<S>,
1499 ptr: &mut Self::SimdVec<S>,
1500 value: Self::SimdVec<S>,
1501 ) {
1502 let simd = Self::ctx_from_simd(ctx);
1503 if const { Self::Unit::IS_NATIVE_F32 } {
1504 *ptr = simd.interleave_shfl_f32s(value)
1505 } else if const { Self::Unit::IS_NATIVE_F64 } {
1506 *ptr = simd.interleave_shfl_f64s(value)
1507 } else {
1508 panic!();
1509 }
1510 }
1511 #[inline(always)]
1512 unsafe fn simd_mask_load<S: Simd>(
1513 ctx: &Self::SimdCtx<S>,
1514 mask: Self::SimdMemMask<S>,
1515 ptr: *const Self::SimdVec<S>,
1516 ) -> Self::SimdVec<S> {
1517 let simd = Self::ctx_from_simd(ctx);
1518 let value = Self::simd_mask_load_raw(ctx, mask, ptr);
1519 if const { Self::Unit::IS_NATIVE_F32 } {
1520 simd.deinterleave_shfl_f32s(value)
1521 } else if const { Self::Unit::IS_NATIVE_F64 } {
1522 simd.deinterleave_shfl_f64s(value)
1523 } else {
1524 panic!();
1525 }
1526 }
1527 #[inline(always)]
1528 unsafe fn simd_mask_store<S: Simd>(
1529 ctx: &Self::SimdCtx<S>,
1530 mask: Self::SimdMemMask<S>,
1531 ptr: *mut Self::SimdVec<S>,
1532 value: Self::SimdVec<S>,
1533 ) {
1534 let simd = Self::ctx_from_simd(ctx);
1535 if const { Self::Unit::IS_NATIVE_F32 } {
1536 Self::simd_mask_store_raw(
1537 ctx,
1538 mask,
1539 ptr,
1540 simd.interleave_shfl_f32s(value),
1541 )
1542 } else if const { Self::Unit::IS_NATIVE_F64 } {
1543 Self::simd_mask_store_raw(
1544 ctx,
1545 mask,
1546 ptr,
1547 simd.interleave_shfl_f64s(value),
1548 )
1549 } else {
1550 panic!();
1551 }
1552 }
1553 #[inline(always)]
1554 fn simd_iota<S: Simd>(ctx: &Self::SimdCtx<S>) -> Self::SimdIndex<S> {
1555 let simd = Self::ctx_from_simd(ctx);
1556 #[repr(transparent)]
1557 struct Interleave<T>(T);
1558 unsafe impl<T> pulp::Interleave for Interleave<T> {}
1559 unsafe {
1560 if const { Self::Unit::IS_NATIVE_F32 } {
1561 core::mem::transmute_copy::<_, Self::SimdIndex<S>>(
1562 &simd.deinterleave_shfl_f32s(Interleave(
1563 core::mem::transmute_copy::<_, Self::SimdVec<S>>(
1564 &pulp::iota_32::<Interleave<Self>, 64>(),
1565 ),
1566 )),
1567 )
1568 } else if const { Self::Unit::IS_NATIVE_F64 } {
1569 core::mem::transmute_copy::<_, Self::SimdIndex<S>>(
1570 &simd.deinterleave_shfl_f64s(core::mem::transmute_copy::<
1571 _,
1572 Self::SimdVec<S>,
1573 >(&pulp::iota_64::<
1574 Interleave<Self>,
1575 64,
1576 >())),
1577 )
1578 } else {
1579 panic!();
1580 }
1581 }
1582 }
1583}
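/// Ordered real scalar; also the source of the machine constants (epsilon, the
/// smallest and largest safe positive values, and their square roots) used by the
/// scaled `abs_impl`/`recip_impl` above.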
1584pub trait RealField:
1585 ComplexField<Real = Self, Conj = Self>
1586 + PartialOrd
1587 + num_traits::NumAssign
1588 + num_traits::Num
1589 + RefOps
1590{
1591 fn epsilon_impl() -> Self;
1592 fn nbits_impl() -> usize;
1593 fn min_positive_impl() -> Self;
1594 fn max_positive_impl() -> Self;
1595 fn sqrt_min_positive_impl() -> Self;
1596 fn sqrt_max_positive_impl() -> Self;
1597}
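// Sketch (not part of the original file): a generic routine written against
// `ComplexField` using the `math_utils` helpers. `norm2_sq` is a hypothetical
// helper name; the test only assumes the `f64` impls defined in this file.
#[cfg(test)]
mod complex_field_usage {
    use crate::math_utils::*;
    use crate::ComplexField;

    // Sum of squared absolute values, accumulated in the associated real type.
    fn norm2_sq<T: ComplexField>(values: &[T]) -> T::Real {
        let mut acc = zero::<T::Real>();
        for v in values {
            acc += abs2(v);
        }
        acc
    }

    #[test]
    fn norm2_sq_f64() {
        assert_eq!(norm2_sq(&[3.0_f64, 4.0]), 25.0);
    }
}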
1598impl ComplexField for f32 {
1599 type Arch = pulp::Arch;
1600 type Index = u32;
1601 type Real = Self;
1602 type SimdCtx<S: Simd> = S;
1603 type SimdIndex<S: Simd> = S::u32s;
1604 type SimdMask<S: Simd> = S::m32s;
1605 type SimdMemMask<S: Simd> = pulp::MemMask<S::m32s>;
1606 type SimdVec<S: Simd> = S::f32s;
1607 type Unit = Self;
1608
1609 const IS_NATIVE_F32: bool = true;
1610 const IS_REAL: bool = true;
1611 const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
1612
1613 #[inline(always)]
1614 fn zero_impl() -> Self {
1615 0.0
1616 }
1617
1618 #[inline(always)]
1619 fn one_impl() -> Self {
1620 1.0
1621 }
1622
1623 #[inline(always)]
1624 fn nan_impl() -> Self {
1625 Self::NAN
1626 }
1627
1628 #[inline(always)]
1629 fn infinity_impl() -> Self {
1630 Self::INFINITY
1631 }
1632
1633 #[inline(always)]
1634 fn from_real_impl(value: &Self) -> Self {
1635 *value
1636 }
1637
1638 #[inline(always)]
1639 fn from_f64_impl(value: f64) -> Self {
1640 value as _
1641 }
1642
1643 #[inline(always)]
1644 fn real_part_impl(value: &Self) -> Self {
1645 *value
1646 }
1647
1648 #[inline(always)]
1649 fn imag_part_impl(_: &Self) -> Self {
1650 0.0
1651 }
1652
1653 #[inline(always)]
1654 fn copy_impl(value: &Self) -> Self {
1655 *value
1656 }
1657
1658 #[inline(always)]
1659 fn conj_impl(value: &Self) -> Self {
1660 *value
1661 }
1662
1663 #[inline(always)]
1664 fn recip_impl(value: &Self) -> Self {
1665 1.0 / *value
1666 }
1667
1668 #[inline(always)]
1669 fn sqrt_impl(value: &Self) -> Self {
1670 #[cfg(feature = "std")]
1671 {
1672 Self::sqrt(*value)
1673 }
1674 #[cfg(not(feature = "std"))]
1675 {
1676 libm::sqrtf(*value)
1677 }
1678 }
1679
1680 #[inline(always)]
1681 fn abs_impl(value: &Self) -> Self {
1682 (*value).abs()
1683 }
1684
1685 #[inline(always)]
1686 fn abs1_impl(value: &Self) -> Self {
1687 (*value).abs()
1688 }
1689
1690 #[inline(always)]
1691 fn abs2_impl(value: &Self) -> Self {
1692 (*value) * (*value)
1693 }
1694
1695 #[inline(always)]
1696 fn mul_real_impl(lhs: &Self, rhs: &Self) -> Self {
1697 (*lhs) * (*rhs)
1698 }
1699
1700 #[inline(always)]
1701 fn mul_pow2_impl(lhs: &Self, rhs: &Self) -> Self {
1702 (*lhs) * (*rhs)
1703 }
1704
1705 #[inline(always)]
1706 fn is_finite_impl(value: &Self) -> bool {
1707 (*value).is_finite()
1708 }
1709
1710 #[inline(always)]
1711 fn is_nan_impl(value: &Self) -> bool {
1712 (*value).is_nan()
1713 }
1714
1715 #[inline(always)]
1716 fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
1717 simd
1718 }
1719
1720 #[inline(always)]
1721 fn simd_splat<S: Simd>(
1722 ctx: &Self::SimdCtx<S>,
1723 value: &Self,
1724 ) -> Self::SimdVec<S> {
1725 ctx.splat_f32s(*value)
1726 }
1727
1728 #[inline(always)]
1729 fn simd_splat_real<S: Simd>(
1730 ctx: &Self::SimdCtx<S>,
1731 value: &Self::Real,
1732 ) -> Self::SimdVec<S> {
1733 ctx.splat_f32s(*value)
1734 }
1735
1736 #[inline(always)]
1737 fn simd_add<S: Simd>(
1738 ctx: &Self::SimdCtx<S>,
1739 lhs: Self::SimdVec<S>,
1740 rhs: Self::SimdVec<S>,
1741 ) -> Self::SimdVec<S> {
1742 ctx.add_f32s(lhs, rhs)
1743 }
1744
1745 #[inline(always)]
1746 fn simd_sub<S: Simd>(
1747 ctx: &Self::SimdCtx<S>,
1748 lhs: Self::SimdVec<S>,
1749 rhs: Self::SimdVec<S>,
1750 ) -> Self::SimdVec<S> {
1751 ctx.sub_f32s(lhs, rhs)
1752 }
1753
1754 #[inline(always)]
1755 fn simd_neg<S: Simd>(
1756 ctx: &Self::SimdCtx<S>,
1757 value: Self::SimdVec<S>,
1758 ) -> Self::SimdVec<S> {
1759 ctx.neg_f32s(value)
1760 }
1761
1762 #[inline(always)]
1763 fn simd_conj<S: Simd>(
1764 _: &Self::SimdCtx<S>,
1765 value: Self::SimdVec<S>,
1766 ) -> Self::SimdVec<S> {
1767 value
1768 }
1769
1770 #[inline(always)]
1771 fn simd_abs1<S: Simd>(
1772 ctx: &Self::SimdCtx<S>,
1773 value: Self::SimdVec<S>,
1774 ) -> Self::SimdVec<S> {
1775 ctx.abs_f32s(value)
1776 }
1777
1778 #[inline(always)]
1779 fn simd_mul<S: Simd>(
1780 ctx: &Self::SimdCtx<S>,
1781 lhs: Self::SimdVec<S>,
1782 rhs: Self::SimdVec<S>,
1783 ) -> Self::SimdVec<S> {
1784 ctx.mul_f32s(lhs, rhs)
1785 }
1786
1787 #[inline(always)]
1788 fn simd_mul_real<S: Simd>(
1789 ctx: &Self::SimdCtx<S>,
1790 lhs: Self::SimdVec<S>,
1791 real_rhs: Self::SimdVec<S>,
1792 ) -> Self::SimdVec<S> {
1793 ctx.mul_f32s(lhs, real_rhs)
1794 }
1795
1796 #[inline(always)]
1797 fn simd_mul_pow2<S: Simd>(
1798 ctx: &Self::SimdCtx<S>,
1799 lhs: Self::SimdVec<S>,
1800 real_rhs: Self::SimdVec<S>,
1801 ) -> Self::SimdVec<S> {
1802 ctx.mul_f32s(lhs, real_rhs)
1803 }
1804
1805 #[inline(always)]
1806 fn simd_conj_mul<S: Simd>(
1807 ctx: &Self::SimdCtx<S>,
1808 lhs: Self::SimdVec<S>,
1809 rhs: Self::SimdVec<S>,
1810 ) -> Self::SimdVec<S> {
1811 ctx.mul_f32s(lhs, rhs)
1812 }
1813
1814 #[inline(always)]
1815 fn simd_mul_add<S: Simd>(
1816 ctx: &Self::SimdCtx<S>,
1817 lhs: Self::SimdVec<S>,
1818 rhs: Self::SimdVec<S>,
1819 acc: Self::SimdVec<S>,
1820 ) -> Self::SimdVec<S> {
1821 ctx.mul_add_e_f32s(lhs, rhs, acc)
1822 }
1823
1824 #[inline(always)]
1825 fn simd_conj_mul_add<S: Simd>(
1826 ctx: &Self::SimdCtx<S>,
1827 lhs: Self::SimdVec<S>,
1828 rhs: Self::SimdVec<S>,
1829 acc: Self::SimdVec<S>,
1830 ) -> Self::SimdVec<S> {
1831 ctx.mul_add_e_f32s(lhs, rhs, acc)
1832 }
1833
1834 #[inline(always)]
1835 fn simd_abs2<S: Simd>(
1836 ctx: &Self::SimdCtx<S>,
1837 value: Self::SimdVec<S>,
1838 ) -> Self::SimdVec<S> {
1839 ctx.mul_f32s(value, value)
1840 }
1841
1842 #[inline(always)]
1843 fn simd_abs2_add<S: Simd>(
1844 ctx: &Self::SimdCtx<S>,
1845 value: Self::SimdVec<S>,
1846 acc: Self::SimdVec<S>,
1847 ) -> Self::SimdVec<S> {
1848 ctx.mul_add_e_f32s(value, value, acc)
1849 }
1850
1851 #[inline(always)]
1852 fn simd_reduce_sum<S: Simd>(
1853 ctx: &Self::SimdCtx<S>,
1854 value: Self::SimdVec<S>,
1855 ) -> Self {
1856 ctx.reduce_sum_f32s(value)
1857 }
1858
1859 #[inline(always)]
1860 fn simd_reduce_max<S: Simd>(
1861 ctx: &Self::SimdCtx<S>,
1862 value: Self::SimdVec<S>,
1863 ) -> Self {
1864 ctx.reduce_max_f32s(value)
1865 }
1866
1867 #[inline(always)]
1868 fn simd_equal<S: Simd>(
1869 ctx: &Self::SimdCtx<S>,
1870 real_lhs: Self::SimdVec<S>,
1871 real_rhs: Self::SimdVec<S>,
1872 ) -> Self::SimdMask<S> {
1873 ctx.equal_f32s(real_lhs, real_rhs)
1874 }
1875
1876 #[inline(always)]
1877 fn simd_less_than<S: Simd>(
1878 ctx: &Self::SimdCtx<S>,
1879 real_lhs: Self::SimdVec<S>,
1880 real_rhs: Self::SimdVec<S>,
1881 ) -> Self::SimdMask<S> {
1882 ctx.less_than_f32s(real_lhs, real_rhs)
1883 }
1884
1885 #[inline(always)]
1886 fn simd_greater_than<S: Simd>(
1887 ctx: &Self::SimdCtx<S>,
1888 real_lhs: Self::SimdVec<S>,
1889 real_rhs: Self::SimdVec<S>,
1890 ) -> Self::SimdMask<S> {
1891 ctx.greater_than_f32s(real_lhs, real_rhs)
1892 }
1893
1894 #[inline(always)]
1895 fn simd_less_than_or_equal<S: Simd>(
1896 ctx: &Self::SimdCtx<S>,
1897 real_lhs: Self::SimdVec<S>,
1898 real_rhs: Self::SimdVec<S>,
1899 ) -> Self::SimdMask<S> {
1900 ctx.less_than_or_equal_f32s(real_lhs, real_rhs)
1901 }
1902
1903 #[inline(always)]
1904 fn simd_greater_than_or_equal<S: Simd>(
1905 ctx: &Self::SimdCtx<S>,
1906 real_lhs: Self::SimdVec<S>,
1907 real_rhs: Self::SimdVec<S>,
1908 ) -> Self::SimdMask<S> {
1909 ctx.greater_than_or_equal_f32s(real_lhs, real_rhs)
1910 }
1911
1912 #[inline(always)]
1913 fn simd_select<S: Simd>(
1914 ctx: &Self::SimdCtx<S>,
1915 mask: Self::SimdMask<S>,
1916 lhs: Self::SimdVec<S>,
1917 rhs: Self::SimdVec<S>,
1918 ) -> Self::SimdVec<S> {
1919 ctx.select_f32s(mask, lhs, rhs)
1920 }
1921
1922 #[inline(always)]
1923 fn simd_index_select<S: Simd>(
1924 ctx: &Self::SimdCtx<S>,
1925 mask: Self::SimdMask<S>,
1926 lhs: Self::SimdIndex<S>,
1927 rhs: Self::SimdIndex<S>,
1928 ) -> Self::SimdIndex<S> {
1929 ctx.select_u32s(mask, lhs, rhs)
1930 }
1931
1932 #[inline(always)]
1933 fn simd_index_splat<S: Simd>(
1934 ctx: &Self::SimdCtx<S>,
1935 value: Self::Index,
1936 ) -> Self::SimdIndex<S> {
1937 ctx.splat_u32s(value as _)
1938 }
1939
1940 #[inline(always)]
1941 fn simd_index_add<S: Simd>(
1942 ctx: &Self::SimdCtx<S>,
1943 lhs: Self::SimdIndex<S>,
1944 rhs: Self::SimdIndex<S>,
1945 ) -> Self::SimdIndex<S> {
1946 ctx.add_u32s(lhs, rhs)
1947 }
1948
1949 #[inline(always)]
1950 fn simd_index_less_than<S: Simd>(
1951 ctx: &Self::SimdCtx<S>,
1952 lhs: Self::SimdIndex<S>,
1953 rhs: Self::SimdIndex<S>,
1954 ) -> Self::SimdMask<S> {
1955 ctx.less_than_u32s(lhs, rhs)
1956 }
1957
1958 #[inline(always)]
1959 fn simd_abs_max<S: Simd>(
1960 ctx: &Self::SimdCtx<S>,
1961 value: Self::SimdVec<S>,
1962 ) -> Self::SimdVec<S> {
1963 ctx.abs_f32s(value)
1964 }
1965
1966 #[inline(always)]
1967 fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
1968 *ctx
1969 }
1970
1971 #[inline(always)]
1972 fn simd_and_mask<S: Simd>(
1973 simd: &Self::SimdCtx<S>,
1974 lhs: Self::SimdMask<S>,
1975 rhs: Self::SimdMask<S>,
1976 ) -> Self::SimdMask<S> {
1977 simd.and_m32s(lhs, rhs)
1978 }
1979
1980 #[inline(always)]
1981 fn simd_or_mask<S: Simd>(
1982 simd: &Self::SimdCtx<S>,
1983 lhs: Self::SimdMask<S>,
1984 rhs: Self::SimdMask<S>,
1985 ) -> Self::SimdMask<S> {
1986 simd.or_m32s(lhs, rhs)
1987 }
1988
1989 #[inline(always)]
1990 fn simd_not_mask<S: Simd>(
1991 ctx: &Self::SimdCtx<S>,
1992 mask: Self::SimdMask<S>,
1993 ) -> Self::SimdMask<S> {
1994 ctx.not_m32s(mask)
1995 }
1996
1997 #[inline(always)]
1998 fn simd_first_true_mask<S: Simd>(
1999 ctx: &Self::SimdCtx<S>,
2000 value: Self::SimdMask<S>,
2001 ) -> usize {
2002 ctx.first_true_m32s(value)
2003 }
2004
2005 #[inline(always)]
2006 fn simd_mem_mask_between<S: Simd>(
2007 ctx: &Self::SimdCtx<S>,
2008 start: Self::Index,
2009 end: Self::Index,
2010 ) -> Self::SimdMemMask<S> {
2011 ctx.mask_between_m32s(start as _, end as _)
2012 }
2013
2014 #[inline(always)]
2015 fn simd_mask_between<S: Simd>(
2016 ctx: &Self::SimdCtx<S>,
2017 start: Self::Index,
2018 end: Self::Index,
2019 ) -> Self::SimdMask<S> {
2020 ctx.mask_between_m32s(start as _, end as _).mask()
2021 }
2022
2023 #[inline(always)]
2024 unsafe fn simd_mask_load_raw<S: Simd>(
2025 ctx: &Self::SimdCtx<S>,
2026 mask: Self::SimdMemMask<S>,
2027 ptr: *const Self::SimdVec<S>,
2028 ) -> Self::SimdVec<S> {
2029 ctx.mask_load_ptr_f32s(mask, ptr as _)
2030 }
2031
2032 #[inline(always)]
2033 unsafe fn simd_mask_store_raw<S: Simd>(
2034 ctx: &Self::SimdCtx<S>,
2035 mask: Self::SimdMemMask<S>,
2036 ptr: *mut Self::SimdVec<S>,
2037 values: Self::SimdVec<S>,
2038 ) {
2039 ctx.mask_store_ptr_f32s(mask, ptr as _, values);
2040 }
2041}
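// Note: `max_positive_impl` below is `MIN_POSITIVE.recip()` rather than `f32::MAX`,
// presumably so that the bound and its reciprocal are both normal and
// `sqrt_min_positive * sqrt_max_positive == 1`, which the scaled `abs_impl` and
// `recip_impl` above rely on.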
2042impl RealField for f32 {
2043 #[inline(always)]
2044 fn epsilon_impl() -> Self {
2045 Self::EPSILON
2046 }
2047
2048 #[inline(always)]
2049 fn min_positive_impl() -> Self {
2050 Self::MIN_POSITIVE
2051 }
2052
2053 #[inline(always)]
2054 fn max_positive_impl() -> Self {
2055 Self::MIN_POSITIVE.recip()
2056 }
2057
2058 #[inline(always)]
2059 fn sqrt_min_positive_impl() -> Self {
2060 Self::sqrt_impl(&Self::MIN_POSITIVE)
2061 }
2062
2063 #[inline(always)]
2064 fn sqrt_max_positive_impl() -> Self {
2065 Self::sqrt_impl(&Self::MIN_POSITIVE.recip())
2066 }
2067
2068 #[inline(always)]
2069 fn nbits_impl() -> usize {
2070 Self::MANTISSA_DIGITS as usize
2071 }
2072}
2073impl ComplexField for f64 {
2074 type Arch = pulp::Arch;
2075 type Index = u64;
2076 type Real = Self;
2077 type SimdCtx<S: Simd> = S;
2078 type SimdIndex<S: Simd> = S::u64s;
2079 type SimdMask<S: Simd> = S::m64s;
2080 type SimdMemMask<S: Simd> = pulp::MemMask<S::m64s>;
2081 type SimdVec<S: Simd> = S::f64s;
2082 type Unit = Self;
2083
2084 const IS_NATIVE_F64: bool = true;
2085 const IS_REAL: bool = true;
2086 const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
2087
2088 #[inline(always)]
2089 fn zero_impl() -> Self {
2090 0.0
2091 }
2092
2093 #[inline(always)]
2094 fn one_impl() -> Self {
2095 1.0
2096 }
2097
2098 #[inline(always)]
2099 fn nan_impl() -> Self {
2100 Self::NAN
2101 }
2102
2103 #[inline(always)]
2104 fn infinity_impl() -> Self {
2105 Self::INFINITY
2106 }
2107
2108 #[inline(always)]
2109 fn from_real_impl(value: &Self) -> Self {
2110 *value
2111 }
2112
2113 #[inline(always)]
2114 fn from_f64_impl(value: f64) -> Self {
2115 value as _
2116 }
2117
2118 #[inline(always)]
2119 fn real_part_impl(value: &Self) -> Self {
2120 *value
2121 }
2122
2123 #[inline(always)]
2124 fn imag_part_impl(_: &Self) -> Self {
2125 0.0
2126 }
2127
2128 #[inline(always)]
2129 fn copy_impl(value: &Self) -> Self {
2130 *value
2131 }
2132
2133 #[inline(always)]
2134 fn conj_impl(value: &Self) -> Self {
2135 *value
2136 }
2137
2138 #[inline(always)]
2139 fn recip_impl(value: &Self) -> Self {
2140 1.0 / *value
2141 }
2142
2143 #[inline(always)]
2144 fn sqrt_impl(value: &Self) -> Self {
2145 #[cfg(feature = "std")]
2146 {
2147 Self::sqrt(*value)
2148 }
2149 #[cfg(not(feature = "std"))]
2150 {
2151 libm::sqrt(*value)
2152 }
2153 }
2154
2155 #[inline(always)]
2156 fn abs_impl(value: &Self) -> Self {
2157 (*value).abs()
2158 }
2159
2160 #[inline(always)]
2161 fn abs1_impl(value: &Self) -> Self {
2162 (*value).abs()
2163 }
2164
2165 #[inline(always)]
2166 fn abs2_impl(value: &Self) -> Self {
2167 (*value) * (*value)
2168 }
2169
2170 #[inline(always)]
2171 fn mul_real_impl(lhs: &Self, rhs: &Self) -> Self {
2172 (*lhs) * (*rhs)
2173 }
2174
2175 #[inline(always)]
2176 fn mul_pow2_impl(lhs: &Self, rhs: &Self) -> Self {
2177 (*lhs) * (*rhs)
2178 }
2179
2180 #[inline(always)]
2181 fn is_nan_impl(value: &Self) -> bool {
2182 (*value).is_nan()
2183 }
2184
2185 #[inline(always)]
2186 fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
2187 simd
2188 }
2189
2190 #[inline(always)]
2191 fn simd_splat<S: Simd>(
2192 ctx: &Self::SimdCtx<S>,
2193 value: &Self,
2194 ) -> Self::SimdVec<S> {
2195 ctx.splat_f64s(*value)
2196 }
2197
2198 #[inline(always)]
2199 fn simd_splat_real<S: Simd>(
2200 ctx: &Self::SimdCtx<S>,
2201 value: &Self::Real,
2202 ) -> Self::SimdVec<S> {
2203 ctx.splat_f64s(*value)
2204 }
2205
2206 #[inline(always)]
2207 fn simd_add<S: Simd>(
2208 ctx: &Self::SimdCtx<S>,
2209 lhs: Self::SimdVec<S>,
2210 rhs: Self::SimdVec<S>,
2211 ) -> Self::SimdVec<S> {
2212 ctx.add_f64s(lhs, rhs)
2213 }
2214
2215 #[inline(always)]
2216 fn simd_sub<S: Simd>(
2217 ctx: &Self::SimdCtx<S>,
2218 lhs: Self::SimdVec<S>,
2219 rhs: Self::SimdVec<S>,
2220 ) -> Self::SimdVec<S> {
2221 ctx.sub_f64s(lhs, rhs)
2222 }
2223
2224 #[inline(always)]
2225 fn simd_neg<S: Simd>(
2226 ctx: &Self::SimdCtx<S>,
2227 value: Self::SimdVec<S>,
2228 ) -> Self::SimdVec<S> {
2229 ctx.neg_f64s(value)
2230 }
2231
2232 #[inline(always)]
2233 fn simd_conj<S: Simd>(
2234 _: &Self::SimdCtx<S>,
2235 value: Self::SimdVec<S>,
2236 ) -> Self::SimdVec<S> {
2237 value
2238 }
2239
2240 #[inline(always)]
2241 fn simd_abs1<S: Simd>(
2242 ctx: &Self::SimdCtx<S>,
2243 value: Self::SimdVec<S>,
2244 ) -> Self::SimdVec<S> {
2245 ctx.abs_f64s(value)
2246 }
2247
2248 #[inline(always)]
2249 fn simd_mul<S: Simd>(
2250 ctx: &Self::SimdCtx<S>,
2251 lhs: Self::SimdVec<S>,
2252 rhs: Self::SimdVec<S>,
2253 ) -> Self::SimdVec<S> {
2254 ctx.mul_f64s(lhs, rhs)
2255 }
2256
2257 #[inline(always)]
2258 fn simd_mul_real<S: Simd>(
2259 ctx: &Self::SimdCtx<S>,
2260 lhs: Self::SimdVec<S>,
2261 real_rhs: Self::SimdVec<S>,
2262 ) -> Self::SimdVec<S> {
2263 ctx.mul_f64s(lhs, real_rhs)
2264 }
2265
2266 #[inline(always)]
2267 fn simd_mul_pow2<S: Simd>(
2268 ctx: &Self::SimdCtx<S>,
2269 lhs: Self::SimdVec<S>,
2270 real_rhs: Self::SimdVec<S>,
2271 ) -> Self::SimdVec<S> {
2272 ctx.mul_f64s(lhs, real_rhs)
2273 }
2274
2275 #[inline(always)]
2276 fn simd_conj_mul<S: Simd>(
2277 ctx: &Self::SimdCtx<S>,
2278 lhs: Self::SimdVec<S>,
2279 rhs: Self::SimdVec<S>,
2280 ) -> Self::SimdVec<S> {
2281 ctx.mul_f64s(lhs, rhs)
2282 }
2283
2284 #[inline(always)]
2285 fn simd_mul_add<S: Simd>(
2286 ctx: &Self::SimdCtx<S>,
2287 lhs: Self::SimdVec<S>,
2288 rhs: Self::SimdVec<S>,
2289 acc: Self::SimdVec<S>,
2290 ) -> Self::SimdVec<S> {
2291 ctx.mul_add_e_f64s(lhs, rhs, acc)
2292 }
2293
2294 #[inline(always)]
2295 fn simd_conj_mul_add<S: Simd>(
2296 ctx: &Self::SimdCtx<S>,
2297 lhs: Self::SimdVec<S>,
2298 rhs: Self::SimdVec<S>,
2299 acc: Self::SimdVec<S>,
2300 ) -> Self::SimdVec<S> {
2301 ctx.mul_add_e_f64s(lhs, rhs, acc)
2302 }
2303
2304 #[inline(always)]
2305 fn simd_abs2<S: Simd>(
2306 ctx: &Self::SimdCtx<S>,
2307 value: Self::SimdVec<S>,
2308 ) -> Self::SimdVec<S> {
2309 ctx.mul_f64s(value, value)
2310 }
2311
2312 #[inline(always)]
2313 fn simd_abs2_add<S: Simd>(
2314 ctx: &Self::SimdCtx<S>,
2315 value: Self::SimdVec<S>,
2316 acc: Self::SimdVec<S>,
2317 ) -> Self::SimdVec<S> {
2318 ctx.mul_add_e_f64s(value, value, acc)
2319 }
2320
2321 #[inline(always)]
2322 fn simd_reduce_sum<S: Simd>(
2323 ctx: &Self::SimdCtx<S>,
2324 value: Self::SimdVec<S>,
2325 ) -> Self {
2326 ctx.reduce_sum_f64s(value)
2327 }
2328
2329 #[inline(always)]
2330 fn simd_reduce_max<S: Simd>(
2331 ctx: &Self::SimdCtx<S>,
2332 value: Self::SimdVec<S>,
2333 ) -> Self {
2334 ctx.reduce_max_f64s(value)
2335 }
2336
2337 #[inline(always)]
2338 fn simd_equal<S: Simd>(
2339 ctx: &Self::SimdCtx<S>,
2340 real_lhs: Self::SimdVec<S>,
2341 real_rhs: Self::SimdVec<S>,
2342 ) -> Self::SimdMask<S> {
2343 ctx.equal_f64s(real_lhs, real_rhs)
2344 }
2345
2346 #[inline(always)]
2347 fn simd_less_than<S: Simd>(
2348 ctx: &Self::SimdCtx<S>,
2349 real_lhs: Self::SimdVec<S>,
2350 real_rhs: Self::SimdVec<S>,
2351 ) -> Self::SimdMask<S> {
2352 ctx.less_than_f64s(real_lhs, real_rhs)
2353 }
2354
2355 #[inline(always)]
2356 fn simd_greater_than<S: Simd>(
2357 ctx: &Self::SimdCtx<S>,
2358 real_lhs: Self::SimdVec<S>,
2359 real_rhs: Self::SimdVec<S>,
2360 ) -> Self::SimdMask<S> {
2361 ctx.greater_than_f64s(real_lhs, real_rhs)
2362 }
2363
2364 #[inline(always)]
2365 fn simd_less_than_or_equal<S: Simd>(
2366 ctx: &Self::SimdCtx<S>,
2367 real_lhs: Self::SimdVec<S>,
2368 real_rhs: Self::SimdVec<S>,
2369 ) -> Self::SimdMask<S> {
2370 ctx.less_than_or_equal_f64s(real_lhs, real_rhs)
2371 }
2372
2373 #[inline(always)]
2374 fn simd_greater_than_or_equal<S: Simd>(
2375 ctx: &Self::SimdCtx<S>,
2376 real_lhs: Self::SimdVec<S>,
2377 real_rhs: Self::SimdVec<S>,
2378 ) -> Self::SimdMask<S> {
2379 ctx.greater_than_or_equal_f64s(real_lhs, real_rhs)
2380 }
2381
2382 #[inline(always)]
2383 fn simd_select<S: Simd>(
2384 ctx: &Self::SimdCtx<S>,
2385 mask: Self::SimdMask<S>,
2386 lhs: Self::SimdVec<S>,
2387 rhs: Self::SimdVec<S>,
2388 ) -> Self::SimdVec<S> {
2389 ctx.select_f64s(mask, lhs, rhs)
2390 }
2391
2392 #[inline(always)]
2393 fn simd_index_select<S: Simd>(
2394 ctx: &Self::SimdCtx<S>,
2395 mask: Self::SimdMask<S>,
2396 lhs: Self::SimdIndex<S>,
2397 rhs: Self::SimdIndex<S>,
2398 ) -> Self::SimdIndex<S> {
2399 ctx.select_u64s(mask, lhs, rhs)
2400 }
2401
2402 #[inline(always)]
2403 fn simd_index_splat<S: Simd>(
2404 ctx: &Self::SimdCtx<S>,
2405 value: Self::Index,
2406 ) -> Self::SimdIndex<S> {
2407 ctx.splat_u64s(value as _)
2408 }
2409
2410 #[inline(always)]
2411 fn simd_index_add<S: Simd>(
2412 ctx: &Self::SimdCtx<S>,
2413 lhs: Self::SimdIndex<S>,
2414 rhs: Self::SimdIndex<S>,
2415 ) -> Self::SimdIndex<S> {
2416 ctx.add_u64s(lhs, rhs)
2417 }
2418
2419 #[inline(always)]
2420 fn simd_index_less_than<S: Simd>(
2421 ctx: &Self::SimdCtx<S>,
2422 lhs: Self::SimdIndex<S>,
2423 rhs: Self::SimdIndex<S>,
2424 ) -> Self::SimdMask<S> {
2425 ctx.less_than_u64s(lhs, rhs)
2426 }
2427
2428 #[inline(always)]
2429 fn simd_abs_max<S: Simd>(
2430 ctx: &Self::SimdCtx<S>,
2431 value: Self::SimdVec<S>,
2432 ) -> Self::SimdVec<S> {
2433 ctx.abs_f64s(value)
2434 }
2435
2436 #[inline(always)]
2437 fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
2438 *ctx
2439 }
2440
2441 #[inline(always)]
2442 fn simd_and_mask<S: Simd>(
2443 simd: &Self::SimdCtx<S>,
2444 lhs: Self::SimdMask<S>,
2445 rhs: Self::SimdMask<S>,
2446 ) -> Self::SimdMask<S> {
2447 simd.and_m64s(lhs, rhs)
2448 }
2449
2450 #[inline(always)]
2451 fn simd_or_mask<S: Simd>(
2452 simd: &Self::SimdCtx<S>,
2453 lhs: Self::SimdMask<S>,
2454 rhs: Self::SimdMask<S>,
2455 ) -> Self::SimdMask<S> {
2456 simd.or_m64s(lhs, rhs)
2457 }
2458
2459 #[inline(always)]
2460 fn simd_not_mask<S: Simd>(
2461 ctx: &Self::SimdCtx<S>,
2462 mask: Self::SimdMask<S>,
2463 ) -> Self::SimdMask<S> {
2464 ctx.not_m64s(mask)
2465 }
2466
2467 #[inline(always)]
2468 fn simd_first_true_mask<S: Simd>(
2469 ctx: &Self::SimdCtx<S>,
2470 value: Self::SimdMask<S>,
2471 ) -> usize {
2472 ctx.first_true_m64s(value)
2473 }
2474
2475 #[inline(always)]
2476 fn is_finite_impl(value: &Self) -> bool {
2477 (*value).is_finite()
2478 }
2479
2480 #[inline(always)]
2481 fn simd_mem_mask_between<S: Simd>(
2482 ctx: &Self::SimdCtx<S>,
2483 start: Self::Index,
2484 end: Self::Index,
2485 ) -> Self::SimdMemMask<S> {
2486 ctx.mask_between_m64s(start as _, end as _)
2487 }
2488
2489 #[inline(always)]
2490 fn simd_mask_between<S: Simd>(
2491 ctx: &Self::SimdCtx<S>,
2492 start: Self::Index,
2493 end: Self::Index,
2494 ) -> Self::SimdMask<S> {
2495 ctx.mask_between_m64s(start as _, end as _).mask()
2496 }
2497
2498 #[inline(always)]
2499 unsafe fn simd_mask_load_raw<S: Simd>(
2500 ctx: &Self::SimdCtx<S>,
2501 mask: Self::SimdMemMask<S>,
2502 ptr: *const Self::SimdVec<S>,
2503 ) -> Self::SimdVec<S> {
2504 ctx.mask_load_ptr_f64s(mask, ptr as _)
2505 }
2506
2507 #[inline(always)]
2508 unsafe fn simd_mask_store_raw<S: Simd>(
2509 ctx: &Self::SimdCtx<S>,
2510 mask: Self::SimdMemMask<S>,
2511 ptr: *mut Self::SimdVec<S>,
2512 values: Self::SimdVec<S>,
2513 ) {
2514 ctx.mask_store_ptr_f64s(mask, ptr as _, values);
2515 }
2516}
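// A minimal sketch (hypothetical test module, not part of the original crate)
// illustrating how the scalar `ComplexField` entry points behave for `f64`,
// where the "complex" operations degenerate to real arithmetic: `conj` is the
// identity and `abs2` is the square.
#[cfg(test)]
mod f64_complex_field_sketch {
    use crate::ComplexField;

    #[test]
    fn scalar_behaviour() {
        // 1/4 via the trait's reciprocal.
        assert_eq!(<f64 as ComplexField>::recip_impl(&4.0), 0.25);
        // abs2(x) = x * x for a real scalar.
        assert_eq!(<f64 as ComplexField>::abs2_impl(&3.0), 9.0);
        // Conjugation is a no-op on the real line.
        assert_eq!(<f64 as ComplexField>::conj_impl(&-2.5), -2.5);
        // The imaginary part of a real scalar is identically zero.
        assert_eq!(<f64 as ComplexField>::imag_part_impl(&7.0), 0.0);
        // NaN is not finite.
        assert!(!<f64 as ComplexField>::is_finite_impl(&f64::NAN));
    }
}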
2517impl RealField for f64 {
2518 #[inline(always)]
2519 fn epsilon_impl() -> Self {
2520 Self::EPSILON
2521 }
2522
2523 #[inline(always)]
2524 fn min_positive_impl() -> Self {
2525 Self::MIN_POSITIVE
2526 }
2527
2528 #[inline(always)]
2529 fn max_positive_impl() -> Self {
2530 Self::MIN_POSITIVE.recip()
2531 }
2532
2533 #[inline(always)]
2534 fn sqrt_min_positive_impl() -> Self {
2535 Self::sqrt_impl(&Self::MIN_POSITIVE)
2536 }
2537
2538 #[inline(always)]
2539 fn sqrt_max_positive_impl() -> Self {
2540 Self::sqrt_impl(&Self::MIN_POSITIVE.recip())
2541 }
2542
2543 #[inline(always)]
2544 fn nbits_impl() -> usize {
2545 Self::MANTISSA_DIGITS as usize
2546 }
2547}
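// A small sketch (hypothetical test module, not part of the original code)
// checking the relationships the `RealField` constants above are built around:
// `max_positive` is defined as the reciprocal of `MIN_POSITIVE`, and the two
// square roots multiply back to one.
#[cfg(test)]
mod f64_real_field_sketch {
    use crate::RealField;

    #[test]
    fn constant_relationships() {
        assert_eq!(<f64 as RealField>::epsilon_impl(), f64::EPSILON);
        assert_eq!(
            <f64 as RealField>::max_positive_impl(),
            f64::MIN_POSITIVE.recip(),
        );
        let prod = <f64 as RealField>::sqrt_min_positive_impl()
            * <f64 as RealField>::sqrt_max_positive_impl();
        assert!((prod - 1.0).abs() < 1e-12);
    }
}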
2548impl<T: RealField<Unit: ComplexField>> ComplexField for Complex<T> {
2549 type Arch = T::Arch;
2550 type Index = T::Index;
2551 type Real = T;
2552 type SimdCtx<S: Simd> = T::SimdCtx<S>;
2553 type SimdIndex<S: Simd> = T::SimdIndex<S>;
2554 type SimdMask<S: Simd> = T::SimdMask<S>;
2555 type SimdMemMask<S: Simd> = Complex<T::SimdMemMask<S>>;
2556 type SimdVec<S: Simd> = Complex<T::SimdVec<S>>;
2557 type Unit = T::Unit;
2558
2559 const IS_NATIVE_C32: bool = T::IS_NATIVE_F32;
2560 const IS_NATIVE_C64: bool = T::IS_NATIVE_F64;
2561 const IS_REAL: bool = false;
2562 const SIMD_CAPABILITIES: SimdCapabilities = T::SIMD_CAPABILITIES;
2563
2564 #[inline]
2565 fn zero_impl() -> Self {
2566 Complex {
2567 re: T::zero_impl(),
2568 im: T::zero_impl(),
2569 }
2570 }
2571
2572 #[inline]
2573 fn one_impl() -> Self {
2574 Complex {
2575 re: T::one_impl(),
2576 im: T::zero_impl(),
2577 }
2578 }
2579
2580 #[inline]
2581 fn nan_impl() -> Self {
2582 Complex {
2583 re: T::nan_impl(),
2584 im: T::nan_impl(),
2585 }
2586 }
2587
2588 #[inline]
2589 fn infinity_impl() -> Self {
2590 Complex {
2591 re: T::infinity_impl(),
2592 im: T::infinity_impl(),
2593 }
2594 }
2595
2596 #[inline]
2597 fn from_real_impl(real: &Self::Real) -> Self {
2598 Complex {
2599 re: real.clone(),
2600 im: T::zero_impl(),
2601 }
2602 }
2603
2604 #[inline]
2605 fn from_f64_impl(real: f64) -> Self {
2606 Complex {
2607 re: T::from_f64_impl(real),
2608 im: T::zero_impl(),
2609 }
2610 }
2611
2612 #[inline]
2613 fn real_part_impl(value: &Self) -> Self::Real {
2614 value.re.clone()
2615 }
2616
2617 #[inline]
2618 fn imag_part_impl(value: &Self) -> Self::Real {
2619 value.im.clone()
2620 }
2621
2622 #[inline]
2623 fn copy_impl(value: &Self) -> Self {
2624 value.clone()
2625 }
2626
2627 #[inline]
2628 fn conj_impl(value: &Self) -> Self {
2629 Self {
2630 re: value.re.clone(),
2631 im: value.im.neg_by_ref(),
2632 }
2633 }
2634
2635 #[inline]
2636 fn recip_impl(value: &Self) -> Self {
2637 let (re, im) = recip_impl(value.re.clone(), value.im.clone());
2638 Complex { re, im }
2639 }
2640
2641 #[inline]
2642 fn sqrt_impl(value: &Self) -> Self {
2643 let (re, im) = sqrt_impl(value.re.clone(), value.im.clone());
2644 Complex { re, im }
2645 }
2646
2647 #[inline]
2648 fn abs_impl(value: &Self) -> Self::Real {
2649 abs_impl(value.re.clone(), value.im.clone())
2650 }
2651
2652 #[inline]
2653 fn abs1_impl(value: &Self) -> Self::Real {
2654 use crate::ext::*;
2655 value.re.abs1() + value.im.abs1()
2656 }
2657
2658 #[inline]
2659 fn abs2_impl(value: &Self) -> Self::Real {
2660 use crate::ext::*;
2661 value.re.abs2() + value.im.abs2()
2662 }
2663
2664 #[inline]
2665 fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2666 Complex {
2667 re: &lhs.re * rhs,
2668 im: &lhs.im * rhs,
2669 }
2670 }
2671
2672 #[inline]
2673 fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2674 use crate::ext::*;
2675 Complex {
2676 re: lhs.re.mul_pow2(rhs),
2677 im: lhs.im.mul_pow2(rhs),
2678 }
2679 }
2680
2681 #[inline]
2682 fn is_finite_impl(value: &Self) -> bool {
2683 use crate::ext::*;
2684 value.re.is_finite() && value.im.is_finite()
2685 }
2686
2687 #[inline(always)]
2688 fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
2689 T::simd_ctx(simd)
2690 }
2691
2692 #[inline(always)]
2693 fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
2694 T::ctx_from_simd(ctx)
2695 }
2696
2697 #[inline(always)]
2698 fn simd_splat<S: Simd>(
2699 ctx: &Self::SimdCtx<S>,
2700 value: &Self,
2701 ) -> Self::SimdVec<S> {
2702 Complex {
2703 re: T::simd_splat(ctx, &value.re),
2704 im: T::simd_splat(ctx, &value.im),
2705 }
2706 }
2707
2708 #[inline(always)]
2709 fn simd_splat_real<S: Simd>(
2710 ctx: &Self::SimdCtx<S>,
2711 value: &Self::Real,
2712 ) -> Self::SimdVec<S> {
2713 Complex {
2714 re: T::simd_splat_real(ctx, value),
2715 im: T::simd_splat_real(ctx, value),
2716 }
2717 }
2718
2719 #[inline(always)]
2720 fn simd_add<S: Simd>(
2721 ctx: &Self::SimdCtx<S>,
2722 lhs: Self::SimdVec<S>,
2723 rhs: Self::SimdVec<S>,
2724 ) -> Self::SimdVec<S> {
2725 Complex {
2726 re: T::simd_add(ctx, lhs.re, rhs.re),
2727 im: T::simd_add(ctx, lhs.im, rhs.im),
2728 }
2729 }
2730
2731 #[inline(always)]
2732 fn simd_sub<S: Simd>(
2733 ctx: &Self::SimdCtx<S>,
2734 lhs: Self::SimdVec<S>,
2735 rhs: Self::SimdVec<S>,
2736 ) -> Self::SimdVec<S> {
2737 Complex {
2738 re: T::simd_sub(ctx, lhs.re, rhs.re),
2739 im: T::simd_sub(ctx, lhs.im, rhs.im),
2740 }
2741 }
2742
2743 #[inline(always)]
2744 fn simd_neg<S: Simd>(
2745 ctx: &Self::SimdCtx<S>,
2746 value: Self::SimdVec<S>,
2747 ) -> Self::SimdVec<S> {
2748 Complex {
2749 re: T::simd_neg(ctx, value.re),
2750 im: T::simd_neg(ctx, value.im),
2751 }
2752 }
2753
2754 #[inline(always)]
2755 fn simd_conj<S: Simd>(
2756 ctx: &Self::SimdCtx<S>,
2757 value: Self::SimdVec<S>,
2758 ) -> Self::SimdVec<S> {
2759 Complex {
2760 re: value.re,
2761 im: T::simd_neg(ctx, value.im),
2762 }
2763 }
2764
2765 #[inline(always)]
2766 fn simd_abs1<S: Simd>(
2767 ctx: &Self::SimdCtx<S>,
2768 value: Self::SimdVec<S>,
2769 ) -> Self::SimdVec<S> {
2770 let v = T::simd_add(
2771 ctx,
2772 T::simd_abs1(ctx, value.re),
2773 T::simd_abs1(ctx, value.im),
2774 );
2775 Complex { re: v, im: v }
2776 }
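    // `abs1` here is the l1-style magnitude |re| + |im| (matching `abs1_impl`
    // above), broadcast into both halves of the result; it is cheaper than the
    // true modulus since it avoids the square root.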
2777
2778 #[inline(always)]
2779 fn simd_abs_max<S: Simd>(
2780 ctx: &Self::SimdCtx<S>,
2781 value: Self::SimdVec<S>,
2782 ) -> Self::SimdVec<S> {
2783 let re = T::simd_abs_max(ctx, value.re);
2784 let im = T::simd_abs_max(ctx, value.im);
2785 let v = T::simd_select(ctx, T::simd_greater_than(ctx, re, im), re, im);
2786 Complex { re: v, im: v }
2787 }
2788
2789 #[inline(always)]
2790 fn simd_mul_real<S: Simd>(
2791 ctx: &Self::SimdCtx<S>,
2792 lhs: Self::SimdVec<S>,
2793 real_rhs: Self::SimdVec<S>,
2794 ) -> Self::SimdVec<S> {
2795 Complex {
2796 re: T::simd_mul_real(ctx, lhs.re, real_rhs.re),
2797 im: T::simd_mul_real(ctx, lhs.im, real_rhs.re),
2798 }
2799 }
2800
2801 #[inline(always)]
2802 fn simd_mul_pow2<S: Simd>(
2803 ctx: &Self::SimdCtx<S>,
2804 lhs: Self::SimdVec<S>,
2805 real_rhs: Self::SimdVec<S>,
2806 ) -> Self::SimdVec<S> {
2807 Complex {
2808 re: T::simd_mul_pow2(ctx, lhs.re, real_rhs.re),
2809 im: T::simd_mul_pow2(ctx, lhs.im, real_rhs.re),
2810 }
2811 }
2812
2813 #[inline(always)]
2814 fn simd_mul<S: Simd>(
2815 ctx: &Self::SimdCtx<S>,
2816 lhs: Self::SimdVec<S>,
2817 rhs: Self::SimdVec<S>,
2818 ) -> Self::SimdVec<S> {
2819 Complex {
2820 re: T::simd_mul_add(
2821 ctx,
2822 lhs.re,
2823 rhs.re,
2824 T::simd_neg(ctx, T::simd_mul(ctx, lhs.im, rhs.im)),
2825 ),
2826 im: T::simd_mul_add(
2827 ctx,
2828 lhs.re,
2829 rhs.im,
2830 T::simd_mul(ctx, lhs.im, rhs.re),
2831 ),
2832 }
2833 }
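    // The two fused multiply-adds above implement the usual product
    // (a + bi)(c + di) = (ac - bd) + (ad + bc)i: the `re` lane is computed as
    // fma(a, c, -(b * d)) and the `im` lane as fma(a, d, b * c).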
2834
2835 #[inline(always)]
2836 fn simd_conj_mul<S: Simd>(
2837 ctx: &Self::SimdCtx<S>,
2838 lhs: Self::SimdVec<S>,
2839 rhs: Self::SimdVec<S>,
2840 ) -> Self::SimdVec<S> {
2841 Complex {
2842 re: T::simd_mul_add(
2843 ctx,
2844 lhs.re,
2845 rhs.re,
2846 T::simd_mul(ctx, lhs.im, rhs.im),
2847 ),
2848 im: T::simd_mul_add(
2849 ctx,
2850 lhs.re,
2851 rhs.im,
2852 T::simd_neg(ctx, T::simd_mul(ctx, lhs.im, rhs.re)),
2853 ),
2854 }
2855 }
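    // Same structure as `simd_mul`, but with the left operand conjugated:
    // conj(a + bi) * (c + di) = (ac + bd) + (ad - bc)i.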
2856
2857 #[inline(always)]
2858 fn simd_mul_add<S: Simd>(
2859 ctx: &Self::SimdCtx<S>,
2860 lhs: Self::SimdVec<S>,
2861 rhs: Self::SimdVec<S>,
2862 acc: Self::SimdVec<S>,
2863 ) -> Self::SimdVec<S> {
2864 Complex {
2865 re: T::simd_mul_add(
2866 ctx,
2867 T::simd_neg(ctx, lhs.im),
2868 rhs.im,
2869 T::simd_mul_add(ctx, lhs.re, rhs.re, acc.re),
2870 ),
2871 im: T::simd_mul_add(
2872 ctx,
2873 lhs.re,
2874 rhs.im,
2875 T::simd_mul_add(ctx, lhs.im, rhs.re, acc.im),
2876 ),
2877 }
2878 }
2879
2880 #[inline(always)]
2881 fn simd_conj_mul_add<S: Simd>(
2882 ctx: &Self::SimdCtx<S>,
2883 lhs: Self::SimdVec<S>,
2884 rhs: Self::SimdVec<S>,
2885 acc: Self::SimdVec<S>,
2886 ) -> Self::SimdVec<S> {
2887 Complex {
2888 re: T::simd_mul_add(
2889 ctx,
2890 lhs.im,
2891 rhs.im,
2892 T::simd_mul_add(ctx, lhs.re, rhs.re, acc.re),
2893 ),
2894 im: T::simd_mul_add(
2895 ctx,
2896 lhs.re,
2897 rhs.im,
2898 T::simd_mul_add(ctx, T::simd_neg(ctx, lhs.im), rhs.re, acc.im),
2899 ),
2900 }
2901 }
2902
2903 #[inline(always)]
2904 fn simd_abs2<S: Simd>(
2905 ctx: &Self::SimdCtx<S>,
2906 value: Self::SimdVec<S>,
2907 ) -> Self::SimdVec<S> {
2908 let v = T::simd_abs2_add(ctx, value.re, T::simd_abs2(ctx, value.im));
2909 Complex { re: v, im: v }
2910 }
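    // |z|^2 = re^2 + im^2, computed with one `abs2` and one fused `abs2_add`,
    // then broadcast into both halves of the returned vector.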
2911
2912 #[inline(always)]
2913 fn simd_abs2_add<S: Simd>(
2914 ctx: &Self::SimdCtx<S>,
2915 value: Self::SimdVec<S>,
2916 acc: Self::SimdVec<S>,
2917 ) -> Self::SimdVec<S> {
2918 let v = T::simd_abs2_add(
2919 ctx,
2920 value.re,
2921 T::simd_abs2_add(ctx, value.im, acc.re),
2922 );
2923 Complex { re: v, im: v }
2924 }
2925
2926 #[inline(always)]
2927 fn simd_reduce_sum<S: Simd>(
2928 ctx: &Self::SimdCtx<S>,
2929 value: Self::SimdVec<S>,
2930 ) -> Self {
2931 Complex {
2932 re: T::simd_reduce_sum(ctx, value.re),
2933 im: T::simd_reduce_sum(ctx, value.im),
2934 }
2935 }
2936
2937 #[inline(always)]
2938 fn simd_reduce_max<S: Simd>(
2939 ctx: &Self::SimdCtx<S>,
2940 value: Self::SimdVec<S>,
2941 ) -> Self {
2942 Complex {
2943 re: T::simd_reduce_max(ctx, value.re),
2944 im: T::simd_reduce_max(ctx, value.im),
2945 }
2946 }
2947
2948 #[inline(always)]
2949 fn simd_equal<S: Simd>(
2950 ctx: &Self::SimdCtx<S>,
2951 real_lhs: Self::SimdVec<S>,
2952 real_rhs: Self::SimdVec<S>,
2953 ) -> Self::SimdMask<S> {
2954 T::simd_and_mask(
2955 ctx,
2956 T::simd_equal(ctx, real_lhs.re, real_rhs.re),
2957 T::simd_equal(ctx, real_lhs.im, real_rhs.im),
2958 )
2959 }
2960
2961 #[inline(always)]
2962 fn simd_less_than<S: Simd>(
2963 ctx: &Self::SimdCtx<S>,
2964 real_lhs: Self::SimdVec<S>,
2965 real_rhs: Self::SimdVec<S>,
2966 ) -> Self::SimdMask<S> {
2967 T::simd_less_than(ctx, real_lhs.re, real_rhs.re)
2968 }
2969
2970 #[inline(always)]
2971 fn simd_less_than_or_equal<S: Simd>(
2972 ctx: &Self::SimdCtx<S>,
2973 real_lhs: Self::SimdVec<S>,
2974 real_rhs: Self::SimdVec<S>,
2975 ) -> Self::SimdMask<S> {
2976 T::simd_less_than_or_equal(ctx, real_lhs.re, real_rhs.re)
2977 }
2978
2979 #[inline(always)]
2980 fn simd_greater_than<S: Simd>(
2981 ctx: &Self::SimdCtx<S>,
2982 real_lhs: Self::SimdVec<S>,
2983 real_rhs: Self::SimdVec<S>,
2984 ) -> Self::SimdMask<S> {
2985 T::simd_greater_than(ctx, real_lhs.re, real_rhs.re)
2986 }
2987
2988 #[inline(always)]
2989 fn simd_greater_than_or_equal<S: Simd>(
2990 ctx: &Self::SimdCtx<S>,
2991 real_lhs: Self::SimdVec<S>,
2992 real_rhs: Self::SimdVec<S>,
2993 ) -> Self::SimdMask<S> {
2994 T::simd_greater_than_or_equal(ctx, real_lhs.re, real_rhs.re)
2995 }
2996
2997 #[inline(always)]
2998 fn simd_select<S: Simd>(
2999 ctx: &Self::SimdCtx<S>,
3000 mask: Self::SimdMask<S>,
3001 lhs: Self::SimdVec<S>,
3002 rhs: Self::SimdVec<S>,
3003 ) -> Self::SimdVec<S> {
3004 Complex {
3005 re: T::simd_select(ctx, mask, lhs.re, rhs.re),
3006 im: T::simd_select(ctx, mask, lhs.im, rhs.im),
3007 }
3008 }
3009
3010 #[inline(always)]
3011 fn simd_index_select<S: Simd>(
3012 ctx: &Self::SimdCtx<S>,
3013 mask: Self::SimdMask<S>,
3014 lhs: Self::SimdIndex<S>,
3015 rhs: Self::SimdIndex<S>,
3016 ) -> Self::SimdIndex<S> {
3017 T::simd_index_select(ctx, mask, lhs, rhs)
3018 }
3019
3020 #[inline(always)]
3021 fn simd_index_splat<S: Simd>(
3022 ctx: &Self::SimdCtx<S>,
3023 value: Self::Index,
3024 ) -> Self::SimdIndex<S> {
3025 T::simd_index_splat(ctx, value)
3026 }
3027
3028 #[inline(always)]
3029 fn simd_index_add<S: Simd>(
3030 ctx: &Self::SimdCtx<S>,
3031 lhs: Self::SimdIndex<S>,
3032 rhs: Self::SimdIndex<S>,
3033 ) -> Self::SimdIndex<S> {
3034 T::simd_index_add(ctx, lhs, rhs)
3035 }
3036
3037 #[inline(always)]
3038 fn simd_index_less_than<S: Simd>(
3039 ctx: &Self::SimdCtx<S>,
3040 lhs: Self::SimdIndex<S>,
3041 rhs: Self::SimdIndex<S>,
3042 ) -> Self::SimdMask<S> {
3043 T::simd_index_less_than(ctx, lhs, rhs)
3044 }
3045
3046 #[inline(always)]
3047 fn simd_and_mask<S: Simd>(
3048 ctx: &Self::SimdCtx<S>,
3049 lhs: Self::SimdMask<S>,
3050 rhs: Self::SimdMask<S>,
3051 ) -> Self::SimdMask<S> {
3052 T::simd_and_mask(ctx, lhs, rhs)
3053 }
3054
3055 #[inline(always)]
3056 fn simd_or_mask<S: Simd>(
3057 ctx: &Self::SimdCtx<S>,
3058 lhs: Self::SimdMask<S>,
3059 rhs: Self::SimdMask<S>,
3060 ) -> Self::SimdMask<S> {
3061 T::simd_or_mask(ctx, lhs, rhs)
3062 }
3063
3064 #[inline(always)]
3065 fn simd_not_mask<S: Simd>(
3066 ctx: &Self::SimdCtx<S>,
3067 mask: Self::SimdMask<S>,
3068 ) -> Self::SimdMask<S> {
3069 T::simd_not_mask(ctx, mask)
3070 }
3071
3072 #[inline(always)]
3073 fn simd_first_true_mask<S: Simd>(
3074 ctx: &Self::SimdCtx<S>,
3075 value: Self::SimdMask<S>,
3076 ) -> usize {
3077 T::simd_first_true_mask(ctx, value)
3078 }
3079
3080 #[inline(always)]
3081 fn simd_mem_mask_between<S: Simd>(
3082 ctx: &Self::SimdCtx<S>,
3083 start: Self::Index,
3084 end: Self::Index,
3085 ) -> Self::SimdMemMask<S> {
3086 let n = core::mem::size_of::<Self::SimdVec<S>>()
3087 / core::mem::size_of::<Self>();
3088 let start = start.zx() * 2;
3089 let end = end.zx() * 2;
3090 let mut sa = start.min(n);
3091 let mut ea = end.min(n);
3092 let mut sb = start.max(n) - n;
3093 let mut eb = end.max(n) - n;
3094 if sa == ea {
3095 sa = 0;
3096 ea = 0;
3097 }
3098 if sb == eb {
3099 sb = 0;
3100 eb = 0;
3101 }
3102 let re = T::simd_mem_mask_between(
3103 ctx,
3104 T::Index::truncate(sa),
3105 T::Index::truncate(ea),
3106 );
3107 let im = T::simd_mem_mask_between(
3108 ctx,
3109 T::Index::truncate(sb),
3110 T::Index::truncate(eb),
3111 );
3112 Complex { re, im }
3113 }
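    // The masked span is given in complex elements, but the memory mask has to
    // be expressed in units of `T`: element `i` occupies the two scalar slots
    // `2 * i` and `2 * i + 1`. The register `Complex<T::SimdVec<S>>` covers
    // `2 * n` scalars, split into a low half (its `re` field) and a high half
    // (its `im` field), so the scalar range `[2 * start, 2 * end)` is clamped
    // to `[0, n)` for the low half and shifted down by `n` for the high half,
    // with empty halves normalised to `(0, 0)`. Worked example (hypothetical
    // lane count): with n = 4, start = 1, end = 3 gives scalars [2, 6), i.e.
    // lanes [2, 4) of the low half and lanes [0, 2) of the high half.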
3114
3115 #[inline(always)]
3116 fn simd_mask_between<S: Simd>(
3117 ctx: &Self::SimdCtx<S>,
3118 start: Self::Index,
3119 end: Self::Index,
3120 ) -> Self::SimdMask<S> {
3121 T::simd_mask_between(ctx, start, end)
3122 }
3123
3124 #[inline(always)]
3125 unsafe fn simd_mask_load_raw<S: Simd>(
3126 ctx: &Self::SimdCtx<S>,
3127 mask: Self::SimdMemMask<S>,
3128 ptr: *const Self::SimdVec<S>,
3129 ) -> Self::SimdVec<S> {
3130 Complex {
3131 re: T::simd_mask_load_raw(
3132 ctx,
3133 mask.re,
3134 core::ptr::addr_of!((*ptr).re),
3135 ),
3136 im: T::simd_mask_load_raw(
3137 ctx,
3138 mask.im,
3139 core::ptr::addr_of!((*ptr).im),
3140 ),
3141 }
3142 }
3143
3144 #[inline(always)]
3145 unsafe fn simd_mask_store_raw<S: Simd>(
3146 ctx: &Self::SimdCtx<S>,
3147 mask: Self::SimdMemMask<S>,
3148 ptr: *mut Self::SimdVec<S>,
3149 values: Self::SimdVec<S>,
3150 ) {
3151 T::simd_mask_store_raw(
3152 ctx,
3153 mask.re,
3154 core::ptr::addr_of_mut!((*ptr).re),
3155 values.re,
3156 );
3157 T::simd_mask_store_raw(
3158 ctx,
3159 mask.im,
3160 core::ptr::addr_of_mut!((*ptr).im),
3161 values.im,
3162 );
3163 }
3164}
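// A brief sketch (hypothetical test module, not part of the original code) of
// the scalar constructors provided by the generic `Complex<T>` implementation
// above, instantiated with `T = f64`.
#[cfg(test)]
mod complex_f64_sketch {
    use crate::ComplexField;
    use num_complex::Complex;

    #[test]
    fn constructors() {
        let one = <Complex<f64> as ComplexField>::one_impl();
        assert_eq!(one, Complex { re: 1.0, im: 0.0 });

        let z = <Complex<f64> as ComplexField>::from_f64_impl(2.5);
        assert_eq!(<Complex<f64> as ComplexField>::real_part_impl(&z), 2.5);
        assert_eq!(<Complex<f64> as ComplexField>::imag_part_impl(&z), 0.0);
    }
}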
3165#[repr(transparent)]
3166#[doc(hidden)]
3167#[derive(Copy, Clone, Debug, PartialEq)]
3168pub struct ComplexImpl<T>(Complex<T>);
3169#[repr(transparent)]
3170#[doc(hidden)]
3171#[derive(Copy, Clone, Debug, PartialEq)]
3172pub struct ComplexImplConj<T>(Complex<T>);
3173unsafe impl Conjugate for ComplexImpl<f32> {
3174 type Canonical = ComplexImpl<f32>;
3175 type Conj = ComplexImplConj<f32>;
3176
3177 const IS_CANONICAL: bool = true;
3178}
3179unsafe impl Conjugate for ComplexImplConj<f32> {
3180 type Canonical = ComplexImpl<f32>;
3181 type Conj = ComplexImpl<f32>;
3182
3183 const IS_CANONICAL: bool = false;
3184}
3185unsafe impl Conjugate for ComplexImpl<f64> {
3186 type Canonical = ComplexImpl<f64>;
3187 type Conj = ComplexImplConj<f64>;
3188
3189 const IS_CANONICAL: bool = true;
3190}
3191unsafe impl Conjugate for ComplexImplConj<f64> {
3192 type Canonical = ComplexImpl<f64>;
3193 type Conj = ComplexImpl<f64>;
3194
3195 const IS_CANONICAL: bool = false;
3196}
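// `ComplexImpl` and `ComplexImplConj` are `#[repr(transparent)]` wrappers
// around `num_complex::Complex`; the `Conjugate` impls above pair each wrapper
// with the other as its `Conj` type, so a conjugated view of a value can be
// expressed purely at the type level without touching the data, with
// `IS_CANONICAL` marking which of the two is the canonical representation.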
3197impl<T: RealField> core::ops::Neg for ComplexImpl<T> {
3198 type Output = ComplexImpl<T>;
3199
3200 #[inline]
3201 fn neg(self) -> Self::Output {
3202 use math_utils::*;
3203 ComplexImpl(neg(&self.0))
3204 }
3205}
3206impl<T: RealField> core::ops::Neg for &ComplexImpl<T> {
3207 type Output = ComplexImpl<T>;
3208
3209 #[inline]
3210 fn neg(self) -> Self::Output {
3211 use math_utils::*;
3212 ComplexImpl(neg(&self.0))
3213 }
3214}
3215impl_op!({
3216 impl<T: RealField> Add for ComplexImpl<T> {
3217 fn add(self, rhs: Self) -> Self::Output {
3218 use math_utils::*;
3219 ComplexImpl(add(&self.0, &rhs.0))
3220 }
3221 }
3222 impl<T: RealField> Sub for ComplexImpl<T> {
3223 fn sub(self, rhs: Self) -> Self::Output {
3224 use math_utils::*;
3225 ComplexImpl(sub(&self.0, &rhs.0))
3226 }
3227 }
3228 impl<T: RealField> Mul for ComplexImpl<T> {
3229 fn mul(self, rhs: Self) -> Self::Output {
3230 use math_utils::*;
3231 ComplexImpl(mul(&self.0, &rhs.0))
3232 }
3233 }
3234 impl<T: RealField> Div for ComplexImpl<T> {
3235 fn div(self, rhs: Self) -> Self::Output {
3236 use math_utils::*;
3237 ComplexImpl(div(&self.0, &rhs.0))
3238 }
3239 }
3240});
3241impl_assign_op!({
3242 impl<T: RealField> AddAssign for ComplexImpl<T> {
3243 fn add_assign(&mut self, rhs: Self) {
3244 use math_utils::*;
3245 *self = ComplexImpl(add(&self.0, &rhs.0));
3246 }
3247 }
3248 impl<T: RealField> SubAssign for ComplexImpl<T> {
3249 fn sub_assign(&mut self, rhs: Self) {
3250 use math_utils::*;
3251 *self = ComplexImpl(sub(&self.0, &rhs.0));
3252 }
3253 }
3254 impl<T: RealField> MulAssign for ComplexImpl<T> {
3255 fn mul_assign(&mut self, rhs: Self) {
3256 use math_utils::*;
3257 *self = ComplexImpl(mul(&self.0, &rhs.0));
3258 }
3259 }
3260 impl<T: RealField> DivAssign for ComplexImpl<T> {
3261 fn div_assign(&mut self, rhs: Self) {
3262 use math_utils::*;
3263 *self = ComplexImpl(div(&self.0, &rhs.0));
3264 }
3265 }
3266});
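// The `impl_op!` / `impl_assign_op!` blocks above route the value-level
// operators for `ComplexImpl<T>` through the by-reference helpers in
// `math_utils`, so `+`, `-`, `*`, `/` and their assigning forms all share the
// same underlying by-ref implementations.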
3267impl<T> From<Complex<T>> for ComplexImpl<T> {
3268 #[inline]
3269 fn from(value: Complex<T>) -> Self {
3270 Self(value)
3271 }
3272}
3273impl ComplexField for ComplexImpl<f32> {
3274 type Arch = pulp::Arch;
3275 type Index = u32;
3276 type Real = f32;
3277 type SimdCtx<S: Simd> = S;
3278 type SimdIndex<S: Simd> = S::u32s;
3279 type SimdMask<S: Simd> = S::m32s;
3280 type SimdMemMask<S: Simd> = pulp::MemMask<S::m32s>;
3281 type SimdVec<S: Simd> = S::c32s;
3282 type Unit = f32;
3283
3284 const IS_NATIVE_C32: bool = true;
3285 const IS_REAL: bool = false;
3286 const SIMD_ABS_SPLIT_REAL_IMAG: bool = true;
3287 const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
3288
3289 #[inline]
3290 fn zero_impl() -> Self {
3291 Complex {
3292 re: f32::zero_impl(),
3293 im: f32::zero_impl(),
3294 }
3295 .into()
3296 }
3297
3298 #[inline]
3299 fn one_impl() -> Self {
3300 Complex {
3301 re: f32::one_impl(),
3302 im: f32::zero_impl(),
3303 }
3304 .into()
3305 }
3306
3307 #[inline]
3308 fn nan_impl() -> Self {
3309 Complex {
3310 re: f32::nan_impl(),
3311 im: f32::nan_impl(),
3312 }
3313 .into()
3314 }
3315
3316 #[inline]
3317 fn infinity_impl() -> Self {
3318 Complex {
3319 re: f32::infinity_impl(),
3320 im: f32::infinity_impl(),
3321 }
3322 .into()
3323 }
3324
3325 #[inline]
3326 fn from_real_impl(real: &Self::Real) -> Self {
3327 Complex {
3328 re: real.clone(),
3329 im: f32::zero_impl(),
3330 }
3331 .into()
3332 }
3333
3334 #[inline]
3335 fn from_f64_impl(real: f64) -> Self {
3336 Complex {
3337 re: f32::from_f64_impl(real),
3338 im: f32::zero_impl(),
3339 }
3340 .into()
3341 }
3342
3343 #[inline]
3344 fn real_part_impl(value: &Self) -> Self::Real {
3345 value.0.re.clone()
3346 }
3347
3348 #[inline]
3349 fn imag_part_impl(value: &Self) -> Self::Real {
3350 value.0.im.clone()
3351 }
3352
3353 #[inline]
3354 fn copy_impl(value: &Self) -> Self {
3355 value.clone()
3356 }
3357
3358 #[inline]
3359 fn conj_impl(value: &Self) -> Self {
3360 Complex {
3361 re: value.0.re.clone(),
3362 im: value.0.im.neg_by_ref(),
3363 }
3364 .into()
3365 }
3366
3367 #[inline]
3368 fn recip_impl(value: &Self) -> Self {
3369 let (re, im) = recip_impl(value.0.re.clone(), value.0.im.clone());
3370 Complex { re, im }.into()
3371 }
3372
3373 #[inline]
3374 fn sqrt_impl(value: &Self) -> Self {
3375 let (re, im) = sqrt_impl(value.0.re.clone(), value.0.im.clone());
3376 Complex { re, im }.into()
3377 }
3378
3379 #[inline]
3380 fn abs_impl(value: &Self) -> Self::Real {
3381 abs_impl(value.0.re.clone(), value.0.im.clone())
3382 }
3383
3384 #[inline]
3385 fn abs1_impl(value: &Self) -> Self::Real {
3386 use crate::ext::*;
3387 value.0.re.abs1() + value.0.im.abs1()
3388 }
3389
3390 #[inline]
3391 fn abs2_impl(value: &Self) -> Self::Real {
3392 use crate::ext::*;
3393 value.0.re.abs2() + value.0.im.abs2()
3394 }
3395
3396 #[inline]
3397 fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
3398 Complex {
3399 re: lhs.0.re * rhs,
3400 im: lhs.0.im * rhs,
3401 }
3402 .into()
3403 }
3404
3405 #[inline]
3406 fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
3407 use crate::ext::*;
3408 Complex {
3409 re: lhs.0.re.mul_pow2(rhs),
3410 im: lhs.0.im.mul_pow2(rhs),
3411 }
3412 .into()
3413 }
3414
3415 #[inline]
3416 fn is_finite_impl(value: &Self) -> bool {
3417 value.0.re.is_finite() && value.0.im.is_finite()
3418 }
3419
3420 #[inline(always)]
3421 fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
3422 f32::simd_ctx(simd)
3423 }
3424
3425 #[inline(always)]
3426 fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
3427 f32::ctx_from_simd(ctx)
3428 }
3429
3430 #[inline(always)]
3431 fn simd_splat<S: Simd>(
3432 ctx: &Self::SimdCtx<S>,
3433 value: &Self,
3434 ) -> Self::SimdVec<S> {
3435 ctx.splat_c32s(value.0)
3436 }
3437
3438 #[inline(always)]
3439 fn simd_splat_real<S: Simd>(
3440 ctx: &Self::SimdCtx<S>,
3441 value: &Self::Real,
3442 ) -> Self::SimdVec<S> {
3443 ctx.splat_c32s(Complex {
3444 re: *value,
3445 im: *value,
3446 })
3447 }
3448
3449 #[inline(always)]
3450 fn simd_add<S: Simd>(
3451 ctx: &Self::SimdCtx<S>,
3452 lhs: Self::SimdVec<S>,
3453 rhs: Self::SimdVec<S>,
3454 ) -> Self::SimdVec<S> {
3455 ctx.add_c32s(lhs, rhs)
3456 }
3457
3458 #[inline(always)]
3459 fn simd_sub<S: Simd>(
3460 ctx: &Self::SimdCtx<S>,
3461 lhs: Self::SimdVec<S>,
3462 rhs: Self::SimdVec<S>,
3463 ) -> Self::SimdVec<S> {
3464 ctx.sub_c32s(lhs, rhs)
3465 }
3466
3467 #[inline(always)]
3468 fn simd_neg<S: Simd>(
3469 ctx: &Self::SimdCtx<S>,
3470 value: Self::SimdVec<S>,
3471 ) -> Self::SimdVec<S> {
3472 ctx.neg_c32s(value)
3473 }
3474
3475 #[inline(always)]
3476 fn simd_conj<S: Simd>(
3477 ctx: &Self::SimdCtx<S>,
3478 value: Self::SimdVec<S>,
3479 ) -> Self::SimdVec<S> {
3480 ctx.conj_c32s(value)
3481 }
3482
3483 #[inline(always)]
3484 fn simd_abs1<S: Simd>(
3485 ctx: &Self::SimdCtx<S>,
3486 value: Self::SimdVec<S>,
3487 ) -> Self::SimdVec<S> {
3488 if const {
3489 core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>()
3490 } {
3491 bytemuck::cast(ctx.abs_f32s(bytemuck::cast(value)))
3492 } else if const {
3493 core::mem::size_of::<S::c32s>()
3494 == core::mem::size_of::<Complex<f32>>()
3495 } {
3496 let value: Complex<f32> = bytemuck::cast(value);
3497 let v = value.re.abs() + value.im.abs();
3498 bytemuck::cast(Complex { re: v, im: v })
3499 } else {
3500 panic!();
3501 }
3502 }
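    // The pattern above (repeated throughout this impl) dispatches at compile
    // time on the layout of `S::c32s`: when the complex vector has the same
    // size as the real vector `S::f32s`, the interleaved lanes are
    // reinterpreted with `bytemuck::cast` and processed as plain `f32` lanes;
    // when it degenerates to a single `Complex<f32>` (the scalar fallback),
    // the value is handled directly; any other layout is treated as
    // unreachable and panics.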
3503
3504 #[inline(always)]
3505 fn simd_abs_max<S: Simd>(
3506 ctx: &Self::SimdCtx<S>,
3507 value: Self::SimdVec<S>,
3508 ) -> Self::SimdVec<S> {
3509 if const {
3510 core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>()
3511 } {
3512 bytemuck::cast(ctx.abs_f32s(bytemuck::cast(value)))
3513 } else if const {
3514 core::mem::size_of::<S::c32s>()
3515 == core::mem::size_of::<Complex<f32>>()
3516 } {
3517 let value: Complex<f32> = bytemuck::cast(value);
3518 let re = value.re.abs();
3519 let im = value.im.abs();
3520 let v = if re > im { re } else { im };
3521 bytemuck::cast(Complex { re: v, im: v })
3522 } else {
3523 panic!();
3524 }
3525 }
3526
3527 #[inline(always)]
3528 fn simd_mul_real<S: Simd>(
3529 ctx: &Self::SimdCtx<S>,
3530 lhs: Self::SimdVec<S>,
3531 real_rhs: Self::SimdVec<S>,
3532 ) -> Self::SimdVec<S> {
3533 if const {
3534 core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>()
3535 } {
3536 bytemuck::cast(
3537 ctx.mul_f32s(bytemuck::cast(lhs), bytemuck::cast(real_rhs)),
3538 )
3539 } else if const {
3540 core::mem::size_of::<S::c32s>()
3541 == core::mem::size_of::<Complex<f32>>()
3542 } {
3543 let mut lhs: Complex<f32> = bytemuck::cast(lhs);
3544 let rhs: Complex<f32> = bytemuck::cast(real_rhs);
3545 lhs *= rhs.re;
3546 bytemuck::cast(lhs)
3547 } else {
3548 panic!();
3549 }
3550 }
3551
3552 #[inline(always)]
3553 fn simd_mul_pow2<S: Simd>(
3554 ctx: &Self::SimdCtx<S>,
3555 lhs: Self::SimdVec<S>,
3556 real_rhs: Self::SimdVec<S>,
3557 ) -> Self::SimdVec<S> {
3558 Self::simd_mul_real(ctx, lhs, real_rhs)
3559 }
3560
3561 #[inline(always)]
3562 fn simd_mul<S: Simd>(
3563 ctx: &Self::SimdCtx<S>,
3564 lhs: Self::SimdVec<S>,
3565 rhs: Self::SimdVec<S>,
3566 ) -> Self::SimdVec<S> {
3567 ctx.mul_e_c32s(lhs, rhs)
3568 }
3569
3570 #[inline(always)]
3571 fn simd_conj_mul<S: Simd>(
3572 ctx: &Self::SimdCtx<S>,
3573 lhs: Self::SimdVec<S>,
3574 rhs: Self::SimdVec<S>,
3575 ) -> Self::SimdVec<S> {
3576 ctx.conj_mul_e_c32s(lhs, rhs)
3577 }
3578
3579 #[inline(always)]
3580 fn simd_mul_add<S: Simd>(
3581 ctx: &Self::SimdCtx<S>,
3582 lhs: Self::SimdVec<S>,
3583 rhs: Self::SimdVec<S>,
3584 acc: Self::SimdVec<S>,
3585 ) -> Self::SimdVec<S> {
3586 ctx.mul_add_e_c32s(lhs, rhs, acc)
3587 }
3588
3589 #[inline(always)]
3590 fn simd_conj_mul_add<S: Simd>(
3591 ctx: &Self::SimdCtx<S>,
3592 lhs: Self::SimdVec<S>,
3593 rhs: Self::SimdVec<S>,
3594 acc: Self::SimdVec<S>,
3595 ) -> Self::SimdVec<S> {
3596 ctx.conj_mul_add_e_c32s(lhs, rhs, acc)
3597 }
3598
3599 #[inline(always)]
3600 fn simd_abs2<S: Simd>(
3601 ctx: &Self::SimdCtx<S>,
3602 value: Self::SimdVec<S>,
3603 ) -> Self::SimdVec<S> {
3604 if const {
3605 core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>()
3606 } {
3607 bytemuck::cast(
3608 ctx.mul_f32s(bytemuck::cast(value), bytemuck::cast(value)),
3609 )
3610 } else if const {
3611 core::mem::size_of::<S::c32s>()
3612 == core::mem::size_of::<Complex<f32>>()
3613 } {
3614 let value: Complex<f32> = bytemuck::cast(value);
3615 let v = value.re * value.re + value.im * value.im;
3616 bytemuck::cast(Complex { re: v, im: v })
3617 } else {
3618 panic!();
3619 }
3620 }
3621
3622 #[inline(always)]
3623 fn simd_abs2_add<S: Simd>(
3624 ctx: &Self::SimdCtx<S>,
3625 value: Self::SimdVec<S>,
3626 acc: Self::SimdVec<S>,
3627 ) -> Self::SimdVec<S> {
3628 if const {
3629 core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>()
3630 } {
3631 bytemuck::cast(ctx.mul_add_f32s(
3632 bytemuck::cast(value),
3633 bytemuck::cast(value),
3634 bytemuck::cast(acc),
3635 ))
3636 } else if const {
3637 core::mem::size_of::<S::c32s>()
3638 == core::mem::size_of::<Complex<f32>>()
3639 } {
3640 let value: Complex<f32> = bytemuck::cast(value);
3641 let acc: Complex<f32> = bytemuck::cast(acc);
3642 let v = value.re * value.re + value.im * value.im + acc.re;
3643 bytemuck::cast(Complex { re: v, im: v })
3644 } else {
3645 panic!();
3646 }
3647 }
3648
3649 #[inline(always)]
3650 fn simd_reduce_sum<S: Simd>(
3651 ctx: &Self::SimdCtx<S>,
3652 value: Self::SimdVec<S>,
3653 ) -> Self {
3654 ctx.reduce_sum_c32s(value).into()
3655 }
3656
3657 #[inline(always)]
3658 fn simd_reduce_max<S: Simd>(
3659 ctx: &Self::SimdCtx<S>,
3660 value: Self::SimdVec<S>,
3661 ) -> Self {
3662 ctx.reduce_max_c32s(value).into()
3663 }
3664
3665 #[inline(always)]
3666 fn simd_equal<S: Simd>(
3667 _: &Self::SimdCtx<S>,
3668 _: Self::SimdVec<S>,
3669 _: Self::SimdVec<S>,
3670 ) -> Self::SimdMask<S> {
3671 panic!()
3672 }
3673
3674 #[inline(always)]
3675 fn simd_less_than<S: Simd>(
3676 ctx: &Self::SimdCtx<S>,
3677 real_lhs: Self::SimdVec<S>,
3678 real_rhs: Self::SimdVec<S>,
3679 ) -> Self::SimdMask<S> {
3680 if const {
3681 core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>()
3682 } {
3683 ctx.less_than_f32s(
3684 bytemuck::cast(real_lhs),
3685 bytemuck::cast(real_rhs),
3686 )
3687 } else if const {
3688 core::mem::size_of::<S::c32s>()
3689 == core::mem::size_of::<Complex<f32>>()
3690 } {
3691 assert!(
3692 const {
3693 core::mem::size_of::<S::m32s>()
3694 == core::mem::size_of::<bool>()
3695 }
3696 );
3697 let lhs: Complex<f32> = bytemuck::cast(real_lhs);
3698 let rhs: Complex<f32> = bytemuck::cast(real_rhs);
3699 unsafe { core::mem::transmute_copy(&(lhs.re < rhs.re)) }
3700 } else {
3701 panic!();
3702 }
3703 }
3704
3705 #[inline(always)]
3706 fn simd_less_than_or_equal<S: Simd>(
3707 ctx: &Self::SimdCtx<S>,
3708 real_lhs: Self::SimdVec<S>,
3709 real_rhs: Self::SimdVec<S>,
3710 ) -> Self::SimdMask<S> {
3711 if const {
3712 core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>()
3713 } {
3714 ctx.less_than_or_equal_f32s(
3715 bytemuck::cast(real_lhs),
3716 bytemuck::cast(real_rhs),
3717 )
3718 } else if const {
3719 core::mem::size_of::<S::c32s>()
3720 == core::mem::size_of::<Complex<f32>>()
3721 } {
3722 assert!(
3723 const {
3724 core::mem::size_of::<S::m32s>()
3725 == core::mem::size_of::<bool>()
3726 }
3727 );
3728 let lhs: Complex<f32> = bytemuck::cast(real_lhs);
3729 let rhs: Complex<f32> = bytemuck::cast(real_rhs);
3730 unsafe { core::mem::transmute_copy(&(lhs.re <= rhs.re)) }
3731 } else {
3732 panic!();
3733 }
3734 }
3735
3736 #[inline(always)]
3737 fn simd_greater_than<S: Simd>(
3738 ctx: &Self::SimdCtx<S>,
3739 real_lhs: Self::SimdVec<S>,
3740 real_rhs: Self::SimdVec<S>,
3741 ) -> Self::SimdMask<S> {
3742 if const {
3743 core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>()
3744 } {
3745 ctx.greater_than_f32s(
3746 bytemuck::cast(real_lhs),
3747 bytemuck::cast(real_rhs),
3748 )
3749 } else if const {
3750 core::mem::size_of::<S::c32s>()
3751 == core::mem::size_of::<Complex<f32>>()
3752 } {
3753 assert!(
3754 const {
3755 core::mem::size_of::<S::m32s>()
3756 == core::mem::size_of::<bool>()
3757 }
3758 );
3759 let lhs: Complex<f32> = bytemuck::cast(real_lhs);
3760 let rhs: Complex<f32> = bytemuck::cast(real_rhs);
3761 unsafe { core::mem::transmute_copy(&(lhs.re > rhs.re)) }
3762 } else {
3763 panic!();
3764 }
3765 }
3766
3767 #[inline(always)]
3768 fn simd_greater_than_or_equal<S: Simd>(
3769 ctx: &Self::SimdCtx<S>,
3770 real_lhs: Self::SimdVec<S>,
3771 real_rhs: Self::SimdVec<S>,
3772 ) -> Self::SimdMask<S> {
3773 if const {
3774 core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>()
3775 } {
3776 ctx.greater_than_or_equal_f32s(
3777 bytemuck::cast(real_lhs),
3778 bytemuck::cast(real_rhs),
3779 )
3780 } else if const {
3781 core::mem::size_of::<S::c32s>()
3782 == core::mem::size_of::<Complex<f32>>()
3783 } {
3784 assert!(
3785 const {
3786 core::mem::size_of::<S::m32s>()
3787 == core::mem::size_of::<bool>()
3788 }
3789 );
3790 let lhs: Complex<f32> = bytemuck::cast(real_lhs);
3791 let rhs: Complex<f32> = bytemuck::cast(real_rhs);
3792 unsafe { core::mem::transmute_copy(&(lhs.re >= rhs.re)) }
3793 } else {
3794 panic!();
3795 }
3796 }
3797
3798 #[inline(always)]
3799 fn simd_select<S: Simd>(
3800 ctx: &Self::SimdCtx<S>,
3801 mask: Self::SimdMask<S>,
3802 lhs: Self::SimdVec<S>,
3803 rhs: Self::SimdVec<S>,
3804 ) -> Self::SimdVec<S> {
3805 if const {
3806 core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>()
3807 } {
3808 bytemuck::cast(ctx.select_f32s(
3809 mask,
3810 bytemuck::cast(lhs),
3811 bytemuck::cast(rhs),
3812 ))
3813 } else if const {
3814 core::mem::size_of::<S::c32s>()
3815 == core::mem::size_of::<Complex<f32>>()
3816 } {
3817 assert!(
3818 const {
3819 core::mem::size_of::<S::m32s>()
3820 == core::mem::size_of::<bool>()
3821 }
3822 );
3823 let mask: bool = unsafe { core::mem::transmute_copy(&mask) };
3824 let lhs: Complex<f32> = bytemuck::cast(lhs);
3825 let rhs: Complex<f32> = bytemuck::cast(rhs);
3826 bytemuck::cast(if mask { lhs } else { rhs })
3827 } else {
3828 panic!();
3829 }
3830 }
3831
3832 #[inline(always)]
3833 fn simd_index_select<S: Simd>(
3834 ctx: &Self::SimdCtx<S>,
3835 mask: Self::SimdMask<S>,
3836 lhs: Self::SimdIndex<S>,
3837 rhs: Self::SimdIndex<S>,
3838 ) -> Self::SimdIndex<S> {
3839 f32::simd_index_select(ctx, mask, lhs, rhs)
3840 }
3841
3842 #[inline(always)]
3843 fn simd_index_splat<S: Simd>(
3844 ctx: &Self::SimdCtx<S>,
3845 value: Self::Index,
3846 ) -> Self::SimdIndex<S> {
3847 f32::simd_index_splat(ctx, value)
3848 }
3849
3850 #[inline(always)]
3851 fn simd_index_add<S: Simd>(
3852 ctx: &Self::SimdCtx<S>,
3853 lhs: Self::SimdIndex<S>,
3854 rhs: Self::SimdIndex<S>,
3855 ) -> Self::SimdIndex<S> {
3856 f32::simd_index_add(ctx, lhs, rhs)
3857 }
3858
3859 #[inline(always)]
3860 fn simd_index_less_than<S: Simd>(
3861 ctx: &Self::SimdCtx<S>,
3862 lhs: Self::SimdIndex<S>,
3863 rhs: Self::SimdIndex<S>,
3864 ) -> Self::SimdMask<S> {
3865 f32::simd_index_less_than(ctx, lhs, rhs)
3866 }
3867
3868 #[inline(always)]
3869 fn simd_and_mask<S: Simd>(
3870 ctx: &Self::SimdCtx<S>,
3871 lhs: Self::SimdMask<S>,
3872 rhs: Self::SimdMask<S>,
3873 ) -> Self::SimdMask<S> {
3874 f32::simd_and_mask(ctx, lhs, rhs)
3875 }
3876
3877 #[inline(always)]
3878 fn simd_or_mask<S: Simd>(
3879 ctx: &Self::SimdCtx<S>,
3880 lhs: Self::SimdMask<S>,
3881 rhs: Self::SimdMask<S>,
3882 ) -> Self::SimdMask<S> {
3883 f32::simd_or_mask(ctx, lhs, rhs)
3884 }
3885
3886 #[inline(always)]
3887 fn simd_not_mask<S: Simd>(
3888 ctx: &Self::SimdCtx<S>,
3889 mask: Self::SimdMask<S>,
3890 ) -> Self::SimdMask<S> {
3891 f32::simd_not_mask(ctx, mask)
3892 }
3893
3894 #[inline(always)]
3895 fn simd_first_true_mask<S: Simd>(
3896 ctx: &Self::SimdCtx<S>,
3897 value: Self::SimdMask<S>,
3898 ) -> usize {
3899 f32::simd_first_true_mask(ctx, value)
3900 }
3901
3902 #[inline(always)]
3903 fn simd_mem_mask_between<S: Simd>(
3904 ctx: &Self::SimdCtx<S>,
3905 start: Self::Index,
3906 end: Self::Index,
3907 ) -> Self::SimdMemMask<S> {
3908 ctx.mask_between_m32s((2 * start) as _, (2 * end) as _)
3909 }
3910
3911 #[inline(always)]
3912 fn simd_mask_between<S: Simd>(
3913 ctx: &Self::SimdCtx<S>,
3914 start: Self::Index,
3915 end: Self::Index,
3916 ) -> Self::SimdMask<S> {
3917 ctx.mask_between_m32s((2 * start) as _, (2 * end) as _)
3918 .mask()
3919 }
3920
3921 #[inline(always)]
3922 unsafe fn simd_mask_load_raw<S: Simd>(
3923 ctx: &Self::SimdCtx<S>,
3924 mask: Self::SimdMemMask<S>,
3925 ptr: *const Self::SimdVec<S>,
3926 ) -> Self::SimdVec<S> {
3927 ctx.mask_load_ptr_c32s(mask, ptr as _)
3928 }
3929
3930 #[inline(always)]
3931 unsafe fn simd_mask_store_raw<S: Simd>(
3932 ctx: &Self::SimdCtx<S>,
3933 mask: Self::SimdMemMask<S>,
3934 ptr: *mut Self::SimdVec<S>,
3935 values: Self::SimdVec<S>,
3936 ) {
3937 ctx.mask_store_ptr_c32s(mask, ptr as _, values);
3938 }
3939}
3940impl ComplexField for ComplexImpl<f64> {
3941 type Arch = pulp::Arch;
3942 type Index = u64;
3943 type Real = f64;
3944 type SimdCtx<S: Simd> = S;
3945 type SimdIndex<S: Simd> = S::u64s;
3946 type SimdMask<S: Simd> = S::m64s;
3947 type SimdMemMask<S: Simd> = pulp::MemMask<S::m64s>;
3948 type SimdVec<S: Simd> = S::c64s;
3949 type Unit = f64;
3950
3951 const IS_NATIVE_C64: bool = true;
3952 const IS_REAL: bool = false;
3953 const SIMD_ABS_SPLIT_REAL_IMAG: bool = true;
3954 const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
3955
3956 #[inline]
3957 fn zero_impl() -> Self {
3958 Complex {
3959 re: f64::zero_impl(),
3960 im: f64::zero_impl(),
3961 }
3962 .into()
3963 }
3964
3965 #[inline]
3966 fn one_impl() -> Self {
3967 Complex {
3968 re: f64::one_impl(),
3969 im: f64::zero_impl(),
3970 }
3971 .into()
3972 }
3973
3974 #[inline]
3975 fn nan_impl() -> Self {
3976 Complex {
3977 re: f64::nan_impl(),
3978 im: f64::nan_impl(),
3979 }
3980 .into()
3981 }
3982
3983 #[inline]
3984 fn infinity_impl() -> Self {
3985 Complex {
3986 re: f64::infinity_impl(),
3987 im: f64::infinity_impl(),
3988 }
3989 .into()
3990 }
3991
3992 #[inline]
3993 fn from_real_impl(real: &Self::Real) -> Self {
3994 Complex {
3995 re: real.clone(),
3996 im: f64::zero_impl(),
3997 }
3998 .into()
3999 }
4000
4001 #[inline]
4002 fn from_f64_impl(real: f64) -> Self {
4003 Complex {
4004 re: f64::from_f64_impl(real),
4005 im: f64::zero_impl(),
4006 }
4007 .into()
4008 }
4009
4010 #[inline]
4011 fn real_part_impl(value: &Self) -> Self::Real {
4012 value.0.re.clone()
4013 }
4014
4015 #[inline]
4016 fn imag_part_impl(value: &Self) -> Self::Real {
4017 value.0.im.clone()
4018 }
4019
4020 #[inline]
4021 fn copy_impl(value: &Self) -> Self {
4022 value.clone()
4023 }
4024
4025 #[inline]
4026 fn conj_impl(value: &Self) -> Self {
4027 Complex {
4028 re: value.0.re.clone(),
4029 im: value.0.im.neg_by_ref(),
4030 }
4031 .into()
4032 }
4033
4034 #[inline]
4035 fn recip_impl(value: &Self) -> Self {
4036 let (re, im) = recip_impl(value.0.re.clone(), value.0.im.clone());
4037 Complex { re, im }.into()
4038 }
4039
4040 #[inline]
4041 fn sqrt_impl(value: &Self) -> Self {
4042 let (re, im) = sqrt_impl(value.0.re.clone(), value.0.im.clone());
4043 Complex { re, im }.into()
4044 }
4045
4046 #[inline]
4047 fn abs_impl(value: &Self) -> Self::Real {
4048 abs_impl(value.0.re.clone(), value.0.im.clone())
4049 }
4050
4051 #[inline]
4052 fn abs1_impl(value: &Self) -> Self::Real {
4053 use crate::ext::*;
4054 value.0.re.abs1() + value.0.im.abs1()
4055 }
4056
4057 #[inline]
4058 fn abs2_impl(value: &Self) -> Self::Real {
4059 use crate::ext::*;
4060 value.0.re.abs2() + value.0.im.abs2()
4061 }
4062
4063 #[inline]
4064 fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
4065 Complex {
4066 re: lhs.0.re * rhs,
4067 im: lhs.0.im * rhs,
4068 }
4069 .into()
4070 }
4071
4072 #[inline]
4073 fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
4074 use crate::ext::*;
4075 Complex {
4076 re: lhs.0.re.mul_pow2(rhs),
4077 im: lhs.0.im.mul_pow2(rhs),
4078 }
4079 .into()
4080 }
4081
4082 #[inline]
4083 fn is_finite_impl(value: &Self) -> bool {
4084 value.0.re.is_finite() && value.0.im.is_finite()
4085 }
4086
4087 #[inline(always)]
4088 fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
4089 f64::simd_ctx(simd)
4090 }
4091
4092 #[inline(always)]
4093 fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
4094 f64::ctx_from_simd(ctx)
4095 }
4096
4097 #[inline(always)]
4098 fn simd_splat<S: Simd>(
4099 ctx: &Self::SimdCtx<S>,
4100 value: &Self,
4101 ) -> Self::SimdVec<S> {
4102 ctx.splat_c64s(value.0)
4103 }
4104
4105 #[inline(always)]
4106 fn simd_splat_real<S: Simd>(
4107 ctx: &Self::SimdCtx<S>,
4108 value: &Self::Real,
4109 ) -> Self::SimdVec<S> {
4110 ctx.splat_c64s(Complex {
4111 re: *value,
4112 im: *value,
4113 })
4114 }
4115
4116 #[inline(always)]
4117 fn simd_add<S: Simd>(
4118 ctx: &Self::SimdCtx<S>,
4119 lhs: Self::SimdVec<S>,
4120 rhs: Self::SimdVec<S>,
4121 ) -> Self::SimdVec<S> {
4122 ctx.add_c64s(lhs, rhs)
4123 }
4124
4125 #[inline(always)]
4126 fn simd_sub<S: Simd>(
4127 ctx: &Self::SimdCtx<S>,
4128 lhs: Self::SimdVec<S>,
4129 rhs: Self::SimdVec<S>,
4130 ) -> Self::SimdVec<S> {
4131 ctx.sub_c64s(lhs, rhs)
4132 }
4133
4134 #[inline(always)]
4135 fn simd_neg<S: Simd>(
4136 ctx: &Self::SimdCtx<S>,
4137 value: Self::SimdVec<S>,
4138 ) -> Self::SimdVec<S> {
4139 ctx.neg_c64s(value)
4140 }
4141
4142 #[inline(always)]
4143 fn simd_conj<S: Simd>(
4144 ctx: &Self::SimdCtx<S>,
4145 value: Self::SimdVec<S>,
4146 ) -> Self::SimdVec<S> {
4147 ctx.conj_c64s(value)
4148 }
4149
4150 #[inline(always)]
4151 fn simd_abs1<S: Simd>(
4152 ctx: &Self::SimdCtx<S>,
4153 value: Self::SimdVec<S>,
4154 ) -> Self::SimdVec<S> {
4155 if const {
4156 core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>()
4157 } {
4158 bytemuck::cast(ctx.abs_f64s(bytemuck::cast(value)))
4159 } else if const {
4160 core::mem::size_of::<S::c64s>()
4161 == core::mem::size_of::<Complex<f64>>()
4162 } {
4163 let value: Complex<f64> = bytemuck::cast(value);
4164 let v = value.re.abs() + value.im.abs();
4165 bytemuck::cast(Complex { re: v, im: v })
4166 } else {
4167 panic!();
4168 }
4169 }
4170
4171 #[inline(always)]
4172 fn simd_abs_max<S: Simd>(
4173 ctx: &Self::SimdCtx<S>,
4174 value: Self::SimdVec<S>,
4175 ) -> Self::SimdVec<S> {
4176 if const {
4177 core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>()
4178 } {
4179 bytemuck::cast(ctx.abs_f64s(bytemuck::cast(value)))
4180 } else if const {
4181 core::mem::size_of::<S::c64s>()
4182 == core::mem::size_of::<Complex<f64>>()
4183 } {
4184 let value: Complex<f64> = bytemuck::cast(value);
4185 let re = value.re.abs();
4186 let im = value.im.abs();
4187 let v = if re > im { re } else { im };
4188 bytemuck::cast(Complex { re: v, im: v })
4189 } else {
4190 panic!();
4191 }
4192 }
4193
4194 #[inline(always)]
4195 fn simd_mul_real<S: Simd>(
4196 ctx: &Self::SimdCtx<S>,
4197 lhs: Self::SimdVec<S>,
4198 real_rhs: Self::SimdVec<S>,
4199 ) -> Self::SimdVec<S> {
4200 if const {
4201 core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>()
4202 } {
4203 bytemuck::cast(
4204 ctx.mul_f64s(bytemuck::cast(lhs), bytemuck::cast(real_rhs)),
4205 )
4206 } else if const {
4207 core::mem::size_of::<S::c64s>()
4208 == core::mem::size_of::<Complex<f64>>()
4209 } {
4210 let mut lhs: Complex<f64> = bytemuck::cast(lhs);
4211 let rhs: Complex<f64> = bytemuck::cast(real_rhs);
4212 lhs *= rhs.re;
4213 bytemuck::cast(lhs)
4214 } else {
4215 panic!();
4216 }
4217 }
4218
4219 #[inline(always)]
4220 fn simd_mul_pow2<S: Simd>(
4221 ctx: &Self::SimdCtx<S>,
4222 lhs: Self::SimdVec<S>,
4223 real_rhs: Self::SimdVec<S>,
4224 ) -> Self::SimdVec<S> {
4225 Self::simd_mul_real(ctx, lhs, real_rhs)
4226 }
4227
4228 #[inline(always)]
4229 fn simd_mul<S: Simd>(
4230 ctx: &Self::SimdCtx<S>,
4231 lhs: Self::SimdVec<S>,
4232 rhs: Self::SimdVec<S>,
4233 ) -> Self::SimdVec<S> {
4234 ctx.mul_e_c64s(lhs, rhs)
4235 }
4236
4237 #[inline(always)]
4238 fn simd_conj_mul<S: Simd>(
4239 ctx: &Self::SimdCtx<S>,
4240 lhs: Self::SimdVec<S>,
4241 rhs: Self::SimdVec<S>,
4242 ) -> Self::SimdVec<S> {
4243 ctx.conj_mul_e_c64s(lhs, rhs)
4244 }
4245
4246 #[inline(always)]
4247 fn simd_mul_add<S: Simd>(
4248 ctx: &Self::SimdCtx<S>,
4249 lhs: Self::SimdVec<S>,
4250 rhs: Self::SimdVec<S>,
4251 acc: Self::SimdVec<S>,
4252 ) -> Self::SimdVec<S> {
4253 ctx.mul_add_e_c64s(lhs, rhs, acc)
4254 }
4255
4256 #[inline(always)]
4257 fn simd_conj_mul_add<S: Simd>(
4258 ctx: &Self::SimdCtx<S>,
4259 lhs: Self::SimdVec<S>,
4260 rhs: Self::SimdVec<S>,
4261 acc: Self::SimdVec<S>,
4262 ) -> Self::SimdVec<S> {
4263 ctx.conj_mul_add_e_c64s(lhs, rhs, acc)
4264 }
4265
4266 #[inline(always)]
4267 fn simd_abs2<S: Simd>(
4268 ctx: &Self::SimdCtx<S>,
4269 value: Self::SimdVec<S>,
4270 ) -> Self::SimdVec<S> {
4271 if const {
4272 core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>()
4273 } {
4274 bytemuck::cast(
4275 ctx.mul_f64s(bytemuck::cast(value), bytemuck::cast(value)),
4276 )
4277 } else if const {
4278 core::mem::size_of::<S::c64s>()
4279 == core::mem::size_of::<Complex<f64>>()
4280 } {
4281 let value: Complex<f64> = bytemuck::cast(value);
4282 let v = value.re * value.re + value.im * value.im;
4283 bytemuck::cast(Complex { re: v, im: v })
4284 } else {
4285 panic!();
4286 }
4287 }
4288
4289 #[inline(always)]
4290 fn simd_abs2_add<S: Simd>(
4291 ctx: &Self::SimdCtx<S>,
4292 value: Self::SimdVec<S>,
4293 acc: Self::SimdVec<S>,
4294 ) -> Self::SimdVec<S> {
4295 if const {
4296 core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>()
4297 } {
4298 bytemuck::cast(ctx.mul_add_f64s(
4299 bytemuck::cast(value),
4300 bytemuck::cast(value),
4301 bytemuck::cast(acc),
4302 ))
4303 } else if const {
4304 core::mem::size_of::<S::c64s>()
4305 == core::mem::size_of::<Complex<f64>>()
4306 } {
4307 let value: Complex<f64> = bytemuck::cast(value);
4308 let acc: Complex<f64> = bytemuck::cast(acc);
4309 let v = value.re * value.re + value.im * value.im + acc.re;
4310 bytemuck::cast(Complex { re: v, im: v })
4311 } else {
4312 panic!();
4313 }
4314 }
4315
4316 #[inline(always)]
4317 fn simd_reduce_sum<S: Simd>(
4318 ctx: &Self::SimdCtx<S>,
4319 value: Self::SimdVec<S>,
4320 ) -> Self {
4321 ctx.reduce_sum_c64s(value).into()
4322 }
4323
4324 #[inline(always)]
4325 fn simd_reduce_max<S: Simd>(
4326 ctx: &Self::SimdCtx<S>,
4327 value: Self::SimdVec<S>,
4328 ) -> Self {
4329 ctx.reduce_max_c64s(value).into()
4330 }
4331
4332 #[inline(always)]
4333 fn simd_equal<S: Simd>(
4334 _: &Self::SimdCtx<S>,
4335 _: Self::SimdVec<S>,
4336 _: Self::SimdVec<S>,
4337 ) -> Self::SimdMask<S> {
4338 panic!()
4339 }
4340
4341 #[inline(always)]
4342 fn simd_less_than<S: Simd>(
4343 ctx: &Self::SimdCtx<S>,
4344 real_lhs: Self::SimdVec<S>,
4345 real_rhs: Self::SimdVec<S>,
4346 ) -> Self::SimdMask<S> {
4347 if const {
4348 core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>()
4349 } {
4350 ctx.less_than_f64s(
4351 bytemuck::cast(real_lhs),
4352 bytemuck::cast(real_rhs),
4353 )
4354 } else if const {
4355 core::mem::size_of::<S::c64s>()
4356 == core::mem::size_of::<Complex<f64>>()
4357 } {
4358 assert!(
4359 const {
4360 core::mem::size_of::<S::m64s>()
4361 == core::mem::size_of::<bool>()
4362 }
4363 );
4364 let lhs: Complex<f64> = bytemuck::cast(real_lhs);
4365 let rhs: Complex<f64> = bytemuck::cast(real_rhs);
4366 unsafe { core::mem::transmute_copy(&(lhs.re < rhs.re)) }
4367 } else {
4368 panic!();
4369 }
4370 }
4371
4372 #[inline(always)]
4373 fn simd_less_than_or_equal<S: Simd>(
4374 ctx: &Self::SimdCtx<S>,
4375 real_lhs: Self::SimdVec<S>,
4376 real_rhs: Self::SimdVec<S>,
4377 ) -> Self::SimdMask<S> {
4378 if const {
4379 core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>()
4380 } {
4381 ctx.less_than_or_equal_f64s(
4382 bytemuck::cast(real_lhs),
4383 bytemuck::cast(real_rhs),
4384 )
4385 } else if const {
4386 core::mem::size_of::<S::c64s>()
4387 == core::mem::size_of::<Complex<f64>>()
4388 } {
4389 assert!(
4390 const {
4391 core::mem::size_of::<S::m64s>()
4392 == core::mem::size_of::<bool>()
4393 }
4394 );
4395 let lhs: Complex<f64> = bytemuck::cast(real_lhs);
4396 let rhs: Complex<f64> = bytemuck::cast(real_rhs);
4397 unsafe { core::mem::transmute_copy(&(lhs.re <= rhs.re)) }
4398 } else {
4399 panic!();
4400 }
4401 }
4402
4403 #[inline(always)]
4404 fn simd_greater_than<S: Simd>(
4405 ctx: &Self::SimdCtx<S>,
4406 real_lhs: Self::SimdVec<S>,
4407 real_rhs: Self::SimdVec<S>,
4408 ) -> Self::SimdMask<S> {
4409 if const {
4410 core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>()
4411 } {
4412 ctx.greater_than_f64s(
4413 bytemuck::cast(real_lhs),
4414 bytemuck::cast(real_rhs),
4415 )
4416 } else if const {
4417 core::mem::size_of::<S::c64s>()
4418 == core::mem::size_of::<Complex<f64>>()
4419 } {
4420 assert!(
4421 const {
4422 core::mem::size_of::<S::m64s>()
4423 == core::mem::size_of::<bool>()
4424 }
4425 );
4426 let lhs: Complex<f64> = bytemuck::cast(real_lhs);
4427 let rhs: Complex<f64> = bytemuck::cast(real_rhs);
4428 unsafe { core::mem::transmute_copy(&(lhs.re > rhs.re)) }
4429 } else {
4430 panic!();
4431 }
4432 }
4433
4434 #[inline(always)]
4435 fn simd_greater_than_or_equal<S: Simd>(
4436 ctx: &Self::SimdCtx<S>,
4437 real_lhs: Self::SimdVec<S>,
4438 real_rhs: Self::SimdVec<S>,
4439 ) -> Self::SimdMask<S> {
4440 if const {
4441 core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>()
4442 } {
4443 ctx.greater_than_or_equal_f64s(
4444 bytemuck::cast(real_lhs),
4445 bytemuck::cast(real_rhs),
4446 )
4447 } else if const {
4448 core::mem::size_of::<S::c64s>()
4449 == core::mem::size_of::<Complex<f64>>()
4450 } {
4451 assert!(
4452 const {
4453 core::mem::size_of::<S::m64s>()
4454 == core::mem::size_of::<bool>()
4455 }
4456 );
4457 let lhs: Complex<f64> = bytemuck::cast(real_lhs);
4458 let rhs: Complex<f64> = bytemuck::cast(real_rhs);
4459 unsafe { core::mem::transmute_copy(&(lhs.re >= rhs.re)) }
4460 } else {
4461 panic!();
4462 }
4463 }
4464
4465 #[inline(always)]
4466 fn simd_select<S: Simd>(
4467 ctx: &Self::SimdCtx<S>,
4468 mask: Self::SimdMask<S>,
4469 lhs: Self::SimdVec<S>,
4470 rhs: Self::SimdVec<S>,
4471 ) -> Self::SimdVec<S> {
4472 if const {
4473 core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>()
4474 } {
4475 bytemuck::cast(ctx.select_f64s(
4476 mask,
4477 bytemuck::cast(lhs),
4478 bytemuck::cast(rhs),
4479 ))
4480 } else if const {
4481 core::mem::size_of::<S::c64s>()
4482 == core::mem::size_of::<Complex<f64>>()
4483 } {
4484 assert!(
4485 const {
4486 core::mem::size_of::<S::m64s>()
4487 == core::mem::size_of::<bool>()
4488 }
4489 );
4490 let mask: bool = unsafe { core::mem::transmute_copy(&mask) };
4491 let lhs: Complex<f64> = bytemuck::cast(lhs);
4492 let rhs: Complex<f64> = bytemuck::cast(rhs);
4493 bytemuck::cast(if mask { lhs } else { rhs })
4494 } else {
4495 panic!();
4496 }
4497 }
4498
4499 #[inline(always)]
4500 fn simd_index_select<S: Simd>(
4501 ctx: &Self::SimdCtx<S>,
4502 mask: Self::SimdMask<S>,
4503 lhs: Self::SimdIndex<S>,
4504 rhs: Self::SimdIndex<S>,
4505 ) -> Self::SimdIndex<S> {
4506 f64::simd_index_select(ctx, mask, lhs, rhs)
4507 }
4508
4509 #[inline(always)]
4510 fn simd_index_splat<S: Simd>(
4511 ctx: &Self::SimdCtx<S>,
4512 value: Self::Index,
4513 ) -> Self::SimdIndex<S> {
4514 f64::simd_index_splat(ctx, value)
4515 }
4516
4517 #[inline(always)]
4518 fn simd_index_add<S: Simd>(
4519 ctx: &Self::SimdCtx<S>,
4520 lhs: Self::SimdIndex<S>,
4521 rhs: Self::SimdIndex<S>,
4522 ) -> Self::SimdIndex<S> {
4523 f64::simd_index_add(ctx, lhs, rhs)
4524 }
4525
4526 #[inline(always)]
4527 fn simd_index_less_than<S: Simd>(
4528 ctx: &Self::SimdCtx<S>,
4529 lhs: Self::SimdIndex<S>,
4530 rhs: Self::SimdIndex<S>,
4531 ) -> Self::SimdMask<S> {
4532 f64::simd_index_less_than(ctx, lhs, rhs)
4533 }
4534
4535 #[inline(always)]
4536 fn simd_and_mask<S: Simd>(
4537 ctx: &Self::SimdCtx<S>,
4538 lhs: Self::SimdMask<S>,
4539 rhs: Self::SimdMask<S>,
4540 ) -> Self::SimdMask<S> {
4541 f64::simd_and_mask(ctx, lhs, rhs)
4542 }
4543
4544 #[inline(always)]
4545 fn simd_or_mask<S: Simd>(
4546 ctx: &Self::SimdCtx<S>,
4547 lhs: Self::SimdMask<S>,
4548 rhs: Self::SimdMask<S>,
4549 ) -> Self::SimdMask<S> {
4550 f64::simd_or_mask(ctx, lhs, rhs)
4551 }
4552
4553 #[inline(always)]
4554 fn simd_not_mask<S: Simd>(
4555 ctx: &Self::SimdCtx<S>,
4556 mask: Self::SimdMask<S>,
4557 ) -> Self::SimdMask<S> {
4558 f64::simd_not_mask(ctx, mask)
4559 }
4560
4561 #[inline(always)]
4562 fn simd_first_true_mask<S: Simd>(
4563 ctx: &Self::SimdCtx<S>,
4564 value: Self::SimdMask<S>,
4565 ) -> usize {
4566 f64::simd_first_true_mask(ctx, value)
4567 }
4568
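    // A `c64` spans two `f64` lanes, hence the `2 *` scaling of the element
    // range in the two mask constructors below.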
4569 #[inline(always)]
4570 fn simd_mem_mask_between<S: Simd>(
4571 ctx: &Self::SimdCtx<S>,
4572 start: Self::Index,
4573 end: Self::Index,
4574 ) -> Self::SimdMemMask<S> {
4575 ctx.mask_between_m64s((2 * start) as _, (2 * end) as _)
4576 }
4577
4578 #[inline(always)]
4579 fn simd_mask_between<S: Simd>(
4580 ctx: &Self::SimdCtx<S>,
4581 start: Self::Index,
4582 end: Self::Index,
4583 ) -> Self::SimdMask<S> {
4584 ctx.mask_between_m64s((2 * start) as _, (2 * end) as _)
4585 .mask()
4586 }
4587
4588 #[inline(always)]
4589 unsafe fn simd_mask_load_raw<S: Simd>(
4590 ctx: &Self::SimdCtx<S>,
4591 mask: Self::SimdMemMask<S>,
4592 ptr: *const Self::SimdVec<S>,
4593 ) -> Self::SimdVec<S> {
4594 ctx.mask_load_ptr_c64s(mask, ptr as _)
4595 }
4596
4597 #[inline(always)]
4598 unsafe fn simd_mask_store_raw<S: Simd>(
4599 ctx: &Self::SimdCtx<S>,
4600 mask: Self::SimdMemMask<S>,
4601 ptr: *mut Self::SimdVec<S>,
4602 values: Self::SimdVec<S>,
4603 ) {
4604 ctx.mask_store_ptr_c64s(mask, ptr as _, values);
4605 }
4606}
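/// Zero-sized placeholder scalar for symbolic (structure-only) computations:
/// it stores no data, every arithmetic operation returns `Symbolic`, and it
/// reports itself as both zero and one.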
4607#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
4608pub struct Symbolic;
4609impl_op!({
4610 impl Add for Symbolic {
4611 fn add(self, rhs: Self) -> Self::Output {
4612 _ = rhs;
4613 Symbolic
4614 }
4615 }
4616 impl Sub for Symbolic {
4617 fn sub(self, rhs: Self) -> Self::Output {
4618 _ = rhs;
4619 Symbolic
4620 }
4621 }
4622 impl Mul for Symbolic {
4623 fn mul(self, rhs: Self) -> Self::Output {
4624 _ = rhs;
4625 Symbolic
4626 }
4627 }
4628 impl Div for Symbolic {
4629 fn div(self, rhs: Self) -> Self::Output {
4630 _ = rhs;
4631 Symbolic
4632 }
4633 }
4634 impl Rem for Symbolic {
4635 fn rem(self, rhs: Self) -> Self::Output {
4636 _ = rhs;
4637 Symbolic
4638 }
4639 }
4640});
4641impl_assign_op!({
4642 impl AddAssign for Symbolic {
4643 fn add_assign(&mut self, rhs: Self) {
4644 _ = rhs;
4645 }
4646 }
4647 impl SubAssign for Symbolic {
4648 fn sub_assign(&mut self, rhs: Self) {
4649 _ = rhs;
4650 }
4651 }
4652 impl MulAssign for Symbolic {
4653 fn mul_assign(&mut self, rhs: Self) {
4654 _ = rhs;
4655 }
4656 }
4657 impl DivAssign for Symbolic {
4658 fn div_assign(&mut self, rhs: Self) {
4659 _ = rhs;
4660 }
4661 }
4662 impl RemAssign for Symbolic {
4663 fn rem_assign(&mut self, rhs: Self) {
4664 _ = rhs;
4665 }
4666 }
4667});
4668impl core::ops::Neg for Symbolic {
4669 type Output = Self;
4670
4671 fn neg(self) -> Self {
4672 Self
4673 }
4674}
4675impl core::ops::Neg for &Symbolic {
4676 type Output = Symbolic;
4677
4678 fn neg(self) -> Symbolic {
4679 Symbolic
4680 }
4681}
4682impl num_traits::Zero for Symbolic {
4683 fn zero() -> Self {
4684 Self
4685 }
4686
4687 fn is_zero(&self) -> bool {
4688 true
4689 }
4690}
4691impl num_traits::One for Symbolic {
4692 fn one() -> Self {
4693 Self
4694 }
4695
4696 fn is_one(&self) -> bool {
4697 true
4698 }
4699}
4700impl num_traits::Num for Symbolic {
4701 type FromStrRadixErr = core::convert::Infallible;
4702
4703 fn from_str_radix(_: &str, _: u32) -> Result<Self, Self::FromStrRadixErr> {
4704 Ok(Self)
4705 }
4706}
4707impl Symbolic {
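    /// Returns a mutable slice of `len` `Symbolic` values. Because `Symbolic`
    /// is zero-sized, the slice can be backed by a dangling (but well-aligned)
    /// pointer without allocating, which is sound for ZSTs.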
4708 #[inline]
4709 pub fn materialize(len: usize) -> &'static mut [Self] {
4710 unsafe {
4711 core::slice::from_raw_parts_mut(
4712 core::ptr::NonNull::dangling().as_ptr(),
4713 len,
4714 )
4715 }
4716 }
4717}
4718impl RealField for Symbolic {
4719 fn epsilon_impl() -> Self {
4720 Self
4721 }
4722
4723 fn nbits_impl() -> usize {
4724 0
4725 }
4726
4727 fn min_positive_impl() -> Self {
4728 Self
4729 }
4730
4731 fn max_positive_impl() -> Self {
4732 Self
4733 }
4734
4735 fn sqrt_min_positive_impl() -> Self {
4736 Self
4737 }
4738
4739 fn sqrt_max_positive_impl() -> Self {
4740 Self
4741 }
4742}
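// Since `Symbolic` carries no data, its SIMD associated types are all unit
// types and every SIMD operation below is a no-op.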
4743impl ComplexField for Symbolic {
4744 type Arch = pulp::Scalar;
4745 type Index = usize;
4746 type Real = Self;
4747 type SimdCtx<S: pulp::Simd> = S;
4748 type SimdIndex<S: pulp::Simd> = ();
4749 type SimdMask<S: pulp::Simd> = ();
4750 type SimdMemMask<S: pulp::Simd> = ();
4751 type SimdVec<S: pulp::Simd> = ();
4752 type Unit = Self;
4753
4754 const IS_REAL: bool = true;
4755 const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Copy;
4756
4757 fn zero_impl() -> Self {
4758 Self
4759 }
4760
4761 fn one_impl() -> Self {
4762 Self
4763 }
4764
4765 fn nan_impl() -> Self {
4766 Self
4767 }
4768
4769 fn infinity_impl() -> Self {
4770 Self
4771 }
4772
4773 fn from_real_impl(_: &Self::Real) -> Self {
4774 Self
4775 }
4776
4777 fn from_f64_impl(_: f64) -> Self {
4778 Self
4779 }
4780
4781 fn real_part_impl(_: &Self) -> Self::Real {
4782 Self
4783 }
4784
4785 fn imag_part_impl(_: &Self) -> Self::Real {
4786 Self
4787 }
4788
4789 fn copy_impl(_: &Self) -> Self {
4790 Self
4791 }
4792
4793 fn conj_impl(_: &Self) -> Self {
4794 Self
4795 }
4796
4797 fn recip_impl(_: &Self) -> Self {
4798 Self
4799 }
4800
4801 fn sqrt_impl(_: &Self) -> Self {
4802 Self
4803 }
4804
4805 fn abs_impl(_: &Self) -> Self::Real {
4806 Self
4807 }
4808
4809 fn abs1_impl(_: &Self) -> Self::Real {
4810 Self
4811 }
4812
4813 fn abs2_impl(_: &Self) -> Self::Real {
4814 Self
4815 }
4816
4817 fn mul_real_impl(_: &Self, _: &Self::Real) -> Self {
4818 Self
4819 }
4820
4821 fn mul_pow2_impl(_: &Self, _: &Self::Real) -> Self {
4822 Self
4823 }
4824
4825 fn is_finite_impl(_: &Self) -> bool {
4826 true
4827 }
4828
4829 fn simd_ctx<S: pulp::Simd>(simd: S) -> Self::SimdCtx<S> {
4830 simd
4831 }
4832
4833 fn ctx_from_simd<S: pulp::Simd>(simd: &Self::SimdCtx<S>) -> S {
4834 *simd
4835 }
4836
4837 fn simd_mem_mask_between<S: pulp::Simd>(
4838 _: &Self::SimdCtx<S>,
4839 _: Self::Index,
4840 _: Self::Index,
4841 ) -> Self::SimdMemMask<S> {
4842 ()
4843 }
4844
4845 unsafe fn simd_mask_load_raw<S: pulp::Simd>(
4846 _: &Self::SimdCtx<S>,
4847 _: Self::SimdMemMask<S>,
4848 _: *const Self::SimdVec<S>,
4849 ) -> Self::SimdVec<S> {
4850 ()
4851 }
4852
4853 unsafe fn simd_mask_store_raw<S: pulp::Simd>(
4854 _: &Self::SimdCtx<S>,
4855 _: Self::SimdMemMask<S>,
4856 _: *mut Self::SimdVec<S>,
4857 _: Self::SimdVec<S>,
4858 ) {
4859 ()
4860 }
4861
4862 fn simd_splat<S: pulp::Simd>(
4863 _: &Self::SimdCtx<S>,
4864 _: &Self,
4865 ) -> Self::SimdVec<S> {
4866 ()
4867 }
4868
4869 fn simd_splat_real<S: pulp::Simd>(
4870 _: &Self::SimdCtx<S>,
4871 _: &Self::Real,
4872 ) -> Self::SimdVec<S> {
4873 ()
4874 }
4875
4876 fn simd_add<S: pulp::Simd>(
4877 _: &Self::SimdCtx<S>,
4878 _: Self::SimdVec<S>,
4879 _: Self::SimdVec<S>,
4880 ) -> Self::SimdVec<S> {
4881 ()
4882 }
4883
4884 fn simd_sub<S: pulp::Simd>(
4885 _: &Self::SimdCtx<S>,
4886 _: Self::SimdVec<S>,
4887 _: Self::SimdVec<S>,
4888 ) -> Self::SimdVec<S> {
4889 ()
4890 }
4891
4892 fn simd_neg<S: pulp::Simd>(
4893 _: &Self::SimdCtx<S>,
4894 _: Self::SimdVec<S>,
4895 ) -> Self::SimdVec<S> {
4896 ()
4897 }
4898
4899 fn simd_conj<S: pulp::Simd>(
4900 _: &Self::SimdCtx<S>,
4901 _: Self::SimdVec<S>,
4902 ) -> Self::SimdVec<S> {
4903 ()
4904 }
4905
4906 fn simd_abs1<S: pulp::Simd>(
4907 _: &Self::SimdCtx<S>,
4908 _: Self::SimdVec<S>,
4909 ) -> Self::SimdVec<S> {
4910 ()
4911 }
4912
4913 fn simd_abs_max<S: pulp::Simd>(
4914 _: &Self::SimdCtx<S>,
4915 _: Self::SimdVec<S>,
4916 ) -> Self::SimdVec<S> {
4917 ()
4918 }
4919
4920 fn simd_mul_real<S: pulp::Simd>(
4921 _: &Self::SimdCtx<S>,
4922 _: Self::SimdVec<S>,
4923 _: Self::SimdVec<S>,
4924 ) -> Self::SimdVec<S> {
4925 ()
4926 }
4927
4928 fn simd_mul_pow2<S: pulp::Simd>(
4929 _: &Self::SimdCtx<S>,
4930 _: Self::SimdVec<S>,
4931 _: Self::SimdVec<S>,
4932 ) -> Self::SimdVec<S> {
4933 ()
4934 }
4935
4936 fn simd_mul<S: pulp::Simd>(
4937 _: &Self::SimdCtx<S>,
4938 _: Self::SimdVec<S>,
4939 _: Self::SimdVec<S>,
4940 ) -> Self::SimdVec<S> {
4941 ()
4942 }
4943
4944 fn simd_conj_mul<S: pulp::Simd>(
4945 _: &Self::SimdCtx<S>,
4946 _: Self::SimdVec<S>,
4947 _: Self::SimdVec<S>,
4948 ) -> Self::SimdVec<S> {
4949 ()
4950 }
4951
4952 fn simd_mul_add<S: pulp::Simd>(
4953 _: &Self::SimdCtx<S>,
4954 _: Self::SimdVec<S>,
4955 _: Self::SimdVec<S>,
4956 _: Self::SimdVec<S>,
4957 ) -> Self::SimdVec<S> {
4958 ()
4959 }
4960
4961 fn simd_conj_mul_add<S: pulp::Simd>(
4962 _: &Self::SimdCtx<S>,
4963 _: Self::SimdVec<S>,
4964 _: Self::SimdVec<S>,
4965 _: Self::SimdVec<S>,
4966 ) -> Self::SimdVec<S> {
4967 ()
4968 }
4969
4970 fn simd_abs2<S: pulp::Simd>(
4971 _: &Self::SimdCtx<S>,
4972 _: Self::SimdVec<S>,
4973 ) -> Self::SimdVec<S> {
4974 ()
4975 }
4976
4977 fn simd_abs2_add<S: pulp::Simd>(
4978 _: &Self::SimdCtx<S>,
4979 _: Self::SimdVec<S>,
4980 _: Self::SimdVec<S>,
4981 ) -> Self::SimdVec<S> {
4982 ()
4983 }
4984
4985 fn simd_reduce_sum<S: pulp::Simd>(
4986 _: &Self::SimdCtx<S>,
4987 _: Self::SimdVec<S>,
4988 ) -> Self {
4989 Self
4990 }
4991
4992 fn simd_reduce_max<S: pulp::Simd>(
4993 _: &Self::SimdCtx<S>,
4994 _: Self::SimdVec<S>,
4995 ) -> Self {
4996 Self
4997 }
4998
4999 fn simd_equal<S: pulp::Simd>(
5000 _: &Self::SimdCtx<S>,
5001 _: Self::SimdVec<S>,
5002 _: Self::SimdVec<S>,
5003 ) -> Self::SimdMask<S> {
5004 ()
5005 }
5006
5007 fn simd_less_than<S: pulp::Simd>(
5008 _: &Self::SimdCtx<S>,
5009 _: Self::SimdVec<S>,
5010 _: Self::SimdVec<S>,
5011 ) -> Self::SimdMask<S> {
5012 ()
5013 }
5014
5015 fn simd_less_than_or_equal<S: pulp::Simd>(
5016 _: &Self::SimdCtx<S>,
5017 _: Self::SimdVec<S>,
5018 _: Self::SimdVec<S>,
5019 ) -> Self::SimdMask<S> {
5020 ()
5021 }
5022
5023 fn simd_greater_than<S: pulp::Simd>(
5024 _: &Self::SimdCtx<S>,
5025 _: Self::SimdVec<S>,
5026 _: Self::SimdVec<S>,
5027 ) -> Self::SimdMask<S> {
5028 ()
5029 }
5030
5031 fn simd_greater_than_or_equal<S: pulp::Simd>(
5032 _: &Self::SimdCtx<S>,
5033 _: Self::SimdVec<S>,
5034 _: Self::SimdVec<S>,
5035 ) -> Self::SimdMask<S> {
5036 ()
5037 }
5038
5039 fn simd_select<S: pulp::Simd>(
5040 _: &Self::SimdCtx<S>,
5041 _: Self::SimdMask<S>,
5042 _: Self::SimdVec<S>,
5043 _: Self::SimdVec<S>,
5044 ) -> Self::SimdVec<S> {
5045 ()
5046 }
5047
5048 fn simd_index_select<S: pulp::Simd>(
5049 _: &Self::SimdCtx<S>,
5050 _: Self::SimdMask<S>,
5051 _: Self::SimdIndex<S>,
5052 _: Self::SimdIndex<S>,
5053 ) -> Self::SimdIndex<S> {
5054 ()
5055 }
5056
5057 fn simd_index_splat<S: pulp::Simd>(
5058 _: &Self::SimdCtx<S>,
5059 _: Self::Index,
5060 ) -> Self::SimdIndex<S> {
5061 ()
5062 }
5063
5064 fn simd_index_add<S: pulp::Simd>(
5065 _: &Self::SimdCtx<S>,
5066 _: Self::SimdIndex<S>,
5067 _: Self::SimdIndex<S>,
5068 ) -> Self::SimdIndex<S> {
5069 ()
5070 }
5071
5072 fn simd_and_mask<S: pulp::Simd>(
5073 _: &Self::SimdCtx<S>,
5074 _: Self::SimdMask<S>,
5075 _: Self::SimdMask<S>,
5076 ) -> Self::SimdMask<S> {
5077 ()
5078 }
5079
5080 fn simd_or_mask<S: pulp::Simd>(
5081 _: &Self::SimdCtx<S>,
5082 _: Self::SimdMask<S>,
5083 _: Self::SimdMask<S>,
5084 ) -> Self::SimdMask<S> {
5085 ()
5086 }
5087
5088 fn simd_not_mask<S: Simd>(
5089 _: &Self::SimdCtx<S>,
5090 _: Self::SimdMask<S>,
5091 ) -> Self::SimdMask<S> {
5092 ()
5093 }
5094
5095 fn simd_first_true_mask<S: pulp::Simd>(
5096 _: &Self::SimdCtx<S>,
5097 _: Self::SimdMask<S>,
5098 ) -> usize {
5099 0
5100 }
5101
5102 fn simd_mask_between<S: Simd>(
5103 _: &Self::SimdCtx<S>,
5104 _: Self::Index,
5105 _: Self::Index,
5106 ) -> Self::SimdMask<S> {
5107 ()
5108 }
5109
5110 fn simd_index_less_than<S: Simd>(
5111 _: &Self::SimdCtx<S>,
5112 _: Self::SimdIndex<S>,
5113 _: Self::SimdIndex<S>,
5114 ) -> Self::SimdMask<S> {
5115 ()
5116 }
5117}
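// Public scalar aliases: `c64`/`c32` are complex floats, while `fx128` is the
// double-double (`qd::Quad`) extended-precision real type and `cx128` its
// complex counterpart.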
5118pub type c64 = Complex<f64>;
5119pub type c32 = Complex<f32>;
5120pub type fx128 = qd::Quad;
5121pub type cx128 = Complex<fx128>;
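// Re-export the `num_traits` and `pulp` crates, both of which appear in this
// crate's public trait bounds and impls.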
5122pub extern crate num_traits;
5123pub extern crate pulp;
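// SIMD layout for `fx128`: `Quad<S::f64s>` stores the high words of each lane
// in `.0` and the low words in `.1`, so most operations defer to the
// double-double kernels in `qd::simd`.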
5124impl ComplexField for fx128 {
5125 type Arch = pulp::Arch;
5126 type Index = u64;
5127 type Real = Self;
5128 type SimdCtx<S: Simd> = S;
5129 type SimdIndex<S: Simd> = S::u64s;
5130 type SimdMask<S: Simd> = S::m64s;
5131 type SimdMemMask<S: Simd> = Quad<pulp::MemMask<S::m64s>>;
5132 type SimdVec<S: Simd> = Quad<S::f64s>;
5133 type Unit = f64;
5134
5135 const IS_REAL: bool = true;
5136 const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
5137
5138 #[inline(always)]
5139 fn zero_impl() -> Self {
5140 Self::ZERO
5141 }
5142
5143 #[inline(always)]
5144 fn one_impl() -> Self {
5145 Quad(1.0, 0.0)
5146 }
5147
5148 #[inline(always)]
5149 fn nan_impl() -> Self {
5150 Self::NAN
5151 }
5152
5153 #[inline(always)]
5154 fn infinity_impl() -> Self {
5155 Self::INFINITY
5156 }
5157
5158 #[inline(always)]
5159 fn from_real_impl(real: &Self::Real) -> Self {
5160 *real
5161 }
5162
5163 #[inline(always)]
5164 fn from_f64_impl(real: f64) -> Self {
5165 real.into()
5166 }
5167
5168 #[inline(always)]
5169 fn real_part_impl(value: &Self) -> Self::Real {
5170 *value
5171 }
5172
5173 #[inline(always)]
5174 fn imag_part_impl(_: &Self) -> Self::Real {
5175 Self::ZERO
5176 }
5177
5178 #[inline(always)]
5179 fn copy_impl(value: &Self) -> Self {
5180 *value
5181 }
5182
5183 #[inline(always)]
5184 fn conj_impl(value: &Self) -> Self {
5185 *value
5186 }
5187
5188 #[inline(always)]
5189 fn recip_impl(value: &Self) -> Self {
5190 if value.0.abs() == f64::INFINITY {
5191 Quad::ZERO
5192 } else {
5193 Quad::from(1.0) / *value
5194 }
5195 }
5196
5197 #[inline(always)]
5198 fn sqrt_impl(value: &Self) -> Self {
5199 value.sqrt()
5200 }
5201
5202 #[inline(always)]
5203 fn abs_impl(value: &Self) -> Self::Real {
5204 value.abs()
5205 }
5206
5207 #[inline(always)]
5208 fn abs1_impl(value: &Self) -> Self::Real {
5209 value.abs()
5210 }
5211
5212 #[inline(always)]
5213 fn abs2_impl(value: &Self) -> Self::Real {
5214 value * value
5215 }
5216
5217 #[inline(always)]
5218 fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
5219 lhs * rhs
5220 }
5221
5222 #[inline(always)]
5223 fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
5224 lhs * rhs
5225 }
5226
5227 #[inline(always)]
5228 fn is_finite_impl(value: &Self) -> bool {
5229 value.0.is_finite() && value.1.is_finite()
5230 }
5231
5232 #[inline(always)]
5233 fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
5234 simd
5235 }
5236
5237 #[inline(always)]
5238 fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
5239 *ctx
5240 }
5241
5242 #[inline(always)]
5243 fn simd_mask_between<S: Simd>(
5244 ctx: &Self::SimdCtx<S>,
5245 start: Self::Index,
5246 end: Self::Index,
5247 ) -> Self::SimdMask<S> {
5248 ctx.mask_between_m64s(start as _, end as _).mask()
5249 }
5250
5251 #[inline(always)]
5252 fn simd_mem_mask_between<S: Simd>(
5253 ctx: &Self::SimdCtx<S>,
5254 start: Self::Index,
5255 end: Self::Index,
5256 ) -> Self::SimdMemMask<S> {
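        // Each `fx128` element occupies two `f64` lanes, so the element range
        // [start, end) is doubled and then split into the sub-ranges covered
        // by the first and second `f64s` halves of the register.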
5257 let n = (core::mem::size_of::<Self::SimdVec<S>>()
5258 / core::mem::size_of::<Self>()) as u64;
5259 let start = start * 2;
5260 let end = end * 2;
5261 let mut sa = start.min(n);
5262 let mut ea = end.min(n);
5263 let mut sb = start.max(n) - n;
5264 let mut eb = end.max(n) - n;
5265 if sa == ea {
5266 sa = 0;
5267 ea = 0;
5268 }
5269 if sb == eb {
5270 sb = 0;
5271 eb = 0;
5272 }
5273 let a = f64::simd_mem_mask_between(ctx, sa, ea);
5274 let b = f64::simd_mem_mask_between(ctx, sb, eb);
5275 Quad(a, b)
5276 }
5277
5278 #[inline(always)]
5279 unsafe fn simd_mask_load_raw<S: Simd>(
5280 ctx: &Self::SimdCtx<S>,
5281 mask: Self::SimdMemMask<S>,
5282 ptr: *const Self::SimdVec<S>,
5283 ) -> Self::SimdVec<S> {
5284 unsafe {
5285 Quad(
5286 f64::simd_mask_load_raw(ctx, mask.0, &raw const (*ptr).0),
5287 f64::simd_mask_load_raw(ctx, mask.1, &raw const (*ptr).1),
5288 )
5289 }
5290 }
5291
5292 #[inline(always)]
5293 unsafe fn simd_mask_store_raw<S: Simd>(
5294 ctx: &Self::SimdCtx<S>,
5295 mask: Self::SimdMemMask<S>,
5296 ptr: *mut Self::SimdVec<S>,
5297 values: Self::SimdVec<S>,
5298 ) {
5299 unsafe {
5300 Quad(
5301 f64::simd_mask_store_raw(
5302 ctx,
5303 mask.0,
5304 &raw mut (*ptr).0,
5305 values.0,
5306 ),
5307 f64::simd_mask_store_raw(
5308 ctx,
5309 mask.1,
5310 &raw mut (*ptr).1,
5311 values.1,
5312 ),
5313 );
5314 }
5315 }
5316
5317 #[inline(always)]
5318 fn simd_splat<S: Simd>(
5319 ctx: &Self::SimdCtx<S>,
5320 value: &Self,
5321 ) -> Self::SimdVec<S> {
5322 Quad(ctx.splat_f64s(value.0), ctx.splat_f64s(value.1))
5323 }
5324
5325 #[inline(always)]
5326 fn simd_splat_real<S: Simd>(
5327 ctx: &Self::SimdCtx<S>,
5328 value: &Self::Real,
5329 ) -> Self::SimdVec<S> {
5330 Quad(ctx.splat_f64s(value.0), ctx.splat_f64s(value.1))
5331 }
5332
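    // Arithmetic on the packed representation defers to `qd::simd`; the
    // `*_estimate` variants are, as the name suggests, the faster add/sub
    // kernels that trade a small amount of accuracy for speed.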
5333 #[inline(always)]
5334 fn simd_add<S: Simd>(
5335 ctx: &Self::SimdCtx<S>,
5336 lhs: Self::SimdVec<S>,
5337 rhs: Self::SimdVec<S>,
5338 ) -> Self::SimdVec<S> {
5339 qd::simd::add_estimate(*ctx, lhs, rhs)
5340 }
5341
5342 #[inline(always)]
5343 fn simd_sub<S: Simd>(
5344 ctx: &Self::SimdCtx<S>,
5345 lhs: Self::SimdVec<S>,
5346 rhs: Self::SimdVec<S>,
5347 ) -> Self::SimdVec<S> {
5348 qd::simd::sub_estimate(*ctx, lhs, rhs)
5349 }
5350
5351 #[inline(always)]
5352 fn simd_neg<S: Simd>(
5353 ctx: &Self::SimdCtx<S>,
5354 value: Self::SimdVec<S>,
5355 ) -> Self::SimdVec<S> {
5356 qd::simd::neg(*ctx, value)
5357 }
5358
5359 #[inline(always)]
5360 fn simd_conj<S: Simd>(
5361 _: &Self::SimdCtx<S>,
5362 value: Self::SimdVec<S>,
5363 ) -> Self::SimdVec<S> {
5364 value
5365 }
5366
5367 #[inline(always)]
5368 fn simd_abs1<S: Simd>(
5369 ctx: &Self::SimdCtx<S>,
5370 value: Self::SimdVec<S>,
5371 ) -> Self::SimdVec<S> {
5372 qd::simd::abs(*ctx, value)
5373 }
5374
5375 #[inline(always)]
5376 fn simd_abs_max<S: Simd>(
5377 ctx: &Self::SimdCtx<S>,
5378 value: Self::SimdVec<S>,
5379 ) -> Self::SimdVec<S> {
5380 qd::simd::abs(*ctx, value)
5381 }
5382
5383 #[inline(always)]
5384 fn simd_mul_real<S: Simd>(
5385 ctx: &Self::SimdCtx<S>,
5386 lhs: Self::SimdVec<S>,
5387 real_rhs: Self::SimdVec<S>,
5388 ) -> Self::SimdVec<S> {
5389 qd::simd::mul(*ctx, lhs, real_rhs)
5390 }
5391
5392 #[inline(always)]
5393 fn simd_mul_pow2<S: Simd>(
5394 ctx: &Self::SimdCtx<S>,
5395 lhs: Self::SimdVec<S>,
5396 real_rhs: Self::SimdVec<S>,
5397 ) -> Self::SimdVec<S> {
5398 qd::simd::mul(*ctx, lhs, real_rhs)
5399 }
5400
5401 #[inline(always)]
5402 fn simd_mul<S: Simd>(
5403 ctx: &Self::SimdCtx<S>,
5404 lhs: Self::SimdVec<S>,
5405 rhs: Self::SimdVec<S>,
5406 ) -> Self::SimdVec<S> {
5407 qd::simd::mul(*ctx, lhs, rhs)
5408 }
5409
5410 #[inline(always)]
5411 fn simd_conj_mul<S: Simd>(
5412 ctx: &Self::SimdCtx<S>,
5413 lhs: Self::SimdVec<S>,
5414 rhs: Self::SimdVec<S>,
5415 ) -> Self::SimdVec<S> {
5416 qd::simd::mul(*ctx, lhs, rhs)
5417 }
5418
5419 #[inline(always)]
5420 fn simd_mul_add<S: Simd>(
5421 ctx: &Self::SimdCtx<S>,
5422 lhs: Self::SimdVec<S>,
5423 rhs: Self::SimdVec<S>,
5424 acc: Self::SimdVec<S>,
5425 ) -> Self::SimdVec<S> {
5426 qd::simd::add_estimate(*ctx, qd::simd::mul(*ctx, lhs, rhs), acc)
5427 }
5428
5429 #[inline(always)]
5430 fn simd_conj_mul_add<S: Simd>(
5431 ctx: &Self::SimdCtx<S>,
5432 lhs: Self::SimdVec<S>,
5433 rhs: Self::SimdVec<S>,
5434 acc: Self::SimdVec<S>,
5435 ) -> Self::SimdVec<S> {
5436 qd::simd::add_estimate(*ctx, qd::simd::mul(*ctx, lhs, rhs), acc)
5437 }
5438
5439 #[inline(always)]
5440 fn simd_abs2<S: Simd>(
5441 ctx: &Self::SimdCtx<S>,
5442 value: Self::SimdVec<S>,
5443 ) -> Self::SimdVec<S> {
5444 qd::simd::mul(*ctx, value, value)
5445 }
5446
5447 #[inline(always)]
5448 fn simd_abs2_add<S: Simd>(
5449 ctx: &Self::SimdCtx<S>,
5450 value: Self::SimdVec<S>,
5451 acc: Self::SimdVec<S>,
5452 ) -> Self::SimdVec<S> {
5453 qd::simd::add_estimate(*ctx, qd::simd::mul(*ctx, value, value), acc)
5454 }
5455
5456 #[inline(always)]
5457 fn simd_reduce_sum<S: Simd>(
5458 _: &Self::SimdCtx<S>,
5459 value: Self::SimdVec<S>,
5460 ) -> Self {
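        // Reduce on the scalar side: reinterpret the hi/lo lane vectors as
        // `f64` slices and accumulate the recombined `Quad` value of each lane.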
5461 let a = value.0;
5462 let b = value.1;
5463 let a: &[f64] = bytemuck::cast_slice(core::slice::from_ref(&a));
5464 let b: &[f64] = bytemuck::cast_slice(core::slice::from_ref(&b));
5465 let mut acc = Quad::ZERO;
5466 for (&a, &b) in core::iter::zip(a, b) {
5467 acc += Quad(a, b);
5468 }
5469 acc
5470 }
5471
5472 #[inline(always)]
5473 fn simd_reduce_max<S: Simd>(
5474 _: &Self::SimdCtx<S>,
5475 value: Self::SimdVec<S>,
5476 ) -> Self {
5477 let a = value.0;
5478 let b = value.1;
5479 let a: &[f64] = bytemuck::cast_slice(core::slice::from_ref(&a));
5480 let b: &[f64] = bytemuck::cast_slice(core::slice::from_ref(&b));
5481 let mut acc = Quad::NEG_INFINITY;
5482 for (&a, &b) in core::iter::zip(a, b) {
5483 let val = Quad(a, b);
5484 if val > acc {
5485 acc = val
5486 }
5487 }
5488 acc
5489 }
5490
5491 #[inline(always)]
5492 fn simd_equal<S: Simd>(
5493 ctx: &Self::SimdCtx<S>,
5494 real_lhs: Self::SimdVec<S>,
5495 real_rhs: Self::SimdVec<S>,
5496 ) -> Self::SimdMask<S> {
5497 qd::simd::eq(*ctx, real_lhs, real_rhs)
5498 }
5499
5500 #[inline(always)]
5501 fn simd_less_than<S: Simd>(
5502 ctx: &Self::SimdCtx<S>,
5503 real_lhs: Self::SimdVec<S>,
5504 real_rhs: Self::SimdVec<S>,
5505 ) -> Self::SimdMask<S> {
5506 qd::simd::less_than(*ctx, real_lhs, real_rhs)
5507 }
5508
5509 #[inline(always)]
5510 fn simd_less_than_or_equal<S: Simd>(
5511 ctx: &Self::SimdCtx<S>,
5512 real_lhs: Self::SimdVec<S>,
5513 real_rhs: Self::SimdVec<S>,
5514 ) -> Self::SimdMask<S> {
5515 qd::simd::less_than_or_equal(*ctx, real_lhs, real_rhs)
5516 }
5517
5518 #[inline(always)]
5519 fn simd_greater_than<S: Simd>(
5520 ctx: &Self::SimdCtx<S>,
5521 real_lhs: Self::SimdVec<S>,
5522 real_rhs: Self::SimdVec<S>,
5523 ) -> Self::SimdMask<S> {
5524 qd::simd::greater_than(*ctx, real_lhs, real_rhs)
5525 }
5526
5527 #[inline(always)]
5528 fn simd_greater_than_or_equal<S: Simd>(
5529 ctx: &Self::SimdCtx<S>,
5530 real_lhs: Self::SimdVec<S>,
5531 real_rhs: Self::SimdVec<S>,
5532 ) -> Self::SimdMask<S> {
5533 qd::simd::greater_than_or_equal(*ctx, real_lhs, real_rhs)
5534 }
5535
5536 #[inline(always)]
5537 fn simd_select<S: Simd>(
5538 ctx: &Self::SimdCtx<S>,
5539 mask: Self::SimdMask<S>,
5540 lhs: Self::SimdVec<S>,
5541 rhs: Self::SimdVec<S>,
5542 ) -> Self::SimdVec<S> {
5543 Quad(
5544 ctx.select_f64s(mask, lhs.0, rhs.0),
5545 ctx.select_f64s(mask, lhs.1, rhs.1),
5546 )
5547 }
5548
5549 #[inline(always)]
5550 fn simd_index_select<S: Simd>(
5551 ctx: &Self::SimdCtx<S>,
5552 mask: Self::SimdMask<S>,
5553 lhs: Self::SimdIndex<S>,
5554 rhs: Self::SimdIndex<S>,
5555 ) -> Self::SimdIndex<S> {
5556 ctx.select_u64s(mask, lhs, rhs)
5557 }
5558
5559 #[inline(always)]
5560 fn simd_index_splat<S: Simd>(
5561 ctx: &Self::SimdCtx<S>,
5562 value: Self::Index,
5563 ) -> Self::SimdIndex<S> {
5564 ctx.splat_u64s(value as u64)
5565 }
5566
5567 #[inline(always)]
5568 fn simd_index_add<S: Simd>(
5569 ctx: &Self::SimdCtx<S>,
5570 lhs: Self::SimdIndex<S>,
5571 rhs: Self::SimdIndex<S>,
5572 ) -> Self::SimdIndex<S> {
5573 ctx.add_u64s(lhs, rhs)
5574 }
5575
5576 #[inline(always)]
5577 fn simd_index_less_than<S: Simd>(
5578 ctx: &Self::SimdCtx<S>,
5579 lhs: Self::SimdIndex<S>,
5580 rhs: Self::SimdIndex<S>,
5581 ) -> Self::SimdMask<S> {
5582 ctx.less_than_u64s(lhs, rhs)
5583 }
5584
5585 #[inline(always)]
5586 fn simd_and_mask<S: Simd>(
5587 ctx: &Self::SimdCtx<S>,
5588 lhs: Self::SimdMask<S>,
5589 rhs: Self::SimdMask<S>,
5590 ) -> Self::SimdMask<S> {
5591 ctx.and_m64s(lhs, rhs)
5592 }
5593
5594 #[inline(always)]
5595 fn simd_or_mask<S: Simd>(
5596 ctx: &Self::SimdCtx<S>,
5597 lhs: Self::SimdMask<S>,
5598 rhs: Self::SimdMask<S>,
5599 ) -> Self::SimdMask<S> {
5600 ctx.or_m64s(lhs, rhs)
5601 }
5602
5603 #[inline(always)]
5604 fn simd_not_mask<S: Simd>(
5605 ctx: &Self::SimdCtx<S>,
5606 mask: Self::SimdMask<S>,
5607 ) -> Self::SimdMask<S> {
5608 ctx.not_m64s(mask)
5609 }
5610
5611 #[inline(always)]
5612 fn simd_first_true_mask<S: Simd>(
5613 ctx: &Self::SimdCtx<S>,
5614 value: Self::SimdMask<S>,
5615 ) -> usize {
5616 ctx.first_true_m64s(value)
5617 }
5618}
5619impl RealField for fx128 {
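    // The reported epsilon is widened to 8 * `Quad::EPSILON`; scaling both
    // words by a power of two keeps the value exact.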
5620 #[inline(always)]
5621 fn epsilon_impl() -> Self {
5622 let mut x = Quad::EPSILON;
5623 x.0 *= 8.0;
5624 x.1 *= 8.0;
5625 x
5626 }
5627
5628 #[inline(always)]
5629 fn nbits_impl() -> usize {
5630 100
5631 }
5632
5633 #[inline(always)]
5634 fn min_positive_impl() -> Self {
5635 Quad::MIN_POSITIVE
5636 }
5637
5638 #[inline(always)]
5639 fn max_positive_impl() -> Self {
5640 Quad::MIN_POSITIVE.recip()
5641 }
5642
5643 #[inline(always)]
5644 fn sqrt_min_positive_impl() -> Self {
5645 Quad::MIN_POSITIVE.sqrt()
5646 }
5647
5648 #[inline(always)]
5649 fn sqrt_max_positive_impl() -> Self {
5650 Quad::MIN_POSITIVE.recip().sqrt()
5651 }
5652}
5653pub mod ext {
5654 use super::*;
5655 pub use super::{ComplexField, RealField};
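    /// Method-call counterparts of the free functions in [`math_utils`],
    /// blanket-implemented for every [`ComplexField`], so `value.abs()` or
    /// `T::zero()` can be written instead of calling the `*_impl` associated
    /// functions directly.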
5656 pub trait ComplexFieldExt: ComplexField {
5657 #[inline(always)]
5658 #[must_use]
5659 fn nbits() -> usize {
5660 Self::Real::nbits_impl()
5661 }
5662 #[inline(always)]
5663 #[must_use]
5664 fn zero() -> Self {
5665 Self::zero_impl()
5666 }
5667 #[inline(always)]
5668 #[must_use]
5669 fn one() -> Self {
5670 Self::one_impl()
5671 }
5672 #[inline(always)]
5673 #[must_use]
5674 fn nan() -> Self {
5675 Self::nan_impl()
5676 }
5677 #[inline(always)]
5678 #[must_use]
5679 fn infinity() -> Self {
5680 Self::infinity_impl()
5681 }
5682 #[inline(always)]
5683 #[must_use]
5684 fn as_real(&self) -> Self {
5685 Self::from_real_impl(&Self::real_part_impl(self))
5686 }
5687 #[inline(always)]
5688 #[must_use]
5689 fn real(&self) -> Self::Real {
5690 Self::real_part_impl(self)
5691 }
5692 #[inline(always)]
5693 #[must_use]
5694 fn imag(&self) -> Self::Real {
5695 Self::imag_part_impl(self)
5696 }
5697 #[inline(always)]
5698 #[must_use]
5699 fn copy(&self) -> Self {
5700 Self::copy_impl(self)
5701 }
5702 #[inline(always)]
5703 #[must_use]
5704 fn conj(&self) -> Self {
5705 Self::conj_impl(self)
5706 }
5707 #[inline(always)]
5708 #[must_use]
5709 fn mul_real(&self, rhs: impl ByRef<Self::Real>) -> Self {
5710 Self::mul_real_impl(self, rhs.by_ref())
5711 }
5712 #[inline(always)]
5713 #[must_use]
5714 fn mul_pow2(&self, rhs: impl ByRef<Self::Real>) -> Self {
5715 Self::mul_real_impl(self, rhs.by_ref())
5716 }
5717 #[inline(always)]
5718 #[must_use]
5719 fn abs1(&self) -> Self::Real {
5720 Self::abs1_impl(self)
5721 }
5722 #[inline(always)]
5723 #[must_use]
5724 fn absmax(&self) -> Self::Real {
5725 if const { Self::IS_REAL } {
5726 Self::abs1_impl(self)
5727 } else {
5728 &Self::Real::abs1_impl(&real(self))
5729 + &Self::Real::abs1_impl(&imag(self))
5730 }
5731 }
5732 #[inline(always)]
5733 #[must_use]
5734 fn abs(&self) -> Self::Real {
5735 Self::abs_impl(self)
5736 }
5737 #[inline(always)]
5738 #[must_use]
5739 fn abs2(&self) -> Self::Real {
5740 Self::abs2_impl(self)
5741 }
5742 #[inline(always)]
5743 #[must_use]
5744 fn is_nan(&self) -> bool {
5745 Self::is_nan_impl(self)
5746 }
5747 #[inline(always)]
5748 #[must_use]
5749 fn is_finite(&self) -> bool {
5750 Self::is_finite_impl(self)
5751 }
5752 #[inline(always)]
5753 #[must_use]
5754 fn sqrt(&self) -> Self {
5755 Self::sqrt_impl(self)
5756 }
5757 #[inline(always)]
5758 #[must_use]
5759 fn recip(&self) -> Self {
5760 Self::recip_impl(self)
5761 }
5762 #[inline(always)]
5763 #[must_use]
5764 fn from_f64(value: f64) -> Self {
5765 Self::from_f64_impl(value)
5766 }
5767 }
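    /// Real-only extension methods: constants such as `eps`, `min_positive`
    /// and their square roots, plus pairwise helpers (`fmax`, `fmin`,
    /// `hypot`) and conversion into a complex field via `to_cplx`.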
5768 pub trait RealFieldExt: RealField {
5769 #[inline(always)]
5770 #[must_use]
5771 fn eps<T: RealField>() -> T {
5772 T::Real::epsilon_impl()
5773 }
5774 #[inline(always)]
5775 #[must_use]
5776 fn nbits<T: ComplexField>() -> usize {
5777 T::Real::nbits_impl()
5778 }
5779 #[inline(always)]
5780 #[must_use]
5781 fn min_positive<T: RealField>() -> T {
5782 T::min_positive_impl()
5783 }
5784 #[inline(always)]
5785 #[must_use]
5786 fn max_positive<T: RealField>() -> T {
5787 T::max_positive_impl()
5788 }
5789 #[inline(always)]
5790 #[must_use]
5791 fn sqrt_min_positive<T: RealField>() -> T {
5792 T::sqrt_min_positive_impl()
5793 }
5794 #[inline(always)]
5795 #[must_use]
5796 fn sqrt_max_positive<T: RealField>() -> T {
5797 T::sqrt_max_positive_impl()
5798 }
5799 #[inline(always)]
5800 #[must_use]
5801 fn to_cplx<T: ComplexField<Real = Self>>(&self) -> T {
5802 T::from_real_impl(self)
5803 }
5804 #[inline(always)]
5805 #[must_use]
5806 fn fmax(&self, rhs: impl ByRef<Self>) -> Self {
5807 math_utils::max(self, rhs.by_ref())
5808 }
5809 #[inline(always)]
5810 #[must_use]
5811 fn fmin(&self, rhs: impl ByRef<Self>) -> Self {
5812 math_utils::min(self, rhs.by_ref())
5813 }
5814 #[inline(always)]
5815 #[must_use]
5816 fn hypot(&self, rhs: impl ByRef<Self>) -> Self {
5817 math_utils::hypot(self, rhs.by_ref())
5818 }
5819 }
5820 impl<T: ComplexField> ComplexFieldExt for T {}
5821 impl<T: RealField> RealFieldExt for T {}
5822}
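// A minimal smoke test for the zero-sized `Symbolic` scalar and the extension
// traits above. This is a sketch: it is gated on the `std` feature so the
// default test harness can link, and it only exercises items defined in this
// file.
#[cfg(all(test, feature = "std"))]
mod symbolic_tests {
    use super::ext::ComplexFieldExt;
    use super::Symbolic;

    #[test]
    fn symbolic_is_inert() {
        // Every arithmetic operation on `Symbolic` is a no-op.
        assert_eq!(Symbolic + Symbolic, Symbolic);
        assert_eq!(-Symbolic, Symbolic);
        assert_eq!(Symbolic::zero(), Symbolic::one());
        // `materialize` hands out a ZST-backed slice without allocating.
        assert_eq!(Symbolic::materialize(16).len(), 16);
    }
}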