1#![no_std]
2#![allow(non_camel_case_types)]
3
4use bytemuck::Pod;
5use core::fmt::Debug;
6use num_complex::Complex;
7use pulp::Simd;
8use qd::Quad;
9
10use math_utils::*;
11
12use pulp::try_const;
13
14pub mod math_utils {
15 use crate::{ByRef, ComplexField, RealField, abs_impl};
16 use pulp::try_const;
17
18 #[inline(always)]
19 #[must_use]
20 pub fn eps<T: RealField>() -> T {
21 T::Real::epsilon_impl()
22 }
23
24 #[inline(always)]
25 #[must_use]
26 pub fn nbits<T: ComplexField>() -> usize {
27 T::Real::nbits_impl()
28 }
29
30 #[inline(always)]
31 #[must_use]
32 pub fn min_positive<T: RealField>() -> T {
33 T::min_positive_impl()
34 }
35 #[inline(always)]
36 #[must_use]
37 pub fn max_positive<T: RealField>() -> T {
38 T::max_positive_impl()
39 }
40 #[inline(always)]
41 #[must_use]
42 pub fn sqrt_min_positive<T: RealField>() -> T {
43 T::sqrt_min_positive_impl()
44 }
45 #[inline(always)]
46 #[must_use]
47 pub fn sqrt_max_positive<T: RealField>() -> T {
48 T::sqrt_max_positive_impl()
49 }
50
51 #[inline(always)]
52 #[must_use]
53 pub fn zero<T: ComplexField>() -> T {
54 T::zero_impl()
55 }
56 #[inline(always)]
57 #[must_use]
58 pub fn one<T: ComplexField>() -> T {
59 T::one_impl()
60 }
61 #[inline(always)]
62 #[must_use]
63 pub fn nan<T: ComplexField>() -> T {
64 T::nan_impl()
65 }
66 #[inline(always)]
67 #[must_use]
68 pub fn infinity<T: ComplexField>() -> T {
69 T::infinity_impl()
70 }
71
72 #[inline(always)]
73 #[must_use]
74 pub fn real<T: ComplexField>(value: &T) -> T::Real {
75 T::real_part_impl((value).by_ref())
76 }
77 #[inline(always)]
78 #[must_use]
79 pub fn imag<T: ComplexField>(value: &T) -> T::Real {
80 T::imag_part_impl((value).by_ref())
81 }
82 #[inline(always)]
83 #[track_caller]
84 #[must_use]
85 pub fn neg<T: NegByRef>(value: &T) -> T::Output {
86 value.neg_by_ref()
87 }
88 #[inline(always)]
89 #[must_use]
90 pub fn copy<T: ComplexField>(value: &T) -> T {
91 T::copy_impl((value).by_ref())
92 }
93
94 #[inline(always)]
95 #[must_use]
96 pub fn conj<T: ComplexField>(value: &T) -> T {
97 T::conj_impl((value).by_ref())
98 }
99
100 #[inline(always)]
101 #[track_caller]
102 #[must_use]
103 pub fn add<T: AddByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
104 lhs.add_by_ref(rhs)
105 }
106 #[inline(always)]
107 #[track_caller]
108 #[must_use]
109 pub fn sub<T: SubByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
110 lhs.sub_by_ref(rhs)
111 }
112 #[inline(always)]
113 #[track_caller]
114 #[must_use]
115 pub fn mul<T: MulByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
116 lhs.mul_by_ref(rhs)
117 }
118 #[inline(always)]
119 #[track_caller]
120 #[must_use]
121 pub fn div<T: DivByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
122 lhs.div_by_ref(rhs)
123 }
124
125 #[inline(always)]
126 #[must_use]
127 pub fn mul_real<T: ComplexField>(lhs: &T, rhs: &T::Real) -> T {
128 T::mul_real_impl((lhs).by_ref(), (rhs).by_ref())
129 }
130
    #[inline(always)]
    #[must_use]
    pub fn mul_pow2<T: ComplexField>(lhs: &T, rhs: &T::Real) -> T {
        T::mul_pow2_impl((lhs).by_ref(), (rhs).by_ref())
    }
136
137 #[inline(always)]
138 #[must_use]
139 pub fn abs1<T: ComplexField>(value: &T) -> T::Real {
140 T::abs1_impl((value).by_ref())
141 }
142
    #[inline(always)]
    #[must_use]
    pub fn absmax<T: ComplexField>(value: &T) -> T::Real {
        if try_const! { T::IS_REAL } {
            T::abs1_impl(value)
        } else {
            max(&T::Real::abs1_impl(&real(value)), &T::Real::abs1_impl(&imag(value)))
        }
    }
152
153 #[inline(always)]
154 #[must_use]
155 pub fn abs<T: ComplexField>(value: &T) -> T::Real {
156 T::abs_impl((value).by_ref())
157 }
158
159 #[inline(always)]
160 #[must_use]
161 pub fn hypot<T: RealField>(lhs: &T, rhs: &T) -> T {
162 abs_impl::<T::Real>(lhs.clone(), rhs.clone())
163 }
164
165 #[inline(always)]
166 #[must_use]
167 pub fn abs2<T: ComplexField>(value: &T) -> T::Real {
168 T::abs2_impl((value).by_ref())
169 }
170
171 #[inline(always)]
172 #[must_use]
173 pub fn max<T: RealField>(lhs: &T, rhs: &T) -> T {
174 if lhs > rhs { copy(lhs) } else { copy(rhs) }
175 }
176 #[inline(always)]
177 #[must_use]
178 pub fn min<T: RealField>(lhs: &T, rhs: &T) -> T {
179 if lhs < rhs { copy(lhs) } else { copy(rhs) }
180 }
181
182 #[inline(always)]
183 #[must_use]
184 pub fn is_nan<T: ComplexField>(value: &T) -> bool {
185 T::is_nan_impl((value).by_ref())
186 }
187
188 #[inline(always)]
189 #[must_use]
190 pub fn is_finite<T: ComplexField>(value: &T) -> bool {
191 T::is_finite_impl((value).by_ref())
192 }
193
194 #[inline(always)]
195 #[must_use]
196 pub fn sqrt<T: ComplexField>(value: &T) -> T {
197 T::sqrt_impl((value).by_ref())
198 }
199 #[inline(always)]
200 #[must_use]
201 pub fn recip<T: ComplexField>(value: &T) -> T {
202 T::recip_impl((value).by_ref())
203 }
204
205 #[inline(always)]
206 #[must_use]
207 pub fn from_real<T: ComplexField>(value: &T::Real) -> T {
208 T::from_real_impl((value).by_ref())
209 }
210
211 #[inline(always)]
212 #[must_use]
213 pub fn from_f64<T: ComplexField>(value: f64) -> T {
214 T::from_f64_impl(value)
215 }
216
217 pub use crate::{AddByRef, DivByRef, MulByRef, NegByRef, SubByRef};
218}
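// Usage sketch (illustrative only, not additional API): the free functions above are thin
// wrappers over the `ComplexField`/`RealField` impls, so for plain floats they behave like the
// corresponding scalar operations, e.g.
//
//     let x = math_utils::abs2(&3.0_f64);            // 9.0
//     let h = math_utils::hypot(&3.0_f64, &4.0_f64); // 5.0, computed scaling-robustly
//     let e = math_utils::eps::<f64>();              // f64::EPSILON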
219
220pub trait AddByRef<Rhs = Self> {
221 type Output;
222 fn add_by_ref(&self, rhs: &Rhs) -> Self::Output;
223}
224pub trait SubByRef<Rhs = Self> {
225 type Output;
226 fn sub_by_ref(&self, rhs: &Rhs) -> Self::Output;
227}
228pub trait NegByRef {
229 type Output;
230 fn neg_by_ref(&self) -> Self::Output;
231}
232pub trait MulByRef<Rhs = Self> {
233 type Output;
234 fn mul_by_ref(&self, rhs: &Rhs) -> Self::Output;
235}
236pub trait DivByRef<Rhs = Self> {
237 type Output;
238 fn div_by_ref(&self, rhs: &Rhs) -> Self::Output;
239}
240
241impl<Rhs, Lhs, Output> AddByRef<Rhs> for Lhs
242where
243 for<'a> &'a Lhs: core::ops::Add<&'a Rhs, Output = Output>,
244{
245 type Output = Output;
246
247 #[inline]
248 #[track_caller]
249 fn add_by_ref(&self, rhs: &Rhs) -> Self::Output {
250 self + rhs
251 }
252}
253impl<Rhs, Lhs, Output> SubByRef<Rhs> for Lhs
254where
255 for<'a> &'a Lhs: core::ops::Sub<&'a Rhs, Output = Output>,
256{
257 type Output = Output;
258
259 #[inline]
260 #[track_caller]
261 fn sub_by_ref(&self, rhs: &Rhs) -> Self::Output {
262 self - rhs
263 }
264}
265impl<Rhs, Lhs, Output> MulByRef<Rhs> for Lhs
266where
267 for<'a> &'a Lhs: core::ops::Mul<&'a Rhs, Output = Output>,
268{
269 type Output = Output;
270
271 #[inline]
272 #[track_caller]
273 fn mul_by_ref(&self, rhs: &Rhs) -> Self::Output {
274 self * rhs
275 }
276}
277impl<Rhs, Lhs, Output> DivByRef<Rhs> for Lhs
278where
279 for<'a> &'a Lhs: core::ops::Div<&'a Rhs, Output = Output>,
280{
281 type Output = Output;
282
283 #[inline]
284 #[track_caller]
285 fn div_by_ref(&self, rhs: &Rhs) -> Self::Output {
286 self / rhs
287 }
288}
289
290impl<T, Output> NegByRef for T
291where
292 for<'a> &'a T: core::ops::Neg<Output = Output>,
293{
294 type Output = Output;
295
296 #[inline]
297 #[track_caller]
298 fn neg_by_ref(&self) -> Self::Output {
299 -self
300 }
301}
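// These blanket impls mean that any type whose *references* implement the corresponding
// `core::ops` traits gets the by-ref traits for free: `2.0_f64.add_by_ref(&3.0)` is `5.0`, and
// a (hypothetical) arbitrary-precision type with `impl Add<&Big> for &Big` would get `AddByRef`
// without cloning either operand.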
302
303#[faer_macros::math]
304fn abs_impl<T: RealField>(re: T, im: T) -> T {
305 let small = sqrt_min_positive();
306 let big = sqrt_max_positive();
307 let one = one();
308 let re_abs = abs(re);
309 let im_abs = abs(im);
310
311 if re_abs > big || im_abs > big {
312 sqrt(abs2(re * small) + abs2(im * small)) * big
313 } else if re_abs > one || im_abs > one {
314 sqrt(abs2(re) + abs2(im))
315 } else {
316 sqrt(abs2(re * big) + abs2(im * big)) * small
317 }
318}
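// The three branches rescale the inputs before squaring so that `abs2` neither overflows nor
// underflows. Illustrative `f64` case: for re = 3.0e200 and im = 4.0e200 the naive
// `sqrt(re * re + im * im)` overflows to infinity, while the scaled computation returns
// approximately 5.0e200.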
319
320#[faer_macros::math]
321fn recip_impl<T: RealField>(re: T, im: T) -> (T, T) {
322 if is_nan(re) || is_nan(im) {
323 return (nan(), nan());
324 }
325 if re == zero() && im == zero() {
326 return (infinity(), infinity());
327 }
328 if !is_finite(re) || !is_finite(im) {
329 return (zero(), zero());
330 }
331
332 let small = sqrt_min_positive();
333 let big = sqrt_max_positive();
334 let one = one();
335 let re_abs = abs(re);
336 let im_abs = abs(im);
337
338 if re_abs > big || im_abs > big {
339 let re = re * small;
340 let im = im * small;
341 let inv = recip(abs2(re) + abs2(im));
342 (((re * inv) * small), ((-im * inv) * small))
343 } else if re_abs > one || im_abs > one {
344 let inv = recip(abs2(re) + abs2(im));
345 ((re * inv), (-im * inv))
346 } else {
347 let re = re * big;
348 let im = im * big;
349 let inv = recip(abs2(re) + abs2(im));
350 (((re * inv) * big), ((-im * inv) * big))
351 }
352}
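// Edge cases mirror IEEE expectations: NaN in gives NaN out, an exact zero gives an infinite
// reciprocal, and a non-finite input gives zero. For finite inputs the same rescaling trick as
// in `abs_impl` keeps the intermediate `abs2` in range; e.g. (illustrative) the reciprocal of
// `1e-300 + 0i` comes out as roughly `1e300 + 0i` instead of the unscaled |z|^2 underflowing
// to zero.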
353
354#[faer_macros::math]
355fn sqrt_impl<T: RealField>(re: T, im: T) -> (T, T) {
356 let im_negative = im < zero();
357 let half = from_f64(0.5);
358 let abs = abs_impl(re.clone(), im.clone());
359
360 let mut sum = re + abs;
361 if sum < zero() {
362 sum = zero();
363 }
364
365 let out_re = sqrt(mul_pow2(sum, half));
366 let mut out_im = sqrt(mul_pow2(abs - re, half));
367 if im_negative {
368 out_im = -out_im;
369 }
370 (out_re, out_im)
371}
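// Principal-branch square root: |out_re| = sqrt((|z| + re) / 2) and |out_im| = sqrt((|z| - re) / 2),
// with the imaginary part taking the sign of `im`, so the result always lies in the closed right
// half-plane. Illustrative check: the square root of -4 + 0i comes out as (0, 2).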
372
373pub trait ByRef<T> {
374 fn by_ref(&self) -> &T;
375}
376impl<T> ByRef<T> for T {
377 #[inline]
378 fn by_ref(&self) -> &T {
379 self
380 }
381}
382impl<T> ByRef<T> for &T {
383 #[inline]
384 fn by_ref(&self) -> &T {
385 *self
386 }
387}
388impl<T> ByRef<T> for &mut T {
389 #[inline]
390 fn by_ref(&self) -> &T {
391 *self
392 }
393}
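// `ByRef` lets generic code accept `T`, `&T`, or `&mut T` uniformly and always work with a plain
// `&T`; e.g. both `5.0_f64.by_ref()` and `(&5.0_f64).by_ref()` yield a `&f64` to the same value,
// which is what the `(value).by_ref()` calls in `math_utils` rely on.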
394
395#[repr(transparent)]
396pub struct SimdCtx<T: ComplexField, S: Simd>(pub T::SimdCtx<S>);
397
398#[repr(transparent)]
399pub struct SimdCtxCopy<T: ComplexField, S: Simd>(pub T::SimdCtx<S>);
400
401#[derive(Copy, Clone, Debug)]
402#[repr(transparent)]
403pub struct RealReg<T>(pub T);
404
405impl<T: ComplexField, S: Simd> SimdCtx<T, S> {
406 #[inline(always)]
407 pub fn new(ctx: &T::SimdCtx<S>) -> &Self {
408 unsafe { &*(ctx as *const T::SimdCtx<S> as *const Self) }
409 }
410
411 #[inline(always)]
412 pub fn zero(&self) -> T::SimdVec<S> {
413 unsafe { core::mem::zeroed() }
414 }
415
416 #[inline(always)]
417 pub fn splat(&self, value: &T) -> T::SimdVec<S> {
418 unsafe { core::mem::transmute_copy(&T::simd_splat(&self.0, (value).by_ref())) }
419 }
420
421 #[inline(always)]
422 pub fn splat_real(&self, value: &T::Real) -> RealReg<T::SimdVec<S>> {
423 RealReg(unsafe { core::mem::transmute_copy(&T::simd_splat_real(&self.0, (value).by_ref())) })
424 }
425
426 #[inline(always)]
427 pub fn add(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
428 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
429 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
430 unsafe { core::mem::transmute_copy(&T::simd_add(&self.0, lhs, rhs)) }
431 }
432
433 #[inline(always)]
434 pub fn sub(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
435 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
436 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
437 unsafe { core::mem::transmute_copy(&T::simd_sub(&self.0, lhs, rhs)) }
438 }
439
440 #[inline(always)]
441 pub fn neg(&self, value: T::SimdVec<S>) -> T::SimdVec<S> {
442 let value = unsafe { core::mem::transmute_copy(&value) };
443 unsafe { core::mem::transmute_copy(&T::simd_neg(&self.0, value)) }
444 }
445
446 #[inline(always)]
447 pub fn conj(&self, value: T::SimdVec<S>) -> T::SimdVec<S> {
448 let value = unsafe { core::mem::transmute_copy(&value) };
449 unsafe { core::mem::transmute_copy(&T::simd_conj(&self.0, value)) }
450 }
451
452 #[inline(always)]
453 pub fn abs1(&self, value: T::SimdVec<S>) -> RealReg<T::SimdVec<S>> {
454 let value = unsafe { core::mem::transmute_copy(&value) };
455 RealReg(unsafe { core::mem::transmute_copy(&T::simd_abs1(&self.0, value)) })
456 }
457
458 #[inline(always)]
459 pub fn abs_max(&self, value: T::SimdVec<S>) -> RealReg<T::SimdVec<S>> {
460 let value = unsafe { core::mem::transmute_copy(&value) };
461 RealReg(unsafe { core::mem::transmute_copy(&T::simd_abs_max(&self.0, value)) })
462 }
463
464 #[inline(always)]
465 pub fn mul_real(&self, lhs: T::SimdVec<S>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdVec<S> {
466 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
467 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
468 unsafe { core::mem::transmute_copy(&T::simd_mul_real(&self.0, lhs, rhs)) }
469 }
470
471 #[inline(always)]
472 pub fn mul_pow2(&self, lhs: T::SimdVec<S>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdVec<S> {
473 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
474 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
475 unsafe { core::mem::transmute_copy(&T::simd_mul_pow2(&self.0, lhs, rhs)) }
476 }
477
478 #[inline(always)]
479 pub fn mul(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
480 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
481 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
482 unsafe { core::mem::transmute_copy(&T::simd_mul(&self.0, lhs, rhs)) }
483 }
484
485 #[inline(always)]
486 pub fn conj_mul(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
487 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
488 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
489 unsafe { core::mem::transmute_copy(&T::simd_conj_mul(&self.0, lhs, rhs)) }
490 }
491
492 #[inline(always)]
493 pub fn mul_add(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>, acc: T::SimdVec<S>) -> T::SimdVec<S> {
494 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
495 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
496 let acc = unsafe { core::mem::transmute_copy(&acc) };
497 unsafe { core::mem::transmute_copy(&T::simd_mul_add(&self.0, lhs, rhs, acc)) }
498 }
499
500 #[inline(always)]
501 pub fn conj_mul_add(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>, acc: T::SimdVec<S>) -> T::SimdVec<S> {
502 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
503 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
504 let acc = unsafe { core::mem::transmute_copy(&acc) };
505 unsafe { core::mem::transmute_copy(&T::simd_conj_mul_add(&self.0, lhs, rhs, acc)) }
506 }
507
508 #[inline(always)]
509 pub fn abs2(&self, value: T::SimdVec<S>) -> RealReg<T::SimdVec<S>> {
510 let value = unsafe { core::mem::transmute_copy(&value) };
511 RealReg(unsafe { core::mem::transmute_copy(&T::simd_abs2(&self.0, value)) })
512 }
513
514 #[inline(always)]
515 pub fn abs2_add(&self, value: T::SimdVec<S>, acc: RealReg<T::SimdVec<S>>) -> RealReg<T::SimdVec<S>> {
516 let value = unsafe { core::mem::transmute_copy(&value) };
517 let acc = unsafe { core::mem::transmute_copy(&acc) };
518 RealReg(unsafe { core::mem::transmute_copy(&T::simd_abs2_add(&self.0, value, acc)) })
519 }
520
521 #[inline(always)]
522 pub fn reduce_sum(&self, value: T::SimdVec<S>) -> T {
523 let value = unsafe { core::mem::transmute_copy(&value) };
524 unsafe { core::mem::transmute_copy(&T::simd_reduce_sum(&self.0, value)) }
525 }
526
527 #[inline(always)]
528 pub fn reduce_max(&self, value: RealReg<T::SimdVec<S>>) -> T {
529 let value = unsafe { core::mem::transmute_copy(&value) };
530 unsafe { core::mem::transmute_copy(&T::simd_reduce_max(&self.0, value)) }
531 }
532
533 #[faer_macros::math]
534 #[inline(always)]
535 pub fn reduce_sum_real(&self, value: RealReg<T::SimdVec<S>>) -> Real<T> {
536 let value = T::simd_reduce_sum(&self.0, value.0);
537 if try_const! { T::SIMD_ABS_SPLIT_REAL_IMAG && !S::IS_SCALAR } {
538 add(real(value), imag(value))
539 } else {
540 real(value)
541 }
542 }
543
544 #[faer_macros::math]
545 #[inline(always)]
546 pub fn reduce_max_real(&self, value: RealReg<T::SimdVec<S>>) -> Real<T> {
547 let value = T::simd_reduce_max(&self.0, value.0);
548 if try_const! { T::SIMD_ABS_SPLIT_REAL_IMAG && !S::IS_SCALAR } {
549 max(real(value), imag(value))
550 } else {
551 real(value)
552 }
553 }
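    // With `SIMD_ABS_SPLIT_REAL_IMAG`, the vectorized absolute-value kernels keep the real and
    // imaginary contributions in separate lanes rather than combining them per element, so the
    // horizontal reductions above have to merge the two halves at the end (a sum for the
    // `abs1`/`abs2` accumulators, a max for `abs_max`). Scalar backends never split, hence the
    // `!S::IS_SCALAR` guard.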
554
555 #[inline(always)]
556 pub fn max(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> RealReg<T::SimdVec<S>> {
557 let cmp = self.gt(lhs, rhs);
558 RealReg(self.select(cmp, lhs.0, rhs.0))
559 }
560
561 #[inline(always)]
562 pub fn eq(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdMask<S> {
563 T::simd_equal(&self.0, lhs, rhs)
564 }
565
566 #[inline(always)]
567 pub fn lt(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdMask<S> {
568 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
569 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
570 unsafe { core::mem::transmute_copy(&T::simd_less_than(&self.0, lhs, rhs)) }
571 }
572
573 #[inline(always)]
574 pub fn gt(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdMask<S> {
575 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
576 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
577 unsafe { core::mem::transmute_copy(&T::simd_greater_than(&self.0, lhs, rhs)) }
578 }
579
580 #[inline(always)]
581 pub fn le(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdMask<S> {
582 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
583 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
584 unsafe { core::mem::transmute_copy(&T::simd_less_than_or_equal(&self.0, lhs, rhs)) }
585 }
586
587 #[inline(always)]
588 pub fn ge(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdMask<S> {
589 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
590 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
591 unsafe { core::mem::transmute_copy(&T::simd_greater_than_or_equal(&self.0, lhs, rhs)) }
592 }
593
594 #[inline(always)]
595 pub fn select(&self, mask: T::SimdMask<S>, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
596 let lhs = unsafe { core::mem::transmute_copy(&lhs) };
597 let rhs = unsafe { core::mem::transmute_copy(&rhs) };
598 unsafe { core::mem::transmute_copy(&T::simd_select(&self.0, mask, lhs, rhs)) }
599 }
600
601 #[inline(always)]
602 pub fn iselect(&self, mask: T::SimdMask<S>, lhs: T::SimdIndex<S>, rhs: T::SimdIndex<S>) -> T::SimdIndex<S> {
603 unsafe { core::mem::transmute_copy(&T::simd_index_select(&self.0, mask, lhs, rhs)) }
604 }
605
606 #[inline(always)]
607 pub fn isplat(&self, value: T::Index) -> T::SimdIndex<S> {
608 unsafe { core::mem::transmute_copy(&T::simd_index_splat(&self.0, value)) }
609 }
610
611 #[inline(always)]
612 pub fn iadd(&self, lhs: T::SimdIndex<S>, rhs: T::SimdIndex<S>) -> T::SimdIndex<S> {
613 unsafe { core::mem::transmute_copy(&T::simd_index_add(&self.0, lhs, rhs)) }
614 }
615
616 #[inline(always)]
617 pub fn or_mask(&self, lhs: T::SimdMask<S>, rhs: T::SimdMask<S>) -> T::SimdMask<S> {
618 T::simd_or_mask(&self.0, lhs, rhs)
619 }
620
621 #[inline(always)]
622 pub fn and_mask(&self, lhs: T::SimdMask<S>, rhs: T::SimdMask<S>) -> T::SimdMask<S> {
623 T::simd_and_mask(&self.0, lhs, rhs)
624 }
625
626 #[inline(always)]
627 pub fn not_mask(&self, mask: T::SimdMask<S>) -> T::SimdMask<S> {
628 T::simd_not_mask(&self.0, mask)
629 }
630
631 #[inline(always)]
632 pub fn first_true_mask(&self, value: T::SimdMask<S>) -> usize {
633 T::simd_first_true_mask(&self.0, value)
634 }
635
636 #[inline(always)]
637 pub unsafe fn mask_load(&self, mask: T::SimdMemMask<S>, ptr: *const T::SimdVec<S>) -> T::SimdVec<S> {
638 unsafe { T::simd_mask_load(&self.0, mask, ptr) }
639 }
640
641 #[inline(always)]
642 pub unsafe fn mask_store(&self, mask: T::SimdMemMask<S>, ptr: *mut T::SimdVec<S>, value: T::SimdVec<S>) {
643 let value = unsafe { core::mem::transmute_copy(&value) };
644 unsafe { T::simd_mask_store(&self.0, mask, ptr, value) }
645 }
646
647 #[inline(always)]
648 pub fn load(&self, ptr: &T::SimdVec<S>) -> T::SimdVec<S> {
649 unsafe { core::mem::transmute_copy(&T::simd_load(&self.0, ptr)) }
650 }
651
652 #[inline(always)]
653 pub fn store(&self, ptr: &mut T::SimdVec<S>, value: T::SimdVec<S>) {
654 let value = unsafe { core::mem::transmute_copy(&value) };
655 unsafe { core::mem::transmute_copy(&T::simd_store(&self.0, ptr, value)) }
656 }
657}
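// `SimdCtx` is a `#[repr(transparent)]` view over `T::SimdCtx<S>`, which is what makes the
// pointer cast in `new` sound. The pervasive `transmute_copy` calls either strip the
// `#[repr(transparent)]` `RealReg` wrapper or map a generic associated type to itself, so they
// do not reinterpret bits; they only bridge types the compiler cannot unify generically.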
658
659pub unsafe trait Conjugate: Send + Sync + core::fmt::Debug {
660 const IS_CANONICAL: bool;
661
662 type Conj: Conjugate<Conj = Self, Canonical = Self::Canonical>;
663 type Canonical: Conjugate<Canonical = Self::Canonical> + ComplexField;
664}
665
666pub type Real<T> = <<T as Conjugate>::Canonical as ComplexField>::Real;
667
668#[derive(Copy, Clone, Debug, PartialEq, Eq)]
669pub struct ComplexConj<T> {
670 pub re: T,
671 pub im_neg: T,
672}
673
674#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
675pub enum SimdCapabilities {
676 None,
677 Copy,
678 Simd,
679}
680
681impl SimdCapabilities {
682 #[inline]
683 pub const fn is_copy(self) -> bool {
684 matches!(self, Self::Copy | Self::Simd)
685 }
686
687 #[inline]
688 pub const fn is_simd(self) -> bool {
689 matches!(self, Self::Simd)
690 }
691}
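// Illustrative: `SimdCapabilities::Copy.is_copy()` and `SimdCapabilities::Simd.is_simd()` are
// both true, while `SimdCapabilities::None` reports false for both, letting generic kernels
// choose between scalar, copy-only and full SIMD code paths.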
692
693mod seal {
694 pub trait Seal {}
695 impl Seal for u8 {}
696 impl Seal for u16 {}
697 impl Seal for u32 {}
698 impl Seal for u64 {}
699 impl Seal for u128 {}
700 impl Seal for usize {}
701 impl Seal for i32 {}
702 impl Seal for i64 {}
703 impl Seal for isize {}
704}
705
706pub trait Seal: seal::Seal {}
707impl<T: seal::Seal> Seal for T {}
708
709pub trait SignedIndex:
713 Seal
714 + core::fmt::Debug
715 + core::ops::Neg<Output = Self>
716 + core::ops::Add<Output = Self>
717 + core::ops::Sub<Output = Self>
718 + core::ops::AddAssign
719 + core::ops::SubAssign
720 + bytemuck::Pod
721 + Eq
722 + Ord
723 + Send
724 + Sync
725{
726 const MAX: Self;
728
729 #[must_use]
731 fn truncate(value: usize) -> Self;
732
733 #[must_use]
735 fn zx(self) -> usize;
736 #[must_use]
738 fn sx(self) -> usize;
739
740 fn sum_nonnegative(slice: &[Self]) -> Option<Self> {
742 let mut acc = Self::zeroed();
743 for &i in slice {
744 if Self::MAX - i < acc {
745 return None;
746 }
747 acc += i;
748 }
749 Some(acc)
750 }
751}
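// `sum_nonnegative` is an overflow-checked reduction: it returns `None` instead of wrapping.
// Illustrative `i32` behaviour:
//
//     // <i32 as SignedIndex>::sum_nonnegative(&[1, 2, 3]) == Some(6)
//     // <i32 as SignedIndex>::sum_nonnegative(&[i32::MAX, 1]) == None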
752
753impl SignedIndex for i32 {
754 const MAX: Self = Self::MAX;
755
756 #[inline(always)]
757 fn truncate(value: usize) -> Self {
758 #[allow(clippy::assertions_on_constants)]
759 const _: () = {
760 core::assert!(i32::BITS <= usize::BITS);
761 };
762 value as isize as Self
763 }
764
765 #[inline(always)]
766 fn zx(self) -> usize {
767 self as u32 as usize
768 }
769
770 #[inline(always)]
771 fn sx(self) -> usize {
772 self as isize as usize
773 }
774}
775
776#[cfg(any(target_pointer_width = "64"))]
777impl SignedIndex for i64 {
778 const MAX: Self = Self::MAX;
779
780 #[inline(always)]
781 fn truncate(value: usize) -> Self {
782 #[allow(clippy::assertions_on_constants)]
783 const _: () = {
784 core::assert!(i64::BITS <= usize::BITS);
785 };
786 value as isize as Self
787 }
788
789 #[inline(always)]
790 fn zx(self) -> usize {
791 self as u64 as usize
792 }
793
794 #[inline(always)]
795 fn sx(self) -> usize {
796 self as isize as usize
797 }
798}
799
800impl SignedIndex for isize {
801 const MAX: Self = Self::MAX;
802
803 #[inline(always)]
804 fn truncate(value: usize) -> Self {
805 value as isize
806 }
807
808 #[inline(always)]
809 fn zx(self) -> usize {
810 self as usize
811 }
812
813 #[inline(always)]
814 fn sx(self) -> usize {
815 self as usize
816 }
817}
818
819pub trait IndexCore:
820 Seal
821 + core::fmt::Debug
822 + core::ops::Not<Output = Self>
823 + core::ops::BitAnd<Output = Self>
824 + core::ops::BitOr<Output = Self>
825 + core::ops::BitXor<Output = Self>
826 + core::ops::Add<Output = Self>
827 + core::ops::Sub<Output = Self>
828 + core::ops::AddAssign
829 + core::ops::SubAssign
830 + bytemuck::Pod
831 + Eq
832 + Ord
833 + Send
834 + Sync
836{
837 const MAX: Self;
838
839 #[must_use]
841 fn truncate(value: usize) -> Self;
842
843 #[must_use]
845 fn zx(self) -> usize;
846}
847
848pub trait Index: IndexCore {
849 type FixedWidth: Index;
851 type Signed: SignedIndex;
853
854 const BITS: u32 = core::mem::size_of::<Self>() as u32 * 8;
855
856 #[inline(always)]
858 fn canonicalize(slice: &[Self]) -> &[Self::FixedWidth] {
859 bytemuck::cast_slice(slice)
860 }
861
862 #[inline(always)]
864 fn canonicalize_mut(slice: &mut [Self]) -> &mut [Self::FixedWidth] {
865 bytemuck::cast_slice_mut(slice)
866 }
867
868 #[inline(always)]
870 fn from_signed(value: Self::Signed) -> Self {
871 bytemuck::cast(value)
872 }
873
874 #[inline(always)]
876 fn to_signed(self) -> Self::Signed {
877 bytemuck::cast(self)
878 }
879
880 #[inline]
882 fn sum_nonnegative(slice: &[Self]) -> Option<Self> {
883 Self::Signed::sum_nonnegative(bytemuck::cast_slice(slice)).map(Self::from_signed)
884 }
885}
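// `Index` layers fixed-width and signed views over `IndexCore`: `canonicalize` reborrows a
// `&[usize]` as its fixed-width equivalent (`&[u64]` on 64-bit targets) through bytemuck, and
// `to_signed`/`from_signed` are bit-preserving casts, so e.g. `5usize.to_signed()` is `5isize`
// and `usize::MAX.to_signed()` is `-1isize`.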
886
887impl IndexCore for u8 {
888 const MAX: Self = Self::MAX;
889
890 #[inline(always)]
891 fn truncate(value: usize) -> Self {
892 value as _
893 }
894
895 #[inline(always)]
896 fn zx(self) -> usize {
897 self as _
898 }
899}
900impl IndexCore for u16 {
901 const MAX: Self = Self::MAX;
902
903 #[inline(always)]
904 fn truncate(value: usize) -> Self {
905 value as _
906 }
907
908 #[inline(always)]
909 fn zx(self) -> usize {
910 self as _
911 }
912}
913impl IndexCore for u32 {
914 const MAX: Self = Self::MAX;
915
916 #[inline(always)]
917 fn truncate(value: usize) -> Self {
918 value as _
919 }
920
921 #[inline(always)]
922 fn zx(self) -> usize {
923 self as _
924 }
925}
926
927impl IndexCore for u64 {
928 const MAX: Self = Self::MAX;
929
930 #[inline(always)]
931 fn truncate(value: usize) -> Self {
932 value as _
933 }
934
935 #[inline(always)]
936 fn zx(self) -> usize {
937 self as _
938 }
939}
940
941impl IndexCore for u128 {
942 const MAX: Self = Self::MAX;
943
944 #[inline(always)]
945 fn truncate(value: usize) -> Self {
946 value as _
947 }
948
949 #[inline(always)]
950 fn zx(self) -> usize {
951 self as _
952 }
953}
954
955impl IndexCore for usize {
956 const MAX: Self = Self::MAX;
957
958 #[inline(always)]
959 fn truncate(value: usize) -> Self {
960 value
961 }
962
963 #[inline(always)]
964 fn zx(self) -> usize {
965 self
966 }
967}
968
969impl Index for u32 {
970 type FixedWidth = u32;
971 type Signed = i32;
972}
973#[cfg(any(target_pointer_width = "64"))]
974impl Index for u64 {
975 type FixedWidth = u64;
976 type Signed = i64;
977}
978
979impl Index for usize {
980 #[cfg(target_pointer_width = "32")]
981 type FixedWidth = u32;
982 #[cfg(target_pointer_width = "64")]
983 type FixedWidth = u64;
984 type Signed = isize;
985}
986
987unsafe impl<T: RealField> Conjugate for T {
988 type Canonical = T;
989 type Conj = T;
990
991 const IS_CANONICAL: bool = true;
992}
993
994unsafe impl<T: RealField> Conjugate for Complex<T> {
995 type Canonical = Complex<T>;
996 type Conj = ComplexConj<T>;
997
998 const IS_CANONICAL: bool = true;
999}
1000unsafe impl<T: RealField> Conjugate for ComplexConj<T> {
1001 type Canonical = Complex<T>;
1002 type Conj = Complex<T>;
1003
1004 const IS_CANONICAL: bool = false;
1005}
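// `ComplexConj<T>` stores the real part together with the negated imaginary part, so
// reinterpreting the storage of a `Complex<T>` as `ComplexConj<T>` yields its conjugate with no
// data movement; conjugation just toggles between the two types. Both canonicalize to
// `Complex<T>`, and only the canonical form implements `ComplexField`.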
1006
1007pub trait SimdArch: Copy + Default + Send + Sync {
1008 fn dispatch<R>(self, f: impl pulp::WithSimd<Output = R>) -> R;
1009}
1010
1011impl SimdArch for pulp::Arch {
1012 #[inline]
1013 fn dispatch<R>(self, f: impl pulp::WithSimd<Output = R>) -> R {
1014 self.dispatch(f)
1015 }
1016}
1017
1018impl SimdArch for pulp::Scalar {
1019 #[inline]
1020 fn dispatch<R>(self, f: impl pulp::WithSimd<Output = R>) -> R {
1021 f.with_simd(self)
1022 }
1023}
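// `pulp::Arch::dispatch` detects the best available instruction set at runtime and runs `f`
// with it, while `pulp::Scalar` runs the same closure with plain scalar arithmetic; both
// satisfy `SimdArch`, so a field type can opt out of runtime dispatch by picking `Scalar` as
// its `Arch`.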
1024
1025pub trait ComplexField:
1026 Debug
1027 + Clone
1028 + Conjugate<Canonical = Self>
1029 + PartialEq
1030 + AddByRef<Output = Self>
1031 + SubByRef<Output = Self>
1032 + MulByRef<Output = Self>
1033 + NegByRef<Output = Self>
1034{
1035 const IS_REAL: bool;
1036 const SIMD_ABS_SPLIT_REAL_IMAG: bool = false;
1037
1038 type Arch: SimdArch;
1039 type Unit: ComplexField;
1040
1041 type SimdCtx<S: Simd>: Copy;
1042 type Index: IndexCore;
1043
1044 type Real: RealField;
1045
1046 #[doc(hidden)]
1047 const IS_NATIVE_F32: bool = false;
1048 #[doc(hidden)]
1049 const IS_NATIVE_C32: bool = false;
1050 #[doc(hidden)]
1051 const IS_NATIVE_F64: bool = false;
1052 #[doc(hidden)]
1053 const IS_NATIVE_C64: bool = false;
1054
1055 const SIMD_CAPABILITIES: SimdCapabilities;
1056 type SimdMask<S: Simd>: Copy + Debug;
1057 type SimdMemMask<S: Simd>: Copy + Debug;
1058
1059 type SimdVec<S: Simd>: Pod + Debug;
1060 type SimdIndex<S: Simd>: Pod + Debug;
1061
1062 fn zero_impl() -> Self;
1063 fn one_impl() -> Self;
1064 fn nan_impl() -> Self;
1065 fn infinity_impl() -> Self;
1066
1067 fn from_real_impl(real: &Self::Real) -> Self;
1068 fn from_f64_impl(real: f64) -> Self;
1069
1070 fn real_part_impl(value: &Self) -> Self::Real;
1071 fn imag_part_impl(value: &Self) -> Self::Real;
1072
1073 fn copy_impl(value: &Self) -> Self;
1074 fn conj_impl(value: &Self) -> Self;
1075 fn recip_impl(value: &Self) -> Self;
1076 fn sqrt_impl(value: &Self) -> Self;
1077
1078 fn abs_impl(value: &Self) -> Self::Real;
1079 fn abs1_impl(value: &Self) -> Self::Real;
1080 fn abs2_impl(value: &Self) -> Self::Real;
1081
1082 fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self;
1083
1084 fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self;
1085
1086 fn is_finite_impl(value: &Self) -> bool;
1087 fn is_nan_impl(value: &Self) -> bool {
1088 value != value
1089 }
1090
1091 fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S>;
1092 fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S;
1093
1094 fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S>;
1095 fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S>;
1096 unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S>;
1097 unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>);
1098
1099 fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S>;
1100 fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S>;
1101
1102 fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1103 fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1104
1105 fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1106 fn simd_conj<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1107 fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1108 fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1109
1110 fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1111 fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1112
1113 fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1114 fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1115 fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S>;
1116 fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S>;
1117 fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1118 fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S>;
1119
1120 fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self;
1121 fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self;
1122 fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1123 fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1124 fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1125 fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1126 fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1127
1128 fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1129 fn simd_index_select<S: Simd>(
1130 ctx: &Self::SimdCtx<S>,
1131 mask: Self::SimdMask<S>,
1132 lhs: Self::SimdIndex<S>,
1133 rhs: Self::SimdIndex<S>,
1134 ) -> Self::SimdIndex<S>;
1135
1136 fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S>;
1137 fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S>;
1138 fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S>;
1139 #[inline(always)]
1140 fn simd_index_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1141 Self::simd_index_less_than(ctx, rhs, lhs)
1142 }
1143 #[inline(always)]
1144 fn simd_index_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1145 Self::simd_not_mask(ctx, Self::simd_index_less_than(ctx, rhs, lhs))
1146 }
1147 #[inline(always)]
1148 fn simd_index_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1149 Self::simd_not_mask(ctx, Self::simd_index_greater_than(ctx, rhs, lhs))
1150 }
1151
1152 fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S>;
1153 fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S>;
1154 fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S>;
1155 fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize;
1156
1157 #[inline(always)]
1158 fn simd_load<S: Simd>(ctx: &Self::SimdCtx<S>, ptr: &Self::SimdVec<S>) -> Self::SimdVec<S> {
1159 let simd = Self::ctx_from_simd(ctx);
1160 if try_const! { Self::Unit::IS_NATIVE_F32 } {
1161 simd.deinterleave_shfl_f32s(*ptr)
1162 } else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1163 simd.deinterleave_shfl_f64s(*ptr)
1164 } else {
1165 panic!();
1166 }
1167 }
1168
1169 #[inline(always)]
1170 fn simd_store<S: Simd>(ctx: &Self::SimdCtx<S>, ptr: &mut Self::SimdVec<S>, value: Self::SimdVec<S>) {
1171 let simd = Self::ctx_from_simd(ctx);
1172 if try_const! { Self::Unit::IS_NATIVE_F32 } {
1173 *ptr = simd.deinterleave_shfl_f32s(value)
1174 } else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1175 *ptr = simd.deinterleave_shfl_f64s(value)
1176 } else {
1177 panic!();
1178 }
1179 }
1180
1181 #[inline(always)]
1182 unsafe fn simd_mask_load<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
1183 let simd = Self::ctx_from_simd(ctx);
1184 let value = Self::simd_mask_load_raw(ctx, mask, ptr);
1185 if try_const! { Self::Unit::IS_NATIVE_F32 } {
1186 simd.deinterleave_shfl_f32s(value)
1187 } else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1188 simd.deinterleave_shfl_f64s(value)
1189 } else {
1190 panic!();
1191 }
1192 }
1193
1194 #[inline(always)]
1195 unsafe fn simd_mask_store<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, value: Self::SimdVec<S>) {
1196 let simd = Self::ctx_from_simd(ctx);
1197 if try_const! { Self::Unit::IS_NATIVE_F32 } {
1198 Self::simd_mask_store_raw(ctx, mask, ptr, simd.deinterleave_shfl_f32s(value))
1199 } else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1200 Self::simd_mask_store_raw(ctx, mask, ptr, simd.deinterleave_shfl_f64s(value))
1201 } else {
1202 panic!();
1203 }
1204 }
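    // The provided load/store defaults only cover types whose `Unit` is native f32/f64: they
    // route every memory access through `deinterleave_shfl_*` to convert between the in-memory
    // layout and the register layout, and `panic!()` for anything else, so other field types
    // must override them.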
1205
1206 #[inline(always)]
1207 fn simd_iota<S: Simd>(ctx: &Self::SimdCtx<S>) -> Self::SimdIndex<S> {
1208 let simd = Self::ctx_from_simd(ctx);
1209 struct Interleave<T>(T);
1210 unsafe impl<T> pulp::Interleave for Interleave<T> {}
1211
1212 unsafe {
1213 if try_const! { Self::Unit::IS_NATIVE_F32 } {
1214 core::mem::transmute_copy::<_, Self::SimdIndex<S>>(&simd.deinterleave_shfl_f32s(Interleave(core::mem::transmute_copy::<
1215 _,
1216 Self::SimdVec<S>,
1217 >(
1218 &pulp::iota_32::<Interleave<Self>>()
1219 ))))
1220 } else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1221 core::mem::transmute_copy::<_, Self::SimdIndex<S>>(
1222 &simd.deinterleave_shfl_f64s(core::mem::transmute_copy::<_, Self::SimdVec<S>>(&pulp::iota_64::<Interleave<Self>>())),
1223 )
1224 } else {
1225 panic!();
1226 }
1227 }
1228 }
1229}
1230
1231pub trait RealField:
1232 ComplexField<Real = Self, Conj = Self> + DivByRef<Output = Self> + PartialOrd + num_traits::NumOps + num_traits::Num + core::ops::Neg<Output = Self>
1233{
1234 fn epsilon_impl() -> Self;
1235 fn nbits_impl() -> usize;
1236
1237 fn min_positive_impl() -> Self;
1238 fn max_positive_impl() -> Self;
1239 fn sqrt_min_positive_impl() -> Self;
1240 fn sqrt_max_positive_impl() -> Self;
1241}
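// For a real field, `Real == Self` and `Conj == Self`, so conjugation is the identity.
// `epsilon_impl`/`nbits_impl` expose the working precision (e.g. `f64::EPSILON` and 53 bits for
// `f64`), and the min/max-positive bounds (and their square roots) are the scaling thresholds
// used by `abs_impl` and `recip_impl` above.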
1242
1243impl ComplexField for f32 {
1244 type Arch = pulp::Arch;
1245 type Index = u32;
1246 type Real = Self;
1247 type SimdCtx<S: Simd> = S;
1248 type SimdIndex<S: Simd> = S::u32s;
1249 type SimdMask<S: Simd> = S::m32s;
1250 type SimdMemMask<S: Simd> = pulp::MemMask<S::m32s>;
1251 type SimdVec<S: Simd> = S::f32s;
1252 type Unit = Self;
1253
1254 const IS_NATIVE_F32: bool = true;
1255 const IS_REAL: bool = true;
1256 const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
1257
1258 #[inline(always)]
1259 fn zero_impl() -> Self {
1260 0.0
1261 }
1262
1263 #[inline(always)]
1264 fn one_impl() -> Self {
1265 1.0
1266 }
1267
1268 #[inline(always)]
1269 fn nan_impl() -> Self {
1270 Self::NAN
1271 }
1272
1273 #[inline(always)]
1274 fn infinity_impl() -> Self {
1275 Self::INFINITY
1276 }
1277
1278 #[inline(always)]
1279 fn from_real_impl(value: &Self) -> Self {
1280 *value
1281 }
1282
1283 #[inline(always)]
1284 fn from_f64_impl(value: f64) -> Self {
1285 value as _
1286 }
1287
1288 #[inline(always)]
1289 fn real_part_impl(value: &Self) -> Self {
1290 *value
1291 }
1292
1293 #[inline(always)]
1294 fn imag_part_impl(_: &Self) -> Self {
1295 0.0
1296 }
1297
1298 #[inline(always)]
1299 fn copy_impl(value: &Self) -> Self {
1300 *value
1301 }
1302
1303 #[inline(always)]
1304 fn conj_impl(value: &Self) -> Self {
1305 *value
1306 }
1307
1308 #[inline(always)]
1309 fn recip_impl(value: &Self) -> Self {
1310 1.0 / *value
1311 }
1312
1313 #[inline(always)]
1314 fn sqrt_impl(value: &Self) -> Self {
1315 libm::sqrtf(*value)
1316 }
1317
1318 #[inline(always)]
1319 fn abs_impl(value: &Self) -> Self {
1320 (*value).abs()
1321 }
1322
1323 #[inline(always)]
1324 fn abs1_impl(value: &Self) -> Self {
1325 (*value).abs()
1326 }
1327
1328 #[inline(always)]
1329 fn abs2_impl(value: &Self) -> Self {
1330 (*value) * (*value)
1331 }
1332
1333 #[inline(always)]
1334 fn mul_real_impl(lhs: &Self, rhs: &Self) -> Self {
1335 (*lhs) * (*rhs)
1336 }
1337
1338 #[inline(always)]
1339 fn mul_pow2_impl(lhs: &Self, rhs: &Self) -> Self {
1340 (*lhs) * (*rhs)
1341 }
1342
1343 #[inline(always)]
1344 fn is_finite_impl(value: &Self) -> bool {
1345 (*value).is_finite()
1346 }
1347
1348 #[inline(always)]
1349 fn is_nan_impl(value: &Self) -> bool {
1350 (*value).is_nan()
1351 }
1352
1353 #[inline(always)]
1354 fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
1355 simd
1356 }
1357
1358 #[inline(always)]
1359 fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
1360 ctx.splat_f32s(*value)
1361 }
1362
1363 #[inline(always)]
1364 fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
1365 ctx.splat_f32s(*value)
1366 }
1367
1368 #[inline(always)]
1369 fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1370 ctx.add_f32s(lhs, rhs)
1371 }
1372
1373 #[inline(always)]
1374 fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1375 ctx.sub_f32s(lhs, rhs)
1376 }
1377
1378 #[inline(always)]
1379 fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1380 ctx.neg_f32s(value)
1381 }
1382
1383 #[inline(always)]
1384 fn simd_conj<S: Simd>(_: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1385 value
1386 }
1387
1388 #[inline(always)]
1389 fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1390 ctx.abs_f32s(value)
1391 }
1392
1393 #[inline(always)]
1394 fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1395 ctx.mul_f32s(lhs, rhs)
1396 }
1397
1398 #[inline(always)]
1399 fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1400 ctx.mul_f32s(lhs, real_rhs)
1401 }
1402
1403 #[inline(always)]
1404 fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1405 ctx.mul_f32s(lhs, real_rhs)
1406 }
1407
1408 #[inline(always)]
1409 fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1410 ctx.mul_f32s(lhs, rhs)
1411 }
1412
1413 #[inline(always)]
1414 fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1415 ctx.mul_add_e_f32s(lhs, rhs, acc)
1416 }
1417
1418 #[inline(always)]
1419 fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1420 ctx.mul_add_e_f32s(lhs, rhs, acc)
1421 }
1422
1423 #[inline(always)]
1424 fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1425 ctx.mul_f32s(value, value)
1426 }
1427
1428 #[inline(always)]
1429 fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1430 ctx.mul_add_e_f32s(value, value, acc)
1431 }
1432
1433 #[inline(always)]
1434 fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
1435 ctx.reduce_sum_f32s(value)
1436 }
1437
1438 #[inline(always)]
1439 fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
1440 ctx.reduce_max_f32s(value)
1441 }
1442
1443 #[inline(always)]
1444 fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1445 ctx.equal_f32s(real_lhs, real_rhs)
1446 }
1447
1448 #[inline(always)]
1449 fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1450 ctx.less_than_f32s(real_lhs, real_rhs)
1451 }
1452
1453 #[inline(always)]
1454 fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1455 ctx.greater_than_f32s(real_lhs, real_rhs)
1456 }
1457
1458 #[inline(always)]
1459 fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1460 ctx.less_than_or_equal_f32s(real_lhs, real_rhs)
1461 }
1462
1463 #[inline(always)]
1464 fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1465 ctx.greater_than_or_equal_f32s(real_lhs, real_rhs)
1466 }
1467
1468 #[inline(always)]
1469 fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1470 ctx.select_f32s_m32s(mask, lhs, rhs)
1471 }
1472
1473 #[inline(always)]
1474 fn simd_index_select<S: Simd>(
1475 ctx: &Self::SimdCtx<S>,
1476 mask: Self::SimdMask<S>,
1477 lhs: Self::SimdIndex<S>,
1478 rhs: Self::SimdIndex<S>,
1479 ) -> Self::SimdIndex<S> {
1480 ctx.select_u32s_m32s(mask, lhs, rhs)
1481 }
1482
1483 #[inline(always)]
1484 fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
1485 ctx.splat_u32s(value as _)
1486 }
1487
1488 #[inline(always)]
1489 fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
1490 ctx.add_u32s(lhs, rhs)
1491 }
1492
1493 #[inline(always)]
1494 fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1495 ctx.less_than_u32s(lhs, rhs)
1496 }
1497
1498 #[inline(always)]
1499 fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1500 ctx.abs_f32s(value)
1501 }
1502
1503 #[inline(always)]
1504 fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
1505 *ctx
1506 }
1507
1508 #[inline(always)]
1509 fn simd_and_mask<S: Simd>(simd: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
1510 simd.and_m32s(lhs, rhs)
1511 }
1512
1513 #[inline(always)]
1514 fn simd_or_mask<S: Simd>(simd: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
1515 simd.or_m32s(lhs, rhs)
1516 }
1517
1518 #[inline(always)]
1519 fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
1520 ctx.not_m32s(mask)
1521 }
1522
1523 #[inline(always)]
1524 fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
1525 ctx.first_true_m32s(value)
1526 }
1527
1528 #[inline(always)]
1529 fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S> {
1530 ctx.mask_between_m32s(start as _, end as _)
1531 }
1532
1533 #[inline(always)]
1534 fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S> {
1535 ctx.mask_between_m32s(start as _, end as _).mask()
1536 }
1537
1538 #[inline(always)]
1539 unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
1540 ctx.mask_load_ptr_f32s(mask, ptr as _)
1541 }
1542
1543 #[inline(always)]
1544 unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
1545 ctx.mask_store_ptr_f32s(mask, ptr as _, values);
1546 }
1547}
1548
1549impl RealField for f32 {
1550 #[inline(always)]
1551 fn epsilon_impl() -> Self {
1552 Self::EPSILON
1553 }
1554
1555 #[inline(always)]
1556 fn min_positive_impl() -> Self {
1557 Self::MIN_POSITIVE
1558 }
1559
1560 #[inline(always)]
1561 fn max_positive_impl() -> Self {
1562 Self::MIN_POSITIVE.recip()
1563 }
1564
1565 #[inline(always)]
1566 fn sqrt_min_positive_impl() -> Self {
1567 libm::sqrtf(Self::MIN_POSITIVE)
1568 }
1569
1570 #[inline(always)]
1571 fn sqrt_max_positive_impl() -> Self {
1572 libm::sqrtf(Self::MIN_POSITIVE.recip())
1573 }
1574
1575 #[inline(always)]
1576 fn nbits_impl() -> usize {
1577 Self::MANTISSA_DIGITS as usize
1578 }
1579}
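// Note that `max_positive_impl` is `MIN_POSITIVE.recip()` (about 8.5e37 for `f32`) rather than
// `f32::MAX`, so `min_positive * max_positive == 1` exactly and the `sqrt_*` thresholds used by
// the scaled `abs_impl`/`recip_impl` are exact reciprocals of each other.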
1580
1581impl ComplexField for f64 {
1582 type Arch = pulp::Arch;
1583 type Index = u64;
1584 type Real = Self;
1585 type SimdCtx<S: Simd> = S;
1586 type SimdIndex<S: Simd> = S::u64s;
1587 type SimdMask<S: Simd> = S::m64s;
1588 type SimdMemMask<S: Simd> = pulp::MemMask<S::m64s>;
1589 type SimdVec<S: Simd> = S::f64s;
1590 type Unit = Self;
1591
1592 const IS_NATIVE_F64: bool = true;
1593 const IS_REAL: bool = true;
1594 const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
1595
1596 #[inline(always)]
1597 fn zero_impl() -> Self {
1598 0.0
1599 }
1600
1601 #[inline(always)]
1602 fn one_impl() -> Self {
1603 1.0
1604 }
1605
1606 #[inline(always)]
1607 fn nan_impl() -> Self {
1608 Self::NAN
1609 }
1610
1611 #[inline(always)]
1612 fn infinity_impl() -> Self {
1613 Self::INFINITY
1614 }
1615
1616 #[inline(always)]
1617 fn from_real_impl(value: &Self) -> Self {
1618 *value
1619 }
1620
1621 #[inline(always)]
1622 fn from_f64_impl(value: f64) -> Self {
1623 value as _
1624 }
1625
1626 #[inline(always)]
1627 fn real_part_impl(value: &Self) -> Self {
1628 *value
1629 }
1630
1631 #[inline(always)]
1632 fn imag_part_impl(_: &Self) -> Self {
1633 0.0
1634 }
1635
1636 #[inline(always)]
1637 fn copy_impl(value: &Self) -> Self {
1638 *value
1639 }
1640
1641 #[inline(always)]
1642 fn conj_impl(value: &Self) -> Self {
1643 *value
1644 }
1645
1646 #[inline(always)]
1647 fn recip_impl(value: &Self) -> Self {
1648 1.0 / *value
1649 }
1650
1651 #[inline(always)]
1652 fn sqrt_impl(value: &Self) -> Self {
1653 libm::sqrt(*value)
1654 }
1655
1656 #[inline(always)]
1657 fn abs_impl(value: &Self) -> Self {
1658 (*value).abs()
1659 }
1660
1661 #[inline(always)]
1662 fn abs1_impl(value: &Self) -> Self {
1663 (*value).abs()
1664 }
1665
1666 #[inline(always)]
1667 fn abs2_impl(value: &Self) -> Self {
1668 (*value) * (*value)
1669 }
1670
1671 #[inline(always)]
1672 fn mul_real_impl(lhs: &Self, rhs: &Self) -> Self {
1673 (*lhs) * (*rhs)
1674 }
1675
1676 #[inline(always)]
1677 fn mul_pow2_impl(lhs: &Self, rhs: &Self) -> Self {
1678 (*lhs) * (*rhs)
1679 }
1680
1681 #[inline(always)]
1682 fn is_nan_impl(value: &Self) -> bool {
1683 (*value).is_nan()
1684 }
1685
1686 #[inline(always)]
1687 fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
1688 simd
1689 }
1690
1691 #[inline(always)]
1692 fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
1693 ctx.splat_f64s(*value)
1694 }
1695
1696 #[inline(always)]
1697 fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
1698 ctx.splat_f64s(*value)
1699 }
1700
1701 #[inline(always)]
1702 fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1703 ctx.add_f64s(lhs, rhs)
1704 }
1705
1706 #[inline(always)]
1707 fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1708 ctx.sub_f64s(lhs, rhs)
1709 }
1710
1711 #[inline(always)]
1712 fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1713 ctx.neg_f64s(value)
1714 }
1715
1716 #[inline(always)]
1717 fn simd_conj<S: Simd>(_: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1718 value
1719 }
1720
1721 #[inline(always)]
1722 fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1723 ctx.abs_f64s(value)
1724 }
1725
1726 #[inline(always)]
1727 fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1728 ctx.mul_f64s(lhs, rhs)
1729 }
1730
1731 #[inline(always)]
1732 fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1733 ctx.mul_f64s(lhs, real_rhs)
1734 }
1735
1736 #[inline(always)]
1737 fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1738 ctx.mul_f64s(lhs, real_rhs)
1739 }
1740
1741 #[inline(always)]
1742 fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1743 ctx.mul_f64s(lhs, rhs)
1744 }
1745
1746 #[inline(always)]
1747 fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1748 ctx.mul_add_e_f64s(lhs, rhs, acc)
1749 }
1750
1751 #[inline(always)]
1752 fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1753 ctx.mul_add_e_f64s(lhs, rhs, acc)
1754 }
1755
1756 #[inline(always)]
1757 fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1758 ctx.mul_f64s(value, value)
1759 }
1760
1761 #[inline(always)]
1762 fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1763 ctx.mul_add_e_f64s(value, value, acc)
1764 }
1765
1766 #[inline(always)]
1767 fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
1768 ctx.reduce_sum_f64s(value)
1769 }
1770
1771 #[inline(always)]
1772 fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
1773 ctx.reduce_max_f64s(value)
1774 }
1775
1776 #[inline(always)]
1777 fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1778 ctx.equal_f64s(real_lhs, real_rhs)
1779 }
1780
1781 #[inline(always)]
1782 fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1783 ctx.less_than_f64s(real_lhs, real_rhs)
1784 }
1785
1786 #[inline(always)]
1787 fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1788 ctx.greater_than_f64s(real_lhs, real_rhs)
1789 }
1790
1791 #[inline(always)]
1792 fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1793 ctx.less_than_or_equal_f64s(real_lhs, real_rhs)
1794 }
1795
1796 #[inline(always)]
1797 fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1798 ctx.greater_than_or_equal_f64s(real_lhs, real_rhs)
1799 }
1800
1801 #[inline(always)]
1802 fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1803 ctx.select_f64s_m64s(mask, lhs, rhs)
1804 }
1805
1806 #[inline(always)]
1807 fn simd_index_select<S: Simd>(
1808 ctx: &Self::SimdCtx<S>,
1809 mask: Self::SimdMask<S>,
1810 lhs: Self::SimdIndex<S>,
1811 rhs: Self::SimdIndex<S>,
1812 ) -> Self::SimdIndex<S> {
1813 ctx.select_u64s_m64s(mask, lhs, rhs)
1814 }
1815
1816 #[inline(always)]
1817 fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
1818 ctx.splat_u64s(value as _)
1819 }
1820
1821 #[inline(always)]
1822 fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
1823 ctx.add_u64s(lhs, rhs)
1824 }
1825
1826 #[inline(always)]
1827 fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1828 ctx.less_than_u64s(lhs, rhs)
1829 }
1830
1831 #[inline(always)]
1832 fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1833 ctx.abs_f64s(value)
1834 }
1835
1836 #[inline(always)]
1837 fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
1838 *ctx
1839 }
1840
1841 #[inline(always)]
1842 fn simd_and_mask<S: Simd>(simd: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
1843 simd.and_m64s(lhs, rhs)
1844 }
1845
1846 #[inline(always)]
1847 fn simd_or_mask<S: Simd>(simd: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
1848 simd.or_m64s(lhs, rhs)
1849 }
1850
1851 #[inline(always)]
1852 fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
1853 ctx.not_m64s(mask)
1854 }
1855
1856 #[inline(always)]
1857 fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
1858 ctx.first_true_m64s(value)
1859 }
1860
1861 #[inline(always)]
1862 fn is_finite_impl(value: &Self) -> bool {
1863 (*value).is_finite()
1864 }
1865
1866 #[inline(always)]
1867 fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S> {
1868 ctx.mask_between_m64s(start as _, end as _)
1869 }
1870
1871 #[inline(always)]
1872 fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S> {
1873 ctx.mask_between_m64s(start as _, end as _).mask()
1874 }
1875
1876 #[inline(always)]
1877 unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
1878 ctx.mask_load_ptr_f64s(mask, ptr as _)
1879 }
1880
1881 #[inline(always)]
1882 unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
1883 ctx.mask_store_ptr_f64s(mask, ptr as _, values);
1884 }
1885}
1886
1887impl RealField for f64 {
1888 #[inline(always)]
1889 fn epsilon_impl() -> Self {
1890 Self::EPSILON
1891 }
1892
1893 #[inline(always)]
1894 fn min_positive_impl() -> Self {
1895 Self::MIN_POSITIVE
1896 }
1897
1898 #[inline(always)]
1899 fn max_positive_impl() -> Self {
1900 Self::MIN_POSITIVE.recip()
1901 }
1902
1903 #[inline(always)]
1904 fn sqrt_min_positive_impl() -> Self {
1905 libm::sqrt(Self::MIN_POSITIVE)
1906 }
1907
1908 #[inline(always)]
1909 fn sqrt_max_positive_impl() -> Self {
1910 libm::sqrt(Self::MIN_POSITIVE.recip())
1911 }
1912
1913 #[inline(always)]
1914 fn nbits_impl() -> usize {
1915 Self::MANTISSA_DIGITS as usize
1916 }
1917}
1918
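// Complex numbers over an arbitrary `RealField`: every operation is expressed
// through the real implementation `T`, and a complex SIMD vector is a
// `Complex` holding two real SIMD registers (one for the `re` lanes, one for
// the `im` lanes).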
1919impl<T: RealField> ComplexField for Complex<T> {
1920 type Arch = T::Arch;
1921 type Index = T::Index;
1922 type Real = T;
1923 type SimdCtx<S: Simd> = T::SimdCtx<S>;
1924 type SimdIndex<S: Simd> = T::SimdIndex<S>;
1925 type SimdMask<S: Simd> = T::SimdMask<S>;
1926 type SimdMemMask<S: Simd> = Complex<T::SimdMemMask<S>>;
1927 type SimdVec<S: Simd> = Complex<T::SimdVec<S>>;
1928 type Unit = T::Unit;
1929
1930 const IS_NATIVE_C32: bool = T::IS_NATIVE_F32;
1931 const IS_NATIVE_C64: bool = T::IS_NATIVE_F64;
1932 const IS_REAL: bool = false;
1933 const SIMD_CAPABILITIES: SimdCapabilities = T::SIMD_CAPABILITIES;
1934
1935 #[inline]
1936 fn zero_impl() -> Self {
1937 Complex {
1938 re: T::zero_impl(),
1939 im: T::zero_impl(),
1940 }
1941 }
1942
1943 #[inline]
1944 fn one_impl() -> Self {
1945 Complex {
1946 re: T::one_impl(),
1947 im: T::zero_impl(),
1948 }
1949 }
1950
1951 #[inline]
1952 fn nan_impl() -> Self {
1953 Complex {
1954 re: T::nan_impl(),
1955 im: T::nan_impl(),
1956 }
1957 }
1958
1959 #[inline]
1960 fn infinity_impl() -> Self {
1961 Complex {
1962 re: T::infinity_impl(),
1963 im: T::infinity_impl(),
1964 }
1965 }
1966
1967 #[inline]
1968 fn from_real_impl(real: &Self::Real) -> Self {
1969 Complex {
1970 re: real.clone(),
1971 im: T::zero_impl(),
1972 }
1973 }
1974
1975 #[inline]
1976 fn from_f64_impl(real: f64) -> Self {
1977 Complex {
1978 re: T::from_f64_impl(real),
1979 im: T::zero_impl(),
1980 }
1981 }
1982
1983 #[inline]
1984 fn real_part_impl(value: &Self) -> Self::Real {
1985 value.re.clone()
1986 }
1987
1988 #[inline]
1989 fn imag_part_impl(value: &Self) -> Self::Real {
1990 value.im.clone()
1991 }
1992
1993 #[inline]
1994 fn copy_impl(value: &Self) -> Self {
1995 value.clone()
1996 }
1997
1998 #[inline]
1999 fn conj_impl(value: &Self) -> Self {
2000 Self {
2001 re: value.re.clone(),
2002 im: value.im.neg_by_ref(),
2003 }
2004 }
2005
2006 #[inline]
2007 fn recip_impl(value: &Self) -> Self {
2008 let (re, im) = recip_impl(value.re.clone(), value.im.clone());
2009 Complex { re, im }
2010 }
2011
2012 #[inline]
2013 fn sqrt_impl(value: &Self) -> Self {
2014 let (re, im) = sqrt_impl(value.re.clone(), value.im.clone());
2015 Complex { re, im }
2016 }
2017
2018 #[inline]
2019 fn abs_impl(value: &Self) -> Self::Real {
2020 abs_impl(value.re.clone(), value.im.clone())
2021 }
2022
2023 #[inline]
2024 #[faer_macros::math]
2025 fn abs1_impl(value: &Self) -> Self::Real {
2026 abs1(value.re) + abs1(value.im)
2027 }
2028
2029 #[inline]
2030 #[faer_macros::math]
2031 fn abs2_impl(value: &Self) -> Self::Real {
2032 abs2(value.re) + abs2(value.im)
2033 }
2034
2035 #[inline]
2036 #[faer_macros::math]
2037 fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2038 Complex {
2039 re: lhs.re * rhs,
2040 im: lhs.im * rhs,
2041 }
2042 }
2043
2044 #[inline]
2045 #[faer_macros::math]
2046 fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2047 Complex {
2048 re: mul_pow2(lhs.re, rhs),
2049 im: mul_pow2(lhs.im, rhs),
2050 }
2051 }
2052
2053 #[inline]
2054 #[faer_macros::math]
2055 fn is_finite_impl(value: &Self) -> bool {
2056 is_finite(value.re) && is_finite(value.im)
2057 }
2058
2059 #[inline(always)]
2060 fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
2061 T::simd_ctx(simd)
2062 }
2063
2064 #[inline(always)]
2065 fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
2066 T::ctx_from_simd(ctx)
2067 }
2068
2069 #[inline(always)]
2070 fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
2071 Complex {
2072 re: T::simd_splat(ctx, &value.re),
2073 im: T::simd_splat(ctx, &value.im),
2074 }
2075 }
2076
2077 #[inline(always)]
2078 fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
2079 Complex {
2080 re: T::simd_splat_real(ctx, value),
2081 im: T::simd_splat_real(ctx, value),
2082 }
2083 }
2084
2085 #[inline(always)]
2086 fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2087 Complex {
2088 re: T::simd_add(ctx, lhs.re, rhs.re),
2089 im: T::simd_add(ctx, lhs.im, rhs.im),
2090 }
2091 }
2092
2093 #[inline(always)]
2094 fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2095 Complex {
2096 re: T::simd_sub(ctx, lhs.re, rhs.re),
2097 im: T::simd_sub(ctx, lhs.im, rhs.im),
2098 }
2099 }
2100
2101 #[inline(always)]
2102 fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2103 Complex {
2104 re: T::simd_neg(ctx, value.re),
2105 im: T::simd_neg(ctx, value.im),
2106 }
2107 }
2108
2109 #[inline(always)]
2110 fn simd_conj<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2111 Complex {
2112 re: value.re,
2113 im: T::simd_neg(ctx, value.im),
2114 }
2115 }
2116
2117 #[inline(always)]
2118 fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2119 let v = T::simd_add(ctx, T::simd_abs1(ctx, value.re), T::simd_abs1(ctx, value.im));
2120 Complex { re: v, im: v }
2121 }
2122
2123 #[inline(always)]
2124 fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2125 let re = T::simd_abs_max(ctx, value.re);
2126 let im = T::simd_abs_max(ctx, value.im);
2127
2128 let v = T::simd_select(ctx, T::simd_greater_than(ctx, re, im), re, im);
2129 Complex { re: v, im: v }
2130 }
2131
2132 #[inline(always)]
2133 fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2134 Complex {
2135 re: T::simd_mul_real(ctx, lhs.re, real_rhs.re),
2136 im: T::simd_mul_real(ctx, lhs.im, real_rhs.re),
2137 }
2138 }
2139
2140 #[inline(always)]
2141 fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2142 Complex {
2143 re: T::simd_mul_pow2(ctx, lhs.re, real_rhs.re),
2144 im: T::simd_mul_pow2(ctx, lhs.im, real_rhs.re),
2145 }
2146 }
2147
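// Complex product (a + ib)(c + id) = (ac - bd) + i(ad + bc), assembled from
// the real field's multiplies and fused multiply-adds.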
2148 #[inline(always)]
2149 fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2150 Complex {
2151 re: T::simd_mul_add(ctx, lhs.re, rhs.re, T::simd_neg(ctx, T::simd_mul(ctx, lhs.im, rhs.im))),
2152 im: T::simd_mul_add(ctx, lhs.re, rhs.im, T::simd_mul(ctx, lhs.im, rhs.re)),
2153 }
2154 }
2155
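// Conjugated product conj(a + ib)(c + id) = (ac + bd) + i(ad - bc).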
2156 #[inline(always)]
2157 fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2158 Complex {
2159 re: T::simd_mul_add(ctx, lhs.re, rhs.re, T::simd_mul(ctx, lhs.im, rhs.im)),
2160 im: T::simd_mul_add(ctx, lhs.re, rhs.im, T::simd_neg(ctx, T::simd_mul(ctx, lhs.im, rhs.re))),
2161 }
2162 }
2163
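// Fused complex multiply-accumulate acc + lhs * rhs, using the same expansion
// as `simd_mul`, so each component is two nested real mul-adds.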
2164 #[inline(always)]
2165 fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2166 Complex {
2167 re: T::simd_mul_add(ctx, T::simd_neg(ctx, lhs.im), rhs.im, T::simd_mul_add(ctx, lhs.re, rhs.re, acc.re)),
2168 im: T::simd_mul_add(ctx, lhs.re, rhs.im, T::simd_mul_add(ctx, lhs.im, rhs.re, acc.im)),
2169 }
2170 }
2171
2172 #[inline(always)]
2173 fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2174 Complex {
2175 re: T::simd_mul_add(ctx, lhs.im, rhs.im, T::simd_mul_add(ctx, lhs.re, rhs.re, acc.re)),
2176 im: T::simd_mul_add(ctx, lhs.re, rhs.im, T::simd_mul_add(ctx, T::simd_neg(ctx, lhs.im), rhs.re, acc.im)),
2177 }
2178 }
2179
2180 #[inline(always)]
2181 fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2182 let v = T::simd_abs2_add(ctx, value.re, T::simd_abs2(ctx, value.im));
2183 Complex { re: v, im: v }
2184 }
2185
2186 #[inline(always)]
2187 fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2188 let v = T::simd_abs2_add(ctx, value.re, T::simd_abs2_add(ctx, value.im, acc.re));
2189 Complex { re: v, im: v }
2190 }
2191
2192 #[inline(always)]
2193 fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
2194 Complex {
2195 re: T::simd_reduce_sum(ctx, value.re),
2196 im: T::simd_reduce_sum(ctx, value.im),
2197 }
2198 }
2199
2200 #[inline(always)]
2201 fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
2202 Complex {
2203 re: T::simd_reduce_max(ctx, value.re),
2204 im: T::simd_reduce_max(ctx, value.im),
2205 }
2206 }
2207
2208 #[inline(always)]
2209 fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2210 T::simd_and_mask(
2211 ctx,
2212 T::simd_equal(ctx, real_lhs.re, real_rhs.re),
2213 T::simd_equal(ctx, real_lhs.im, real_rhs.im),
2214 )
2215 }
2216
2217 #[inline(always)]
2218 fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2219 T::simd_less_than(ctx, real_lhs.re, real_rhs.re)
2220 }
2221
2222 #[inline(always)]
2223 fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2224 T::simd_less_than_or_equal(ctx, real_lhs.re, real_rhs.re)
2225 }
2226
2227 #[inline(always)]
2228 fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2229 T::simd_greater_than(ctx, real_lhs.re, real_rhs.re)
2230 }
2231
2232 #[inline(always)]
2233 fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2234 T::simd_greater_than_or_equal(ctx, real_lhs.re, real_rhs.re)
2235 }
2236
2237 #[inline(always)]
2238 fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2239 Complex {
2240 re: T::simd_select(ctx, mask, lhs.re, rhs.re),
2241 im: T::simd_select(ctx, mask, lhs.im, rhs.im),
2242 }
2243 }
2244
2245 #[inline(always)]
2246 fn simd_index_select<S: Simd>(
2247 ctx: &Self::SimdCtx<S>,
2248 mask: Self::SimdMask<S>,
2249 lhs: Self::SimdIndex<S>,
2250 rhs: Self::SimdIndex<S>,
2251 ) -> Self::SimdIndex<S> {
2252 T::simd_index_select(ctx, mask, lhs, rhs)
2253 }
2254
2255 #[inline(always)]
2256 fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
2257 T::simd_index_splat(ctx, value)
2258 }
2259
2260 #[inline(always)]
2261 fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
2262 T::simd_index_add(ctx, lhs, rhs)
2263 }
2264
2265 #[inline(always)]
2266 fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
2267 T::simd_index_less_than(ctx, lhs, rhs)
2268 }
2269
2270 #[inline(always)]
2271 fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
2272 T::simd_and_mask(ctx, lhs, rhs)
2273 }
2274
2275 #[inline(always)]
2276 fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
2277 T::simd_or_mask(ctx, lhs, rhs)
2278 }
2279
2280 #[inline(always)]
2281 fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
2282 T::simd_not_mask(ctx, mask)
2283 }
2284
2285 #[inline(always)]
2286 fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
2287 T::simd_first_true_mask(ctx, value)
2288 }
2289
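// One complex SIMD vector covers `n` real lanes per component, i.e. `2 * n`
// real slots in memory, so the complex element range is doubled and then
// split: slots below `n` go to the `re` mask, the remaining ones (shifted down
// by `n`) to the `im` mask.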
2290 #[inline(always)]
2291 fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S> {
2292 let n = core::mem::size_of::<Self::SimdVec<S>>() / core::mem::size_of::<Self>();
2293 let start = start.zx() * 2;
2294 let end = end.zx() * 2;
2295
2296 let re = T::simd_mem_mask_between(ctx, Self::Index::truncate(start.min(n)), Self::Index::truncate(end.min(n)));
2297 let im = T::simd_mem_mask_between(ctx, Self::Index::truncate(start.max(n) - n), Self::Index::truncate(end.max(n) - n));
2298 Complex { re, im }
2299 }
2300
2301 #[inline(always)]
2302 fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S> {
2303 T::simd_mask_between(ctx, start, end)
2304 }
2305
2306 #[inline(always)]
2307 unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
2308 Complex {
2309 re: T::simd_mask_load_raw(ctx, mask.re, core::ptr::addr_of!((*ptr).re)),
2310 im: T::simd_mask_load_raw(ctx, mask.im, core::ptr::addr_of!((*ptr).im)),
2311 }
2312 }
2313
2314 #[inline(always)]
2315 unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
2316 T::simd_mask_store_raw(ctx, mask.re, core::ptr::addr_of_mut!((*ptr).re), values.re);
2317 T::simd_mask_store_raw(ctx, mask.im, core::ptr::addr_of_mut!((*ptr).im), values.im);
2318 }
2319}
2320
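// Transparent wrappers around `Complex<T>` used for the natively supported
// interleaved complex SIMD types; `ComplexImplConj` marks storage that holds
// the conjugated values, as encoded by the `Conjugate` impls below.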
2321#[repr(transparent)]
2322#[doc(hidden)]
2323#[derive(Copy, Clone, Debug, PartialEq)]
2324pub struct ComplexImpl<T>(Complex<T>);
2325
2326#[repr(transparent)]
2327#[doc(hidden)]
2328#[derive(Copy, Clone, Debug, PartialEq)]
2329pub struct ComplexImplConj<T>(Complex<T>);
2330
2331unsafe impl Conjugate for ComplexImpl<f32> {
2332 type Canonical = ComplexImpl<f32>;
2333 type Conj = ComplexImplConj<f32>;
2334
2335 const IS_CANONICAL: bool = true;
2336}
2337unsafe impl Conjugate for ComplexImplConj<f32> {
2338 type Canonical = ComplexImpl<f32>;
2339 type Conj = ComplexImpl<f32>;
2340
2341 const IS_CANONICAL: bool = false;
2342}
2343unsafe impl Conjugate for ComplexImpl<f64> {
2344 type Canonical = ComplexImpl<f64>;
2345 type Conj = ComplexImplConj<f64>;
2346
2347 const IS_CANONICAL: bool = true;
2348}
2349unsafe impl Conjugate for ComplexImplConj<f64> {
2350 type Canonical = ComplexImpl<f64>;
2351 type Conj = ComplexImpl<f64>;
2352
2353 const IS_CANONICAL: bool = false;
2354}
2355
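// By-reference arithmetic for `ComplexImpl`, delegating to the `math_utils`
// helpers.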
2356impl<T: RealField> core::ops::Neg for &ComplexImpl<T> {
2357 type Output = ComplexImpl<T>;
2358
2359 #[inline]
2360 fn neg(self) -> Self::Output {
2361 use math_utils::*;
2362
2363 ComplexImpl(neg(&self.0))
2364 }
2365}
2366impl<T: RealField> core::ops::Add<&ComplexImpl<T>> for &ComplexImpl<T> {
2367 type Output = ComplexImpl<T>;
2368
2369 #[inline]
2370 fn add(self, rhs: &ComplexImpl<T>) -> Self::Output {
2371 use math_utils::*;
2372
2373 ComplexImpl(add(&self.0, &rhs.0))
2374 }
2375}
2376impl<T: RealField> core::ops::Sub<&ComplexImpl<T>> for &ComplexImpl<T> {
2377 type Output = ComplexImpl<T>;
2378
2379 #[inline]
2380 fn sub(self, rhs: &ComplexImpl<T>) -> Self::Output {
2381 use math_utils::*;
2382
2383 ComplexImpl(sub(&self.0, &rhs.0))
2384 }
2385}
2386impl<T: RealField> core::ops::Mul<&ComplexImpl<T>> for &ComplexImpl<T> {
2387 type Output = ComplexImpl<T>;
2388
2389 #[inline]
2390 fn mul(self, rhs: &ComplexImpl<T>) -> Self::Output {
2391 use math_utils::*;
2392
2393 ComplexImpl(mul(&self.0, &rhs.0))
2394 }
2395}
2396
2397impl<T> From<Complex<T>> for ComplexImpl<T> {
2398 #[inline]
2399 fn from(value: Complex<T>) -> Self {
2400 Self(value)
2401 }
2402}
2403
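// Native single-precision complex field, backed by the SIMD backend's
// interleaved `c32s` vectors.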
2404impl ComplexField for ComplexImpl<f32> {
2405 type Arch = pulp::Arch;
2406 type Index = u32;
2407 type Real = f32;
2408 type SimdCtx<S: Simd> = S;
2409 type SimdIndex<S: Simd> = S::u32s;
2410 type SimdMask<S: Simd> = S::m32s;
2411 type SimdMemMask<S: Simd> = pulp::MemMask<S::m32s>;
2412 type SimdVec<S: Simd> = S::c32s;
2413 type Unit = f32;
2414
2415 const IS_NATIVE_C32: bool = true;
2416 const IS_REAL: bool = false;
2417 const SIMD_ABS_SPLIT_REAL_IMAG: bool = true;
2418 const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
2419
2420 #[inline]
2421 fn zero_impl() -> Self {
2422 Complex {
2423 re: f32::zero_impl(),
2424 im: f32::zero_impl(),
2425 }
2426 .into()
2427 }
2428
2429 #[inline]
2430 fn one_impl() -> Self {
2431 Complex {
2432 re: f32::one_impl(),
2433 im: f32::zero_impl(),
2434 }
2435 .into()
2436 }
2437
2438 #[inline]
2439 fn nan_impl() -> Self {
2440 Complex {
2441 re: f32::nan_impl(),
2442 im: f32::nan_impl(),
2443 }
2444 .into()
2445 }
2446
2447 #[inline]
2448 fn infinity_impl() -> Self {
2449 Complex {
2450 re: f32::infinity_impl(),
2451 im: f32::infinity_impl(),
2452 }
2453 .into()
2454 }
2455
2456 #[inline]
2457 fn from_real_impl(real: &Self::Real) -> Self {
2458 Complex {
2459 re: real.clone(),
2460 im: f32::zero_impl(),
2461 }
2462 .into()
2463 }
2464
2465 #[inline]
2466 fn from_f64_impl(real: f64) -> Self {
2467 Complex {
2468 re: f32::from_f64_impl(real),
2469 im: f32::zero_impl(),
2470 }
2471 .into()
2472 }
2473
2474 #[inline]
2475 fn real_part_impl(value: &Self) -> Self::Real {
2476 value.0.re.clone()
2477 }
2478
2479 #[inline]
2480 fn imag_part_impl(value: &Self) -> Self::Real {
2481 value.0.im.clone()
2482 }
2483
2484 #[inline]
2485 fn copy_impl(value: &Self) -> Self {
2486 value.clone()
2487 }
2488
2489 #[inline]
2490 fn conj_impl(value: &Self) -> Self {
2491 Complex {
2492 re: value.0.re.clone(),
2493 im: value.0.im.neg_by_ref(),
2494 }
2495 .into()
2496 }
2497
2498 #[inline]
2499 fn recip_impl(value: &Self) -> Self {
2500 let (re, im) = recip_impl(value.0.re.clone(), value.0.im.clone());
2501 Complex { re, im }.into()
2502 }
2503
2504 #[inline]
2505 fn sqrt_impl(value: &Self) -> Self {
2506 let (re, im) = sqrt_impl(value.0.re.clone(), value.0.im.clone());
2507 Complex { re, im }.into()
2508 }
2509
2510 #[inline]
2511 fn abs_impl(value: &Self) -> Self::Real {
2512 abs_impl(value.0.re.clone(), value.0.im.clone())
2513 }
2514
2515 #[inline]
2516 #[faer_macros::math]
2517 fn abs1_impl(value: &Self) -> Self::Real {
2518 abs1(value.0.re) + abs1(value.0.im)
2519 }
2520
2521 #[inline]
2522 #[faer_macros::math]
2523 fn abs2_impl(value: &Self) -> Self::Real {
2524 abs2(value.0.re) + abs2(value.0.im)
2525 }
2526
2527 #[inline]
2528 #[faer_macros::math]
2529 fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2530 Complex {
2531 re: lhs.0.re * *rhs,
2532 im: lhs.0.im * *rhs,
2533 }
2534 .into()
2535 }
2536
2537 #[inline]
2538 #[faer_macros::math]
2539 fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2540 Complex {
2541 re: mul_pow2(lhs.0.re, rhs),
2542 im: mul_pow2(lhs.0.im, rhs),
2543 }
2544 .into()
2545 }
2546
2547 #[inline]
2548 #[faer_macros::math]
2549 fn is_finite_impl(value: &Self) -> bool {
2550 is_finite(value.0.re) && is_finite(value.0.im)
2551 }
2552
2553 #[inline(always)]
2554 fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
2555 f32::simd_ctx(simd)
2556 }
2557
2558 #[inline(always)]
2559 fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
2560 f32::ctx_from_simd(ctx)
2561 }
2562
2563 #[inline(always)]
2564 fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
2565 ctx.splat_c32s(value.0)
2566 }
2567
2568 #[inline(always)]
2569 fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
2570 ctx.splat_c32s(Complex { re: *value, im: *value })
2571 }
2572
2573 #[inline(always)]
2574 fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2575 ctx.add_c32s(lhs, rhs)
2576 }
2577
2578 #[inline(always)]
2579 fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2580 ctx.sub_c32s(lhs, rhs)
2581 }
2582
2583 #[inline(always)]
2584 fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2585 ctx.neg_c32s(value)
2586 }
2587
2588 #[inline(always)]
2589 fn simd_conj<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2590 ctx.conj_c32s(value)
2591 }
2592
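// The operations below dispatch on the size of `S::c32s`: when it matches
// `S::f32s`, the complex lanes are reinterpreted as packed real lanes and
// handled with the real intrinsics; when it is a single `Complex<f32>` (the
// scalar fallback), the work is done in plain scalar code; any other layout is
// unsupported and panics.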
2593 #[inline(always)]
2594 fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2595 if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2596 bytemuck::cast(ctx.abs_f32s(bytemuck::cast(value)))
2597 } else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2598 let value: Complex<f32> = bytemuck::cast(value);
2599 let v = value.re.abs() + value.im.abs();
2600 bytemuck::cast(Complex { re: v, im: v })
2601 } else {
2602 panic!();
2603 }
2604 }
2605
2606 #[inline(always)]
2607 fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2608 if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2609 bytemuck::cast(ctx.abs_f32s(bytemuck::cast(value)))
2610 } else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2611 let value: Complex<f32> = bytemuck::cast(value);
2612 let re = value.re.abs();
2613 let im = value.im.abs();
2614 let v = if re > im { re } else { im };
2615 bytemuck::cast(Complex { re: v, im: v })
2616 } else {
2617 panic!();
2618 }
2619 }
2620
2621 #[inline(always)]
2622 fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2623 if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2624 bytemuck::cast(ctx.mul_f32s(bytemuck::cast(lhs), bytemuck::cast(real_rhs)))
2625 } else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2626 let mut lhs: Complex<f32> = bytemuck::cast(lhs);
2627 let rhs: Complex<f32> = bytemuck::cast(real_rhs);
2628 lhs *= rhs.re;
2629 bytemuck::cast(lhs)
2630 } else {
2631 panic!();
2632 }
2633 }
2634
2635 #[inline(always)]
2636 fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2637 Self::simd_mul_real(ctx, lhs, real_rhs)
2638 }
2639
2640 #[inline(always)]
2641 fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2642 ctx.mul_e_c32s(lhs, rhs)
2643 }
2644
2645 #[inline(always)]
2646 fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2647 ctx.conj_mul_e_c32s(lhs, rhs)
2648 }
2649
2650 #[inline(always)]
2651 fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2652 ctx.mul_add_e_c32s(lhs, rhs, acc)
2653 }
2654
2655 #[inline(always)]
2656 fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2657 ctx.conj_mul_add_e_c32s(lhs, rhs, acc)
2658 }
2659
2660 #[inline(always)]
2661 fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2662 if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2663 bytemuck::cast(ctx.mul_f32s(bytemuck::cast(value), bytemuck::cast(value)))
2664 } else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2665 let value: Complex<f32> = bytemuck::cast(value);
2666 let v = value.re * value.re + value.im * value.im;
2667 bytemuck::cast(Complex { re: v, im: v })
2668 } else {
2669 panic!();
2670 }
2671 }
2672
2673 #[inline(always)]
2674 fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2675 if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2676 bytemuck::cast(ctx.mul_add_f32s(bytemuck::cast(value), bytemuck::cast(value), bytemuck::cast(acc)))
2677 } else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2678 let value: Complex<f32> = bytemuck::cast(value);
2679 let acc: Complex<f32> = bytemuck::cast(acc);
2680 let v = value.re * value.re + value.im * value.im + acc.re;
2681 bytemuck::cast(Complex { re: v, im: v })
2682 } else {
2683 panic!();
2684 }
2685 }
2686
2687 #[inline(always)]
2688 fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
2689 ctx.reduce_sum_c32s(value).into()
2690 }
2691
2692 #[inline(always)]
2693 fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
2694 ctx.reduce_max_c32s(value).into()
2695 }
2696
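// Exact complex equality masks are not supported for the native layout; the
// ordered comparisons below operate on real-valued operands, as the
// `real_lhs` / `real_rhs` parameter names indicate.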
2697 #[inline(always)]
2698 fn simd_equal<S: Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
2699 panic!()
2700 }
2701
2702 #[inline(always)]
2703 fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2704 if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2705 ctx.less_than_f32s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
2706 } else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2707 assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });
2708
2709 let lhs: Complex<f32> = bytemuck::cast(real_lhs);
2710 let rhs: Complex<f32> = bytemuck::cast(real_rhs);
2711 unsafe { core::mem::transmute_copy(&(lhs.re < rhs.re)) }
2712 } else {
2713 panic!();
2714 }
2715 }
2716
2717 #[inline(always)]
2718 fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2719 if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2720 ctx.less_than_or_equal_f32s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
2721 } else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2722 assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });
2723
2724 let lhs: Complex<f32> = bytemuck::cast(real_lhs);
2725 let rhs: Complex<f32> = bytemuck::cast(real_rhs);
2726 unsafe { core::mem::transmute_copy(&(lhs.re <= rhs.re)) }
2727 } else {
2728 panic!();
2729 }
2730 }
2731
2732 #[inline(always)]
2733 fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2734 if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2735 ctx.greater_than_f32s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
2736 } else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2737 assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });
2738
2739 let lhs: Complex<f32> = bytemuck::cast(real_lhs);
2740 let rhs: Complex<f32> = bytemuck::cast(real_rhs);
2741 unsafe { core::mem::transmute_copy(&(lhs.re > rhs.re)) }
2742 } else {
2743 panic!();
2744 }
2745 }
2746
2747 #[inline(always)]
2748 fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2749 if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2750 ctx.greater_than_or_equal_f32s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
2751 } else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2752 assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });
2753
2754 let lhs: Complex<f32> = bytemuck::cast(real_lhs);
2755 let rhs: Complex<f32> = bytemuck::cast(real_rhs);
2756 unsafe { core::mem::transmute_copy(&(lhs.re >= rhs.re)) }
2757 } else {
2758 panic!();
2759 }
2760 }
2761
2762 #[inline(always)]
2763 fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2764 if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2765 bytemuck::cast(ctx.select_f32s_m32s(mask, bytemuck::cast(lhs), bytemuck::cast(rhs)))
2766 } else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2767 assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });
2768 let mask: bool = unsafe { core::mem::transmute_copy(&mask) };
2769 let lhs: Complex<f32> = bytemuck::cast(lhs);
2770 let rhs: Complex<f32> = bytemuck::cast(rhs);
2771 bytemuck::cast(if mask { lhs } else { rhs })
2772 } else {
2773 panic!();
2774 }
2775 }
2776
2777 #[inline(always)]
2778 fn simd_index_select<S: Simd>(
2779 ctx: &Self::SimdCtx<S>,
2780 mask: Self::SimdMask<S>,
2781 lhs: Self::SimdIndex<S>,
2782 rhs: Self::SimdIndex<S>,
2783 ) -> Self::SimdIndex<S> {
2784 f32::simd_index_select(ctx, mask, lhs, rhs)
2785 }
2786
2787 #[inline(always)]
2788 fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
2789 f32::simd_index_splat(ctx, value)
2790 }
2791
2792 #[inline(always)]
2793 fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
2794 f32::simd_index_add(ctx, lhs, rhs)
2795 }
2796
2797 #[inline(always)]
2798 fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
2799 f32::simd_index_less_than(ctx, lhs, rhs)
2800 }
2801
2802 #[inline(always)]
2803 fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
2804 f32::simd_and_mask(ctx, lhs, rhs)
2805 }
2806
2807 #[inline(always)]
2808 fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
2809 f32::simd_or_mask(ctx, lhs, rhs)
2810 }
2811
2812 #[inline(always)]
2813 fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
2814 f32::simd_not_mask(ctx, mask)
2815 }
2816
2817 #[inline(always)]
2818 fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
2819 f32::simd_first_true_mask(ctx, value)
2820 }
2821
2822 #[inline(always)]
2823 fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S> {
2824 ctx.mask_between_m32s((2 * start) as _, (2 * end) as _)
2825 }
2826
2827 #[inline(always)]
2828 fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S> {
2829 ctx.mask_between_m32s((2 * start) as _, (2 * end) as _).mask()
2830 }
2831
2832 #[inline(always)]
2833 unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
2834 ctx.mask_load_ptr_c32s(mask, ptr as _)
2835 }
2836
2837 #[inline(always)]
2838 unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
2839 ctx.mask_store_ptr_c32s(mask, ptr as _, values);
2840 }
2841}
2842
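// Double-precision counterpart of the impl above, operating on the backend's
// interleaved `c64s` vectors.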
2843impl ComplexField for ComplexImpl<f64> {
2844 type Arch = pulp::Arch;
2845 type Index = u64;
2846 type Real = f64;
2847 type SimdCtx<S: Simd> = S;
2848 type SimdIndex<S: Simd> = S::u64s;
2849 type SimdMask<S: Simd> = S::m64s;
2850 type SimdMemMask<S: Simd> = pulp::MemMask<S::m64s>;
2851 type SimdVec<S: Simd> = S::c64s;
2852 type Unit = f64;
2853
2854 const IS_NATIVE_C64: bool = true;
2855 const IS_REAL: bool = false;
2856 const SIMD_ABS_SPLIT_REAL_IMAG: bool = true;
2857 const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
2858
2859 #[inline]
2860 fn zero_impl() -> Self {
2861 Complex {
2862 re: f64::zero_impl(),
2863 im: f64::zero_impl(),
2864 }
2865 .into()
2866 }
2867
2868 #[inline]
2869 fn one_impl() -> Self {
2870 Complex {
2871 re: f64::one_impl(),
2872 im: f64::zero_impl(),
2873 }
2874 .into()
2875 }
2876
2877 #[inline]
2878 fn nan_impl() -> Self {
2879 Complex {
2880 re: f64::nan_impl(),
2881 im: f64::nan_impl(),
2882 }
2883 .into()
2884 }
2885
2886 #[inline]
2887 fn infinity_impl() -> Self {
2888 Complex {
2889 re: f64::infinity_impl(),
2890 im: f64::infinity_impl(),
2891 }
2892 .into()
2893 }
2894
2895 #[inline]
2896 fn from_real_impl(real: &Self::Real) -> Self {
2897 Complex {
2898 re: real.clone(),
2899 im: f64::zero_impl(),
2900 }
2901 .into()
2902 }
2903
2904 #[inline]
2905 fn from_f64_impl(real: f64) -> Self {
2906 Complex {
2907 re: f64::from_f64_impl(real),
2908 im: f64::zero_impl(),
2909 }
2910 .into()
2911 }
2912
2913 #[inline]
2914 fn real_part_impl(value: &Self) -> Self::Real {
2915 value.0.re.clone()
2916 }
2917
2918 #[inline]
2919 fn imag_part_impl(value: &Self) -> Self::Real {
2920 value.0.im.clone()
2921 }
2922
2923 #[inline]
2924 fn copy_impl(value: &Self) -> Self {
2925 value.clone()
2926 }
2927
2928 #[inline]
2929 fn conj_impl(value: &Self) -> Self {
2930 Complex {
2931 re: value.0.re.clone(),
2932 im: value.0.im.neg_by_ref(),
2933 }
2934 .into()
2935 }
2936
2937 #[inline]
2938 fn recip_impl(value: &Self) -> Self {
2939 let (re, im) = recip_impl(value.0.re.clone(), value.0.im.clone());
2940 Complex { re, im }.into()
2941 }
2942
2943 #[inline]
2944 fn sqrt_impl(value: &Self) -> Self {
2945 let (re, im) = sqrt_impl(value.0.re.clone(), value.0.im.clone());
2946 Complex { re, im }.into()
2947 }
2948
2949 #[inline]
2950 fn abs_impl(value: &Self) -> Self::Real {
2951 abs_impl(value.0.re.clone(), value.0.im.clone())
2952 }
2953
2954 #[inline]
2955 #[faer_macros::math]
2956 fn abs1_impl(value: &Self) -> Self::Real {
2957 abs1(value.0.re) + abs1(value.0.im)
2958 }
2959
2960 #[inline]
2961 #[faer_macros::math]
2962 fn abs2_impl(value: &Self) -> Self::Real {
2963 abs2(value.0.re) + abs2(value.0.im)
2964 }
2965
2966 #[inline]
2967 #[faer_macros::math]
2968 fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2969 Complex {
2970 re: lhs.0.re * *rhs,
2971 im: lhs.0.im * *rhs,
2972 }
2973 .into()
2974 }
2975
2976 #[inline]
2977 #[faer_macros::math]
2978 fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2979 Complex {
2980 re: mul_pow2(lhs.0.re, rhs),
2981 im: mul_pow2(lhs.0.im, rhs),
2982 }
2983 .into()
2984 }
2985
2986 #[inline]
2987 #[faer_macros::math]
2988 fn is_finite_impl(value: &Self) -> bool {
2989 is_finite(value.0.re) && is_finite(value.0.im)
2990 }
2991
2992 #[inline(always)]
2993 fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
2994 f64::simd_ctx(simd)
2995 }
2996
2997 #[inline(always)]
2998 fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
2999 f64::ctx_from_simd(ctx)
3000 }
3001
3002 #[inline(always)]
3003 fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
3004 ctx.splat_c64s(value.0)
3005 }
3006
3007 #[inline(always)]
3008 fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
3009 ctx.splat_c64s(Complex { re: *value, im: *value })
3010 }
3011
3012 #[inline(always)]
3013 fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3014 ctx.add_c64s(lhs, rhs)
3015 }
3016
3017 #[inline(always)]
3018 fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3019 ctx.sub_c64s(lhs, rhs)
3020 }
3021
3022 #[inline(always)]
3023 fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
3024 ctx.neg_c64s(value)
3025 }
3026
3027 #[inline(always)]
3028 fn simd_conj<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
3029 ctx.conj_c64s(value)
3030 }
3031
3032 #[inline(always)]
3033 fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
3034 if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3035 bytemuck::cast(ctx.abs_f64s(bytemuck::cast(value)))
3036 } else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3037 let value: Complex<f64> = bytemuck::cast(value);
3038 let v = value.re.abs() + value.im.abs();
3039 bytemuck::cast(Complex { re: v, im: v })
3040 } else {
3041 panic!();
3042 }
3043 }
3044
3045 #[inline(always)]
3046 fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
3047 if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3048 bytemuck::cast(ctx.abs_f64s(bytemuck::cast(value)))
3049 } else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3050 let value: Complex<f64> = bytemuck::cast(value);
3051 let re = value.re.abs();
3052 let im = value.im.abs();
3053 let v = if re > im { re } else { im };
3054 bytemuck::cast(Complex { re: v, im: v })
3055 } else {
3056 panic!();
3057 }
3058 }
3059
3060 #[inline(always)]
3061 fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3062 if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3063 bytemuck::cast(ctx.mul_f64s(bytemuck::cast(lhs), bytemuck::cast(real_rhs)))
3064 } else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3065 let mut lhs: Complex<f64> = bytemuck::cast(lhs);
3066 let rhs: Complex<f64> = bytemuck::cast(real_rhs);
3067 lhs *= rhs.re;
3068 bytemuck::cast(lhs)
3069 } else {
3070 panic!();
3071 }
3072 }
3073
3074 #[inline(always)]
3075 fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3076 Self::simd_mul_real(ctx, lhs, real_rhs)
3077 }
3078
3079 #[inline(always)]
3080 fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3081 ctx.mul_e_c64s(lhs, rhs)
3082 }
3083
3084 #[inline(always)]
3085 fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3086 ctx.conj_mul_e_c64s(lhs, rhs)
3087 }
3088
3089 #[inline(always)]
3090 fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
3091 ctx.mul_add_e_c64s(lhs, rhs, acc)
3092 }
3093
3094 #[inline(always)]
3095 fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
3096 ctx.conj_mul_add_e_c64s(lhs, rhs, acc)
3097 }
3098
3099 #[inline(always)]
3100 fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
3101 if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3102 bytemuck::cast(ctx.mul_f64s(bytemuck::cast(value), bytemuck::cast(value)))
3103 } else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3104 let value: Complex<f64> = bytemuck::cast(value);
3105 let v = value.re * value.re + value.im * value.im;
3106 bytemuck::cast(Complex { re: v, im: v })
3107 } else {
3108 panic!();
3109 }
3110 }
3111
3112 #[inline(always)]
3113 fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
3114 if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3115 bytemuck::cast(ctx.mul_add_f64s(bytemuck::cast(value), bytemuck::cast(value), bytemuck::cast(acc)))
3116 } else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3117 let value: Complex<f64> = bytemuck::cast(value);
3118 let acc: Complex<f64> = bytemuck::cast(acc);
3119 let v = value.re * value.re + value.im * value.im + acc.re;
3120 bytemuck::cast(Complex { re: v, im: v })
3121 } else {
3122 panic!();
3123 }
3124 }
3125
3126 #[inline(always)]
3127 fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
3128 ctx.reduce_sum_c64s(value).into()
3129 }
3130
3131 #[inline(always)]
3132 fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
3133 ctx.reduce_max_c64s(value).into()
3134 }
3135
3136 #[inline(always)]
3137 fn simd_equal<S: Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3138 panic!()
3139 }
3140
3141 #[inline(always)]
3142 fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
3143 if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3144 ctx.less_than_f64s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
3145 } else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3146 assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });
3147
3148 let lhs: Complex<f64> = bytemuck::cast(real_lhs);
3149 let rhs: Complex<f64> = bytemuck::cast(real_rhs);
3150 unsafe { core::mem::transmute_copy(&(lhs.re < rhs.re)) }
3151 } else {
3152 panic!();
3153 }
3154 }
3155
3156 #[inline(always)]
3157 fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
3158 if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3159 ctx.less_than_or_equal_f64s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
3160 } else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3161 assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });
3162
3163 let lhs: Complex<f64> = bytemuck::cast(real_lhs);
3164 let rhs: Complex<f64> = bytemuck::cast(real_rhs);
3165 unsafe { core::mem::transmute_copy(&(lhs.re <= rhs.re)) }
3166 } else {
3167 panic!();
3168 }
3169 }
3170
3171 #[inline(always)]
3172 fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
3173 if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3174 ctx.greater_than_f64s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
3175 } else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3176 assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });
3177
3178 let lhs: Complex<f64> = bytemuck::cast(real_lhs);
3179 let rhs: Complex<f64> = bytemuck::cast(real_rhs);
3180 unsafe { core::mem::transmute_copy(&(lhs.re > rhs.re)) }
3181 } else {
3182 panic!();
3183 }
3184 }
3185
3186 #[inline(always)]
3187 fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
3188 if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3189 ctx.greater_than_or_equal_f64s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
3190 } else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3191 assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });
3192
3193 let lhs: Complex<f64> = bytemuck::cast(real_lhs);
3194 let rhs: Complex<f64> = bytemuck::cast(real_rhs);
3195 unsafe { core::mem::transmute_copy(&(lhs.re >= rhs.re)) }
3196 } else {
3197 panic!();
3198 }
3199 }
3200
3201 #[inline(always)]
3202 fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3203 if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3204 bytemuck::cast(ctx.select_f64s_m64s(mask, bytemuck::cast(lhs), bytemuck::cast(rhs)))
3205 } else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3206 assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });
3207 let mask: bool = unsafe { core::mem::transmute_copy(&mask) };
3208 let lhs: Complex<f64> = bytemuck::cast(lhs);
3209 let rhs: Complex<f64> = bytemuck::cast(rhs);
3210 bytemuck::cast(if mask { lhs } else { rhs })
3211 } else {
3212 panic!();
3213 }
3214 }
3215
3216 #[inline(always)]
3217 fn simd_index_select<S: Simd>(
3218 ctx: &Self::SimdCtx<S>,
3219 mask: Self::SimdMask<S>,
3220 lhs: Self::SimdIndex<S>,
3221 rhs: Self::SimdIndex<S>,
3222 ) -> Self::SimdIndex<S> {
3223 f64::simd_index_select(ctx, mask, lhs, rhs)
3224 }
3225
3226 #[inline(always)]
3227 fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
3228 f64::simd_index_splat(ctx, value)
3229 }
3230
3231 #[inline(always)]
3232 fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
3233 f64::simd_index_add(ctx, lhs, rhs)
3234 }
3235
3236 #[inline(always)]
3237 fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
3238 f64::simd_index_less_than(ctx, lhs, rhs)
3239 }
3240
3241 #[inline(always)]
3242 fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
3243 f64::simd_and_mask(ctx, lhs, rhs)
3244 }
3245
3246 #[inline(always)]
3247 fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
3248 f64::simd_or_mask(ctx, lhs, rhs)
3249 }
3250
3251 #[inline(always)]
3252 fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
3253 f64::simd_not_mask(ctx, mask)
3254 }
3255
3256 #[inline(always)]
3257 fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
3258 f64::simd_first_true_mask(ctx, value)
3259 }
3260
3261 #[inline(always)]
3262 fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S> {
3263 ctx.mask_between_m64s((2 * start) as _, (2 * end) as _)
3264 }
3265
3266 #[inline(always)]
3267 fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S> {
3268 ctx.mask_between_m64s((2 * start) as _, (2 * end) as _).mask()
3269 }
3270
3271 #[inline(always)]
3272 unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
3273 ctx.mask_load_ptr_c64s(mask, ptr as _)
3274 }
3275
3276 #[inline(always)]
3277 unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
3278 ctx.mask_store_ptr_c64s(mask, ptr as _, values);
3279 }
3280}
3281
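// `Symbolic` is a zero-sized stand-in scalar: every operation returns
// `Symbolic` again, which makes it useful when only the structure of a
// computation matters and the numeric values are irrelevant.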
3282#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
3283pub struct Symbolic;
3284
3285impl core::ops::Add for Symbolic {
3286 type Output = Self;
3287
3288 fn add(self, _: Self) -> Self {
3289 Self
3290 }
3291}
3292impl core::ops::Sub for Symbolic {
3293 type Output = Self;
3294
3295 fn sub(self, _: Self) -> Self {
3296 Self
3297 }
3298}
3299impl core::ops::Mul for Symbolic {
3300 type Output = Self;
3301
3302 fn mul(self, _: Self) -> Self {
3303 Self
3304 }
3305}
3306impl core::ops::Div for Symbolic {
3307 type Output = Self;
3308
3309 fn div(self, _: Self) -> Self {
3310 Self
3311 }
3312}
3313impl core::ops::Neg for Symbolic {
3314 type Output = Self;
3315
3316 fn neg(self) -> Self {
3317 Self
3318 }
3319}
3320
3321impl core::ops::Add for &Symbolic {
3322 type Output = Symbolic;
3323
3324 fn add(self, _: Self) -> Symbolic {
3325 Symbolic
3326 }
3327}
3328impl core::ops::Sub for &Symbolic {
3329 type Output = Symbolic;
3330
3331 fn sub(self, _: Self) -> Symbolic {
3332 Symbolic
3333 }
3334}
3335impl core::ops::Mul for &Symbolic {
3336 type Output = Symbolic;
3337
3338 fn mul(self, _: Self) -> Symbolic {
3339 Symbolic
3340 }
3341}
3342impl core::ops::Div for &Symbolic {
3343 type Output = Symbolic;
3344
3345 fn div(self, _: Self) -> Symbolic {
3346 Symbolic
3347 }
3348}
3349impl core::ops::Neg for &Symbolic {
3350 type Output = Symbolic;
3351
3352 fn neg(self) -> Symbolic {
3353 Symbolic
3354 }
3355}
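// Minimal `num_traits` plumbing (`Rem`, `Zero`, `One`, `Num`) so `Symbolic`
// satisfies generic numeric bounds.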
3356
3357impl core::ops::Rem for Symbolic {
3358 type Output = Self;
3359
3360 fn rem(self, _: Self) -> Self {
3361 Self
3362 }
3363}
3364impl num_traits::Zero for Symbolic {
3365 fn zero() -> Self {
3366 Self
3367 }
3368
3369 fn is_zero(&self) -> bool {
3370 true
3371 }
3372}
3373impl num_traits::One for Symbolic {
3374 fn one() -> Self {
3375 Self
3376 }
3377
3378 fn is_one(&self) -> bool {
3379 true
3380 }
3381}
3382impl num_traits::Num for Symbolic {
3383 type FromStrRadixErr = core::convert::Infallible;
3384
3385 fn from_str_radix(_: &str, _: u32) -> Result<Self, Self::FromStrRadixErr> {
3386 Ok(Self)
3387 }
3388}
3389
3390impl Symbolic {
3391 #[inline]
3392 pub fn materialize(len: usize) -> &'static mut [Self] {
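// SAFETY: `Symbolic` is a zero-sized type, so a dangling well-aligned pointer
// is a valid base for a slice of any length and no memory is ever touched.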
3393 unsafe { core::slice::from_raw_parts_mut(core::ptr::NonNull::dangling().as_ptr(), len) }
3394 }
3395}
3396
3397impl RealField for Symbolic {
3398 fn epsilon_impl() -> Self {
3399 Self
3400 }
3401
3402 fn nbits_impl() -> usize {
3403 0
3404 }
3405
3406 fn min_positive_impl() -> Self {
3407 Self
3408 }
3409
3410 fn max_positive_impl() -> Self {
3411 Self
3412 }
3413
3414 fn sqrt_min_positive_impl() -> Self {
3415 Self
3416 }
3417
3418 fn sqrt_max_positive_impl() -> Self {
3419 Self
3420 }
3421}
3422
3423impl ComplexField for Symbolic {
3424 type Arch = pulp::Scalar;
3425 type Index = usize;
3426 type Real = Self;
3427 type SimdCtx<S: pulp::Simd> = S;
3428 type SimdIndex<S: pulp::Simd> = ();
3429 type SimdMask<S: pulp::Simd> = ();
3430 type SimdMemMask<S: pulp::Simd> = ();
3431 type SimdVec<S: pulp::Simd> = ();
3432 type Unit = Self;
3433
3434 const IS_REAL: bool = true;
3435 const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Copy;
3436
3437 fn zero_impl() -> Self {
3438 Self
3439 }
3440
3441 fn one_impl() -> Self {
3442 Self
3443 }
3444
3445 fn nan_impl() -> Self {
3446 Self
3447 }
3448
3449 fn infinity_impl() -> Self {
3450 Self
3451 }
3452
3453 fn from_real_impl(_: &Self::Real) -> Self {
3454 Self
3455 }
3456
3457 fn from_f64_impl(_: f64) -> Self {
3458 Self
3459 }
3460
3461 fn real_part_impl(_: &Self) -> Self::Real {
3462 Self
3463 }
3464
3465 fn imag_part_impl(_: &Self) -> Self::Real {
3466 Self
3467 }
3468
3469 fn copy_impl(_: &Self) -> Self {
3470 Self
3471 }
3472
3473 fn conj_impl(_: &Self) -> Self {
3474 Self
3475 }
3476
3477 fn recip_impl(_: &Self) -> Self {
3478 Self
3479 }
3480
3481 fn sqrt_impl(_: &Self) -> Self {
3482 Self
3483 }
3484
3485 fn abs_impl(_: &Self) -> Self::Real {
3486 Self
3487 }
3488
3489 fn abs1_impl(_: &Self) -> Self::Real {
3490 Self
3491 }
3492
3493 fn abs2_impl(_: &Self) -> Self::Real {
3494 Self
3495 }
3496
3497 fn mul_real_impl(_: &Self, _: &Self::Real) -> Self {
3498 Self
3499 }
3500
3501 fn mul_pow2_impl(_: &Self, _: &Self::Real) -> Self {
3502 Self
3503 }
3504
3505 fn is_finite_impl(_: &Self) -> bool {
3506 true
3507 }
3508
3509 fn simd_ctx<S: pulp::Simd>(simd: S) -> Self::SimdCtx<S> {
3510 simd
3511 }
3512
3513 fn ctx_from_simd<S: pulp::Simd>(simd: &Self::SimdCtx<S>) -> S {
3514 *simd
3515 }
3516
3517 fn simd_mem_mask_between<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::Index, _: Self::Index) -> Self::SimdMemMask<S> {
3518 ()
3519 }
3520
3521 unsafe fn simd_mask_load_raw<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMemMask<S>, _: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
3522 ()
3523 }
3524
3525 unsafe fn simd_mask_store_raw<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMemMask<S>, _: *mut Self::SimdVec<S>, _: Self::SimdVec<S>) {
3526 ()
3527 }
3528
3529 fn simd_splat<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: &Self) -> Self::SimdVec<S> {
3530 ()
3531 }
3532
3533 fn simd_splat_real<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: &Self::Real) -> Self::SimdVec<S> {
3534 ()
3535 }
3536
3537 fn simd_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3538 ()
3539 }
3540
3541 fn simd_sub<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3542 ()
3543 }
3544
3545 fn simd_neg<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3546 ()
3547 }
3548
3549 fn simd_conj<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3550 ()
3551 }
3552
3553 fn simd_abs1<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3554 ()
3555 }
3556
3557 fn simd_abs_max<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3558 ()
3559 }
3560
3561 fn simd_mul_real<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3562 ()
3563 }
3564
3565 fn simd_mul_pow2<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3566 ()
3567 }
3568
3569 fn simd_mul<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3570 ()
3571 }
3572
3573 fn simd_conj_mul<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3574 ()
3575 }
3576
3577 fn simd_mul_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3578 ()
3579 }
3580
3581 fn simd_conj_mul_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3582 ()
3583 }
3584
3585 fn simd_abs2<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3586 ()
3587 }
3588
3589 fn simd_abs2_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3590 ()
3591 }
3592
3593 fn simd_reduce_sum<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self {
3594 Self
3595 }
3596
3597 fn simd_reduce_max<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self {
3598 Self
3599 }
3600
3601 fn simd_equal<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3602 ()
3603 }
3604
3605 fn simd_less_than<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3606 ()
3607 }
3608
3609 fn simd_less_than_or_equal<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3610 ()
3611 }
3612
3613 fn simd_greater_than<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3614 ()
3615 }
3616
3617 fn simd_greater_than_or_equal<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3618 ()
3619 }
3620
3621 fn simd_select<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3622 ()
3623 }
3624
3625 fn simd_index_select<S: pulp::Simd>(
3626 _: &Self::SimdCtx<S>,
3627 _: Self::SimdMask<S>,
3628 _: Self::SimdIndex<S>,
3629 _: Self::SimdIndex<S>,
3630 ) -> Self::SimdIndex<S> {
3631 ()
3632 }
3633
3634 fn simd_index_splat<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::Index) -> Self::SimdIndex<S> {
3635 ()
3636 }
3637
3638 fn simd_index_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdIndex<S>, _: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
3639 ()
3640 }
3641
3642 fn simd_and_mask<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>, _: Self::SimdMask<S>) -> Self::SimdMask<S> {
3643 ()
3644 }
3645
3646 fn simd_or_mask<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>, _: Self::SimdMask<S>) -> Self::SimdMask<S> {
3647 ()
3648 }
3649
3650 fn simd_not_mask<S: Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>) -> Self::SimdMask<S> {
3651 ()
3652 }
3653
3654 fn simd_first_true_mask<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>) -> usize {
3655 0
3656 }
3657
3658 fn simd_mask_between<S: Simd>(_: &Self::SimdCtx<S>, _: Self::Index, _: Self::Index) -> Self::SimdMask<S> {
3659 ()
3660 }
3661
3662 fn simd_index_less_than<S: Simd>(_: &Self::SimdCtx<S>, _: Self::SimdIndex<S>, _: Self::SimdIndex<S>) -> Self::SimdMask<S> {
3663 ()
3664 }
3665}
3666
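// Public scalar aliases: native complex types, plus the extended-precision
// real type `fx128` and its complex counterpart.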
3667pub type c64 = Complex<f64>;
3668pub type c32 = Complex<f32>;
3669pub type fx128 = qd::Quad;
3670pub type cx128 = Complex<fx128>;
3671
3672pub extern crate num_traits;
3673pub extern crate pulp;
3674
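// Extended-precision real field. An `fx128` is stored as two `f64` components
// (`Quad(1.0, 0.0)` is one), and its SIMD vector is likewise a `Quad` of two
// `f64` registers, one per component.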
3675impl ComplexField for fx128 {
3676 type Arch = pulp::Arch;
3677 type Index = u64;
3678 type Real = Self;
3679 type SimdCtx<S: Simd> = S;
3680 type SimdIndex<S: Simd> = S::u64s;
3681 type SimdMask<S: Simd> = S::m64s;
3682 type SimdMemMask<S: Simd> = Quad<pulp::MemMask<S::m64s>>;
3683 type SimdVec<S: Simd> = Quad<S::f64s>;
3684 type Unit = f64;
3685
3686 const IS_REAL: bool = true;
3687 const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;

    #[inline(always)]
    fn zero_impl() -> Self {
        Self::ZERO
    }

    #[inline(always)]
    fn one_impl() -> Self {
        Quad(1.0, 0.0)
    }

    #[inline(always)]
    fn nan_impl() -> Self {
        Self::NAN
    }

    #[inline(always)]
    fn infinity_impl() -> Self {
        Self::INFINITY
    }

    #[inline(always)]
    fn from_real_impl(real: &Self::Real) -> Self {
        *real
    }

    #[inline(always)]
    fn from_f64_impl(real: f64) -> Self {
        real.into()
    }

    #[inline(always)]
    fn real_part_impl(value: &Self) -> Self::Real {
        *value
    }

    #[inline(always)]
    fn imag_part_impl(_: &Self) -> Self::Real {
        Self::ZERO
    }

    #[inline(always)]
    fn copy_impl(value: &Self) -> Self {
        *value
    }

    #[inline(always)]
    fn conj_impl(value: &Self) -> Self {
        *value
    }

    #[inline(always)]
    fn recip_impl(value: &Self) -> Self {
        Quad::from(1.0) / *value
    }

    #[inline(always)]
    fn sqrt_impl(value: &Self) -> Self {
        value.sqrt()
    }

    #[inline(always)]
    fn abs_impl(value: &Self) -> Self::Real {
        value.abs()
    }

    #[inline(always)]
    fn abs1_impl(value: &Self) -> Self::Real {
        value.abs()
    }

    #[inline(always)]
    fn abs2_impl(value: &Self) -> Self::Real {
        value * value
    }

    #[inline(always)]
    fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
        lhs * rhs
    }

    #[inline(always)]
    fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
        lhs * rhs
    }

    #[inline(always)]
    fn is_finite_impl(value: &Self) -> bool {
        value.0.is_finite() && value.1.is_finite()
    }

    #[inline(always)]
    fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
        simd
    }

    #[inline(always)]
    fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
        *ctx
    }

    #[inline(always)]
    fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S> {
        ctx.mask_between_m64s(start as _, end as _).mask()
    }

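    // The masked range is given in `fx128` elements; it is rescaled to f64-word
    // granularity (two words per element) and split into one `MemMask` per f64
    // register backing the SIMD vector.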
    #[inline(always)]
    fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S> {
        let n = (core::mem::size_of::<Self::SimdVec<S>>() / core::mem::size_of::<Self>()) as u64;
        let start = start * 2;
        let end = end * 2;

        let a = f64::simd_mem_mask_between(ctx, start.min(n), end.min(n));
        let b = f64::simd_mem_mask_between(ctx, start.max(n) - n, end.max(n) - n);
        Quad(a, b)
    }

    #[inline(always)]
    unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
        unsafe {
            Quad(
                f64::simd_mask_load_raw(ctx, mask.0, &raw const (*ptr).0),
                f64::simd_mask_load_raw(ctx, mask.1, &raw const (*ptr).1),
            )
        }
    }

    #[inline(always)]
    unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
        unsafe {
            Quad(
                f64::simd_mask_store_raw(ctx, mask.0, &raw mut (*ptr).0, values.0),
                f64::simd_mask_store_raw(ctx, mask.1, &raw mut (*ptr).1, values.1),
            );
        }
    }

    #[inline(always)]
    fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
        Quad(ctx.splat_f64s(value.0), ctx.splat_f64s(value.1))
    }

    #[inline(always)]
    fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
        Quad(ctx.splat_f64s(value.0), ctx.splat_f64s(value.1))
    }

    #[inline(always)]
    fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
        qd::simd::add_estimate(*ctx, lhs, rhs)
    }

    #[inline(always)]
    fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
        qd::simd::sub_estimate(*ctx, lhs, rhs)
    }

    #[inline(always)]
    fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
        qd::simd::neg(*ctx, value)
    }

    #[inline(always)]
    fn simd_conj<S: Simd>(_: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
        value
    }

    #[inline(always)]
    fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
        qd::simd::abs(*ctx, value)
    }

    #[inline(always)]
    fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
        qd::simd::abs(*ctx, value)
    }

    #[inline(always)]
    fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
        qd::simd::mul(*ctx, lhs, real_rhs)
    }

    #[inline(always)]
    fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
        qd::simd::mul(*ctx, lhs, real_rhs)
    }

    #[inline(always)]
    fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
        qd::simd::mul(*ctx, lhs, rhs)
    }

    #[inline(always)]
    fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
        qd::simd::mul(*ctx, lhs, rhs)
    }

    #[inline(always)]
    fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
        qd::simd::add_estimate(*ctx, qd::simd::mul(*ctx, lhs, rhs), acc)
    }

    #[inline(always)]
    fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
        qd::simd::add_estimate(*ctx, qd::simd::mul(*ctx, lhs, rhs), acc)
    }

    #[inline(always)]
    fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
        qd::simd::mul(*ctx, value, value)
    }

    #[inline(always)]
    fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
        qd::simd::add_estimate(*ctx, qd::simd::mul(*ctx, value, value), acc)
    }

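    // Horizontal reductions fall back to scalar code: the high and low
    // registers are viewed as `&[f64]` lane slices, and lane `i` is rebuilt as
    // `Quad(hi[i], lo[i])` before being folded into the accumulator.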
    #[inline(always)]
    fn simd_reduce_sum<S: Simd>(_: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
        let a = value.0;
        let b = value.1;
        let a: &[f64] = bytemuck::cast_slice(core::slice::from_ref(&a));
        let b: &[f64] = bytemuck::cast_slice(core::slice::from_ref(&b));
        let mut acc = Quad::ZERO;

        for (&a, &b) in core::iter::zip(a, b) {
            acc += Quad(a, b);
        }

        acc
    }

    #[inline(always)]
    fn simd_reduce_max<S: Simd>(_: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
        let a = value.0;
        let b = value.1;
        let a: &[f64] = bytemuck::cast_slice(core::slice::from_ref(&a));
        let b: &[f64] = bytemuck::cast_slice(core::slice::from_ref(&b));
        let mut acc = Quad::NEG_INFINITY;

        for (&a, &b) in core::iter::zip(a, b) {
            let val = Quad(a, b);
            if val > acc {
                acc = val
            }
        }

        acc
    }

    #[inline(always)]
    fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
        qd::simd::eq(*ctx, real_lhs, real_rhs)
    }

    #[inline(always)]
    fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
        qd::simd::less_than(*ctx, real_lhs, real_rhs)
    }

    #[inline(always)]
    fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
        qd::simd::less_than_or_equal(*ctx, real_lhs, real_rhs)
    }

    #[inline(always)]
    fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
        qd::simd::greater_than(*ctx, real_lhs, real_rhs)
    }

    #[inline(always)]
    fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
        qd::simd::greater_than_or_equal(*ctx, real_lhs, real_rhs)
    }

    #[inline(always)]
    fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
        Quad(ctx.select_f64s_m64s(mask, lhs.0, rhs.0), ctx.select_f64s_m64s(mask, lhs.1, rhs.1))
    }

    #[inline(always)]
    fn simd_index_select<S: Simd>(
        ctx: &Self::SimdCtx<S>,
        mask: Self::SimdMask<S>,
        lhs: Self::SimdIndex<S>,
        rhs: Self::SimdIndex<S>,
    ) -> Self::SimdIndex<S> {
        ctx.select_u64s_m64s(mask, lhs, rhs)
    }

    #[inline(always)]
    fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
        ctx.splat_u64s(value as u64)
    }

    #[inline(always)]
    fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
        ctx.add_u64s(lhs, rhs)
    }

    #[inline(always)]
    fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
        ctx.less_than_u64s(lhs, rhs)
    }

    #[inline(always)]
    fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
        ctx.and_m64s(lhs, rhs)
    }

    #[inline(always)]
    fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
        ctx.or_m64s(lhs, rhs)
    }

    #[inline(always)]
    fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
        ctx.not_m64s(mask)
    }

    #[inline(always)]
    fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
        ctx.first_true_m64s(value)
    }
}
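
// A sketch of how the SIMD hooks above compose through `pulp`'s runtime
// dispatch (illustrative only; `SplatMulSum` and the module/test names are
// hypothetical, not part of the crate's API).
#[cfg(test)]
mod fx128_simd_usage {
    use super::*;

    struct SplatMulSum;

    impl pulp::WithSimd for SplatMulSum {
        type Output = fx128;

        #[inline(always)]
        fn with_simd<S: Simd>(self, simd: S) -> fx128 {
            let ctx = <fx128 as ComplexField>::simd_ctx(simd);
            // broadcast 3 and 2 to every lane, multiply, then reduce to a scalar
            let lhs = <fx128 as ComplexField>::simd_splat(&ctx, &Quad::from(3.0));
            let rhs = <fx128 as ComplexField>::simd_splat(&ctx, &Quad::from(2.0));
            let prod = <fx128 as ComplexField>::simd_mul(&ctx, lhs, rhs);
            <fx128 as ComplexField>::simd_reduce_sum(&ctx, prod)
        }
    }

    #[test]
    fn splat_mul_reduce() {
        // the result is 6 × (lane count); the lane count depends on the selected instruction set
        let sum = pulp::Arch::new().dispatch(SplatMulSum);
        assert!(sum > Quad::ZERO);
    }
}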

impl RealField for fx128 {
    #[inline(always)]
    fn epsilon_impl() -> Self {
        Quad::EPSILON
    }

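    // Conservative precision estimate for double-word arithmetic: two f64
    // significands give roughly 2 × 53 ≈ 106 bits nominally.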
    #[inline(always)]
    fn nbits_impl() -> usize {
        100
    }

    #[inline(always)]
    fn min_positive_impl() -> Self {
        Quad::MIN_POSITIVE
    }

    #[inline(always)]
    fn max_positive_impl() -> Self {
        Quad::MIN_POSITIVE.recip()
    }

    #[inline(always)]
    fn sqrt_min_positive_impl() -> Self {
        Quad::MIN_POSITIVE.sqrt()
    }

    #[inline(always)]
    fn sqrt_max_positive_impl() -> Self {
        Quad::MIN_POSITIVE.recip().sqrt()
    }
}