faer_traits/
lib.rs

1#![cfg_attr(not(feature = "std"), no_std)]
2#![allow(non_camel_case_types)]
3
4use bytemuck::Pod;
5use core::fmt::Debug;
6use num_complex::Complex;
7use pulp::Simd;
8use qd::Quad;
9
10use math_utils::*;
11
12use pulp::try_const;
13
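/// Free-function wrappers around the `*_impl` methods of [`ComplexField`] and
/// [`RealField`], so generic code can write `abs(&x)` instead of
/// `T::abs_impl(x.by_ref())`.
///
/// Illustrative usage (a sketch; assumes the crate is built with its default
/// features and referenced as `faer_traits`):
///
/// ```
/// use faer_traits::math_utils::{abs, hypot};
///
/// // `abs` forwards to `f64::abs_impl`; `hypot` uses the overflow-safe scaling below.
/// assert_eq!(abs::<f64>(&-3.0), 3.0);
/// assert_eq!(hypot::<f64>(&3.0, &4.0), 5.0);
/// ```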
14pub mod math_utils {
15	use crate::{ByRef, ComplexField, RealField, abs_impl};
16	use pulp::try_const;
17
18	#[inline(always)]
19	#[must_use]
20	pub fn eps<T: RealField>() -> T {
21		T::Real::epsilon_impl()
22	}
23
24	#[inline(always)]
25	#[must_use]
26	pub fn nbits<T: ComplexField>() -> usize {
27		T::Real::nbits_impl()
28	}
29
30	#[inline(always)]
31	#[must_use]
32	pub fn min_positive<T: RealField>() -> T {
33		T::min_positive_impl()
34	}
35	#[inline(always)]
36	#[must_use]
37	pub fn max_positive<T: RealField>() -> T {
38		T::max_positive_impl()
39	}
40	#[inline(always)]
41	#[must_use]
42	pub fn sqrt_min_positive<T: RealField>() -> T {
43		T::sqrt_min_positive_impl()
44	}
45	#[inline(always)]
46	#[must_use]
47	pub fn sqrt_max_positive<T: RealField>() -> T {
48		T::sqrt_max_positive_impl()
49	}
50
51	#[inline(always)]
52	#[must_use]
53	pub fn zero<T: ComplexField>() -> T {
54		T::zero_impl()
55	}
56	#[inline(always)]
57	#[must_use]
58	pub fn one<T: ComplexField>() -> T {
59		T::one_impl()
60	}
61	#[inline(always)]
62	#[must_use]
63	pub fn nan<T: ComplexField>() -> T {
64		T::nan_impl()
65	}
66	#[inline(always)]
67	#[must_use]
68	pub fn infinity<T: ComplexField>() -> T {
69		T::infinity_impl()
70	}
71
72	#[inline(always)]
73	#[must_use]
74	pub fn real<T: ComplexField>(value: &T) -> T::Real {
75		T::real_part_impl((value).by_ref())
76	}
77	#[inline(always)]
78	#[must_use]
79	pub fn imag<T: ComplexField>(value: &T) -> T::Real {
80		T::imag_part_impl((value).by_ref())
81	}
82	#[inline(always)]
83	#[track_caller]
84	#[must_use]
85	pub fn neg<T: NegByRef>(value: &T) -> T::Output {
86		value.neg_by_ref()
87	}
88	#[inline(always)]
89	#[must_use]
90	pub fn copy<T: ComplexField>(value: &T) -> T {
91		T::copy_impl((value).by_ref())
92	}
93
94	#[inline(always)]
95	#[must_use]
96	pub fn conj<T: ComplexField>(value: &T) -> T {
97		T::conj_impl((value).by_ref())
98	}
99
100	#[inline(always)]
101	#[track_caller]
102	#[must_use]
103	pub fn add<T: AddByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
104		lhs.add_by_ref(rhs)
105	}
106	#[inline(always)]
107	#[track_caller]
108	#[must_use]
109	pub fn sub<T: SubByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
110		lhs.sub_by_ref(rhs)
111	}
112	#[inline(always)]
113	#[track_caller]
114	#[must_use]
115	pub fn mul<T: MulByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
116		lhs.mul_by_ref(rhs)
117	}
118	#[inline(always)]
119	#[track_caller]
120	#[must_use]
121	pub fn div<T: DivByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
122		lhs.div_by_ref(rhs)
123	}
124
125	#[inline(always)]
126	#[must_use]
127	pub fn mul_real<T: ComplexField>(lhs: &T, rhs: &T::Real) -> T {
128		T::mul_real_impl((lhs).by_ref(), (rhs).by_ref())
129	}
130
131	#[inline(always)]
132	#[must_use]
133	pub fn mul_pow2<T: ComplexField>(lhs: &T, rhs: &T::Real) -> T {
134		T::mul_pow2_impl((lhs).by_ref(), (rhs).by_ref())
135	}
136
137	#[inline(always)]
138	#[must_use]
139	pub fn abs1<T: ComplexField>(value: &T) -> T::Real {
140		T::abs1_impl((value).by_ref())
141	}
142
143	#[inline(always)]
144	#[must_use]
145	pub fn absmax<T: ComplexField>(value: &T) -> T::Real {
146		if try_const! { T::IS_REAL } {
147			T::abs1_impl(value)
148		} else {
149			max(&T::Real::abs1_impl(&real(value)), &T::Real::abs1_impl(&imag(value)))
150		}
151	}
152
153	#[inline(always)]
154	#[must_use]
155	pub fn abs<T: ComplexField>(value: &T) -> T::Real {
156		T::abs_impl((value).by_ref())
157	}
158
159	#[inline(always)]
160	#[must_use]
161	pub fn hypot<T: RealField>(lhs: &T, rhs: &T) -> T {
162		abs_impl::<T::Real>(lhs.clone(), rhs.clone())
163	}
164
165	#[inline(always)]
166	#[must_use]
167	pub fn abs2<T: ComplexField>(value: &T) -> T::Real {
168		T::abs2_impl((value).by_ref())
169	}
170
171	#[inline(always)]
172	#[must_use]
173	pub fn max<T: RealField>(lhs: &T, rhs: &T) -> T {
174		if lhs > rhs { copy(lhs) } else { copy(rhs) }
175	}
176	#[inline(always)]
177	#[must_use]
178	pub fn min<T: RealField>(lhs: &T, rhs: &T) -> T {
179		if lhs < rhs { copy(lhs) } else { copy(rhs) }
180	}
181
182	#[inline(always)]
183	#[must_use]
184	pub fn is_nan<T: ComplexField>(value: &T) -> bool {
185		T::is_nan_impl((value).by_ref())
186	}
187
188	#[inline(always)]
189	#[must_use]
190	pub fn is_finite<T: ComplexField>(value: &T) -> bool {
191		T::is_finite_impl((value).by_ref())
192	}
193
194	#[inline(always)]
195	#[must_use]
196	pub fn sqrt<T: ComplexField>(value: &T) -> T {
197		T::sqrt_impl((value).by_ref())
198	}
199	#[inline(always)]
200	#[must_use]
201	pub fn recip<T: ComplexField>(value: &T) -> T {
202		T::recip_impl((value).by_ref())
203	}
204
205	#[inline(always)]
206	#[must_use]
207	pub fn from_real<T: ComplexField>(value: &T::Real) -> T {
208		T::from_real_impl((value).by_ref())
209	}
210
211	#[inline(always)]
212	#[must_use]
213	pub fn from_f64<T: ComplexField>(value: f64) -> T {
214		T::from_f64_impl(value)
215	}
216
217	pub use crate::{AddByRef, DivByRef, MulByRef, NegByRef, SubByRef};
218}
219
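/// Addition through references. A blanket implementation is provided for any type
/// whose references implement [`core::ops::Add`], so `T` never needs to be `Copy`.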
220pub trait AddByRef<Rhs = Self> {
221	type Output;
222	fn add_by_ref(&self, rhs: &Rhs) -> Self::Output;
223}
224pub trait SubByRef<Rhs = Self> {
225	type Output;
226	fn sub_by_ref(&self, rhs: &Rhs) -> Self::Output;
227}
228pub trait NegByRef {
229	type Output;
230	fn neg_by_ref(&self) -> Self::Output;
231}
232pub trait MulByRef<Rhs = Self> {
233	type Output;
234	fn mul_by_ref(&self, rhs: &Rhs) -> Self::Output;
235}
236pub trait DivByRef<Rhs = Self> {
237	type Output;
238	fn div_by_ref(&self, rhs: &Rhs) -> Self::Output;
239}
240
241impl<Rhs, Lhs, Output> AddByRef<Rhs> for Lhs
242where
243	for<'a> &'a Lhs: core::ops::Add<&'a Rhs, Output = Output>,
244{
245	type Output = Output;
246
247	#[inline]
248	#[track_caller]
249	fn add_by_ref(&self, rhs: &Rhs) -> Self::Output {
250		self + rhs
251	}
252}
253impl<Rhs, Lhs, Output> SubByRef<Rhs> for Lhs
254where
255	for<'a> &'a Lhs: core::ops::Sub<&'a Rhs, Output = Output>,
256{
257	type Output = Output;
258
259	#[inline]
260	#[track_caller]
261	fn sub_by_ref(&self, rhs: &Rhs) -> Self::Output {
262		self - rhs
263	}
264}
265impl<Rhs, Lhs, Output> MulByRef<Rhs> for Lhs
266where
267	for<'a> &'a Lhs: core::ops::Mul<&'a Rhs, Output = Output>,
268{
269	type Output = Output;
270
271	#[inline]
272	#[track_caller]
273	fn mul_by_ref(&self, rhs: &Rhs) -> Self::Output {
274		self * rhs
275	}
276}
277impl<Rhs, Lhs, Output> DivByRef<Rhs> for Lhs
278where
279	for<'a> &'a Lhs: core::ops::Div<&'a Rhs, Output = Output>,
280{
281	type Output = Output;
282
283	#[inline]
284	#[track_caller]
285	fn div_by_ref(&self, rhs: &Rhs) -> Self::Output {
286		self / rhs
287	}
288}
289
290impl<T, Output> NegByRef for T
291where
292	for<'a> &'a T: core::ops::Neg<Output = Output>,
293{
294	type Output = Output;
295
296	#[inline]
297	#[track_caller]
298	fn neg_by_ref(&self) -> Self::Output {
299		-self
300	}
301}
302
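/// Overflow-safe `sqrt(re² + im²)`: when either component is very large or very
/// small, the inputs are pre-scaled by `sqrt(MIN_POSITIVE)` or its reciprocal so
/// that squaring cannot overflow or underflow, and the scaling is undone afterwards.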
303#[faer_macros::math]
304fn abs_impl<T: RealField>(re: T, im: T) -> T {
305	let small = sqrt_min_positive();
306	let big = sqrt_max_positive();
307	let one = one();
308	let re_abs = abs(re);
309	let im_abs = abs(im);
310
311	if re_abs > big || im_abs > big {
312		sqrt(abs2(re * small) + abs2(im * small)) * big
313	} else if re_abs > one || im_abs > one {
314		sqrt(abs2(re) + abs2(im))
315	} else {
316		sqrt(abs2(re * big) + abs2(im * big)) * small
317	}
318}
319
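/// Complex reciprocal `1 / (re + i·im)` with the same scaling strategy as
/// [`abs_impl`]. NaN inputs return NaN, a zero input returns infinity, and
/// non-finite inputs return zero.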
320#[faer_macros::math]
321fn recip_impl<T: RealField>(re: T, im: T) -> (T, T) {
322	if is_nan(re) || is_nan(im) {
323		return (nan(), nan());
324	}
325	if re == zero() && im == zero() {
326		return (infinity(), infinity());
327	}
328	if !is_finite(re) || !is_finite(im) {
329		return (zero(), zero());
330	}
331
332	let small = sqrt_min_positive();
333	let big = sqrt_max_positive();
334	let one = one();
335	let re_abs = abs(re);
336	let im_abs = abs(im);
337
338	if re_abs > big || im_abs > big {
339		let re = re * small;
340		let im = im * small;
341		let inv = recip(abs2(re) + abs2(im));
342		(((re * inv) * small), ((-im * inv) * small))
343	} else if re_abs > one || im_abs > one {
344		let inv = recip(abs2(re) + abs2(im));
345		((re * inv), (-im * inv))
346	} else {
347		let re = re * big;
348		let im = im * big;
349		let inv = recip(abs2(re) + abs2(im));
350		(((re * inv) * big), ((-im * inv) * big))
351	}
352}
353
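/// Principal square root of `re + i·im`, using
/// `out_re = sqrt((|z| + re) / 2)` and `out_im = ±sqrt((|z| - re) / 2)`,
/// with the sign of the imaginary output matching the sign of `im`.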
354#[faer_macros::math]
355fn sqrt_impl<T: RealField>(re: T, im: T) -> (T, T) {
356	let im_negative = im < zero();
357	let half = from_f64(0.5);
358	let abs = abs_impl(re.clone(), im.clone());
359
360	let mut sum = re + abs;
361	if sum < zero() {
362		sum = zero();
363	}
364
365	let out_re = sqrt(mul_pow2(sum, half));
366	let mut out_im = sqrt(max(zero(), mul_pow2(abs - re, half)));
367	if im_negative {
368		out_im = -out_im;
369	}
370	(out_re, out_im)
371}
372
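/// Cheap conversion of `T`, `&T`, or `&mut T` into `&T`, used by the generic
/// helpers above to accept values and references uniformly.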
373pub trait ByRef<T> {
374	fn by_ref(&self) -> &T;
375}
376impl<T> ByRef<T> for T {
377	#[inline]
378	fn by_ref(&self) -> &T {
379		self
380	}
381}
382impl<T> ByRef<T> for &T {
383	#[inline]
384	fn by_ref(&self) -> &T {
385		*self
386	}
387}
388impl<T> ByRef<T> for &mut T {
389	#[inline]
390	fn by_ref(&self) -> &T {
391		*self
392	}
393}
394
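/// Transparent wrapper around [`ComplexField::SimdCtx`] that exposes the SIMD
/// operations of `T` as methods. The `transmute_copy` calls below only reinterpret
/// between the associated SIMD types and the concrete register types; they do not
/// change layout.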
395#[repr(transparent)]
396pub struct SimdCtx<T: ComplexField, S: Simd>(pub T::SimdCtx<S>);
397
398#[repr(transparent)]
399pub struct SimdCtxCopy<T: ComplexField, S: Simd>(pub T::SimdCtx<S>);
400
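/// Newtype marking a SIMD register that holds real-valued data (absolute values,
/// squared moduli, and the like) rather than full complex lanes.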
401#[derive(Copy, Clone, Debug)]
402#[repr(transparent)]
403pub struct RealReg<T>(pub T);
404
405impl<T: ComplexField, S: Simd> SimdCtx<T, S> {
406	#[inline(always)]
407	pub fn new(ctx: &T::SimdCtx<S>) -> &Self {
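		// SAFETY: `Self` is `#[repr(transparent)]` over `T::SimdCtx<S>`, so casting the
		// reference through a pointer is sound.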
408		unsafe { &*(ctx as *const T::SimdCtx<S> as *const Self) }
409	}
410
411	#[inline(always)]
412	pub fn zero(&self) -> T::SimdVec<S> {
413		unsafe { core::mem::zeroed() }
414	}
415
416	#[inline(always)]
417	pub fn splat(&self, value: &T) -> T::SimdVec<S> {
418		unsafe { core::mem::transmute_copy(&T::simd_splat(&self.0, (value).by_ref())) }
419	}
420
421	#[inline(always)]
422	pub fn splat_real(&self, value: &T::Real) -> RealReg<T::SimdVec<S>> {
423		RealReg(unsafe { core::mem::transmute_copy(&T::simd_splat_real(&self.0, (value).by_ref())) })
424	}
425
426	#[inline(always)]
427	pub fn add(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
428		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
429		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
430		unsafe { core::mem::transmute_copy(&T::simd_add(&self.0, lhs, rhs)) }
431	}
432
433	#[inline(always)]
434	pub fn sub(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
435		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
436		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
437		unsafe { core::mem::transmute_copy(&T::simd_sub(&self.0, lhs, rhs)) }
438	}
439
440	#[inline(always)]
441	pub fn neg(&self, value: T::SimdVec<S>) -> T::SimdVec<S> {
442		let value = unsafe { core::mem::transmute_copy(&value) };
443		unsafe { core::mem::transmute_copy(&T::simd_neg(&self.0, value)) }
444	}
445
446	#[inline(always)]
447	pub fn conj(&self, value: T::SimdVec<S>) -> T::SimdVec<S> {
448		let value = unsafe { core::mem::transmute_copy(&value) };
449		unsafe { core::mem::transmute_copy(&T::simd_conj(&self.0, value)) }
450	}
451
452	#[inline(always)]
453	pub fn abs1(&self, value: T::SimdVec<S>) -> RealReg<T::SimdVec<S>> {
454		let value = unsafe { core::mem::transmute_copy(&value) };
455		RealReg(unsafe { core::mem::transmute_copy(&T::simd_abs1(&self.0, value)) })
456	}
457
458	#[inline(always)]
459	pub fn abs_max(&self, value: T::SimdVec<S>) -> RealReg<T::SimdVec<S>> {
460		let value = unsafe { core::mem::transmute_copy(&value) };
461		RealReg(unsafe { core::mem::transmute_copy(&T::simd_abs_max(&self.0, value)) })
462	}
463
464	#[inline(always)]
465	pub fn mul_real(&self, lhs: T::SimdVec<S>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdVec<S> {
466		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
467		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
468		unsafe { core::mem::transmute_copy(&T::simd_mul_real(&self.0, lhs, rhs)) }
469	}
470
471	#[inline(always)]
472	pub fn mul_pow2(&self, lhs: T::SimdVec<S>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdVec<S> {
473		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
474		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
475		unsafe { core::mem::transmute_copy(&T::simd_mul_pow2(&self.0, lhs, rhs)) }
476	}
477
478	#[inline(always)]
479	pub fn mul(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
480		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
481		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
482		unsafe { core::mem::transmute_copy(&T::simd_mul(&self.0, lhs, rhs)) }
483	}
484
485	#[inline(always)]
486	pub fn conj_mul(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
487		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
488		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
489		unsafe { core::mem::transmute_copy(&T::simd_conj_mul(&self.0, lhs, rhs)) }
490	}
491
492	#[inline(always)]
493	pub fn mul_add(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>, acc: T::SimdVec<S>) -> T::SimdVec<S> {
494		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
495		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
496		let acc = unsafe { core::mem::transmute_copy(&acc) };
497		unsafe { core::mem::transmute_copy(&T::simd_mul_add(&self.0, lhs, rhs, acc)) }
498	}
499
500	#[inline(always)]
501	pub fn conj_mul_add(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>, acc: T::SimdVec<S>) -> T::SimdVec<S> {
502		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
503		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
504		let acc = unsafe { core::mem::transmute_copy(&acc) };
505		unsafe { core::mem::transmute_copy(&T::simd_conj_mul_add(&self.0, lhs, rhs, acc)) }
506	}
507
508	#[inline(always)]
509	pub fn abs2(&self, value: T::SimdVec<S>) -> RealReg<T::SimdVec<S>> {
510		let value = unsafe { core::mem::transmute_copy(&value) };
511		RealReg(unsafe { core::mem::transmute_copy(&T::simd_abs2(&self.0, value)) })
512	}
513
514	#[inline(always)]
515	pub fn abs2_add(&self, value: T::SimdVec<S>, acc: RealReg<T::SimdVec<S>>) -> RealReg<T::SimdVec<S>> {
516		let value = unsafe { core::mem::transmute_copy(&value) };
517		let acc = unsafe { core::mem::transmute_copy(&acc) };
518		RealReg(unsafe { core::mem::transmute_copy(&T::simd_abs2_add(&self.0, value, acc)) })
519	}
520
521	#[inline(always)]
522	pub fn reduce_sum(&self, value: T::SimdVec<S>) -> T {
523		let value = unsafe { core::mem::transmute_copy(&value) };
524		unsafe { core::mem::transmute_copy(&T::simd_reduce_sum(&self.0, value)) }
525	}
526
527	#[inline(always)]
528	pub fn reduce_max(&self, value: RealReg<T::SimdVec<S>>) -> T {
529		let value = unsafe { core::mem::transmute_copy(&value) };
530		unsafe { core::mem::transmute_copy(&T::simd_reduce_max(&self.0, value)) }
531	}
532
533	#[faer_macros::math]
534	#[inline(always)]
535	pub fn reduce_sum_real(&self, value: RealReg<T::SimdVec<S>>) -> Real<T> {
536		let value = T::simd_reduce_sum(&self.0, value.0);
537		if try_const! { T::SIMD_ABS_SPLIT_REAL_IMAG && !S::IS_SCALAR } {
538			add(real(value), imag(value))
539		} else {
540			real(value)
541		}
542	}
543
544	#[faer_macros::math]
545	#[inline(always)]
546	pub fn reduce_max_real(&self, value: RealReg<T::SimdVec<S>>) -> Real<T> {
547		let value = T::simd_reduce_max(&self.0, value.0);
548		if try_const! { T::SIMD_ABS_SPLIT_REAL_IMAG && !S::IS_SCALAR } {
549			max(real(value), imag(value))
550		} else {
551			real(value)
552		}
553	}
554
555	#[inline(always)]
556	pub fn max(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> RealReg<T::SimdVec<S>> {
557		let cmp = self.gt(lhs, rhs);
558		RealReg(self.select(cmp, lhs.0, rhs.0))
559	}
560
561	#[inline(always)]
562	pub fn eq(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdMask<S> {
563		T::simd_equal(&self.0, lhs, rhs)
564	}
565
566	#[inline(always)]
567	pub fn lt(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdMask<S> {
568		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
569		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
570		unsafe { core::mem::transmute_copy(&T::simd_less_than(&self.0, lhs, rhs)) }
571	}
572
573	#[inline(always)]
574	pub fn gt(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdMask<S> {
575		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
576		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
577		unsafe { core::mem::transmute_copy(&T::simd_greater_than(&self.0, lhs, rhs)) }
578	}
579
580	#[inline(always)]
581	pub fn le(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdMask<S> {
582		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
583		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
584		unsafe { core::mem::transmute_copy(&T::simd_less_than_or_equal(&self.0, lhs, rhs)) }
585	}
586
587	#[inline(always)]
588	pub fn ge(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdMask<S> {
589		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
590		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
591		unsafe { core::mem::transmute_copy(&T::simd_greater_than_or_equal(&self.0, lhs, rhs)) }
592	}
593
594	#[inline(always)]
595	pub fn select(&self, mask: T::SimdMask<S>, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
596		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
597		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
598		unsafe { core::mem::transmute_copy(&T::simd_select(&self.0, mask, lhs, rhs)) }
599	}
600
601	#[inline(always)]
602	pub fn iselect(&self, mask: T::SimdMask<S>, lhs: T::SimdIndex<S>, rhs: T::SimdIndex<S>) -> T::SimdIndex<S> {
603		unsafe { core::mem::transmute_copy(&T::simd_index_select(&self.0, mask, lhs, rhs)) }
604	}
605
606	#[inline(always)]
607	pub fn isplat(&self, value: T::Index) -> T::SimdIndex<S> {
608		unsafe { core::mem::transmute_copy(&T::simd_index_splat(&self.0, value)) }
609	}
610
611	#[inline(always)]
612	pub fn iadd(&self, lhs: T::SimdIndex<S>, rhs: T::SimdIndex<S>) -> T::SimdIndex<S> {
613		unsafe { core::mem::transmute_copy(&T::simd_index_add(&self.0, lhs, rhs)) }
614	}
615
616	#[inline(always)]
617	pub fn or_mask(&self, lhs: T::SimdMask<S>, rhs: T::SimdMask<S>) -> T::SimdMask<S> {
618		T::simd_or_mask(&self.0, lhs, rhs)
619	}
620
621	#[inline(always)]
622	pub fn and_mask(&self, lhs: T::SimdMask<S>, rhs: T::SimdMask<S>) -> T::SimdMask<S> {
623		T::simd_and_mask(&self.0, lhs, rhs)
624	}
625
626	#[inline(always)]
627	pub fn not_mask(&self, mask: T::SimdMask<S>) -> T::SimdMask<S> {
628		T::simd_not_mask(&self.0, mask)
629	}
630
631	#[inline(always)]
632	pub fn first_true_mask(&self, value: T::SimdMask<S>) -> usize {
633		T::simd_first_true_mask(&self.0, value)
634	}
635
636	#[inline(always)]
637	pub unsafe fn mask_load(&self, mask: T::SimdMemMask<S>, ptr: *const T::SimdVec<S>) -> T::SimdVec<S> {
638		unsafe { T::simd_mask_load(&self.0, mask, ptr) }
639	}
640
641	#[inline(always)]
642	pub unsafe fn mask_store(&self, mask: T::SimdMemMask<S>, ptr: *mut T::SimdVec<S>, value: T::SimdVec<S>) {
643		let value = unsafe { core::mem::transmute_copy(&value) };
644		unsafe { T::simd_mask_store(&self.0, mask, ptr, value) }
645	}
646
647	#[inline(always)]
648	pub fn load(&self, ptr: &T::SimdVec<S>) -> T::SimdVec<S> {
649		unsafe { core::mem::transmute_copy(&T::simd_load(&self.0, ptr)) }
650	}
651
652	#[inline(always)]
653	pub fn store(&self, ptr: &mut T::SimdVec<S>, value: T::SimdVec<S>) {
654		let value = unsafe { core::mem::transmute_copy(&value) };
655		unsafe { core::mem::transmute_copy(&T::simd_store(&self.0, ptr, value)) }
656	}
657}
658
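/// Type-level description of a scalar and its conjugate representation.
/// `Canonical` is the plain real or complex form, `Conj` is the type whose values
/// are stored pre-conjugated, and `IS_CANONICAL` tells the two apart at compile time.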
659pub unsafe trait Conjugate: Send + Sync + core::fmt::Debug {
660	const IS_CANONICAL: bool;
661
662	type Conj: Conjugate<Conj = Self, Canonical = Self::Canonical>;
663	type Canonical: Conjugate<Canonical = Self::Canonical> + ComplexField;
664}
665
666pub type Real<T> = <<T as Conjugate>::Canonical as ComplexField>::Real;
667
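/// Complex number stored with its imaginary part negated; serves as the conjugated
/// counterpart of [`Complex`] in [`Conjugate::Conj`].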
668#[derive(Copy, Clone, Debug, PartialEq, Eq)]
669pub struct ComplexConj<T> {
670	pub re: T,
671	pub im_neg: T,
672}
673
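/// How much SIMD support a scalar type advertises: `None`, `Copy` (vector loads and
/// stores only), or `Simd` (full vectorized arithmetic).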
674#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
675pub enum SimdCapabilities {
676	None,
677	Copy,
678	Simd,
679}
680
681impl SimdCapabilities {
682	#[inline]
683	pub const fn is_copy(self) -> bool {
684		matches!(self, Self::Copy | Self::Simd)
685	}
686
687	#[inline]
688	pub const fn is_simd(self) -> bool {
689		matches!(self, Self::Simd)
690	}
691}
692
693mod seal {
694	pub trait Seal {}
695	impl Seal for u8 {}
696	impl Seal for u16 {}
697	impl Seal for u32 {}
698	impl Seal for u64 {}
699	impl Seal for u128 {}
700	impl Seal for usize {}
701	impl Seal for i32 {}
702	impl Seal for i64 {}
703	impl Seal for isize {}
704}
705
706pub trait Seal: seal::Seal {}
707impl<T: seal::Seal> Seal for T {}
708
709/// Trait for signed integers corresponding to the ones satisfying [`Index`].
710///
711/// Always at most as wide as `isize`.
712pub trait SignedIndex:
713	Seal
714	+ core::fmt::Debug
715	+ core::ops::Neg<Output = Self>
716	+ core::ops::Add<Output = Self>
717	+ core::ops::Sub<Output = Self>
718	+ core::ops::AddAssign
719	+ core::ops::SubAssign
720	+ bytemuck::Pod
721	+ Eq
722	+ Ord
723	+ Send
724	+ Sync
725{
726	/// Maximum representable value.
727	const MAX: Self;
728
729	/// Truncate `value` to type [`Self`].
730	#[must_use]
731	fn truncate(value: usize) -> Self;
732
733	/// Zero extend `self`.
734	#[must_use]
735	fn zx(self) -> usize;
736	/// Sign extend `self`.
737	#[must_use]
738	fn sx(self) -> usize;
739
740	/// Sum nonnegative values while checking for overflow.
741	fn sum_nonnegative(slice: &[Self]) -> Option<Self> {
742		let mut acc = Self::zeroed();
743		for &i in slice {
744			if Self::MAX - i < acc {
745				return None;
746			}
747			acc += i;
748		}
749		Some(acc)
750	}
751}
752
753impl SignedIndex for i32 {
754	const MAX: Self = Self::MAX;
755
756	#[inline(always)]
757	fn truncate(value: usize) -> Self {
758		#[allow(clippy::assertions_on_constants)]
759		const _: () = {
760			core::assert!(i32::BITS <= usize::BITS);
761		};
762		value as isize as Self
763	}
764
765	#[inline(always)]
766	fn zx(self) -> usize {
767		self as u32 as usize
768	}
769
770	#[inline(always)]
771	fn sx(self) -> usize {
772		self as isize as usize
773	}
774}
775
776#[cfg(target_pointer_width = "64")]
777impl SignedIndex for i64 {
778	const MAX: Self = Self::MAX;
779
780	#[inline(always)]
781	fn truncate(value: usize) -> Self {
782		#[allow(clippy::assertions_on_constants)]
783		const _: () = {
784			core::assert!(i64::BITS <= usize::BITS);
785		};
786		value as isize as Self
787	}
788
789	#[inline(always)]
790	fn zx(self) -> usize {
791		self as u64 as usize
792	}
793
794	#[inline(always)]
795	fn sx(self) -> usize {
796		self as isize as usize
797	}
798}
799
800impl SignedIndex for isize {
801	const MAX: Self = Self::MAX;
802
803	#[inline(always)]
804	fn truncate(value: usize) -> Self {
805		value as isize
806	}
807
808	#[inline(always)]
809	fn zx(self) -> usize {
810		self as usize
811	}
812
813	#[inline(always)]
814	fn sx(self) -> usize {
815		self as usize
816	}
817}
818
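/// Unsigned integer types usable as indices, exposing only bit operations,
/// truncation, and zero extension (see [`Index`] for the full interface).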
819pub trait IndexCore:
820	Seal
821	+ core::fmt::Debug
822	+ core::ops::Not<Output = Self>
823	+ core::ops::BitAnd<Output = Self>
824	+ core::ops::BitOr<Output = Self>
825	+ core::ops::BitXor<Output = Self>
826	+ core::ops::Add<Output = Self>
827	+ core::ops::Sub<Output = Self>
828	+ core::ops::AddAssign
829	+ core::ops::SubAssign
830	+ bytemuck::Pod
831	+ Eq
832	+ Ord
833	+ Send
834	+ Sync
836{
837	const MAX: Self;
838
839	/// Truncate `value` to type [`Self`].
840	#[must_use]
841	fn truncate(value: usize) -> Self;
842
843	/// Zero extend `self`.
844	#[must_use]
845	fn zx(self) -> usize;
846}
847
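/// Unsigned index type paired with an equally sized fixed-width form and a signed
/// counterpart, with helpers for casting slices and overflow-checked sums.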
848pub trait Index: IndexCore {
849	/// Fixed-width unsigned type of the same size as `Self` (never `usize`).
850	type FixedWidth: Index;
851	/// Equally-sized signed index type.
852	type Signed: SignedIndex;
853
854	const BITS: u32 = core::mem::size_of::<Self>() as u32 * 8;
855
856	/// Convert a reference to a slice of [`Self`] to fixed width types.
857	#[inline(always)]
858	fn canonicalize(slice: &[Self]) -> &[Self::FixedWidth] {
859		bytemuck::cast_slice(slice)
860	}
861
862	/// Convert a mutable reference to a slice of [`Self`] to fixed width types.
863	#[inline(always)]
864	fn canonicalize_mut(slice: &mut [Self]) -> &mut [Self::FixedWidth] {
865		bytemuck::cast_slice_mut(slice)
866	}
867
868	/// Convert a signed value to an unsigned one.
869	#[inline(always)]
870	fn from_signed(value: Self::Signed) -> Self {
871		bytemuck::cast(value)
872	}
873
874	/// Convert an unsigned value to a signed one.
875	#[inline(always)]
876	fn to_signed(self) -> Self::Signed {
877		bytemuck::cast(self)
878	}
879
880	/// Sum values while checking for overflow.
881	#[inline]
882	fn sum_nonnegative(slice: &[Self]) -> Option<Self> {
883		Self::Signed::sum_nonnegative(bytemuck::cast_slice(slice)).map(Self::from_signed)
884	}
885}
886
887impl IndexCore for u8 {
888	const MAX: Self = Self::MAX;
889
890	#[inline(always)]
891	fn truncate(value: usize) -> Self {
892		value as _
893	}
894
895	#[inline(always)]
896	fn zx(self) -> usize {
897		self as _
898	}
899}
900impl IndexCore for u16 {
901	const MAX: Self = Self::MAX;
902
903	#[inline(always)]
904	fn truncate(value: usize) -> Self {
905		value as _
906	}
907
908	#[inline(always)]
909	fn zx(self) -> usize {
910		self as _
911	}
912}
913impl IndexCore for u32 {
914	const MAX: Self = Self::MAX;
915
916	#[inline(always)]
917	fn truncate(value: usize) -> Self {
918		value as _
919	}
920
921	#[inline(always)]
922	fn zx(self) -> usize {
923		self as _
924	}
925}
926
927impl IndexCore for u64 {
928	const MAX: Self = Self::MAX;
929
930	#[inline(always)]
931	fn truncate(value: usize) -> Self {
932		value as _
933	}
934
935	#[inline(always)]
936	fn zx(self) -> usize {
937		self as _
938	}
939}
940
941impl IndexCore for u128 {
942	const MAX: Self = Self::MAX;
943
944	#[inline(always)]
945	fn truncate(value: usize) -> Self {
946		value as _
947	}
948
949	#[inline(always)]
950	fn zx(self) -> usize {
951		self as _
952	}
953}
954
955impl IndexCore for usize {
956	const MAX: Self = Self::MAX;
957
958	#[inline(always)]
959	fn truncate(value: usize) -> Self {
960		value
961	}
962
963	#[inline(always)]
964	fn zx(self) -> usize {
965		self
966	}
967}
968
969impl Index for u32 {
970	type FixedWidth = u32;
971	type Signed = i32;
972}
973#[cfg(target_pointer_width = "64")]
974impl Index for u64 {
975	type FixedWidth = u64;
976	type Signed = i64;
977}
978
979impl Index for usize {
980	#[cfg(target_pointer_width = "32")]
981	type FixedWidth = u32;
982	#[cfg(target_pointer_width = "64")]
983	type FixedWidth = u64;
984	type Signed = isize;
985}
986
987unsafe impl<T: RealField> Conjugate for T {
988	type Canonical = T;
989	type Conj = T;
990
991	const IS_CANONICAL: bool = true;
992}
993
994unsafe impl<T: RealField> Conjugate for Complex<T> {
995	type Canonical = Complex<T>;
996	type Conj = ComplexConj<T>;
997
998	const IS_CANONICAL: bool = true;
999}
1000unsafe impl<T: RealField> Conjugate for ComplexConj<T> {
1001	type Canonical = Complex<T>;
1002	type Conj = Complex<T>;
1003
1004	const IS_CANONICAL: bool = false;
1005}
1006
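/// Dispatcher that runs a [`pulp::WithSimd`] closure either with runtime feature
/// detection ([`pulp::Arch`]) or in scalar mode ([`pulp::Scalar`]).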
1007pub trait SimdArch: Copy + Default + Send + Sync {
1008	fn dispatch<R>(self, f: impl pulp::WithSimd<Output = R>) -> R;
1009}
1010
1011impl SimdArch for pulp::Arch {
1012	#[inline]
1013	fn dispatch<R>(self, f: impl pulp::WithSimd<Output = R>) -> R {
1014		self.dispatch(f)
1015	}
1016}
1017
1018impl SimdArch for pulp::Scalar {
1019	#[inline]
1020	fn dispatch<R>(self, f: impl pulp::WithSimd<Output = R>) -> R {
1021		f.with_simd(self)
1022	}
1023}
1024
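/// Scalar field over which matrix algorithms are written: constructors, arithmetic
/// by reference, conjugation, absolute values, and the associated SIMD types and
/// operations used by the vectorized kernels.
///
/// A generic routine written against this trait might look like the following
/// sketch (illustrative only; it assumes the crate is referenced as `faer_traits`):
///
/// ```
/// use faer_traits::ComplexField;
/// use faer_traits::math_utils::{add, mul, zero};
///
/// // Plain (unconjugated) dot product of two slices.
/// fn dot<T: ComplexField>(a: &[T], b: &[T]) -> T {
///     let mut acc = zero::<T>();
///     for (x, y) in a.iter().zip(b) {
///         acc = add(&acc, &mul(x, y));
///     }
///     acc
/// }
///
/// assert_eq!(dot(&[1.0_f64, 2.0], &[3.0, 4.0]), 11.0);
/// ```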
1025pub trait ComplexField:
1026	Debug
1027	+ Clone
1028	+ Conjugate<Canonical = Self>
1029	+ PartialEq
1030	+ AddByRef<Output = Self>
1031	+ SubByRef<Output = Self>
1032	+ MulByRef<Output = Self>
1033	+ NegByRef<Output = Self>
1034{
1035	const IS_REAL: bool;
1036	const SIMD_ABS_SPLIT_REAL_IMAG: bool = false;
1037
1038	type Arch: SimdArch;
1039	type Unit: ComplexField;
1040
1041	type SimdCtx<S: Simd>: Copy;
1042	type Index: IndexCore;
1043
1044	type Real: RealField;
1045
1046	#[doc(hidden)]
1047	const IS_NATIVE_F32: bool = false;
1048	#[doc(hidden)]
1049	const IS_NATIVE_C32: bool = false;
1050	#[doc(hidden)]
1051	const IS_NATIVE_F64: bool = false;
1052	#[doc(hidden)]
1053	const IS_NATIVE_C64: bool = false;
1054
1055	const SIMD_CAPABILITIES: SimdCapabilities;
1056	type SimdMask<S: Simd>: Copy + Debug;
1057	type SimdMemMask<S: Simd>: Copy + Debug;
1058
1059	type SimdVec<S: Simd>: Pod + Debug;
1060	type SimdIndex<S: Simd>: Pod + Debug;
1061
1062	fn zero_impl() -> Self;
1063	fn one_impl() -> Self;
1064	fn nan_impl() -> Self;
1065	fn infinity_impl() -> Self;
1066
1067	fn from_real_impl(real: &Self::Real) -> Self;
1068	fn from_f64_impl(real: f64) -> Self;
1069
1070	fn real_part_impl(value: &Self) -> Self::Real;
1071	fn imag_part_impl(value: &Self) -> Self::Real;
1072
1073	fn copy_impl(value: &Self) -> Self;
1074	fn conj_impl(value: &Self) -> Self;
1075	fn recip_impl(value: &Self) -> Self;
1076	fn sqrt_impl(value: &Self) -> Self;
1077
1078	fn abs_impl(value: &Self) -> Self::Real;
1079	fn abs1_impl(value: &Self) -> Self::Real;
1080	fn abs2_impl(value: &Self) -> Self::Real;
1081
1082	fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self;
1083
1084	fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self;
1085
1086	fn is_finite_impl(value: &Self) -> bool;
1087	fn is_nan_impl(value: &Self) -> bool {
1088		value != value
1089	}
1090
1091	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S>;
1092	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S;
1093
1094	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S>;
1095	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S>;
1096	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S>;
1097	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>);
1098
1099	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S>;
1100	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S>;
1101
1102	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1103	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1104
1105	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1106	fn simd_conj<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1107	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1108	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1109
1110	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1111	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1112
1113	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1114	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1115	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S>;
1116	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S>;
1117	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1118	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S>;
1119
1120	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self;
1121	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self;
1122	fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1123	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1124	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1125	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1126	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1127
1128	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1129	fn simd_index_select<S: Simd>(
1130		ctx: &Self::SimdCtx<S>,
1131		mask: Self::SimdMask<S>,
1132		lhs: Self::SimdIndex<S>,
1133		rhs: Self::SimdIndex<S>,
1134	) -> Self::SimdIndex<S>;
1135
1136	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S>;
1137	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S>;
1138	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S>;
1139	#[inline(always)]
1140	fn simd_index_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1141		Self::simd_index_less_than(ctx, rhs, lhs)
1142	}
1143	#[inline(always)]
1144	fn simd_index_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1145		Self::simd_not_mask(ctx, Self::simd_index_less_than(ctx, rhs, lhs))
1146	}
1147	#[inline(always)]
1148	fn simd_index_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1149		Self::simd_not_mask(ctx, Self::simd_index_greater_than(ctx, rhs, lhs))
1150	}
1151
1152	fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S>;
1153	fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S>;
1154	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S>;
1155	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize;
1156
1157	#[inline(always)]
1158	fn simd_load<S: Simd>(ctx: &Self::SimdCtx<S>, ptr: &Self::SimdVec<S>) -> Self::SimdVec<S> {
1159		let simd = Self::ctx_from_simd(ctx);
1160		if try_const! { Self::Unit::IS_NATIVE_F32 } {
1161			simd.deinterleave_shfl_f32s(*ptr)
1162		} else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1163			simd.deinterleave_shfl_f64s(*ptr)
1164		} else {
1165			panic!();
1166		}
1167	}
1168
1169	#[inline(always)]
1170	fn simd_store<S: Simd>(ctx: &Self::SimdCtx<S>, ptr: &mut Self::SimdVec<S>, value: Self::SimdVec<S>) {
1171		let simd = Self::ctx_from_simd(ctx);
1172		if try_const! { Self::Unit::IS_NATIVE_F32 } {
1173			*ptr = simd.interleave_shfl_f32s(value)
1174		} else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1175			*ptr = simd.interleave_shfl_f64s(value)
1176		} else {
1177			panic!();
1178		}
1179	}
1180
1181	#[inline(always)]
1182	unsafe fn simd_mask_load<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
1183		let simd = Self::ctx_from_simd(ctx);
1184		let value = Self::simd_mask_load_raw(ctx, mask, ptr);
1185		if try_const! { Self::Unit::IS_NATIVE_F32 } {
1186			simd.deinterleave_shfl_f32s(value)
1187		} else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1188			simd.deinterleave_shfl_f64s(value)
1189		} else {
1190			panic!();
1191		}
1192	}
1193
1194	#[inline(always)]
1195	unsafe fn simd_mask_store<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, value: Self::SimdVec<S>) {
1196		let simd = Self::ctx_from_simd(ctx);
1197		if try_const! { Self::Unit::IS_NATIVE_F32 } {
1198			Self::simd_mask_store_raw(ctx, mask, ptr, simd.interleave_shfl_f32s(value))
1199		} else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1200			Self::simd_mask_store_raw(ctx, mask, ptr, simd.interleave_shfl_f64s(value))
1201		} else {
1202			panic!();
1203		}
1204	}
1205
1206	#[inline(always)]
1207	fn simd_iota<S: Simd>(ctx: &Self::SimdCtx<S>) -> Self::SimdIndex<S> {
1208		let simd = Self::ctx_from_simd(ctx);
1209		struct Interleave<T>(T);
1210		unsafe impl<T> pulp::Interleave for Interleave<T> {}
1211
1212		unsafe {
1213			if try_const! { Self::Unit::IS_NATIVE_F32 } {
1214				core::mem::transmute_copy::<_, Self::SimdIndex<S>>(&simd.deinterleave_shfl_f32s(Interleave(core::mem::transmute_copy::<
1215					_,
1216					Self::SimdVec<S>,
1217				>(
1218					&pulp::iota_32::<Interleave<Self>>()
1219				))))
1220			} else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1221				core::mem::transmute_copy::<_, Self::SimdIndex<S>>(
1222					&simd.deinterleave_shfl_f64s(core::mem::transmute_copy::<_, Self::SimdVec<S>>(&pulp::iota_64::<Interleave<Self>>())),
1223				)
1224			} else {
1225				panic!();
1226			}
1227		}
1228	}
1229}
1230
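/// Ordered real scalars (the `Real` type of a [`ComplexField`]), additionally exposing
/// machine constants: epsilon, mantissa bit count, and the smallest/largest positive
/// values together with their square roots.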
1231pub trait RealField:
1232	ComplexField<Real = Self, Conj = Self> + DivByRef<Output = Self> + PartialOrd + num_traits::NumOps + num_traits::Num + core::ops::Neg<Output = Self>
1233{
1234	fn epsilon_impl() -> Self;
1235	fn nbits_impl() -> usize;
1236
1237	fn min_positive_impl() -> Self;
1238	fn max_positive_impl() -> Self;
1239	fn sqrt_min_positive_impl() -> Self;
1240	fn sqrt_max_positive_impl() -> Self;
1241}
1242
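// Native `f32`: a real scalar whose SIMD operations map directly onto pulp's
// per-lane `f32` primitives.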
1243impl ComplexField for f32 {
1244	type Arch = pulp::Arch;
1245	type Index = u32;
1246	type Real = Self;
1247	type SimdCtx<S: Simd> = S;
1248	type SimdIndex<S: Simd> = S::u32s;
1249	type SimdMask<S: Simd> = S::m32s;
1250	type SimdMemMask<S: Simd> = pulp::MemMask<S::m32s>;
1251	type SimdVec<S: Simd> = S::f32s;
1252	type Unit = Self;
1253
1254	const IS_NATIVE_F32: bool = true;
1255	const IS_REAL: bool = true;
1256	const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
1257
1258	#[inline(always)]
1259	fn zero_impl() -> Self {
1260		0.0
1261	}
1262
1263	#[inline(always)]
1264	fn one_impl() -> Self {
1265		1.0
1266	}
1267
1268	#[inline(always)]
1269	fn nan_impl() -> Self {
1270		Self::NAN
1271	}
1272
1273	#[inline(always)]
1274	fn infinity_impl() -> Self {
1275		Self::INFINITY
1276	}
1277
1278	#[inline(always)]
1279	fn from_real_impl(value: &Self) -> Self {
1280		*value
1281	}
1282
1283	#[inline(always)]
1284	fn from_f64_impl(value: f64) -> Self {
1285		value as _
1286	}
1287
1288	#[inline(always)]
1289	fn real_part_impl(value: &Self) -> Self {
1290		*value
1291	}
1292
1293	#[inline(always)]
1294	fn imag_part_impl(_: &Self) -> Self {
1295		0.0
1296	}
1297
1298	#[inline(always)]
1299	fn copy_impl(value: &Self) -> Self {
1300		*value
1301	}
1302
1303	#[inline(always)]
1304	fn conj_impl(value: &Self) -> Self {
1305		*value
1306	}
1307
1308	#[inline(always)]
1309	fn recip_impl(value: &Self) -> Self {
1310		1.0 / *value
1311	}
1312
1313	#[inline(always)]
1314	fn sqrt_impl(value: &Self) -> Self {
1315		libm::sqrtf(*value)
1316	}
1317
1318	#[inline(always)]
1319	fn abs_impl(value: &Self) -> Self {
1320		(*value).abs()
1321	}
1322
1323	#[inline(always)]
1324	fn abs1_impl(value: &Self) -> Self {
1325		(*value).abs()
1326	}
1327
1328	#[inline(always)]
1329	fn abs2_impl(value: &Self) -> Self {
1330		(*value) * (*value)
1331	}
1332
1333	#[inline(always)]
1334	fn mul_real_impl(lhs: &Self, rhs: &Self) -> Self {
1335		(*lhs) * (*rhs)
1336	}
1337
1338	#[inline(always)]
1339	fn mul_pow2_impl(lhs: &Self, rhs: &Self) -> Self {
1340		(*lhs) * (*rhs)
1341	}
1342
1343	#[inline(always)]
1344	fn is_finite_impl(value: &Self) -> bool {
1345		(*value).is_finite()
1346	}
1347
1348	#[inline(always)]
1349	fn is_nan_impl(value: &Self) -> bool {
1350		(*value).is_nan()
1351	}
1352
1353	#[inline(always)]
1354	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
1355		simd
1356	}
1357
1358	#[inline(always)]
1359	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
1360		ctx.splat_f32s(*value)
1361	}
1362
1363	#[inline(always)]
1364	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
1365		ctx.splat_f32s(*value)
1366	}
1367
1368	#[inline(always)]
1369	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1370		ctx.add_f32s(lhs, rhs)
1371	}
1372
1373	#[inline(always)]
1374	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1375		ctx.sub_f32s(lhs, rhs)
1376	}
1377
1378	#[inline(always)]
1379	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1380		ctx.neg_f32s(value)
1381	}
1382
1383	#[inline(always)]
1384	fn simd_conj<S: Simd>(_: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1385		value
1386	}
1387
1388	#[inline(always)]
1389	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1390		ctx.abs_f32s(value)
1391	}
1392
1393	#[inline(always)]
1394	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1395		ctx.mul_f32s(lhs, rhs)
1396	}
1397
1398	#[inline(always)]
1399	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1400		ctx.mul_f32s(lhs, real_rhs)
1401	}
1402
1403	#[inline(always)]
1404	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1405		ctx.mul_f32s(lhs, real_rhs)
1406	}
1407
1408	#[inline(always)]
1409	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1410		ctx.mul_f32s(lhs, rhs)
1411	}
1412
1413	#[inline(always)]
1414	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1415		ctx.mul_add_e_f32s(lhs, rhs, acc)
1416	}
1417
1418	#[inline(always)]
1419	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1420		ctx.mul_add_e_f32s(lhs, rhs, acc)
1421	}
1422
1423	#[inline(always)]
1424	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1425		ctx.mul_f32s(value, value)
1426	}
1427
1428	#[inline(always)]
1429	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1430		ctx.mul_add_e_f32s(value, value, acc)
1431	}
1432
1433	#[inline(always)]
1434	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
1435		ctx.reduce_sum_f32s(value)
1436	}
1437
1438	#[inline(always)]
1439	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
1440		ctx.reduce_max_f32s(value)
1441	}
1442
1443	#[inline(always)]
1444	fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1445		ctx.equal_f32s(real_lhs, real_rhs)
1446	}
1447
1448	#[inline(always)]
1449	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1450		ctx.less_than_f32s(real_lhs, real_rhs)
1451	}
1452
1453	#[inline(always)]
1454	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1455		ctx.greater_than_f32s(real_lhs, real_rhs)
1456	}
1457
1458	#[inline(always)]
1459	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1460		ctx.less_than_or_equal_f32s(real_lhs, real_rhs)
1461	}
1462
1463	#[inline(always)]
1464	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1465		ctx.greater_than_or_equal_f32s(real_lhs, real_rhs)
1466	}
1467
1468	#[inline(always)]
1469	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1470		ctx.select_f32s_m32s(mask, lhs, rhs)
1471	}
1472
1473	#[inline(always)]
1474	fn simd_index_select<S: Simd>(
1475		ctx: &Self::SimdCtx<S>,
1476		mask: Self::SimdMask<S>,
1477		lhs: Self::SimdIndex<S>,
1478		rhs: Self::SimdIndex<S>,
1479	) -> Self::SimdIndex<S> {
1480		ctx.select_u32s_m32s(mask, lhs, rhs)
1481	}
1482
1483	#[inline(always)]
1484	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
1485		ctx.splat_u32s(value as _)
1486	}
1487
1488	#[inline(always)]
1489	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
1490		ctx.add_u32s(lhs, rhs)
1491	}
1492
1493	#[inline(always)]
1494	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1495		ctx.less_than_u32s(lhs, rhs)
1496	}
1497
1498	#[inline(always)]
1499	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1500		ctx.abs_f32s(value)
1501	}
1502
1503	#[inline(always)]
1504	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
1505		*ctx
1506	}
1507
1508	#[inline(always)]
1509	fn simd_and_mask<S: Simd>(simd: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
1510		simd.and_m32s(lhs, rhs)
1511	}
1512
1513	#[inline(always)]
1514	fn simd_or_mask<S: Simd>(simd: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
1515		simd.or_m32s(lhs, rhs)
1516	}
1517
1518	#[inline(always)]
1519	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
1520		ctx.not_m32s(mask)
1521	}
1522
1523	#[inline(always)]
1524	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
1525		ctx.first_true_m32s(value)
1526	}
1527
1528	#[inline(always)]
1529	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S> {
1530		ctx.mask_between_m32s(start as _, end as _)
1531	}
1532
1533	#[inline(always)]
1534	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S> {
1535		ctx.mask_between_m32s(start as _, end as _).mask()
1536	}
1537
1538	#[inline(always)]
1539	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
1540		ctx.mask_load_ptr_f32s(mask, ptr as _)
1541	}
1542
1543	#[inline(always)]
1544	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
1545		ctx.mask_store_ptr_f32s(mask, ptr as _, values);
1546	}
1547}
1548
1549impl RealField for f32 {
1550	#[inline(always)]
1551	fn epsilon_impl() -> Self {
1552		Self::EPSILON
1553	}
1554
1555	#[inline(always)]
1556	fn min_positive_impl() -> Self {
1557		Self::MIN_POSITIVE
1558	}
1559
1560	#[inline(always)]
1561	fn max_positive_impl() -> Self {
1562		Self::MIN_POSITIVE.recip()
1563	}
1564
1565	#[inline(always)]
1566	fn sqrt_min_positive_impl() -> Self {
1567		libm::sqrtf(Self::MIN_POSITIVE)
1568	}
1569
1570	#[inline(always)]
1571	fn sqrt_max_positive_impl() -> Self {
1572		libm::sqrtf(Self::MIN_POSITIVE.recip())
1573	}
1574
1575	#[inline(always)]
1576	fn nbits_impl() -> usize {
1577		Self::MANTISSA_DIGITS as usize
1578	}
1579}
1580
1581impl ComplexField for f64 {
1582	type Arch = pulp::Arch;
1583	type Index = u64;
1584	type Real = Self;
1585	type SimdCtx<S: Simd> = S;
1586	type SimdIndex<S: Simd> = S::u64s;
1587	type SimdMask<S: Simd> = S::m64s;
1588	type SimdMemMask<S: Simd> = pulp::MemMask<S::m64s>;
1589	type SimdVec<S: Simd> = S::f64s;
1590	type Unit = Self;
1591
1592	const IS_NATIVE_F64: bool = true;
1593	const IS_REAL: bool = true;
1594	const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
1595
1596	#[inline(always)]
1597	fn zero_impl() -> Self {
1598		0.0
1599	}
1600
1601	#[inline(always)]
1602	fn one_impl() -> Self {
1603		1.0
1604	}
1605
1606	#[inline(always)]
1607	fn nan_impl() -> Self {
1608		Self::NAN
1609	}
1610
1611	#[inline(always)]
1612	fn infinity_impl() -> Self {
1613		Self::INFINITY
1614	}
1615
1616	#[inline(always)]
1617	fn from_real_impl(value: &Self) -> Self {
1618		*value
1619	}
1620
1621	#[inline(always)]
1622	fn from_f64_impl(value: f64) -> Self {
1623		value as _
1624	}
1625
1626	#[inline(always)]
1627	fn real_part_impl(value: &Self) -> Self {
1628		*value
1629	}
1630
1631	#[inline(always)]
1632	fn imag_part_impl(_: &Self) -> Self {
1633		0.0
1634	}
1635
1636	#[inline(always)]
1637	fn copy_impl(value: &Self) -> Self {
1638		*value
1639	}
1640
1641	#[inline(always)]
1642	fn conj_impl(value: &Self) -> Self {
1643		*value
1644	}
1645
1646	#[inline(always)]
1647	fn recip_impl(value: &Self) -> Self {
1648		1.0 / *value
1649	}
1650
1651	#[inline(always)]
1652	fn sqrt_impl(value: &Self) -> Self {
1653		libm::sqrt(*value)
1654	}
1655
1656	#[inline(always)]
1657	fn abs_impl(value: &Self) -> Self {
1658		(*value).abs()
1659	}
1660
1661	#[inline(always)]
1662	fn abs1_impl(value: &Self) -> Self {
1663		(*value).abs()
1664	}
1665
1666	#[inline(always)]
1667	fn abs2_impl(value: &Self) -> Self {
1668		(*value) * (*value)
1669	}
1670
1671	#[inline(always)]
1672	fn mul_real_impl(lhs: &Self, rhs: &Self) -> Self {
1673		(*lhs) * (*rhs)
1674	}
1675
1676	#[inline(always)]
1677	fn mul_pow2_impl(lhs: &Self, rhs: &Self) -> Self {
1678		(*lhs) * (*rhs)
1679	}
1680
1681	#[inline(always)]
1682	fn is_nan_impl(value: &Self) -> bool {
1683		(*value).is_nan()
1684	}
1685
1686	#[inline(always)]
1687	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
1688		simd
1689	}
1690
1691	#[inline(always)]
1692	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
1693		ctx.splat_f64s(*value)
1694	}
1695
1696	#[inline(always)]
1697	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
1698		ctx.splat_f64s(*value)
1699	}
1700
1701	#[inline(always)]
1702	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1703		ctx.add_f64s(lhs, rhs)
1704	}
1705
1706	#[inline(always)]
1707	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1708		ctx.sub_f64s(lhs, rhs)
1709	}
1710
1711	#[inline(always)]
1712	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1713		ctx.neg_f64s(value)
1714	}
1715
1716	#[inline(always)]
1717	fn simd_conj<S: Simd>(_: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1718		value
1719	}
1720
1721	#[inline(always)]
1722	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1723		ctx.abs_f64s(value)
1724	}
1725
1726	#[inline(always)]
1727	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1728		ctx.mul_f64s(lhs, rhs)
1729	}
1730
1731	#[inline(always)]
1732	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1733		ctx.mul_f64s(lhs, real_rhs)
1734	}
1735
1736	#[inline(always)]
1737	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1738		ctx.mul_f64s(lhs, real_rhs)
1739	}
1740
1741	#[inline(always)]
1742	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1743		ctx.mul_f64s(lhs, rhs)
1744	}
1745
1746	#[inline(always)]
1747	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1748		ctx.mul_add_e_f64s(lhs, rhs, acc)
1749	}
1750
1751	#[inline(always)]
1752	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1753		ctx.mul_add_e_f64s(lhs, rhs, acc)
1754	}
1755
1756	#[inline(always)]
1757	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1758		ctx.mul_f64s(value, value)
1759	}
1760
1761	#[inline(always)]
1762	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1763		ctx.mul_add_e_f64s(value, value, acc)
1764	}
1765
1766	#[inline(always)]
1767	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
1768		ctx.reduce_sum_f64s(value)
1769	}
1770
1771	#[inline(always)]
1772	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
1773		ctx.reduce_max_f64s(value)
1774	}
1775
1776	#[inline(always)]
1777	fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1778		ctx.equal_f64s(real_lhs, real_rhs)
1779	}
1780
1781	#[inline(always)]
1782	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1783		ctx.less_than_f64s(real_lhs, real_rhs)
1784	}
1785
1786	#[inline(always)]
1787	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1788		ctx.greater_than_f64s(real_lhs, real_rhs)
1789	}
1790
1791	#[inline(always)]
1792	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1793		ctx.less_than_or_equal_f64s(real_lhs, real_rhs)
1794	}
1795
1796	#[inline(always)]
1797	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1798		ctx.greater_than_or_equal_f64s(real_lhs, real_rhs)
1799	}
1800
1801	#[inline(always)]
1802	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1803		ctx.select_f64s_m64s(mask, lhs, rhs)
1804	}
1805
1806	#[inline(always)]
1807	fn simd_index_select<S: Simd>(
1808		ctx: &Self::SimdCtx<S>,
1809		mask: Self::SimdMask<S>,
1810		lhs: Self::SimdIndex<S>,
1811		rhs: Self::SimdIndex<S>,
1812	) -> Self::SimdIndex<S> {
1813		ctx.select_u64s_m64s(mask, lhs, rhs)
1814	}
1815
1816	#[inline(always)]
1817	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
1818		ctx.splat_u64s(value as _)
1819	}
1820
1821	#[inline(always)]
1822	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
1823		ctx.add_u64s(lhs, rhs)
1824	}
1825
1826	#[inline(always)]
1827	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1828		ctx.less_than_u64s(lhs, rhs)
1829	}
1830
1831	#[inline(always)]
1832	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1833		ctx.abs_f64s(value)
1834	}
1835
1836	#[inline(always)]
1837	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
1838		*ctx
1839	}
1840
1841	#[inline(always)]
1842	fn simd_and_mask<S: Simd>(simd: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
1843		simd.and_m64s(lhs, rhs)
1844	}
1845
1846	#[inline(always)]
1847	fn simd_or_mask<S: Simd>(simd: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
1848		simd.or_m64s(lhs, rhs)
1849	}
1850
1851	#[inline(always)]
1852	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
1853		ctx.not_m64s(mask)
1854	}
1855
1856	#[inline(always)]
1857	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
1858		ctx.first_true_m64s(value)
1859	}
1860
1861	#[inline(always)]
1862	fn is_finite_impl(value: &Self) -> bool {
1863		(*value).is_finite()
1864	}
1865
1866	#[inline(always)]
1867	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S> {
1868		ctx.mask_between_m64s(start as _, end as _)
1869	}
1870
1871	#[inline(always)]
1872	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S> {
1873		ctx.mask_between_m64s(start as _, end as _).mask()
1874	}
1875
1876	#[inline(always)]
1877	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
1878		ctx.mask_load_ptr_f64s(mask, ptr as _)
1879	}
1880
1881	#[inline(always)]
1882	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
1883		ctx.mask_store_ptr_f64s(mask, ptr as _, values);
1884	}
1885}
1886
1887impl RealField for f64 {
1888	#[inline(always)]
1889	fn epsilon_impl() -> Self {
1890		Self::EPSILON
1891	}
1892
1893	#[inline(always)]
1894	fn min_positive_impl() -> Self {
1895		Self::MIN_POSITIVE
1896	}
1897
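	// note: "max positive" here is the reciprocal of MIN_POSITIVE (an exact power of two),
	// not f64::MAX; presumably chosen so that scaling by it and back again is lossless.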
1898	#[inline(always)]
1899	fn max_positive_impl() -> Self {
1900		Self::MIN_POSITIVE.recip()
1901	}
1902
1903	#[inline(always)]
1904	fn sqrt_min_positive_impl() -> Self {
1905		libm::sqrt(Self::MIN_POSITIVE)
1906	}
1907
1908	#[inline(always)]
1909	fn sqrt_max_positive_impl() -> Self {
1910		libm::sqrt(Self::MIN_POSITIVE.recip())
1911	}
1912
1913	#[inline(always)]
1914	fn nbits_impl() -> usize {
1915		Self::MANTISSA_DIGITS as usize
1916	}
1917}
1918
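// generic complex field over any RealField: the SIMD vector type is a pair of real vectors
// (split re/im storage), so every operation below is written in terms of the underlying
// real SIMD primitives.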
1919impl<T: RealField> ComplexField for Complex<T> {
1920	type Arch = T::Arch;
1921	type Index = T::Index;
1922	type Real = T;
1923	type SimdCtx<S: Simd> = T::SimdCtx<S>;
1924	type SimdIndex<S: Simd> = T::SimdIndex<S>;
1925	type SimdMask<S: Simd> = T::SimdMask<S>;
1926	type SimdMemMask<S: Simd> = Complex<T::SimdMemMask<S>>;
1927	type SimdVec<S: Simd> = Complex<T::SimdVec<S>>;
1928	type Unit = T::Unit;
1929
1930	const IS_NATIVE_C32: bool = T::IS_NATIVE_F32;
1931	const IS_NATIVE_C64: bool = T::IS_NATIVE_F64;
1932	const IS_REAL: bool = false;
1933	const SIMD_CAPABILITIES: SimdCapabilities = T::SIMD_CAPABILITIES;
1934
1935	#[inline]
1936	fn zero_impl() -> Self {
1937		Complex {
1938			re: T::zero_impl(),
1939			im: T::zero_impl(),
1940		}
1941	}
1942
1943	#[inline]
1944	fn one_impl() -> Self {
1945		Complex {
1946			re: T::one_impl(),
1947			im: T::zero_impl(),
1948		}
1949	}
1950
1951	#[inline]
1952	fn nan_impl() -> Self {
1953		Complex {
1954			re: T::nan_impl(),
1955			im: T::nan_impl(),
1956		}
1957	}
1958
1959	#[inline]
1960	fn infinity_impl() -> Self {
1961		Complex {
1962			re: T::infinity_impl(),
1963			im: T::infinity_impl(),
1964		}
1965	}
1966
1967	#[inline]
1968	fn from_real_impl(real: &Self::Real) -> Self {
1969		Complex {
1970			re: real.clone(),
1971			im: T::zero_impl(),
1972		}
1973	}
1974
1975	#[inline]
1976	fn from_f64_impl(real: f64) -> Self {
1977		Complex {
1978			re: T::from_f64_impl(real),
1979			im: T::zero_impl(),
1980		}
1981	}
1982
1983	#[inline]
1984	fn real_part_impl(value: &Self) -> Self::Real {
1985		value.re.clone()
1986	}
1987
1988	#[inline]
1989	fn imag_part_impl(value: &Self) -> Self::Real {
1990		value.im.clone()
1991	}
1992
1993	#[inline]
1994	fn copy_impl(value: &Self) -> Self {
1995		value.clone()
1996	}
1997
1998	#[inline]
1999	fn conj_impl(value: &Self) -> Self {
2000		Self {
2001			re: value.re.clone(),
2002			im: value.im.neg_by_ref(),
2003		}
2004	}
2005
2006	#[inline]
2007	fn recip_impl(value: &Self) -> Self {
2008		let (re, im) = recip_impl(value.re.clone(), value.im.clone());
2009		Complex { re, im }
2010	}
2011
2012	#[inline]
2013	fn sqrt_impl(value: &Self) -> Self {
2014		let (re, im) = sqrt_impl(value.re.clone(), value.im.clone());
2015		Complex { re, im }
2016	}
2017
2018	#[inline]
2019	fn abs_impl(value: &Self) -> Self::Real {
2020		abs_impl(value.re.clone(), value.im.clone())
2021	}
2022
2023	#[inline]
2024	#[faer_macros::math]
2025	fn abs1_impl(value: &Self) -> Self::Real {
2026		abs1(value.re) + abs1(value.im)
2027	}
2028
2029	#[inline]
2030	#[faer_macros::math]
2031	fn abs2_impl(value: &Self) -> Self::Real {
2032		abs2(value.re) + abs2(value.im)
2033	}
2034
2035	#[inline]
2036	#[faer_macros::math]
2037	fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2038		Complex {
2039			re: lhs.re * rhs,
2040			im: lhs.im * rhs,
2041		}
2042	}
2043
2044	#[inline]
2045	#[faer_macros::math]
2046	fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2047		Complex {
2048			re: mul_pow2(lhs.re, rhs),
2049			im: mul_pow2(lhs.im, rhs),
2050		}
2051	}
2052
2053	#[inline]
2054	#[faer_macros::math]
2055	fn is_finite_impl(value: &Self) -> bool {
2056		is_finite(value.re) && is_finite(value.im)
2057	}
2058
2059	#[inline(always)]
2060	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
2061		T::simd_ctx(simd)
2062	}
2063
2064	#[inline(always)]
2065	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
2066		T::ctx_from_simd(ctx)
2067	}
2068
2069	#[inline(always)]
2070	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
2071		Complex {
2072			re: T::simd_splat(ctx, &value.re),
2073			im: T::simd_splat(ctx, &value.im),
2074		}
2075	}
2076
2077	#[inline(always)]
2078	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
2079		Complex {
2080			re: T::simd_splat_real(ctx, value),
2081			im: T::simd_splat_real(ctx, value),
2082		}
2083	}
2084
2085	#[inline(always)]
2086	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2087		Complex {
2088			re: T::simd_add(ctx, lhs.re, rhs.re),
2089			im: T::simd_add(ctx, lhs.im, rhs.im),
2090		}
2091	}
2092
2093	#[inline(always)]
2094	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2095		Complex {
2096			re: T::simd_sub(ctx, lhs.re, rhs.re),
2097			im: T::simd_sub(ctx, lhs.im, rhs.im),
2098		}
2099	}
2100
2101	#[inline(always)]
2102	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2103		Complex {
2104			re: T::simd_neg(ctx, value.re),
2105			im: T::simd_neg(ctx, value.im),
2106		}
2107	}
2108
2109	#[inline(always)]
2110	fn simd_conj<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2111		Complex {
2112			re: value.re,
2113			im: T::simd_neg(ctx, value.im),
2114		}
2115	}
2116
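	// 1-norm of the complex value, |re| + |im|, broadcast into both halves of the result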
2117	#[inline(always)]
2118	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2119		let v = T::simd_add(ctx, T::simd_abs1(ctx, value.re), T::simd_abs1(ctx, value.im));
2120		Complex { re: v, im: v }
2121	}
2122
2123	#[inline(always)]
2124	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2125		let re = T::simd_abs_max(ctx, value.re);
2126		let im = T::simd_abs_max(ctx, value.im);
2127
2128		let v = T::simd_select(ctx, T::simd_greater_than(ctx, re, im), re, im);
2129		Complex { re: v, im: v }
2130	}
2131
2132	#[inline(always)]
2133	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2134		Complex {
2135			re: T::simd_mul_real(ctx, lhs.re, real_rhs.re),
2136			im: T::simd_mul_real(ctx, lhs.im, real_rhs.re),
2137		}
2138	}
2139
2140	#[inline(always)]
2141	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2142		Complex {
2143			re: T::simd_mul_pow2(ctx, lhs.re, real_rhs.re),
2144			im: T::simd_mul_pow2(ctx, lhs.im, real_rhs.re),
2145		}
2146	}
2147
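	// complex product: (a + ib)(c + id) = (ac - bd) + i(ad + bc), built from fused multiply-adds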
2148	#[inline(always)]
2149	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2150		Complex {
2151			re: T::simd_mul_add(ctx, lhs.re, rhs.re, T::simd_neg(ctx, T::simd_mul(ctx, lhs.im, rhs.im))),
2152			im: T::simd_mul_add(ctx, lhs.re, rhs.im, T::simd_mul(ctx, lhs.im, rhs.re)),
2153		}
2154	}
2155
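	// conj(lhs) * rhs: (a - ib)(c + id) = (ac + bd) + i(ad - bc)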
2156	#[inline(always)]
2157	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2158		Complex {
2159			re: T::simd_mul_add(ctx, lhs.re, rhs.re, T::simd_mul(ctx, lhs.im, rhs.im)),
2160			im: T::simd_mul_add(ctx, lhs.re, rhs.im, T::simd_neg(ctx, T::simd_mul(ctx, lhs.im, rhs.re))),
2161		}
2162	}
2163
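	// acc + lhs * rhs, expanded the same way as simd_mul above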
2164	#[inline(always)]
2165	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2166		Complex {
2167			re: T::simd_mul_add(ctx, T::simd_neg(ctx, lhs.im), rhs.im, T::simd_mul_add(ctx, lhs.re, rhs.re, acc.re)),
2168			im: T::simd_mul_add(ctx, lhs.re, rhs.im, T::simd_mul_add(ctx, lhs.im, rhs.re, acc.im)),
2169		}
2170	}
2171
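	// acc + conj(lhs) * rhs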
2172	#[inline(always)]
2173	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2174		Complex {
2175			re: T::simd_mul_add(ctx, lhs.im, rhs.im, T::simd_mul_add(ctx, lhs.re, rhs.re, acc.re)),
2176			im: T::simd_mul_add(ctx, lhs.re, rhs.im, T::simd_mul_add(ctx, T::simd_neg(ctx, lhs.im), rhs.re, acc.im)),
2177		}
2178	}
2179
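	// |z|^2 = re^2 + im^2, broadcast into both halves of the result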
2180	#[inline(always)]
2181	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2182		let v = T::simd_abs2_add(ctx, value.re, T::simd_abs2(ctx, value.im));
2183		Complex { re: v, im: v }
2184	}
2185
2186	#[inline(always)]
2187	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2188		let v = T::simd_abs2_add(ctx, value.re, T::simd_abs2_add(ctx, value.im, acc.re));
2189		Complex { re: v, im: v }
2190	}
2191
2192	#[inline(always)]
2193	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
2194		Complex {
2195			re: T::simd_reduce_sum(ctx, value.re),
2196			im: T::simd_reduce_sum(ctx, value.im),
2197		}
2198	}
2199
2200	#[inline(always)]
2201	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
2202		Complex {
2203			re: T::simd_reduce_max(ctx, value.re),
2204			im: T::simd_reduce_max(ctx, value.im),
2205		}
2206	}
2207
2208	#[inline(always)]
2209	fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2210		T::simd_and_mask(
2211			ctx,
2212			T::simd_equal(ctx, real_lhs.re, real_rhs.re),
2213			T::simd_equal(ctx, real_lhs.im, real_rhs.im),
2214		)
2215	}
2216
2217	#[inline(always)]
2218	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2219		T::simd_less_than(ctx, real_lhs.re, real_rhs.re)
2220	}
2221
2222	#[inline(always)]
2223	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2224		T::simd_less_than_or_equal(ctx, real_lhs.re, real_rhs.re)
2225	}
2226
2227	#[inline(always)]
2228	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2229		T::simd_greater_than(ctx, real_lhs.re, real_rhs.re)
2230	}
2231
2232	#[inline(always)]
2233	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2234		T::simd_greater_than_or_equal(ctx, real_lhs.re, real_rhs.re)
2235	}
2236
2237	#[inline(always)]
2238	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2239		Complex {
2240			re: T::simd_select(ctx, mask, lhs.re, rhs.re),
2241			im: T::simd_select(ctx, mask, lhs.im, rhs.im),
2242		}
2243	}
2244
2245	#[inline(always)]
2246	fn simd_index_select<S: Simd>(
2247		ctx: &Self::SimdCtx<S>,
2248		mask: Self::SimdMask<S>,
2249		lhs: Self::SimdIndex<S>,
2250		rhs: Self::SimdIndex<S>,
2251	) -> Self::SimdIndex<S> {
2252		T::simd_index_select(ctx, mask, lhs, rhs)
2253	}
2254
2255	#[inline(always)]
2256	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
2257		T::simd_index_splat(ctx, value)
2258	}
2259
2260	#[inline(always)]
2261	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
2262		T::simd_index_add(ctx, lhs, rhs)
2263	}
2264
2265	#[inline(always)]
2266	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
2267		T::simd_index_less_than(ctx, lhs, rhs)
2268	}
2269
2270	#[inline(always)]
2271	fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
2272		T::simd_and_mask(ctx, lhs, rhs)
2273	}
2274
2275	#[inline(always)]
2276	fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
2277		T::simd_or_mask(ctx, lhs, rhs)
2278	}
2279
2280	#[inline(always)]
2281	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
2282		T::simd_not_mask(ctx, mask)
2283	}
2284
2285	#[inline(always)]
2286	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
2287		T::simd_first_true_mask(ctx, value)
2288	}
2289
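	// a range of complex elements covers twice as many underlying scalars; the doubled range
	// is split across the two consecutive real vectors that make up SimdVec, with each half
	// clamped to [0, n) and empty halves collapsed to 0..0.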
2290	#[inline(always)]
2291	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S> {
2292		let n = core::mem::size_of::<Self::SimdVec<S>>() / core::mem::size_of::<Self>();
2293		let start = start.zx() * 2;
2294		let end = end.zx() * 2;
2295
2296		let mut sa = start.min(n);
2297		let mut ea = end.min(n);
2298		let mut sb = start.max(n) - n;
2299		let mut eb = end.max(n) - n;
2300
2301		if sa == ea {
2302			sa = 0;
2303			ea = 0;
2304		}
2305		if sb == eb {
2306			sb = 0;
2307			eb = 0;
2308		}
2309
2310		let re = T::simd_mem_mask_between(ctx, T::Index::truncate(sa), T::Index::truncate(ea));
2311		let im = T::simd_mem_mask_between(ctx, T::Index::truncate(sb), T::Index::truncate(eb));
2312		Complex { re, im }
2313	}
2314
2315	#[inline(always)]
2316	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S> {
2317		T::simd_mask_between(ctx, start, end)
2318	}
2319
2320	#[inline(always)]
2321	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
2322		Complex {
2323			re: T::simd_mask_load_raw(ctx, mask.re, core::ptr::addr_of!((*ptr).re)),
2324			im: T::simd_mask_load_raw(ctx, mask.im, core::ptr::addr_of!((*ptr).im)),
2325		}
2326	}
2327
2328	#[inline(always)]
2329	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
2330		T::simd_mask_store_raw(ctx, mask.re, core::ptr::addr_of_mut!((*ptr).re), values.re);
2331		T::simd_mask_store_raw(ctx, mask.im, core::ptr::addr_of_mut!((*ptr).im), values.im);
2332	}
2333}
2334
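// hidden repr(transparent) wrappers around num_complex::Complex; these serve as the "native"
// interleaved complex types (and their conjugated counterparts), letting the impls below use
// pulp's c32s/c64s SIMD operations directly.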
2335#[repr(transparent)]
2336#[doc(hidden)]
2337#[derive(Copy, Clone, Debug, PartialEq)]
2338pub struct ComplexImpl<T>(Complex<T>);
2339
2340#[repr(transparent)]
2341#[doc(hidden)]
2342#[derive(Copy, Clone, Debug, PartialEq)]
2343pub struct ComplexImplConj<T>(Complex<T>);
2344
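// the Conjugate impls pair each wrapper with its conjugated counterpart at the type level;
// IS_CANONICAL marks which of the two is the canonical (non-conjugated) representation.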
2345unsafe impl Conjugate for ComplexImpl<f32> {
2346	type Canonical = ComplexImpl<f32>;
2347	type Conj = ComplexImplConj<f32>;
2348
2349	const IS_CANONICAL: bool = true;
2350}
2351unsafe impl Conjugate for ComplexImplConj<f32> {
2352	type Canonical = ComplexImpl<f32>;
2353	type Conj = ComplexImpl<f32>;
2354
2355	const IS_CANONICAL: bool = false;
2356}
2357unsafe impl Conjugate for ComplexImpl<f64> {
2358	type Canonical = ComplexImpl<f64>;
2359	type Conj = ComplexImplConj<f64>;
2360
2361	const IS_CANONICAL: bool = true;
2362}
2363unsafe impl Conjugate for ComplexImplConj<f64> {
2364	type Canonical = ComplexImpl<f64>;
2365	type Conj = ComplexImpl<f64>;
2366
2367	const IS_CANONICAL: bool = false;
2368}
2369
2370impl<T: RealField> core::ops::Neg for &ComplexImpl<T> {
2371	type Output = ComplexImpl<T>;
2372
2373	#[inline]
2374	fn neg(self) -> Self::Output {
2375		use math_utils::*;
2376
2377		ComplexImpl(neg(&self.0))
2378	}
2379}
2380impl<T: RealField> core::ops::Add<&ComplexImpl<T>> for &ComplexImpl<T> {
2381	type Output = ComplexImpl<T>;
2382
2383	#[inline]
2384	fn add(self, rhs: &ComplexImpl<T>) -> Self::Output {
2385		use math_utils::*;
2386
2387		ComplexImpl(add(&self.0, &rhs.0))
2388	}
2389}
2390impl<T: RealField> core::ops::Sub<&ComplexImpl<T>> for &ComplexImpl<T> {
2391	type Output = ComplexImpl<T>;
2392
2393	#[inline]
2394	fn sub(self, rhs: &ComplexImpl<T>) -> Self::Output {
2395		use math_utils::*;
2396
2397		ComplexImpl(sub(&self.0, &rhs.0))
2398	}
2399}
2400impl<T: RealField> core::ops::Mul<&ComplexImpl<T>> for &ComplexImpl<T> {
2401	type Output = ComplexImpl<T>;
2402
2403	#[inline]
2404	fn mul(self, rhs: &ComplexImpl<T>) -> Self::Output {
2405		use math_utils::*;
2406
2407		ComplexImpl(mul(&self.0, &rhs.0))
2408	}
2409}
2410
2411impl<T> From<Complex<T>> for ComplexImpl<T> {
2412	#[inline]
2413	fn from(value: Complex<T>) -> Self {
2414		Self(value)
2415	}
2416}
2417
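// native-complex f32 field: SIMD storage is the interleaved S::c32s vector. the size checks
// below distinguish, at compile time, a full c32 vector (same size as S::f32s) from the scalar
// fallback where S::c32s is a single Complex<f32>; any other layout is treated as unreachable.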
2418impl ComplexField for ComplexImpl<f32> {
2419	type Arch = pulp::Arch;
2420	type Index = u32;
2421	type Real = f32;
2422	type SimdCtx<S: Simd> = S;
2423	type SimdIndex<S: Simd> = S::u32s;
2424	type SimdMask<S: Simd> = S::m32s;
2425	type SimdMemMask<S: Simd> = pulp::MemMask<S::m32s>;
2426	type SimdVec<S: Simd> = S::c32s;
2427	type Unit = f32;
2428
2429	const IS_NATIVE_C32: bool = true;
2430	const IS_REAL: bool = false;
2431	const SIMD_ABS_SPLIT_REAL_IMAG: bool = true;
2432	const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
2433
2434	#[inline]
2435	fn zero_impl() -> Self {
2436		Complex {
2437			re: f32::zero_impl(),
2438			im: f32::zero_impl(),
2439		}
2440		.into()
2441	}
2442
2443	#[inline]
2444	fn one_impl() -> Self {
2445		Complex {
2446			re: f32::one_impl(),
2447			im: f32::zero_impl(),
2448		}
2449		.into()
2450	}
2451
2452	#[inline]
2453	fn nan_impl() -> Self {
2454		Complex {
2455			re: f32::nan_impl(),
2456			im: f32::nan_impl(),
2457		}
2458		.into()
2459	}
2460
2461	#[inline]
2462	fn infinity_impl() -> Self {
2463		Complex {
2464			re: f32::infinity_impl(),
2465			im: f32::infinity_impl(),
2466		}
2467		.into()
2468	}
2469
2470	#[inline]
2471	fn from_real_impl(real: &Self::Real) -> Self {
2472		Complex {
2473			re: real.clone(),
2474			im: f32::zero_impl(),
2475		}
2476		.into()
2477	}
2478
2479	#[inline]
2480	fn from_f64_impl(real: f64) -> Self {
2481		Complex {
2482			re: f32::from_f64_impl(real),
2483			im: f32::zero_impl(),
2484		}
2485		.into()
2486	}
2487
2488	#[inline]
2489	fn real_part_impl(value: &Self) -> Self::Real {
2490		value.0.re.clone()
2491	}
2492
2493	#[inline]
2494	fn imag_part_impl(value: &Self) -> Self::Real {
2495		value.0.im.clone()
2496	}
2497
2498	#[inline]
2499	fn copy_impl(value: &Self) -> Self {
2500		value.clone()
2501	}
2502
2503	#[inline]
2504	fn conj_impl(value: &Self) -> Self {
2505		Complex {
2506			re: value.0.re.clone(),
2507			im: value.0.im.neg_by_ref(),
2508		}
2509		.into()
2510	}
2511
2512	#[inline]
2513	fn recip_impl(value: &Self) -> Self {
2514		let (re, im) = recip_impl(value.0.re.clone(), value.0.im.clone());
2515		Complex { re, im }.into()
2516	}
2517
2518	#[inline]
2519	fn sqrt_impl(value: &Self) -> Self {
2520		let (re, im) = sqrt_impl(value.0.re.clone(), value.0.im.clone());
2521		Complex { re, im }.into()
2522	}
2523
2524	#[inline]
2525	fn abs_impl(value: &Self) -> Self::Real {
2526		abs_impl(value.0.re.clone(), value.0.im.clone())
2527	}
2528
2529	#[inline]
2530	#[faer_macros::math]
2531	fn abs1_impl(value: &Self) -> Self::Real {
2532		abs1(value.0.re) + abs1(value.0.im)
2533	}
2534
2535	#[inline]
2536	#[faer_macros::math]
2537	fn abs2_impl(value: &Self) -> Self::Real {
2538		abs2(value.0.re) + abs2(value.0.im)
2539	}
2540
2541	#[inline]
2542	#[faer_macros::math]
2543	fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2544		Complex {
2545			re: lhs.0.re * *rhs,
2546			im: lhs.0.im * *rhs,
2547		}
2548		.into()
2549	}
2550
2551	#[inline]
2552	#[faer_macros::math]
2553	fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2554		Complex {
2555			re: mul_pow2(lhs.0.re, rhs),
2556			im: mul_pow2(lhs.0.im, rhs),
2557		}
2558		.into()
2559	}
2560
2561	#[inline]
2562	#[faer_macros::math]
2563	fn is_finite_impl(value: &Self) -> bool {
2564		is_finite(value.0.re) && is_finite(value.0.im)
2565	}
2566
2567	#[inline(always)]
2568	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
2569		f32::simd_ctx(simd)
2570	}
2571
2572	#[inline(always)]
2573	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
2574		f32::ctx_from_simd(ctx)
2575	}
2576
2577	#[inline(always)]
2578	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
2579		ctx.splat_c32s(value.0)
2580	}
2581
2582	#[inline(always)]
2583	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
2584		ctx.splat_c32s(Complex { re: *value, im: *value })
2585	}
2586
2587	#[inline(always)]
2588	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2589		ctx.add_c32s(lhs, rhs)
2590	}
2591
2592	#[inline(always)]
2593	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2594		ctx.sub_c32s(lhs, rhs)
2595	}
2596
2597	#[inline(always)]
2598	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2599		ctx.neg_c32s(value)
2600	}
2601
2602	#[inline(always)]
2603	fn simd_conj<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2604		ctx.conj_c32s(value)
2605	}
2606
2607	#[inline(always)]
2608	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2609		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2610			bytemuck::cast(ctx.abs_f32s(bytemuck::cast(value)))
2611		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2612			let value: Complex<f32> = bytemuck::cast(value);
2613			let v = value.re.abs() + value.im.abs();
2614			bytemuck::cast(Complex { re: v, im: v })
2615		} else {
2616			panic!();
2617		}
2618	}
2619
2620	#[inline(always)]
2621	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2622		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2623			bytemuck::cast(ctx.abs_f32s(bytemuck::cast(value)))
2624		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2625			let value: Complex<f32> = bytemuck::cast(value);
2626			let re = value.re.abs();
2627			let im = value.im.abs();
2628			let v = if re > im { re } else { im };
2629			bytemuck::cast(Complex { re: v, im: v })
2630		} else {
2631			panic!();
2632		}
2633	}
2634
2635	#[inline(always)]
2636	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2637		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2638			bytemuck::cast(ctx.mul_f32s(bytemuck::cast(lhs), bytemuck::cast(real_rhs)))
2639		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2640			let mut lhs: Complex<f32> = bytemuck::cast(lhs);
2641			let rhs: Complex<f32> = bytemuck::cast(real_rhs);
2642			lhs *= rhs.re;
2643			bytemuck::cast(lhs)
2644		} else {
2645			panic!();
2646		}
2647	}
2648
2649	#[inline(always)]
2650	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2651		Self::simd_mul_real(ctx, lhs, real_rhs)
2652	}
2653
2654	#[inline(always)]
2655	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2656		ctx.mul_e_c32s(lhs, rhs)
2657	}
2658
2659	#[inline(always)]
2660	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2661		ctx.conj_mul_e_c32s(lhs, rhs)
2662	}
2663
2664	#[inline(always)]
2665	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2666		ctx.mul_add_e_c32s(lhs, rhs, acc)
2667	}
2668
2669	#[inline(always)]
2670	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2671		ctx.conj_mul_add_e_c32s(lhs, rhs, acc)
2672	}
2673
2674	#[inline(always)]
2675	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2676		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2677			bytemuck::cast(ctx.mul_f32s(bytemuck::cast(value), bytemuck::cast(value)))
2678		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2679			let value: Complex<f32> = bytemuck::cast(value);
2680			let v = value.re * value.re + value.im * value.im;
2681			bytemuck::cast(Complex { re: v, im: v })
2682		} else {
2683			panic!();
2684		}
2685	}
2686
2687	#[inline(always)]
2688	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2689		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2690			bytemuck::cast(ctx.mul_add_f32s(bytemuck::cast(value), bytemuck::cast(value), bytemuck::cast(acc)))
2691		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2692			let value: Complex<f32> = bytemuck::cast(value);
2693			let acc: Complex<f32> = bytemuck::cast(acc);
2694			let v = value.re * value.re + value.im * value.im + acc.re;
2695			bytemuck::cast(Complex { re: v, im: v })
2696		} else {
2697			panic!();
2698		}
2699	}
2700
2701	#[inline(always)]
2702	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
2703		ctx.reduce_sum_c32s(value).into()
2704	}
2705
2706	#[inline(always)]
2707	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
2708		ctx.reduce_max_c32s(value).into()
2709	}
2710
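	// equality on interleaved complex vectors is unsupported here, hence the panic; the ordering
	// comparisons below assume both operands already hold real values in every lane (as the
	// `real_` parameter names suggest), e.g. the outputs of simd_abs1/simd_abs2.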
2711	#[inline(always)]
2712	fn simd_equal<S: Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
2713		panic!()
2714	}
2715
2716	#[inline(always)]
2717	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2718		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2719			ctx.less_than_f32s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
2720		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2721			assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });
2722
2723			let lhs: Complex<f32> = bytemuck::cast(real_lhs);
2724			let rhs: Complex<f32> = bytemuck::cast(real_rhs);
2725			unsafe { core::mem::transmute_copy(&(lhs.re < rhs.re)) }
2726		} else {
2727			panic!();
2728		}
2729	}
2730
2731	#[inline(always)]
2732	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2733		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2734			ctx.less_than_or_equal_f32s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
2735		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2736			assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });
2737
2738			let lhs: Complex<f32> = bytemuck::cast(real_lhs);
2739			let rhs: Complex<f32> = bytemuck::cast(real_rhs);
2740			unsafe { core::mem::transmute_copy(&(lhs.re <= rhs.re)) }
2741		} else {
2742			panic!();
2743		}
2744	}
2745
2746	#[inline(always)]
2747	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2748		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2749			ctx.greater_than_f32s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
2750		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2751			assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });
2752
2753			let lhs: Complex<f32> = bytemuck::cast(real_lhs);
2754			let rhs: Complex<f32> = bytemuck::cast(real_rhs);
2755			unsafe { core::mem::transmute_copy(&(lhs.re > rhs.re)) }
2756		} else {
2757			panic!();
2758		}
2759	}
2760
2761	#[inline(always)]
2762	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2763		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2764			ctx.greater_than_or_equal_f32s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
2765		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2766			assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });
2767
2768			let lhs: Complex<f32> = bytemuck::cast(real_lhs);
2769			let rhs: Complex<f32> = bytemuck::cast(real_rhs);
2770			unsafe { core::mem::transmute_copy(&(lhs.re >= rhs.re)) }
2771		} else {
2772			panic!();
2773		}
2774	}
2775
2776	#[inline(always)]
2777	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2778		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2779			bytemuck::cast(ctx.select_f32s_m32s(mask, bytemuck::cast(lhs), bytemuck::cast(rhs)))
2780		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2781			assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });
2782			let mask: bool = unsafe { core::mem::transmute_copy(&mask) };
2783			let lhs: Complex<f32> = bytemuck::cast(lhs);
2784			let rhs: Complex<f32> = bytemuck::cast(rhs);
2785			bytemuck::cast(if mask { lhs } else { rhs })
2786		} else {
2787			panic!();
2788		}
2789	}
2790
2791	#[inline(always)]
2792	fn simd_index_select<S: Simd>(
2793		ctx: &Self::SimdCtx<S>,
2794		mask: Self::SimdMask<S>,
2795		lhs: Self::SimdIndex<S>,
2796		rhs: Self::SimdIndex<S>,
2797	) -> Self::SimdIndex<S> {
2798		f32::simd_index_select(ctx, mask, lhs, rhs)
2799	}
2800
2801	#[inline(always)]
2802	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
2803		f32::simd_index_splat(ctx, value)
2804	}
2805
2806	#[inline(always)]
2807	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
2808		f32::simd_index_add(ctx, lhs, rhs)
2809	}
2810
2811	#[inline(always)]
2812	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
2813		f32::simd_index_less_than(ctx, lhs, rhs)
2814	}
2815
2816	#[inline(always)]
2817	fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
2818		f32::simd_and_mask(ctx, lhs, rhs)
2819	}
2820
2821	#[inline(always)]
2822	fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
2823		f32::simd_or_mask(ctx, lhs, rhs)
2824	}
2825
2826	#[inline(always)]
2827	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
2828		f32::simd_not_mask(ctx, mask)
2829	}
2830
2831	#[inline(always)]
2832	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
2833		f32::simd_first_true_mask(ctx, value)
2834	}
2835
2836	#[inline(always)]
2837	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S> {
2838		ctx.mask_between_m32s((2 * start) as _, (2 * end) as _)
2839	}
2840
2841	#[inline(always)]
2842	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S> {
2843		ctx.mask_between_m32s((2 * start) as _, (2 * end) as _).mask()
2844	}
2845
2846	#[inline(always)]
2847	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
2848		ctx.mask_load_ptr_c32s(mask, ptr as _)
2849	}
2850
2851	#[inline(always)]
2852	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
2853		ctx.mask_store_ptr_c32s(mask, ptr as _, values);
2854	}
2855}
2856
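// same scheme as the ComplexImpl<f32> implementation above, instantiated for interleaved c64 vectors.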
2857impl ComplexField for ComplexImpl<f64> {
2858	type Arch = pulp::Arch;
2859	type Index = u64;
2860	type Real = f64;
2861	type SimdCtx<S: Simd> = S;
2862	type SimdIndex<S: Simd> = S::u64s;
2863	type SimdMask<S: Simd> = S::m64s;
2864	type SimdMemMask<S: Simd> = pulp::MemMask<S::m64s>;
2865	type SimdVec<S: Simd> = S::c64s;
2866	type Unit = f64;
2867
2868	const IS_NATIVE_C64: bool = true;
2869	const IS_REAL: bool = false;
2870	const SIMD_ABS_SPLIT_REAL_IMAG: bool = true;
2871	const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
2872
2873	#[inline]
2874	fn zero_impl() -> Self {
2875		Complex {
2876			re: f64::zero_impl(),
2877			im: f64::zero_impl(),
2878		}
2879		.into()
2880	}
2881
2882	#[inline]
2883	fn one_impl() -> Self {
2884		Complex {
2885			re: f64::one_impl(),
2886			im: f64::zero_impl(),
2887		}
2888		.into()
2889	}
2890
2891	#[inline]
2892	fn nan_impl() -> Self {
2893		Complex {
2894			re: f64::nan_impl(),
2895			im: f64::nan_impl(),
2896		}
2897		.into()
2898	}
2899
2900	#[inline]
2901	fn infinity_impl() -> Self {
2902		Complex {
2903			re: f64::infinity_impl(),
2904			im: f64::infinity_impl(),
2905		}
2906		.into()
2907	}
2908
2909	#[inline]
2910	fn from_real_impl(real: &Self::Real) -> Self {
2911		Complex {
2912			re: real.clone(),
2913			im: f64::zero_impl(),
2914		}
2915		.into()
2916	}
2917
2918	#[inline]
2919	fn from_f64_impl(real: f64) -> Self {
2920		Complex {
2921			re: f64::from_f64_impl(real),
2922			im: f64::zero_impl(),
2923		}
2924		.into()
2925	}
2926
2927	#[inline]
2928	fn real_part_impl(value: &Self) -> Self::Real {
2929		value.0.re.clone()
2930	}
2931
2932	#[inline]
2933	fn imag_part_impl(value: &Self) -> Self::Real {
2934		value.0.im.clone()
2935	}
2936
2937	#[inline]
2938	fn copy_impl(value: &Self) -> Self {
2939		value.clone()
2940	}
2941
2942	#[inline]
2943	fn conj_impl(value: &Self) -> Self {
2944		Complex {
2945			re: value.0.re.clone(),
2946			im: value.0.im.neg_by_ref(),
2947		}
2948		.into()
2949	}
2950
2951	#[inline]
2952	fn recip_impl(value: &Self) -> Self {
2953		let (re, im) = recip_impl(value.0.re.clone(), value.0.im.clone());
2954		Complex { re, im }.into()
2955	}
2956
2957	#[inline]
2958	fn sqrt_impl(value: &Self) -> Self {
2959		let (re, im) = sqrt_impl(value.0.re.clone(), value.0.im.clone());
2960		Complex { re, im }.into()
2961	}
2962
2963	#[inline]
2964	fn abs_impl(value: &Self) -> Self::Real {
2965		abs_impl(value.0.re.clone(), value.0.im.clone())
2966	}
2967
2968	#[inline]
2969	#[faer_macros::math]
2970	fn abs1_impl(value: &Self) -> Self::Real {
2971		abs1(value.0.re) + abs1(value.0.im)
2972	}
2973
2974	#[inline]
2975	#[faer_macros::math]
2976	fn abs2_impl(value: &Self) -> Self::Real {
2977		abs2(value.0.re) + abs2(value.0.im)
2978	}
2979
2980	#[inline]
2981	#[faer_macros::math]
2982	fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2983		Complex {
2984			re: lhs.0.re * *rhs,
2985			im: lhs.0.im * *rhs,
2986		}
2987		.into()
2988	}
2989
2990	#[inline]
2991	#[faer_macros::math]
2992	fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2993		Complex {
2994			re: mul_pow2(lhs.0.re, rhs),
2995			im: mul_pow2(lhs.0.im, rhs),
2996		}
2997		.into()
2998	}
2999
3000	#[inline]
3001	#[faer_macros::math]
3002	fn is_finite_impl(value: &Self) -> bool {
3003		is_finite(value.0.re) && is_finite(value.0.im)
3004	}
3005
3006	#[inline(always)]
3007	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
3008		f64::simd_ctx(simd)
3009	}
3010
3011	#[inline(always)]
3012	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
3013		f64::ctx_from_simd(ctx)
3014	}
3015
3016	#[inline(always)]
3017	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
3018		ctx.splat_c64s(value.0)
3019	}
3020
3021	#[inline(always)]
3022	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
3023		ctx.splat_c64s(Complex { re: *value, im: *value })
3024	}
3025
3026	#[inline(always)]
3027	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3028		ctx.add_c64s(lhs, rhs)
3029	}
3030
3031	#[inline(always)]
3032	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3033		ctx.sub_c64s(lhs, rhs)
3034	}
3035
3036	#[inline(always)]
3037	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
3038		ctx.neg_c64s(value)
3039	}
3040
3041	#[inline(always)]
3042	fn simd_conj<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
3043		ctx.conj_c64s(value)
3044	}
3045
3046	#[inline(always)]
3047	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
3048		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3049			bytemuck::cast(ctx.abs_f64s(bytemuck::cast(value)))
3050		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3051			let value: Complex<f64> = bytemuck::cast(value);
3052			let v = value.re.abs() + value.im.abs();
3053			bytemuck::cast(Complex { re: v, im: v })
3054		} else {
3055			panic!();
3056		}
3057	}
3058
3059	#[inline(always)]
3060	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
3061		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3062			bytemuck::cast(ctx.abs_f64s(bytemuck::cast(value)))
3063		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3064			let value: Complex<f64> = bytemuck::cast(value);
3065			let re = value.re.abs();
3066			let im = value.im.abs();
3067			let v = if re > im { re } else { im };
3068			bytemuck::cast(Complex { re: v, im: v })
3069		} else {
3070			panic!();
3071		}
3072	}
3073
3074	#[inline(always)]
3075	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3076		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3077			bytemuck::cast(ctx.mul_f64s(bytemuck::cast(lhs), bytemuck::cast(real_rhs)))
3078		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3079			let mut lhs: Complex<f64> = bytemuck::cast(lhs);
3080			let rhs: Complex<f64> = bytemuck::cast(real_rhs);
3081			lhs *= rhs.re;
3082			bytemuck::cast(lhs)
3083		} else {
3084			panic!();
3085		}
3086	}
3087
3088	#[inline(always)]
3089	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3090		Self::simd_mul_real(ctx, lhs, real_rhs)
3091	}
3092
3093	#[inline(always)]
3094	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3095		ctx.mul_e_c64s(lhs, rhs)
3096	}
3097
3098	#[inline(always)]
3099	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3100		ctx.conj_mul_e_c64s(lhs, rhs)
3101	}
3102
3103	#[inline(always)]
3104	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
3105		ctx.mul_add_e_c64s(lhs, rhs, acc)
3106	}
3107
3108	#[inline(always)]
3109	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
3110		ctx.conj_mul_add_e_c64s(lhs, rhs, acc)
3111	}
3112
3113	#[inline(always)]
3114	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
3115		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3116			bytemuck::cast(ctx.mul_f64s(bytemuck::cast(value), bytemuck::cast(value)))
3117		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3118			let value: Complex<f64> = bytemuck::cast(value);
3119			let v = value.re * value.re + value.im * value.im;
3120			bytemuck::cast(Complex { re: v, im: v })
3121		} else {
3122			panic!();
3123		}
3124	}
3125
3126	#[inline(always)]
3127	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
3128		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3129			bytemuck::cast(ctx.mul_add_f64s(bytemuck::cast(value), bytemuck::cast(value), bytemuck::cast(acc)))
3130		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3131			let value: Complex<f64> = bytemuck::cast(value);
3132			let acc: Complex<f64> = bytemuck::cast(acc);
3133			let v = value.re * value.re + value.im * value.im + acc.re;
3134			bytemuck::cast(Complex { re: v, im: v })
3135		} else {
3136			panic!();
3137		}
3138	}
3139
3140	#[inline(always)]
3141	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
3142		ctx.reduce_sum_c64s(value).into()
3143	}
3144
3145	#[inline(always)]
3146	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
3147		ctx.reduce_max_c64s(value).into()
3148	}
3149
3150	#[inline(always)]
3151	fn simd_equal<S: Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3152		panic!()
3153	}
3154
3155	#[inline(always)]
3156	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
3157		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3158			ctx.less_than_f64s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
3159		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3160			assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });
3161
3162			let lhs: Complex<f64> = bytemuck::cast(real_lhs);
3163			let rhs: Complex<f64> = bytemuck::cast(real_rhs);
3164			unsafe { core::mem::transmute_copy(&(lhs.re < rhs.re)) }
3165		} else {
3166			panic!();
3167		}
3168	}
3169
3170	#[inline(always)]
3171	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
3172		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3173			ctx.less_than_or_equal_f64s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
3174		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3175			assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });
3176
3177			let lhs: Complex<f64> = bytemuck::cast(real_lhs);
3178			let rhs: Complex<f64> = bytemuck::cast(real_rhs);
3179			unsafe { core::mem::transmute_copy(&(lhs.re <= rhs.re)) }
3180		} else {
3181			panic!();
3182		}
3183	}
3184
3185	#[inline(always)]
3186	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
3187		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3188			ctx.greater_than_f64s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
3189		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3190			assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });
3191
3192			let lhs: Complex<f64> = bytemuck::cast(real_lhs);
3193			let rhs: Complex<f64> = bytemuck::cast(real_rhs);
3194			unsafe { core::mem::transmute_copy(&(lhs.re > rhs.re)) }
3195		} else {
3196			panic!();
3197		}
3198	}
3199
3200	#[inline(always)]
3201	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
3202		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3203			ctx.greater_than_or_equal_f64s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
3204		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3205			assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });
3206
3207			let lhs: Complex<f64> = bytemuck::cast(real_lhs);
3208			let rhs: Complex<f64> = bytemuck::cast(real_rhs);
3209			unsafe { core::mem::transmute_copy(&(lhs.re >= rhs.re)) }
3210		} else {
3211			panic!();
3212		}
3213	}
3214
3215	#[inline(always)]
3216	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3217		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3218			bytemuck::cast(ctx.select_f64s_m64s(mask, bytemuck::cast(lhs), bytemuck::cast(rhs)))
3219		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3220			assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });
3221			let mask: bool = unsafe { core::mem::transmute_copy(&mask) };
3222			let lhs: Complex<f64> = bytemuck::cast(lhs);
3223			let rhs: Complex<f64> = bytemuck::cast(rhs);
3224			bytemuck::cast(if mask { lhs } else { rhs })
3225		} else {
3226			panic!();
3227		}
3228	}
3229
3230	#[inline(always)]
3231	fn simd_index_select<S: Simd>(
3232		ctx: &Self::SimdCtx<S>,
3233		mask: Self::SimdMask<S>,
3234		lhs: Self::SimdIndex<S>,
3235		rhs: Self::SimdIndex<S>,
3236	) -> Self::SimdIndex<S> {
3237		f64::simd_index_select(ctx, mask, lhs, rhs)
3238	}
3239
3240	#[inline(always)]
3241	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
3242		f64::simd_index_splat(ctx, value)
3243	}
3244
3245	#[inline(always)]
3246	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
3247		f64::simd_index_add(ctx, lhs, rhs)
3248	}
3249
3250	#[inline(always)]
3251	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
3252		f64::simd_index_less_than(ctx, lhs, rhs)
3253	}
3254
3255	#[inline(always)]
3256	fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
3257		f64::simd_and_mask(ctx, lhs, rhs)
3258	}
3259
3260	#[inline(always)]
3261	fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
3262		f64::simd_or_mask(ctx, lhs, rhs)
3263	}
3264
3265	#[inline(always)]
3266	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
3267		f64::simd_not_mask(ctx, mask)
3268	}
3269
3270	#[inline(always)]
3271	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
3272		f64::simd_first_true_mask(ctx, value)
3273	}
3274
3275	#[inline(always)]
3276	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S> {
3277		ctx.mask_between_m64s((2 * start) as _, (2 * end) as _)
3278	}
3279
3280	#[inline(always)]
3281	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S> {
3282		ctx.mask_between_m64s((2 * start) as _, (2 * end) as _).mask()
3283	}
3284
3285	#[inline(always)]
3286	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
3287		ctx.mask_load_ptr_c64s(mask, ptr as _)
3288	}
3289
3290	#[inline(always)]
3291	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
3292		ctx.mask_store_ptr_c64s(mask, ptr as _, values);
3293	}
3294}
3295
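// Symbolic is a zero-sized placeholder value: all arithmetic on it is a no-op, which presumably
// makes it suitable for structure-only computations where numeric values are irrelevant.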
3296#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
3297pub struct Symbolic;
3298
3299impl core::ops::Add for Symbolic {
3300	type Output = Self;
3301
3302	fn add(self, _: Self) -> Self {
3303		Self
3304	}
3305}
3306impl core::ops::Sub for Symbolic {
3307	type Output = Self;
3308
3309	fn sub(self, _: Self) -> Self {
3310		Self
3311	}
3312}
3313impl core::ops::Mul for Symbolic {
3314	type Output = Self;
3315
3316	fn mul(self, _: Self) -> Self {
3317		Self
3318	}
3319}
3320impl core::ops::Div for Symbolic {
3321	type Output = Self;
3322
3323	fn div(self, _: Self) -> Self {
3324		Self
3325	}
3326}
3327impl core::ops::Neg for Symbolic {
3328	type Output = Self;
3329
3330	fn neg(self) -> Self {
3331		Self
3332	}
3333}
3334
3335impl core::ops::Add for &Symbolic {
3336	type Output = Symbolic;
3337
3338	fn add(self, _: Self) -> Symbolic {
3339		Symbolic
3340	}
3341}
3342impl core::ops::Sub for &Symbolic {
3343	type Output = Symbolic;
3344
3345	fn sub(self, _: Self) -> Symbolic {
3346		Symbolic
3347	}
3348}
3349impl core::ops::Mul for &Symbolic {
3350	type Output = Symbolic;
3351
3352	fn mul(self, _: Self) -> Symbolic {
3353		Symbolic
3354	}
3355}
3356impl core::ops::Div for &Symbolic {
3357	type Output = Symbolic;
3358
3359	fn div(self, _: Self) -> Symbolic {
3360		Symbolic
3361	}
3362}
3363impl core::ops::Neg for &Symbolic {
3364	type Output = Symbolic;
3365
3366	fn neg(self) -> Symbolic {
3367		Symbolic
3368	}
3369}
3370
3371impl core::ops::Rem for Symbolic {
3372	type Output = Self;
3373
3374	fn rem(self, _: Self) -> Self {
3375		Self
3376	}
3377}
3378impl num_traits::Zero for Symbolic {
3379	fn zero() -> Self {
3380		Self
3381	}
3382
3383	fn is_zero(&self) -> bool {
3384		true
3385	}
3386}
3387impl num_traits::One for Symbolic {
3388	fn one() -> Self {
3389		Self
3390	}
3391
3392	fn is_one(&self) -> bool {
3393		true
3394	}
3395}
3396impl num_traits::Num for Symbolic {
3397	type FromStrRadixErr = core::convert::Infallible;
3398
3399	fn from_str_radix(_: &str, _: u32) -> Result<Self, Self::FromStrRadixErr> {
3400		Ok(Self)
3401	}
3402}
3403
3404impl Symbolic {
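	// Symbolic is a ZST, so a slice of any length may be materialized from a dangling,
	// well-aligned pointer without allocating.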
3405	#[inline]
3406	pub fn materialize(len: usize) -> &'static mut [Self] {
3407		unsafe { core::slice::from_raw_parts_mut(core::ptr::NonNull::dangling().as_ptr(), len) }
3408	}
3409}
3410
3411impl RealField for Symbolic {
3412	fn epsilon_impl() -> Self {
3413		Self
3414	}
3415
3416	fn nbits_impl() -> usize {
3417		0
3418	}
3419
3420	fn min_positive_impl() -> Self {
3421		Self
3422	}
3423
3424	fn max_positive_impl() -> Self {
3425		Self
3426	}
3427
3428	fn sqrt_min_positive_impl() -> Self {
3429		Self
3430	}
3431
3432	fn sqrt_max_positive_impl() -> Self {
3433		Self
3434	}
3435}
3436
3437impl ComplexField for Symbolic {
3438	type Arch = pulp::Scalar;
3439	type Index = usize;
3440	type Real = Self;
3441	type SimdCtx<S: pulp::Simd> = S;
3442	type SimdIndex<S: pulp::Simd> = ();
3443	type SimdMask<S: pulp::Simd> = ();
3444	type SimdMemMask<S: pulp::Simd> = ();
3445	type SimdVec<S: pulp::Simd> = ();
3446	type Unit = Self;
3447
3448	const IS_REAL: bool = true;
3449	const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Copy;
3450
3451	fn zero_impl() -> Self {
3452		Self
3453	}
3454
3455	fn one_impl() -> Self {
3456		Self
3457	}
3458
3459	fn nan_impl() -> Self {
3460		Self
3461	}
3462
3463	fn infinity_impl() -> Self {
3464		Self
3465	}
3466
3467	fn from_real_impl(_: &Self::Real) -> Self {
3468		Self
3469	}
3470
3471	fn from_f64_impl(_: f64) -> Self {
3472		Self
3473	}
3474
3475	fn real_part_impl(_: &Self) -> Self::Real {
3476		Self
3477	}
3478
3479	fn imag_part_impl(_: &Self) -> Self::Real {
3480		Self
3481	}
3482
3483	fn copy_impl(_: &Self) -> Self {
3484		Self
3485	}
3486
3487	fn conj_impl(_: &Self) -> Self {
3488		Self
3489	}
3490
3491	fn recip_impl(_: &Self) -> Self {
3492		Self
3493	}
3494
3495	fn sqrt_impl(_: &Self) -> Self {
3496		Self
3497	}
3498
3499	fn abs_impl(_: &Self) -> Self::Real {
3500		Self
3501	}
3502
3503	fn abs1_impl(_: &Self) -> Self::Real {
3504		Self
3505	}
3506
3507	fn abs2_impl(_: &Self) -> Self::Real {
3508		Self
3509	}
3510
3511	fn mul_real_impl(_: &Self, _: &Self::Real) -> Self {
3512		Self
3513	}
3514
3515	fn mul_pow2_impl(_: &Self, _: &Self::Real) -> Self {
3516		Self
3517	}
3518
3519	fn is_finite_impl(_: &Self) -> bool {
3520		true
3521	}
3522
3523	fn simd_ctx<S: pulp::Simd>(simd: S) -> Self::SimdCtx<S> {
3524		simd
3525	}
3526
3527	fn ctx_from_simd<S: pulp::Simd>(simd: &Self::SimdCtx<S>) -> S {
3528		*simd
3529	}
3530
3531	fn simd_mem_mask_between<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::Index, _: Self::Index) -> Self::SimdMemMask<S> {
3532		()
3533	}
3534
3535	unsafe fn simd_mask_load_raw<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMemMask<S>, _: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
3536		()
3537	}
3538
3539	unsafe fn simd_mask_store_raw<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMemMask<S>, _: *mut Self::SimdVec<S>, _: Self::SimdVec<S>) {
3540		()
3541	}
3542
3543	fn simd_splat<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: &Self) -> Self::SimdVec<S> {
3544		()
3545	}
3546
3547	fn simd_splat_real<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: &Self::Real) -> Self::SimdVec<S> {
3548		()
3549	}
3550
3551	fn simd_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3552		()
3553	}
3554
3555	fn simd_sub<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3556		()
3557	}
3558
3559	fn simd_neg<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3560		()
3561	}
3562
3563	fn simd_conj<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3564		()
3565	}
3566
3567	fn simd_abs1<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3568		()
3569	}
3570
3571	fn simd_abs_max<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3572		()
3573	}
3574
3575	fn simd_mul_real<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3576		()
3577	}
3578
3579	fn simd_mul_pow2<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3580		()
3581	}
3582
3583	fn simd_mul<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3584		()
3585	}
3586
3587	fn simd_conj_mul<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3588		()
3589	}
3590
3591	fn simd_mul_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3592		()
3593	}
3594
3595	fn simd_conj_mul_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3596		()
3597	}
3598
3599	fn simd_abs2<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3600		()
3601	}
3602
3603	fn simd_abs2_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3604		()
3605	}
3606
3607	fn simd_reduce_sum<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self {
3608		Self
3609	}
3610
3611	fn simd_reduce_max<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self {
3612		Self
3613	}
3614
3615	fn simd_equal<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3616		()
3617	}
3618
3619	fn simd_less_than<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3620		()
3621	}
3622
3623	fn simd_less_than_or_equal<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3624		()
3625	}
3626
3627	fn simd_greater_than<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3628		()
3629	}
3630
3631	fn simd_greater_than_or_equal<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3632		()
3633	}
3634
3635	fn simd_select<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3636		()
3637	}
3638
3639	fn simd_index_select<S: pulp::Simd>(
3640		_: &Self::SimdCtx<S>,
3641		_: Self::SimdMask<S>,
3642		_: Self::SimdIndex<S>,
3643		_: Self::SimdIndex<S>,
3644	) -> Self::SimdIndex<S> {
3645		()
3646	}
3647
3648	fn simd_index_splat<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::Index) -> Self::SimdIndex<S> {
3649		()
3650	}
3651
3652	fn simd_index_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdIndex<S>, _: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
3653		()
3654	}
3655
3656	fn simd_and_mask<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>, _: Self::SimdMask<S>) -> Self::SimdMask<S> {
3657		()
3658	}
3659
3660	fn simd_or_mask<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>, _: Self::SimdMask<S>) -> Self::SimdMask<S> {
3661		()
3662	}
3663
3664	fn simd_not_mask<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>) -> Self::SimdMask<S> {
3665		()
3666	}
3667
3668	fn simd_first_true_mask<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>) -> usize {
3669		0
3670	}
3671
3672	fn simd_mask_between<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::Index, _: Self::Index) -> Self::SimdMask<S> {
3673		()
3674	}
3675
3676	fn simd_index_less_than<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdIndex<S>, _: Self::SimdIndex<S>) -> Self::SimdMask<S> {
3677		()
3678	}
3679}
3680
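// convenience aliases for the supported scalar types: `c32`/`c64` are single and double
// precision complex numbers, `fx128` is the extended-precision real type from the `qd`
// crate (a double-word value stored as two `f64` components, e.g. `Quad(1.0, 0.0)` for
// one), and `cx128` is its complex counterpart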
3681pub type c64 = Complex<f64>;
3682pub type c32 = Complex<f32>;
3683pub type fx128 = qd::Quad;
3684pub type cx128 = Complex<fx128>;
3685
3686pub extern crate num_traits;
3687pub extern crate pulp;
3688
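// `ComplexField` for the double-word real type: `Real = Self` and `IS_REAL = true`, so
// the complex-only hooks (conjugation, imaginary part) degenerate to the identity/zero.
// SIMD values are a `Quad` of two `f64` registers, one per component, with the
// arithmetic delegated to the `qd::simd` helpers.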
3689impl ComplexField for fx128 {
3690	type Arch = pulp::Arch;
3691	type Index = u64;
3692	type Real = Self;
3693	type SimdCtx<S: Simd> = S;
3694	type SimdIndex<S: Simd> = S::u64s;
3695	type SimdMask<S: Simd> = S::m64s;
3696	type SimdMemMask<S: Simd> = Quad<pulp::MemMask<S::m64s>>;
3697	type SimdVec<S: Simd> = Quad<S::f64s>;
3698	type Unit = f64;
3699
3700	const IS_REAL: bool = true;
3701	const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
3702
3703	#[inline(always)]
3704	fn zero_impl() -> Self {
3705		Self::ZERO
3706	}
3707
3708	#[inline(always)]
3709	fn one_impl() -> Self {
3710		Quad(1.0, 0.0)
3711	}
3712
3713	#[inline(always)]
3714	fn nan_impl() -> Self {
3715		Self::NAN
3716	}
3717
3718	#[inline(always)]
3719	fn infinity_impl() -> Self {
3720		Self::INFINITY
3721	}
3722
3723	#[inline(always)]
3724	fn from_real_impl(real: &Self::Real) -> Self {
3725		*real
3726	}
3727
3728	#[inline(always)]
3729	fn from_f64_impl(real: f64) -> Self {
3730		real.into()
3731	}
3732
3733	#[inline(always)]
3734	fn real_part_impl(value: &Self) -> Self::Real {
3735		*value
3736	}
3737
3738	#[inline(always)]
3739	fn imag_part_impl(_: &Self) -> Self::Real {
3740		Self::ZERO
3741	}
3742
3743	#[inline(always)]
3744	fn copy_impl(value: &Self) -> Self {
3745		*value
3746	}
3747
3748	#[inline(always)]
3749	fn conj_impl(value: &Self) -> Self {
3750		*value
3751	}
3752
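	// scalar helpers mostly defer to the `qd` operators; `recip_impl` special-cases
	// infinite inputs, presumably so the double-word division cannot turn them into NaN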
3753	#[inline(always)]
3754	fn recip_impl(value: &Self) -> Self {
3755		if value.0.abs() == f64::INFINITY {
3756			Quad::ZERO
3757		} else {
3758			Quad::from(1.0) / *value
3759		}
3760	}
3761
3762	#[inline(always)]
3763	fn sqrt_impl(value: &Self) -> Self {
3764		value.sqrt()
3765	}
3766
3767	#[inline(always)]
3768	fn abs_impl(value: &Self) -> Self::Real {
3769		value.abs()
3770	}
3771
3772	#[inline(always)]
3773	fn abs1_impl(value: &Self) -> Self::Real {
3774		value.abs()
3775	}
3776
3777	#[inline(always)]
3778	fn abs2_impl(value: &Self) -> Self::Real {
3779		value * value
3780	}
3781
3782	#[inline(always)]
3783	fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
3784		lhs * rhs
3785	}
3786
3787	#[inline(always)]
3788	fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
3789		lhs * rhs
3790	}
3791
3792	#[inline(always)]
3793	fn is_finite_impl(value: &Self) -> bool {
3794		value.0.is_finite() && value.1.is_finite()
3795	}
3796
3797	#[inline(always)]
3798	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
3799		simd
3800	}
3801
3802	#[inline(always)]
3803	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
3804		*ctx
3805	}
3806
3807	#[inline(always)]
3808	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S> {
3809		ctx.mask_between_m64s(start as _, end as _).mask()
3810	}
3811
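	// memory mask: each fx128 element occupies two `f64` slots, so the element range is
	// doubled and then split at `n` (the lane count of one register) into one mask per
	// underlying `f64` register; empty sub-ranges are normalised to 0..0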
3812	#[inline(always)]
3813	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S> {
3814		let n = (core::mem::size_of::<Self::SimdVec<S>>() / core::mem::size_of::<Self>()) as u64;
3815		let start = start * 2;
3816		let end = end * 2;
3817
3818		let mut sa = start.min(n);
3819		let mut ea = end.min(n);
3820		let mut sb = start.max(n) - n;
3821		let mut eb = end.max(n) - n;
3822
3823		if sa == ea {
3824			sa = 0;
3825			ea = 0;
3826		}
3827		if sb == eb {
3828			sb = 0;
3829			eb = 0;
3830		}
3831
3832		let a = f64::simd_mem_mask_between(ctx, sa, ea);
3833		let b = f64::simd_mem_mask_between(ctx, sb, eb);
3834		Quad(a, b)
3835	}
3836
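	// masked loads and stores forward to the `f64` implementation once per component
	// register, using the matching half of the memory mask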
3837	#[inline(always)]
3838	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
3839		unsafe {
3840			Quad(
3841				f64::simd_mask_load_raw(ctx, mask.0, &raw const (*ptr).0),
3842				f64::simd_mask_load_raw(ctx, mask.1, &raw const (*ptr).1),
3843			)
3844		}
3845	}
3846
3847	#[inline(always)]
3848	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
3849		unsafe {
3850			f64::simd_mask_store_raw(ctx, mask.0, &raw mut (*ptr).0, values.0);
3851			f64::simd_mask_store_raw(ctx, mask.1, &raw mut (*ptr).1, values.1);
3854		}
3855	}
3856
3857	#[inline(always)]
3858	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
3859		Quad(ctx.splat_f64s(value.0), ctx.splat_f64s(value.1))
3860	}
3861
3862	#[inline(always)]
3863	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
3864		Quad(ctx.splat_f64s(value.0), ctx.splat_f64s(value.1))
3865	}
3866
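	// vector arithmetic is forwarded to `qd::simd`; the `*_estimate` variants are
	// presumably the cheaper, less strictly rounded double-word additions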
3867	#[inline(always)]
3868	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3869		qd::simd::add_estimate(*ctx, lhs, rhs)
3870	}
3871
3872	#[inline(always)]
3873	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3874		qd::simd::sub_estimate(*ctx, lhs, rhs)
3875	}
3876
3877	#[inline(always)]
3878	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
3879		qd::simd::neg(*ctx, value)
3880	}
3881
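	// `fx128` is real, so conjugation is the identity and the `conj_mul*` methods below
	// reduce to plain multiplies and multiply-adds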
3882	#[inline(always)]
3883	fn simd_conj<S: Simd>(_: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
3884		value
3885	}
3886
3887	#[inline(always)]
3888	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
3889		qd::simd::abs(*ctx, value)
3890	}
3891
3892	#[inline(always)]
3893	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
3894		qd::simd::abs(*ctx, value)
3895	}
3896
3897	#[inline(always)]
3898	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3899		qd::simd::mul(*ctx, lhs, real_rhs)
3900	}
3901
3902	#[inline(always)]
3903	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3904		qd::simd::mul(*ctx, lhs, real_rhs)
3905	}
3906
3907	#[inline(always)]
3908	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3909		qd::simd::mul(*ctx, lhs, rhs)
3910	}
3911
3912	#[inline(always)]
3913	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3914		qd::simd::mul(*ctx, lhs, rhs)
3915	}
3916
3917	#[inline(always)]
3918	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
3919		qd::simd::add_estimate(*ctx, qd::simd::mul(*ctx, lhs, rhs), acc)
3920	}
3921
3922	#[inline(always)]
3923	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
3924		qd::simd::add_estimate(*ctx, qd::simd::mul(*ctx, lhs, rhs), acc)
3925	}
3926
3927	#[inline(always)]
3928	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
3929		qd::simd::mul(*ctx, value, value)
3930	}
3931
3932	#[inline(always)]
3933	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
3934		qd::simd::add_estimate(*ctx, qd::simd::mul(*ctx, value, value), acc)
3935	}
3936
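	// horizontal reductions fall back to scalar code: each register is viewed as a slice
	// of `f64` lanes via `bytemuck`, and lane `i` of the two registers is reassembled
	// into a `Quad` before accumulating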
3937	#[inline(always)]
3938	fn simd_reduce_sum<S: Simd>(_: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
3939		let a = value.0;
3940		let b = value.1;
3941		let a: &[f64] = bytemuck::cast_slice(core::slice::from_ref(&a));
3942		let b: &[f64] = bytemuck::cast_slice(core::slice::from_ref(&b));
3943		let mut acc = Quad::ZERO;
3944
3945		for (&a, &b) in core::iter::zip(a, b) {
3946			acc += Quad(a, b);
3947		}
3948
3949		acc
3950	}
3951
3952	#[inline(always)]
3953	fn simd_reduce_max<S: Simd>(_: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
3954		let a = value.0;
3955		let b = value.1;
3956		let a: &[f64] = bytemuck::cast_slice(core::slice::from_ref(&a));
3957		let b: &[f64] = bytemuck::cast_slice(core::slice::from_ref(&b));
3958		let mut acc = Quad::NEG_INFINITY;
3959
3960		for (&a, &b) in core::iter::zip(a, b) {
3961			let val = Quad(a, b);
3962			if val > acc {
3963				acc = val
3964			}
3965		}
3966
3967		acc
3968	}
3969
3970	#[inline(always)]
3971	fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
3972		qd::simd::eq(*ctx, real_lhs, real_rhs)
3973	}
3974
3975	#[inline(always)]
3976	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
3977		qd::simd::less_than(*ctx, real_lhs, real_rhs)
3978	}
3979
3980	#[inline(always)]
3981	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
3982		qd::simd::less_than_or_equal(*ctx, real_lhs, real_rhs)
3983	}
3984
3985	#[inline(always)]
3986	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
3987		qd::simd::greater_than(*ctx, real_lhs, real_rhs)
3988	}
3989
3990	#[inline(always)]
3991	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
3992		qd::simd::greater_than_or_equal(*ctx, real_lhs, real_rhs)
3993	}
3994
3995	#[inline(always)]
3996	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3997		Quad(ctx.select_f64s_m64s(mask, lhs.0, rhs.0), ctx.select_f64s_m64s(mask, lhs.1, rhs.1))
3998	}
3999
4000	#[inline(always)]
4001	fn simd_index_select<S: Simd>(
4002		ctx: &Self::SimdCtx<S>,
4003		mask: Self::SimdMask<S>,
4004		lhs: Self::SimdIndex<S>,
4005		rhs: Self::SimdIndex<S>,
4006	) -> Self::SimdIndex<S> {
4007		ctx.select_u64s_m64s(mask, lhs, rhs)
4008	}
4009
4010	#[inline(always)]
4011	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
4012		ctx.splat_u64s(value as u64)
4013	}
4014
4015	#[inline(always)]
4016	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
4017		ctx.add_u64s(lhs, rhs)
4018	}
4019
4020	#[inline(always)]
4021	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
4022		ctx.less_than_u64s(lhs, rhs)
4023	}
4024
4025	#[inline(always)]
4026	fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
4027		ctx.and_m64s(lhs, rhs)
4028	}
4029
4030	#[inline(always)]
4031	fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
4032		ctx.or_m64s(lhs, rhs)
4033	}
4034
4035	#[inline(always)]
4036	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
4037		ctx.not_m64s(mask)
4038	}
4039
4040	#[inline(always)]
4041	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
4042		ctx.first_true_m64s(value)
4043	}
4044}
4045
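// `RealField` constants for the double-word type: epsilon is `Quad::EPSILON` scaled by 8
// (apparently as a safety margin), `nbits` reports a conservative 100 bits (a double-word
// significand carries roughly 2 × 53 = 106), and the extremal magnitudes are defined as
// reciprocals of each other (likewise for their square roots)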
4046impl RealField for fx128 {
4047	#[inline(always)]
4048	fn epsilon_impl() -> Self {
4049		let mut x = Quad::EPSILON;
4050		x.0 *= 8.0;
4051		x.1 *= 8.0;
4052		x
4053	}
4054
4055	#[inline(always)]
4056	fn nbits_impl() -> usize {
4057		100
4058	}
4059
4060	#[inline(always)]
4061	fn min_positive_impl() -> Self {
4062		Quad::MIN_POSITIVE
4063	}
4064
4065	#[inline(always)]
4066	fn max_positive_impl() -> Self {
4067		Quad::MIN_POSITIVE.recip()
4068	}
4069
4070	#[inline(always)]
4071	fn sqrt_min_positive_impl() -> Self {
4072		Quad::MIN_POSITIVE.sqrt()
4073	}
4074
4075	#[inline(always)]
4076	fn sqrt_max_positive_impl() -> Self {
4077		Quad::MIN_POSITIVE.recip().sqrt()
4078	}
4079}