faer_traits/
lib.rs

1#![no_std]
2
3use bytemuck::Pod;
4use core::fmt::Debug;
5use num_complex::Complex;
6use pulp::Simd;
7
8use math_utils::*;
9
10use pulp::try_const;
11
12pub mod math_utils {
13	use crate::{ByRef, ComplexField, RealField, abs_impl};
14	use pulp::try_const;
15
16	#[inline(always)]
17	#[must_use]
18	pub fn eps<T: RealField>() -> T {
19		T::Real::epsilon_impl()
20	}
21
22	#[inline(always)]
23	#[must_use]
24	pub fn nbits<T: ComplexField>() -> usize {
25		T::Real::nbits_impl()
26	}
27
28	#[inline(always)]
29	#[must_use]
30	pub fn min_positive<T: RealField>() -> T {
31		T::min_positive_impl()
32	}
33	#[inline(always)]
34	#[must_use]
35	pub fn max_positive<T: RealField>() -> T {
36		T::max_positive_impl()
37	}
38	#[inline(always)]
39	#[must_use]
40	pub fn sqrt_min_positive<T: RealField>() -> T {
41		T::sqrt_min_positive_impl()
42	}
43	#[inline(always)]
44	#[must_use]
45	pub fn sqrt_max_positive<T: RealField>() -> T {
46		T::sqrt_max_positive_impl()
47	}
48
49	#[inline(always)]
50	#[must_use]
51	pub fn zero<T: ComplexField>() -> T {
52		T::zero_impl()
53	}
54	#[inline(always)]
55	#[must_use]
56	pub fn one<T: ComplexField>() -> T {
57		T::one_impl()
58	}
59	#[inline(always)]
60	#[must_use]
61	pub fn nan<T: ComplexField>() -> T {
62		T::nan_impl()
63	}
64	#[inline(always)]
65	#[must_use]
66	pub fn infinity<T: ComplexField>() -> T {
67		T::infinity_impl()
68	}
69
70	#[inline(always)]
71	#[must_use]
72	pub fn real<T: ComplexField>(value: &T) -> T::Real {
73		T::real_part_impl((value).by_ref())
74	}
75	#[inline(always)]
76	#[must_use]
77	pub fn imag<T: ComplexField>(value: &T) -> T::Real {
78		T::imag_part_impl((value).by_ref())
79	}
80	#[inline(always)]
81	#[track_caller]
82	#[must_use]
83	pub fn neg<T: NegByRef>(value: &T) -> T::Output {
84		value.neg_by_ref()
85	}
86	#[inline(always)]
87	#[must_use]
88	pub fn copy<T: ComplexField>(value: &T) -> T {
89		T::copy_impl((value).by_ref())
90	}
91
92	#[inline(always)]
93	#[must_use]
94	pub fn conj<T: ComplexField>(value: &T) -> T {
95		T::conj_impl((value).by_ref())
96	}
97
98	#[inline(always)]
99	#[track_caller]
100	#[must_use]
101	pub fn add<T: AddByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
102		lhs.add_by_ref(rhs)
103	}
104	#[inline(always)]
105	#[track_caller]
106	#[must_use]
107	pub fn sub<T: SubByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
108		lhs.sub_by_ref(rhs)
109	}
110	#[inline(always)]
111	#[track_caller]
112	#[must_use]
113	pub fn mul<T: MulByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
114		lhs.mul_by_ref(rhs)
115	}
116	#[inline(always)]
117	#[track_caller]
118	#[must_use]
119	pub fn div<T: DivByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
120		lhs.div_by_ref(rhs)
121	}
122
123	#[inline(always)]
124	#[must_use]
125	pub fn mul_real<T: ComplexField>(lhs: &T, rhs: &T::Real) -> T {
126		T::mul_real_impl((lhs).by_ref(), (rhs).by_ref())
127	}
128
129	#[inline(always)]
130	#[must_use]
131	pub fn mul_pow2<T: ComplexField>(lhs: &T, rhs: &T::Real) -> T {
132		T::mul_pow2_impl((lhs).by_ref(), (rhs).by_ref())
133	}
134
135	#[inline(always)]
136	#[must_use]
137	pub fn abs1<T: ComplexField>(value: &T) -> T::Real {
138		T::abs1_impl((value).by_ref())
139	}
140
141	#[inline(always)]
142	#[must_use]
143	pub fn absmax<T: ComplexField>(value: &T) -> T::Real {
144		if try_const! { T::IS_REAL } {
145			T::abs1_impl(value)
146		} else {
147			add(&T::Real::abs1_impl(&real(value)), &T::Real::abs1_impl(&imag(value)))
148		}
149	}
150
151	#[inline(always)]
152	#[must_use]
153	pub fn abs<T: ComplexField>(value: &T) -> T::Real {
154		T::abs_impl((value).by_ref())
155	}
156
157	#[inline(always)]
158	#[must_use]
159	pub fn hypot<T: RealField>(lhs: &T, rhs: &T) -> T {
160		abs_impl::<T::Real>(lhs.clone(), rhs.clone())
161	}
162
163	#[inline(always)]
164	#[must_use]
165	pub fn abs2<T: ComplexField>(value: &T) -> T::Real {
166		T::abs2_impl((value).by_ref())
167	}
168
169	#[inline(always)]
170	#[must_use]
171	pub fn max<T: RealField>(lhs: &T, rhs: &T) -> T {
172		if lhs > rhs { copy(lhs) } else { copy(rhs) }
173	}
174	#[inline(always)]
175	#[must_use]
176	pub fn min<T: RealField>(lhs: &T, rhs: &T) -> T {
177		if lhs < rhs { copy(lhs) } else { copy(rhs) }
178	}
179
180	#[inline(always)]
181	#[must_use]
182	pub fn is_nan<T: ComplexField>(value: &T) -> bool {
183		T::is_nan_impl((value).by_ref())
184	}
185
186	#[inline(always)]
187	#[must_use]
188	pub fn is_finite<T: ComplexField>(value: &T) -> bool {
189		T::is_finite_impl((value).by_ref())
190	}
191
192	#[inline(always)]
193	#[must_use]
194	pub fn sqrt<T: ComplexField>(value: &T) -> T {
195		T::sqrt_impl((value).by_ref())
196	}
197	#[inline(always)]
198	#[must_use]
199	pub fn recip<T: ComplexField>(value: &T) -> T {
200		T::recip_impl((value).by_ref())
201	}
202
203	#[inline(always)]
204	#[must_use]
205	pub fn from_real<T: ComplexField>(value: &T::Real) -> T {
206		T::from_real_impl((value).by_ref())
207	}
208
209	#[inline(always)]
210	#[must_use]
211	pub fn from_f64<T: ComplexField>(value: f64) -> T {
212		T::from_f64_impl(value)
213	}
214
215	pub use crate::{AddByRef, DivByRef, MulByRef, NegByRef, SubByRef};
216}
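
// Illustrative usage sketch (added commentary; not part of the upstream crate): the free
// functions above are thin forwarders to the `ComplexField`/`RealField` methods defined
// later in this file. The hypothetical function below exercises them on `f64`.
#[allow(dead_code)]
fn math_utils_usage_sketch() {
	use math_utils::*;
	let x = 3.0f64;
	let y = 4.0f64;
	// overflow-safe sqrt(x*x + y*y); exactly 5.0 for a 3-4-5 triple
	assert!(hypot(&x, &y) == 5.0);
	assert!(max(&x, &y) == 4.0);
	assert!(abs2(&x) == 9.0 && recip(&y) == 0.25);
	// `eps`, `min_positive`, `nbits`, ... report properties of the scalar type itself
	assert!(nbits::<f64>() == 53);
}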
217
218pub trait AddByRef<Rhs = Self> {
219	type Output;
220	fn add_by_ref(&self, rhs: &Rhs) -> Self::Output;
221}
222pub trait SubByRef<Rhs = Self> {
223	type Output;
224	fn sub_by_ref(&self, rhs: &Rhs) -> Self::Output;
225}
226pub trait NegByRef {
227	type Output;
228	fn neg_by_ref(&self) -> Self::Output;
229}
230pub trait MulByRef<Rhs = Self> {
231	type Output;
232	fn mul_by_ref(&self, rhs: &Rhs) -> Self::Output;
233}
234pub trait DivByRef<Rhs = Self> {
235	type Output;
236	fn div_by_ref(&self, rhs: &Rhs) -> Self::Output;
237}
238
239impl<Rhs, Lhs, Output> AddByRef<Rhs> for Lhs
240where
241	for<'a> &'a Lhs: core::ops::Add<&'a Rhs, Output = Output>,
242{
243	type Output = Output;
244
245	#[inline]
246	#[track_caller]
247	fn add_by_ref(&self, rhs: &Rhs) -> Self::Output {
248		self + rhs
249	}
250}
251impl<Rhs, Lhs, Output> SubByRef<Rhs> for Lhs
252where
253	for<'a> &'a Lhs: core::ops::Sub<&'a Rhs, Output = Output>,
254{
255	type Output = Output;
256
257	#[inline]
258	#[track_caller]
259	fn sub_by_ref(&self, rhs: &Rhs) -> Self::Output {
260		self - rhs
261	}
262}
263impl<Rhs, Lhs, Output> MulByRef<Rhs> for Lhs
264where
265	for<'a> &'a Lhs: core::ops::Mul<&'a Rhs, Output = Output>,
266{
267	type Output = Output;
268
269	#[inline]
270	#[track_caller]
271	fn mul_by_ref(&self, rhs: &Rhs) -> Self::Output {
272		self * rhs
273	}
274}
275impl<Rhs, Lhs, Output> DivByRef<Rhs> for Lhs
276where
277	for<'a> &'a Lhs: core::ops::Div<&'a Rhs, Output = Output>,
278{
279	type Output = Output;
280
281	#[inline]
282	#[track_caller]
283	fn div_by_ref(&self, rhs: &Rhs) -> Self::Output {
284		self / rhs
285	}
286}
287
288impl<T, Output> NegByRef for T
289where
290	for<'a> &'a T: core::ops::Neg<Output = Output>,
291{
292	type Output = Output;
293
294	#[inline]
295	#[track_caller]
296	fn neg_by_ref(&self) -> Self::Output {
297		-self
298	}
299}
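
// Illustrative sketch (added; not from the upstream crate): because of the blanket impls
// above, any type whose *references* implement the standard operator traits gets the
// `*ByRef` traits for free. `generic_sum_sketch` is a hypothetical helper; e.g.
// `&f64: core::ops::Add<&f64, Output = f64>` holds, so `generic_sum_sketch(&1.5f64, &2.5)`
// evaluates to `4.0`.
#[allow(dead_code)]
fn generic_sum_sketch<T: AddByRef<Output = T>>(lhs: &T, rhs: &T) -> T {
	lhs.add_by_ref(rhs)
}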
300
301#[faer_macros::math]
302fn abs_impl<T: RealField>(re: T, im: T) -> T {
303	let small = sqrt_min_positive();
304	let big = sqrt_max_positive();
305	let one = one();
306	let re_abs = abs(re);
307	let im_abs = abs(im);
308
309	if re_abs > big || im_abs > big {
310		sqrt(abs2(re * small) + abs2(im * small)) * big
311	} else if re_abs > one || im_abs > one {
312		sqrt(abs2(re) + abs2(im))
313	} else {
314		sqrt(abs2(re * big) + abs2(im * big)) * small
315	}
316}
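
// Worked example of the branches above (added commentary): for `f64`,
// `small = sqrt_min_positive() ~= 1.4917e-154` and `big = sqrt_max_positive() ~= 6.7039e153`.
// With `re = im = 1e300`, the naive `sqrt(re*re + im*im)` overflows to infinity, but the
// first branch rescales before squaring:
//
//     sqrt((1e300 * small)^2 + (1e300 * small)^2) * big ~= 1.4142e300 = sqrt(2) * 1e300
//
// The last branch applies the symmetric rescaling to avoid underflow for tiny inputs.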
317
318#[faer_macros::math]
319fn recip_impl<T: RealField>(re: T, im: T) -> (T, T) {
320	if is_nan(re) || is_nan(im) {
321		return (nan(), nan());
322	}
323	if re == zero() && im == zero() {
324		return (infinity(), infinity());
325	}
326	if !is_finite(re) || !is_finite(im) {
327		return (zero(), zero());
328	}
329
330	let small = sqrt_min_positive();
331	let big = sqrt_max_positive();
332	let one = one();
333	let re_abs = abs(re);
334	let im_abs = abs(im);
335
336	if re_abs > big || im_abs > big {
337		let re = re * small;
338		let im = im * small;
339		let inv = recip(abs2(re) + abs2(im));
340		(((re * inv) * small), ((-im * inv) * small))
341	} else if re_abs > one || im_abs > one {
342		let inv = recip(abs2(re) + abs2(im));
343		((re * inv), (-im * inv))
344	} else {
345		let re = re * big;
346		let im = im * big;
347		let inv = recip(abs2(re) + abs2(im));
348		(((re * inv) * big), ((-im * inv) * big))
349	}
350}
351
352#[faer_macros::math]
353fn sqrt_impl<T: RealField>(re: T, im: T) -> (T, T) {
354	let im_negative = im < zero();
355	let half = from_f64(0.5);
356	let abs = abs_impl(re.clone(), im.clone());
357
358	let mut sum = re + abs;
359	if sum < zero() {
360		sum = zero();
361	}
362
363	let out_re = sqrt(mul_pow2(sum, half));
364	let mut out_im = sqrt(mul_pow2(abs - re, half));
365	if im_negative {
366		out_im = -out_im;
367	}
368	(out_re, out_im)
369}
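
// Added commentary: this is the usual principal complex square root,
//
//     sqrt(re + i*im) = sqrt((|z| + re)/2) + i * sign(im) * sqrt((|z| - re)/2),
//
// with |z| computed by the overflow-safe `abs_impl` above, the halving done through
// `mul_pow2` (0.5 is a power of two, so it is exact), and `sum` clamped at zero to guard
// against rounding making `re + |z|` slightly negative.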
370
371pub trait ByRef<T> {
372	fn by_ref(&self) -> &T;
373}
374impl<T> ByRef<T> for T {
375	#[inline]
376	fn by_ref(&self) -> &T {
377		self
378	}
379}
380impl<T> ByRef<T> for &T {
381	#[inline]
382	fn by_ref(&self) -> &T {
383		*self
384	}
385}
386impl<T> ByRef<T> for &mut T {
387	#[inline]
388	fn by_ref(&self) -> &T {
389		*self
390	}
391}
392
393#[repr(transparent)]
394pub struct SimdCtx<T: ComplexField, S: Simd>(pub T::SimdCtx<S>);
395
396#[repr(transparent)]
397pub struct SimdCtxCopy<T: ComplexField, S: Simd>(pub T::SimdCtx<S>);
398
399#[derive(Copy, Clone, Debug)]
400#[repr(transparent)]
401pub struct RealReg<T>(pub T);
402
403impl<T: ComplexField, S: Simd> SimdCtx<T, S> {
404	#[inline(always)]
405	pub fn new(ctx: &T::SimdCtx<S>) -> &Self {
406		unsafe { &*(ctx as *const T::SimdCtx<S> as *const Self) }
407	}
408
409	#[inline(always)]
410	pub fn zero(&self) -> T::SimdVec<S> {
411		unsafe { core::mem::zeroed() }
412	}
413
414	#[inline(always)]
415	pub fn splat(&self, value: &T) -> T::SimdVec<S> {
416		unsafe { core::mem::transmute_copy(&T::simd_splat(&self.0, (value).by_ref())) }
417	}
418
419	#[inline(always)]
420	pub fn splat_real(&self, value: &T::Real) -> RealReg<T::SimdVec<S>> {
421		RealReg(unsafe { core::mem::transmute_copy(&T::simd_splat_real(&self.0, (value).by_ref())) })
422	}
423
424	#[inline(always)]
425	pub fn add(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
426		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
427		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
428		unsafe { core::mem::transmute_copy(&T::simd_add(&self.0, lhs, rhs)) }
429	}
430
431	#[inline(always)]
432	pub fn sub(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
433		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
434		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
435		unsafe { core::mem::transmute_copy(&T::simd_sub(&self.0, lhs, rhs)) }
436	}
437
438	#[inline(always)]
439	pub fn neg(&self, value: T::SimdVec<S>) -> T::SimdVec<S> {
440		let value = unsafe { core::mem::transmute_copy(&value) };
441		unsafe { core::mem::transmute_copy(&T::simd_neg(&self.0, value)) }
442	}
443
444	#[inline(always)]
445	pub fn conj(&self, value: T::SimdVec<S>) -> T::SimdVec<S> {
446		let value = unsafe { core::mem::transmute_copy(&value) };
447		unsafe { core::mem::transmute_copy(&T::simd_conj(&self.0, value)) }
448	}
449
450	#[inline(always)]
451	pub fn abs1(&self, value: T::SimdVec<S>) -> RealReg<T::SimdVec<S>> {
452		let value = unsafe { core::mem::transmute_copy(&value) };
453		RealReg(unsafe { core::mem::transmute_copy(&T::simd_abs1(&self.0, value)) })
454	}
455
456	#[inline(always)]
457	pub fn abs_max(&self, value: T::SimdVec<S>) -> RealReg<T::SimdVec<S>> {
458		let value = unsafe { core::mem::transmute_copy(&value) };
459		RealReg(unsafe { core::mem::transmute_copy(&T::simd_abs_max(&self.0, value)) })
460	}
461
462	#[inline(always)]
463	pub fn mul_real(&self, lhs: T::SimdVec<S>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdVec<S> {
464		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
465		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
466		unsafe { core::mem::transmute_copy(&T::simd_mul_real(&self.0, lhs, rhs)) }
467	}
468
469	#[inline(always)]
470	pub fn mul_pow2(&self, lhs: T::SimdVec<S>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdVec<S> {
471		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
472		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
473		unsafe { core::mem::transmute_copy(&T::simd_mul_pow2(&self.0, lhs, rhs)) }
474	}
475
476	#[inline(always)]
477	pub fn mul(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
478		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
479		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
480		unsafe { core::mem::transmute_copy(&T::simd_mul(&self.0, lhs, rhs)) }
481	}
482
483	#[inline(always)]
484	pub fn conj_mul(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
485		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
486		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
487		unsafe { core::mem::transmute_copy(&T::simd_conj_mul(&self.0, lhs, rhs)) }
488	}
489
490	#[inline(always)]
491	pub fn mul_add(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>, acc: T::SimdVec<S>) -> T::SimdVec<S> {
492		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
493		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
494		let acc = unsafe { core::mem::transmute_copy(&acc) };
495		unsafe { core::mem::transmute_copy(&T::simd_mul_add(&self.0, lhs, rhs, acc)) }
496	}
497
498	#[inline(always)]
499	pub fn conj_mul_add(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>, acc: T::SimdVec<S>) -> T::SimdVec<S> {
500		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
501		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
502		let acc = unsafe { core::mem::transmute_copy(&acc) };
503		unsafe { core::mem::transmute_copy(&T::simd_conj_mul_add(&self.0, lhs, rhs, acc)) }
504	}
505
506	#[inline(always)]
507	pub fn abs2(&self, value: T::SimdVec<S>) -> RealReg<T::SimdVec<S>> {
508		let value = unsafe { core::mem::transmute_copy(&value) };
509		RealReg(unsafe { core::mem::transmute_copy(&T::simd_abs2(&self.0, value)) })
510	}
511
512	#[inline(always)]
513	pub fn abs2_add(&self, value: T::SimdVec<S>, acc: RealReg<T::SimdVec<S>>) -> RealReg<T::SimdVec<S>> {
514		let value = unsafe { core::mem::transmute_copy(&value) };
515		let acc = unsafe { core::mem::transmute_copy(&acc) };
516		RealReg(unsafe { core::mem::transmute_copy(&T::simd_abs2_add(&self.0, value, acc)) })
517	}
518
519	#[inline(always)]
520	pub fn reduce_sum(&self, value: T::SimdVec<S>) -> T {
521		let value = unsafe { core::mem::transmute_copy(&value) };
522		unsafe { core::mem::transmute_copy(&T::simd_reduce_sum(&self.0, value)) }
523	}
524
525	#[inline(always)]
526	pub fn reduce_max(&self, value: RealReg<T::SimdVec<S>>) -> T {
527		let value = unsafe { core::mem::transmute_copy(&value) };
528		unsafe { core::mem::transmute_copy(&T::simd_reduce_max(&self.0, value)) }
529	}
530
531	#[faer_macros::math]
532	#[inline(always)]
533	pub fn reduce_sum_real(&self, value: RealReg<T::SimdVec<S>>) -> Real<T> {
534		let value = T::simd_reduce_sum(&self.0, value.0);
535		if try_const! { T::SIMD_ABS_SPLIT_REAL_IMAG && !S::IS_SCALAR } {
536			add(real(value), imag(value))
537		} else {
538			real(value)
539		}
540	}
541
542	#[faer_macros::math]
543	#[inline(always)]
544	pub fn reduce_max_real(&self, value: RealReg<T::SimdVec<S>>) -> Real<T> {
545		let value = T::simd_reduce_max(&self.0, value.0);
546		if try_const! { T::SIMD_ABS_SPLIT_REAL_IMAG && !S::IS_SCALAR } {
547			max(real(value), imag(value))
548		} else {
549			real(value)
550		}
551	}
552
553	#[inline(always)]
554	pub fn max(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> RealReg<T::SimdVec<S>> {
555		let cmp = self.gt(lhs, rhs);
556		RealReg(self.select(cmp, lhs.0, rhs.0))
557	}
558
559	#[inline(always)]
560	pub fn eq(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdMask<S> {
561		T::simd_equal(&self.0, lhs, rhs)
562	}
563
564	#[inline(always)]
565	pub fn lt(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdMask<S> {
566		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
567		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
568		unsafe { core::mem::transmute_copy(&T::simd_less_than(&self.0, lhs, rhs)) }
569	}
570
571	#[inline(always)]
572	pub fn gt(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdMask<S> {
573		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
574		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
575		unsafe { core::mem::transmute_copy(&T::simd_greater_than(&self.0, lhs, rhs)) }
576	}
577
578	#[inline(always)]
579	pub fn le(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdMask<S> {
580		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
581		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
582		unsafe { core::mem::transmute_copy(&T::simd_less_than_or_equal(&self.0, lhs, rhs)) }
583	}
584
585	#[inline(always)]
586	pub fn ge(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdMask<S> {
587		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
588		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
589		unsafe { core::mem::transmute_copy(&T::simd_greater_than_or_equal(&self.0, lhs, rhs)) }
590	}
591
592	#[inline(always)]
593	pub fn select(&self, mask: T::SimdMask<S>, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
594		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
595		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
596		unsafe { core::mem::transmute_copy(&T::simd_select(&self.0, mask, lhs, rhs)) }
597	}
598
599	#[inline(always)]
600	pub fn iselect(&self, mask: T::SimdMask<S>, lhs: T::SimdIndex<S>, rhs: T::SimdIndex<S>) -> T::SimdIndex<S> {
601		unsafe { core::mem::transmute_copy(&T::simd_index_select(&self.0, mask, lhs, rhs)) }
602	}
603
604	#[inline(always)]
605	pub fn isplat(&self, value: T::Index) -> T::SimdIndex<S> {
606		unsafe { core::mem::transmute_copy(&T::simd_index_splat(&self.0, value)) }
607	}
608
609	#[inline(always)]
610	pub fn iadd(&self, lhs: T::SimdIndex<S>, rhs: T::SimdIndex<S>) -> T::SimdIndex<S> {
611		unsafe { core::mem::transmute_copy(&T::simd_index_add(&self.0, lhs, rhs)) }
612	}
613
614	#[inline(always)]
615	pub fn or_mask(&self, lhs: T::SimdMask<S>, rhs: T::SimdMask<S>) -> T::SimdMask<S> {
616		T::simd_or_mask(&self.0, lhs, rhs)
617	}
618
619	#[inline(always)]
620	pub fn and_mask(&self, lhs: T::SimdMask<S>, rhs: T::SimdMask<S>) -> T::SimdMask<S> {
621		T::simd_and_mask(&self.0, lhs, rhs)
622	}
623
624	#[inline(always)]
625	pub fn not_mask(&self, mask: T::SimdMask<S>) -> T::SimdMask<S> {
626		T::simd_not_mask(&self.0, mask)
627	}
628
629	#[inline(always)]
630	pub fn first_true_mask(&self, value: T::SimdMask<S>) -> usize {
631		T::simd_first_true_mask(&self.0, value)
632	}
633
634	#[inline(always)]
635	pub unsafe fn mask_load(&self, mask: T::SimdMemMask<S>, ptr: *const T::SimdVec<S>) -> T::SimdVec<S> {
636		unsafe { T::simd_mask_load(&self.0, mask, ptr) }
637	}
638
639	#[inline(always)]
640	pub unsafe fn mask_store(&self, mask: T::SimdMemMask<S>, ptr: *mut T::SimdVec<S>, value: T::SimdVec<S>) {
641		let value = unsafe { core::mem::transmute_copy(&value) };
642		unsafe { T::simd_mask_store(&self.0, mask, ptr, value) }
643	}
644
645	#[inline(always)]
646	pub fn load(&self, ptr: &T::SimdVec<S>) -> T::SimdVec<S> {
647		unsafe { core::mem::transmute_copy(&T::simd_load(&self.0, ptr)) }
648	}
649
650	#[inline(always)]
651	pub fn store(&self, ptr: &mut T::SimdVec<S>, value: T::SimdVec<S>) {
652		let value = unsafe { core::mem::transmute_copy(&value) };
653		unsafe { core::mem::transmute_copy(&T::simd_store(&self.0, ptr, value)) }
654	}
655}
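
// Illustrative sketch (added; not part of the upstream crate): a minimal use of the
// `SimdCtx` wrapper above through `pulp`'s dispatch mechanism. `LaneDotSketch` is a
// hypothetical type; it splats two scalars, fuses a multiply-add into a zeroed
// accumulator, and reduces, so the result is `3.0 * 4.0 * lane_count`.
#[allow(dead_code)]
struct LaneDotSketch;

impl pulp::WithSimd for LaneDotSketch {
	type Output = f64;

	#[inline(always)]
	fn with_simd<S: Simd>(self, simd: S) -> f64 {
		let ctx = <f64 as ComplexField>::simd_ctx(simd);
		let simd = SimdCtx::<f64, S>::new(&ctx);
		let x = simd.splat(&3.0);
		let y = simd.splat(&4.0);
		let acc = simd.mul_add(x, y, simd.zero());
		simd.reduce_sum(acc)
	}
}
// Dispatched as `pulp::Arch::new().dispatch(LaneDotSketch)`.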
656
657pub unsafe trait Conjugate: Send + Sync + core::fmt::Debug {
658	const IS_CANONICAL: bool;
659
660	type Conj: Conjugate<Conj = Self, Canonical = Self::Canonical>;
661	type Canonical: Conjugate<Canonical = Self::Canonical> + ComplexField;
662}
663
664pub type Real<T> = <<T as Conjugate>::Canonical as ComplexField>::Real;
665
666#[derive(Copy, Clone, Debug, PartialEq, Eq)]
667pub struct ComplexConj<T> {
668	pub re: T,
669	pub im_neg: T,
670}
671
672#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
673pub enum SimdCapabilities {
674	None,
675	Copy,
676	Simd,
677}
678
679impl SimdCapabilities {
680	#[inline]
681	pub const fn is_copy(self) -> bool {
682		matches!(self, Self::Copy | Self::Simd)
683	}
684
685	#[inline]
686	pub const fn is_simd(self) -> bool {
687		matches!(self, Self::Simd)
688	}
689}
690
691mod seal {
692	pub trait Seal {}
693	impl Seal for u32 {}
694	impl Seal for u64 {}
695	impl Seal for usize {}
696	impl Seal for i32 {}
697	impl Seal for i64 {}
698	impl Seal for isize {}
699}
700
701pub trait Seal: seal::Seal {}
702impl<T: seal::Seal> Seal for T {}
703
704/// Trait for signed integers corresponding to the ones satisfying [`Index`].
705///
706/// Always smaller than or equal to `isize`.
707pub trait SignedIndex:
708	Seal
709	+ core::fmt::Debug
710	+ core::ops::Neg<Output = Self>
711	+ core::ops::Add<Output = Self>
712	+ core::ops::Sub<Output = Self>
713	+ core::ops::AddAssign
714	+ core::ops::SubAssign
715	+ bytemuck::Pod
716	+ Eq
717	+ Ord
718	+ Send
719	+ Sync
720{
721	/// Maximum representable value.
722	const MAX: Self;
723
724	/// Truncate `value` to type [`Self`].
725	#[must_use]
726	fn truncate(value: usize) -> Self;
727
728	/// Zero extend `self`.
729	#[must_use]
730	fn zx(self) -> usize;
731	/// Sign extend `self`.
732	#[must_use]
733	fn sx(self) -> usize;
734
735	/// Sum nonnegative values while checking for overflow.
736	fn sum_nonnegative(slice: &[Self]) -> Option<Self> {
737		let mut acc = Self::zeroed();
738		for &i in slice {
739			if Self::MAX - i < acc {
740				return None;
741			}
742			acc += i;
743		}
744		Some(acc)
745	}
746}
747
748impl SignedIndex for i32 {
749	const MAX: Self = Self::MAX;
750
751	#[inline(always)]
752	fn truncate(value: usize) -> Self {
753		#[allow(clippy::assertions_on_constants)]
754		const _: () = {
755			core::assert!(i32::BITS <= usize::BITS);
756		};
757		value as isize as Self
758	}
759
760	#[inline(always)]
761	fn zx(self) -> usize {
762		self as u32 as usize
763	}
764
765	#[inline(always)]
766	fn sx(self) -> usize {
767		self as isize as usize
768	}
769}
770
771#[cfg(target_pointer_width = "64")]
772impl SignedIndex for i64 {
773	const MAX: Self = Self::MAX;
774
775	#[inline(always)]
776	fn truncate(value: usize) -> Self {
777		#[allow(clippy::assertions_on_constants)]
778		const _: () = {
779			core::assert!(i64::BITS <= usize::BITS);
780		};
781		value as isize as Self
782	}
783
784	#[inline(always)]
785	fn zx(self) -> usize {
786		self as u64 as usize
787	}
788
789	#[inline(always)]
790	fn sx(self) -> usize {
791		self as isize as usize
792	}
793}
794
795impl SignedIndex for isize {
796	const MAX: Self = Self::MAX;
797
798	#[inline(always)]
799	fn truncate(value: usize) -> Self {
800		value as isize
801	}
802
803	#[inline(always)]
804	fn zx(self) -> usize {
805		self as usize
806	}
807
808	#[inline(always)]
809	fn sx(self) -> usize {
810		self as usize
811	}
812}
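
// Illustrative sketch (added): `sum_nonnegative` is an overflow-checked reduction over
// nonnegative values; it returns `None` instead of wrapping around.
#[allow(dead_code)]
fn signed_index_sum_sketch() {
	assert!(<i32 as SignedIndex>::sum_nonnegative(&[1, 2, 3]) == Some(6));
	assert!(<i32 as SignedIndex>::sum_nonnegative(&[i32::MAX, 1]) == None);
}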
813
814pub trait Index:
815	Seal
816	+ core::fmt::Debug
817	+ core::ops::Not<Output = Self>
818	+ core::ops::BitAnd<Output = Self>
819	+ core::ops::BitOr<Output = Self>
820	+ core::ops::BitXor<Output = Self>
821	+ core::ops::Add<Output = Self>
822	+ core::ops::Sub<Output = Self>
823	+ core::ops::AddAssign
824	+ core::ops::SubAssign
825	+ bytemuck::Pod
826	+ Eq
827	+ Ord
828	+ Send
829	+ Sync
831{
832	/// Equally-sized index type with a fixed size (no `usize`).
833	type FixedWidth: Index;
834	/// Equally-sized signed index type.
835	type Signed: SignedIndex;
836
837	const BITS: u32 = core::mem::size_of::<Self>() as u32 * 8;
838
839	/// Truncate `value` to type [`Self`].
840	#[must_use]
841	#[inline(always)]
842	fn truncate(value: usize) -> Self {
843		Self::from_signed(<Self::Signed as SignedIndex>::truncate(value))
844	}
845
846	/// Zero extend `self`.
847	#[must_use]
848	#[inline(always)]
849	fn zx(self) -> usize {
850		self.to_signed().zx()
851	}
852
853	/// Convert a reference to a slice of [`Self`] to fixed width types.
854	#[inline(always)]
855	fn canonicalize(slice: &[Self]) -> &[Self::FixedWidth] {
856		bytemuck::cast_slice(slice)
857	}
858
859	/// Convert a mutable reference to a slice of [`Self`] to fixed width types.
860	#[inline(always)]
861	fn canonicalize_mut(slice: &mut [Self]) -> &mut [Self::FixedWidth] {
862		bytemuck::cast_slice_mut(slice)
863	}
864
865	/// Convert a signed value to an unsigned one.
866	#[inline(always)]
867	fn from_signed(value: Self::Signed) -> Self {
868		bytemuck::cast(value)
869	}
870
871	/// Convert an unsigned value to a signed one.
872	#[inline(always)]
873	fn to_signed(self) -> Self::Signed {
874		bytemuck::cast(self)
875	}
876
877	/// Sum nonnegative values while checking for overflow.
878	#[inline]
879	fn sum_nonnegative(slice: &[Self]) -> Option<Self> {
880		Self::Signed::sum_nonnegative(bytemuck::cast_slice(slice)).map(Self::from_signed)
881	}
882}
883
884impl Index for u32 {
885	type FixedWidth = u32;
886	type Signed = i32;
887}
888#[cfg(target_pointer_width = "64")]
889impl Index for u64 {
890	type FixedWidth = u64;
891	type Signed = i64;
892}
893
894impl Index for usize {
895	#[cfg(target_pointer_width = "32")]
896	type FixedWidth = u32;
897	#[cfg(target_pointer_width = "64")]
898	type FixedWidth = u64;
899	type Signed = isize;
900}
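
// Illustrative sketch (added): round-tripping through the `Index` helpers above.
// `index_roundtrip_sketch` is a hypothetical function, not part of the crate.
#[allow(dead_code)]
fn index_roundtrip_sketch() {
	let i = <u32 as Index>::truncate(5usize);
	assert!(i == 5u32 && i.zx() == 5usize);
	assert!(<usize as Index>::sum_nonnegative(&[2, 3]) == Some(5));
}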
901
902unsafe impl<T: RealField> Conjugate for T {
903	type Canonical = T;
904	type Conj = T;
905
906	const IS_CANONICAL: bool = true;
907}
908
909unsafe impl<T: RealField> Conjugate for Complex<T> {
910	type Canonical = Complex<T>;
911	type Conj = ComplexConj<T>;
912
913	const IS_CANONICAL: bool = true;
914}
915unsafe impl<T: RealField> Conjugate for ComplexConj<T> {
916	type Canonical = Complex<T>;
917	type Conj = Complex<T>;
918
919	const IS_CANONICAL: bool = false;
920}
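
// Added commentary: the impls above establish the conjugation round trip, e.g.
//
//     <f64 as Conjugate>::Conj                   = f64               (reals are self-conjugate)
//     <Complex<f64> as Conjugate>::Conj          = ComplexConj<f64>
//     <ComplexConj<f64> as Conjugate>::Canonical = Complex<f64>      (IS_CANONICAL = false)
//
// so generic code can always normalize to the canonical representation via
// `Conjugate::Canonical`.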
921
922pub trait SimdArch: Default {
923	fn dispatch<R>(self, f: impl pulp::WithSimd<Output = R>) -> R;
924}
925
926impl SimdArch for pulp::Arch {
927	#[inline]
928	fn dispatch<R>(self, f: impl pulp::WithSimd<Output = R>) -> R {
929		self.dispatch(f)
930	}
931}
932
933impl SimdArch for pulp::Scalar {
934	#[inline]
935	fn dispatch<R>(self, f: impl pulp::WithSimd<Output = R>) -> R {
936		f.with_simd(self)
937	}
938}
939
940pub trait ComplexField:
941	Debug
942	+ Clone
943	+ Conjugate<Canonical = Self>
944	+ PartialEq
945	+ AddByRef<Output = Self>
946	+ SubByRef<Output = Self>
947	+ MulByRef<Output = Self>
948	+ NegByRef<Output = Self>
949{
950	const IS_REAL: bool;
951	const SIMD_ABS_SPLIT_REAL_IMAG: bool = false;
952
953	type Arch: SimdArch;
954	type Unit: ComplexField;
955
956	type SimdCtx<S: Simd>: Copy;
957	type Index: Index;
958
959	type Real: RealField;
960
961	#[doc(hidden)]
962	const IS_NATIVE_F32: bool = false;
963	#[doc(hidden)]
964	const IS_NATIVE_C32: bool = false;
965	#[doc(hidden)]
966	const IS_NATIVE_F64: bool = false;
967	#[doc(hidden)]
968	const IS_NATIVE_C64: bool = false;
969
970	const SIMD_CAPABILITIES: SimdCapabilities;
971	type SimdMask<S: Simd>: Copy + Debug;
972	type SimdMemMask<S: Simd>: Copy + Debug;
973
974	type SimdVec<S: Simd>: Pod + Debug;
975	type SimdIndex<S: Simd>: Pod + Debug;
976
977	fn zero_impl() -> Self;
978	fn one_impl() -> Self;
979	fn nan_impl() -> Self;
980	fn infinity_impl() -> Self;
981
982	fn from_real_impl(real: &Self::Real) -> Self;
983	fn from_f64_impl(real: f64) -> Self;
984
985	fn real_part_impl(value: &Self) -> Self::Real;
986	fn imag_part_impl(value: &Self) -> Self::Real;
987
988	fn copy_impl(value: &Self) -> Self;
989	fn conj_impl(value: &Self) -> Self;
990	fn recip_impl(value: &Self) -> Self;
991	fn sqrt_impl(value: &Self) -> Self;
992
993	fn abs_impl(value: &Self) -> Self::Real;
994	fn abs1_impl(value: &Self) -> Self::Real;
995	fn abs2_impl(value: &Self) -> Self::Real;
996
997	fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self;
998
999	fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self;
1000
1001	fn is_finite_impl(value: &Self) -> bool;
1002	fn is_nan_impl(value: &Self) -> bool {
1003		value != value
1004	}
1005
1006	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S>;
1007	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S;
1008
1009	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S>;
1010	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S>;
1011	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S>;
1012	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>);
1013
1014	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S>;
1015	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S>;
1016
1017	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1018	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1019
1020	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1021	fn simd_conj<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1022	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1023	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1024
1025	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1026	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1027
1028	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1029	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1030	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S>;
1031	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S>;
1032	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1033	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S>;
1034
1035	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self;
1036	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self;
1037	fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1038	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1039	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1040	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1041	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1042
1043	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1044	fn simd_index_select<S: Simd>(
1045		ctx: &Self::SimdCtx<S>,
1046		mask: Self::SimdMask<S>,
1047		lhs: Self::SimdIndex<S>,
1048		rhs: Self::SimdIndex<S>,
1049	) -> Self::SimdIndex<S>;
1050
1051	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S>;
1052	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S>;
1053	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S>;
1054	#[inline(always)]
1055	fn simd_index_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1056		Self::simd_index_less_than(ctx, rhs, lhs)
1057	}
1058	#[inline(always)]
1059	fn simd_index_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1060		Self::simd_not_mask(ctx, Self::simd_index_less_than(ctx, rhs, lhs))
1061	}
1062	#[inline(always)]
1063	fn simd_index_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1064		Self::simd_not_mask(ctx, Self::simd_index_greater_than(ctx, rhs, lhs))
1065	}
1066
1067	fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S>;
1068	fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S>;
1069	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S>;
1070	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize;
1071
1072	#[inline(always)]
1073	fn simd_load<S: Simd>(ctx: &Self::SimdCtx<S>, ptr: &Self::SimdVec<S>) -> Self::SimdVec<S> {
1074		let simd = Self::ctx_from_simd(ctx);
1075		if try_const! { Self::Unit::IS_NATIVE_F32 } {
1076			simd.deinterleave_shfl_f32s(*ptr)
1077		} else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1078			simd.deinterleave_shfl_f64s(*ptr)
1079		} else {
1080			panic!();
1081		}
1082	}
1083
1084	#[inline(always)]
1085	fn simd_store<S: Simd>(ctx: &Self::SimdCtx<S>, ptr: &mut Self::SimdVec<S>, value: Self::SimdVec<S>) {
1086		let simd = Self::ctx_from_simd(ctx);
1087		if try_const! { Self::Unit::IS_NATIVE_F32 } {
1088			*ptr = simd.deinterleave_shfl_f32s(value)
1089		} else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1090			*ptr = simd.deinterleave_shfl_f64s(value)
1091		} else {
1092			panic!();
1093		}
1094	}
1095
1096	#[inline(always)]
1097	unsafe fn simd_mask_load<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
1098		let simd = Self::ctx_from_simd(ctx);
1099		let value = Self::simd_mask_load_raw(ctx, mask, ptr);
1100		if try_const! { Self::Unit::IS_NATIVE_F32 } {
1101			simd.deinterleave_shfl_f32s(value)
1102		} else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1103			simd.deinterleave_shfl_f64s(value)
1104		} else {
1105			panic!();
1106		}
1107	}
1108
1109	#[inline(always)]
1110	unsafe fn simd_mask_store<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, value: Self::SimdVec<S>) {
1111		let simd = Self::ctx_from_simd(ctx);
1112		if try_const! { Self::Unit::IS_NATIVE_F32 } {
1113			Self::simd_mask_store_raw(ctx, mask, ptr, simd.deinterleave_shfl_f32s(value))
1114		} else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1115			Self::simd_mask_store_raw(ctx, mask, ptr, simd.deinterleave_shfl_f64s(value))
1116		} else {
1117			panic!();
1118		}
1119	}
1120
1121	#[inline(always)]
1122	fn simd_iota<S: Simd>(ctx: &Self::SimdCtx<S>) -> Self::SimdIndex<S> {
1123		let simd = Self::ctx_from_simd(ctx);
1124		struct Interleave<T>(T);
1125		unsafe impl<T> pulp::Interleave for Interleave<T> {}
1126
1127		unsafe {
1128			if try_const! { Self::Unit::IS_NATIVE_F32 } {
1129				core::mem::transmute_copy::<_, Self::SimdIndex<S>>(&simd.deinterleave_shfl_f32s(Interleave(core::mem::transmute_copy::<
1130					_,
1131					Self::SimdVec<S>,
1132				>(
1133					&pulp::iota_32::<Interleave<Self>>()
1134				))))
1135			} else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1136				core::mem::transmute_copy::<_, Self::SimdIndex<S>>(
1137					&simd.deinterleave_shfl_f64s(core::mem::transmute_copy::<_, Self::SimdVec<S>>(&pulp::iota_64::<Interleave<Self>>())),
1138				)
1139			} else {
1140				panic!();
1141			}
1142		}
1143	}
1144}
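
// Illustrative sketch (added; not part of the upstream crate): a generic helper written
// against the trait above, using the free functions from `math_utils`. `norm2_sketch` is
// hypothetical; it accumulates `abs2` in the real field and takes a square root.
#[allow(dead_code)]
fn norm2_sketch<T: ComplexField>(values: &[T]) -> T::Real {
	let mut acc = math_utils::zero::<T::Real>();
	for value in values {
		acc = math_utils::add(&acc, &math_utils::abs2(value));
	}
	math_utils::sqrt(&acc)
}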
1145
1146pub trait RealField:
1147	ComplexField<Real = Self, Conj = Self> + DivByRef<Output = Self> + PartialOrd + num_traits::NumOps + num_traits::Num + core::ops::Neg<Output = Self>
1148{
1149	fn epsilon_impl() -> Self;
1150	fn nbits_impl() -> usize;
1151
1152	fn min_positive_impl() -> Self;
1153	fn max_positive_impl() -> Self;
1154	fn sqrt_min_positive_impl() -> Self;
1155	fn sqrt_max_positive_impl() -> Self;
1156}
1157
1158impl ComplexField for f32 {
1159	type Arch = pulp::Arch;
1160	type Index = u32;
1161	type Real = Self;
1162	type SimdCtx<S: Simd> = S;
1163	type SimdIndex<S: Simd> = S::u32s;
1164	type SimdMask<S: Simd> = S::m32s;
1165	type SimdMemMask<S: Simd> = pulp::MemMask<S::m32s>;
1166	type SimdVec<S: Simd> = S::f32s;
1167	type Unit = Self;
1168
1169	const IS_NATIVE_F32: bool = true;
1170	const IS_REAL: bool = true;
1171	const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
1172
1173	#[inline(always)]
1174	fn zero_impl() -> Self {
1175		0.0
1176	}
1177
1178	#[inline(always)]
1179	fn one_impl() -> Self {
1180		1.0
1181	}
1182
1183	#[inline(always)]
1184	fn nan_impl() -> Self {
1185		Self::NAN
1186	}
1187
1188	#[inline(always)]
1189	fn infinity_impl() -> Self {
1190		Self::INFINITY
1191	}
1192
1193	#[inline(always)]
1194	fn from_real_impl(value: &Self) -> Self {
1195		*value
1196	}
1197
1198	#[inline(always)]
1199	fn from_f64_impl(value: f64) -> Self {
1200		value as _
1201	}
1202
1203	#[inline(always)]
1204	fn real_part_impl(value: &Self) -> Self {
1205		*value
1206	}
1207
1208	#[inline(always)]
1209	fn imag_part_impl(_: &Self) -> Self {
1210		0.0
1211	}
1212
1213	#[inline(always)]
1214	fn copy_impl(value: &Self) -> Self {
1215		*value
1216	}
1217
1218	#[inline(always)]
1219	fn conj_impl(value: &Self) -> Self {
1220		*value
1221	}
1222
1223	#[inline(always)]
1224	fn recip_impl(value: &Self) -> Self {
1225		1.0 / *value
1226	}
1227
1228	#[inline(always)]
1229	fn sqrt_impl(value: &Self) -> Self {
1230		libm::sqrtf(*value)
1231	}
1232
1233	#[inline(always)]
1234	fn abs_impl(value: &Self) -> Self {
1235		(*value).abs()
1236	}
1237
1238	#[inline(always)]
1239	fn abs1_impl(value: &Self) -> Self {
1240		(*value).abs()
1241	}
1242
1243	#[inline(always)]
1244	fn abs2_impl(value: &Self) -> Self {
1245		(*value) * (*value)
1246	}
1247
1248	#[inline(always)]
1249	fn mul_real_impl(lhs: &Self, rhs: &Self) -> Self {
1250		(*lhs) * (*rhs)
1251	}
1252
1253	#[inline(always)]
1254	fn mul_pow2_impl(lhs: &Self, rhs: &Self) -> Self {
1255		(*lhs) * (*rhs)
1256	}
1257
1258	#[inline(always)]
1259	fn is_finite_impl(value: &Self) -> bool {
1260		(*value).is_finite()
1261	}
1262
1263	#[inline(always)]
1264	fn is_nan_impl(value: &Self) -> bool {
1265		(*value).is_nan()
1266	}
1267
1268	#[inline(always)]
1269	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
1270		simd
1271	}
1272
1273	#[inline(always)]
1274	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
1275		ctx.splat_f32s(*value)
1276	}
1277
1278	#[inline(always)]
1279	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
1280		ctx.splat_f32s(*value)
1281	}
1282
1283	#[inline(always)]
1284	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1285		ctx.add_f32s(lhs, rhs)
1286	}
1287
1288	#[inline(always)]
1289	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1290		ctx.sub_f32s(lhs, rhs)
1291	}
1292
1293	#[inline(always)]
1294	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1295		ctx.neg_f32s(value)
1296	}
1297
1298	#[inline(always)]
1299	fn simd_conj<S: Simd>(_: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1300		value
1301	}
1302
1303	#[inline(always)]
1304	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1305		ctx.abs_f32s(value)
1306	}
1307
1308	#[inline(always)]
1309	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1310		ctx.mul_f32s(lhs, rhs)
1311	}
1312
1313	#[inline(always)]
1314	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1315		ctx.mul_f32s(lhs, real_rhs)
1316	}
1317
1318	#[inline(always)]
1319	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1320		ctx.mul_f32s(lhs, real_rhs)
1321	}
1322
1323	#[inline(always)]
1324	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1325		ctx.mul_f32s(lhs, rhs)
1326	}
1327
1328	#[inline(always)]
1329	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1330		ctx.mul_add_e_f32s(lhs, rhs, acc)
1331	}
1332
1333	#[inline(always)]
1334	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1335		ctx.mul_add_e_f32s(lhs, rhs, acc)
1336	}
1337
1338	#[inline(always)]
1339	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1340		ctx.mul_f32s(value, value)
1341	}
1342
1343	#[inline(always)]
1344	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1345		ctx.mul_add_e_f32s(value, value, acc)
1346	}
1347
1348	#[inline(always)]
1349	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
1350		ctx.reduce_sum_f32s(value)
1351	}
1352
1353	#[inline(always)]
1354	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
1355		ctx.reduce_max_f32s(value)
1356	}
1357
1358	#[inline(always)]
1359	fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1360		ctx.equal_f32s(real_lhs, real_rhs)
1361	}
1362
1363	#[inline(always)]
1364	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1365		ctx.less_than_f32s(real_lhs, real_rhs)
1366	}
1367
1368	#[inline(always)]
1369	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1370		ctx.greater_than_f32s(real_lhs, real_rhs)
1371	}
1372
1373	#[inline(always)]
1374	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1375		ctx.less_than_or_equal_f32s(real_lhs, real_rhs)
1376	}
1377
1378	#[inline(always)]
1379	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1380		ctx.greater_than_or_equal_f32s(real_lhs, real_rhs)
1381	}
1382
1383	#[inline(always)]
1384	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1385		ctx.select_f32s_m32s(mask, lhs, rhs)
1386	}
1387
1388	#[inline(always)]
1389	fn simd_index_select<S: Simd>(
1390		ctx: &Self::SimdCtx<S>,
1391		mask: Self::SimdMask<S>,
1392		lhs: Self::SimdIndex<S>,
1393		rhs: Self::SimdIndex<S>,
1394	) -> Self::SimdIndex<S> {
1395		ctx.select_u32s_m32s(mask, lhs, rhs)
1396	}
1397
1398	#[inline(always)]
1399	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
1400		ctx.splat_u32s(value)
1401	}
1402
1403	#[inline(always)]
1404	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
1405		ctx.add_u32s(lhs, rhs)
1406	}
1407
1408	#[inline(always)]
1409	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1410		ctx.less_than_u32s(lhs, rhs)
1411	}
1412
1413	#[inline(always)]
1414	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1415		ctx.abs_f32s(value)
1416	}
1417
1418	#[inline(always)]
1419	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
1420		*ctx
1421	}
1422
1423	#[inline(always)]
1424	fn simd_and_mask<S: Simd>(simd: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
1425		simd.and_m32s(lhs, rhs)
1426	}
1427
1428	#[inline(always)]
1429	fn simd_or_mask<S: Simd>(simd: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
1430		simd.or_m32s(lhs, rhs)
1431	}
1432
1433	#[inline(always)]
1434	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
1435		ctx.not_m32s(mask)
1436	}
1437
1438	#[inline(always)]
1439	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
1440		ctx.first_true_m32s(value)
1441	}
1442
1443	#[inline(always)]
1444	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: u32, end: u32) -> Self::SimdMemMask<S> {
1445		ctx.mask_between_m32s(start, end)
1446	}
1447
1448	#[inline(always)]
1449	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: u32, end: u32) -> Self::SimdMask<S> {
1450		ctx.mask_between_m32s(start, end).mask()
1451	}
1452
1453	#[inline(always)]
1454	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
1455		ctx.mask_load_ptr_f32s(mask, ptr as _)
1456	}
1457
1458	#[inline(always)]
1459	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
1460		ctx.mask_store_ptr_f32s(mask, ptr as _, values);
1461	}
1462}
1463
1464impl RealField for f32 {
1465	#[inline(always)]
1466	fn epsilon_impl() -> Self {
1467		Self::EPSILON
1468	}
1469
1470	#[inline(always)]
1471	fn min_positive_impl() -> Self {
1472		Self::MIN_POSITIVE
1473	}
1474
1475	#[inline(always)]
1476	fn max_positive_impl() -> Self {
1477		Self::MIN_POSITIVE.recip()
1478	}
1479
1480	#[inline(always)]
1481	fn sqrt_min_positive_impl() -> Self {
1482		libm::sqrtf(Self::MIN_POSITIVE)
1483	}
1484
1485	#[inline(always)]
1486	fn sqrt_max_positive_impl() -> Self {
1487		libm::sqrtf(Self::MIN_POSITIVE.recip())
1488	}
1489
1490	#[inline(always)]
1491	fn nbits_impl() -> usize {
1492		Self::MANTISSA_DIGITS as usize
1493	}
1494}
1495
1496impl ComplexField for f64 {
1497	type Arch = pulp::Arch;
1498	type Index = u64;
1499	type Real = Self;
1500	type SimdCtx<S: Simd> = S;
1501	type SimdIndex<S: Simd> = S::u64s;
1502	type SimdMask<S: Simd> = S::m64s;
1503	type SimdMemMask<S: Simd> = pulp::MemMask<S::m64s>;
1504	type SimdVec<S: Simd> = S::f64s;
1505	type Unit = Self;
1506
1507	const IS_NATIVE_F64: bool = true;
1508	const IS_REAL: bool = true;
1509	const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
1510
1511	#[inline(always)]
1512	fn zero_impl() -> Self {
1513		0.0
1514	}
1515
1516	#[inline(always)]
1517	fn one_impl() -> Self {
1518		1.0
1519	}
1520
1521	#[inline(always)]
1522	fn nan_impl() -> Self {
1523		Self::NAN
1524	}
1525
1526	#[inline(always)]
1527	fn infinity_impl() -> Self {
1528		Self::INFINITY
1529	}
1530
1531	#[inline(always)]
1532	fn from_real_impl(value: &Self) -> Self {
1533		*value
1534	}
1535
1536	#[inline(always)]
1537	fn from_f64_impl(value: f64) -> Self {
1538		value as _
1539	}
1540
1541	#[inline(always)]
1542	fn real_part_impl(value: &Self) -> Self {
1543		*value
1544	}
1545
1546	#[inline(always)]
1547	fn imag_part_impl(_: &Self) -> Self {
1548		0.0
1549	}
1550
1551	#[inline(always)]
1552	fn copy_impl(value: &Self) -> Self {
1553		*value
1554	}
1555
1556	#[inline(always)]
1557	fn conj_impl(value: &Self) -> Self {
1558		*value
1559	}
1560
1561	#[inline(always)]
1562	fn recip_impl(value: &Self) -> Self {
1563		1.0 / *value
1564	}
1565
1566	#[inline(always)]
1567	fn sqrt_impl(value: &Self) -> Self {
1568		libm::sqrt(*value)
1569	}
1570
1571	#[inline(always)]
1572	fn abs_impl(value: &Self) -> Self {
1573		(*value).abs()
1574	}
1575
1576	#[inline(always)]
1577	fn abs1_impl(value: &Self) -> Self {
1578		(*value).abs()
1579	}
1580
1581	#[inline(always)]
1582	fn abs2_impl(value: &Self) -> Self {
1583		(*value) * (*value)
1584	}
1585
1586	#[inline(always)]
1587	fn mul_real_impl(lhs: &Self, rhs: &Self) -> Self {
1588		(*lhs) * (*rhs)
1589	}
1590
1591	#[inline(always)]
1592	fn mul_pow2_impl(lhs: &Self, rhs: &Self) -> Self {
1593		(*lhs) * (*rhs)
1594	}
1595
1596	#[inline(always)]
1597	fn is_nan_impl(value: &Self) -> bool {
1598		(*value).is_nan()
1599	}
1600
1601	#[inline(always)]
1602	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
1603		simd
1604	}
1605
1606	#[inline(always)]
1607	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
1608		ctx.splat_f64s(*value)
1609	}
1610
1611	#[inline(always)]
1612	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
1613		ctx.splat_f64s(*value)
1614	}
1615
1616	#[inline(always)]
1617	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1618		ctx.add_f64s(lhs, rhs)
1619	}
1620
1621	#[inline(always)]
1622	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1623		ctx.sub_f64s(lhs, rhs)
1624	}
1625
1626	#[inline(always)]
1627	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1628		ctx.neg_f64s(value)
1629	}
1630
1631	#[inline(always)]
1632	fn simd_conj<S: Simd>(_: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1633		value
1634	}
1635
1636	#[inline(always)]
1637	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1638		ctx.abs_f64s(value)
1639	}
1640
1641	#[inline(always)]
1642	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1643		ctx.mul_f64s(lhs, rhs)
1644	}
1645
1646	#[inline(always)]
1647	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1648		ctx.mul_f64s(lhs, real_rhs)
1649	}
1650
1651	#[inline(always)]
1652	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1653		ctx.mul_f64s(lhs, real_rhs)
1654	}
1655
1656	#[inline(always)]
1657	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1658		ctx.mul_f64s(lhs, rhs)
1659	}
1660
1661	#[inline(always)]
1662	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1663		ctx.mul_add_e_f64s(lhs, rhs, acc)
1664	}
1665
1666	#[inline(always)]
1667	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1668		ctx.mul_add_e_f64s(lhs, rhs, acc)
1669	}
1670
1671	#[inline(always)]
1672	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1673		ctx.mul_f64s(value, value)
1674	}
1675
1676	#[inline(always)]
1677	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
1678		ctx.mul_add_e_f64s(value, value, acc)
1679	}
1680
1681	#[inline(always)]
1682	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
1683		ctx.reduce_sum_f64s(value)
1684	}
1685
1686	#[inline(always)]
1687	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
1688		ctx.reduce_max_f64s(value)
1689	}
1690
1691	#[inline(always)]
1692	fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1693		ctx.equal_f64s(real_lhs, real_rhs)
1694	}
1695
1696	#[inline(always)]
1697	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1698		ctx.less_than_f64s(real_lhs, real_rhs)
1699	}
1700
1701	#[inline(always)]
1702	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1703		ctx.greater_than_f64s(real_lhs, real_rhs)
1704	}
1705
1706	#[inline(always)]
1707	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1708		ctx.less_than_or_equal_f64s(real_lhs, real_rhs)
1709	}
1710
1711	#[inline(always)]
1712	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
1713		ctx.greater_than_or_equal_f64s(real_lhs, real_rhs)
1714	}
1715
1716	#[inline(always)]
1717	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
1718		ctx.select_f64s_m64s(mask, lhs, rhs)
1719	}
1720
1721	#[inline(always)]
1722	fn simd_index_select<S: Simd>(
1723		ctx: &Self::SimdCtx<S>,
1724		mask: Self::SimdMask<S>,
1725		lhs: Self::SimdIndex<S>,
1726		rhs: Self::SimdIndex<S>,
1727	) -> Self::SimdIndex<S> {
1728		ctx.select_u64s_m64s(mask, lhs, rhs)
1729	}
1730
1731	#[inline(always)]
1732	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
1733		ctx.splat_u64s(value)
1734	}
1735
1736	#[inline(always)]
1737	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
1738		ctx.add_u64s(lhs, rhs)
1739	}
1740
1741	#[inline(always)]
1742	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1743		ctx.less_than_u64s(lhs, rhs)
1744	}
1745
1746	#[inline(always)]
1747	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
1748		ctx.abs_f64s(value)
1749	}
1750
1751	#[inline(always)]
1752	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
1753		*ctx
1754	}
1755
1756	#[inline(always)]
1757	fn simd_and_mask<S: Simd>(simd: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
1758		simd.and_m64s(lhs, rhs)
1759	}
1760
1761	#[inline(always)]
1762	fn simd_or_mask<S: Simd>(simd: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
1763		simd.or_m64s(lhs, rhs)
1764	}
1765
1766	#[inline(always)]
1767	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
1768		ctx.not_m64s(mask)
1769	}
1770
1771	#[inline(always)]
1772	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
1773		ctx.first_true_m64s(value)
1774	}
1775
1776	#[inline(always)]
1777	fn is_finite_impl(value: &Self) -> bool {
1778		(*value).is_finite()
1779	}
1780
1781	#[inline(always)]
1782	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: u64, end: u64) -> Self::SimdMemMask<S> {
1783		ctx.mask_between_m64s(start, end)
1784	}
1785
1786	#[inline(always)]
1787	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: u64, end: u64) -> Self::SimdMask<S> {
1788		ctx.mask_between_m64s(start, end).mask()
1789	}
1790
1791	#[inline(always)]
1792	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
1793		ctx.mask_load_ptr_f64s(mask, ptr as _)
1794	}
1795
1796	#[inline(always)]
1797	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
1798		ctx.mask_store_ptr_f64s(mask, ptr as _, values);
1799	}
1800}
1801
1802impl RealField for f64 {
1803	#[inline(always)]
1804	fn epsilon_impl() -> Self {
1805		Self::EPSILON
1806	}
1807
1808	#[inline(always)]
1809	fn min_positive_impl() -> Self {
1810		Self::MIN_POSITIVE
1811	}
1812
1813	#[inline(always)]
1814	fn max_positive_impl() -> Self {
1815		Self::MIN_POSITIVE.recip()
1816	}
1817
1818	#[inline(always)]
1819	fn sqrt_min_positive_impl() -> Self {
1820		libm::sqrt(Self::MIN_POSITIVE)
1821	}
1822
1823	#[inline(always)]
1824	fn sqrt_max_positive_impl() -> Self {
1825		libm::sqrt(Self::MIN_POSITIVE.recip())
1826	}
1827
1828	#[inline(always)]
1829	fn nbits_impl() -> usize {
1830		Self::MANTISSA_DIGITS as usize
1831	}
1832}
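
// Added commentary: for `f64` the scaling constants consumed by `abs_impl` and
// `recip_impl` come out to roughly
//
//     min_positive      ~= 2.2251e-308      max_positive      ~= 4.4942e307
//     sqrt_min_positive ~= 1.4917e-154      sqrt_max_positive ~= 6.7039e153
//
// `max_positive` is `MIN_POSITIVE.recip()` (2^1022) rather than `f64::MAX`, so both it
// and its reciprocal are exact powers of two.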
1833
1834impl<T: RealField> ComplexField for Complex<T> {
1835	type Arch = T::Arch;
1836	type Index = T::Index;
1837	type Real = T;
1838	type SimdCtx<S: Simd> = T::SimdCtx<S>;
1839	type SimdIndex<S: Simd> = T::SimdIndex<S>;
1840	type SimdMask<S: Simd> = T::SimdMask<S>;
1841	type SimdMemMask<S: Simd> = Complex<T::SimdMemMask<S>>;
1842	type SimdVec<S: Simd> = Complex<T::SimdVec<S>>;
1843	type Unit = T::Unit;
1844
1845	const IS_NATIVE_C32: bool = T::IS_NATIVE_F32;
1846	const IS_NATIVE_C64: bool = T::IS_NATIVE_F64;
1847	const IS_REAL: bool = false;
1848	const SIMD_CAPABILITIES: SimdCapabilities = T::SIMD_CAPABILITIES;
1849
1850	#[inline]
1851	fn zero_impl() -> Self {
1852		Complex {
1853			re: T::zero_impl(),
1854			im: T::zero_impl(),
1855		}
1856	}
1857
1858	#[inline]
1859	fn one_impl() -> Self {
1860		Complex {
1861			re: T::one_impl(),
1862			im: T::zero_impl(),
1863		}
1864	}
1865
1866	#[inline]
1867	fn nan_impl() -> Self {
1868		Complex {
1869			re: T::nan_impl(),
1870			im: T::nan_impl(),
1871		}
1872	}
1873
1874	#[inline]
1875	fn infinity_impl() -> Self {
1876		Complex {
1877			re: T::infinity_impl(),
1878			im: T::infinity_impl(),
1879		}
1880	}
1881
1882	#[inline]
1883	fn from_real_impl(real: &Self::Real) -> Self {
1884		Complex {
1885			re: real.clone(),
1886			im: T::zero_impl(),
1887		}
1888	}
1889
1890	#[inline]
1891	fn from_f64_impl(real: f64) -> Self {
1892		Complex {
1893			re: T::from_f64_impl(real),
1894			im: T::zero_impl(),
1895		}
1896	}
1897
1898	#[inline]
1899	fn real_part_impl(value: &Self) -> Self::Real {
1900		value.re.clone()
1901	}
1902
1903	#[inline]
1904	fn imag_part_impl(value: &Self) -> Self::Real {
1905		value.im.clone()
1906	}
1907
1908	#[inline]
1909	fn copy_impl(value: &Self) -> Self {
1910		value.clone()
1911	}
1912
1913	#[inline]
1914	fn conj_impl(value: &Self) -> Self {
1915		Self {
1916			re: value.re.clone(),
1917			im: value.im.neg_by_ref(),
1918		}
1919	}
1920
1921	#[inline]
1922	fn recip_impl(value: &Self) -> Self {
1923		let (re, im) = recip_impl(value.re.clone(), value.im.clone());
1924		Complex { re, im }
1925	}
1926
1927	#[inline]
1928	fn sqrt_impl(value: &Self) -> Self {
1929		let (re, im) = sqrt_impl(value.re.clone(), value.im.clone());
1930		Complex { re, im }
1931	}
1932
1933	#[inline]
1934	fn abs_impl(value: &Self) -> Self::Real {
1935		abs_impl(value.re.clone(), value.im.clone())
1936	}
1937
1938	#[inline]
1939	#[faer_macros::math]
1940	fn abs1_impl(value: &Self) -> Self::Real {
1941		abs1(value.re) + abs1(value.im)
1942	}
1943
1944	#[inline]
1945	#[faer_macros::math]
1946	fn abs2_impl(value: &Self) -> Self::Real {
1947		abs2(value.re) + abs2(value.im)
1948	}
1949
1950	#[inline]
1951	#[faer_macros::math]
1952	fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
1953		Complex {
1954			re: lhs.re * rhs,
1955			im: lhs.im * rhs,
1956		}
1957	}
1958
1959	#[inline]
1960	#[faer_macros::math]
1961	fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
1962		Complex {
1963			re: mul_pow2(lhs.re, rhs),
1964			im: mul_pow2(lhs.im, rhs),
1965		}
1966	}
1967
1968	#[inline]
1969	#[faer_macros::math]
1970	fn is_finite_impl(value: &Self) -> bool {
1971		is_finite(value.re) && is_finite(value.im)
1972	}
1973
1974	#[inline(always)]
1975	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
1976		T::simd_ctx(simd)
1977	}
1978
1979	#[inline(always)]
1980	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
1981		T::ctx_from_simd(ctx)
1982	}
1983
1984	#[inline(always)]
1985	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
1986		Complex {
1987			re: T::simd_splat(ctx, &value.re),
1988			im: T::simd_splat(ctx, &value.im),
1989		}
1990	}
1991
1992	#[inline(always)]
1993	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
1994		Complex {
1995			re: T::simd_splat_real(ctx, value),
1996			im: T::simd_splat_real(ctx, value),
1997		}
1998	}
1999
2000	#[inline(always)]
2001	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2002		Complex {
2003			re: T::simd_add(ctx, lhs.re, rhs.re),
2004			im: T::simd_add(ctx, lhs.im, rhs.im),
2005		}
2006	}
2007
2008	#[inline(always)]
2009	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2010		Complex {
2011			re: T::simd_sub(ctx, lhs.re, rhs.re),
2012			im: T::simd_sub(ctx, lhs.im, rhs.im),
2013		}
2014	}
2015
2016	#[inline(always)]
2017	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2018		Complex {
2019			re: T::simd_neg(ctx, value.re),
2020			im: T::simd_neg(ctx, value.im),
2021		}
2022	}
2023
2024	#[inline(always)]
2025	fn simd_conj<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2026		Complex {
2027			re: value.re,
2028			im: T::simd_neg(ctx, value.im),
2029		}
2030	}
2031
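	// `simd_abs1` computes the 1-norm |re| + |im| per element and `simd_abs_max`
	// computes max(|re|, |im|); both are cheap magnitude estimates that avoid the
	// square root of a true modulus. The result is broadcast into both halves of the
	// returned vector.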
2032	#[inline(always)]
2033	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2034		let v = T::simd_add(ctx, T::simd_abs1(ctx, value.re), T::simd_abs1(ctx, value.im));
2035		Complex { re: v, im: v }
2036	}
2037
2038	#[inline(always)]
2039	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2040		let re = T::simd_abs_max(ctx, value.re);
2041		let im = T::simd_abs_max(ctx, value.im);
2042
2043		let v = T::simd_select(ctx, T::simd_greater_than(ctx, re, im), re, im);
2044		Complex { re: v, im: v }
2045	}
2046
2047	#[inline(always)]
2048	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2049		Complex {
2050			re: T::simd_mul_real(ctx, lhs.re, real_rhs.re),
2051			im: T::simd_mul_real(ctx, lhs.im, real_rhs.re),
2052		}
2053	}
2054
2055	#[inline(always)]
2056	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2057		Complex {
2058			re: T::simd_mul_pow2(ctx, lhs.re, real_rhs.re),
2059			im: T::simd_mul_pow2(ctx, lhs.im, real_rhs.re),
2060		}
2061	}
2062
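	// Complex product (a + ib)(c + id) = (ac - bd) + i(ad + bc), written with fused
	// multiply-adds: re = fma(a, c, -(b * d)), im = fma(a, d, b * c).
	// `simd_conj_mul` below computes conj(lhs) * rhs = (ac + bd) + i(ad - bc).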
2063	#[inline(always)]
2064	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2065		Complex {
2066			re: T::simd_mul_add(ctx, lhs.re, rhs.re, T::simd_neg(ctx, T::simd_mul(ctx, lhs.im, rhs.im))),
2067			im: T::simd_mul_add(ctx, lhs.re, rhs.im, T::simd_mul(ctx, lhs.im, rhs.re)),
2068		}
2069	}
2070
2071	#[inline(always)]
2072	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2073		Complex {
2074			re: T::simd_mul_add(ctx, lhs.re, rhs.re, T::simd_mul(ctx, lhs.im, rhs.im)),
2075			im: T::simd_mul_add(ctx, lhs.re, rhs.im, T::simd_neg(ctx, T::simd_mul(ctx, lhs.im, rhs.re))),
2076		}
2077	}
2078
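	// Fused accumulate: acc + lhs * rhs (and acc + conj(lhs) * rhs below), using the
	// same real/imaginary decomposition as `simd_mul`, chained through two FMAs per half.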
2079	#[inline(always)]
2080	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2081		Complex {
2082			re: T::simd_mul_add(ctx, T::simd_neg(ctx, lhs.im), rhs.im, T::simd_mul_add(ctx, lhs.re, rhs.re, acc.re)),
2083			im: T::simd_mul_add(ctx, lhs.re, rhs.im, T::simd_mul_add(ctx, lhs.im, rhs.re, acc.im)),
2084		}
2085	}
2086
2087	#[inline(always)]
2088	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2089		Complex {
2090			re: T::simd_mul_add(ctx, lhs.im, rhs.im, T::simd_mul_add(ctx, lhs.re, rhs.re, acc.re)),
2091			im: T::simd_mul_add(ctx, lhs.re, rhs.im, T::simd_mul_add(ctx, T::simd_neg(ctx, lhs.im), rhs.re, acc.im)),
2092		}
2093	}
2094
2095	#[inline(always)]
2096	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2097		let v = T::simd_abs2_add(ctx, value.re, T::simd_abs2(ctx, value.im));
2098		Complex { re: v, im: v }
2099	}
2100
2101	#[inline(always)]
2102	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2103		let v = T::simd_abs2_add(ctx, value.re, T::simd_abs2_add(ctx, value.im, acc.re));
2104		Complex { re: v, im: v }
2105	}
2106
2107	#[inline(always)]
2108	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
2109		Complex {
2110			re: T::simd_reduce_sum(ctx, value.re),
2111			im: T::simd_reduce_sum(ctx, value.im),
2112		}
2113	}
2114
2115	#[inline(always)]
2116	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
2117		Complex {
2118			re: T::simd_reduce_max(ctx, value.re),
2119			im: T::simd_reduce_max(ctx, value.im),
2120		}
2121	}
2122
2123	#[inline(always)]
2124	fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2125		T::simd_and_mask(
2126			ctx,
2127			T::simd_equal(ctx, real_lhs.re, real_rhs.re),
2128			T::simd_equal(ctx, real_lhs.im, real_rhs.im),
2129		)
2130	}
2131
2132	#[inline(always)]
2133	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2134		T::simd_less_than(ctx, real_lhs.re, real_rhs.re)
2135	}
2136
2137	#[inline(always)]
2138	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2139		T::simd_less_than_or_equal(ctx, real_lhs.re, real_rhs.re)
2140	}
2141
2142	#[inline(always)]
2143	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2144		T::simd_greater_than(ctx, real_lhs.re, real_rhs.re)
2145	}
2146
2147	#[inline(always)]
2148	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2149		T::simd_greater_than_or_equal(ctx, real_lhs.re, real_rhs.re)
2150	}
2151
2152	#[inline(always)]
2153	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2154		Complex {
2155			re: T::simd_select(ctx, mask, lhs.re, rhs.re),
2156			im: T::simd_select(ctx, mask, lhs.im, rhs.im),
2157		}
2158	}
2159
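	// The index and mask helpers have no complex-specific behaviour; they simply
	// delegate to the underlying real field `T`.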
2160	#[inline(always)]
2161	fn simd_index_select<S: Simd>(
2162		ctx: &Self::SimdCtx<S>,
2163		mask: Self::SimdMask<S>,
2164		lhs: Self::SimdIndex<S>,
2165		rhs: Self::SimdIndex<S>,
2166	) -> Self::SimdIndex<S> {
2167		T::simd_index_select(ctx, mask, lhs, rhs)
2168	}
2169
2170	#[inline(always)]
2171	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
2172		T::simd_index_splat(ctx, value)
2173	}
2174
2175	#[inline(always)]
2176	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
2177		T::simd_index_add(ctx, lhs, rhs)
2178	}
2179
2180	#[inline(always)]
2181	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
2182		T::simd_index_less_than(ctx, lhs, rhs)
2183	}
2184
2185	#[inline(always)]
2186	fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
2187		T::simd_and_mask(ctx, lhs, rhs)
2188	}
2189
2190	#[inline(always)]
2191	fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
2192		T::simd_or_mask(ctx, lhs, rhs)
2193	}
2194
2195	#[inline(always)]
2196	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
2197		T::simd_not_mask(ctx, mask)
2198	}
2199
2200	#[inline(always)]
2201	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
2202		T::simd_first_true_mask(ctx, value)
2203	}
2204
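	// The memory mask is built in units of real scalars: a range of `start..end`
	// complex elements is doubled to `2*start..2*end` scalars, then split across the
	// two halves of the vector, clamping each sub-range to the `n` scalars covered by
	// one half. For example, with n = 4 and a complex range 1..3 (scalars 2..6), the
	// first half is masked over 2..4 and the second half over 0..2.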
2205	#[inline(always)]
2206	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S> {
2207		let n = core::mem::size_of::<Self::SimdVec<S>>() / core::mem::size_of::<Self>();
2208		let start = start.zx() * 2;
2209		let end = end.zx() * 2;
2210
2211		let re = T::simd_mem_mask_between(ctx, Self::Index::truncate(start.min(n)), Self::Index::truncate(end.min(n)));
2212		let im = T::simd_mem_mask_between(ctx, Self::Index::truncate(start.max(n) - n), Self::Index::truncate(end.max(n) - n));
2213		Complex { re, im }
2214	}
2215
2216	#[inline(always)]
2217	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S> {
2218		T::simd_mask_between(ctx, start, end)
2219	}
2220
2221	#[inline(always)]
2222	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
2223		Complex {
2224			re: T::simd_mask_load_raw(ctx, mask.re, core::ptr::addr_of!((*ptr).re)),
2225			im: T::simd_mask_load_raw(ctx, mask.im, core::ptr::addr_of!((*ptr).im)),
2226		}
2227	}
2228
2229	#[inline(always)]
2230	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
2231		T::simd_mask_store_raw(ctx, mask.re, core::ptr::addr_of_mut!((*ptr).re), values.re);
2232		T::simd_mask_store_raw(ctx, mask.im, core::ptr::addr_of_mut!((*ptr).im), values.im);
2233	}
2234}
2235
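// `ComplexImpl` and `ComplexImplConj` are hidden newtypes around `num_complex::Complex`
// used for the native `c32`/`c64` code paths: unlike the generic `Complex<T>` impl
// above, their SIMD vector type is pulp's interleaved `S::c32s`/`S::c64s`.
// `ComplexImplConj` is the non-canonical conjugated variant, as encoded by the
// `Conjugate` impls below.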
2236#[repr(transparent)]
2237#[doc(hidden)]
2238#[derive(Copy, Clone, Debug, PartialEq)]
2239pub struct ComplexImpl<T>(Complex<T>);
2240
2241#[repr(transparent)]
2242#[doc(hidden)]
2243#[derive(Copy, Clone, Debug, PartialEq)]
2244pub struct ComplexImplConj<T>(Complex<T>);
2245
2246unsafe impl Conjugate for ComplexImpl<f32> {
2247	type Canonical = ComplexImpl<f32>;
2248	type Conj = ComplexImplConj<f32>;
2249
2250	const IS_CANONICAL: bool = true;
2251}
2252unsafe impl Conjugate for ComplexImplConj<f32> {
2253	type Canonical = ComplexImpl<f32>;
2254	type Conj = ComplexImpl<f32>;
2255
2256	const IS_CANONICAL: bool = false;
2257}
2258unsafe impl Conjugate for ComplexImpl<f64> {
2259	type Canonical = ComplexImpl<f64>;
2260	type Conj = ComplexImplConj<f64>;
2261
2262	const IS_CANONICAL: bool = true;
2263}
2264unsafe impl Conjugate for ComplexImplConj<f64> {
2265	type Canonical = ComplexImpl<f64>;
2266	type Conj = ComplexImpl<f64>;
2267
2268	const IS_CANONICAL: bool = false;
2269}
2270
2271impl<T: RealField> core::ops::Neg for &ComplexImpl<T> {
2272	type Output = ComplexImpl<T>;
2273
2274	#[inline]
2275	fn neg(self) -> Self::Output {
2276		use math_utils::*;
2277
2278		ComplexImpl(neg(&self.0))
2279	}
2280}
2281impl<T: RealField> core::ops::Add<&ComplexImpl<T>> for &ComplexImpl<T> {
2282	type Output = ComplexImpl<T>;
2283
2284	#[inline]
2285	fn add(self, rhs: &ComplexImpl<T>) -> Self::Output {
2286		use math_utils::*;
2287
2288		ComplexImpl(add(&self.0, &rhs.0))
2289	}
2290}
2291impl<T: RealField> core::ops::Sub<&ComplexImpl<T>> for &ComplexImpl<T> {
2292	type Output = ComplexImpl<T>;
2293
2294	#[inline]
2295	fn sub(self, rhs: &ComplexImpl<T>) -> Self::Output {
2296		use math_utils::*;
2297
2298		ComplexImpl(sub(&self.0, &rhs.0))
2299	}
2300}
2301impl<T: RealField> core::ops::Mul<&ComplexImpl<T>> for &ComplexImpl<T> {
2302	type Output = ComplexImpl<T>;
2303
2304	#[inline]
2305	fn mul(self, rhs: &ComplexImpl<T>) -> Self::Output {
2306		use math_utils::*;
2307
2308		ComplexImpl(mul(&self.0, &rhs.0))
2309	}
2310}
2311
2312impl<T> From<Complex<T>> for ComplexImpl<T> {
2313	#[inline]
2314	fn from(value: Complex<T>) -> Self {
2315		Self(value)
2316	}
2317}
2318
2319impl ComplexField for ComplexImpl<f32> {
2320	type Arch = pulp::Arch;
2321	type Index = u32;
2322	type Real = f32;
2323	type SimdCtx<S: Simd> = S;
2324	type SimdIndex<S: Simd> = S::u32s;
2325	type SimdMask<S: Simd> = S::m32s;
2326	type SimdMemMask<S: Simd> = pulp::MemMask<S::m32s>;
2327	type SimdVec<S: Simd> = S::c32s;
2328	type Unit = f32;
2329
2330	const IS_NATIVE_C32: bool = true;
2331	const IS_REAL: bool = false;
2332	const SIMD_ABS_SPLIT_REAL_IMAG: bool = true;
2333	const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
2334
2335	#[inline]
2336	fn zero_impl() -> Self {
2337		Complex {
2338			re: f32::zero_impl(),
2339			im: f32::zero_impl(),
2340		}
2341		.into()
2342	}
2343
2344	#[inline]
2345	fn one_impl() -> Self {
2346		Complex {
2347			re: f32::one_impl(),
2348			im: f32::zero_impl(),
2349		}
2350		.into()
2351	}
2352
2353	#[inline]
2354	fn nan_impl() -> Self {
2355		Complex {
2356			re: f32::nan_impl(),
2357			im: f32::nan_impl(),
2358		}
2359		.into()
2360	}
2361
2362	#[inline]
2363	fn infinity_impl() -> Self {
2364		Complex {
2365			re: f32::infinity_impl(),
2366			im: f32::infinity_impl(),
2367		}
2368		.into()
2369	}
2370
2371	#[inline]
2372	fn from_real_impl(real: &Self::Real) -> Self {
2373		Complex {
2374			re: real.clone(),
2375			im: f32::zero_impl(),
2376		}
2377		.into()
2378	}
2379
2380	#[inline]
2381	fn from_f64_impl(real: f64) -> Self {
2382		Complex {
2383			re: f32::from_f64_impl(real),
2384			im: f32::zero_impl(),
2385		}
2386		.into()
2387	}
2388
2389	#[inline]
2390	fn real_part_impl(value: &Self) -> Self::Real {
2391		value.0.re.clone()
2392	}
2393
2394	#[inline]
2395	fn imag_part_impl(value: &Self) -> Self::Real {
2396		value.0.im.clone()
2397	}
2398
2399	#[inline]
2400	fn copy_impl(value: &Self) -> Self {
2401		value.clone()
2402	}
2403
2404	#[inline]
2405	fn conj_impl(value: &Self) -> Self {
2406		Complex {
2407			re: value.0.re.clone(),
2408			im: value.0.im.neg_by_ref(),
2409		}
2410		.into()
2411	}
2412
2413	#[inline]
2414	fn recip_impl(value: &Self) -> Self {
2415		let (re, im) = recip_impl(value.0.re.clone(), value.0.im.clone());
2416		Complex { re, im }.into()
2417	}
2418
2419	#[inline]
2420	fn sqrt_impl(value: &Self) -> Self {
2421		let (re, im) = sqrt_impl(value.0.re.clone(), value.0.im.clone());
2422		Complex { re, im }.into()
2423	}
2424
2425	#[inline]
2426	fn abs_impl(value: &Self) -> Self::Real {
2427		abs_impl(value.0.re.clone(), value.0.im.clone())
2428	}
2429
2430	#[inline]
2431	#[faer_macros::math]
2432	fn abs1_impl(value: &Self) -> Self::Real {
2433		abs1(value.0.re) + abs1(value.0.im)
2434	}
2435
2436	#[inline]
2437	#[faer_macros::math]
2438	fn abs2_impl(value: &Self) -> Self::Real {
2439		abs2(value.0.re) + abs2(value.0.im)
2440	}
2441
2442	#[inline]
2443	#[faer_macros::math]
2444	fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2445		Complex {
2446			re: lhs.0.re * *rhs,
2447			im: lhs.0.im * *rhs,
2448		}
2449		.into()
2450	}
2451
2452	#[inline]
2453	#[faer_macros::math]
2454	fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2455		Complex {
2456			re: mul_pow2(lhs.0.re, rhs),
2457			im: mul_pow2(lhs.0.im, rhs),
2458		}
2459		.into()
2460	}
2461
2462	#[inline]
2463	#[faer_macros::math]
2464	fn is_finite_impl(value: &Self) -> bool {
2465		is_finite(value.0.re) && is_finite(value.0.im)
2466	}
2467
2468	#[inline(always)]
2469	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
2470		f32::simd_ctx(simd)
2471	}
2472
2473	#[inline(always)]
2474	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
2475		f32::ctx_from_simd(ctx)
2476	}
2477
2478	#[inline(always)]
2479	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
2480		ctx.splat_c32s(value.0)
2481	}
2482
2483	#[inline(always)]
2484	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
2485		ctx.splat_c32s(Complex { re: *value, im: *value })
2486	}
2487
2488	#[inline(always)]
2489	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2490		ctx.add_c32s(lhs, rhs)
2491	}
2492
2493	#[inline(always)]
2494	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2495		ctx.sub_c32s(lhs, rhs)
2496	}
2497
2498	#[inline(always)]
2499	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2500		ctx.neg_c32s(value)
2501	}
2502
2503	#[inline(always)]
2504	fn simd_conj<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2505		ctx.conj_c32s(value)
2506	}
2507
2508	#[inline(always)]
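	// Many of the operations below dispatch on the size of `S::c32s`: if it matches
	// `S::f32s`, the complex vector is reinterpreted as real lanes and the real SIMD
	// primitive is used; if it is the size of a single `Complex<f32>`, this is the
	// scalar fallback and the value is handled directly; any other layout is treated
	// as unreachable and panics.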
2509	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2510		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2511			bytemuck::cast(ctx.abs_f32s(bytemuck::cast(value)))
2512		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2513			let value: Complex<f32> = bytemuck::cast(value);
2514			let v = value.re.abs() + value.im.abs();
2515			bytemuck::cast(Complex { re: v, im: v })
2516		} else {
2517			panic!();
2518		}
2519	}
2520
2521	#[inline(always)]
2522	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2523		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2524			bytemuck::cast(ctx.abs_f32s(bytemuck::cast(value)))
2525		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2526			let value: Complex<f32> = bytemuck::cast(value);
2527			let re = value.re.abs();
2528			let im = value.im.abs();
2529			let v = if re > im { re } else { im };
2530			bytemuck::cast(Complex { re: v, im: v })
2531		} else {
2532			panic!();
2533		}
2534	}
2535
2536	#[inline(always)]
2537	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2538		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2539			bytemuck::cast(ctx.mul_f32s(bytemuck::cast(lhs), bytemuck::cast(real_rhs)))
2540		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2541			let mut lhs: Complex<f32> = bytemuck::cast(lhs);
2542			let rhs: Complex<f32> = bytemuck::cast(real_rhs);
2543			lhs *= rhs.re;
2544			bytemuck::cast(lhs)
2545		} else {
2546			panic!();
2547		}
2548	}
2549
2550	#[inline(always)]
2551	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2552		Self::simd_mul_real(ctx, lhs, real_rhs)
2553	}
2554
2555	#[inline(always)]
2556	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2557		ctx.mul_e_c32s(lhs, rhs)
2558	}
2559
2560	#[inline(always)]
2561	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2562		ctx.conj_mul_e_c32s(lhs, rhs)
2563	}
2564
2565	#[inline(always)]
2566	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2567		ctx.mul_add_e_c32s(lhs, rhs, acc)
2568	}
2569
2570	#[inline(always)]
2571	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2572		ctx.conj_mul_add_e_c32s(lhs, rhs, acc)
2573	}
2574
2575	#[inline(always)]
2576	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2577		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2578			bytemuck::cast(ctx.mul_f32s(bytemuck::cast(value), bytemuck::cast(value)))
2579		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2580			let value: Complex<f32> = bytemuck::cast(value);
2581			let v = value.re * value.re + value.im * value.im;
2582			bytemuck::cast(Complex { re: v, im: v })
2583		} else {
2584			panic!();
2585		}
2586	}
2587
2588	#[inline(always)]
2589	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2590		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2591			bytemuck::cast(ctx.mul_add_f32s(bytemuck::cast(value), bytemuck::cast(value), bytemuck::cast(acc)))
2592		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2593			let value: Complex<f32> = bytemuck::cast(value);
2594			let acc: Complex<f32> = bytemuck::cast(acc);
2595			let v = value.re * value.re + value.im * value.im + acc.re;
2596			bytemuck::cast(Complex { re: v, im: v })
2597		} else {
2598			panic!();
2599		}
2600	}
2601
2602	#[inline(always)]
2603	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
2604		ctx.reduce_sum_c32s(value).into()
2605	}
2606
2607	#[inline(always)]
2608	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
2609		ctx.reduce_max_c32s(value).into()
2610	}
2611
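	// `simd_equal` is left unimplemented (it panics) for the native complex types.
	// The ordering comparisons below only inspect the real lanes; in the scalar
	// fallback the resulting `bool` is transmuted into `S::m32s`, which the assert
	// checks is itself one byte wide.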
2612	#[inline(always)]
2613	fn simd_equal<S: Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
2614		panic!()
2615	}
2616
2617	#[inline(always)]
2618	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2619		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2620			ctx.less_than_f32s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
2621		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2622			assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });
2623
2624			let lhs: Complex<f32> = bytemuck::cast(real_lhs);
2625			let rhs: Complex<f32> = bytemuck::cast(real_rhs);
2626			unsafe { core::mem::transmute_copy(&(lhs.re < rhs.re)) }
2627		} else {
2628			panic!();
2629		}
2630	}
2631
2632	#[inline(always)]
2633	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2634		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2635			ctx.less_than_or_equal_f32s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
2636		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2637			assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });
2638
2639			let lhs: Complex<f32> = bytemuck::cast(real_lhs);
2640			let rhs: Complex<f32> = bytemuck::cast(real_rhs);
2641			unsafe { core::mem::transmute_copy(&(lhs.re <= rhs.re)) }
2642		} else {
2643			panic!();
2644		}
2645	}
2646
2647	#[inline(always)]
2648	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2649		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2650			ctx.greater_than_f32s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
2651		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2652			assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });
2653
2654			let lhs: Complex<f32> = bytemuck::cast(real_lhs);
2655			let rhs: Complex<f32> = bytemuck::cast(real_rhs);
2656			unsafe { core::mem::transmute_copy(&(lhs.re > rhs.re)) }
2657		} else {
2658			panic!();
2659		}
2660	}
2661
2662	#[inline(always)]
2663	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2664		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2665			ctx.greater_than_or_equal_f32s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
2666		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2667			assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });
2668
2669			let lhs: Complex<f32> = bytemuck::cast(real_lhs);
2670			let rhs: Complex<f32> = bytemuck::cast(real_rhs);
2671			unsafe { core::mem::transmute_copy(&(lhs.re >= rhs.re)) }
2672		} else {
2673			panic!();
2674		}
2675	}
2676
2677	#[inline(always)]
2678	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2679		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
2680			bytemuck::cast(ctx.select_f32s_m32s(mask, bytemuck::cast(lhs), bytemuck::cast(rhs)))
2681		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
2682			assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });
2683			let mask: bool = unsafe { core::mem::transmute_copy(&mask) };
2684			let lhs: Complex<f32> = bytemuck::cast(lhs);
2685			let rhs: Complex<f32> = bytemuck::cast(rhs);
2686			bytemuck::cast(if mask { lhs } else { rhs })
2687		} else {
2688			panic!();
2689		}
2690	}
2691
2692	#[inline(always)]
2693	fn simd_index_select<S: Simd>(
2694		ctx: &Self::SimdCtx<S>,
2695		mask: Self::SimdMask<S>,
2696		lhs: Self::SimdIndex<S>,
2697		rhs: Self::SimdIndex<S>,
2698	) -> Self::SimdIndex<S> {
2699		f32::simd_index_select(ctx, mask, lhs, rhs)
2700	}
2701
2702	#[inline(always)]
2703	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
2704		f32::simd_index_splat(ctx, value)
2705	}
2706
2707	#[inline(always)]
2708	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
2709		f32::simd_index_add(ctx, lhs, rhs)
2710	}
2711
2712	#[inline(always)]
2713	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
2714		f32::simd_index_less_than(ctx, lhs, rhs)
2715	}
2716
2717	#[inline(always)]
2718	fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
2719		f32::simd_and_mask(ctx, lhs, rhs)
2720	}
2721
2722	#[inline(always)]
2723	fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
2724		f32::simd_or_mask(ctx, lhs, rhs)
2725	}
2726
2727	#[inline(always)]
2728	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
2729		f32::simd_not_mask(ctx, mask)
2730	}
2731
2732	#[inline(always)]
2733	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
2734		f32::simd_first_true_mask(ctx, value)
2735	}
2736
2737	#[inline(always)]
2738	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: u32, end: u32) -> Self::SimdMemMask<S> {
2739		ctx.mask_between_m32s(2 * start, 2 * end)
2740	}
2741
2742	#[inline(always)]
2743	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: u32, end: u32) -> Self::SimdMask<S> {
2744		ctx.mask_between_m32s(2 * start, 2 * end).mask()
2745	}
2746
2747	#[inline(always)]
2748	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
2749		ctx.mask_load_ptr_c32s(mask, ptr as _)
2750	}
2751
2752	#[inline(always)]
2753	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
2754		ctx.mask_store_ptr_c32s(mask, ptr as _, values);
2755	}
2756}
2757
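// Same structure as the `ComplexImpl<f32>` impl above, with 64-bit lanes
// (`S::c64s`, `S::m64s`, `u64` indices).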
2758impl ComplexField for ComplexImpl<f64> {
2759	type Arch = pulp::Arch;
2760	type Index = u64;
2761	type Real = f64;
2762	type SimdCtx<S: Simd> = S;
2763	type SimdIndex<S: Simd> = S::u64s;
2764	type SimdMask<S: Simd> = S::m64s;
2765	type SimdMemMask<S: Simd> = pulp::MemMask<S::m64s>;
2766	type SimdVec<S: Simd> = S::c64s;
2767	type Unit = f64;
2768
2769	const IS_NATIVE_C64: bool = true;
2770	const IS_REAL: bool = false;
2771	const SIMD_ABS_SPLIT_REAL_IMAG: bool = true;
2772	const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
2773
2774	#[inline]
2775	fn zero_impl() -> Self {
2776		Complex {
2777			re: f64::zero_impl(),
2778			im: f64::zero_impl(),
2779		}
2780		.into()
2781	}
2782
2783	#[inline]
2784	fn one_impl() -> Self {
2785		Complex {
2786			re: f64::one_impl(),
2787			im: f64::zero_impl(),
2788		}
2789		.into()
2790	}
2791
2792	#[inline]
2793	fn nan_impl() -> Self {
2794		Complex {
2795			re: f64::nan_impl(),
2796			im: f64::nan_impl(),
2797		}
2798		.into()
2799	}
2800
2801	#[inline]
2802	fn infinity_impl() -> Self {
2803		Complex {
2804			re: f64::infinity_impl(),
2805			im: f64::infinity_impl(),
2806		}
2807		.into()
2808	}
2809
2810	#[inline]
2811	fn from_real_impl(real: &Self::Real) -> Self {
2812		Complex {
2813			re: real.clone(),
2814			im: f64::zero_impl(),
2815		}
2816		.into()
2817	}
2818
2819	#[inline]
2820	fn from_f64_impl(real: f64) -> Self {
2821		Complex {
2822			re: f64::from_f64_impl(real),
2823			im: f64::zero_impl(),
2824		}
2825		.into()
2826	}
2827
2828	#[inline]
2829	fn real_part_impl(value: &Self) -> Self::Real {
2830		value.0.re.clone()
2831	}
2832
2833	#[inline]
2834	fn imag_part_impl(value: &Self) -> Self::Real {
2835		value.0.im.clone()
2836	}
2837
2838	#[inline]
2839	fn copy_impl(value: &Self) -> Self {
2840		value.clone()
2841	}
2842
2843	#[inline]
2844	fn conj_impl(value: &Self) -> Self {
2845		Complex {
2846			re: value.0.re.clone(),
2847			im: value.0.im.neg_by_ref(),
2848		}
2849		.into()
2850	}
2851
2852	#[inline]
2853	fn recip_impl(value: &Self) -> Self {
2854		let (re, im) = recip_impl(value.0.re.clone(), value.0.im.clone());
2855		Complex { re, im }.into()
2856	}
2857
2858	#[inline]
2859	fn sqrt_impl(value: &Self) -> Self {
2860		let (re, im) = sqrt_impl(value.0.re.clone(), value.0.im.clone());
2861		Complex { re, im }.into()
2862	}
2863
2864	#[inline]
2865	fn abs_impl(value: &Self) -> Self::Real {
2866		abs_impl(value.0.re.clone(), value.0.im.clone())
2867	}
2868
2869	#[inline]
2870	#[faer_macros::math]
2871	fn abs1_impl(value: &Self) -> Self::Real {
2872		abs1(value.0.re) + abs1(value.0.im)
2873	}
2874
2875	#[inline]
2876	#[faer_macros::math]
2877	fn abs2_impl(value: &Self) -> Self::Real {
2878		abs2(value.0.re) + abs2(value.0.im)
2879	}
2880
2881	#[inline]
2882	#[faer_macros::math]
2883	fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2884		Complex {
2885			re: lhs.0.re * *rhs,
2886			im: lhs.0.im * *rhs,
2887		}
2888		.into()
2889	}
2890
2891	#[inline]
2892	#[faer_macros::math]
2893	fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
2894		Complex {
2895			re: mul_pow2(lhs.0.re, rhs),
2896			im: mul_pow2(lhs.0.im, rhs),
2897		}
2898		.into()
2899	}
2900
2901	#[inline]
2902	#[faer_macros::math]
2903	fn is_finite_impl(value: &Self) -> bool {
2904		is_finite(value.0.re) && is_finite(value.0.im)
2905	}
2906
2907	#[inline(always)]
2908	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
2909		f64::simd_ctx(simd)
2910	}
2911
2912	#[inline(always)]
2913	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
2914		f64::ctx_from_simd(ctx)
2915	}
2916
2917	#[inline(always)]
2918	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
2919		ctx.splat_c64s(value.0)
2920	}
2921
2922	#[inline(always)]
2923	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
2924		ctx.splat_c64s(Complex { re: *value, im: *value })
2925	}
2926
2927	#[inline(always)]
2928	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2929		ctx.add_c64s(lhs, rhs)
2930	}
2931
2932	#[inline(always)]
2933	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2934		ctx.sub_c64s(lhs, rhs)
2935	}
2936
2937	#[inline(always)]
2938	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2939		ctx.neg_c64s(value)
2940	}
2941
2942	#[inline(always)]
2943	fn simd_conj<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2944		ctx.conj_c64s(value)
2945	}
2946
2947	#[inline(always)]
2948	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2949		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
2950			bytemuck::cast(ctx.abs_f64s(bytemuck::cast(value)))
2951		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
2952			let value: Complex<f64> = bytemuck::cast(value);
2953			let v = value.re.abs() + value.im.abs();
2954			bytemuck::cast(Complex { re: v, im: v })
2955		} else {
2956			panic!();
2957		}
2958	}
2959
2960	#[inline(always)]
2961	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2962		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
2963			bytemuck::cast(ctx.abs_f64s(bytemuck::cast(value)))
2964		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
2965			let value: Complex<f64> = bytemuck::cast(value);
2966			let re = value.re.abs();
2967			let im = value.im.abs();
2968			let v = if re > im { re } else { im };
2969			bytemuck::cast(Complex { re: v, im: v })
2970		} else {
2971			panic!();
2972		}
2973	}
2974
2975	#[inline(always)]
2976	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2977		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
2978			bytemuck::cast(ctx.mul_f64s(bytemuck::cast(lhs), bytemuck::cast(real_rhs)))
2979		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
2980			let mut lhs: Complex<f64> = bytemuck::cast(lhs);
2981			let rhs: Complex<f64> = bytemuck::cast(real_rhs);
2982			lhs *= rhs.re;
2983			bytemuck::cast(lhs)
2984		} else {
2985			panic!();
2986		}
2987	}
2988
2989	#[inline(always)]
2990	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2991		Self::simd_mul_real(ctx, lhs, real_rhs)
2992	}
2993
2994	#[inline(always)]
2995	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2996		ctx.mul_e_c64s(lhs, rhs)
2997	}
2998
2999	#[inline(always)]
3000	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3001		ctx.conj_mul_e_c64s(lhs, rhs)
3002	}
3003
3004	#[inline(always)]
3005	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
3006		ctx.mul_add_e_c64s(lhs, rhs, acc)
3007	}
3008
3009	#[inline(always)]
3010	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
3011		ctx.conj_mul_add_e_c64s(lhs, rhs, acc)
3012	}
3013
3014	#[inline(always)]
3015	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
3016		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3017			bytemuck::cast(ctx.mul_f64s(bytemuck::cast(value), bytemuck::cast(value)))
3018		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3019			let value: Complex<f64> = bytemuck::cast(value);
3020			let v = value.re * value.re + value.im * value.im;
3021			bytemuck::cast(Complex { re: v, im: v })
3022		} else {
3023			panic!();
3024		}
3025	}
3026
3027	#[inline(always)]
3028	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
3029		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3030			bytemuck::cast(ctx.mul_add_f64s(bytemuck::cast(value), bytemuck::cast(value), bytemuck::cast(acc)))
3031		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3032			let value: Complex<f64> = bytemuck::cast(value);
3033			let acc: Complex<f64> = bytemuck::cast(acc);
3034			let v = value.re * value.re + value.im * value.im + acc.re;
3035			bytemuck::cast(Complex { re: v, im: v })
3036		} else {
3037			panic!();
3038		}
3039	}
3040
3041	#[inline(always)]
3042	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
3043		ctx.reduce_sum_c64s(value).into()
3044	}
3045
3046	#[inline(always)]
3047	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
3048		ctx.reduce_max_c64s(value).into()
3049	}
3050
3051	#[inline(always)]
3052	fn simd_equal<S: Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3053		panic!()
3054	}
3055
3056	#[inline(always)]
3057	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
3058		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3059			ctx.less_than_f64s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
3060		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3061			assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });
3062
3063			let lhs: Complex<f64> = bytemuck::cast(real_lhs);
3064			let rhs: Complex<f64> = bytemuck::cast(real_rhs);
3065			unsafe { core::mem::transmute_copy(&(lhs.re < rhs.re)) }
3066		} else {
3067			panic!();
3068		}
3069	}
3070
3071	#[inline(always)]
3072	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
3073		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3074			ctx.less_than_or_equal_f64s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
3075		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3076			assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });
3077
3078			let lhs: Complex<f64> = bytemuck::cast(real_lhs);
3079			let rhs: Complex<f64> = bytemuck::cast(real_rhs);
3080			unsafe { core::mem::transmute_copy(&(lhs.re <= rhs.re)) }
3081		} else {
3082			panic!();
3083		}
3084	}
3085
3086	#[inline(always)]
3087	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
3088		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3089			ctx.greater_than_f64s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
3090		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3091			assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });
3092
3093			let lhs: Complex<f64> = bytemuck::cast(real_lhs);
3094			let rhs: Complex<f64> = bytemuck::cast(real_rhs);
3095			unsafe { core::mem::transmute_copy(&(lhs.re > rhs.re)) }
3096		} else {
3097			panic!();
3098		}
3099	}
3100
3101	#[inline(always)]
3102	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
3103		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3104			ctx.greater_than_or_equal_f64s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
3105		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3106			assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });
3107
3108			let lhs: Complex<f64> = bytemuck::cast(real_lhs);
3109			let rhs: Complex<f64> = bytemuck::cast(real_rhs);
3110			unsafe { core::mem::transmute_copy(&(lhs.re >= rhs.re)) }
3111		} else {
3112			panic!();
3113		}
3114	}
3115
3116	#[inline(always)]
3117	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
3118		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
3119			bytemuck::cast(ctx.select_f64s_m64s(mask, bytemuck::cast(lhs), bytemuck::cast(rhs)))
3120		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
3121			assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });
3122			let mask: bool = unsafe { core::mem::transmute_copy(&mask) };
3123			let lhs: Complex<f64> = bytemuck::cast(lhs);
3124			let rhs: Complex<f64> = bytemuck::cast(rhs);
3125			bytemuck::cast(if mask { lhs } else { rhs })
3126		} else {
3127			panic!();
3128		}
3129	}
3130
3131	#[inline(always)]
3132	fn simd_index_select<S: Simd>(
3133		ctx: &Self::SimdCtx<S>,
3134		mask: Self::SimdMask<S>,
3135		lhs: Self::SimdIndex<S>,
3136		rhs: Self::SimdIndex<S>,
3137	) -> Self::SimdIndex<S> {
3138		f64::simd_index_select(ctx, mask, lhs, rhs)
3139	}
3140
3141	#[inline(always)]
3142	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
3143		f64::simd_index_splat(ctx, value)
3144	}
3145
3146	#[inline(always)]
3147	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
3148		f64::simd_index_add(ctx, lhs, rhs)
3149	}
3150
3151	#[inline(always)]
3152	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
3153		f64::simd_index_less_than(ctx, lhs, rhs)
3154	}
3155
3156	#[inline(always)]
3157	fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
3158		f64::simd_and_mask(ctx, lhs, rhs)
3159	}
3160
3161	#[inline(always)]
3162	fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
3163		f64::simd_or_mask(ctx, lhs, rhs)
3164	}
3165
3166	#[inline(always)]
3167	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
3168		f64::simd_not_mask(ctx, mask)
3169	}
3170
3171	#[inline(always)]
3172	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
3173		f64::simd_first_true_mask(ctx, value)
3174	}
3175
3176	#[inline(always)]
3177	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: u64, end: u64) -> Self::SimdMemMask<S> {
3178		ctx.mask_between_m64s(2 * start, 2 * end)
3179	}
3180
3181	#[inline(always)]
3182	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: u64, end: u64) -> Self::SimdMask<S> {
3183		ctx.mask_between_m64s(2 * start, 2 * end).mask()
3184	}
3185
3186	#[inline(always)]
3187	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
3188		ctx.mask_load_ptr_c64s(mask, ptr as _)
3189	}
3190
3191	#[inline(always)]
3192	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
3193		ctx.mask_store_ptr_c64s(mask, ptr as _, values);
3194	}
3195}
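// Rough usage sketch (not part of this crate; `dot_tail` is a hypothetical name):
// a partial complex dot product over a buffer no longer than one SIMD register,
// built only from the operations defined above. `len` is assumed to be at most the
// number of complex lanes of `S::c64s`.
//
//     unsafe fn dot_tail<S: Simd>(simd: S, lhs: *const Complex<f64>, rhs: *const Complex<f64>, len: u64) -> Complex<f64> {
//         type C = ComplexImpl<f64>;
//         let ctx = C::simd_ctx(simd);
//         // mask covering the first `len` complex elements; lanes past it load as zero
//         let mask = C::simd_mem_mask_between::<S>(&ctx, 0, len);
//         let a = C::simd_mask_load_raw::<S>(&ctx, mask, lhs as _);
//         let b = C::simd_mask_load_raw::<S>(&ctx, mask, rhs as _);
//         let zero = C::simd_splat::<S>(&ctx, &Complex { re: 0.0, im: 0.0 }.into());
//         // conj(a) * b + 0, then horizontal sum over the lanes
//         let acc = C::simd_conj_mul_add::<S>(&ctx, a, b, zero);
//         let sum = C::simd_reduce_sum::<S>(&ctx, acc);
//         Complex {
//             re: C::real_part_impl(&sum),
//             im: C::imag_part_impl(&sum),
//         }
//     }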
3196
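// `Symbolic` is a zero-sized stand-in "scalar": every arithmetic operation is a no-op
// returning `Self`. It lets code written against `RealField`/`ComplexField` be reused
// for purely structural computations (e.g. symbolic factorizations where only the
// sparsity pattern matters), without storing or computing any numerical values.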
3197#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
3198pub struct Symbolic;
3199
3200impl core::ops::Add for Symbolic {
3201	type Output = Self;
3202
3203	fn add(self, _: Self) -> Self {
3204		Self
3205	}
3206}
3207impl core::ops::Sub for Symbolic {
3208	type Output = Self;
3209
3210	fn sub(self, _: Self) -> Self {
3211		Self
3212	}
3213}
3214impl core::ops::Mul for Symbolic {
3215	type Output = Self;
3216
3217	fn mul(self, _: Self) -> Self {
3218		Self
3219	}
3220}
3221impl core::ops::Div for Symbolic {
3222	type Output = Self;
3223
3224	fn div(self, _: Self) -> Self {
3225		Self
3226	}
3227}
3228impl core::ops::Neg for Symbolic {
3229	type Output = Self;
3230
3231	fn neg(self) -> Self {
3232		Self
3233	}
3234}
3235
3236impl core::ops::Add for &Symbolic {
3237	type Output = Symbolic;
3238
3239	fn add(self, _: Self) -> Symbolic {
3240		Symbolic
3241	}
3242}
3243impl core::ops::Sub for &Symbolic {
3244	type Output = Symbolic;
3245
3246	fn sub(self, _: Self) -> Symbolic {
3247		Symbolic
3248	}
3249}
3250impl core::ops::Mul for &Symbolic {
3251	type Output = Symbolic;
3252
3253	fn mul(self, _: Self) -> Symbolic {
3254		Symbolic
3255	}
3256}
3257impl core::ops::Div for &Symbolic {
3258	type Output = Symbolic;
3259
3260	fn div(self, _: Self) -> Symbolic {
3261		Symbolic
3262	}
3263}
3264impl core::ops::Neg for &Symbolic {
3265	type Output = Symbolic;
3266
3267	fn neg(self) -> Symbolic {
3268		Symbolic
3269	}
3270}
3271
3272impl core::ops::Rem for Symbolic {
3273	type Output = Self;
3274
3275	fn rem(self, _: Self) -> Self {
3276		Self
3277	}
3278}
3279impl num_traits::Zero for Symbolic {
3280	fn zero() -> Self {
3281		Self
3282	}
3283
3284	fn is_zero(&self) -> bool {
3285		true
3286	}
3287}
3288impl num_traits::One for Symbolic {
3289	fn one() -> Self {
3290		Self
3291	}
3292
3293	fn is_one(&self) -> bool {
3294		true
3295	}
3296}
3297impl num_traits::Num for Symbolic {
3298	type FromStrRadixErr = core::convert::Infallible;
3299
3300	fn from_str_radix(_: &str, _: u32) -> Result<Self, Self::FromStrRadixErr> {
3301		Ok(Self)
3302	}
3303}
3304
3305impl Symbolic {
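	// Since `Symbolic` is zero-sized, a dangling but well-aligned pointer together
	// with any length forms a valid slice: no memory is ever read or written, so the
	// returned `&'static mut [Self]` is sound.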
3306	#[inline]
3307	pub fn materialize(len: usize) -> &'static mut [Self] {
3308		unsafe { core::slice::from_raw_parts_mut(core::ptr::NonNull::dangling().as_ptr(), len) }
3309	}
3310}
3311
3312impl RealField for Symbolic {
3313	fn epsilon_impl() -> Self {
3314		Self
3315	}
3316
3317	fn nbits_impl() -> usize {
3318		0
3319	}
3320
3321	fn min_positive_impl() -> Self {
3322		Self
3323	}
3324
3325	fn max_positive_impl() -> Self {
3326		Self
3327	}
3328
3329	fn sqrt_min_positive_impl() -> Self {
3330		Self
3331	}
3332
3333	fn sqrt_max_positive_impl() -> Self {
3334		Self
3335	}
3336}
3337
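// The `ComplexField` impl for `Symbolic` uses `()` for every SIMD associated type and
// advertises only `SimdCapabilities::Copy`, so all vectorized entry points are inert.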
3338impl ComplexField for Symbolic {
3339	type Arch = pulp::Scalar;
3340	type Index = usize;
3341	type Real = Self;
3342	type SimdCtx<S: pulp::Simd> = S;
3343	type SimdIndex<S: pulp::Simd> = ();
3344	type SimdMask<S: pulp::Simd> = ();
3345	type SimdMemMask<S: pulp::Simd> = ();
3346	type SimdVec<S: pulp::Simd> = ();
3347	type Unit = Self;
3348
3349	const IS_REAL: bool = true;
3350	const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Copy;
3351
3352	fn zero_impl() -> Self {
3353		Self
3354	}
3355
3356	fn one_impl() -> Self {
3357		Self
3358	}
3359
3360	fn nan_impl() -> Self {
3361		Self
3362	}
3363
3364	fn infinity_impl() -> Self {
3365		Self
3366	}
3367
3368	fn from_real_impl(_: &Self::Real) -> Self {
3369		Self
3370	}
3371
3372	fn from_f64_impl(_: f64) -> Self {
3373		Self
3374	}
3375
3376	fn real_part_impl(_: &Self) -> Self::Real {
3377		Self
3378	}
3379
3380	fn imag_part_impl(_: &Self) -> Self::Real {
3381		Self
3382	}
3383
3384	fn copy_impl(_: &Self) -> Self {
3385		Self
3386	}
3387
3388	fn conj_impl(_: &Self) -> Self {
3389		Self
3390	}
3391
3392	fn recip_impl(_: &Self) -> Self {
3393		Self
3394	}
3395
3396	fn sqrt_impl(_: &Self) -> Self {
3397		Self
3398	}
3399
3400	fn abs_impl(_: &Self) -> Self::Real {
3401		Self
3402	}
3403
3404	fn abs1_impl(_: &Self) -> Self::Real {
3405		Self
3406	}
3407
3408	fn abs2_impl(_: &Self) -> Self::Real {
3409		Self
3410	}
3411
3412	fn mul_real_impl(_: &Self, _: &Self::Real) -> Self {
3413		Self
3414	}
3415
3416	fn mul_pow2_impl(_: &Self, _: &Self::Real) -> Self {
3417		Self
3418	}
3419
3420	fn is_finite_impl(_: &Self) -> bool {
3421		true
3422	}
3423
3424	fn simd_ctx<S: pulp::Simd>(simd: S) -> Self::SimdCtx<S> {
3425		simd
3426	}
3427
3428	fn ctx_from_simd<S: pulp::Simd>(simd: &Self::SimdCtx<S>) -> S {
3429		*simd
3430	}
3431
3432	fn simd_mem_mask_between<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::Index, _: Self::Index) -> Self::SimdMemMask<S> {
3433		()
3434	}
3435
3436	unsafe fn simd_mask_load_raw<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMemMask<S>, _: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
3437		()
3438	}
3439
3440	unsafe fn simd_mask_store_raw<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMemMask<S>, _: *mut Self::SimdVec<S>, _: Self::SimdVec<S>) {
3441		()
3442	}
3443
3444	fn simd_splat<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: &Self) -> Self::SimdVec<S> {
3445		()
3446	}
3447
3448	fn simd_splat_real<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: &Self::Real) -> Self::SimdVec<S> {
3449		()
3450	}
3451
3452	fn simd_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3453		()
3454	}
3455
3456	fn simd_sub<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3457		()
3458	}
3459
3460	fn simd_neg<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3461		()
3462	}
3463
3464	fn simd_conj<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3465		()
3466	}
3467
3468	fn simd_abs1<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3469		()
3470	}
3471
3472	fn simd_abs_max<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3473		()
3474	}
3475
3476	fn simd_mul_real<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3477		()
3478	}
3479
3480	fn simd_mul_pow2<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3481		()
3482	}
3483
3484	fn simd_mul<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3485		()
3486	}
3487
3488	fn simd_conj_mul<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3489		()
3490	}
3491
3492	fn simd_mul_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3493		()
3494	}
3495
3496	fn simd_conj_mul_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3497		()
3498	}
3499
3500	fn simd_abs2<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3501		()
3502	}
3503
3504	fn simd_abs2_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3505		()
3506	}
3507
3508	fn simd_reduce_sum<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self {
3509		Self
3510	}
3511
3512	fn simd_reduce_max<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self {
3513		Self
3514	}
3515
3516	fn simd_equal<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3517		()
3518	}
3519
3520	fn simd_less_than<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3521		()
3522	}
3523
3524	fn simd_less_than_or_equal<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3525		()
3526	}
3527
3528	fn simd_greater_than<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3529		()
3530	}
3531
3532	fn simd_greater_than_or_equal<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3533		()
3534	}
3535
3536	fn simd_select<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
3537		()
3538	}
3539
3540	fn simd_index_select<S: pulp::Simd>(
3541		_: &Self::SimdCtx<S>,
3542		_: Self::SimdMask<S>,
3543		_: Self::SimdIndex<S>,
3544		_: Self::SimdIndex<S>,
3545	) -> Self::SimdIndex<S> {
3546		()
3547	}
3548
3549	fn simd_index_splat<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::Index) -> Self::SimdIndex<S> {
3550		()
3551	}
3552
3553	fn simd_index_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdIndex<S>, _: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
3554		()
3555	}
3556
3557	fn simd_and_mask<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>, _: Self::SimdMask<S>) -> Self::SimdMask<S> {
3558		()
3559	}
3560
3561	fn simd_or_mask<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>, _: Self::SimdMask<S>) -> Self::SimdMask<S> {
3562		()
3563	}
3564
3565	fn simd_not_mask<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>) -> Self::SimdMask<S> {
3566		()
3567	}
3568
3569	fn simd_first_true_mask<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>) -> usize {
3570		0
3571	}
3572
3573	fn simd_mask_between<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::Index, _: Self::Index) -> Self::SimdMask<S> {
3574		()
3575	}
3576
3577	fn simd_index_less_than<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdIndex<S>, _: Self::SimdIndex<S>) -> Self::SimdMask<S> {
3578		()
3579	}
3580}