// faer_traits/lib.rs

1#![no_std]
2
3use bytemuck::Pod;
4use core::fmt::Debug;
5use num_complex::Complex;
6use pulp::Simd;
7
8use math_utils::*;
9
10use pulp::try_const;
11
12pub mod math_utils {
13	use crate::{ByRef, ComplexField, RealField, abs_impl};
14	use pulp::try_const;
15
16	#[inline(always)]
17	#[must_use]
18	pub fn eps<T: RealField>() -> T {
19		T::Real::epsilon_impl()
20	}
21
22	#[inline(always)]
23	#[must_use]
24	pub fn nbits<T: ComplexField>() -> usize {
25		T::Real::nbits_impl()
26	}
27
28	#[inline(always)]
29	#[must_use]
30	pub fn min_positive<T: RealField>() -> T {
31		T::min_positive_impl()
32	}
33	#[inline(always)]
34	#[must_use]
35	pub fn max_positive<T: RealField>() -> T {
36		T::max_positive_impl()
37	}
38	#[inline(always)]
39	#[must_use]
40	pub fn sqrt_min_positive<T: RealField>() -> T {
41		T::sqrt_min_positive_impl()
42	}
43	#[inline(always)]
44	#[must_use]
45	pub fn sqrt_max_positive<T: RealField>() -> T {
46		T::sqrt_max_positive_impl()
47	}
48
49	#[inline(always)]
50	#[must_use]
51	pub fn zero<T: ComplexField>() -> T {
52		T::zero_impl()
53	}
54	#[inline(always)]
55	#[must_use]
56	pub fn one<T: ComplexField>() -> T {
57		T::one_impl()
58	}
59	#[inline(always)]
60	#[must_use]
61	pub fn nan<T: ComplexField>() -> T {
62		T::nan_impl()
63	}
64	#[inline(always)]
65	#[must_use]
66	pub fn infinity<T: ComplexField>() -> T {
67		T::infinity_impl()
68	}
69
70	#[inline(always)]
71	#[must_use]
72	pub fn real<T: ComplexField>(value: &T) -> T::Real {
73		T::real_part_impl((value).by_ref())
74	}
75	#[inline(always)]
76	#[must_use]
77	pub fn imag<T: ComplexField>(value: &T) -> T::Real {
78		T::imag_part_impl((value).by_ref())
79	}
80	#[inline(always)]
81	#[track_caller]
82	#[must_use]
83	pub fn neg<T: NegByRef>(value: &T) -> T::Output {
84		value.neg_by_ref()
85	}
86	#[inline(always)]
87	#[must_use]
88	pub fn copy<T: ComplexField>(value: &T) -> T {
89		T::copy_impl((value).by_ref())
90	}
91
92	#[inline(always)]
93	#[must_use]
94	pub fn conj<T: ComplexField>(value: &T) -> T {
95		T::conj_impl((value).by_ref())
96	}
97
98	#[inline(always)]
99	#[track_caller]
100	#[must_use]
101	pub fn add<T: AddByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
102		lhs.add_by_ref(rhs)
103	}
104	#[inline(always)]
105	#[track_caller]
106	#[must_use]
107	pub fn sub<T: SubByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
108		lhs.sub_by_ref(rhs)
109	}
110	#[inline(always)]
111	#[track_caller]
112	#[must_use]
113	pub fn mul<T: MulByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
114		lhs.mul_by_ref(rhs)
115	}
116	#[inline(always)]
117	#[track_caller]
118	#[must_use]
119	pub fn div<T: DivByRef<U>, U>(lhs: &T, rhs: &U) -> T::Output {
120		lhs.div_by_ref(rhs)
121	}
122
123	#[inline(always)]
124	#[must_use]
125	pub fn mul_real<T: ComplexField>(lhs: &T, rhs: &T::Real) -> T {
126		T::mul_real_impl((lhs).by_ref(), (rhs).by_ref())
127	}
128
129	#[inline(always)]
130	#[must_use]
131	pub fn mul_pow2<T: ComplexField>(lhs: &T, rhs: &T::Real) -> T {
132		T::mul_real_impl((lhs).by_ref(), (rhs).by_ref())
133	}
134
135	#[inline(always)]
136	#[must_use]
137	pub fn abs1<T: ComplexField>(value: &T) -> T::Real {
138		T::abs1_impl((value).by_ref())
139	}
140
141	#[inline(always)]
142	#[must_use]
143	pub fn absmax<T: ComplexField>(value: &T) -> T::Real {
144		if try_const! { T::IS_REAL } {
145			T::abs1_impl(value)
146		} else {
147			add(&T::Real::abs1_impl(&real(value)), &T::Real::abs1_impl(&imag(value)))
148		}
149	}
150
151	#[inline(always)]
152	#[must_use]
153	pub fn abs<T: ComplexField>(value: &T) -> T::Real {
154		T::abs_impl((value).by_ref())
155	}
156
157	#[inline(always)]
158	#[must_use]
159	pub fn hypot<T: RealField>(lhs: &T, rhs: &T) -> T {
160		abs_impl::<T::Real>(lhs.clone(), rhs.clone())
161	}
162
163	#[inline(always)]
164	#[must_use]
165	pub fn abs2<T: ComplexField>(value: &T) -> T::Real {
166		T::abs2_impl((value).by_ref())
167	}
168
169	#[inline(always)]
170	#[must_use]
171	pub fn max<T: RealField>(lhs: &T, rhs: &T) -> T {
172		if lhs > rhs { copy(lhs) } else { copy(rhs) }
173	}
174	#[inline(always)]
175	#[must_use]
176	pub fn min<T: RealField>(lhs: &T, rhs: &T) -> T {
177		if lhs < rhs { copy(lhs) } else { copy(rhs) }
178	}
179
180	#[inline(always)]
181	#[must_use]
182	pub fn is_nan<T: ComplexField>(value: &T) -> bool {
183		T::is_nan_impl((value).by_ref())
184	}
185
186	#[inline(always)]
187	#[must_use]
188	pub fn is_finite<T: ComplexField>(value: &T) -> bool {
189		T::is_finite_impl((value).by_ref())
190	}
191
192	#[inline(always)]
193	#[must_use]
194	pub fn sqrt<T: ComplexField>(value: &T) -> T {
195		T::sqrt_impl((value).by_ref())
196	}
197	#[inline(always)]
198	#[must_use]
199	pub fn recip<T: ComplexField>(value: &T) -> T {
200		T::recip_impl((value).by_ref())
201	}
202
203	#[inline(always)]
204	#[must_use]
205	pub fn from_real<T: ComplexField>(value: &T::Real) -> T {
206		T::from_real_impl((value).by_ref())
207	}
208
209	#[inline(always)]
210	#[must_use]
211	pub fn from_f64<T: ComplexField>(value: f64) -> T {
212		T::from_f64_impl(value)
213	}
214
215	pub use crate::{AddByRef, DivByRef, MulByRef, NegByRef, SubByRef};
216}
217
/// Addition taking both operands by reference.
pub trait AddByRef<Rhs = Self> {
	/// Result type of the operation.
	type Output;
	fn add_by_ref(&self, rhs: &Rhs) -> Self::Output;
}
/// Subtraction taking both operands by reference.
pub trait SubByRef<Rhs = Self> {
	/// Result type of the operation.
	type Output;
	fn sub_by_ref(&self, rhs: &Rhs) -> Self::Output;
}
/// Negation taking the operand by reference.
pub trait NegByRef {
	/// Result type of the operation.
	type Output;
	fn neg_by_ref(&self) -> Self::Output;
}
/// Multiplication taking both operands by reference.
pub trait MulByRef<Rhs = Self> {
	/// Result type of the operation.
	type Output;
	fn mul_by_ref(&self, rhs: &Rhs) -> Self::Output;
}
/// Division taking both operands by reference.
pub trait DivByRef<Rhs = Self> {
	/// Result type of the operation.
	type Output;
	fn div_by_ref(&self, rhs: &Rhs) -> Self::Output;
}
238
239impl<Rhs, Lhs, Output> AddByRef<Rhs> for Lhs
240where
241	for<'a> &'a Lhs: core::ops::Add<&'a Rhs, Output = Output>,
242{
243	type Output = Output;
244
245	#[inline]
246	#[track_caller]
247	fn add_by_ref(&self, rhs: &Rhs) -> Self::Output {
248		self + rhs
249	}
250}
251impl<Rhs, Lhs, Output> SubByRef<Rhs> for Lhs
252where
253	for<'a> &'a Lhs: core::ops::Sub<&'a Rhs, Output = Output>,
254{
255	type Output = Output;
256
257	#[inline]
258	#[track_caller]
259	fn sub_by_ref(&self, rhs: &Rhs) -> Self::Output {
260		self - rhs
261	}
262}
263impl<Rhs, Lhs, Output> MulByRef<Rhs> for Lhs
264where
265	for<'a> &'a Lhs: core::ops::Mul<&'a Rhs, Output = Output>,
266{
267	type Output = Output;
268
269	#[inline]
270	#[track_caller]
271	fn mul_by_ref(&self, rhs: &Rhs) -> Self::Output {
272		self * rhs
273	}
274}
275impl<Rhs, Lhs, Output> DivByRef<Rhs> for Lhs
276where
277	for<'a> &'a Lhs: core::ops::Div<&'a Rhs, Output = Output>,
278{
279	type Output = Output;
280
281	#[inline]
282	#[track_caller]
283	fn div_by_ref(&self, rhs: &Rhs) -> Self::Output {
284		self / rhs
285	}
286}
287
288impl<T, Output> NegByRef for T
289where
290	for<'a> &'a T: core::ops::Neg<Output = Output>,
291{
292	type Output = Output;
293
294	#[inline]
295	#[track_caller]
296	fn neg_by_ref(&self) -> Self::Output {
297		-self
298	}
299}
300
/// Overflow/underflow-safe computation of `sqrt(re² + im²)`.
///
/// The operands are pre-scaled by a power-of-two constant chosen from their
/// magnitude range, and the result is scaled back, so that the intermediate
/// squares neither overflow nor underflow.
///
/// NOTE(review): `#[faer_macros::math]` presumably rewrites the arithmetic
/// operators into the crate's by-ref operations — confirm against the macro
/// crate.
#[faer_macros::math]
fn abs_impl<T: RealField>(re: T, im: T) -> T {
	let small = sqrt_min_positive();
	let big = sqrt_max_positive();
	let one = one();
	let re_abs = abs(re);
	let im_abs = abs(im);

	if re_abs > big || im_abs > big {
		// Large inputs: scale down by `small` before squaring, then
		// compensate with `big`.
		sqrt(abs2(re * small) + abs2(im * small)) * big
	} else if re_abs > one || im_abs > one {
		// Mid-range inputs: safe to square directly.
		sqrt(abs2(re) + abs2(im))
	} else {
		// Small inputs: scale up by `big` before squaring, then compensate
		// with `small`.
		sqrt(abs2(re * big) + abs2(im * big)) * small
	}
}
317
/// Computes the real and imaginary parts of `1 / (re + i·im)`, with
/// power-of-two scaling so that the intermediate `re² + im²` neither
/// overflows nor underflows.
///
/// Special cases (checked up front): any NaN input → `(nan, nan)`;
/// exact zero → `(∞, ∞)`; any non-finite input → `(0, 0)`.
#[faer_macros::math]
fn recip_impl<T: RealField>(re: T, im: T) -> (T, T) {
	if is_nan(re) || is_nan(im) {
		return (nan(), nan());
	}
	if re == zero() && im == zero() {
		return (infinity(), infinity());
	}
	if !is_finite(re) || !is_finite(im) {
		return (zero(), zero());
	}

	let small = sqrt_min_positive();
	let big = sqrt_max_positive();
	let one = one();
	let re_abs = abs(re);
	let im_abs = abs(im);

	if re_abs > big || im_abs > big {
		// Large inputs: scale down before forming |z|², compensate after.
		let re = re * small;
		let im = im * small;
		let inv = recip(abs2(re) + abs2(im));
		// 1/z = conj(z) / |z|², hence the negated imaginary part.
		(((re * inv) * small), ((-im * inv) * small))
	} else if re_abs > one || im_abs > one {
		// Mid-range inputs: no scaling needed.
		let inv = recip(abs2(re) + abs2(im));
		((re * inv), (-im * inv))
	} else {
		// Small inputs: scale up before forming |z|², compensate after.
		let re = re * big;
		let im = im * big;
		let inv = recip(abs2(re) + abs2(im));
		(((re * inv) * big), ((-im * inv) * big))
	}
}
351
/// Complex square root from real and imaginary parts.
///
/// Uses `out_re = sqrt((|z| + re) / 2)` and `out_im = ±sqrt((|z| - re) / 2)`,
/// where the sign of the imaginary output matches the sign of the input's
/// imaginary part.
#[faer_macros::math]
fn sqrt_impl<T: RealField>(re: T, im: T) -> (T, T) {
	let im_negative = im < zero();
	let half = from_f64(0.5);
	let abs = abs_impl(re.clone(), im.clone());

	// Guard against `re + |z|` rounding to a slightly negative value before
	// the square root.
	let mut sum = re + abs;
	if sum < zero() {
		sum = zero();
	}

	let out_re = sqrt(mul_pow2(sum, half));
	let mut out_im = sqrt(mul_pow2(abs - re, half));
	if im_negative {
		out_im = -out_im;
	}
	(out_re, out_im)
}
370
/// Normalizes `T`, `&T` and `&mut T` into a plain shared reference `&T`.
pub trait ByRef<T> {
	fn by_ref(&self) -> &T;
}
impl<T> ByRef<T> for T {
	#[inline]
	fn by_ref(&self) -> &T {
		self
	}
}
impl<T> ByRef<T> for &T {
	#[inline]
	fn by_ref(&self) -> &T {
		// `self` is `&&T`; one dereference yields the inner `&T`.
		*self
	}
}
impl<T> ByRef<T> for &mut T {
	#[inline]
	fn by_ref(&self) -> &T {
		// `self` is `&&mut T`; the dereference reborrows the inner exclusive
		// reference as a shared one.
		*self
	}
}
392
/// Transparent wrapper over a [`ComplexField`] SIMD context;
/// [`SimdCtx::new`] reinterprets a `&T::SimdCtx<S>` as `&Self`.
#[repr(transparent)]
pub struct SimdCtx<T: ComplexField, S: Simd>(pub T::SimdCtx<S>);

/// Owned variant of [`SimdCtx`] with the same transparent layout.
#[repr(transparent)]
pub struct SimdCtxCopy<T: ComplexField, S: Simd>(pub T::SimdCtx<S>);

/// Newtype marking a SIMD register as holding real-valued data
/// (e.g. the results of `abs1`/`abs2` and comparison operands).
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct RealReg<T>(pub T);
402
impl<T: ComplexField, S: Simd> SimdCtx<T, S> {
	/// Wraps a raw SIMD context reference.
	///
	/// SAFETY: `Self` is `#[repr(transparent)]` over `T::SimdCtx<S>`, so the
	/// pointer cast is layout-compatible.
	#[inline(always)]
	pub fn new(ctx: &T::SimdCtx<S>) -> &Self {
		unsafe { &*(ctx as *const T::SimdCtx<S> as *const Self) }
	}

	/// Returns the all-zero vector.
	///
	/// SAFETY: `T::SimdVec<S>: Pod`, so the all-zero bit pattern is valid.
	#[inline(always)]
	pub fn zero(&self) -> T::SimdVec<S> {
		unsafe { core::mem::zeroed() }
	}

	// NOTE(review): the `transmute_copy` calls throughout this impl convert a
	// value of type `T::SimdVec<S>` (or mask/index) to the same nominal type;
	// presumably they work around associated-type normalization — confirm.

	/// Broadcasts `value` into every lane.
	#[inline(always)]
	pub fn splat(&self, value: &T) -> T::SimdVec<S> {
		unsafe { core::mem::transmute_copy(&T::simd_splat(&self.0, (value).by_ref())) }
	}

	/// Broadcasts the real scalar `value` into every lane of a real register.
	#[inline(always)]
	pub fn splat_real(&self, value: &T::Real) -> RealReg<T::SimdVec<S>> {
		RealReg(unsafe { core::mem::transmute_copy(&T::simd_splat_real(&self.0, (value).by_ref())) })
	}

	/// Lane-wise addition.
	#[inline(always)]
	pub fn add(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
		unsafe { core::mem::transmute_copy(&T::simd_add(&self.0, lhs, rhs)) }
	}

	/// Lane-wise subtraction.
	#[inline(always)]
	pub fn sub(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
		unsafe { core::mem::transmute_copy(&T::simd_sub(&self.0, lhs, rhs)) }
	}

	/// Lane-wise negation.
	#[inline(always)]
	pub fn neg(&self, value: T::SimdVec<S>) -> T::SimdVec<S> {
		let value = unsafe { core::mem::transmute_copy(&value) };
		unsafe { core::mem::transmute_copy(&T::simd_neg(&self.0, value)) }
	}

	/// Lane-wise complex conjugation.
	#[inline(always)]
	pub fn conj(&self, value: T::SimdVec<S>) -> T::SimdVec<S> {
		let value = unsafe { core::mem::transmute_copy(&value) };
		unsafe { core::mem::transmute_copy(&T::simd_conj(&self.0, value)) }
	}

	/// Lane-wise `abs1`, producing a real register.
	#[inline(always)]
	pub fn abs1(&self, value: T::SimdVec<S>) -> RealReg<T::SimdVec<S>> {
		let value = unsafe { core::mem::transmute_copy(&value) };
		RealReg(unsafe { core::mem::transmute_copy(&T::simd_abs1(&self.0, value)) })
	}

	/// Lane-wise `abs_max`, producing a real register.
	#[inline(always)]
	pub fn abs_max(&self, value: T::SimdVec<S>) -> RealReg<T::SimdVec<S>> {
		let value = unsafe { core::mem::transmute_copy(&value) };
		RealReg(unsafe { core::mem::transmute_copy(&T::simd_abs_max(&self.0, value)) })
	}

	/// Lane-wise multiplication by a real register.
	#[inline(always)]
	pub fn mul_real(&self, lhs: T::SimdVec<S>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdVec<S> {
		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
		unsafe { core::mem::transmute_copy(&T::simd_mul_real(&self.0, lhs, rhs)) }
	}

	/// Lane-wise multiplication by a power-of-two real register.
	#[inline(always)]
	pub fn mul_pow2(&self, lhs: T::SimdVec<S>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdVec<S> {
		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
		unsafe { core::mem::transmute_copy(&T::simd_mul_pow2(&self.0, lhs, rhs)) }
	}

	/// Lane-wise multiplication.
	#[inline(always)]
	pub fn mul(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
		unsafe { core::mem::transmute_copy(&T::simd_mul(&self.0, lhs, rhs)) }
	}

	/// Lane-wise `conj(lhs) * rhs`.
	#[inline(always)]
	pub fn conj_mul(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
		unsafe { core::mem::transmute_copy(&T::simd_conj_mul(&self.0, lhs, rhs)) }
	}

	/// Lane-wise `lhs * rhs + acc`.
	#[inline(always)]
	pub fn mul_add(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>, acc: T::SimdVec<S>) -> T::SimdVec<S> {
		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
		let acc = unsafe { core::mem::transmute_copy(&acc) };
		unsafe { core::mem::transmute_copy(&T::simd_mul_add(&self.0, lhs, rhs, acc)) }
	}

	/// Lane-wise `conj(lhs) * rhs + acc`.
	#[inline(always)]
	pub fn conj_mul_add(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>, acc: T::SimdVec<S>) -> T::SimdVec<S> {
		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
		let acc = unsafe { core::mem::transmute_copy(&acc) };
		unsafe { core::mem::transmute_copy(&T::simd_conj_mul_add(&self.0, lhs, rhs, acc)) }
	}

	/// Lane-wise squared absolute value, producing a real register.
	#[inline(always)]
	pub fn abs2(&self, value: T::SimdVec<S>) -> RealReg<T::SimdVec<S>> {
		let value = unsafe { core::mem::transmute_copy(&value) };
		RealReg(unsafe { core::mem::transmute_copy(&T::simd_abs2(&self.0, value)) })
	}

	/// Lane-wise `abs2(value) + acc`, producing a real register.
	#[inline(always)]
	pub fn abs2_add(&self, value: T::SimdVec<S>, acc: RealReg<T::SimdVec<S>>) -> RealReg<T::SimdVec<S>> {
		let value = unsafe { core::mem::transmute_copy(&value) };
		let acc = unsafe { core::mem::transmute_copy(&acc) };
		RealReg(unsafe { core::mem::transmute_copy(&T::simd_abs2_add(&self.0, value, acc)) })
	}

	/// Horizontal sum of all lanes.
	#[inline(always)]
	pub fn reduce_sum(&self, value: T::SimdVec<S>) -> T {
		let value = unsafe { core::mem::transmute_copy(&value) };
		unsafe { core::mem::transmute_copy(&T::simd_reduce_sum(&self.0, value)) }
	}

	/// Horizontal maximum of a real register, returned as a `T`.
	#[inline(always)]
	pub fn reduce_max(&self, value: RealReg<T::SimdVec<S>>) -> T {
		let value = unsafe { core::mem::transmute_copy(&value) };
		unsafe { core::mem::transmute_copy(&T::simd_reduce_max(&self.0, value)) }
	}

	/// Horizontal sum of a real register, as a real scalar.
	#[faer_macros::math]
	#[inline(always)]
	pub fn reduce_sum_real(&self, value: RealReg<T::SimdVec<S>>) -> Real<T> {
		let value = T::simd_reduce_sum(&self.0, value.0);
		// When absolute values are split across the real/imag halves of the
		// register, both halves contribute to the total.
		if try_const! { T::SIMD_ABS_SPLIT_REAL_IMAG && !S::IS_SCALAR } {
			add(real(value), imag(value))
		} else {
			real(value)
		}
	}

	/// Horizontal maximum of a real register, as a real scalar.
	#[faer_macros::math]
	#[inline(always)]
	pub fn reduce_max_real(&self, value: RealReg<T::SimdVec<S>>) -> Real<T> {
		let value = T::simd_reduce_max(&self.0, value.0);
		// Same split-register consideration as in `reduce_sum_real`.
		if try_const! { T::SIMD_ABS_SPLIT_REAL_IMAG && !S::IS_SCALAR } {
			max(real(value), imag(value))
		} else {
			real(value)
		}
	}

	/// Lane-wise maximum of two real registers; lanes where `lhs` is not
	/// strictly greater take the `rhs` value.
	#[inline(always)]
	pub fn max(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> RealReg<T::SimdVec<S>> {
		let cmp = self.gt(lhs, rhs);
		RealReg(self.select(cmp, lhs.0, rhs.0))
	}

	/// Lane-wise equality comparison.
	#[inline(always)]
	pub fn eq(&self, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdMask<S> {
		T::simd_equal(&self.0, lhs, rhs)
	}

	/// Lane-wise `lhs < rhs` on real registers.
	#[inline(always)]
	pub fn lt(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdMask<S> {
		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
		unsafe { core::mem::transmute_copy(&T::simd_less_than(&self.0, lhs, rhs)) }
	}

	/// Lane-wise `lhs > rhs` on real registers.
	#[inline(always)]
	pub fn gt(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdMask<S> {
		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
		unsafe { core::mem::transmute_copy(&T::simd_greater_than(&self.0, lhs, rhs)) }
	}

	/// Lane-wise `lhs <= rhs` on real registers.
	#[inline(always)]
	pub fn le(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdMask<S> {
		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
		unsafe { core::mem::transmute_copy(&T::simd_less_than_or_equal(&self.0, lhs, rhs)) }
	}

	/// Lane-wise `lhs >= rhs` on real registers.
	#[inline(always)]
	pub fn ge(&self, lhs: RealReg<T::SimdVec<S>>, rhs: RealReg<T::SimdVec<S>>) -> T::SimdMask<S> {
		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
		unsafe { core::mem::transmute_copy(&T::simd_greater_than_or_equal(&self.0, lhs, rhs)) }
	}

	/// Lane-wise select: `mask ? lhs : rhs`.
	#[inline(always)]
	pub fn select(&self, mask: T::SimdMask<S>, lhs: T::SimdVec<S>, rhs: T::SimdVec<S>) -> T::SimdVec<S> {
		let lhs = unsafe { core::mem::transmute_copy(&lhs) };
		let rhs = unsafe { core::mem::transmute_copy(&rhs) };
		unsafe { core::mem::transmute_copy(&T::simd_select(&self.0, mask, lhs, rhs)) }
	}

	/// Lane-wise select on index vectors: `mask ? lhs : rhs`.
	#[inline(always)]
	pub fn iselect(&self, mask: T::SimdMask<S>, lhs: T::SimdIndex<S>, rhs: T::SimdIndex<S>) -> T::SimdIndex<S> {
		unsafe { core::mem::transmute_copy(&T::simd_index_select(&self.0, mask, lhs, rhs)) }
	}

	/// Broadcasts an index value into every lane.
	#[inline(always)]
	pub fn isplat(&self, value: T::Index) -> T::SimdIndex<S> {
		unsafe { core::mem::transmute_copy(&T::simd_index_splat(&self.0, value)) }
	}

	/// Lane-wise index addition.
	#[inline(always)]
	pub fn iadd(&self, lhs: T::SimdIndex<S>, rhs: T::SimdIndex<S>) -> T::SimdIndex<S> {
		unsafe { core::mem::transmute_copy(&T::simd_index_add(&self.0, lhs, rhs)) }
	}

	/// Lane-wise mask OR.
	#[inline(always)]
	pub fn or_mask(&self, lhs: T::SimdMask<S>, rhs: T::SimdMask<S>) -> T::SimdMask<S> {
		T::simd_or_mask(&self.0, lhs, rhs)
	}

	/// Lane-wise mask AND.
	#[inline(always)]
	pub fn and_mask(&self, lhs: T::SimdMask<S>, rhs: T::SimdMask<S>) -> T::SimdMask<S> {
		T::simd_and_mask(&self.0, lhs, rhs)
	}

	/// Lane-wise mask NOT.
	#[inline(always)]
	pub fn not_mask(&self, mask: T::SimdMask<S>) -> T::SimdMask<S> {
		T::simd_not_mask(&self.0, mask)
	}

	/// Position of the first set lane in `value`.
	#[inline(always)]
	pub fn first_true_mask(&self, value: T::SimdMask<S>) -> usize {
		T::simd_first_true_mask(&self.0, value)
	}

	/// Masked load.
	///
	/// # Safety
	/// `ptr` must satisfy the contract of `T::simd_mask_load` for the lanes
	/// enabled by `mask` (contract defined by the trait implementation, not
	/// visible here).
	#[inline(always)]
	pub unsafe fn mask_load(&self, mask: T::SimdMemMask<S>, ptr: *const T::SimdVec<S>) -> T::SimdVec<S> {
		unsafe { T::simd_mask_load(&self.0, mask, ptr) }
	}

	/// Masked store.
	///
	/// # Safety
	/// `ptr` must satisfy the contract of `T::simd_mask_store` for the lanes
	/// enabled by `mask` (contract defined by the trait implementation, not
	/// visible here).
	#[inline(always)]
	pub unsafe fn mask_store(&self, mask: T::SimdMemMask<S>, ptr: *mut T::SimdVec<S>, value: T::SimdVec<S>) {
		let value = unsafe { core::mem::transmute_copy(&value) };
		unsafe { T::simd_mask_store(&self.0, mask, ptr, value) }
	}

	/// Loads a full vector from `ptr`.
	#[inline(always)]
	pub fn load(&self, ptr: &T::SimdVec<S>) -> T::SimdVec<S> {
		unsafe { core::mem::transmute_copy(&T::simd_load(&self.0, ptr)) }
	}

	/// Stores a full vector through `ptr`.
	#[inline(always)]
	pub fn store(&self, ptr: &mut T::SimdVec<S>, value: T::SimdVec<S>) {
		let value = unsafe { core::mem::transmute_copy(&value) };
		// `simd_store` returns `()`; the outer `transmute_copy` is a unit
		// no-op kept for uniformity with the other wrappers.
		unsafe { core::mem::transmute_copy(&T::simd_store(&self.0, ptr, value)) }
	}
}
656
/// Types that have a conjugate counterpart and a canonical, computable form.
///
/// # Safety
/// NOTE(review): the exact invariants required of implementors are not stated
/// in this file; presumably the `Conj`/`Canonical` associated types must be
/// layout-compatible round-trips as declared by the bounds — confirm.
pub unsafe trait Conjugate: Send + Sync + core::fmt::Debug {
	/// `true` when `Self` is already the canonical representation.
	const IS_CANONICAL: bool;

	/// Conjugated counterpart; conjugating twice returns to `Self`.
	type Conj: Conjugate<Conj = Self, Canonical = Self::Canonical>;
	/// Canonical representation, which is a full [`ComplexField`].
	type Canonical: Conjugate<Canonical = Self::Canonical> + ComplexField;
}

/// Real field associated with the canonical form of `T`.
pub type Real<T> = <<T as Conjugate>::Canonical as ComplexField>::Real;

/// Complex number in conjugated representation.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct ComplexConj<T> {
	/// Real part.
	pub re: T,
	/// NOTE(review): the field name suggests this stores the negated
	/// imaginary part (value = `re - i·im_neg`) — confirm at the use sites.
	pub im_neg: T,
}
671
/// Level of SIMD support offered by a scalar type, ordered from none to full.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum SimdCapabilities {
	None,
	Copy,
	Simd,
}

impl SimdCapabilities {
	/// Returns `true` for capability levels that allow memcpy-style handling
	/// (`Copy` and `Simd`).
	#[inline]
	pub const fn is_copy(self) -> bool {
		match self {
			Self::None => false,
			Self::Copy | Self::Simd => true,
		}
	}

	/// Returns `true` only for full SIMD support.
	#[inline]
	pub const fn is_simd(self) -> bool {
		match self {
			Self::None | Self::Copy => false,
			Self::Simd => true,
		}
	}
}
690
mod seal {
	/// Private sealing trait: only the integer types listed here may ever
	/// implement the public `Seal` (and hence `Index`/`SignedIndex`).
	pub trait Seal {}
	impl Seal for u32 {}
	impl Seal for u64 {}
	impl Seal for usize {}
	impl Seal for i32 {}
	impl Seal for i64 {}
	impl Seal for isize {}
}

/// Public face of the sealing trait; blanket-implemented for every sealed
/// type, and not implementable outside this crate.
pub trait Seal: seal::Seal {}
impl<T: seal::Seal> Seal for T {}
703
/// Trait for signed integers corresponding to the ones satisfying [`Index`].
///
/// Always smaller than or equal to `isize`.
pub trait SignedIndex:
	Seal
	+ core::fmt::Debug
	+ core::ops::Neg<Output = Self>
	+ core::ops::Add<Output = Self>
	+ core::ops::Sub<Output = Self>
	+ core::ops::AddAssign
	+ core::ops::SubAssign
	+ bytemuck::Pod
	+ Eq
	+ Ord
	+ Send
	+ Sync
{
	/// Maximum representable value.
	const MAX: Self;

	/// Truncate `value` to type [`Self`].
	#[must_use]
	fn truncate(value: usize) -> Self;

	/// Zero extend `self`.
	#[must_use]
	fn zx(self) -> usize;
	/// Sign extend `self`.
	#[must_use]
	fn sx(self) -> usize;

	/// Sum nonnegative values while checking for overflow.
	///
	/// Returns `None` if the running sum would exceed [`Self::MAX`].
	/// Inputs must be nonnegative: for a negative `i`, the guard
	/// `Self::MAX - i` itself overflows.
	fn sum_nonnegative(slice: &[Self]) -> Option<Self> {
		// `zeroed` is provided via `bytemuck` (supertrait of `Pod`).
		let mut acc = Self::zeroed();
		for &i in slice {
			// `acc + i > MAX` rewritten as `MAX - i < acc` so the check
			// itself cannot overflow for nonnegative `i`.
			if Self::MAX - i < acc {
				return None;
			}
			acc += i;
		}
		Some(acc)
	}
}
747
impl SignedIndex for i32 {
	const MAX: Self = Self::MAX;

	/// Truncates `value` to `i32`.
	#[inline(always)]
	fn truncate(value: usize) -> Self {
		// Compile-time guarantee that `usize` is at least as wide as `i32`.
		#[allow(clippy::assertions_on_constants)]
		const _: () = {
			core::assert!(i32::BITS <= usize::BITS);
		};
		value as isize as Self
	}

	/// Zero extension: reinterpret as `u32`, then widen.
	#[inline(always)]
	fn zx(self) -> usize {
		self as u32 as usize
	}

	/// Sign extension via `isize`.
	#[inline(always)]
	fn sx(self) -> usize {
		self as isize as usize
	}
}
770
771#[cfg(any(target_pointer_width = "64"))]
772impl SignedIndex for i64 {
773	const MAX: Self = Self::MAX;
774
775	#[inline(always)]
776	fn truncate(value: usize) -> Self {
777		#[allow(clippy::assertions_on_constants)]
778		const _: () = {
779			core::assert!(i64::BITS <= usize::BITS);
780		};
781		value as isize as Self
782	}
783
784	#[inline(always)]
785	fn zx(self) -> usize {
786		self as u64 as usize
787	}
788
789	#[inline(always)]
790	fn sx(self) -> usize {
791		self as isize as usize
792	}
793}
794
impl SignedIndex for isize {
	const MAX: Self = Self::MAX;

	/// Truncates `value` to `isize` (a bit-for-bit reinterpretation, since
	/// `usize` and `isize` have the same width).
	#[inline(always)]
	fn truncate(value: usize) -> Self {
		value as isize
	}

	/// Zero "extension": same-width reinterpretation as `usize`.
	#[inline(always)]
	fn zx(self) -> usize {
		self as usize
	}

	/// Sign "extension": same-width reinterpretation as `usize`.
	#[inline(always)]
	fn sx(self) -> usize {
		self as usize
	}
}
813
814pub trait Index:
815	Seal
816	+ core::fmt::Debug
817	+ core::ops::Not<Output = Self>
818	+ core::ops::BitAnd<Output = Self>
819	+ core::ops::BitOr<Output = Self>
820	+ core::ops::BitXor<Output = Self>
821	+ core::ops::Add<Output = Self>
822	+ core::ops::Sub<Output = Self>
823	+ core::ops::AddAssign
824	+ core::ops::SubAssign
825	+ bytemuck::Pod
826	+ Eq
827	+ Ord
828	+ Send
829	+ Sync
830	+ Ord
831{
832	/// Equally-sized index type with a fixed size (no `usize`).
833	type FixedWidth: Index;
834	/// Equally-sized signed index type.
835	type Signed: SignedIndex;
836
837	const BITS: u32 = core::mem::size_of::<Self>() as u32 * 8;
838
839	/// Truncate `value` to type [`Self`].
840	#[must_use]
841	#[inline(always)]
842	fn truncate(value: usize) -> Self {
843		Self::from_signed(<Self::Signed as SignedIndex>::truncate(value))
844	}
845
846	/// Zero extend `self`.
847	#[must_use]
848	#[inline(always)]
849	fn zx(self) -> usize {
850		self.to_signed().zx()
851	}
852
853	/// Convert a reference to a slice of [`Self`] to fixed width types.
854	#[inline(always)]
855	fn canonicalize(slice: &[Self]) -> &[Self::FixedWidth] {
856		bytemuck::cast_slice(slice)
857	}
858
859	/// Convert a mutable reference to a slice of [`Self`] to fixed width types.
860	#[inline(always)]
861	fn canonicalize_mut(slice: &mut [Self]) -> &mut [Self::FixedWidth] {
862		bytemuck::cast_slice_mut(slice)
863	}
864
865	/// Convert a signed value to an unsigned one.
866	#[inline(always)]
867	fn from_signed(value: Self::Signed) -> Self {
868		bytemuck::cast(value)
869	}
870
871	/// Convert an unsigned value to a signed one.
872	#[inline(always)]
873	fn to_signed(self) -> Self::Signed {
874		bytemuck::cast(self)
875	}
876
877	/// Sum values while checking for overflow.
878	#[inline]
879	fn sum_nonnegative(slice: &[Self]) -> Option<Self> {
880		Self::Signed::sum_nonnegative(bytemuck::cast_slice(slice)).map(Self::from_signed)
881	}
882}
883
impl Index for u32 {
	type FixedWidth = u32;
	type Signed = i32;
}
impl Index for u64 {
	type FixedWidth = u64;
	type Signed = i64;
}

impl Index for usize {
	// `usize` maps to the fixed-width unsigned type matching the target's
	// pointer width.
	#[cfg(target_pointer_width = "32")]
	type FixedWidth = u32;
	#[cfg(target_pointer_width = "64")]
	type FixedWidth = u64;
	type Signed = isize;
}
900
// Real scalars are their own conjugate and are already canonical.
unsafe impl<T: RealField> Conjugate for T {
	type Canonical = T;
	type Conj = T;

	const IS_CANONICAL: bool = true;
}

// `Complex<T>` is the canonical complex representation; its conjugate
// counterpart is the `ComplexConj<T>` wrapper.
unsafe impl<T: RealField> Conjugate for Complex<T> {
	type Canonical = Complex<T>;
	type Conj = ComplexConj<T>;

	const IS_CANONICAL: bool = true;
}
// Conjugated representation; canonicalizes back to `Complex<T>`.
unsafe impl<T: RealField> Conjugate for ComplexConj<T> {
	type Canonical = Complex<T>;
	type Conj = Complex<T>;

	const IS_CANONICAL: bool = false;
}
920
/// Abstraction over SIMD dispatch strategies: runtime detection
/// (`pulp::Arch`) or the plain scalar fallback (`pulp::Scalar`).
pub trait SimdArch: Default {
	fn dispatch<R>(self, f: impl pulp::WithSimd<Output = R>) -> R;
}

impl SimdArch for pulp::Arch {
	#[inline]
	fn dispatch<R>(self, f: impl pulp::WithSimd<Output = R>) -> R {
		// NOTE(review): this resolves to `pulp::Arch`'s inherent `dispatch`
		// (inherent methods take precedence over trait methods), not a
		// recursive call to this trait method — confirm against the pulp API.
		self.dispatch(f)
	}
}

impl SimdArch for pulp::Scalar {
	#[inline]
	fn dispatch<R>(self, f: impl pulp::WithSimd<Output = R>) -> R {
		// No dispatch needed: run the closure directly with the scalar
		// "SIMD" instance.
		f.with_simd(self)
	}
}
938
939pub trait ComplexField:
940	Debug
941	+ Clone
942	+ Conjugate<Canonical = Self>
943	+ PartialEq
944	+ AddByRef<Output = Self>
945	+ SubByRef<Output = Self>
946	+ MulByRef<Output = Self>
947	+ NegByRef<Output = Self>
948{
949	const IS_REAL: bool;
950	const SIMD_ABS_SPLIT_REAL_IMAG: bool = false;
951
952	type Arch: SimdArch;
953	type Unit: ComplexField;
954
955	type SimdCtx<S: Simd>: Copy;
956	type Index: Index;
957
958	type Real: RealField;
959
960	#[doc(hidden)]
961	const IS_NATIVE_F32: bool = false;
962	#[doc(hidden)]
963	const IS_NATIVE_C32: bool = false;
964	#[doc(hidden)]
965	const IS_NATIVE_F64: bool = false;
966	#[doc(hidden)]
967	const IS_NATIVE_C64: bool = false;
968
969	const SIMD_CAPABILITIES: SimdCapabilities;
970	type SimdMask<S: Simd>: Copy + Debug;
971	type SimdMemMask<S: Simd>: Copy + Debug;
972
973	type SimdVec<S: Simd>: Pod + Debug;
974	type SimdIndex<S: Simd>: Pod + Debug;
975
976	fn zero_impl() -> Self;
977	fn one_impl() -> Self;
978	fn nan_impl() -> Self;
979	fn infinity_impl() -> Self;
980
981	fn from_real_impl(real: &Self::Real) -> Self;
982	fn from_f64_impl(real: f64) -> Self;
983
984	fn real_part_impl(value: &Self) -> Self::Real;
985	fn imag_part_impl(value: &Self) -> Self::Real;
986
987	fn copy_impl(value: &Self) -> Self;
988	fn conj_impl(value: &Self) -> Self;
989	fn recip_impl(value: &Self) -> Self;
990	fn sqrt_impl(value: &Self) -> Self;
991
992	fn abs_impl(value: &Self) -> Self::Real;
993	fn abs1_impl(value: &Self) -> Self::Real;
994	fn abs2_impl(value: &Self) -> Self::Real;
995
996	fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self;
997
998	fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self;
999
1000	fn is_finite_impl(value: &Self) -> bool;
1001	fn is_nan_impl(value: &Self) -> bool {
1002		value != value
1003	}
1004
1005	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S>;
1006	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S;
1007
1008	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S>;
1009	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S>;
1010	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S>;
1011	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>);
1012
1013	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S>;
1014	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S>;
1015
1016	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1017	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1018
1019	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1020	fn simd_conj<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1021	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1022	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1023
1024	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1025	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1026
1027	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1028	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1029	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S>;
1030	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S>;
1031	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S>;
1032	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S>;
1033
1034	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self;
1035	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self;
1036	fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1037	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1038	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1039	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1040	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S>;
1041
1042	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S>;
1043	fn simd_index_select<S: Simd>(
1044		ctx: &Self::SimdCtx<S>,
1045		mask: Self::SimdMask<S>,
1046		lhs: Self::SimdIndex<S>,
1047		rhs: Self::SimdIndex<S>,
1048	) -> Self::SimdIndex<S>;
1049
1050	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S>;
1051	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S>;
1052	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S>;
1053	#[inline(always)]
1054	fn simd_index_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1055		Self::simd_index_less_than(ctx, rhs, lhs)
1056	}
1057	#[inline(always)]
1058	fn simd_index_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1059		Self::simd_not_mask(ctx, Self::simd_index_less_than(ctx, rhs, lhs))
1060	}
1061	#[inline(always)]
1062	fn simd_index_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
1063		Self::simd_not_mask(ctx, Self::simd_index_greater_than(ctx, rhs, lhs))
1064	}
1065
1066	fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S>;
1067	fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S>;
1068	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S>;
1069	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize;
1070
1071	#[inline(always)]
1072	fn simd_load<S: Simd>(ctx: &Self::SimdCtx<S>, ptr: &Self::SimdVec<S>) -> Self::SimdVec<S> {
1073		let simd = Self::ctx_from_simd(ctx);
1074		if try_const! { Self::Unit::IS_NATIVE_F32 } {
1075			simd.deinterleave_shfl_f32s(*ptr)
1076		} else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1077			simd.deinterleave_shfl_f64s(*ptr)
1078		} else {
1079			panic!();
1080		}
1081	}
1082
1083	#[inline(always)]
1084	fn simd_store<S: Simd>(ctx: &Self::SimdCtx<S>, ptr: &mut Self::SimdVec<S>, value: Self::SimdVec<S>) {
1085		let simd = Self::ctx_from_simd(ctx);
1086		if try_const! { Self::Unit::IS_NATIVE_F32 } {
1087			*ptr = simd.deinterleave_shfl_f32s(value)
1088		} else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1089			*ptr = simd.deinterleave_shfl_f64s(value)
1090		} else {
1091			panic!();
1092		}
1093	}
1094
1095	#[inline(always)]
1096	unsafe fn simd_mask_load<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
1097		let simd = Self::ctx_from_simd(ctx);
1098		let value = Self::simd_mask_load_raw(ctx, mask, ptr);
1099		if try_const! { Self::Unit::IS_NATIVE_F32 } {
1100			simd.deinterleave_shfl_f32s(value)
1101		} else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1102			simd.deinterleave_shfl_f64s(value)
1103		} else {
1104			panic!();
1105		}
1106	}
1107
1108	#[inline(always)]
1109	unsafe fn simd_mask_store<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, value: Self::SimdVec<S>) {
1110		let simd = Self::ctx_from_simd(ctx);
1111		if try_const! { Self::Unit::IS_NATIVE_F32 } {
1112			Self::simd_mask_store_raw(ctx, mask, ptr, simd.deinterleave_shfl_f32s(value))
1113		} else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1114			Self::simd_mask_store_raw(ctx, mask, ptr, simd.deinterleave_shfl_f64s(value))
1115		} else {
1116			panic!();
1117		}
1118	}
1119
1120	#[inline(always)]
1121	fn simd_iota<S: Simd>(ctx: &Self::SimdCtx<S>) -> Self::SimdIndex<S> {
1122		let simd = Self::ctx_from_simd(ctx);
1123		struct Interleave<T>(T);
1124		unsafe impl<T> pulp::Interleave for Interleave<T> {}
1125
1126		unsafe {
1127			if try_const! { Self::Unit::IS_NATIVE_F32 } {
1128				core::mem::transmute_copy::<_, Self::SimdIndex<S>>(&simd.deinterleave_shfl_f32s(Interleave(core::mem::transmute_copy::<
1129					_,
1130					Self::SimdVec<S>,
1131				>(
1132					&pulp::iota_32::<Interleave<Self>>()
1133				))))
1134			} else if try_const! { Self::Unit::IS_NATIVE_F64 } {
1135				core::mem::transmute_copy::<_, Self::SimdIndex<S>>(
1136					&simd.deinterleave_shfl_f64s(core::mem::transmute_copy::<_, Self::SimdVec<S>>(&pulp::iota_64::<Interleave<Self>>())),
1137				)
1138			} else {
1139				panic!();
1140			}
1141		}
1142	}
1143}
1144
/// A real number field: a [`ComplexField`] whose values are their own real part and their own
/// conjugate, with a total-ish ordering (`PartialOrd`), full scalar arithmetic (`num_traits`),
/// division and negation by reference.
///
/// The `*_impl` methods expose the floating-point range constants that scaling and
/// overflow-avoidance code queries through the free functions in [`math_utils`](crate::math_utils).
pub trait RealField:
	ComplexField<Real = Self, Conj = Self> + DivByRef<Output = Self> + PartialOrd + num_traits::NumOps + num_traits::Num + core::ops::Neg<Output = Self>
{
	/// Machine epsilon: the distance between `1.0` and the next representable value.
	fn epsilon_impl() -> Self;
	/// Number of bits of mantissa precision.
	fn nbits_impl() -> usize;

	/// Smallest positive normal value.
	fn min_positive_impl() -> Self;
	/// Large positive value; the reciprocal of `min_positive_impl` in the provided impls.
	fn max_positive_impl() -> Self;
	/// Square root of `min_positive_impl`.
	fn sqrt_min_positive_impl() -> Self;
	/// Square root of `max_positive_impl`.
	fn sqrt_max_positive_impl() -> Self;
}
1156
// `ComplexField` over plain `f32`: real numbers, so conjugation is the identity, the imaginary
// part is identically zero, and every `simd_conj*` variant collapses to its non-conjugating
// counterpart. All SIMD operations delegate to `pulp`'s `*_f32s` / `*_m32s` / `*_u32s`
// primitives; the SIMD context is the `Simd` token itself, with no extra state.
impl ComplexField for f32 {
	type Arch = pulp::Arch;
	// 32-bit indices, so `S::u32s` has the same lane count as `S::f32s`
	type Index = u32;
	type Real = Self;
	type SimdCtx<S: Simd> = S;
	type SimdIndex<S: Simd> = S::u32s;
	type SimdMask<S: Simd> = S::m32s;
	type SimdMemMask<S: Simd> = pulp::MemMask<S::m32s>;
	type SimdVec<S: Simd> = S::f32s;
	type Unit = Self;

	const IS_NATIVE_F32: bool = true;
	const IS_REAL: bool = true;
	const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;

	#[inline(always)]
	fn zero_impl() -> Self {
		0.0
	}

	#[inline(always)]
	fn one_impl() -> Self {
		1.0
	}

	#[inline(always)]
	fn nan_impl() -> Self {
		Self::NAN
	}

	#[inline(always)]
	fn infinity_impl() -> Self {
		Self::INFINITY
	}

	// the real field is `f32` itself, so conversion from the real part is a copy
	#[inline(always)]
	fn from_real_impl(value: &Self) -> Self {
		*value
	}

	// narrowing `f64 -> f32` cast
	#[inline(always)]
	fn from_f64_impl(value: f64) -> Self {
		value as _
	}

	#[inline(always)]
	fn real_part_impl(value: &Self) -> Self {
		*value
	}

	// real numbers have no imaginary part
	#[inline(always)]
	fn imag_part_impl(_: &Self) -> Self {
		0.0
	}

	#[inline(always)]
	fn copy_impl(value: &Self) -> Self {
		*value
	}

	// conjugation is a no-op for reals
	#[inline(always)]
	fn conj_impl(value: &Self) -> Self {
		*value
	}

	#[inline(always)]
	fn recip_impl(value: &Self) -> Self {
		1.0 / *value
	}

	// `libm` keeps the math functions available under `#![no_std]`
	#[inline(always)]
	fn sqrt_impl(value: &Self) -> Self {
		libm::sqrtf(*value)
	}

	#[inline(always)]
	fn abs_impl(value: &Self) -> Self {
		(*value).abs()
	}

	// for reals, the "1-norm" of a scalar is just its absolute value
	#[inline(always)]
	fn abs1_impl(value: &Self) -> Self {
		(*value).abs()
	}

	// |x|² == x * x for reals
	#[inline(always)]
	fn abs2_impl(value: &Self) -> Self {
		(*value) * (*value)
	}

	#[inline(always)]
	fn mul_real_impl(lhs: &Self, rhs: &Self) -> Self {
		(*lhs) * (*rhs)
	}

	// identical to `mul_real_impl` for a native float type
	#[inline(always)]
	fn mul_pow2_impl(lhs: &Self, rhs: &Self) -> Self {
		(*lhs) * (*rhs)
	}

	#[inline(always)]
	fn is_finite_impl(value: &Self) -> bool {
		(*value).is_finite()
	}

	#[inline(always)]
	fn is_nan_impl(value: &Self) -> bool {
		(*value).is_nan()
	}

	// the context is the `Simd` token itself
	#[inline(always)]
	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
		simd
	}

	#[inline(always)]
	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
		ctx.splat_f32s(*value)
	}

	// same as `simd_splat`, since `Real == Self`
	#[inline(always)]
	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
		ctx.splat_f32s(*value)
	}

	#[inline(always)]
	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.add_f32s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.sub_f32s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.neg_f32s(value)
	}

	// conjugation is the identity on reals
	#[inline(always)]
	fn simd_conj<S: Simd>(_: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		value
	}

	#[inline(always)]
	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.abs_f32s(value)
	}

	#[inline(always)]
	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_f32s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_f32s(lhs, real_rhs)
	}

	#[inline(always)]
	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_f32s(lhs, real_rhs)
	}

	// conj(lhs) * rhs == lhs * rhs for reals
	#[inline(always)]
	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_f32s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_add_e_f32s(lhs, rhs, acc)
	}

	// conjugating variant is the same multiply-add for reals
	#[inline(always)]
	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_add_e_f32s(lhs, rhs, acc)
	}

	// |x|² == x * x lane-wise
	#[inline(always)]
	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_f32s(value, value)
	}

	#[inline(always)]
	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_add_e_f32s(value, value, acc)
	}

	#[inline(always)]
	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
		ctx.reduce_sum_f32s(value)
	}

	#[inline(always)]
	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
		ctx.reduce_max_f32s(value)
	}

	#[inline(always)]
	fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		ctx.equal_f32s(real_lhs, real_rhs)
	}

	#[inline(always)]
	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		ctx.less_than_f32s(real_lhs, real_rhs)
	}

	#[inline(always)]
	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		ctx.greater_than_f32s(real_lhs, real_rhs)
	}

	#[inline(always)]
	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		ctx.less_than_or_equal_f32s(real_lhs, real_rhs)
	}

	#[inline(always)]
	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		ctx.greater_than_or_equal_f32s(real_lhs, real_rhs)
	}

	// lane-wise `mask ? lhs : rhs`
	#[inline(always)]
	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.select_f32s_m32s(mask, lhs, rhs)
	}

	#[inline(always)]
	fn simd_index_select<S: Simd>(
		ctx: &Self::SimdCtx<S>,
		mask: Self::SimdMask<S>,
		lhs: Self::SimdIndex<S>,
		rhs: Self::SimdIndex<S>,
	) -> Self::SimdIndex<S> {
		ctx.select_u32s_m32s(mask, lhs, rhs)
	}

	#[inline(always)]
	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
		ctx.splat_u32s(value)
	}

	#[inline(always)]
	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
		ctx.add_u32s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
		ctx.less_than_u32s(lhs, rhs)
	}

	// for reals the max of |re| and |im| degenerates to plain abs
	#[inline(always)]
	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.abs_f32s(value)
	}

	// the context *is* the `Simd` token, so recovering it is a copy
	#[inline(always)]
	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
		*ctx
	}

	#[inline(always)]
	fn simd_and_mask<S: Simd>(simd: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
		simd.and_m32s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_or_mask<S: Simd>(simd: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
		simd.or_m32s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
		ctx.not_m32s(mask)
	}

	#[inline(always)]
	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
		ctx.first_true_m32s(value)
	}

	// memory mask covering lanes in `start..end`, for partial loads/stores
	#[inline(always)]
	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: u32, end: u32) -> Self::SimdMemMask<S> {
		ctx.mask_between_m32s(start, end)
	}

	#[inline(always)]
	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: u32, end: u32) -> Self::SimdMask<S> {
		ctx.mask_between_m32s(start, end).mask()
	}

	// caller upholds the trait's safety contract for masked loads from `ptr`
	#[inline(always)]
	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mask_load_ptr_f32s(mask, ptr as _)
	}

	// caller upholds the trait's safety contract for masked stores to `ptr`
	#[inline(always)]
	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
		ctx.mask_store_ptr_f32s(mask, ptr as _, values);
	}
}
1462
1463impl RealField for f32 {
1464	#[inline(always)]
1465	fn epsilon_impl() -> Self {
1466		Self::EPSILON
1467	}
1468
1469	#[inline(always)]
1470	fn min_positive_impl() -> Self {
1471		Self::MIN_POSITIVE
1472	}
1473
1474	#[inline(always)]
1475	fn max_positive_impl() -> Self {
1476		Self::MIN_POSITIVE.recip()
1477	}
1478
1479	#[inline(always)]
1480	fn sqrt_min_positive_impl() -> Self {
1481		libm::sqrtf(Self::MIN_POSITIVE)
1482	}
1483
1484	#[inline(always)]
1485	fn sqrt_max_positive_impl() -> Self {
1486		libm::sqrtf(Self::MIN_POSITIVE.recip())
1487	}
1488
1489	#[inline(always)]
1490	fn nbits_impl() -> usize {
1491		Self::MANTISSA_DIGITS as usize
1492	}
1493}
1494
// `ComplexField` over plain `f64`: mirrors the `f32` impl with the 64-bit `pulp` primitives
// (`*_f64s` / `*_m64s` / `*_u64s`). Conjugation is the identity, the imaginary part is
// identically zero, and the SIMD context is the `Simd` token itself.
impl ComplexField for f64 {
	type Arch = pulp::Arch;
	// 64-bit indices, so `S::u64s` has the same lane count as `S::f64s`
	type Index = u64;
	type Real = Self;
	type SimdCtx<S: Simd> = S;
	type SimdIndex<S: Simd> = S::u64s;
	type SimdMask<S: Simd> = S::m64s;
	type SimdMemMask<S: Simd> = pulp::MemMask<S::m64s>;
	type SimdVec<S: Simd> = S::f64s;
	type Unit = Self;

	const IS_NATIVE_F64: bool = true;
	const IS_REAL: bool = true;
	const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;

	#[inline(always)]
	fn zero_impl() -> Self {
		0.0
	}

	#[inline(always)]
	fn one_impl() -> Self {
		1.0
	}

	#[inline(always)]
	fn nan_impl() -> Self {
		Self::NAN
	}

	#[inline(always)]
	fn infinity_impl() -> Self {
		Self::INFINITY
	}

	// the real field is `f64` itself, so conversion from the real part is a copy
	#[inline(always)]
	fn from_real_impl(value: &Self) -> Self {
		*value
	}

	// identity conversion (`f64 -> f64`)
	#[inline(always)]
	fn from_f64_impl(value: f64) -> Self {
		value as _
	}

	#[inline(always)]
	fn real_part_impl(value: &Self) -> Self {
		*value
	}

	// real numbers have no imaginary part
	#[inline(always)]
	fn imag_part_impl(_: &Self) -> Self {
		0.0
	}

	#[inline(always)]
	fn copy_impl(value: &Self) -> Self {
		*value
	}

	// conjugation is a no-op for reals
	#[inline(always)]
	fn conj_impl(value: &Self) -> Self {
		*value
	}

	#[inline(always)]
	fn recip_impl(value: &Self) -> Self {
		1.0 / *value
	}

	// `libm` keeps the math functions available under `#![no_std]`
	#[inline(always)]
	fn sqrt_impl(value: &Self) -> Self {
		libm::sqrt(*value)
	}

	#[inline(always)]
	fn abs_impl(value: &Self) -> Self {
		(*value).abs()
	}

	// for reals, the "1-norm" of a scalar is just its absolute value
	#[inline(always)]
	fn abs1_impl(value: &Self) -> Self {
		(*value).abs()
	}

	// |x|² == x * x for reals
	#[inline(always)]
	fn abs2_impl(value: &Self) -> Self {
		(*value) * (*value)
	}

	#[inline(always)]
	fn mul_real_impl(lhs: &Self, rhs: &Self) -> Self {
		(*lhs) * (*rhs)
	}

	// identical to `mul_real_impl` for a native float type
	#[inline(always)]
	fn mul_pow2_impl(lhs: &Self, rhs: &Self) -> Self {
		(*lhs) * (*rhs)
	}

	#[inline(always)]
	fn is_nan_impl(value: &Self) -> bool {
		(*value).is_nan()
	}

	// the context is the `Simd` token itself
	#[inline(always)]
	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
		simd
	}

	#[inline(always)]
	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
		ctx.splat_f64s(*value)
	}

	// same as `simd_splat`, since `Real == Self`
	#[inline(always)]
	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
		ctx.splat_f64s(*value)
	}

	#[inline(always)]
	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.add_f64s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.sub_f64s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.neg_f64s(value)
	}

	// conjugation is the identity on reals
	#[inline(always)]
	fn simd_conj<S: Simd>(_: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		value
	}

	#[inline(always)]
	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.abs_f64s(value)
	}

	#[inline(always)]
	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_f64s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_f64s(lhs, real_rhs)
	}

	#[inline(always)]
	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_f64s(lhs, real_rhs)
	}

	// conj(lhs) * rhs == lhs * rhs for reals
	#[inline(always)]
	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_f64s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_add_e_f64s(lhs, rhs, acc)
	}

	// conjugating variant is the same multiply-add for reals
	#[inline(always)]
	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_add_e_f64s(lhs, rhs, acc)
	}

	// |x|² == x * x lane-wise
	#[inline(always)]
	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_f64s(value, value)
	}

	#[inline(always)]
	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_add_e_f64s(value, value, acc)
	}

	#[inline(always)]
	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
		ctx.reduce_sum_f64s(value)
	}

	#[inline(always)]
	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
		ctx.reduce_max_f64s(value)
	}

	#[inline(always)]
	fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		ctx.equal_f64s(real_lhs, real_rhs)
	}

	#[inline(always)]
	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		ctx.less_than_f64s(real_lhs, real_rhs)
	}

	#[inline(always)]
	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		ctx.greater_than_f64s(real_lhs, real_rhs)
	}

	#[inline(always)]
	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		ctx.less_than_or_equal_f64s(real_lhs, real_rhs)
	}

	#[inline(always)]
	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		ctx.greater_than_or_equal_f64s(real_lhs, real_rhs)
	}

	// lane-wise `mask ? lhs : rhs`
	#[inline(always)]
	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.select_f64s_m64s(mask, lhs, rhs)
	}

	#[inline(always)]
	fn simd_index_select<S: Simd>(
		ctx: &Self::SimdCtx<S>,
		mask: Self::SimdMask<S>,
		lhs: Self::SimdIndex<S>,
		rhs: Self::SimdIndex<S>,
	) -> Self::SimdIndex<S> {
		ctx.select_u64s_m64s(mask, lhs, rhs)
	}

	#[inline(always)]
	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
		ctx.splat_u64s(value)
	}

	#[inline(always)]
	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
		ctx.add_u64s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
		ctx.less_than_u64s(lhs, rhs)
	}

	// for reals the max of |re| and |im| degenerates to plain abs
	#[inline(always)]
	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.abs_f64s(value)
	}

	// the context *is* the `Simd` token, so recovering it is a copy
	#[inline(always)]
	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
		*ctx
	}

	#[inline(always)]
	fn simd_and_mask<S: Simd>(simd: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
		simd.and_m64s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_or_mask<S: Simd>(simd: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
		simd.or_m64s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
		ctx.not_m64s(mask)
	}

	#[inline(always)]
	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
		ctx.first_true_m64s(value)
	}

	#[inline(always)]
	fn is_finite_impl(value: &Self) -> bool {
		(*value).is_finite()
	}

	// memory mask covering lanes in `start..end`, for partial loads/stores
	#[inline(always)]
	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: u64, end: u64) -> Self::SimdMemMask<S> {
		ctx.mask_between_m64s(start, end)
	}

	#[inline(always)]
	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: u64, end: u64) -> Self::SimdMask<S> {
		ctx.mask_between_m64s(start, end).mask()
	}

	// caller upholds the trait's safety contract for masked loads from `ptr`
	#[inline(always)]
	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mask_load_ptr_f64s(mask, ptr as _)
	}

	// caller upholds the trait's safety contract for masked stores to `ptr`
	#[inline(always)]
	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
		ctx.mask_store_ptr_f64s(mask, ptr as _, values);
	}
}
1800
1801impl RealField for f64 {
1802	#[inline(always)]
1803	fn epsilon_impl() -> Self {
1804		Self::EPSILON
1805	}
1806
1807	#[inline(always)]
1808	fn min_positive_impl() -> Self {
1809		Self::MIN_POSITIVE
1810	}
1811
1812	#[inline(always)]
1813	fn max_positive_impl() -> Self {
1814		Self::MIN_POSITIVE.recip()
1815	}
1816
1817	#[inline(always)]
1818	fn sqrt_min_positive_impl() -> Self {
1819		libm::sqrt(Self::MIN_POSITIVE)
1820	}
1821
1822	#[inline(always)]
1823	fn sqrt_max_positive_impl() -> Self {
1824		libm::sqrt(Self::MIN_POSITIVE.recip())
1825	}
1826
1827	#[inline(always)]
1828	fn nbits_impl() -> usize {
1829		Self::MANTISSA_DIGITS as usize
1830	}
1831}
1832
1833impl<T: RealField> ComplexField for Complex<T> {
1834	type Arch = T::Arch;
1835	type Index = T::Index;
1836	type Real = T;
1837	type SimdCtx<S: Simd> = T::SimdCtx<S>;
1838	type SimdIndex<S: Simd> = T::SimdIndex<S>;
1839	type SimdMask<S: Simd> = T::SimdMask<S>;
1840	type SimdMemMask<S: Simd> = Complex<T::SimdMemMask<S>>;
1841	type SimdVec<S: Simd> = Complex<T::SimdVec<S>>;
1842	type Unit = T::Unit;
1843
1844	const IS_NATIVE_C32: bool = T::IS_NATIVE_F32;
1845	const IS_NATIVE_C64: bool = T::IS_NATIVE_F64;
1846	const IS_REAL: bool = false;
1847	const SIMD_CAPABILITIES: SimdCapabilities = T::SIMD_CAPABILITIES;
1848
1849	#[inline]
1850	fn zero_impl() -> Self {
1851		Complex {
1852			re: T::zero_impl(),
1853			im: T::zero_impl(),
1854		}
1855	}
1856
1857	#[inline]
1858	fn one_impl() -> Self {
1859		Complex {
1860			re: T::one_impl(),
1861			im: T::zero_impl(),
1862		}
1863	}
1864
1865	#[inline]
1866	fn nan_impl() -> Self {
1867		Complex {
1868			re: T::nan_impl(),
1869			im: T::nan_impl(),
1870		}
1871	}
1872
1873	#[inline]
1874	fn infinity_impl() -> Self {
1875		Complex {
1876			re: T::infinity_impl(),
1877			im: T::infinity_impl(),
1878		}
1879	}
1880
1881	#[inline]
1882	fn from_real_impl(real: &Self::Real) -> Self {
1883		Complex {
1884			re: real.clone(),
1885			im: T::zero_impl(),
1886		}
1887	}
1888
1889	#[inline]
1890	fn from_f64_impl(real: f64) -> Self {
1891		Complex {
1892			re: T::from_f64_impl(real),
1893			im: T::zero_impl(),
1894		}
1895	}
1896
1897	#[inline]
1898	fn real_part_impl(value: &Self) -> Self::Real {
1899		value.re.clone()
1900	}
1901
1902	#[inline]
1903	fn imag_part_impl(value: &Self) -> Self::Real {
1904		value.im.clone()
1905	}
1906
1907	#[inline]
1908	fn copy_impl(value: &Self) -> Self {
1909		value.clone()
1910	}
1911
1912	#[inline]
1913	fn conj_impl(value: &Self) -> Self {
1914		Self {
1915			re: value.re.clone(),
1916			im: value.im.neg_by_ref(),
1917		}
1918	}
1919
1920	#[inline]
1921	fn recip_impl(value: &Self) -> Self {
1922		let (re, im) = recip_impl(value.re.clone(), value.im.clone());
1923		Complex { re, im }
1924	}
1925
1926	#[inline]
1927	fn sqrt_impl(value: &Self) -> Self {
1928		let (re, im) = sqrt_impl(value.re.clone(), value.im.clone());
1929		Complex { re, im }
1930	}
1931
1932	#[inline]
1933	fn abs_impl(value: &Self) -> Self::Real {
1934		abs_impl(value.re.clone(), value.im.clone())
1935	}
1936
1937	#[inline]
1938	#[faer_macros::math]
1939	fn abs1_impl(value: &Self) -> Self::Real {
1940		abs1(value.re) + abs1(value.im)
1941	}
1942
1943	#[inline]
1944	#[faer_macros::math]
1945	fn abs2_impl(value: &Self) -> Self::Real {
1946		abs2(value.re) + abs2(value.im)
1947	}
1948
1949	#[inline]
1950	#[faer_macros::math]
1951	fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
1952		Complex {
1953			re: lhs.re * rhs,
1954			im: lhs.im * rhs,
1955		}
1956	}
1957
1958	#[inline]
1959	#[faer_macros::math]
1960	fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
1961		Complex {
1962			re: mul_pow2(lhs.re, rhs),
1963			im: mul_pow2(lhs.im, rhs),
1964		}
1965	}
1966
1967	#[inline]
1968	#[faer_macros::math]
1969	fn is_finite_impl(value: &Self) -> bool {
1970		is_finite(value.re) && is_finite(value.im)
1971	}
1972
1973	#[inline(always)]
1974	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
1975		T::simd_ctx(simd)
1976	}
1977
1978	#[inline(always)]
1979	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
1980		T::ctx_from_simd(ctx)
1981	}
1982
1983	#[inline(always)]
1984	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
1985		Complex {
1986			re: T::simd_splat(ctx, &value.re),
1987			im: T::simd_splat(ctx, &value.im),
1988		}
1989	}
1990
1991	#[inline(always)]
1992	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
1993		Complex {
1994			re: T::simd_splat_real(ctx, value),
1995			im: T::simd_splat_real(ctx, value),
1996		}
1997	}
1998
1999	#[inline(always)]
2000	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2001		Complex {
2002			re: T::simd_add(ctx, lhs.re, rhs.re),
2003			im: T::simd_add(ctx, lhs.im, rhs.im),
2004		}
2005	}
2006
2007	#[inline(always)]
2008	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2009		Complex {
2010			re: T::simd_sub(ctx, lhs.re, rhs.re),
2011			im: T::simd_sub(ctx, lhs.im, rhs.im),
2012		}
2013	}
2014
2015	#[inline(always)]
2016	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2017		Complex {
2018			re: T::simd_neg(ctx, value.re),
2019			im: T::simd_neg(ctx, value.im),
2020		}
2021	}
2022
2023	#[inline(always)]
2024	fn simd_conj<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2025		Complex {
2026			re: value.re,
2027			im: T::simd_neg(ctx, value.im),
2028		}
2029	}
2030
2031	#[inline(always)]
2032	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2033		let v = T::simd_add(ctx, T::simd_abs1(ctx, value.re), T::simd_abs1(ctx, value.im));
2034		Complex { re: v, im: v }
2035	}
2036
2037	#[inline(always)]
2038	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
2039		let re = T::simd_abs_max(ctx, value.re);
2040		let im = T::simd_abs_max(ctx, value.im);
2041
2042		let v = T::simd_select(ctx, T::simd_greater_than(ctx, re, im), re, im);
2043		Complex { re: v, im: v }
2044	}
2045
2046	#[inline(always)]
2047	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2048		Complex {
2049			re: T::simd_mul_real(ctx, lhs.re, real_rhs.re),
2050			im: T::simd_mul_real(ctx, lhs.im, real_rhs.re),
2051		}
2052	}
2053
2054	#[inline(always)]
2055	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2056		Complex {
2057			re: T::simd_mul_pow2(ctx, lhs.re, real_rhs.re),
2058			im: T::simd_mul_pow2(ctx, lhs.im, real_rhs.re),
2059		}
2060	}
2061
2062	#[inline(always)]
2063	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2064		Complex {
2065			re: T::simd_mul_add(ctx, lhs.re, rhs.re, T::simd_neg(ctx, T::simd_mul(ctx, lhs.im, rhs.im))),
2066			im: T::simd_mul_add(ctx, lhs.re, rhs.im, T::simd_mul(ctx, lhs.im, rhs.re)),
2067		}
2068	}
2069
2070	#[inline(always)]
2071	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2072		Complex {
2073			re: T::simd_mul_add(ctx, lhs.re, rhs.re, T::simd_mul(ctx, lhs.im, rhs.im)),
2074			im: T::simd_mul_add(ctx, lhs.re, rhs.im, T::simd_neg(ctx, T::simd_mul(ctx, lhs.im, rhs.re))),
2075		}
2076	}
2077
2078	#[inline(always)]
2079	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
2080		Complex {
2081			re: T::simd_mul_add(ctx, T::simd_neg(ctx, lhs.im), rhs.im, T::simd_mul_add(ctx, lhs.re, rhs.re, acc.re)),
2082			im: T::simd_mul_add(ctx, lhs.re, rhs.im, T::simd_mul_add(ctx, lhs.im, rhs.re, acc.im)),
2083		}
2084	}
2085
	#[inline(always)]
	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
		// conj(lhs) * rhs + acc:
		// re = b*d + (a*c + acc.re), im = a*d + ((-b)*c + acc.im).
		Complex {
			re: T::simd_mul_add(ctx, lhs.im, rhs.im, T::simd_mul_add(ctx, lhs.re, rhs.re, acc.re)),
			im: T::simd_mul_add(ctx, lhs.re, rhs.im, T::simd_mul_add(ctx, T::simd_neg(ctx, lhs.im), rhs.re, acc.im)),
		}
	}
2093
	#[inline(always)]
	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		// Squared modulus re^2 + im^2, broadcast to both halves of the result
		// (real representation).
		let v = T::simd_abs2_add(ctx, value.re, T::simd_abs2(ctx, value.im));
		Complex { re: v, im: v }
	}
2099
	#[inline(always)]
	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
		// re^2 + im^2 + acc.re; only the `re` half of the real-representation
		// accumulator is read, and the sum is broadcast to both halves.
		let v = T::simd_abs2_add(ctx, value.re, T::simd_abs2_add(ctx, value.im, acc.re));
		Complex { re: v, im: v }
	}
2105
2106	#[inline(always)]
2107	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
2108		Complex {
2109			re: T::simd_reduce_sum(ctx, value.re),
2110			im: T::simd_reduce_sum(ctx, value.im),
2111		}
2112	}
2113
2114	#[inline(always)]
2115	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
2116		Complex {
2117			re: T::simd_reduce_max(ctx, value.re),
2118			im: T::simd_reduce_max(ctx, value.im),
2119		}
2120	}
2121
2122	#[inline(always)]
2123	fn simd_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
2124		T::simd_and_mask(
2125			ctx,
2126			T::simd_equal(ctx, real_lhs.re, real_rhs.re),
2127			T::simd_equal(ctx, real_lhs.im, real_rhs.im),
2128		)
2129	}
2130
	#[inline(always)]
	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		// Ordering is only defined for real-representation operands: compare
		// the `re` halves, ignore `im`.
		T::simd_less_than(ctx, real_lhs.re, real_rhs.re)
	}
2135
	#[inline(always)]
	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		// Real-representation comparison on the `re` halves only.
		T::simd_less_than_or_equal(ctx, real_lhs.re, real_rhs.re)
	}
2140
	#[inline(always)]
	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		// Real-representation comparison on the `re` halves only.
		T::simd_greater_than(ctx, real_lhs.re, real_rhs.re)
	}
2145
	#[inline(always)]
	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		// Real-representation comparison on the `re` halves only.
		T::simd_greater_than_or_equal(ctx, real_lhs.re, real_rhs.re)
	}
2150
2151	#[inline(always)]
2152	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
2153		Complex {
2154			re: T::simd_select(ctx, mask, lhs.re, rhs.re),
2155			im: T::simd_select(ctx, mask, lhs.im, rhs.im),
2156		}
2157	}
2158
	#[inline(always)]
	fn simd_index_select<S: Simd>(
		ctx: &Self::SimdCtx<S>,
		mask: Self::SimdMask<S>,
		lhs: Self::SimdIndex<S>,
		rhs: Self::SimdIndex<S>,
	) -> Self::SimdIndex<S> {
		// Index vectors are shared with the real field; forward unchanged.
		T::simd_index_select(ctx, mask, lhs, rhs)
	}
2168
	#[inline(always)]
	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
		// Delegate to the underlying real field's index splat.
		T::simd_index_splat(ctx, value)
	}
2173
	#[inline(always)]
	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
		// Delegate to the underlying real field's index addition.
		T::simd_index_add(ctx, lhs, rhs)
	}
2178
	#[inline(always)]
	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
		// Delegate to the underlying real field's index comparison.
		T::simd_index_less_than(ctx, lhs, rhs)
	}
2183
	#[inline(always)]
	fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
		// Masks are shared with the real field; forward unchanged.
		T::simd_and_mask(ctx, lhs, rhs)
	}
2188
	#[inline(always)]
	fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
		// Masks are shared with the real field; forward unchanged.
		T::simd_or_mask(ctx, lhs, rhs)
	}
2193
	#[inline(always)]
	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
		// Masks are shared with the real field; forward unchanged.
		T::simd_not_mask(ctx, mask)
	}
2198
	#[inline(always)]
	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
		// Index of the first set lane, as defined by the real field.
		T::simd_first_true_mask(ctx, value)
	}
2203
	#[inline(always)]
	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMemMask<S> {
		// `n` = number of `T` lanes per half-register (sizeof(SimdVec) is twice
		// a `T` vector, sizeof(Self) is twice a `T`).
		let n = core::mem::size_of::<Self::SimdVec<S>>() / core::mem::size_of::<Self>();
		// Each complex element spans two `T` memory slots, hence the doubling.
		let start = start.zx() * 2;
		let end = end.zx() * 2;

		// Split the doubled `T` range [start, end) across the two stored
		// halves: the first covers slots [0, n), the second [n, 2n).
		// NOTE(review): assumes the masked-load memory layout matches
		// `simd_mask_load_raw`'s two consecutive half-registers — confirm.
		let re = T::simd_mem_mask_between(ctx, Self::Index::truncate(start.min(n)), Self::Index::truncate(end.min(n)));
		let im = T::simd_mem_mask_between(ctx, Self::Index::truncate(start.max(n) - n), Self::Index::truncate(end.max(n) - n));
		Complex { re, im }
	}
2214
	#[inline(always)]
	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: Self::Index, end: Self::Index) -> Self::SimdMask<S> {
		// Lane masks are per complex element (re/im live in separate halves),
		// so no index doubling is needed here, unlike the memory mask above.
		T::simd_mask_between(ctx, start, end)
	}
2219
	#[inline(always)]
	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
		// Masked load of the two stored halves, each with its own memory mask.
		// Safety: inherited from the trait contract — `ptr` must be valid for
		// the lanes enabled in `mask`.
		Complex {
			re: T::simd_mask_load_raw(ctx, mask.re, core::ptr::addr_of!((*ptr).re)),
			im: T::simd_mask_load_raw(ctx, mask.im, core::ptr::addr_of!((*ptr).im)),
		}
	}
2227
	#[inline(always)]
	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
		// Masked store of the two halves; mirror image of `simd_mask_load_raw`.
		// Safety: inherited from the trait contract — `ptr` must be writable
		// for the lanes enabled in `mask`.
		T::simd_mask_store_raw(ctx, mask.re, core::ptr::addr_of_mut!((*ptr).re), values.re);
		T::simd_mask_store_raw(ctx, mask.im, core::ptr::addr_of_mut!((*ptr).im), values.im);
	}
2233}
2234
// Transparent newtype over `num_complex::Complex<T>`; `repr(transparent)`
// guarantees the layout is identical to the wrapped `Complex<T>`.
#[repr(transparent)]
#[doc(hidden)]
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct ComplexImpl<T>(Complex<T>);
2239
// Conjugate-view counterpart of `ComplexImpl`: same bits, paired with it
// through the `Conjugate` impls below.
#[repr(transparent)]
#[doc(hidden)]
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct ComplexImplConj<T>(Complex<T>);
2244
// `ComplexImpl<f32>` is the canonical form; its conjugate view is
// `ComplexImplConj<f32>`.
unsafe impl Conjugate for ComplexImpl<f32> {
	type Canonical = ComplexImpl<f32>;
	type Conj = ComplexImplConj<f32>;

	const IS_CANONICAL: bool = true;
}
// The conjugate view is non-canonical; conjugating again returns the
// canonical `ComplexImpl<f32>`.
unsafe impl Conjugate for ComplexImplConj<f32> {
	type Canonical = ComplexImpl<f32>;
	type Conj = ComplexImpl<f32>;

	const IS_CANONICAL: bool = false;
}
// `ComplexImpl<f64>` is the canonical form; its conjugate view is
// `ComplexImplConj<f64>`.
unsafe impl Conjugate for ComplexImpl<f64> {
	type Canonical = ComplexImpl<f64>;
	type Conj = ComplexImplConj<f64>;

	const IS_CANONICAL: bool = true;
}
// The conjugate view is non-canonical; conjugating again returns the
// canonical `ComplexImpl<f64>`.
unsafe impl Conjugate for ComplexImplConj<f64> {
	type Canonical = ComplexImpl<f64>;
	type Conj = ComplexImpl<f64>;

	const IS_CANONICAL: bool = false;
}
2269
2270impl<T: RealField> core::ops::Neg for &ComplexImpl<T> {
2271	type Output = ComplexImpl<T>;
2272
2273	#[inline]
2274	fn neg(self) -> Self::Output {
2275		use math_utils::*;
2276
2277		ComplexImpl(neg(&self.0))
2278	}
2279}
2280impl<T: RealField> core::ops::Add<&ComplexImpl<T>> for &ComplexImpl<T> {
2281	type Output = ComplexImpl<T>;
2282
2283	#[inline]
2284	fn add(self, rhs: &ComplexImpl<T>) -> Self::Output {
2285		use math_utils::*;
2286
2287		ComplexImpl(add(&self.0, &rhs.0))
2288	}
2289}
2290impl<T: RealField> core::ops::Sub<&ComplexImpl<T>> for &ComplexImpl<T> {
2291	type Output = ComplexImpl<T>;
2292
2293	#[inline]
2294	fn sub(self, rhs: &ComplexImpl<T>) -> Self::Output {
2295		use math_utils::*;
2296
2297		ComplexImpl(sub(&self.0, &rhs.0))
2298	}
2299}
2300impl<T: RealField> core::ops::Mul<&ComplexImpl<T>> for &ComplexImpl<T> {
2301	type Output = ComplexImpl<T>;
2302
2303	#[inline]
2304	fn mul(self, rhs: &ComplexImpl<T>) -> Self::Output {
2305		use math_utils::*;
2306
2307		ComplexImpl(mul(&self.0, &rhs.0))
2308	}
2309}
2310
2311impl<T> From<Complex<T>> for ComplexImpl<T> {
2312	#[inline]
2313	fn from(value: Complex<T>) -> Self {
2314		Self(value)
2315	}
2316}
2317
// Native `c32` implementation of `ComplexField`: SIMD vectors are pulp's
// packed complex registers (`S::c32s`), so lane-wise arithmetic forwards to
// the `_c32s` routines. Operations on the "real representation" reinterpret
// the register as plain `f32` lanes (first `try_const!` branch) or, when the
// register degenerates to a single `Complex<f32>` (scalar fallback), operate
// on that one value and broadcast the result to both scalar slots.
impl ComplexField for ComplexImpl<f32> {
	type Arch = pulp::Arch;
	type Index = u32;
	type Real = f32;
	type SimdCtx<S: Simd> = S;
	type SimdIndex<S: Simd> = S::u32s;
	type SimdMask<S: Simd> = S::m32s;
	type SimdMemMask<S: Simd> = pulp::MemMask<S::m32s>;
	type SimdVec<S: Simd> = S::c32s;
	type Unit = f32;

	const IS_NATIVE_C32: bool = true;
	const IS_REAL: bool = false;
	const SIMD_ABS_SPLIT_REAL_IMAG: bool = true;
	const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;

	// 0 + 0i.
	#[inline]
	fn zero_impl() -> Self {
		Complex {
			re: f32::zero_impl(),
			im: f32::zero_impl(),
		}
		.into()
	}

	// 1 + 0i.
	#[inline]
	fn one_impl() -> Self {
		Complex {
			re: f32::one_impl(),
			im: f32::zero_impl(),
		}
		.into()
	}

	// NaN in both components.
	#[inline]
	fn nan_impl() -> Self {
		Complex {
			re: f32::nan_impl(),
			im: f32::nan_impl(),
		}
		.into()
	}

	// +inf in both components.
	#[inline]
	fn infinity_impl() -> Self {
		Complex {
			re: f32::infinity_impl(),
			im: f32::infinity_impl(),
		}
		.into()
	}

	// Promote a real scalar: imaginary part is zero.
	#[inline]
	fn from_real_impl(real: &Self::Real) -> Self {
		Complex {
			re: real.clone(),
			im: f32::zero_impl(),
		}
		.into()
	}

	// f64 -> f32 conversion on the real axis.
	#[inline]
	fn from_f64_impl(real: f64) -> Self {
		Complex {
			re: f32::from_f64_impl(real),
			im: f32::zero_impl(),
		}
		.into()
	}

	#[inline]
	fn real_part_impl(value: &Self) -> Self::Real {
		value.0.re.clone()
	}

	#[inline]
	fn imag_part_impl(value: &Self) -> Self::Real {
		value.0.im.clone()
	}

	#[inline]
	fn copy_impl(value: &Self) -> Self {
		value.clone()
	}

	// Complex conjugate: negate the imaginary part.
	#[inline]
	fn conj_impl(value: &Self) -> Self {
		Complex {
			re: value.0.re.clone(),
			im: value.0.im.neg_by_ref(),
		}
		.into()
	}

	// 1/z via the free-standing helper shared with the other scalar impls.
	#[inline]
	fn recip_impl(value: &Self) -> Self {
		let (re, im) = recip_impl(value.0.re.clone(), value.0.im.clone());
		Complex { re, im }.into()
	}

	// Complex square root via the shared helper.
	#[inline]
	fn sqrt_impl(value: &Self) -> Self {
		let (re, im) = sqrt_impl(value.0.re.clone(), value.0.im.clone());
		Complex { re, im }.into()
	}

	// Modulus |z| via the shared helper.
	#[inline]
	fn abs_impl(value: &Self) -> Self::Real {
		abs_impl(value.0.re.clone(), value.0.im.clone())
	}

	// L1-style absolute value: |re| + |im|.
	#[inline]
	#[faer_macros::math]
	fn abs1_impl(value: &Self) -> Self::Real {
		abs1(value.0.re) + abs1(value.0.im)
	}

	// Squared modulus: re^2 + im^2.
	#[inline]
	#[faer_macros::math]
	fn abs2_impl(value: &Self) -> Self::Real {
		abs2(value.0.re) + abs2(value.0.im)
	}

	// Scale both components by a real scalar.
	#[inline]
	#[faer_macros::math]
	fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
		Complex {
			re: lhs.0.re * *rhs,
			im: lhs.0.im * *rhs,
		}
		.into()
	}

	// Scale by a power of two (exact in binary floating point).
	#[inline]
	#[faer_macros::math]
	fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
		Complex {
			re: mul_pow2(lhs.0.re, rhs),
			im: mul_pow2(lhs.0.im, rhs),
		}
		.into()
	}

	// Finite iff both components are finite.
	#[inline]
	#[faer_macros::math]
	fn is_finite_impl(value: &Self) -> bool {
		is_finite(value.0.re) && is_finite(value.0.im)
	}

	#[inline(always)]
	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
		f32::simd_ctx(simd)
	}

	#[inline(always)]
	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
		f32::ctx_from_simd(ctx)
	}

	#[inline(always)]
	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
		ctx.splat_c32s(value.0)
	}

	// Real representation: the scalar is duplicated into both slots.
	#[inline(always)]
	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
		ctx.splat_c32s(Complex { re: *value, im: *value })
	}

	#[inline(always)]
	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.add_c32s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.sub_c32s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.neg_c32s(value)
	}

	#[inline(always)]
	fn simd_conj<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.conj_c32s(value)
	}

	// Lane-wise |x| on the f32 view, or |re| + |im| broadcast in the scalar
	// fallback. The final `panic!()` branch is unreachable for the supported
	// register layouts.
	#[inline(always)]
	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
			bytemuck::cast(ctx.abs_f32s(bytemuck::cast(value)))
		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
			let value: Complex<f32> = bytemuck::cast(value);
			let v = value.re.abs() + value.im.abs();
			bytemuck::cast(Complex { re: v, im: v })
		} else {
			panic!();
		}
	}

	// Lane-wise |x|, or max(|re|, |im|) broadcast in the scalar fallback.
	#[inline(always)]
	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
			bytemuck::cast(ctx.abs_f32s(bytemuck::cast(value)))
		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
			let value: Complex<f32> = bytemuck::cast(value);
			let re = value.re.abs();
			let im = value.im.abs();
			let v = if re > im { re } else { im };
			bytemuck::cast(Complex { re: v, im: v })
		} else {
			panic!();
		}
	}

	// Lane-wise product with a real-representation rhs; in the scalar
	// fallback only `rhs.re` is read.
	#[inline(always)]
	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
			bytemuck::cast(ctx.mul_f32s(bytemuck::cast(lhs), bytemuck::cast(real_rhs)))
		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
			let mut lhs: Complex<f32> = bytemuck::cast(lhs);
			let rhs: Complex<f32> = bytemuck::cast(real_rhs);
			lhs *= rhs.re;
			bytemuck::cast(lhs)
		} else {
			panic!();
		}
	}

	// Power-of-two scaling is exact, so plain multiplication suffices.
	#[inline(always)]
	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		Self::simd_mul_real(ctx, lhs, real_rhs)
	}

	#[inline(always)]
	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_e_c32s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.conj_mul_e_c32s(lhs, rhs)
	}

	#[inline(always)]
	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_add_e_c32s(lhs, rhs, acc)
	}

	#[inline(always)]
	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.conj_mul_add_e_c32s(lhs, rhs, acc)
	}

	// Squared components (split real/imag view) or re^2 + im^2 broadcast in
	// the scalar fallback; cf. SIMD_ABS_SPLIT_REAL_IMAG.
	#[inline(always)]
	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
			bytemuck::cast(ctx.mul_f32s(bytemuck::cast(value), bytemuck::cast(value)))
		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
			let value: Complex<f32> = bytemuck::cast(value);
			let v = value.re * value.re + value.im * value.im;
			bytemuck::cast(Complex { re: v, im: v })
		} else {
			panic!();
		}
	}

	// Same as `simd_abs2`, fused with an accumulator; only `acc.re` is read
	// in the scalar fallback.
	#[inline(always)]
	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
			bytemuck::cast(ctx.mul_add_f32s(bytemuck::cast(value), bytemuck::cast(value), bytemuck::cast(acc)))
		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
			let value: Complex<f32> = bytemuck::cast(value);
			let acc: Complex<f32> = bytemuck::cast(acc);
			let v = value.re * value.re + value.im * value.im + acc.re;
			bytemuck::cast(Complex { re: v, im: v })
		} else {
			panic!();
		}
	}

	#[inline(always)]
	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
		ctx.reduce_sum_c32s(value).into()
	}

	#[inline(always)]
	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
		ctx.reduce_max_c32s(value).into()
	}

	// Deliberately unsupported for the packed-complex representation.
	#[inline(always)]
	fn simd_equal<S: Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
		panic!()
	}

	// Real-representation comparisons: either lane-wise on the f32 view, or
	// on the scalar fallback, where the mask type must be bool-sized so the
	// comparison result can be transmuted into it.
	#[inline(always)]
	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
			ctx.less_than_f32s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
			assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });

			let lhs: Complex<f32> = bytemuck::cast(real_lhs);
			let rhs: Complex<f32> = bytemuck::cast(real_rhs);
			unsafe { core::mem::transmute_copy(&(lhs.re < rhs.re)) }
		} else {
			panic!();
		}
	}

	#[inline(always)]
	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
			ctx.less_than_or_equal_f32s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
			assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });

			let lhs: Complex<f32> = bytemuck::cast(real_lhs);
			let rhs: Complex<f32> = bytemuck::cast(real_rhs);
			unsafe { core::mem::transmute_copy(&(lhs.re <= rhs.re)) }
		} else {
			panic!();
		}
	}

	#[inline(always)]
	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
			ctx.greater_than_f32s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
			assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });

			let lhs: Complex<f32> = bytemuck::cast(real_lhs);
			let rhs: Complex<f32> = bytemuck::cast(real_rhs);
			unsafe { core::mem::transmute_copy(&(lhs.re > rhs.re)) }
		} else {
			panic!();
		}
	}

	#[inline(always)]
	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
			ctx.greater_than_or_equal_f32s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
			assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });

			let lhs: Complex<f32> = bytemuck::cast(real_lhs);
			let rhs: Complex<f32> = bytemuck::cast(real_rhs);
			unsafe { core::mem::transmute_copy(&(lhs.re >= rhs.re)) }
		} else {
			panic!();
		}
	}

	// Lane select on the f32 view, or an all-or-nothing select in the scalar
	// fallback (the bool-sized mask picks one whole complex value).
	#[inline(always)]
	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<S::f32s>() } {
			bytemuck::cast(ctx.select_f32s_m32s(mask, bytemuck::cast(lhs), bytemuck::cast(rhs)))
		} else if try_const! { core::mem::size_of::<S::c32s>() == core::mem::size_of::<Complex<f32>>() } {
			assert!(try_const! { core::mem::size_of::<S::m32s>() == core::mem::size_of::<bool>() });
			let mask: bool = unsafe { core::mem::transmute_copy(&mask) };
			let lhs: Complex<f32> = bytemuck::cast(lhs);
			let rhs: Complex<f32> = bytemuck::cast(rhs);
			bytemuck::cast(if mask { lhs } else { rhs })
		} else {
			panic!();
		}
	}

	// Index and mask operations are shared with the real f32 implementation.
	#[inline(always)]
	fn simd_index_select<S: Simd>(
		ctx: &Self::SimdCtx<S>,
		mask: Self::SimdMask<S>,
		lhs: Self::SimdIndex<S>,
		rhs: Self::SimdIndex<S>,
	) -> Self::SimdIndex<S> {
		f32::simd_index_select(ctx, mask, lhs, rhs)
	}

	#[inline(always)]
	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
		f32::simd_index_splat(ctx, value)
	}

	#[inline(always)]
	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
		f32::simd_index_add(ctx, lhs, rhs)
	}

	#[inline(always)]
	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
		f32::simd_index_less_than(ctx, lhs, rhs)
	}

	#[inline(always)]
	fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
		f32::simd_and_mask(ctx, lhs, rhs)
	}

	#[inline(always)]
	fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
		f32::simd_or_mask(ctx, lhs, rhs)
	}

	#[inline(always)]
	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
		f32::simd_not_mask(ctx, mask)
	}

	#[inline(always)]
	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
		f32::simd_first_true_mask(ctx, value)
	}

	// Each complex lane spans two f32 slots in memory, hence the doubling of
	// the element range.
	#[inline(always)]
	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: u32, end: u32) -> Self::SimdMemMask<S> {
		ctx.mask_between_m32s(2 * start, 2 * end)
	}

	#[inline(always)]
	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: u32, end: u32) -> Self::SimdMask<S> {
		ctx.mask_between_m32s(2 * start, 2 * end).mask()
	}

	// Safety: inherited from the trait contract — `ptr` must be valid for
	// the lanes enabled in `mask`.
	#[inline(always)]
	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mask_load_ptr_c32s(mask, ptr as _)
	}

	// Safety: inherited from the trait contract — `ptr` must be writable for
	// the lanes enabled in `mask`.
	#[inline(always)]
	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
		ctx.mask_store_ptr_c32s(mask, ptr as _, values);
	}
}
2756
2757impl ComplexField for ComplexImpl<f64> {
2758	type Arch = pulp::Arch;
2759	type Index = u64;
2760	type Real = f64;
2761	type SimdCtx<S: Simd> = S;
2762	type SimdIndex<S: Simd> = S::u64s;
2763	type SimdMask<S: Simd> = S::m64s;
2764	type SimdMemMask<S: Simd> = pulp::MemMask<S::m64s>;
2765	type SimdVec<S: Simd> = S::c64s;
2766	type Unit = f64;
2767
2768	const IS_NATIVE_C64: bool = true;
2769	const IS_REAL: bool = false;
2770	const SIMD_ABS_SPLIT_REAL_IMAG: bool = true;
2771	const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Simd;
2772
	// 0 + 0i.
	#[inline]
	fn zero_impl() -> Self {
		Complex {
			re: f64::zero_impl(),
			im: f64::zero_impl(),
		}
		.into()
	}
2781
	// 1 + 0i.
	#[inline]
	fn one_impl() -> Self {
		Complex {
			re: f64::one_impl(),
			im: f64::zero_impl(),
		}
		.into()
	}
2790
	// NaN in both components.
	#[inline]
	fn nan_impl() -> Self {
		Complex {
			re: f64::nan_impl(),
			im: f64::nan_impl(),
		}
		.into()
	}
2799
	// +inf in both components.
	#[inline]
	fn infinity_impl() -> Self {
		Complex {
			re: f64::infinity_impl(),
			im: f64::infinity_impl(),
		}
		.into()
	}
2808
	// Promote a real scalar: imaginary part is zero.
	#[inline]
	fn from_real_impl(real: &Self::Real) -> Self {
		Complex {
			re: real.clone(),
			im: f64::zero_impl(),
		}
		.into()
	}
2817
	// f64 value placed on the real axis (identity conversion for this impl).
	#[inline]
	fn from_f64_impl(real: f64) -> Self {
		Complex {
			re: f64::from_f64_impl(real),
			im: f64::zero_impl(),
		}
		.into()
	}
2826
	// Real component of the wrapped complex value.
	#[inline]
	fn real_part_impl(value: &Self) -> Self::Real {
		value.0.re.clone()
	}
2831
	// Imaginary component of the wrapped complex value.
	#[inline]
	fn imag_part_impl(value: &Self) -> Self::Real {
		value.0.im.clone()
	}
2836
	// Bitwise copy; the wrapper derives `Copy`/`Clone`.
	#[inline]
	fn copy_impl(value: &Self) -> Self {
		value.clone()
	}
2841
	// Complex conjugate: negate the imaginary part.
	#[inline]
	fn conj_impl(value: &Self) -> Self {
		Complex {
			re: value.0.re.clone(),
			im: value.0.im.neg_by_ref(),
		}
		.into()
	}
2850
	// 1/z via the free-standing helper shared with the f32 impl.
	#[inline]
	fn recip_impl(value: &Self) -> Self {
		let (re, im) = recip_impl(value.0.re.clone(), value.0.im.clone());
		Complex { re, im }.into()
	}
2856
	// Complex square root via the shared helper.
	#[inline]
	fn sqrt_impl(value: &Self) -> Self {
		let (re, im) = sqrt_impl(value.0.re.clone(), value.0.im.clone());
		Complex { re, im }.into()
	}
2862
	// Modulus |z| via the shared helper.
	#[inline]
	fn abs_impl(value: &Self) -> Self::Real {
		abs_impl(value.0.re.clone(), value.0.im.clone())
	}
2867
	// L1-style absolute value: |re| + |im|.
	#[inline]
	#[faer_macros::math]
	fn abs1_impl(value: &Self) -> Self::Real {
		abs1(value.0.re) + abs1(value.0.im)
	}
2873
	// Squared modulus: re^2 + im^2.
	#[inline]
	#[faer_macros::math]
	fn abs2_impl(value: &Self) -> Self::Real {
		abs2(value.0.re) + abs2(value.0.im)
	}
2879
	// Scale both components by a real scalar.
	#[inline]
	#[faer_macros::math]
	fn mul_real_impl(lhs: &Self, rhs: &Self::Real) -> Self {
		Complex {
			re: lhs.0.re * *rhs,
			im: lhs.0.im * *rhs,
		}
		.into()
	}
2889
	// Scale by a power of two (exact in binary floating point).
	#[inline]
	#[faer_macros::math]
	fn mul_pow2_impl(lhs: &Self, rhs: &Self::Real) -> Self {
		Complex {
			re: mul_pow2(lhs.0.re, rhs),
			im: mul_pow2(lhs.0.im, rhs),
		}
		.into()
	}
2899
	// Finite iff both components are finite.
	#[inline]
	#[faer_macros::math]
	fn is_finite_impl(value: &Self) -> bool {
		is_finite(value.0.re) && is_finite(value.0.im)
	}
2905
	// SIMD context is shared with the real f64 implementation.
	#[inline(always)]
	fn simd_ctx<S: Simd>(simd: S) -> Self::SimdCtx<S> {
		f64::simd_ctx(simd)
	}
2910
	// Inverse of `simd_ctx`, also shared with the real f64 implementation.
	#[inline(always)]
	fn ctx_from_simd<S: Simd>(ctx: &Self::SimdCtx<S>) -> S {
		f64::ctx_from_simd(ctx)
	}
2915
	// Broadcast one complex value to every lane.
	#[inline(always)]
	fn simd_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self) -> Self::SimdVec<S> {
		ctx.splat_c64s(value.0)
	}
2920
	// Real representation: the scalar is duplicated into both slots.
	#[inline(always)]
	fn simd_splat_real<S: Simd>(ctx: &Self::SimdCtx<S>, value: &Self::Real) -> Self::SimdVec<S> {
		ctx.splat_c64s(Complex { re: *value, im: *value })
	}
2925
	// Lane-wise complex addition via pulp.
	#[inline(always)]
	fn simd_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.add_c64s(lhs, rhs)
	}
2930
	// Lane-wise complex subtraction via pulp.
	#[inline(always)]
	fn simd_sub<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.sub_c64s(lhs, rhs)
	}
2935
	// Lane-wise negation via pulp.
	#[inline(always)]
	fn simd_neg<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.neg_c64s(value)
	}
2940
	// Lane-wise conjugation via pulp.
	#[inline(always)]
	fn simd_conj<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.conj_c64s(value)
	}
2945
	// Lane-wise |x| on the f64 view of the register, or |re| + |im| broadcast
	// to both slots in the single-scalar fallback. The final `panic!()` is
	// unreachable for the supported register layouts.
	#[inline(always)]
	fn simd_abs1<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
			bytemuck::cast(ctx.abs_f64s(bytemuck::cast(value)))
		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
			let value: Complex<f64> = bytemuck::cast(value);
			let v = value.re.abs() + value.im.abs();
			bytemuck::cast(Complex { re: v, im: v })
		} else {
			panic!();
		}
	}
2958
	// Lane-wise |x|, or max(|re|, |im|) broadcast in the scalar fallback.
	#[inline(always)]
	fn simd_abs_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
			bytemuck::cast(ctx.abs_f64s(bytemuck::cast(value)))
		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
			let value: Complex<f64> = bytemuck::cast(value);
			let re = value.re.abs();
			let im = value.im.abs();
			let v = if re > im { re } else { im };
			bytemuck::cast(Complex { re: v, im: v })
		} else {
			panic!();
		}
	}
2973
	// Lane-wise product with a real-representation rhs; in the scalar
	// fallback only `rhs.re` is read.
	#[inline(always)]
	fn simd_mul_real<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
			bytemuck::cast(ctx.mul_f64s(bytemuck::cast(lhs), bytemuck::cast(real_rhs)))
		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
			let mut lhs: Complex<f64> = bytemuck::cast(lhs);
			let rhs: Complex<f64> = bytemuck::cast(real_rhs);
			lhs *= rhs.re;
			bytemuck::cast(lhs)
		} else {
			panic!();
		}
	}
2987
	// Power-of-two scaling is exact, so plain multiplication suffices.
	#[inline(always)]
	fn simd_mul_pow2<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		Self::simd_mul_real(ctx, lhs, real_rhs)
	}
2992
	// Lane-wise complex multiplication via pulp.
	#[inline(always)]
	fn simd_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_e_c64s(lhs, rhs)
	}
2997
	// Lane-wise conj(lhs) * rhs via pulp.
	#[inline(always)]
	fn simd_conj_mul<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.conj_mul_e_c64s(lhs, rhs)
	}
3002
	// Lane-wise lhs * rhs + acc via pulp.
	#[inline(always)]
	fn simd_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mul_add_e_c64s(lhs, rhs, acc)
	}
3007
	// Lane-wise conj(lhs) * rhs + acc via pulp.
	#[inline(always)]
	fn simd_conj_mul_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.conj_mul_add_e_c64s(lhs, rhs, acc)
	}
3012
	// Squared components on the f64 view, or re^2 + im^2 broadcast in the
	// scalar fallback; cf. SIMD_ABS_SPLIT_REAL_IMAG.
	#[inline(always)]
	fn simd_abs2<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self::SimdVec<S> {
		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
			bytemuck::cast(ctx.mul_f64s(bytemuck::cast(value), bytemuck::cast(value)))
		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
			let value: Complex<f64> = bytemuck::cast(value);
			let v = value.re * value.re + value.im * value.im;
			bytemuck::cast(Complex { re: v, im: v })
		} else {
			panic!();
		}
	}
3025
	/// Like `simd_abs2`, but accumulates into `acc`.
	///
	/// Vector branch: fused `value * value + acc` over `f64` lanes. Scalar
	/// fallback: `re² + im² + acc.re`, splatted into both components
	/// (`acc.im` is ignored — assumes the accumulator holds equal components;
	/// TODO confirm).
	#[inline(always)]
	fn simd_abs2_add<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>, acc: Self::SimdVec<S>) -> Self::SimdVec<S> {
		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
			bytemuck::cast(ctx.mul_add_f64s(bytemuck::cast(value), bytemuck::cast(value), bytemuck::cast(acc)))
		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
			let value: Complex<f64> = bytemuck::cast(value);
			let acc: Complex<f64> = bytemuck::cast(acc);
			let v = value.re * value.re + value.im * value.im + acc.re;
			bytemuck::cast(Complex { re: v, im: v })
		} else {
			// No supported layout for `S::c64s`.
			panic!();
		}
	}
3039
	/// Horizontal sum of all complex lanes, delegating to pulp's
	/// `reduce_sum_c64s`.
	#[inline(always)]
	fn simd_reduce_sum<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
		ctx.reduce_sum_c64s(value).into()
	}
3044
	/// Horizontal max reduction, delegating to pulp's `reduce_max_c64s`
	/// (the ordering over complex lanes is defined by that function).
	#[inline(always)]
	fn simd_reduce_max<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdVec<S>) -> Self {
		ctx.reduce_max_c64s(value).into()
	}
3049
3050	#[inline(always)]
3051	fn simd_equal<S: Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
3052		panic!()
3053	}
3054
	/// Lane-wise `<` on real-valued operands.
	///
	/// Vector branch: compares the bits as `f64` lanes. Scalar fallback:
	/// compares only the real parts and widens the resulting `bool` into the
	/// single-lane mask type.
	#[inline(always)]
	fn simd_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
			ctx.less_than_f64s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
			assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });

			let lhs: Complex<f64> = bytemuck::cast(real_lhs);
			let rhs: Complex<f64> = bytemuck::cast(real_rhs);
			// SAFETY: `S::m64s` has the same size as `bool` (asserted above).
			unsafe { core::mem::transmute_copy(&(lhs.re < rhs.re)) }
		} else {
			// No supported layout for `S::c64s`.
			panic!();
		}
	}
3069
	/// Lane-wise `<=` on real-valued operands.
	///
	/// Vector branch: compares the bits as `f64` lanes. Scalar fallback:
	/// compares only the real parts and widens the resulting `bool` into the
	/// single-lane mask type.
	#[inline(always)]
	fn simd_less_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
			ctx.less_than_or_equal_f64s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
			assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });

			let lhs: Complex<f64> = bytemuck::cast(real_lhs);
			let rhs: Complex<f64> = bytemuck::cast(real_rhs);
			// SAFETY: `S::m64s` has the same size as `bool` (asserted above).
			unsafe { core::mem::transmute_copy(&(lhs.re <= rhs.re)) }
		} else {
			// No supported layout for `S::c64s`.
			panic!();
		}
	}
3084
	/// Lane-wise `>` on real-valued operands.
	///
	/// Vector branch: compares the bits as `f64` lanes. Scalar fallback:
	/// compares only the real parts and widens the resulting `bool` into the
	/// single-lane mask type.
	#[inline(always)]
	fn simd_greater_than<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
			ctx.greater_than_f64s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
			assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });

			let lhs: Complex<f64> = bytemuck::cast(real_lhs);
			let rhs: Complex<f64> = bytemuck::cast(real_rhs);
			// SAFETY: `S::m64s` has the same size as `bool` (asserted above).
			unsafe { core::mem::transmute_copy(&(lhs.re > rhs.re)) }
		} else {
			// No supported layout for `S::c64s`.
			panic!();
		}
	}
3099
	/// Lane-wise `>=` on real-valued operands.
	///
	/// Vector branch: compares the bits as `f64` lanes. Scalar fallback:
	/// compares only the real parts and widens the resulting `bool` into the
	/// single-lane mask type.
	#[inline(always)]
	fn simd_greater_than_or_equal<S: Simd>(ctx: &Self::SimdCtx<S>, real_lhs: Self::SimdVec<S>, real_rhs: Self::SimdVec<S>) -> Self::SimdMask<S> {
		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
			ctx.greater_than_or_equal_f64s(bytemuck::cast(real_lhs), bytemuck::cast(real_rhs))
		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
			assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });

			let lhs: Complex<f64> = bytemuck::cast(real_lhs);
			let rhs: Complex<f64> = bytemuck::cast(real_rhs);
			// SAFETY: `S::m64s` has the same size as `bool` (asserted above).
			unsafe { core::mem::transmute_copy(&(lhs.re >= rhs.re)) }
		} else {
			// No supported layout for `S::c64s`.
			panic!();
		}
	}
3114
	/// Lane-wise select: `lhs` where `mask` is set, `rhs` elsewhere.
	///
	/// Vector branch: reinterprets the data as `f64` lanes and uses the `f64`
	/// select. Scalar fallback: the one-lane mask is read as a `bool` choosing
	/// between the two complex scalars.
	#[inline(always)]
	fn simd_select<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>, lhs: Self::SimdVec<S>, rhs: Self::SimdVec<S>) -> Self::SimdVec<S> {
		if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<S::f64s>() } {
			bytemuck::cast(ctx.select_f64s_m64s(mask, bytemuck::cast(lhs), bytemuck::cast(rhs)))
		} else if try_const! { core::mem::size_of::<S::c64s>() == core::mem::size_of::<Complex<f64>>() } {
			assert!(try_const! { core::mem::size_of::<S::m64s>() == core::mem::size_of::<bool>() });
			// SAFETY: `S::m64s` has the same size as `bool` (asserted above).
			let mask: bool = unsafe { core::mem::transmute_copy(&mask) };
			let lhs: Complex<f64> = bytemuck::cast(lhs);
			let rhs: Complex<f64> = bytemuck::cast(rhs);
			bytemuck::cast(if mask { lhs } else { rhs })
		} else {
			// No supported layout for `S::c64s`.
			panic!();
		}
	}
3129
	/// Index select; delegates to the `f64` implementation (the index and mask
	/// types are shared with the `f64` backend).
	#[inline(always)]
	fn simd_index_select<S: Simd>(
		ctx: &Self::SimdCtx<S>,
		mask: Self::SimdMask<S>,
		lhs: Self::SimdIndex<S>,
		rhs: Self::SimdIndex<S>,
	) -> Self::SimdIndex<S> {
		f64::simd_index_select(ctx, mask, lhs, rhs)
	}
3139
	/// Splats a scalar index into every lane; delegates to the `f64`
	/// implementation.
	#[inline(always)]
	fn simd_index_splat<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::Index) -> Self::SimdIndex<S> {
		f64::simd_index_splat(ctx, value)
	}
3144
	/// Lane-wise index addition; delegates to the `f64` implementation.
	#[inline(always)]
	fn simd_index_add<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
		f64::simd_index_add(ctx, lhs, rhs)
	}
3149
	/// Lane-wise index `<` comparison; delegates to the `f64` implementation.
	#[inline(always)]
	fn simd_index_less_than<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdIndex<S>, rhs: Self::SimdIndex<S>) -> Self::SimdMask<S> {
		f64::simd_index_less_than(ctx, lhs, rhs)
	}
3154
	/// Mask conjunction; delegates to the `f64` mask implementation.
	#[inline(always)]
	fn simd_and_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
		f64::simd_and_mask(ctx, lhs, rhs)
	}
3159
	/// Mask disjunction; delegates to the `f64` mask implementation.
	#[inline(always)]
	fn simd_or_mask<S: Simd>(ctx: &Self::SimdCtx<S>, lhs: Self::SimdMask<S>, rhs: Self::SimdMask<S>) -> Self::SimdMask<S> {
		f64::simd_or_mask(ctx, lhs, rhs)
	}
3164
	/// Mask negation; delegates to the `f64` mask implementation.
	#[inline(always)]
	fn simd_not_mask<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMask<S>) -> Self::SimdMask<S> {
		f64::simd_not_mask(ctx, mask)
	}
3169
	/// First set lane lookup; delegates to the `f64` mask implementation.
	#[inline(always)]
	fn simd_first_true_mask<S: Simd>(ctx: &Self::SimdCtx<S>, value: Self::SimdMask<S>) -> usize {
		f64::simd_first_true_mask(ctx, value)
	}
3174
	/// Builds a memory mask covering complex lanes `start..end`. Each
	/// `Complex<f64>` spans two `f64`/`m64` lanes, hence the doubled bounds.
	#[inline(always)]
	fn simd_mem_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: u64, end: u64) -> Self::SimdMemMask<S> {
		ctx.mask_between_m64s(2 * start, 2 * end)
	}
3179
	/// Like `simd_mem_mask_between`, but converts the memory mask into the
	/// comparison-mask representation via `.mask()`.
	#[inline(always)]
	fn simd_mask_between<S: Simd>(ctx: &Self::SimdCtx<S>, start: u64, end: u64) -> Self::SimdMask<S> {
		ctx.mask_between_m64s(2 * start, 2 * end).mask()
	}
3184
	/// Masked load of complex lanes from `ptr`.
	///
	/// # Safety
	/// The caller must uphold the contract of pulp's `mask_load_ptr_c64s`:
	/// the lanes selected by `mask` must be valid for reads at `ptr`.
	#[inline(always)]
	unsafe fn simd_mask_load_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
		ctx.mask_load_ptr_c64s(mask, ptr as _)
	}
3189
	/// Masked store of complex lanes to `ptr`.
	///
	/// # Safety
	/// The caller must uphold the contract of pulp's `mask_store_ptr_c64s`:
	/// the lanes selected by `mask` must be valid for writes at `ptr`.
	#[inline(always)]
	unsafe fn simd_mask_store_raw<S: Simd>(ctx: &Self::SimdCtx<S>, mask: Self::SimdMemMask<S>, ptr: *mut Self::SimdVec<S>, values: Self::SimdVec<S>) {
		ctx.mask_store_ptr_c64s(mask, ptr as _, values);
	}
3194}
3195
/// Zero-sized placeholder scalar: a unit struct on which every arithmetic
/// operation is a no-op returning `Symbolic`, so only the structure of a
/// computation (not its numeric content) is carried through.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Symbolic;
3198
3199impl core::ops::Add for Symbolic {
3200	type Output = Self;
3201
3202	fn add(self, _: Self) -> Self {
3203		Self
3204	}
3205}
3206impl core::ops::Sub for Symbolic {
3207	type Output = Self;
3208
3209	fn sub(self, _: Self) -> Self {
3210		Self
3211	}
3212}
3213impl core::ops::Mul for Symbolic {
3214	type Output = Self;
3215
3216	fn mul(self, _: Self) -> Self {
3217		Self
3218	}
3219}
3220impl core::ops::Div for Symbolic {
3221	type Output = Self;
3222
3223	fn div(self, _: Self) -> Self {
3224		Self
3225	}
3226}
3227impl core::ops::Neg for Symbolic {
3228	type Output = Self;
3229
3230	fn neg(self) -> Self {
3231		Self
3232	}
3233}
3234
3235impl core::ops::Add for &Symbolic {
3236	type Output = Symbolic;
3237
3238	fn add(self, _: Self) -> Symbolic {
3239		Symbolic
3240	}
3241}
3242impl core::ops::Sub for &Symbolic {
3243	type Output = Symbolic;
3244
3245	fn sub(self, _: Self) -> Symbolic {
3246		Symbolic
3247	}
3248}
3249impl core::ops::Mul for &Symbolic {
3250	type Output = Symbolic;
3251
3252	fn mul(self, _: Self) -> Symbolic {
3253		Symbolic
3254	}
3255}
3256impl core::ops::Div for &Symbolic {
3257	type Output = Symbolic;
3258
3259	fn div(self, _: Self) -> Symbolic {
3260		Symbolic
3261	}
3262}
3263impl core::ops::Neg for &Symbolic {
3264	type Output = Symbolic;
3265
3266	fn neg(self) -> Symbolic {
3267		Symbolic
3268	}
3269}
3270
3271impl core::ops::Rem for Symbolic {
3272	type Output = Self;
3273
3274	fn rem(self, _: Self) -> Self {
3275		Self
3276	}
3277}
3278impl num_traits::Zero for Symbolic {
3279	fn zero() -> Self {
3280		Self
3281	}
3282
3283	fn is_zero(&self) -> bool {
3284		true
3285	}
3286}
3287impl num_traits::One for Symbolic {
3288	fn one() -> Self {
3289		Self
3290	}
3291
3292	fn is_one(&self) -> bool {
3293		true
3294	}
3295}
3296impl num_traits::Num for Symbolic {
3297	type FromStrRadixErr = core::convert::Infallible;
3298
3299	fn from_str_radix(_: &str, _: u32) -> Result<Self, Self::FromStrRadixErr> {
3300		Ok(Self)
3301	}
3302}
3303
impl Symbolic {
	/// Returns a `'static` mutable slice of `len` symbolic values without
	/// allocating: `Symbolic` is a zero-sized unit struct, so the slice
	/// occupies no memory.
	#[inline]
	pub fn materialize(len: usize) -> &'static mut [Self] {
		// SAFETY: `Symbolic` is a ZST, so a dangling, well-aligned pointer is a
		// valid base for a slice of any length; no memory is ever read or
		// written through it, making the `'static` lifetime sound.
		unsafe { core::slice::from_raw_parts_mut(core::ptr::NonNull::dangling().as_ptr(), len) }
	}
}
3310
// All numeric limits of the symbolic scalar collapse to the unit value, and
// it carries zero bits of precision.
impl RealField for Symbolic {
	fn epsilon_impl() -> Self {
		Self
	}

	// Zero bits: the type stores no numeric information.
	fn nbits_impl() -> usize {
		0
	}

	fn min_positive_impl() -> Self {
		Self
	}

	fn max_positive_impl() -> Self {
		Self
	}

	fn sqrt_min_positive_impl() -> Self {
		Self
	}

	fn sqrt_max_positive_impl() -> Self {
		Self
	}
}
3336
// `Symbolic` implements the full `ComplexField` interface with unit stand-ins:
// the SIMD vector/index/mask types are all `()`, every scalar and lane
// operation returns the unit value, and reductions return `Symbolic`. This
// lets structure-only code run through generic numeric kernels.
impl ComplexField for Symbolic {
	type Arch = pulp::Scalar;
	type Index = usize;
	type Real = Self;
	// The SIMD "context" is just the bare `S` token; all vector types are `()`.
	type SimdCtx<S: pulp::Simd> = S;
	type SimdIndex<S: pulp::Simd> = ();
	type SimdMask<S: pulp::Simd> = ();
	type SimdMemMask<S: pulp::Simd> = ();
	type SimdVec<S: pulp::Simd> = ();
	type Unit = Self;

	// Treated as a real scalar; only the trivial copy capability is claimed.
	const IS_REAL: bool = true;
	const SIMD_CAPABILITIES: SimdCapabilities = SimdCapabilities::Copy;

	// Scalar constructors: every distinguished value is the unit value.
	fn zero_impl() -> Self {
		Self
	}

	fn one_impl() -> Self {
		Self
	}

	fn nan_impl() -> Self {
		Self
	}

	fn infinity_impl() -> Self {
		Self
	}

	fn from_real_impl(_: &Self::Real) -> Self {
		Self
	}

	fn from_f64_impl(_: f64) -> Self {
		Self
	}

	// Accessors and unary operations: all collapse to `Self`.
	fn real_part_impl(_: &Self) -> Self::Real {
		Self
	}

	fn imag_part_impl(_: &Self) -> Self::Real {
		Self
	}

	fn copy_impl(_: &Self) -> Self {
		Self
	}

	fn conj_impl(_: &Self) -> Self {
		Self
	}

	fn recip_impl(_: &Self) -> Self {
		Self
	}

	fn sqrt_impl(_: &Self) -> Self {
		Self
	}

	fn abs_impl(_: &Self) -> Self::Real {
		Self
	}

	fn abs1_impl(_: &Self) -> Self::Real {
		Self
	}

	fn abs2_impl(_: &Self) -> Self::Real {
		Self
	}

	fn mul_real_impl(_: &Self, _: &Self::Real) -> Self {
		Self
	}

	fn mul_pow2_impl(_: &Self, _: &Self::Real) -> Self {
		Self
	}

	// A symbolic value is always considered finite.
	fn is_finite_impl(_: &Self) -> bool {
		true
	}

	// SIMD context plumbing: the context is the `S` value itself.
	fn simd_ctx<S: pulp::Simd>(simd: S) -> Self::SimdCtx<S> {
		simd
	}

	fn ctx_from_simd<S: pulp::Simd>(simd: &Self::SimdCtx<S>) -> S {
		*simd
	}

	// Masked memory operations: masks and vectors are `()`, so these are no-ops.
	fn simd_mem_mask_between<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::Index, _: Self::Index) -> Self::SimdMemMask<S> {
		()
	}

	unsafe fn simd_mask_load_raw<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMemMask<S>, _: *const Self::SimdVec<S>) -> Self::SimdVec<S> {
		()
	}

	unsafe fn simd_mask_store_raw<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMemMask<S>, _: *mut Self::SimdVec<S>, _: Self::SimdVec<S>) {
		()
	}

	// Lane-wise arithmetic: every vector is `()`, so each op returns `()`.
	fn simd_splat<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: &Self) -> Self::SimdVec<S> {
		()
	}

	fn simd_splat_real<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: &Self::Real) -> Self::SimdVec<S> {
		()
	}

	fn simd_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
		()
	}

	fn simd_sub<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
		()
	}

	fn simd_neg<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
		()
	}

	fn simd_conj<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
		()
	}

	fn simd_abs1<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
		()
	}

	fn simd_abs_max<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
		()
	}

	fn simd_mul_real<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
		()
	}

	fn simd_mul_pow2<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
		()
	}

	fn simd_mul<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
		()
	}

	fn simd_conj_mul<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
		()
	}

	fn simd_mul_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
		()
	}

	fn simd_conj_mul_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
		()
	}

	fn simd_abs2<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
		()
	}

	fn simd_abs2_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
		()
	}

	// Reductions produce the unit scalar.
	fn simd_reduce_sum<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self {
		Self
	}

	fn simd_reduce_max<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>) -> Self {
		Self
	}

	// Comparisons and selection: masks are `()`, so these are no-ops too.
	fn simd_equal<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
		()
	}

	fn simd_less_than<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
		()
	}

	fn simd_less_than_or_equal<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
		()
	}

	fn simd_greater_than<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
		()
	}

	fn simd_greater_than_or_equal<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdMask<S> {
		()
	}

	fn simd_select<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>, _: Self::SimdVec<S>, _: Self::SimdVec<S>) -> Self::SimdVec<S> {
		()
	}

	// Index and mask operations: all unit-typed no-ops.
	fn simd_index_select<S: pulp::Simd>(
		_: &Self::SimdCtx<S>,
		_: Self::SimdMask<S>,
		_: Self::SimdIndex<S>,
		_: Self::SimdIndex<S>,
	) -> Self::SimdIndex<S> {
		()
	}

	fn simd_index_splat<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::Index) -> Self::SimdIndex<S> {
		()
	}

	fn simd_index_add<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdIndex<S>, _: Self::SimdIndex<S>) -> Self::SimdIndex<S> {
		()
	}

	fn simd_and_mask<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>, _: Self::SimdMask<S>) -> Self::SimdMask<S> {
		()
	}

	fn simd_or_mask<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>, _: Self::SimdMask<S>) -> Self::SimdMask<S> {
		()
	}

	fn simd_not_mask<S: Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>) -> Self::SimdMask<S> {
		()
	}

	// There is only the "zeroth" lane to report.
	fn simd_first_true_mask<S: pulp::Simd>(_: &Self::SimdCtx<S>, _: Self::SimdMask<S>) -> usize {
		0
	}

	fn simd_mask_between<S: Simd>(_: &Self::SimdCtx<S>, _: Self::Index, _: Self::Index) -> Self::SimdMask<S> {
		()
	}

	fn simd_index_less_than<S: Simd>(_: &Self::SimdCtx<S>, _: Self::SimdIndex<S>, _: Self::SimdIndex<S>) -> Self::SimdMask<S> {
		()
	}
}