1use proc_macro2::TokenStream;
7use quote::{format_ident, quote};
8
9use crate::algebra::{Algebra, Blade, ProductTable};
10use crate::spec::{AlgebraSpec, InvolutionKind, TypeSpec, WrapperKind};
11
/// Which side(s) of a binary product expression carry a wrapper type
/// (e.g. `Unit`/`Unitized`) when generating wrapper-aware trait impls.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum WrapperPosition {
    /// Only the left-hand operand is wrapped.
    Lhs,
    /// Only the right-hand operand is wrapped.
    Rhs,
    /// Both operands are wrapped.
    Both,
}
22
23use crate::symbolic::{
24 AtomToRust, ConstraintDeriver, ConstraintSolver, ExpressionSimplifier, GroebnerSimplifier,
25 ProductConstraintCollector, ProductKind as SymbolicProductKind, SolutionType, SymbolicProduct,
26};
27
/// Generates trait implementations (arithmetic ops, products, norms,
/// approx-equality, arbitrary instances) for the types declared in an
/// [`AlgebraSpec`].
pub struct TraitsGenerator<'a> {
    // The user-provided algebra specification (types, products, signature).
    spec: &'a AlgebraSpec,
    // The algebra derived from the spec, used for symbolic computation.
    algebra: &'a Algebra,
    // Precomputed blade product table (signs and result blades).
    table: ProductTable,
    // When false, Groebner-basis constraint simplification is skipped.
    enable_groebner: bool,
}
85
86impl<'a> TraitsGenerator<'a> {
87 pub fn new(spec: &'a AlgebraSpec, algebra: &'a Algebra, table: ProductTable) -> Self {
91 Self::with_options(spec, algebra, table, true)
92 }
93
94 pub fn with_options(
103 spec: &'a AlgebraSpec,
104 algebra: &'a Algebra,
105 table: ProductTable,
106 enable_groebner: bool,
107 ) -> Self {
108 Self {
109 spec,
110 algebra,
111 table,
112 enable_groebner,
113 }
114 }
115
116 pub fn generate_traits_file(&self) -> (TokenStream, String) {
124 let header = self.generate_header();
125 let imports = self.generate_imports();
126 let ops = self.generate_all_ops();
127 let product_traits = self.generate_all_product_traits();
128 let normed = self.generate_all_normed();
129 let approx = self.generate_all_approx();
130 let arbitrary = self.generate_all_arbitrary();
131 let verification_tests = self.generate_verification_tests_raw();
132
133 let main_tokens = quote! {
134 #header
135 #imports
136
137 #ops
141
142 #product_traits
146
147 #normed
151
152 #approx
156
157 #arbitrary
161 };
162
163 (main_tokens, verification_tests)
164 }
165
166 fn generate_header(&self) -> TokenStream {
168 let name = &self.spec.name;
169 let header_doc = format!(
170 r#"//! Trait implementations for {}.
171//!
172//! This file is auto-generated by clifford-codegen.
173//! Do not edit manually."#,
174 name
175 );
176
177 header_doc.parse().unwrap_or_else(|_| quote! {})
178 }
179
180 fn generate_imports(&self) -> TokenStream {
182 let type_names: Vec<_> = self
183 .spec
184 .types
185 .iter()
186 .filter(|t| t.alias_of.is_none())
187 .map(|t| format_ident!("{}", t.name))
188 .collect();
189
190 let has_versor_impls = self.will_generate_versor_impls();
193
194 let versor_import = if has_versor_impls {
195 quote! { Versor, VersorInverse, InverseSandwich, InverseAntisandwich, }
196 } else {
197 quote! {}
198 };
199
200 let projection_import = if self.spec.signature.r > 0 {
202 quote! { Project, Antiproject, }
203 } else {
204 quote! {}
205 };
206
207 let is_degenerate = self.spec.signature.r > 0;
209 let wrapper_import = if is_degenerate {
210 quote! { use crate::wrappers::Unitized; }
211 } else {
212 quote! { use crate::wrappers::Unit; }
213 };
214
215 quote! {
216 use crate::scalar::Float;
217 #[allow(unused_imports)]
218 use crate::ops::{
219 Wedge, Antiwedge, LeftContract, RightContract,
220 Sandwich, Antisandwich, Transform, ScalarProduct, BulkContract, WeightContract,
221 BulkExpand, WeightExpand, Dot, Antidot, WeightDual,
222 Reverse, Antireverse, Involute, RightComplement, #projection_import #versor_import
223 };
224 use super::types::{#(#type_names),*};
225 #[allow(unused_imports)]
226 #wrapper_import
227
228 use std::ops::{Add, Sub, Neg, Mul};
229
230 use approx::{AbsDiffEq, RelativeEq, UlpsEq};
231 }
232 }
233
234 fn generate_all_ops(&self) -> TokenStream {
240 let ops: Vec<TokenStream> = self
241 .spec
242 .types
243 .iter()
244 .filter(|t| t.alias_of.is_none())
245 .flat_map(|ty| self.generate_ops_for_type(ty))
246 .collect();
247
248 quote! { #(#ops)* }
249 }
250
251 fn generate_ops_for_type(&self, ty: &TypeSpec) -> Vec<TokenStream> {
253 let mut impls = vec![
255 self.generate_add(ty),
256 self.generate_sub(ty),
257 self.generate_neg(ty),
258 self.generate_scalar_mul(ty),
259 self.generate_scalar_mul_reverse_f32(ty),
260 self.generate_scalar_mul_reverse_f64(ty),
261 ];
262
263 for entry in &self.spec.products.geometric {
265 if entry.lhs == ty.name {
267 if let Some(other) = self.find_type(&entry.rhs) {
268 if let Some(output_type) = self.find_type(&entry.output) {
269 impls.push(self.generate_geometric_mul_from_entry(
270 ty,
271 other,
272 output_type,
273 entry,
274 ));
275 }
276 }
277 }
278 }
279
280 impls
281 }
282
283 fn find_type(&self, name: &str) -> Option<&TypeSpec> {
285 self.spec.types.iter().find(|t| t.name == name)
286 }
287
288 fn generate_constructor_call(_ty: &TypeSpec, field_exprs: &[TokenStream]) -> TokenStream {
290 quote! { Self::new_unchecked(#(#field_exprs),*) }
291 }
292
293 fn generate_add(&self, ty: &TypeSpec) -> TokenStream {
295 let name = format_ident!("{}", ty.name);
296 let field_adds: Vec<TokenStream> = ty
297 .fields
298 .iter()
299 .map(|f| {
300 let fname = format_ident!("{}", f.name);
301 quote! { self.#fname() + rhs.#fname() }
302 })
303 .collect();
304
305 let constructor_call = Self::generate_constructor_call(ty, &field_adds);
306
307 quote! {
308 impl<T: Float> Add for #name<T> {
309 type Output = Self;
310
311 #[inline]
312 fn add(self, rhs: Self) -> Self {
313 #constructor_call
314 }
315 }
316 }
317 }
318
319 fn generate_sub(&self, ty: &TypeSpec) -> TokenStream {
321 let name = format_ident!("{}", ty.name);
322 let field_subs: Vec<TokenStream> = ty
323 .fields
324 .iter()
325 .map(|f| {
326 let fname = format_ident!("{}", f.name);
327 quote! { self.#fname() - rhs.#fname() }
328 })
329 .collect();
330
331 let constructor_call = Self::generate_constructor_call(ty, &field_subs);
332
333 quote! {
334 impl<T: Float> Sub for #name<T> {
335 type Output = Self;
336
337 #[inline]
338 fn sub(self, rhs: Self) -> Self {
339 #constructor_call
340 }
341 }
342 }
343 }
344
345 fn generate_neg(&self, ty: &TypeSpec) -> TokenStream {
347 let name = format_ident!("{}", ty.name);
348 let field_negs: Vec<TokenStream> = ty
349 .fields
350 .iter()
351 .map(|f| {
352 let fname = format_ident!("{}", f.name);
353 quote! { -self.#fname() }
354 })
355 .collect();
356
357 let constructor_call = Self::generate_constructor_call(ty, &field_negs);
358
359 quote! {
360 impl<T: Float> Neg for #name<T> {
361 type Output = Self;
362
363 #[inline]
364 fn neg(self) -> Self {
365 #constructor_call
366 }
367 }
368 }
369 }
370
371 fn generate_scalar_mul(&self, ty: &TypeSpec) -> TokenStream {
373 let name = format_ident!("{}", ty.name);
374
375 quote! {
376 impl<T: Float> Mul<T> for #name<T> {
377 type Output = Self;
378
379 #[inline]
380 fn mul(self, scalar: T) -> Self {
381 self.scale(scalar)
382 }
383 }
384 }
385 }
386
387 fn generate_scalar_mul_reverse_f32(&self, ty: &TypeSpec) -> TokenStream {
389 let name = format_ident!("{}", ty.name);
390
391 quote! {
392 impl Mul<#name<f32>> for f32 {
393 type Output = #name<f32>;
394
395 #[inline]
396 fn mul(self, v: #name<f32>) -> #name<f32> {
397 v.scale(self)
398 }
399 }
400 }
401 }
402
403 fn generate_scalar_mul_reverse_f64(&self, ty: &TypeSpec) -> TokenStream {
405 let name = format_ident!("{}", ty.name);
406
407 quote! {
408 impl Mul<#name<f64>> for f64 {
409 type Output = #name<f64>;
410
411 #[inline]
412 fn mul(self, v: #name<f64>) -> #name<f64> {
413 v.scale(self)
414 }
415 }
416 }
417 }
418
419 fn generate_geometric_mul_from_entry(
429 &self,
430 a: &TypeSpec,
431 b: &TypeSpec,
432 output: &TypeSpec,
433 _entry: &crate::spec::ProductEntry,
434 ) -> TokenStream {
435 let a_name = format_ident!("{}", a.name);
436 let b_name = format_ident!("{}", b.name);
437 let out_name = format_ident!("{}", output.name);
438
439 let a_self_complement = a.versor.is_some()
444 && self
445 .find_complement_output_type(a)
446 .map(|t| t == a.name)
447 .unwrap_or(false);
448 let b_self_complement = b.versor.is_some()
449 && self
450 .find_complement_output_type(b)
451 .map(|t| t == b.name)
452 .unwrap_or(false);
453
454 let product_kind = if a_self_complement && b_self_complement {
458 SymbolicProductKind::Antigeometric
459 } else {
460 SymbolicProductKind::Geometric
461 };
462
463 let field_exprs = self.compute_product_expressions(a, b, output, product_kind);
464 let constructor_call = quote! { #out_name::new_unchecked(#(#field_exprs),*) };
465
466 quote! {
467 impl<T: Float> Mul<#b_name<T>> for #a_name<T> {
468 type Output = #out_name<T>;
469
470 #[inline]
471 fn mul(self, rhs: #b_name<T>) -> #out_name<T> {
472 #constructor_call
473 }
474 }
475 }
476 }
477
478 fn compute_product_expressions(
488 &self,
489 type_a: &TypeSpec,
490 type_b: &TypeSpec,
491 output_type: &TypeSpec,
492 kind: SymbolicProductKind,
493 ) -> Vec<TokenStream> {
494 self.compute_product_expressions_with_wrappers(
495 type_a,
496 None,
497 type_b,
498 None,
499 output_type,
500 kind,
501 )
502 }
503
504 fn compute_product_expressions_with_wrappers(
509 &self,
510 type_a: &TypeSpec,
511 wrapper_a: Option<WrapperKind>,
512 type_b: &TypeSpec,
513 wrapper_b: Option<WrapperKind>,
514 output_type: &TypeSpec,
515 kind: SymbolicProductKind,
516 ) -> Vec<TokenStream> {
517 let symbolic_product = SymbolicProduct::new(self.algebra);
518 let expr_simplifier = ExpressionSimplifier::new();
519
520 let groebner_simplifier =
522 self.create_groebner_simplifier_with_wrappers(type_a, wrapper_a, type_b, wrapper_b);
523
524 let a_symbols = symbolic_product.create_field_symbols(type_a, "self");
526 let b_symbols = symbolic_product.create_field_symbols(type_b, "rhs");
527
528 let symbolic_fields =
530 symbolic_product.compute(type_a, type_b, output_type, kind, &a_symbols, &b_symbols);
531
532 let mut wrapped_prefixes = Vec::new();
534 if wrapper_a.is_some() {
535 wrapped_prefixes.push("self");
536 }
537 if wrapper_b.is_some() {
538 wrapped_prefixes.push("rhs");
539 }
540
541 let converter =
543 AtomToRust::new_with_wrappers(&[type_a, type_b], &["self", "rhs"], &wrapped_prefixes);
544
545 symbolic_fields
547 .iter()
548 .map(|field| {
549 let simplified = expr_simplifier.simplify(&field.expression);
551 let reduced = groebner_simplifier.reduce_atom(&simplified);
553 converter.convert(&reduced)
554 })
555 .collect()
556 }
557
558 fn create_groebner_simplifier_with_wrappers(
564 &self,
565 type_a: &TypeSpec,
566 wrapper_a: Option<WrapperKind>,
567 type_b: &TypeSpec,
568 wrapper_b: Option<WrapperKind>,
569 ) -> GroebnerSimplifier {
570 if !self.enable_groebner {
572 return GroebnerSimplifier::new(vec![], true);
573 }
574
575 let collector =
576 ProductConstraintCollector::new(self.algebra, self.spec.norm.primary_involution);
577
578 let mut constraints = Vec::new();
580 if let Some(wrapper) = wrapper_a {
581 constraints.extend(collector.collect_wrapper_constraints(type_a, wrapper, "self"));
582 } else {
583 constraints.extend(collector.collect_constraints(type_a, "self"));
584 }
585 if let Some(wrapper) = wrapper_b {
586 constraints.extend(collector.collect_wrapper_constraints(type_b, wrapper, "rhs"));
587 } else {
588 constraints.extend(collector.collect_constraints(type_b, "rhs"));
589 }
590
591 GroebnerSimplifier::new(constraints, true)
593 }
594
595 fn create_groebner_simplifier_for_sandwich(
600 &self,
601 versor_type: &TypeSpec,
602 wrapper_versor: Option<WrapperKind>,
603 operand_type: &TypeSpec,
604 wrapper_operand: Option<WrapperKind>,
605 ) -> GroebnerSimplifier {
606 if !self.enable_groebner {
608 return GroebnerSimplifier::new(vec![], true);
609 }
610
611 let collector =
612 ProductConstraintCollector::new(self.algebra, self.spec.norm.primary_involution);
613
614 let mut constraints = Vec::new();
616 if let Some(wrapper) = wrapper_versor {
617 constraints.extend(collector.collect_wrapper_constraints(versor_type, wrapper, "self"));
618 } else {
619 constraints.extend(collector.collect_constraints(versor_type, "self"));
620 }
621 if let Some(wrapper) = wrapper_operand {
622 constraints.extend(collector.collect_wrapper_constraints(
623 operand_type,
624 wrapper,
625 "operand",
626 ));
627 } else {
628 constraints.extend(collector.collect_constraints(operand_type, "operand"));
629 }
630
631 GroebnerSimplifier::new(constraints, true)
632 }
633
634 fn compute_sandwich_expressions_with_wrappers(
639 &self,
640 versor: &TypeSpec,
641 wrapper_versor: Option<WrapperKind>,
642 operand: &TypeSpec,
643 wrapper_operand: Option<WrapperKind>,
644 use_antiproduct: bool,
645 ) -> Vec<TokenStream> {
646 let symbolic_product = SymbolicProduct::new(self.algebra);
647 let expr_simplifier = ExpressionSimplifier::new();
648
649 let groebner_simplifier = self.create_groebner_simplifier_for_sandwich(
651 versor,
652 wrapper_versor,
653 operand,
654 wrapper_operand,
655 );
656
657 let versor_symbols = symbolic_product.create_field_symbols(versor, "self");
659 let operand_symbols = symbolic_product.create_field_symbols(operand, "operand");
660
661 let symbolic_fields = symbolic_product.compute_sandwich(
663 versor,
664 operand,
665 &versor_symbols,
666 &operand_symbols,
667 use_antiproduct,
668 );
669
670 let mut wrapped_prefixes = Vec::new();
672 if wrapper_versor.is_some() {
673 wrapped_prefixes.push("self");
674 }
675 if wrapper_operand.is_some() {
676 wrapped_prefixes.push("operand");
677 }
678
679 let converter = AtomToRust::new_with_wrappers(
681 &[versor, operand],
682 &["self", "operand"],
683 &wrapped_prefixes,
684 );
685
686 symbolic_fields
688 .iter()
689 .map(|field| {
690 let simplified = expr_simplifier.simplify(&field.expression);
692 let reduced = groebner_simplifier.reduce_atom(&simplified);
694 converter.convert(&reduced)
695 })
696 .collect()
697 }
698
699 fn compute_sandwich_expressions(
703 &self,
704 versor: &TypeSpec,
705 operand: &TypeSpec,
706 ) -> Vec<TokenStream> {
707 self.compute_sandwich_expressions_impl(versor, operand, false)
708 }
709
710 fn compute_antisandwich_expressions(
714 &self,
715 versor: &TypeSpec,
716 operand: &TypeSpec,
717 ) -> Vec<TokenStream> {
718 self.compute_sandwich_expressions_impl(versor, operand, true)
719 }
720
721 fn compute_sandwich_expressions_impl(
726 &self,
727 versor: &TypeSpec,
728 operand: &TypeSpec,
729 use_antiproduct: bool,
730 ) -> Vec<TokenStream> {
731 operand
732 .fields
733 .iter()
734 .map(|field| {
735 let expr = self.compute_sandwich_field(
736 versor,
737 operand,
738 field.blade_index,
739 use_antiproduct,
740 );
741 if field.sign < 0 {
743 quote! { -(#expr) }
744 } else {
745 expr
746 }
747 })
748 .collect()
749 }
750
    /// Computes the token expression for one output blade of the sandwich
    /// product `v * x * rev(v)` (geometric, or antiproduct with antireverse
    /// when `use_antiproduct`).
    ///
    /// Expands the triple product over all (v1, x, v2) field combinations,
    /// accumulating an integer coefficient per distinct monomial so that
    /// cancelling terms drop out before any code is emitted.
    fn compute_sandwich_field(
        &self,
        versor: &TypeSpec,
        operand: &TypeSpec,
        result_blade: usize,
        use_antiproduct: bool,
    ) -> TokenStream {
        let dim = self.algebra.dim();

        // Monomial (v1 field, x field, v2 field) -> accumulated signed coefficient.
        let mut term_map: std::collections::HashMap<(String, String, String), i8> =
            std::collections::HashMap::new();

        for field_v1 in &versor.fields {
            for field_x in &operand.fields {
                for field_v2 in &versor.fields {
                    let v1_blade = field_v1.blade_index;
                    let x_blade = field_x.blade_index;
                    let v2_blade = field_v2.blade_index;

                    // First product: v1 * x (antiproduct in the anti case).
                    let (sign_vx, vx) = if use_antiproduct {
                        self.table.antiproduct(v1_blade, x_blade)
                    } else {
                        self.table.geometric(v1_blade, x_blade)
                    };
                    if sign_vx == 0 {
                        continue;
                    }

                    // Sign from reversing v2: (-1)^(g(g-1)/2), where g is the
                    // grade (or the antigrade dim-g for the antiproduct case).
                    let v2_grade = Blade::from_index(v2_blade).grade();
                    let rev_sign: i8 = if use_antiproduct {
                        let antigrade = dim - v2_grade;
                        if (antigrade * antigrade.saturating_sub(1) / 2).is_multiple_of(2) {
                            1
                        } else {
                            -1
                        }
                    } else {
                        if (v2_grade * v2_grade.saturating_sub(1) / 2).is_multiple_of(2) {
                            1
                        } else {
                            -1
                        }
                    };

                    // Second product: (v1 * x) * v2.
                    let (sign_vxr, result) = if use_antiproduct {
                        self.table.antiproduct(vx, v2_blade)
                    } else {
                        self.table.geometric(vx, v2_blade)
                    };
                    if sign_vxr == 0 {
                        continue;
                    }

                    if result == result_blade {
                        // Fold in the storage signs of all three fields.
                        let input_sign = field_v1.sign * field_x.sign * field_v2.sign;
                        let final_sign = sign_vx * sign_vxr * rev_sign * input_sign;
                        let key = (
                            field_v1.name.clone(),
                            field_x.name.clone(),
                            field_v2.name.clone(),
                        );
                        *term_map.entry(key).or_insert(0) += final_sign;
                    }
                }
            }
        }

        if term_map.is_empty() {
            return quote! { T::zero() };
        }

        // Drop monomials whose coefficients cancelled to zero.
        let mut terms: Vec<_> = term_map
            .into_iter()
            .filter(|(_, coeff)| *coeff != 0)
            .collect();

        if terms.is_empty() {
            return quote! { T::zero() };
        }

        // Sort for deterministic output (HashMap iteration order is not).
        terms.sort_by(|a, b| a.0.cmp(&b.0));

        let mut expr_parts: Vec<TokenStream> = Vec::new();
        for (i, ((v1, x, v2), coeff)) in terms.iter().enumerate() {
            let v1_ident = format_ident!("{}", v1);
            let x_ident = format_ident!("{}", x);
            let v2_ident = format_ident!("{}", v2);

            let abs_coeff = coeff.abs();
            let is_negative = *coeff < 0;

            let base_expr = quote! { self.#v1_ident() * operand.#x_ident() * self.#v2_ident() };

            // Scale by |coeff|, with tidy spellings for 1 and 2.
            let coeff_expr = match abs_coeff {
                1 => base_expr,
                2 => quote! { T::TWO * #base_expr },
                n => {
                    quote! { T::from_i8(#n) * #base_expr }
                }
            };

            // The first term carries its own sign; later terms join with +/-.
            let term_expr = match (i, is_negative) {
                (0, false) => coeff_expr,
                (0, true) => quote! { -(#coeff_expr) },
                (_, false) => quote! { + #coeff_expr },
                (_, true) => quote! { - #coeff_expr },
            };

            expr_parts.push(term_expr);
        }

        quote! { #(#expr_parts)* }
    }
881
882 fn compute_project_expressions(
887 &self,
888 source: &TypeSpec,
889 target: &TypeSpec,
890 output: &TypeSpec,
891 ) -> Vec<TokenStream> {
892 output
893 .fields
894 .iter()
895 .map(|field| self.compute_project_field(source, target, field.blade_index))
896 .collect()
897 }
898
899 fn compute_antiproject_expressions(
904 &self,
905 source: &TypeSpec,
906 target: &TypeSpec,
907 output: &TypeSpec,
908 ) -> Vec<TokenStream> {
909 output
910 .fields
911 .iter()
912 .map(|field| self.compute_antiproject_field(source, target, field.blade_index))
913 .collect()
914 }
915
916 fn compute_project_field(
921 &self,
922 source: &TypeSpec,
923 target: &TypeSpec,
924 result_blade: usize,
925 ) -> TokenStream {
926 self.compute_projection_field_impl(source, target, result_blade, false)
927 }
928
929 fn compute_antiproject_field(
934 &self,
935 source: &TypeSpec,
936 target: &TypeSpec,
937 result_blade: usize,
938 ) -> TokenStream {
939 self.compute_projection_field_impl(source, target, result_blade, true)
940 }
941
942 fn compute_projection_field_impl(
947 &self,
948 source: &TypeSpec,
949 target: &TypeSpec,
950 result_blade: usize,
951 use_antiproject: bool,
952 ) -> TokenStream {
953 let mut term_map: std::collections::HashMap<(String, String, String), i8> =
954 std::collections::HashMap::new();
955
956 for field_a in &source.fields {
957 for field_b_dual in &target.fields {
958 for field_b_outer in &target.fields {
959 let a_blade = field_a.blade_index;
960 let b_dual_blade = field_b_dual.blade_index;
961 let b_outer_blade = field_b_outer.blade_index;
962
963 let (sign, result) = if use_antiproject {
965 self.table
966 .antiproject_triple(a_blade, b_dual_blade, b_outer_blade)
967 } else {
968 self.table
969 .project_triple(a_blade, b_dual_blade, b_outer_blade)
970 };
971
972 if sign == 0 {
973 continue;
974 }
975
976 if result == result_blade {
977 let key = (
978 field_a.name.clone(),
979 field_b_dual.name.clone(),
980 field_b_outer.name.clone(),
981 );
982 *term_map.entry(key).or_insert(0) += sign;
983 }
984 }
985 }
986 }
987
988 self.triple_term_map_to_tokens(term_map, "target")
989 }
990
991 fn triple_term_map_to_tokens(
995 &self,
996 term_map: std::collections::HashMap<(String, String, String), i8>,
997 rhs_name: &str,
998 ) -> TokenStream {
999 if term_map.is_empty() {
1000 return quote! { T::zero() };
1001 }
1002
1003 let mut terms: Vec<_> = term_map
1005 .into_iter()
1006 .filter(|(_, coeff)| *coeff != 0)
1007 .collect();
1008
1009 if terms.is_empty() {
1010 return quote! { T::zero() };
1011 }
1012
1013 terms.sort_by(|a, b| a.0.cmp(&b.0));
1015
1016 let rhs_ident = format_ident!("{}", rhs_name);
1017 let mut expr_parts: Vec<TokenStream> = Vec::new();
1018
1019 for (i, ((a, b1, b2), coeff)) in terms.iter().enumerate() {
1020 let a_ident = format_ident!("{}", a);
1021 let b1_ident = format_ident!("{}", b1);
1022 let b2_ident = format_ident!("{}", b2);
1023
1024 let abs_coeff = coeff.abs();
1025 let is_negative = *coeff < 0;
1026
1027 let base_expr =
1029 quote! { self.#a_ident() * #rhs_ident.#b1_ident() * #rhs_ident.#b2_ident() };
1030
1031 let coeff_expr = match abs_coeff {
1033 1 => base_expr,
1034 2 => quote! { T::TWO * #base_expr },
1035 n => {
1036 quote! { T::from_i8(#n) * #base_expr }
1037 }
1038 };
1039
1040 let term_expr = match (i, is_negative) {
1042 (0, false) => coeff_expr,
1043 (0, true) => quote! { -(#coeff_expr) },
1044 (_, false) => quote! { + #coeff_expr },
1045 (_, true) => quote! { - #coeff_expr },
1046 };
1047
1048 expr_parts.push(term_expr);
1049 }
1050
1051 quote! { #(#expr_parts)* }
1052 }
1053
1054 fn all_expressions_are_zero(exprs: &[TokenStream]) -> bool {
1056 exprs.iter().all(|e| {
1057 let s = e.to_string();
1058 s.contains("T :: zero ()") || s.contains("T::zero()") || s == "T :: zero ()"
1060 })
1061 }
1062
1063 fn compute_scalar_product_expression(&self, a: &TypeSpec, b: &TypeSpec) -> TokenStream {
1067 let mut terms: Vec<TokenStream> = Vec::new();
1068
1069 for field_a in &a.fields {
1070 for field_b in &b.fields {
1071 let (sign, result) = self
1072 .table
1073 .geometric(field_a.blade_index, field_b.blade_index);
1074
1075 let result_grade = Blade::from_index(result).grade();
1077 if result_grade != 0 || sign == 0 {
1078 continue;
1079 }
1080
1081 let a_ident = format_ident!("{}", field_a.name);
1082 let b_ident = format_ident!("{}", field_b.name);
1083
1084 let term_expr = if terms.is_empty() {
1085 if sign > 0 {
1086 quote! { self.#a_ident() * rhs.#b_ident() }
1087 } else {
1088 quote! { -(self.#a_ident() * rhs.#b_ident()) }
1089 }
1090 } else if sign > 0 {
1091 quote! { + self.#a_ident() * rhs.#b_ident() }
1092 } else {
1093 quote! { - self.#a_ident() * rhs.#b_ident() }
1094 };
1095 terms.push(term_expr);
1096 }
1097 }
1098
1099 if terms.is_empty() {
1100 quote! { T::zero() }
1101 } else {
1102 quote! { #(#terms)* }
1103 }
1104 }
1105
1106 fn compute_dot_expression(&self, a: &TypeSpec, b: &TypeSpec) -> TokenStream {
1110 let mut terms: Vec<TokenStream> = Vec::new();
1111
1112 for field_a in &a.fields {
1113 for field_b in &b.fields {
1114 let (sign, _result) = self.table.dot(field_a.blade_index, field_b.blade_index);
1115
1116 if sign == 0 {
1118 continue;
1119 }
1120
1121 let a_ident = format_ident!("{}", field_a.name);
1122 let b_ident = format_ident!("{}", field_b.name);
1123
1124 let term_expr = if terms.is_empty() {
1125 if sign > 0 {
1126 quote! { self.#a_ident() * rhs.#b_ident() }
1127 } else {
1128 quote! { -(self.#a_ident() * rhs.#b_ident()) }
1129 }
1130 } else if sign > 0 {
1131 quote! { + self.#a_ident() * rhs.#b_ident() }
1132 } else {
1133 quote! { - self.#a_ident() * rhs.#b_ident() }
1134 };
1135 terms.push(term_expr);
1136 }
1137 }
1138
1139 if terms.is_empty() {
1140 quote! { T::zero() }
1141 } else {
1142 quote! { #(#terms)* }
1143 }
1144 }
1145
1146 fn compute_antidot_expression(&self, a: &TypeSpec, b: &TypeSpec) -> TokenStream {
1150 let mut terms: Vec<TokenStream> = Vec::new();
1151
1152 for field_a in &a.fields {
1153 for field_b in &b.fields {
1154 let (sign, _result) = self.table.antidot(field_a.blade_index, field_b.blade_index);
1155
1156 if sign == 0 {
1158 continue;
1159 }
1160
1161 let a_ident = format_ident!("{}", field_a.name);
1162 let b_ident = format_ident!("{}", field_b.name);
1163
1164 let term_expr = if terms.is_empty() {
1165 if sign > 0 {
1166 quote! { self.#a_ident() * rhs.#b_ident() }
1167 } else {
1168 quote! { -(self.#a_ident() * rhs.#b_ident()) }
1169 }
1170 } else if sign > 0 {
1171 quote! { + self.#a_ident() * rhs.#b_ident() }
1172 } else {
1173 quote! { - self.#a_ident() * rhs.#b_ident() }
1174 };
1175 terms.push(term_expr);
1176 }
1177 }
1178
1179 if terms.is_empty() {
1180 quote! { T::zero() }
1181 } else {
1182 quote! { #(#terms)* }
1183 }
1184 }
1185
    /// Whether the type spans exactly one grade (a homogeneous, blade-like
    /// type); product-trait generation in this file is restricted to these.
    fn is_single_grade_blade(&self, ty: &TypeSpec) -> bool {
        ty.grades.len() == 1
    }
1201
1202 fn has_nonzero_bulk_norm(&self, ty: &TypeSpec) -> bool {
1210 let degenerate_indices: Vec<usize> = self
1212 .spec
1213 .signature
1214 .basis
1215 .iter()
1216 .filter(|b| b.metric == 0)
1217 .map(|b| b.index)
1218 .collect();
1219
1220 if degenerate_indices.is_empty() {
1222 return true;
1223 }
1224
1225 ty.fields.iter().any(|f| {
1227 !degenerate_indices.iter().any(|°_idx| {
1229 (f.blade_index >> deg_idx) & 1 == 1
1231 })
1232 })
1233 }
1234
1235 fn generate_all_product_traits(&self) -> TokenStream {
1237 let mut impls = Vec::new();
1238
1239 let is_degenerate = self.spec.signature.r > 0;
1248 let wrapper_kind = if is_degenerate {
1249 WrapperKind::Unitized } else {
1251 WrapperKind::Unit };
1253
1254 for entry in &self.spec.products.wedge {
1256 if let (Some(a), Some(b), Some(out)) = (
1257 self.find_type(&entry.lhs),
1258 self.find_type(&entry.rhs),
1259 self.find_type(&entry.output),
1260 ) {
1261 if self.is_single_grade_blade(a) && self.is_single_grade_blade(b) {
1262 impls.push(self.generate_wedge_trait(a, b, out, entry));
1263
1264 impls.push(self.generate_wrapper_product_trait(
1266 a,
1267 b,
1268 out,
1269 SymbolicProductKind::Wedge,
1270 wrapper_kind,
1271 WrapperPosition::Lhs,
1272 ));
1273 impls.push(self.generate_wrapper_product_trait(
1274 a,
1275 b,
1276 out,
1277 SymbolicProductKind::Wedge,
1278 wrapper_kind,
1279 WrapperPosition::Rhs,
1280 ));
1281 impls.push(self.generate_wrapper_product_trait(
1282 a,
1283 b,
1284 out,
1285 SymbolicProductKind::Wedge,
1286 wrapper_kind,
1287 WrapperPosition::Both,
1288 ));
1289 }
1290 }
1291 }
1292
1293 for entry in &self.spec.products.antiwedge {
1295 if let (Some(a), Some(b), Some(out)) = (
1296 self.find_type(&entry.lhs),
1297 self.find_type(&entry.rhs),
1298 self.find_type(&entry.output),
1299 ) {
1300 if self.is_single_grade_blade(a) && self.is_single_grade_blade(b) {
1301 impls.push(self.generate_antiwedge_trait(a, b, out, entry));
1302
1303 impls.push(self.generate_wrapper_product_trait(
1305 a,
1306 b,
1307 out,
1308 SymbolicProductKind::Antiwedge,
1309 wrapper_kind,
1310 WrapperPosition::Lhs,
1311 ));
1312 impls.push(self.generate_wrapper_product_trait(
1313 a,
1314 b,
1315 out,
1316 SymbolicProductKind::Antiwedge,
1317 wrapper_kind,
1318 WrapperPosition::Rhs,
1319 ));
1320 impls.push(self.generate_wrapper_product_trait(
1321 a,
1322 b,
1323 out,
1324 SymbolicProductKind::Antiwedge,
1325 wrapper_kind,
1326 WrapperPosition::Both,
1327 ));
1328 }
1329 }
1330 }
1331
1332 for entry in &self.spec.products.left_contraction {
1334 if let (Some(a), Some(b), Some(out)) = (
1335 self.find_type(&entry.lhs),
1336 self.find_type(&entry.rhs),
1337 self.find_type(&entry.output),
1338 ) {
1339 if self.is_single_grade_blade(a) && self.is_single_grade_blade(b) {
1340 impls.push(self.generate_left_contract_trait(a, b, out, entry));
1341
1342 impls.push(self.generate_wrapper_product_trait(
1344 a,
1345 b,
1346 out,
1347 SymbolicProductKind::LeftContraction,
1348 wrapper_kind,
1349 WrapperPosition::Lhs,
1350 ));
1351 impls.push(self.generate_wrapper_product_trait(
1352 a,
1353 b,
1354 out,
1355 SymbolicProductKind::LeftContraction,
1356 wrapper_kind,
1357 WrapperPosition::Rhs,
1358 ));
1359 impls.push(self.generate_wrapper_product_trait(
1360 a,
1361 b,
1362 out,
1363 SymbolicProductKind::LeftContraction,
1364 wrapper_kind,
1365 WrapperPosition::Both,
1366 ));
1367 }
1368 }
1369 }
1370
1371 for entry in &self.spec.products.right_contraction {
1373 if let (Some(a), Some(b), Some(out)) = (
1374 self.find_type(&entry.lhs),
1375 self.find_type(&entry.rhs),
1376 self.find_type(&entry.output),
1377 ) {
1378 if self.is_single_grade_blade(a) && self.is_single_grade_blade(b) {
1379 impls.push(self.generate_right_contract_trait(a, b, out, entry));
1380
1381 impls.push(self.generate_wrapper_product_trait(
1383 a,
1384 b,
1385 out,
1386 SymbolicProductKind::RightContraction,
1387 wrapper_kind,
1388 WrapperPosition::Lhs,
1389 ));
1390 impls.push(self.generate_wrapper_product_trait(
1391 a,
1392 b,
1393 out,
1394 SymbolicProductKind::RightContraction,
1395 wrapper_kind,
1396 WrapperPosition::Rhs,
1397 ));
1398 impls.push(self.generate_wrapper_product_trait(
1399 a,
1400 b,
1401 out,
1402 SymbolicProductKind::RightContraction,
1403 wrapper_kind,
1404 WrapperPosition::Both,
1405 ));
1406 }
1407 }
1408 }
1409
1410 for versor_type in &self.spec.types {
1412 if versor_type.alias_of.is_some() {
1413 continue;
1414 }
1415 if let Some(ref versor_spec) = versor_type.versor {
1416 let targets = if versor_spec.sandwich_targets.is_empty() {
1417 self.infer_sandwich_targets(versor_type)
1419 } else {
1420 versor_spec.sandwich_targets.clone()
1421 };
1422
1423 for target_name in &targets {
1424 if let Some(target_type) = self.find_type(target_name) {
1425 impls.push(
1426 self.generate_sandwich_trait_from_versor(versor_type, target_type),
1427 );
1428
1429 impls.push(self.generate_wrapper_sandwich_trait(
1431 versor_type,
1432 target_type,
1433 wrapper_kind,
1434 WrapperPosition::Lhs,
1435 ));
1436 impls.push(self.generate_wrapper_sandwich_trait(
1437 versor_type,
1438 target_type,
1439 wrapper_kind,
1440 WrapperPosition::Rhs,
1441 ));
1442 impls.push(self.generate_wrapper_sandwich_trait(
1443 versor_type,
1444 target_type,
1445 wrapper_kind,
1446 WrapperPosition::Both,
1447 ));
1448 }
1449 }
1450 }
1451 }
1452
1453 for versor_type in &self.spec.types {
1455 if versor_type.alias_of.is_some() {
1456 continue;
1457 }
1458 if let Some(ref versor_spec) = versor_type.versor {
1459 let targets = if versor_spec.sandwich_targets.is_empty() {
1460 self.infer_sandwich_targets(versor_type)
1462 } else {
1463 versor_spec.sandwich_targets.clone()
1464 };
1465
1466 for target_name in &targets {
1467 if let Some(target_type) = self.find_type(target_name) {
1468 impls.push(
1469 self.generate_antisandwich_trait_from_versor(versor_type, target_type),
1470 );
1471
1472 impls.push(self.generate_wrapper_antisandwich_trait(
1474 versor_type,
1475 target_type,
1476 wrapper_kind,
1477 WrapperPosition::Lhs,
1478 ));
1479 impls.push(self.generate_wrapper_antisandwich_trait(
1480 versor_type,
1481 target_type,
1482 wrapper_kind,
1483 WrapperPosition::Rhs,
1484 ));
1485 impls.push(self.generate_wrapper_antisandwich_trait(
1486 versor_type,
1487 target_type,
1488 wrapper_kind,
1489 WrapperPosition::Both,
1490 ));
1491 }
1492 }
1493 }
1494 }
1495
1496 for versor_type in &self.spec.types {
1499 if versor_type.alias_of.is_some() {
1500 continue;
1501 }
1502 if let Some(ref versor_spec) = versor_type.versor {
1503 let targets = if versor_spec.sandwich_targets.is_empty() {
1504 self.infer_sandwich_targets(versor_type)
1505 } else {
1506 versor_spec.sandwich_targets.clone()
1507 };
1508
1509 for target_name in &targets {
1510 if let Some(target_type) = self.find_type(target_name) {
1511 impls.push(
1512 self.generate_transform_trait_from_versor(versor_type, target_type),
1513 );
1514
1515 impls.push(self.generate_wrapper_transform_trait(
1517 versor_type,
1518 target_type,
1519 wrapper_kind,
1520 WrapperPosition::Lhs,
1521 ));
1522 impls.push(self.generate_wrapper_transform_trait(
1523 versor_type,
1524 target_type,
1525 wrapper_kind,
1526 WrapperPosition::Rhs,
1527 ));
1528 impls.push(self.generate_wrapper_transform_trait(
1529 versor_type,
1530 target_type,
1531 wrapper_kind,
1532 WrapperPosition::Both,
1533 ));
1534 }
1535 }
1536 }
1537 }
1538
1539 for versor_type in &self.spec.types {
1542 if versor_type.alias_of.is_some() {
1543 continue;
1544 }
1545 if let Some(ref versor_spec) = versor_type.versor {
1546 let targets = if versor_spec.sandwich_targets.is_empty() {
1547 self.infer_sandwich_targets(versor_type)
1548 } else {
1549 versor_spec.sandwich_targets.clone()
1550 };
1551
1552 for target_name in &targets {
1553 if let Some(target_type) = self.find_type(target_name) {
1554 impls.push(self.generate_inverse_sandwich_trait(versor_type, target_type));
1555 }
1556 }
1557 }
1558 }
1559
1560 for source_type in &self.spec.types {
1563 if source_type.alias_of.is_some() {
1564 continue;
1565 }
1566 if source_type.versor.is_some() {
1568 continue;
1569 }
1570 if source_type.inverse_sandwich_targets.is_empty() {
1572 continue;
1573 }
1574
1575 for target_name in &source_type.inverse_sandwich_targets {
1576 if let Some(target_type) = self.find_type(target_name) {
1577 impls.push(self.generate_inverse_sandwich_trait(source_type, target_type));
1578 }
1579 }
1580 }
1581
1582 for versor_type in &self.spec.types {
1585 if versor_type.alias_of.is_some() {
1586 continue;
1587 }
1588 if let Some(ref versor_spec) = versor_type.versor {
1589 let targets = if versor_spec.sandwich_targets.is_empty() {
1590 self.infer_sandwich_targets(versor_type)
1591 } else {
1592 versor_spec.sandwich_targets.clone()
1593 };
1594
1595 for target_name in &targets {
1596 if let Some(target_type) = self.find_type(target_name) {
1597 impls.push(
1598 self.generate_inverse_antisandwich_trait(versor_type, target_type),
1599 );
1600 }
1601 }
1602 }
1603 }
1604
1605 for source_type in &self.spec.types {
1608 if source_type.alias_of.is_some() {
1609 continue;
1610 }
1611 if source_type.versor.is_some() {
1613 continue;
1614 }
1615 if source_type.inverse_sandwich_targets.is_empty() {
1617 continue;
1618 }
1619
1620 for target_name in &source_type.inverse_sandwich_targets {
1621 if let Some(target_type) = self.find_type(target_name) {
1622 impls.push(self.generate_inverse_antisandwich_trait(source_type, target_type));
1623 }
1624 }
1625 }
1626
1627 impls.extend(self.generate_versor_traits());
1630
1631 for entry in &self.spec.products.scalar {
1633 if let (Some(a), Some(b), Some(out)) = (
1634 self.find_type(&entry.lhs),
1635 self.find_type(&entry.rhs),
1636 self.find_type(&entry.output),
1637 ) {
1638 if self.is_single_grade_blade(a) && self.is_single_grade_blade(b) {
1639 impls.push(self.generate_scalar_product_trait(a, b, out, entry));
1640
1641 impls.push(self.generate_wrapper_scalar_returning_product_trait(
1643 a,
1644 b,
1645 SymbolicProductKind::Scalar,
1646 wrapper_kind,
1647 WrapperPosition::Lhs,
1648 ));
1649 impls.push(self.generate_wrapper_scalar_returning_product_trait(
1650 a,
1651 b,
1652 SymbolicProductKind::Scalar,
1653 wrapper_kind,
1654 WrapperPosition::Rhs,
1655 ));
1656 impls.push(self.generate_wrapper_scalar_returning_product_trait(
1657 a,
1658 b,
1659 SymbolicProductKind::Scalar,
1660 wrapper_kind,
1661 WrapperPosition::Both,
1662 ));
1663 }
1664 }
1665 }
1666
1667 for entry in &self.spec.products.bulk_contraction {
1669 if let (Some(a), Some(b), Some(out)) = (
1670 self.find_type(&entry.lhs),
1671 self.find_type(&entry.rhs),
1672 self.find_type(&entry.output),
1673 ) {
1674 if self.is_single_grade_blade(a) && self.is_single_grade_blade(b) {
1675 impls.push(self.generate_bulk_contract_trait(a, b, out, entry));
1676
1677 impls.push(self.generate_wrapper_product_trait(
1679 a,
1680 b,
1681 out,
1682 SymbolicProductKind::BulkContraction,
1683 wrapper_kind,
1684 WrapperPosition::Lhs,
1685 ));
1686 impls.push(self.generate_wrapper_product_trait(
1687 a,
1688 b,
1689 out,
1690 SymbolicProductKind::BulkContraction,
1691 wrapper_kind,
1692 WrapperPosition::Rhs,
1693 ));
1694 impls.push(self.generate_wrapper_product_trait(
1695 a,
1696 b,
1697 out,
1698 SymbolicProductKind::BulkContraction,
1699 wrapper_kind,
1700 WrapperPosition::Both,
1701 ));
1702 }
1703 }
1704 }
1705
1706 for entry in &self.spec.products.weight_contraction {
1708 if let (Some(a), Some(b), Some(out)) = (
1709 self.find_type(&entry.lhs),
1710 self.find_type(&entry.rhs),
1711 self.find_type(&entry.output),
1712 ) {
1713 if self.is_single_grade_blade(a) && self.is_single_grade_blade(b) {
1714 impls.push(self.generate_weight_contract_trait(a, b, out, entry));
1715
1716 impls.push(self.generate_wrapper_product_trait(
1718 a,
1719 b,
1720 out,
1721 SymbolicProductKind::WeightContraction,
1722 wrapper_kind,
1723 WrapperPosition::Lhs,
1724 ));
1725 impls.push(self.generate_wrapper_product_trait(
1726 a,
1727 b,
1728 out,
1729 SymbolicProductKind::WeightContraction,
1730 wrapper_kind,
1731 WrapperPosition::Rhs,
1732 ));
1733 impls.push(self.generate_wrapper_product_trait(
1734 a,
1735 b,
1736 out,
1737 SymbolicProductKind::WeightContraction,
1738 wrapper_kind,
1739 WrapperPosition::Both,
1740 ));
1741 }
1742 }
1743 }
1744
1745 for entry in &self.spec.products.bulk_expansion {
1747 if let (Some(a), Some(b), Some(out)) = (
1748 self.find_type(&entry.lhs),
1749 self.find_type(&entry.rhs),
1750 self.find_type(&entry.output),
1751 ) {
1752 if self.is_single_grade_blade(a) && self.is_single_grade_blade(b) {
1753 impls.push(self.generate_bulk_expand_trait(a, b, out, entry));
1754
1755 impls.push(self.generate_wrapper_product_trait(
1757 a,
1758 b,
1759 out,
1760 SymbolicProductKind::BulkExpansion,
1761 wrapper_kind,
1762 WrapperPosition::Lhs,
1763 ));
1764 impls.push(self.generate_wrapper_product_trait(
1765 a,
1766 b,
1767 out,
1768 SymbolicProductKind::BulkExpansion,
1769 wrapper_kind,
1770 WrapperPosition::Rhs,
1771 ));
1772 impls.push(self.generate_wrapper_product_trait(
1773 a,
1774 b,
1775 out,
1776 SymbolicProductKind::BulkExpansion,
1777 wrapper_kind,
1778 WrapperPosition::Both,
1779 ));
1780 }
1781 }
1782 }
1783
1784 for entry in &self.spec.products.weight_expansion {
1786 if let (Some(a), Some(b), Some(out)) = (
1787 self.find_type(&entry.lhs),
1788 self.find_type(&entry.rhs),
1789 self.find_type(&entry.output),
1790 ) {
1791 if self.is_single_grade_blade(a) && self.is_single_grade_blade(b) {
1792 impls.push(self.generate_weight_expand_trait(a, b, out, entry));
1793
1794 impls.push(self.generate_wrapper_product_trait(
1796 a,
1797 b,
1798 out,
1799 SymbolicProductKind::WeightExpansion,
1800 wrapper_kind,
1801 WrapperPosition::Lhs,
1802 ));
1803 impls.push(self.generate_wrapper_product_trait(
1804 a,
1805 b,
1806 out,
1807 SymbolicProductKind::WeightExpansion,
1808 wrapper_kind,
1809 WrapperPosition::Rhs,
1810 ));
1811 impls.push(self.generate_wrapper_product_trait(
1812 a,
1813 b,
1814 out,
1815 SymbolicProductKind::WeightExpansion,
1816 wrapper_kind,
1817 WrapperPosition::Both,
1818 ));
1819 }
1820 }
1821 }
1822
1823 for entry in &self.spec.products.dot {
1829 if let (Some(a), Some(b)) = (self.find_type(&entry.lhs), self.find_type(&entry.rhs)) {
1830 impls.push(self.generate_dot_trait(a, b, entry));
1831
1832 impls.push(self.generate_wrapper_scalar_returning_product_trait(
1834 a,
1835 b,
1836 SymbolicProductKind::Dot,
1837 wrapper_kind,
1838 WrapperPosition::Lhs,
1839 ));
1840 impls.push(self.generate_wrapper_scalar_returning_product_trait(
1841 a,
1842 b,
1843 SymbolicProductKind::Dot,
1844 wrapper_kind,
1845 WrapperPosition::Rhs,
1846 ));
1847 impls.push(self.generate_wrapper_scalar_returning_product_trait(
1848 a,
1849 b,
1850 SymbolicProductKind::Dot,
1851 wrapper_kind,
1852 WrapperPosition::Both,
1853 ));
1854 }
1855 }
1856
1857 for entry in &self.spec.products.antidot {
1859 if let (Some(a), Some(b)) = (self.find_type(&entry.lhs), self.find_type(&entry.rhs)) {
1860 impls.push(self.generate_antidot_trait(a, b, entry));
1861
1862 impls.push(self.generate_wrapper_scalar_returning_product_trait(
1864 a,
1865 b,
1866 SymbolicProductKind::Antidot,
1867 wrapper_kind,
1868 WrapperPosition::Lhs,
1869 ));
1870 impls.push(self.generate_wrapper_scalar_returning_product_trait(
1871 a,
1872 b,
1873 SymbolicProductKind::Antidot,
1874 wrapper_kind,
1875 WrapperPosition::Rhs,
1876 ));
1877 impls.push(self.generate_wrapper_scalar_returning_product_trait(
1878 a,
1879 b,
1880 SymbolicProductKind::Antidot,
1881 wrapper_kind,
1882 WrapperPosition::Both,
1883 ));
1884 }
1885 }
1886
1887 if self.spec.signature.r > 0 {
1891 let single_grade_types: Vec<_> = self
1893 .spec
1894 .types
1895 .iter()
1896 .filter(|t| {
1897 t.alias_of.is_none() && self.is_single_grade_blade(t) && !t.grades.contains(&0) })
1899 .collect();
1900
1901 for source in &single_grade_types {
1904 for target in &single_grade_types {
1905 let source_grade = source.grades.first().copied().unwrap_or(0);
1906
1907 if let Some(output) = single_grade_types
1909 .iter()
1910 .find(|t| t.grades.contains(&source_grade))
1911 {
1912 let field_exprs = self.compute_project_expressions(source, target, output);
1914 if !Self::all_expressions_are_zero(&field_exprs) {
1915 impls.push(self.generate_project_trait(source, target, output));
1916
1917 impls.push(self.generate_wrapper_project_trait(
1919 source,
1920 target,
1921 output,
1922 wrapper_kind,
1923 WrapperPosition::Lhs,
1924 ));
1925 impls.push(self.generate_wrapper_project_trait(
1926 source,
1927 target,
1928 output,
1929 wrapper_kind,
1930 WrapperPosition::Rhs,
1931 ));
1932 impls.push(self.generate_wrapper_project_trait(
1933 source,
1934 target,
1935 output,
1936 wrapper_kind,
1937 WrapperPosition::Both,
1938 ));
1939 }
1940 }
1941 }
1942 }
1943
1944 for source in &single_grade_types {
1947 for target in &single_grade_types {
1948 let source_grade = source.grades.first().copied().unwrap_or(0);
1949
1950 if let Some(output) = single_grade_types
1952 .iter()
1953 .find(|t| t.grades.contains(&source_grade))
1954 {
1955 let field_exprs =
1957 self.compute_antiproject_expressions(source, target, output);
1958 if !Self::all_expressions_are_zero(&field_exprs) {
1959 impls.push(self.generate_antiproject_trait(source, target, output));
1960
1961 impls.push(self.generate_wrapper_antiproject_trait(
1963 source,
1964 target,
1965 output,
1966 wrapper_kind,
1967 WrapperPosition::Lhs,
1968 ));
1969 impls.push(self.generate_wrapper_antiproject_trait(
1970 source,
1971 target,
1972 output,
1973 wrapper_kind,
1974 WrapperPosition::Rhs,
1975 ));
1976 impls.push(self.generate_wrapper_antiproject_trait(
1977 source,
1978 target,
1979 output,
1980 wrapper_kind,
1981 WrapperPosition::Both,
1982 ));
1983 }
1984 }
1985 }
1986 }
1987 }
1988
1989 for ty in &self.spec.types {
1993 if ty.alias_of.is_none() {
1994 impls.push(self.generate_reverse_trait(ty));
1995 }
1996 }
1997
1998 for ty in &self.spec.types {
2000 if ty.alias_of.is_none() {
2001 impls.push(self.generate_antireverse_trait(ty));
2002 }
2003 }
2004
2005 for ty in &self.spec.types {
2007 if ty.alias_of.is_none() {
2008 impls.push(self.generate_involute_trait(ty));
2009 }
2010 }
2011
2012 for ty in &self.spec.types {
2015 if ty.alias_of.is_none() {
2016 if let Some(impl_tokens) = self.generate_right_complement_trait(ty) {
2017 impls.push(impl_tokens);
2018 }
2019 }
2020 }
2021
2022 for ty in &self.spec.types {
2024 if ty.alias_of.is_none() {
2025 if let Some(impl_tokens) = self.generate_weight_dual_trait(ty) {
2026 impls.push(impl_tokens);
2027 }
2028 }
2029 }
2030
2031 impls.extend(self.generate_versor_inverse_traits());
2033
2034 quote! { #(#impls)* }
2035 }
2036
2037 fn generate_dot_trait(
2042 &self,
2043 a: &TypeSpec,
2044 b: &TypeSpec,
2045 _entry: &crate::spec::ProductEntry,
2046 ) -> TokenStream {
2047 let a_name = format_ident!("{}", a.name);
2048 let b_name = format_ident!("{}", b.name);
2049
2050 let expr = self.compute_dot_expression(a, b);
2052
2053 quote! {
2054 impl<T: Float> Dot<#b_name<T>> for #a_name<T> {
2055 type Scalar = T;
2056
2057 #[inline]
2058 fn dot(&self, rhs: &#b_name<T>) -> T {
2059 #expr
2060 }
2061 }
2062 }
2063 }
2064
2065 fn generate_antidot_trait(
2067 &self,
2068 a: &TypeSpec,
2069 b: &TypeSpec,
2070 _entry: &crate::spec::ProductEntry,
2071 ) -> TokenStream {
2072 let a_name = format_ident!("{}", a.name);
2073 let b_name = format_ident!("{}", b.name);
2074
2075 let expr = self.compute_antidot_expression(a, b);
2077
2078 quote! {
2079 impl<T: Float> Antidot<#b_name<T>> for #a_name<T> {
2080 type Scalar = T;
2081
2082 #[inline]
2083 fn antidot(&self, rhs: &#b_name<T>) -> T {
2084 #expr
2085 }
2086 }
2087 }
2088 }
2089
2090 fn generate_project_trait(
2094 &self,
2095 source: &TypeSpec,
2096 target: &TypeSpec,
2097 output: &TypeSpec,
2098 ) -> TokenStream {
2099 let source_name = format_ident!("{}", source.name);
2100 let target_name = format_ident!("{}", target.name);
2101 let out_name = format_ident!("{}", output.name);
2102
2103 let field_exprs = self.compute_project_expressions(source, target, output);
2105
2106 let constructor_call = quote! { #out_name::new_unchecked(#(#field_exprs),*) };
2107
2108 quote! {
2109 impl<T: Float> Project<#target_name<T>> for #source_name<T> {
2110 type Output = #out_name<T>;
2111
2112 #[inline]
2113 fn project(&self, target: &#target_name<T>) -> #out_name<T> {
2114 #constructor_call
2115 }
2116 }
2117 }
2118 }
2119
2120 fn generate_antiproject_trait(
2124 &self,
2125 source: &TypeSpec,
2126 target: &TypeSpec,
2127 output: &TypeSpec,
2128 ) -> TokenStream {
2129 let source_name = format_ident!("{}", source.name);
2130 let target_name = format_ident!("{}", target.name);
2131 let out_name = format_ident!("{}", output.name);
2132
2133 let field_exprs = self.compute_antiproject_expressions(source, target, output);
2135
2136 let constructor_call = quote! { #out_name::new_unchecked(#(#field_exprs),*) };
2137
2138 quote! {
2139 impl<T: Float> Antiproject<#target_name<T>> for #source_name<T> {
2140 type Output = #out_name<T>;
2141
2142 #[inline]
2143 fn antiproject(&self, target: &#target_name<T>) -> #out_name<T> {
2144 #constructor_call
2145 }
2146 }
2147 }
2148 }
2149
2150 fn generate_wrapper_project_trait(
2154 &self,
2155 source: &TypeSpec,
2156 target: &TypeSpec,
2157 output: &TypeSpec,
2158 wrapper_kind: WrapperKind,
2159 wrapper_pos: WrapperPosition,
2160 ) -> TokenStream {
2161 let source_name = format_ident!("{}", source.name);
2162 let target_name = format_ident!("{}", target.name);
2163 let out_name = format_ident!("{}", output.name);
2164 let wrapper_name = Self::wrapper_type_name(wrapper_kind);
2165
2166 match wrapper_pos {
2167 WrapperPosition::Lhs => {
2168 quote! {
2169 impl<T: Float> Project<#target_name<T>> for #wrapper_name<#source_name<T>> {
2170 type Output = #out_name<T>;
2171
2172 #[inline]
2173 fn project(&self, target: &#target_name<T>) -> #out_name<T> {
2174 self.as_inner().project(target)
2175 }
2176 }
2177 }
2178 }
2179 WrapperPosition::Rhs => {
2180 quote! {
2181 impl<T: Float> Project<#wrapper_name<#target_name<T>>> for #source_name<T> {
2182 type Output = #out_name<T>;
2183
2184 #[inline]
2185 fn project(&self, target: &#wrapper_name<#target_name<T>>) -> #out_name<T> {
2186 self.project(target.as_inner())
2187 }
2188 }
2189 }
2190 }
2191 WrapperPosition::Both => {
2192 quote! {
2193 impl<T: Float> Project<#wrapper_name<#target_name<T>>> for #wrapper_name<#source_name<T>> {
2194 type Output = #out_name<T>;
2195
2196 #[inline]
2197 fn project(&self, target: &#wrapper_name<#target_name<T>>) -> #out_name<T> {
2198 self.as_inner().project(target.as_inner())
2199 }
2200 }
2201 }
2202 }
2203 }
2204 }
2205
2206 fn generate_wrapper_antiproject_trait(
2210 &self,
2211 source: &TypeSpec,
2212 target: &TypeSpec,
2213 output: &TypeSpec,
2214 wrapper_kind: WrapperKind,
2215 wrapper_pos: WrapperPosition,
2216 ) -> TokenStream {
2217 let source_name = format_ident!("{}", source.name);
2218 let target_name = format_ident!("{}", target.name);
2219 let out_name = format_ident!("{}", output.name);
2220 let wrapper_name = Self::wrapper_type_name(wrapper_kind);
2221
2222 match wrapper_pos {
2223 WrapperPosition::Lhs => {
2224 quote! {
2225 impl<T: Float> Antiproject<#target_name<T>> for #wrapper_name<#source_name<T>> {
2226 type Output = #out_name<T>;
2227
2228 #[inline]
2229 fn antiproject(&self, target: &#target_name<T>) -> #out_name<T> {
2230 self.as_inner().antiproject(target)
2231 }
2232 }
2233 }
2234 }
2235 WrapperPosition::Rhs => {
2236 quote! {
2237 impl<T: Float> Antiproject<#wrapper_name<#target_name<T>>> for #source_name<T> {
2238 type Output = #out_name<T>;
2239
2240 #[inline]
2241 fn antiproject(&self, target: &#wrapper_name<#target_name<T>>) -> #out_name<T> {
2242 self.antiproject(target.as_inner())
2243 }
2244 }
2245 }
2246 }
2247 WrapperPosition::Both => {
2248 quote! {
2249 impl<T: Float> Antiproject<#wrapper_name<#target_name<T>>> for #wrapper_name<#source_name<T>> {
2250 type Output = #out_name<T>;
2251
2252 #[inline]
2253 fn antiproject(&self, target: &#wrapper_name<#target_name<T>>) -> #out_name<T> {
2254 self.as_inner().antiproject(target.as_inner())
2255 }
2256 }
2257 }
2258 }
2259 }
2260 }
2261
2262 fn generate_wedge_trait(
2264 &self,
2265 a: &TypeSpec,
2266 b: &TypeSpec,
2267 output: &TypeSpec,
2268 _entry: &crate::spec::ProductEntry,
2269 ) -> TokenStream {
2270 let a_name = format_ident!("{}", a.name);
2271 let b_name = format_ident!("{}", b.name);
2272 let out_name = format_ident!("{}", output.name);
2273
2274 let field_exprs =
2276 self.compute_product_expressions(a, b, output, SymbolicProductKind::Wedge);
2277
2278 let constructor_call = quote! { #out_name::new_unchecked(#(#field_exprs),*) };
2280
2281 let doc = format!(
2283 "Wedge (exterior/outer) product of [`{}`] and [`{}`].\n\n\
2284 The wedge product `a ^ b` computes the outer product, which represents\n\
2285 the oriented subspace spanned by both operands. The result grade is the\n\
2286 sum of the input grades (or zero if they share common factors).",
2287 a.name, b.name
2288 );
2289
2290 quote! {
2291 #[doc = #doc]
2292 impl<T: Float> Wedge<#b_name<T>> for #a_name<T> {
2293 type Output = #out_name<T>;
2294
2295 #[inline]
2296 fn wedge(&self, rhs: &#b_name<T>) -> #out_name<T> {
2297 #constructor_call
2298 }
2299 }
2300 }
2301 }
2302
2303 fn generate_antiwedge_trait(
2305 &self,
2306 a: &TypeSpec,
2307 b: &TypeSpec,
2308 output: &TypeSpec,
2309 _entry: &crate::spec::ProductEntry,
2310 ) -> TokenStream {
2311 let a_name = format_ident!("{}", a.name);
2312 let b_name = format_ident!("{}", b.name);
2313 let out_name = format_ident!("{}", output.name);
2314
2315 let field_exprs =
2317 self.compute_product_expressions(a, b, output, SymbolicProductKind::Antiwedge);
2318
2319 let constructor_call = quote! { #out_name::new_unchecked(#(#field_exprs),*) };
2321
2322 let doc = format!(
2324 "Antiwedge (regressive/meet) product of [`{}`] and [`{}`].\n\n\
2325 The antiwedge product `a v b` computes the meet of two subspaces,\n\
2326 returning the largest subspace contained in both. In projective geometry,\n\
2327 this finds intersections (e.g., where two planes meet to form a line).",
2328 a.name, b.name
2329 );
2330
2331 quote! {
2332 #[doc = #doc]
2333 impl<T: Float> Antiwedge<#b_name<T>> for #a_name<T> {
2334 type Output = #out_name<T>;
2335
2336 #[inline]
2337 fn antiwedge(&self, rhs: &#b_name<T>) -> #out_name<T> {
2338 #constructor_call
2339 }
2340 }
2341 }
2342 }
2343
2344 fn generate_wrapper_product_trait(
2349 &self,
2350 a: &TypeSpec,
2351 b: &TypeSpec,
2352 output: &TypeSpec,
2353 kind: SymbolicProductKind,
2354 wrapper_kind: WrapperKind,
2355 wrapper_pos: WrapperPosition,
2356 ) -> TokenStream {
2357 let a_name = format_ident!("{}", a.name);
2358 let b_name = format_ident!("{}", b.name);
2359 let out_name = format_ident!("{}", output.name);
2360 let wrapper_name = Self::wrapper_type_name(wrapper_kind);
2361
2362 let (wrapper_a, wrapper_b) = match wrapper_pos {
2364 WrapperPosition::Lhs => (Some(wrapper_kind), None),
2365 WrapperPosition::Rhs => (None, Some(wrapper_kind)),
2366 WrapperPosition::Both => (Some(wrapper_kind), Some(wrapper_kind)),
2367 };
2368
2369 let field_exprs = self
2371 .compute_product_expressions_with_wrappers(a, wrapper_a, b, wrapper_b, output, kind);
2372
2373 let constructor_call = quote! { #out_name::new_unchecked(#(#field_exprs),*) };
2375
2376 let trait_type = Self::product_trait_type_name(kind);
2380 let trait_method = Self::product_trait_method_name(kind);
2381 match wrapper_pos {
2382 WrapperPosition::Lhs => {
2383 quote! {
2384 #[allow(unused_variables)]
2385 impl<T: Float> #trait_type<#b_name<T>> for #wrapper_name<#a_name<T>> {
2386 type Output = #out_name<T>;
2387
2388 #[inline]
2389 fn #trait_method(&self, rhs: &#b_name<T>) -> #out_name<T> {
2390 #constructor_call
2391 }
2392 }
2393 }
2394 }
2395 WrapperPosition::Rhs => {
2396 quote! {
2397 #[allow(unused_variables)]
2398 impl<T: Float> #trait_type<#wrapper_name<#b_name<T>>> for #a_name<T> {
2399 type Output = #out_name<T>;
2400
2401 #[inline]
2402 fn #trait_method(&self, rhs: &#wrapper_name<#b_name<T>>) -> #out_name<T> {
2403 #constructor_call
2404 }
2405 }
2406 }
2407 }
2408 WrapperPosition::Both => {
2409 quote! {
2410 #[allow(unused_variables)]
2411 impl<T: Float> #trait_type<#wrapper_name<#b_name<T>>> for #wrapper_name<#a_name<T>> {
2412 type Output = #out_name<T>;
2413
2414 #[inline]
2415 fn #trait_method(&self, rhs: &#wrapper_name<#b_name<T>>) -> #out_name<T> {
2416 #constructor_call
2417 }
2418 }
2419 }
2420 }
2421 }
2422 }
2423
2424 fn generate_wrapper_scalar_returning_product_trait(
2429 &self,
2430 a: &TypeSpec,
2431 b: &TypeSpec,
2432 kind: SymbolicProductKind,
2433 wrapper_kind: WrapperKind,
2434 wrapper_pos: WrapperPosition,
2435 ) -> TokenStream {
2436 let a_name = format_ident!("{}", a.name);
2437 let b_name = format_ident!("{}", b.name);
2438 let wrapper_name = Self::wrapper_type_name(wrapper_kind);
2439
2440 let (wrapper_a, wrapper_b) = match wrapper_pos {
2442 WrapperPosition::Lhs => (Some(wrapper_kind), None),
2443 WrapperPosition::Rhs => (None, Some(wrapper_kind)),
2444 WrapperPosition::Both => (Some(wrapper_kind), Some(wrapper_kind)),
2445 };
2446
2447 let scalar_type = self
2449 .spec
2450 .types
2451 .iter()
2452 .find(|t| t.grades == vec![0] && t.alias_of.is_none())
2453 .expect("Scalar type must exist");
2454
2455 let field_exprs = self.compute_product_expressions_with_wrappers(
2457 a,
2458 wrapper_a,
2459 b,
2460 wrapper_b,
2461 scalar_type,
2462 kind,
2463 );
2464
2465 let expr = if field_exprs.is_empty() {
2467 quote! { T::zero() }
2468 } else {
2469 field_exprs[0].clone()
2470 };
2471
2472 let trait_type = Self::product_trait_type_name(kind);
2474 let trait_method = Self::product_trait_method_name(kind);
2475 match wrapper_pos {
2476 WrapperPosition::Lhs => {
2477 quote! {
2478 #[allow(unused_variables)]
2479 impl<T: Float> #trait_type<#b_name<T>> for #wrapper_name<#a_name<T>> {
2480 type Scalar = T;
2481
2482 #[inline]
2483 fn #trait_method(&self, rhs: &#b_name<T>) -> T {
2484 #expr
2485 }
2486 }
2487 }
2488 }
2489 WrapperPosition::Rhs => {
2490 quote! {
2491 #[allow(unused_variables)]
2492 impl<T: Float> #trait_type<#wrapper_name<#b_name<T>>> for #a_name<T> {
2493 type Scalar = T;
2494
2495 #[inline]
2496 fn #trait_method(&self, rhs: &#wrapper_name<#b_name<T>>) -> T {
2497 #expr
2498 }
2499 }
2500 }
2501 }
2502 WrapperPosition::Both => {
2503 quote! {
2504 #[allow(unused_variables)]
2505 impl<T: Float> #trait_type<#wrapper_name<#b_name<T>>> for #wrapper_name<#a_name<T>> {
2506 type Scalar = T;
2507
2508 #[inline]
2509 fn #trait_method(&self, rhs: &#wrapper_name<#b_name<T>>) -> T {
2510 #expr
2511 }
2512 }
2513 }
2514 }
2515 }
2516 }
2517
2518 fn wrapper_type_name(wrapper: WrapperKind) -> proc_macro2::Ident {
2520 let name = match wrapper {
2521 WrapperKind::Unit => "Unit",
2522 WrapperKind::Bulk => "Bulk",
2523 WrapperKind::Unitized => "Unitized",
2524 WrapperKind::Ideal => "Ideal",
2525 WrapperKind::Proper => "Proper",
2526 WrapperKind::Spacelike => "Spacelike",
2527 WrapperKind::Null => "Null",
2528 };
2529 format_ident!("{}", name)
2530 }
2531
2532 fn product_trait_type_name(kind: SymbolicProductKind) -> proc_macro2::Ident {
2534 let name = match kind {
2535 SymbolicProductKind::Wedge => "Wedge",
2536 SymbolicProductKind::Antiwedge => "Antiwedge",
2537 SymbolicProductKind::LeftContraction => "LeftContract",
2538 SymbolicProductKind::RightContraction => "RightContract",
2539 SymbolicProductKind::Dot => "Dot",
2540 SymbolicProductKind::Antidot => "Antidot",
2541 SymbolicProductKind::Scalar => "ScalarProduct",
2542 SymbolicProductKind::BulkContraction => "BulkContract",
2543 SymbolicProductKind::WeightContraction => "WeightContract",
2544 SymbolicProductKind::BulkExpansion => "BulkExpand",
2545 SymbolicProductKind::WeightExpansion => "WeightExpand",
2546 _ => "UnknownProduct",
2547 };
2548 format_ident!("{}", name)
2549 }
2550
2551 fn product_trait_method_name(kind: SymbolicProductKind) -> proc_macro2::Ident {
2553 let name = match kind {
2554 SymbolicProductKind::Wedge => "wedge",
2555 SymbolicProductKind::Antiwedge => "antiwedge",
2556 SymbolicProductKind::LeftContraction => "left_contract",
2557 SymbolicProductKind::RightContraction => "right_contract",
2558 SymbolicProductKind::Dot => "dot",
2559 SymbolicProductKind::Antidot => "antidot",
2560 SymbolicProductKind::Scalar => "scalar_product",
2561 SymbolicProductKind::BulkContraction => "bulk_contract",
2562 SymbolicProductKind::WeightContraction => "weight_contract",
2563 SymbolicProductKind::BulkExpansion => "bulk_expand",
2564 SymbolicProductKind::WeightExpansion => "weight_expand",
2565 _ => "unknown_product",
2566 };
2567 format_ident!("{}", name)
2568 }
2569
2570 fn generate_left_contract_trait(
2572 &self,
2573 a: &TypeSpec,
2574 b: &TypeSpec,
2575 output: &TypeSpec,
2576 _entry: &crate::spec::ProductEntry,
2577 ) -> TokenStream {
2578 let a_name = format_ident!("{}", a.name);
2579 let b_name = format_ident!("{}", b.name);
2580 let out_name = format_ident!("{}", output.name);
2581
2582 let field_exprs =
2584 self.compute_product_expressions(a, b, output, SymbolicProductKind::LeftContraction);
2585
2586 let constructor_call = quote! { #out_name::new_unchecked(#(#field_exprs),*) };
2588
2589 let doc = format!(
2591 "Left contraction of [`{}`] into [`{}`].\n\n\
2592 The left contraction `a _| b` projects `a` onto `b`, returning the\n\
2593 component of `b` orthogonal to `a`. The result grade is grade(b) - grade(a)\n\
2594 (or zero if grade(a) > grade(b)).",
2595 a.name, b.name
2596 );
2597
2598 quote! {
2599 #[doc = #doc]
2600 impl<T: Float> LeftContract<#b_name<T>> for #a_name<T> {
2601 type Output = #out_name<T>;
2602
2603 #[inline]
2604 fn left_contract(&self, rhs: &#b_name<T>) -> #out_name<T> {
2605 #constructor_call
2606 }
2607 }
2608 }
2609 }
2610
2611 fn generate_right_contract_trait(
2613 &self,
2614 a: &TypeSpec,
2615 b: &TypeSpec,
2616 output: &TypeSpec,
2617 _entry: &crate::spec::ProductEntry,
2618 ) -> TokenStream {
2619 let a_name = format_ident!("{}", a.name);
2620 let b_name = format_ident!("{}", b.name);
2621 let out_name = format_ident!("{}", output.name);
2622
2623 let field_exprs =
2625 self.compute_product_expressions(a, b, output, SymbolicProductKind::RightContraction);
2626
2627 let constructor_call = quote! { #out_name::new_unchecked(#(#field_exprs),*) };
2629
2630 let doc = format!(
2632 "Right contraction of [`{}`] by [`{}`].\n\n\
2633 The right contraction `a |_ b` projects `b` onto `a`, returning the\n\
2634 component of `a` orthogonal to `b`. The result grade is grade(a) - grade(b)\n\
2635 (or zero if grade(b) > grade(a)).",
2636 a.name, b.name
2637 );
2638
2639 quote! {
2640 #[doc = #doc]
2641 impl<T: Float> RightContract<#b_name<T>> for #a_name<T> {
2642 type Output = #out_name<T>;
2643
2644 #[inline]
2645 fn right_contract(&self, rhs: &#b_name<T>) -> #out_name<T> {
2646 #constructor_call
2647 }
2648 }
2649 }
2650 }
2651
2652 fn generate_sandwich_trait_from_versor(
2654 &self,
2655 versor: &TypeSpec,
2656 operand: &TypeSpec,
2657 ) -> TokenStream {
2658 let versor_name = format_ident!("{}", versor.name);
2659 let operand_name = format_ident!("{}", operand.name);
2660
2661 let field_exprs = self.compute_sandwich_expressions(versor, operand);
2663
2664 let constructor_call = quote! { #operand_name::new_unchecked(#(#field_exprs),*) };
2666
2667 let doc = format!(
2669 "Sandwich product: [`{}`] x [`{}`] x rev([`{}`]).\n\n\
2670 The sandwich product `v x a x rev(v)` applies the transformation\n\
2671 represented by the versor `v` to the operand `a`. For rotors, this\n\
2672 performs rotation; for motors, it performs rigid body transformation.",
2673 versor.name, operand.name, versor.name
2674 );
2675
2676 quote! {
2679 #[doc = #doc]
2680 #[allow(unused_variables)]
2681 impl<T: Float> Sandwich<#operand_name<T>> for #versor_name<T> {
2682 type Output = #operand_name<T>;
2683
2684 #[inline]
2685 fn sandwich(&self, operand: &#operand_name<T>) -> #operand_name<T> {
2686 #constructor_call
2687 }
2688 }
2689 }
2690 }
2691
2692 fn generate_antisandwich_trait_from_versor(
2694 &self,
2695 versor: &TypeSpec,
2696 operand: &TypeSpec,
2697 ) -> TokenStream {
2698 let versor_name = format_ident!("{}", versor.name);
2699 let operand_name = format_ident!("{}", operand.name);
2700
2701 let field_exprs = self.compute_antisandwich_expressions(versor, operand);
2703
2704 let constructor_call = quote! { #operand_name::new_unchecked(#(#field_exprs),*) };
2706
2707 let doc = format!(
2709 "Antisandwich product: [`{}`] x [`{}`] x antirev([`{}`]).\n\n\
2710 The antisandwich product `v x a x antirev(v)` is the dual of the\n\
2711 sandwich product, used in Projective GA for transforming dual objects\n\
2712 (planes, ideal points). Motors use antisandwich for plane transforms.",
2713 versor.name, operand.name, versor.name
2714 );
2715
2716 quote! {
2719 #[doc = #doc]
2720 #[allow(unused_variables)]
2721 impl<T: Float> Antisandwich<#operand_name<T>> for #versor_name<T> {
2722 type Output = #operand_name<T>;
2723
2724 #[inline]
2725 fn antisandwich(&self, operand: &#operand_name<T>) -> #operand_name<T> {
2726 #constructor_call
2727 }
2728 }
2729 }
2730 }
2731
2732 fn generate_inverse_sandwich_trait(
2737 &self,
2738 versor: &TypeSpec,
2739 operand: &TypeSpec,
2740 ) -> TokenStream {
2741 let versor_name = format_ident!("{}", versor.name);
2742 let operand_name = format_ident!("{}", operand.name);
2743 let is_degenerate = self.spec.signature.r > 0;
2744
2745 let field_exprs = self.compute_sandwich_expressions(versor, operand);
2747
2748 let scaled_fields: Vec<TokenStream> = field_exprs
2750 .iter()
2751 .map(|expr| quote! { (#expr) * inv_norm_sq })
2752 .collect();
2753
2754 let constructor_call = quote! { #operand_name::new_unchecked(#(#scaled_fields),*) };
2756
2757 let norm_computation = if is_degenerate {
2760 quote! {
2761 let norm_sq = <Self as crate::norm::DegenerateNormed>::bulk_norm_squared(self);
2762 }
2763 } else {
2764 quote! {
2765 let norm_sq = <Self as crate::norm::Normed>::norm_squared(self);
2766 }
2767 };
2768
2769 quote! {
2771 #[allow(unused_variables)]
2772 impl<T: Float> InverseSandwich<#operand_name<T>> for #versor_name<T> {
2773 type Output = #operand_name<T>;
2774
2775 #[inline]
2776 fn try_inverse_sandwich(&self, operand: &#operand_name<T>) -> Option<#operand_name<T>> {
2777 #norm_computation
2778 if norm_sq.abs() < T::epsilon() {
2779 return None;
2780 }
2781 let inv_norm_sq = T::one() / norm_sq;
2782 Some(#constructor_call)
2783 }
2784 }
2785 }
2786 }
2787
2788 fn generate_inverse_antisandwich_trait(
2793 &self,
2794 versor: &TypeSpec,
2795 operand: &TypeSpec,
2796 ) -> TokenStream {
2797 let versor_name = format_ident!("{}", versor.name);
2798 let operand_name = format_ident!("{}", operand.name);
2799 let is_degenerate = self.spec.signature.r > 0;
2800
2801 let field_exprs = self.compute_antisandwich_expressions(versor, operand);
2803
2804 let scaled_fields: Vec<TokenStream> = field_exprs
2806 .iter()
2807 .map(|expr| quote! { (#expr) * inv_norm_sq })
2808 .collect();
2809
2810 let constructor_call = quote! { #operand_name::new_unchecked(#(#scaled_fields),*) };
2812
2813 let norm_computation = if is_degenerate {
2816 quote! {
2817 let norm_sq = <Self as crate::norm::DegenerateNormed>::bulk_norm_squared(self);
2818 }
2819 } else {
2820 quote! {
2821 let norm_sq = <Self as crate::norm::Normed>::norm_squared(self);
2822 }
2823 };
2824
2825 quote! {
2827 #[allow(unused_variables)]
2828 impl<T: Float> InverseAntisandwich<#operand_name<T>> for #versor_name<T> {
2829 type Output = #operand_name<T>;
2830
2831 #[inline]
2832 fn try_inverse_antisandwich(&self, operand: &#operand_name<T>) -> Option<#operand_name<T>> {
2833 #norm_computation
2834 if norm_sq.abs() < T::epsilon() {
2835 return None;
2836 }
2837 let inv_norm_sq = T::one() / norm_sq;
2838 Some(#constructor_call)
2839 }
2840 }
2841 }
2842 }
2843
    /// Generate a `Sandwich` impl where the versor, the operand, or both are
    /// wrapped in the marker newtype selected by `wrapper_kind`.
    ///
    /// `wrapper_pos` controls which side(s) of the trait carry the wrapper in
    /// the generated `impl` header; the field expressions themselves account
    /// for the wrapper via `compute_sandwich_expressions_with_wrappers`.
    fn generate_wrapper_sandwich_trait(
        &self,
        versor: &TypeSpec,
        operand: &TypeSpec,
        wrapper_kind: WrapperKind,
        wrapper_pos: WrapperPosition,
    ) -> TokenStream {
        let versor_name = format_ident!("{}", versor.name);
        let operand_name = format_ident!("{}", operand.name);
        let wrapper_name = Self::wrapper_type_name(wrapper_kind);

        // Translate the position into per-side options for the expression builder.
        let (wrapper_versor, wrapper_operand) = match wrapper_pos {
            WrapperPosition::Lhs => (Some(wrapper_kind), None),
            WrapperPosition::Rhs => (None, Some(wrapper_kind)),
            WrapperPosition::Both => (Some(wrapper_kind), Some(wrapper_kind)),
        };

        let field_exprs = self.compute_sandwich_expressions_with_wrappers(
            versor,
            wrapper_versor,
            operand,
            wrapper_operand,
            false, // false => sandwich product (the antisandwich variant passes true)
        );

        let constructor_call = quote! { #operand_name::new_unchecked(#(#field_exprs),*) };

        // Emit the impl with the wrapper on the requested side(s).
        match wrapper_pos {
            WrapperPosition::Lhs => {
                quote! {
                    #[allow(unused_variables)]
                    impl<T: Float> Sandwich<#operand_name<T>> for #wrapper_name<#versor_name<T>> {
                        type Output = #operand_name<T>;

                        #[inline]
                        fn sandwich(&self, operand: &#operand_name<T>) -> #operand_name<T> {
                            #constructor_call
                        }
                    }
                }
            }
            WrapperPosition::Rhs => {
                quote! {
                    #[allow(unused_variables)]
                    impl<T: Float> Sandwich<#wrapper_name<#operand_name<T>>> for #versor_name<T> {
                        type Output = #operand_name<T>;

                        #[inline]
                        fn sandwich(&self, operand: &#wrapper_name<#operand_name<T>>) -> #operand_name<T> {
                            #constructor_call
                        }
                    }
                }
            }
            WrapperPosition::Both => {
                quote! {
                    #[allow(unused_variables)]
                    impl<T: Float> Sandwich<#wrapper_name<#operand_name<T>>> for #wrapper_name<#versor_name<T>> {
                        type Output = #operand_name<T>;

                        #[inline]
                        fn sandwich(&self, operand: &#wrapper_name<#operand_name<T>>) -> #operand_name<T> {
                            #constructor_call
                        }
                    }
                }
            }
        }
    }
2919
    /// Generate an `Antisandwich` impl where the versor, the operand, or both
    /// are wrapped in the marker newtype selected by `wrapper_kind`.
    ///
    /// Mirrors `generate_wrapper_sandwich_trait`, except the expression
    /// builder is told to produce the antisandwich product (`true` flag).
    fn generate_wrapper_antisandwich_trait(
        &self,
        versor: &TypeSpec,
        operand: &TypeSpec,
        wrapper_kind: WrapperKind,
        wrapper_pos: WrapperPosition,
    ) -> TokenStream {
        let versor_name = format_ident!("{}", versor.name);
        let operand_name = format_ident!("{}", operand.name);
        let wrapper_name = Self::wrapper_type_name(wrapper_kind);

        // Translate the position into per-side options for the expression builder.
        let (wrapper_versor, wrapper_operand) = match wrapper_pos {
            WrapperPosition::Lhs => (Some(wrapper_kind), None),
            WrapperPosition::Rhs => (None, Some(wrapper_kind)),
            WrapperPosition::Both => (Some(wrapper_kind), Some(wrapper_kind)),
        };

        let field_exprs = self.compute_sandwich_expressions_with_wrappers(
            versor,
            wrapper_versor,
            operand,
            wrapper_operand,
            true, // true => antisandwich product
        );

        let constructor_call = quote! { #operand_name::new_unchecked(#(#field_exprs),*) };

        // Emit the impl with the wrapper on the requested side(s).
        match wrapper_pos {
            WrapperPosition::Lhs => {
                quote! {
                    #[allow(unused_variables)]
                    impl<T: Float> Antisandwich<#operand_name<T>> for #wrapper_name<#versor_name<T>> {
                        type Output = #operand_name<T>;

                        #[inline]
                        fn antisandwich(&self, operand: &#operand_name<T>) -> #operand_name<T> {
                            #constructor_call
                        }
                    }
                }
            }
            WrapperPosition::Rhs => {
                quote! {
                    #[allow(unused_variables)]
                    impl<T: Float> Antisandwich<#wrapper_name<#operand_name<T>>> for #versor_name<T> {
                        type Output = #operand_name<T>;

                        #[inline]
                        fn antisandwich(&self, operand: &#wrapper_name<#operand_name<T>>) -> #operand_name<T> {
                            #constructor_call
                        }
                    }
                }
            }
            WrapperPosition::Both => {
                quote! {
                    #[allow(unused_variables)]
                    impl<T: Float> Antisandwich<#wrapper_name<#operand_name<T>>> for #wrapper_name<#versor_name<T>> {
                        type Output = #operand_name<T>;

                        #[inline]
                        fn antisandwich(&self, operand: &#wrapper_name<#operand_name<T>>) -> #operand_name<T> {
                            #constructor_call
                        }
                    }
                }
            }
        }
    }
2995
    /// Generate a `Transform` impl for wrapped versor/operand combinations.
    ///
    /// `Transform` forwards to the metric-appropriate product: `antisandwich`
    /// when the signature has degenerate directions (r > 0), `sandwich`
    /// otherwise. The wrapper placement mirrors the corresponding
    /// `Sandwich`/`Antisandwich` impls generated by the sibling methods.
    fn generate_wrapper_transform_trait(
        &self,
        versor: &TypeSpec,
        operand: &TypeSpec,
        wrapper_kind: WrapperKind,
        wrapper_pos: WrapperPosition,
    ) -> TokenStream {
        let versor_name = format_ident!("{}", versor.name);
        let operand_name = format_ident!("{}", operand.name);
        let wrapper_name = Self::wrapper_type_name(wrapper_kind);

        // Pick the delegate method name once; it is spliced into every arm.
        let is_degenerate = self.spec.signature.r > 0;
        let method_name = if is_degenerate {
            quote! { antisandwich }
        } else {
            quote! { sandwich }
        };

        match wrapper_pos {
            WrapperPosition::Lhs => {
                quote! {
                    impl<T: Float> Transform<#operand_name<T>> for #wrapper_name<#versor_name<T>> {
                        type Output = #operand_name<T>;

                        #[inline]
                        fn transform(&self, operand: &#operand_name<T>) -> #operand_name<T> {
                            self.#method_name(operand)
                        }
                    }
                }
            }
            WrapperPosition::Rhs => {
                quote! {
                    impl<T: Float> Transform<#wrapper_name<#operand_name<T>>> for #versor_name<T> {
                        type Output = #operand_name<T>;

                        #[inline]
                        fn transform(&self, operand: &#wrapper_name<#operand_name<T>>) -> #operand_name<T> {
                            self.#method_name(operand)
                        }
                    }
                }
            }
            WrapperPosition::Both => {
                quote! {
                    impl<T: Float> Transform<#wrapper_name<#operand_name<T>>> for #wrapper_name<#versor_name<T>> {
                        type Output = #operand_name<T>;

                        #[inline]
                        fn transform(&self, operand: &#wrapper_name<#operand_name<T>>) -> #operand_name<T> {
                            self.#method_name(operand)
                        }
                    }
                }
            }
        }
    }
3056
3057 fn generate_transform_trait_from_versor(
3064 &self,
3065 versor: &TypeSpec,
3066 operand: &TypeSpec,
3067 ) -> TokenStream {
3068 let versor_name = format_ident!("{}", versor.name);
3069 let operand_name = format_ident!("{}", operand.name);
3070
3071 let is_degenerate = self.spec.signature.r > 0;
3073
3074 let method_call = if is_degenerate {
3075 quote! { self.antisandwich(operand) }
3076 } else {
3077 quote! { self.sandwich(operand) }
3078 };
3079
3080 let product_name = if is_degenerate {
3082 "antisandwich"
3083 } else {
3084 "sandwich"
3085 };
3086 let doc = format!(
3087 "Transform a [`{}`] using this [`{}`].\n\n\
3088 Applies the geometric transformation represented by this versor.\n\
3089 For rotors, this performs rotation. For motors, this performs rigid\n\
3090 body transformation (rotation + translation). Internally uses the\n\
3091 {} product.",
3092 operand.name, versor.name, product_name
3093 );
3094
3095 quote! {
3096 #[doc = #doc]
3097 impl<T: Float> Transform<#operand_name<T>> for #versor_name<T> {
3098 type Output = #operand_name<T>;
3099
3100 #[inline]
3101 fn transform(&self, operand: &#operand_name<T>) -> #operand_name<T> {
3102 #method_call
3103 }
3104 }
3105 }
3106 }
3107
3108 fn generate_versor_traits(&self) -> Vec<TokenStream> {
3120 let mut impls = Vec::new();
3121
3122 let versor_types: Vec<_> = self
3124 .spec
3125 .types
3126 .iter()
3127 .filter(|t| t.alias_of.is_none() && t.versor.is_some())
3128 .collect();
3129
3130 for lhs in &versor_types {
3132 for rhs in &versor_types {
3133 if let Some(output_type) = self.find_mul_output_type(&lhs.name, &rhs.name) {
3136 let lhs_name = format_ident!("{}", lhs.name);
3137 let rhs_name = format_ident!("{}", rhs.name);
3138 let out_name = format_ident!("{}", output_type);
3139
3140 let compose_body = quote! {
3145 *self * *other
3146 };
3147
3148 impls.push(quote! {
3149 impl<T: Float> Versor<#rhs_name<T>> for #lhs_name<T> {
3150 type Output = #out_name<T>;
3151
3152 #[inline]
3153 fn compose(&self, other: &#rhs_name<T>) -> #out_name<T> {
3154 #compose_body
3155 }
3156 }
3157 });
3158 }
3159 }
3160 }
3161
3162 impls
3163 }
3164
3165 fn generate_versor_inverse_traits(&self) -> Vec<TokenStream> {
3173 let mut impls = Vec::new();
3174 let is_degenerate = self.spec.signature.r > 0;
3175
3176 let types_needing_inverse: Vec<_> = self
3180 .spec
3181 .types
3182 .iter()
3183 .filter(|t| {
3184 t.alias_of.is_none()
3185 && (t.versor.is_some() || !t.inverse_sandwich_targets.is_empty())
3186 })
3187 .collect();
3188
3189 for ty in types_needing_inverse {
3190 let type_name = format_ident!("{}", ty.name);
3191
3192 let scaled_fields: Vec<TokenStream> = ty
3194 .fields
3195 .iter()
3196 .map(|field| {
3197 let field_name = format_ident!("{}", field.name);
3198 let grade = field.grade;
3199 if (grade * grade.saturating_sub(1) / 2).is_multiple_of(2) {
3201 quote! { self.#field_name() * inv_norm_sq }
3202 } else {
3203 quote! { -self.#field_name() * inv_norm_sq }
3204 }
3205 })
3206 .collect();
3207
3208 let norm_computation = if is_degenerate {
3211 quote! {
3212 let norm_sq = <Self as crate::norm::DegenerateNormed>::bulk_norm_squared(self);
3213 }
3214 } else {
3215 quote! {
3216 let norm_sq = <Self as crate::norm::Normed>::norm_squared(self);
3217 }
3218 };
3219
3220 impls.push(quote! {
3221 impl<T: Float> VersorInverse for #type_name<T> {
3222 fn try_inverse(&self) -> Option<Self> {
3223 #norm_computation
3224 if norm_sq.abs() < T::epsilon() {
3225 return None;
3226 }
3227 let inv_norm_sq = T::one() / norm_sq;
3228 Some(Self::new_unchecked(#(#scaled_fields),*))
3229 }
3230 }
3231 });
3232 }
3233
3234 impls
3235 }
3236
3237 fn find_mul_output_type(&self, lhs: &str, rhs: &str) -> Option<String> {
3239 for entry in &self.spec.products.geometric {
3241 if entry.lhs == lhs && entry.rhs == rhs {
3242 return Some(entry.output.clone());
3243 }
3244 }
3245 None
3246 }
3247
3248 fn will_generate_versor_impls(&self) -> bool {
3253 let versor_types: Vec<_> = self
3254 .spec
3255 .types
3256 .iter()
3257 .filter(|t| t.alias_of.is_none() && t.versor.is_some())
3258 .collect();
3259
3260 for lhs in &versor_types {
3261 for rhs in &versor_types {
3262 if self.find_mul_output_type(&lhs.name, &rhs.name).is_some() {
3263 return true;
3264 }
3265 }
3266 }
3267 false
3268 }
3269
3270 fn generate_scalar_product_trait(
3272 &self,
3273 a: &TypeSpec,
3274 b: &TypeSpec,
3275 _output: &TypeSpec,
3276 _entry: &crate::spec::ProductEntry,
3277 ) -> TokenStream {
3278 let a_name = format_ident!("{}", a.name);
3279 let b_name = format_ident!("{}", b.name);
3280
3281 let expr = self.compute_scalar_product_expression(a, b);
3283
3284 quote! {
3285 impl<T: Float> ScalarProduct<#b_name<T>> for #a_name<T> {
3286 type Scalar = T;
3287
3288 #[inline]
3289 fn scalar_product(&self, rhs: &#b_name<T>) -> T {
3290 #expr
3291 }
3292 }
3293 }
3294 }
3295
3296 fn generate_bulk_contract_trait(
3298 &self,
3299 a: &TypeSpec,
3300 b: &TypeSpec,
3301 output: &TypeSpec,
3302 _entry: &crate::spec::ProductEntry,
3303 ) -> TokenStream {
3304 let a_name = format_ident!("{}", a.name);
3305 let b_name = format_ident!("{}", b.name);
3306 let out_name = format_ident!("{}", output.name);
3307
3308 let field_exprs =
3310 self.compute_product_expressions(a, b, output, SymbolicProductKind::BulkContraction);
3311
3312 let constructor_call = quote! { #out_name::new_unchecked(#(#field_exprs),*) };
3314
3315 let doc = format!(
3317 "Bulk contraction of [`{}`] with [`{}`].\n\n\
3318 The bulk contraction extracts the Euclidean (non-degenerate) component\n\
3319 of the interior product. In PGA, this isolates the finite/spatial part.",
3320 a.name, b.name
3321 );
3322
3323 quote! {
3324 #[doc = #doc]
3325 impl<T: Float> BulkContract<#b_name<T>> for #a_name<T> {
3326 type Output = #out_name<T>;
3327
3328 #[inline]
3329 fn bulk_contract(&self, rhs: &#b_name<T>) -> #out_name<T> {
3330 #constructor_call
3331 }
3332 }
3333 }
3334 }
3335
3336 fn generate_weight_contract_trait(
3338 &self,
3339 a: &TypeSpec,
3340 b: &TypeSpec,
3341 output: &TypeSpec,
3342 _entry: &crate::spec::ProductEntry,
3343 ) -> TokenStream {
3344 let a_name = format_ident!("{}", a.name);
3345 let b_name = format_ident!("{}", b.name);
3346 let out_name = format_ident!("{}", output.name);
3347
3348 let field_exprs =
3350 self.compute_product_expressions(a, b, output, SymbolicProductKind::WeightContraction);
3351
3352 let constructor_call = quote! { #out_name::new_unchecked(#(#field_exprs),*) };
3354
3355 let doc = format!(
3357 "Weight contraction of [`{}`] with [`{}`].\n\n\
3358 The weight contraction extracts the degenerate/ideal component of the\n\
3359 interior product. In PGA, this measures the 'weight' or projective part.",
3360 a.name, b.name
3361 );
3362
3363 quote! {
3364 #[doc = #doc]
3365 impl<T: Float> WeightContract<#b_name<T>> for #a_name<T> {
3366 type Output = #out_name<T>;
3367
3368 #[inline]
3369 fn weight_contract(&self, rhs: &#b_name<T>) -> #out_name<T> {
3370 #constructor_call
3371 }
3372 }
3373 }
3374 }
3375
3376 fn generate_bulk_expand_trait(
3378 &self,
3379 a: &TypeSpec,
3380 b: &TypeSpec,
3381 output: &TypeSpec,
3382 _entry: &crate::spec::ProductEntry,
3383 ) -> TokenStream {
3384 let a_name = format_ident!("{}", a.name);
3385 let b_name = format_ident!("{}", b.name);
3386 let out_name = format_ident!("{}", output.name);
3387
3388 let field_exprs =
3390 self.compute_product_expressions(a, b, output, SymbolicProductKind::BulkExpansion);
3391
3392 let constructor_call = quote! { #out_name::new_unchecked(#(#field_exprs),*) };
3394
3395 let doc = format!(
3397 "Bulk expansion of [`{}`] with [`{}`].\n\n\
3398 The bulk expansion is the dual of bulk contraction, extracting the\n\
3399 Euclidean component of the exterior product complement.",
3400 a.name, b.name
3401 );
3402
3403 quote! {
3404 #[doc = #doc]
3405 impl<T: Float> BulkExpand<#b_name<T>> for #a_name<T> {
3406 type Output = #out_name<T>;
3407
3408 #[inline]
3409 fn bulk_expand(&self, rhs: &#b_name<T>) -> #out_name<T> {
3410 #constructor_call
3411 }
3412 }
3413 }
3414 }
3415
3416 fn generate_weight_expand_trait(
3418 &self,
3419 a: &TypeSpec,
3420 b: &TypeSpec,
3421 output: &TypeSpec,
3422 _entry: &crate::spec::ProductEntry,
3423 ) -> TokenStream {
3424 let a_name = format_ident!("{}", a.name);
3425 let b_name = format_ident!("{}", b.name);
3426 let out_name = format_ident!("{}", output.name);
3427
3428 let field_exprs =
3430 self.compute_product_expressions(a, b, output, SymbolicProductKind::WeightExpansion);
3431
3432 let constructor_call = quote! { #out_name::new_unchecked(#(#field_exprs),*) };
3434
3435 let doc = format!(
3437 "Weight expansion of [`{}`] with [`{}`].\n\n\
3438 The weight expansion is the dual of weight contraction, extracting the\n\
3439 degenerate/ideal component of the exterior product complement.",
3440 a.name, b.name
3441 );
3442
3443 quote! {
3444 #[doc = #doc]
3445 impl<T: Float> WeightExpand<#b_name<T>> for #a_name<T> {
3446 type Output = #out_name<T>;
3447
3448 #[inline]
3449 fn weight_expand(&self, rhs: &#b_name<T>) -> #out_name<T> {
3450 #constructor_call
3451 }
3452 }
3453 }
3454 }
3455
3456 fn infer_sandwich_targets(&self, _versor_type: &TypeSpec) -> Vec<String> {
3473 self.spec
3481 .types
3482 .iter()
3483 .filter(|t| t.alias_of.is_none())
3484 .map(|t| t.name.clone())
3485 .collect()
3486 }
3487
3488 fn generate_reverse_trait(&self, ty: &TypeSpec) -> TokenStream {
3494 let type_name = format_ident!("{}", ty.name);
3495
3496 let field_exprs: Vec<TokenStream> = ty
3498 .fields
3499 .iter()
3500 .map(|field| {
3501 let field_name = format_ident!("{}", field.name);
3502 let grade = field.grade;
3503 if (grade * grade.saturating_sub(1) / 2).is_multiple_of(2) {
3505 quote! { self.#field_name() }
3506 } else {
3507 quote! { -self.#field_name() }
3508 }
3509 })
3510 .collect();
3511
3512 let constructor = quote! { Self::new_unchecked(#(#field_exprs),*) };
3513
3514 quote! {
3515 impl<T: Float> Reverse for #type_name<T> {
3516 #[inline]
3517 fn reverse(&self) -> Self {
3518 #constructor
3519 }
3520 }
3521 }
3522 }
3523
3524 fn generate_antireverse_trait(&self, ty: &TypeSpec) -> TokenStream {
3526 let type_name = format_ident!("{}", ty.name);
3527 let dim = self.algebra.dim();
3528
3529 let field_exprs: Vec<TokenStream> = ty
3531 .fields
3532 .iter()
3533 .map(|field| {
3534 let field_name = format_ident!("{}", field.name);
3535 let grade = field.grade;
3536 let antigrade = dim - grade;
3537 if (antigrade * antigrade.saturating_sub(1) / 2).is_multiple_of(2) {
3539 quote! { self.#field_name() }
3540 } else {
3541 quote! { -self.#field_name() }
3542 }
3543 })
3544 .collect();
3545
3546 let constructor = quote! { Self::new_unchecked(#(#field_exprs),*) };
3547
3548 quote! {
3549 impl<T: Float> Antireverse for #type_name<T> {
3550 #[inline]
3551 fn antireverse(&self) -> Self {
3552 #constructor
3553 }
3554 }
3555 }
3556 }
3557
3558 fn generate_involute_trait(&self, ty: &TypeSpec) -> TokenStream {
3565 let type_name = format_ident!("{}", ty.name);
3566 let involution_kind = self.spec.norm.primary_involution;
3567
3568 let field_exprs: Vec<TokenStream> = ty
3570 .fields
3571 .iter()
3572 .map(|field| {
3573 let field_name = format_ident!("{}", field.name);
3574 let grade = field.grade;
3575
3576 let is_positive = match involution_kind {
3578 InvolutionKind::Reverse => {
3579 (grade * grade.saturating_sub(1) / 2).is_multiple_of(2)
3581 }
3582 InvolutionKind::GradeInvolution => {
3583 grade.is_multiple_of(2)
3585 }
3586 InvolutionKind::CliffordConjugate => {
3587 (grade * (grade + 1) / 2).is_multiple_of(2)
3589 }
3590 };
3591
3592 if is_positive {
3593 quote! { self.#field_name() }
3594 } else {
3595 quote! { -self.#field_name() }
3596 }
3597 })
3598 .collect();
3599
3600 let constructor = quote! { Self::new_unchecked(#(#field_exprs),*) };
3601
3602 quote! {
3603 impl<T: Float> Involute for #type_name<T> {
3604 #[inline]
3605 fn involute(&self) -> Self {
3606 #constructor
3607 }
3608 }
3609 }
3610 }
3611
3612 fn generate_right_complement_trait(&self, ty: &TypeSpec) -> Option<TokenStream> {
3616 let output_type_name = self.find_complement_output_type(ty)?;
3618 let output_type = self
3619 .spec
3620 .types
3621 .iter()
3622 .find(|t| t.name == output_type_name)?;
3623
3624 let type_name = format_ident!("{}", ty.name);
3625 let out_name = format_ident!("{}", output_type_name);
3626
3627 let field_exprs: Vec<TokenStream> = output_type
3629 .fields
3630 .iter()
3631 .map(|out_field| {
3632 let out_blade = out_field.blade_index;
3634
3635 for in_field in &ty.fields {
3636 let (sign, comp_blade) = self.table.complement(in_field.blade_index);
3637 if comp_blade == out_blade && sign != 0 {
3638 let in_name = format_ident!("{}", in_field.name);
3639 return if sign > 0 {
3640 quote! { self.#in_name() }
3641 } else {
3642 quote! { -self.#in_name() }
3643 };
3644 }
3645 }
3646 quote! { T::zero() }
3648 })
3649 .collect();
3650
3651 let constructor = quote! { #out_name::new_unchecked(#(#field_exprs),*) };
3652
3653 Some(quote! {
3654 impl<T: Float> RightComplement for #type_name<T> {
3655 type Output = #out_name<T>;
3656
3657 #[inline]
3658 fn right_complement(&self) -> #out_name<T> {
3659 #constructor
3660 }
3661 }
3662 })
3663 }
3664
3665 fn find_complement_output_type(&self, ty: &TypeSpec) -> Option<String> {
3674 let dim = self.algebra.dim();
3675 let complement_grades: Vec<usize> = ty.grades.iter().map(|g| dim - g).collect();
3676
3677 for candidate in &self.spec.types {
3679 if candidate.alias_of.is_some() {
3680 continue;
3681 }
3682 let mut candidate_grades = candidate.grades.clone();
3683 candidate_grades.sort();
3684 let mut sorted_complement = complement_grades.clone();
3685 sorted_complement.sort();
3686 if candidate_grades == sorted_complement {
3687 return Some(candidate.name.clone());
3688 }
3689 }
3690
3691 None
3693 }
3694
3695 fn generate_weight_dual_trait(&self, ty: &TypeSpec) -> Option<TokenStream> {
3699 let output_type_name = self.find_weight_dual_output_type(ty)?;
3701 let output_type = self
3702 .spec
3703 .types
3704 .iter()
3705 .find(|t| t.name == output_type_name)?;
3706
3707 let type_name = format_ident!("{}", ty.name);
3708 let out_name = format_ident!("{}", output_type_name);
3709
3710 let field_exprs: Vec<TokenStream> = output_type
3712 .fields
3713 .iter()
3714 .map(|out_field| {
3715 let out_blade = out_field.blade_index;
3717
3718 for in_field in &ty.fields {
3719 let (sign, dual_blade) = self.table.weight_dual(in_field.blade_index);
3720 if dual_blade == out_blade && sign != 0 {
3721 let in_name = format_ident!("{}", in_field.name);
3722 return if sign > 0 {
3723 quote! { self.#in_name() }
3724 } else {
3725 quote! { -self.#in_name() }
3726 };
3727 }
3728 }
3729 quote! { T::zero() }
3731 })
3732 .collect();
3733
3734 let constructor = quote! { #out_name::new_unchecked(#(#field_exprs),*) };
3735
3736 Some(quote! {
3737 impl<T: Float> WeightDual for #type_name<T> {
3738 type Output = #out_name<T>;
3739
3740 #[inline]
3741 fn weight_dual(&self) -> #out_name<T> {
3742 #constructor
3743 }
3744 }
3745 })
3746 }
3747
3748 fn find_weight_dual_output_type(&self, ty: &TypeSpec) -> Option<String> {
3753 let dim = self.algebra.dim();
3754 let dual_grades: Vec<usize> = ty.grades.iter().map(|g| dim - g).collect();
3755
3756 for candidate in &self.spec.types {
3758 if candidate.alias_of.is_some() {
3759 continue;
3760 }
3761 let mut candidate_grades = candidate.grades.clone();
3762 candidate_grades.sort();
3763 let mut sorted_dual = dual_grades.clone();
3764 sorted_dual.sort();
3765 if candidate_grades == sorted_dual {
3766 return Some(candidate.name.clone());
3767 }
3768 }
3769
3770 None
3772 }
3773
3774 fn compute_blade_metric_sign(&self, blade_index: usize, grade: usize) -> i8 {
3786 let reorder_sign: i8 = if (grade * grade.saturating_sub(1) / 2).is_multiple_of(2) {
3788 1
3789 } else {
3790 -1
3791 };
3792
3793 let mut metric_product: i8 = 1;
3795 for basis in &self.spec.signature.basis {
3796 if (blade_index >> basis.index) & 1 == 1 {
3797 metric_product *= basis.metric;
3799 }
3800 }
3801
3802 reorder_sign * metric_product
3803 }
3804
3805 fn generate_all_normed(&self) -> TokenStream {
3807 let impls: Vec<TokenStream> = self
3808 .spec
3809 .types
3810 .iter()
3811 .filter(|t| t.alias_of.is_none())
3812 .map(|ty| self.generate_normed_impl(ty))
3813 .collect();
3814
3815 let degenerate_impls: Vec<TokenStream> = if self.spec.signature.r > 0 {
3817 self.spec
3818 .types
3819 .iter()
3820 .filter(|t| t.alias_of.is_none())
3821 .filter_map(|ty| self.generate_degenerate_normed_impl(ty))
3822 .collect()
3823 } else {
3824 Vec::new()
3825 };
3826
3827 quote! {
3828 #(#impls)*
3829 #(#degenerate_impls)*
3830 }
3831 }
3832
    /// Generate the `crate::norm::Normed` impl for `ty`.
    ///
    /// `norm_squared` is a sum of ±fᵢ² terms: each field's sign is the
    /// product of the primary involution's sign at that grade and the blade's
    /// metric sign. Fields on null blades (metric sign 0) are dropped.
    fn generate_normed_impl(&self, ty: &TypeSpec) -> TokenStream {
        let name = format_ident!("{}", ty.name);
        let involution_kind = self.spec.norm.primary_involution;

        let norm_squared_terms: Vec<TokenStream> = ty
            .fields
            .iter()
            .filter_map(|f| {
                let fname = format_ident!("{}", f.name);

                // Sign contributed by the involution at this field's grade.
                let inv_sign: i8 = match involution_kind {
                    InvolutionKind::Reverse => {
                        let k = f.grade;
                        // Reverse: (-1)^(k(k-1)/2)
                        if (k * k.saturating_sub(1) / 2) % 2 == 0 {
                            1
                        } else {
                            -1
                        }
                    }
                    InvolutionKind::GradeInvolution => {
                        // Grade involution: (-1)^k
                        if f.grade % 2 == 0 { 1 } else { -1 }
                    }
                    InvolutionKind::CliffordConjugate => {
                        let k = f.grade;
                        // Clifford conjugate: (-1)^(k(k+1)/2)
                        if (k * (k + 1) / 2) % 2 == 0 { 1 } else { -1 }
                    }
                };

                // Sign (or 0 for null blades) contributed by the metric.
                let metric_sign = self.compute_blade_metric_sign(f.blade_index, f.grade);

                let total_sign = inv_sign * metric_sign;

                // Null blades contribute nothing to the norm.
                if total_sign == 0 {
                    None
                } else if total_sign > 0 {
                    Some(quote! { self.#fname() * self.#fname() })
                } else {
                    Some(quote! { -self.#fname() * self.#fname() })
                }
            })
            .collect();

        // `scale` multiplies every field uniformly by `factor`.
        let scale_fields: Vec<TokenStream> = ty
            .fields
            .iter()
            .map(|f| {
                let fname = format_ident!("{}", f.name);
                quote! { self.#fname() * factor }
            })
            .collect();

        // A type made entirely of null blades has an identically-zero norm.
        let norm_squared_expr = if norm_squared_terms.is_empty() {
            quote! { T::zero() }
        } else {
            quote! { #(#norm_squared_terms)+* }
        };

        quote! {
            impl<T: Float> crate::norm::Normed for #name<T> {
                type Scalar = T;

                #[inline]
                fn norm_squared(&self) -> T {
                    #norm_squared_expr
                }

                fn try_normalize(&self) -> Option<Self> {
                    let n = self.norm();
                    if n < T::epsilon() {
                        None
                    } else {
                        Some(self.scale(T::one() / n))
                    }
                }

                #[inline]
                fn scale(&self, factor: T) -> Self {
                    Self::new_unchecked(#(#scale_fields),*)
                }
            }
        }
    }
3937
3938 fn generate_degenerate_normed_impl(&self, ty: &TypeSpec) -> Option<TokenStream> {
3942 let name = format_ident!("{}", ty.name);
3943
3944 let degenerate_indices: Vec<usize> = self
3946 .spec
3947 .signature
3948 .basis
3949 .iter()
3950 .filter(|b| b.metric == 0)
3951 .map(|b| b.index)
3952 .collect();
3953
3954 let (bulk_fields, weight_fields): (Vec<_>, Vec<_>) = ty.fields.iter().partition(|f| {
3956 !degenerate_indices.iter().any(|°_idx| {
3958 (f.blade_index >> deg_idx) & 1 == 1
3960 })
3961 });
3962
3963 if bulk_fields.is_empty() && weight_fields.is_empty() {
3965 return None;
3966 }
3967
3968 let bulk_norm_terms: Vec<TokenStream> = bulk_fields
3970 .iter()
3971 .map(|f| {
3972 let fname = format_ident!("{}", f.name);
3973 quote! { self.#fname() * self.#fname() }
3974 })
3975 .collect();
3976
3977 let weight_norm_terms: Vec<TokenStream> = weight_fields
3979 .iter()
3980 .map(|f| {
3981 let fname = format_ident!("{}", f.name);
3982 quote! { self.#fname() * self.#fname() }
3983 })
3984 .collect();
3985
3986 let bulk_norm_expr = if bulk_norm_terms.is_empty() {
3987 quote! { T::zero() }
3988 } else {
3989 quote! { #(#bulk_norm_terms)+* }
3990 };
3991
3992 let weight_norm_expr = if weight_norm_terms.is_empty() {
3993 quote! { T::zero() }
3994 } else {
3995 quote! { #(#weight_norm_terms)+* }
3996 };
3997
3998 let scale_fields: Vec<TokenStream> = ty
4000 .fields
4001 .iter()
4002 .map(|f| {
4003 let fname = format_ident!("{}", f.name);
4004 quote! { self.#fname() * inv_w }
4005 })
4006 .collect();
4007
4008 Some(quote! {
4009 impl<T: Float> crate::norm::DegenerateNormed for #name<T> {
4010 #[inline]
4011 fn bulk_norm_squared(&self) -> T {
4012 #bulk_norm_expr
4013 }
4014
4015 #[inline]
4016 fn weight_norm_squared(&self) -> T {
4017 #weight_norm_expr
4018 }
4019
4020 fn try_unitize(&self) -> Option<Self> {
4021 let w = self.weight_norm();
4022 if w < T::epsilon() {
4023 None
4024 } else {
4025 let inv_w = T::one() / w;
4026 Some(Self::new_unchecked(#(#scale_fields),*))
4027 }
4028 }
4029 }
4030 })
4031 }
4032
4033 fn generate_all_approx(&self) -> TokenStream {
4039 let impls: Vec<TokenStream> = self
4040 .spec
4041 .types
4042 .iter()
4043 .filter(|t| t.alias_of.is_none())
4044 .map(|ty| self.generate_approx_impls(ty))
4045 .collect();
4046
4047 quote! { #(#impls)* }
4048 }
4049
4050 fn generate_approx_impls(&self, ty: &TypeSpec) -> TokenStream {
4052 let name = format_ident!("{}", ty.name);
4053
4054 let abs_diff_checks: Vec<TokenStream> = ty
4055 .fields
4056 .iter()
4057 .map(|f| {
4058 let fname = format_ident!("{}", f.name);
4059 quote! { self.#fname().abs_diff_eq(&other.#fname(), epsilon) }
4060 })
4061 .collect();
4062
4063 let relative_checks: Vec<TokenStream> = ty
4064 .fields
4065 .iter()
4066 .map(|f| {
4067 let fname = format_ident!("{}", f.name);
4068 quote! { self.#fname().relative_eq(&other.#fname(), epsilon, max_relative) }
4069 })
4070 .collect();
4071
4072 let ulps_checks: Vec<TokenStream> = ty
4073 .fields
4074 .iter()
4075 .map(|f| {
4076 let fname = format_ident!("{}", f.name);
4077 quote! { self.#fname().ulps_eq(&other.#fname(), epsilon, max_ulps) }
4078 })
4079 .collect();
4080
4081 quote! {
4082 impl<T: Float + AbsDiffEq<Epsilon = T>> AbsDiffEq for #name<T> {
4083 type Epsilon = T;
4084
4085 fn default_epsilon() -> Self::Epsilon {
4086 T::default_epsilon()
4087 }
4088
4089 fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
4090 #(#abs_diff_checks)&&*
4091 }
4092 }
4093
4094 impl<T: Float + RelativeEq<Epsilon = T>> RelativeEq for #name<T> {
4095 fn default_max_relative() -> Self::Epsilon {
4096 T::default_max_relative()
4097 }
4098
4099 fn relative_eq(
4100 &self,
4101 other: &Self,
4102 epsilon: Self::Epsilon,
4103 max_relative: Self::Epsilon,
4104 ) -> bool {
4105 #(#relative_checks)&&*
4106 }
4107 }
4108
4109 impl<T: Float + UlpsEq<Epsilon = T>> UlpsEq for #name<T> {
4110 fn default_max_ulps() -> u32 {
4111 T::default_max_ulps()
4112 }
4113
4114 fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
4115 #(#ulps_checks)&&*
4116 }
4117 }
4118 }
4119 }
4120
4121 fn generate_all_arbitrary(&self) -> TokenStream {
4127 let impls: Vec<TokenStream> = self
4128 .spec
4129 .types
4130 .iter()
4131 .filter(|t| t.alias_of.is_none())
4132 .map(|ty| self.generate_arbitrary_impl(ty))
4133 .collect();
4134
4135 quote! {
4136 #[cfg(any(test, feature = "proptest-support"))]
4137 #[allow(clippy::missing_docs_in_private_items)]
4138 mod arbitrary_impls {
4139 use super::*;
4140 use proptest::prelude::*;
4141 use proptest::strategy::BoxedStrategy;
4142 use std::fmt::Debug;
4143
4144 #(#impls)*
4145 }
4146 }
4147 }
4148
4149 fn generate_arbitrary_impl(&self, ty: &TypeSpec) -> TokenStream {
4155 if let Some(constraint_impl) = self.try_generate_constrained_arbitrary(ty) {
4157 return constraint_impl;
4158 }
4159
4160 self.generate_unconstrained_arbitrary(ty)
4162 }
4163
    /// Try to build an `Arbitrary` impl that respects the type's derived
    /// geometric constraint (a single `expr = 0` equation), solving the
    /// constraint for one field so generated values satisfy it.
    ///
    /// Returns `None` when no single-equation constraint can be derived;
    /// the caller then falls back to unconstrained generation.
    fn try_generate_constrained_arbitrary(&self, ty: &TypeSpec) -> Option<TokenStream> {
        let deriver = ConstraintDeriver::new(self.algebra, self.spec.norm.primary_involution);
        let constraint = deriver.derive_geometric_constraint(ty, "x")?;

        // Only a single defining equation is handled here.
        if constraint.zero_expressions.len() != 1 {
            return None;
        }

        let expr = &constraint.zero_expressions[0];

        let expr_str = format!("{} = 0", expr);

        // Solve for the highest-grade field — presumably the field most
        // amenable to the solver; TODO confirm this choice against the
        // solver's behavior for all specs.
        let solve_for_field = ty.fields.iter().max_by_key(|f| f.grade)?;

        let solver = ConstraintSolver::new();
        let symbol_name = format!("x_{}", solve_for_field.name);
        let solution = solver.solve(&expr_str, &symbol_name).ok()?;

        // Quadratic solutions are handled by rejection sampling (a filtered
        // strategy) instead of substituting a closed form.
        if solution.solution_type == SolutionType::Quadratic {
            return self.generate_filtered_arbitrary(ty);
        }

        Some(self.generate_solving_arbitrary(ty, &solve_for_field.name, &solution))
    }
4200
    /// Generate a constrained `Arbitrary` impl: every field except `solve_for`
    /// is sampled freely, and `solve_for` is computed from the solved
    /// constraint (`solution`) so generated values satisfy it exactly.
    fn generate_solving_arbitrary(
        &self,
        ty: &TypeSpec,
        solve_for: &str,
        solution: &crate::symbolic::SolveResult,
    ) -> TokenStream {
        let name = format_ident!("{}", ty.name);
        let num_fields = ty.fields.len();

        // Index of the dependent field; all other indices are sampled freely.
        let solve_for_idx = ty.fields.iter().position(|f| f.name == solve_for).unwrap();
        let free_indices: Vec<usize> = (0..num_fields).filter(|&i| i != solve_for_idx).collect();

        // Tuple strategies only scale so far; wide types switch to a
        // Vec-based strategy instead.
        if free_indices.len() > 12 {
            return self.generate_vec_based_solving_arbitrary(ty, solve_for, solution);
        }

        // One uniform sampling range per free field.
        let range_tuple: Vec<TokenStream> = free_indices
            .iter()
            .map(|_| quote! { -100.0f64..100.0 })
            .collect();

        // Closure arguments `x0, x1, ...` matching the tuple strategy.
        let prop_map_args: Vec<TokenStream> = free_indices
            .iter()
            .enumerate()
            .map(|(i, _)| {
                let var = format_ident!("x{}", i);
                quote! { #var }
            })
            .collect();

        // Closed-form expression for the dependent field; a divisor (if any)
        // turns it into numerator / divisor.
        let numerator_expr = self.convert_solution_to_tokens(&solution.numerator, ty);
        let solution_expr = if let Some(ref divisor) = solution.divisor {
            let divisor_expr = self.convert_solution_to_tokens(divisor, ty);
            quote! { (#numerator_expr) / (#divisor_expr) }
        } else {
            numerator_expr
        };

        // Map field index -> free-variable index (None for the solved field).
        let mut field_var_map: Vec<Option<usize>> = vec![None; num_fields];
        for (var_idx, &field_idx) in free_indices.iter().enumerate() {
            field_var_map[field_idx] = Some(var_idx);
        }

        // Constructor arguments: the solved field uses the closed form, the
        // rest pass their sampled variable through `T::from_f64`.
        let field_inits: Vec<TokenStream> = ty
            .fields
            .iter()
            .enumerate()
            .map(|(i, _f)| {
                if i == solve_for_idx {
                    quote! { T::from_f64(#solution_expr) }
                } else {
                    let var_idx = field_var_map[i].unwrap();
                    let var = format_ident!("x{}", var_idx);
                    quote! { T::from_f64(#var) }
                }
            })
            .collect();

        // When the solution divides by a sampled variable, filter out samples
        // where that variable is near zero.
        let filter_expr = if let Some(ref divisor) = solution.divisor {
            let divisor_var = self.find_divisor_variable(divisor, ty);
            if let Some(var_idx) = divisor_var {
                let var = format_ident!("x{}", var_idx);
                // Underscore-prefix the closure args the predicate ignores so
                // the generated code stays lint-clean.
                let filter_args: Vec<TokenStream> = free_indices
                    .iter()
                    .enumerate()
                    .map(|(i, _)| {
                        if i == var_idx {
                            let v = format_ident!("x{}", i);
                            quote! { #v }
                        } else {
                            let v = format_ident!("_x{}", i);
                            quote! { #v }
                        }
                    })
                    .collect();
                Some(quote! {
                    .prop_filter("non-zero divisor", |(#(#filter_args),*)| (#var).abs() > 0.1)
                })
            } else {
                None
            }
        } else {
            None
        };

        let filter_chain = filter_expr.unwrap_or_else(|| quote! {});

        // A single free variable is not a tuple strategy, so the prop_map
        // closure takes a bare argument instead of a tuple pattern.
        if free_indices.len() == 1 {
            let var = format_ident!("x0");
            quote! {
                impl<T: Float + Debug + 'static> Arbitrary for #name<T> {
                    type Parameters = ();
                    type Strategy = BoxedStrategy<Self>;

                    fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
                        (-100.0f64..100.0)
                            #filter_chain
                            .prop_map(|#var| {
                                #name::new_unchecked(#(#field_inits),*)
                            })
                            .boxed()
                    }
                }
            }
        } else {
            quote! {
                impl<T: Float + Debug + 'static> Arbitrary for #name<T> {
                    type Parameters = ();
                    type Strategy = BoxedStrategy<Self>;

                    fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
                        (#(#range_tuple),*)
                            #filter_chain
                            .prop_map(|(#(#prop_map_args),*)| {
                                #name::new_unchecked(#(#field_inits),*)
                            })
                            .boxed()
                    }
                }
            }
        }
    }
4333
    /// Vec-based variant of `generate_solving_arbitrary`, used when there are
    /// too many free fields for a tuple strategy: the free values come from a
    /// sampled `Vec<f64>` and are read by index in the generated code.
    fn generate_vec_based_solving_arbitrary(
        &self,
        ty: &TypeSpec,
        solve_for: &str,
        solution: &crate::symbolic::SolveResult,
    ) -> TokenStream {
        let name = format_ident!("{}", ty.name);
        let num_fields = ty.fields.len();

        // Index of the dependent field; the rest are sampled freely.
        let solve_for_idx = ty.fields.iter().position(|f| f.name == solve_for).unwrap();
        let free_indices: Vec<usize> = (0..num_fields).filter(|&i| i != solve_for_idx).collect();
        let num_free = free_indices.len();

        // Dependent-field expression in terms of v[0], v[1], ...
        let numerator_expr = self.convert_solution_to_vec_tokens(&solution.numerator, ty);
        let solution_expr = if let Some(ref divisor) = solution.divisor {
            let divisor_expr = self.convert_solution_to_vec_tokens(divisor, ty);
            quote! { (#numerator_expr) / (#divisor_expr) }
        } else {
            numerator_expr
        };

        // Map field index -> position in the sampled Vec (None for solved).
        let mut field_var_map: Vec<Option<usize>> = vec![None; num_fields];
        for (var_idx, &field_idx) in free_indices.iter().enumerate() {
            field_var_map[field_idx] = Some(var_idx);
        }

        // Constructor arguments in field declaration order.
        let field_inits: Vec<TokenStream> = ty
            .fields
            .iter()
            .enumerate()
            .map(|(i, _f)| {
                if i == solve_for_idx {
                    quote! { T::from_f64(#solution_expr) }
                } else {
                    let var_idx = field_var_map[i].unwrap();
                    quote! { T::from_f64(v[#var_idx]) }
                }
            })
            .collect();

        // Reject samples where the divisor variable is near zero.
        let filter_expr = solution.divisor.as_ref().and_then(|divisor| {
            self.find_divisor_variable(divisor, ty).map(|var_idx| {
                quote! {
                    .prop_filter("non-zero divisor", |v| v[#var_idx].abs() > 0.1)
                }
            })
        });

        let filter_chain = filter_expr.unwrap_or_else(|| quote! {});

        quote! {
            impl<T: Float + Debug + 'static> Arbitrary for #name<T> {
                type Parameters = ();
                type Strategy = BoxedStrategy<Self>;

                fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
                    proptest::collection::vec(-100.0f64..100.0, #num_free)
                        #filter_chain
                        .prop_map(|v| {
                            #name::new_unchecked(#(#field_inits),*)
                        })
                        .boxed()
                }
            }
        }
    }
4405
4406 fn convert_solution_to_tokens(&self, expr: &str, ty: &TypeSpec) -> TokenStream {
4412 let mut result = expr.to_string();
4413
4414 let solve_for_field = ty.fields.iter().max_by_key(|f| f.grade).unwrap();
4416
4417 let free_fields: Vec<_> = ty
4418 .fields
4419 .iter()
4420 .filter(|f| f.name != solve_for_field.name)
4421 .collect();
4422
4423 let mut sorted_fields: Vec<_> = free_fields.iter().enumerate().collect();
4426 sorted_fields.sort_by(|a, b| b.1.name.len().cmp(&a.1.name.len()));
4427
4428 for (i, field) in sorted_fields {
4429 let field_pattern = format!("x_{}", field.name);
4430 let var_name = format!("x{}", i);
4431 result = result.replace(&field_pattern, &var_name);
4432 }
4433
4434 result.parse().unwrap_or_else(|_| quote! { T::zero() })
4436 }
4437
4438 fn convert_solution_to_vec_tokens(&self, expr: &str, ty: &TypeSpec) -> TokenStream {
4443 let mut result = expr.to_string();
4444
4445 let solve_for_field = ty.fields.iter().max_by_key(|f| f.grade).unwrap();
4447
4448 let free_fields: Vec<_> = ty
4449 .fields
4450 .iter()
4451 .filter(|f| f.name != solve_for_field.name)
4452 .collect();
4453
4454 let mut sorted_fields: Vec<_> = free_fields.iter().enumerate().collect();
4457 sorted_fields.sort_by(|a, b| b.1.name.len().cmp(&a.1.name.len()));
4458
4459 for (i, field) in sorted_fields {
4460 let field_pattern = format!("x_{}", field.name);
4461 let var_name = format!("v[{}]", i);
4462 result = result.replace(&field_pattern, &var_name);
4463 }
4464
4465 result.parse().unwrap_or_else(|_| quote! { T::zero() })
4467 }
4468
4469 fn find_divisor_variable(&self, divisor: &str, ty: &TypeSpec) -> Option<usize> {
4473 let solve_for_field = ty.fields.iter().max_by_key(|f| f.grade)?;
4474
4475 let free_fields: Vec<_> = ty
4476 .fields
4477 .iter()
4478 .filter(|f| f.name != solve_for_field.name)
4479 .collect();
4480
4481 for (i, field) in free_fields.iter().enumerate() {
4482 let pattern = format!("x_{}", field.name);
4484 if divisor.contains(&pattern) {
4485 return Some(i);
4486 }
4487 }
4488 None
4489 }
4490
4491 fn generate_filtered_arbitrary(&self, ty: &TypeSpec) -> Option<TokenStream> {
4496 let name = format_ident!("{}", ty.name);
4497 let num_fields = ty.fields.len();
4498
4499 let range_tuple: Vec<TokenStream> =
4501 (0..num_fields).map(|_| quote! { -10.0f64..10.0 }).collect();
4502
4503 let prop_map_args: Vec<TokenStream> = (0..num_fields)
4504 .map(|i| {
4505 let var = format_ident!("x{}", i);
4506 quote! { #var }
4507 })
4508 .collect();
4509
4510 let field_inits: Vec<TokenStream> = (0..num_fields)
4511 .map(|i| {
4512 let var = format_ident!("x{}", i);
4513 quote! { T::from_f64(#var) }
4514 })
4515 .collect();
4516
4517 Some(quote! {
4518 impl<T: Float + Debug + 'static> Arbitrary for #name<T> {
4519 type Parameters = ();
4520 type Strategy = BoxedStrategy<Self>;
4521
4522 fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
4523 (#(#range_tuple),*)
4524 .prop_map(|(#(#prop_map_args),*)| {
4525 #name::new_unchecked(#(#field_inits),*)
4526 })
4527 .boxed()
4528 }
4529 }
4530 })
4531 }
4532
4533 fn generate_unconstrained_arbitrary(&self, ty: &TypeSpec) -> TokenStream {
4535 let name = format_ident!("{}", ty.name);
4536 let num_fields = ty.fields.len();
4537
4538 if num_fields > 12 {
4541 return self.generate_vec_based_arbitrary(ty);
4542 }
4543
4544 let range_tuple: Vec<TokenStream> = (0..num_fields)
4546 .map(|_| quote! { -100.0f64..100.0 })
4547 .collect();
4548
4549 let field_inits: Vec<TokenStream> = (0..num_fields)
4550 .map(|i| {
4551 let var = format_ident!("x{}", i);
4552 quote! { T::from_f64(#var) }
4553 })
4554 .collect();
4555
4556 if num_fields == 1 {
4557 quote! {
4558 impl<T: Float + Debug + 'static> Arbitrary for #name<T> {
4559 type Parameters = ();
4560 type Strategy = BoxedStrategy<Self>;
4561
4562 fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
4563 (-100.0f64..100.0)
4564 .prop_map(|x0| {
4565 #name::new_unchecked(#(#field_inits),*)
4566 })
4567 .boxed()
4568 }
4569 }
4570 }
4571 } else {
4572 let prop_map_args: Vec<TokenStream> = (0..num_fields)
4573 .map(|i| {
4574 let var = format_ident!("x{}", i);
4575 quote! { #var }
4576 })
4577 .collect();
4578
4579 quote! {
4580 impl<T: Float + Debug + 'static> Arbitrary for #name<T> {
4581 type Parameters = ();
4582 type Strategy = BoxedStrategy<Self>;
4583
4584 fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
4585 (#(#range_tuple),*)
4586 .prop_map(|(#(#prop_map_args),*)| {
4587 #name::new_unchecked(#(#field_inits),*)
4588 })
4589 .boxed()
4590 }
4591 }
4592 }
4593 }
4594 }
4595
4596 fn generate_vec_based_arbitrary(&self, ty: &TypeSpec) -> TokenStream {
4601 let name = format_ident!("{}", ty.name);
4602 let num_fields = ty.fields.len();
4603
4604 let field_inits: Vec<TokenStream> = (0..num_fields)
4605 .map(|i| {
4606 quote! { T::from_f64(v[#i]) }
4607 })
4608 .collect();
4609
4610 quote! {
4611 impl<T: Float + Debug + 'static> Arbitrary for #name<T> {
4612 type Parameters = ();
4613 type Strategy = BoxedStrategy<Self>;
4614
4615 fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
4616 proptest::collection::vec(-100.0f64..100.0, #num_fields)
4617 .prop_map(|v| {
4618 #name::new_unchecked(#(#field_inits),*)
4619 })
4620 .boxed()
4621 }
4622 }
4623 }
4624 }
4625
    /// Assemble the complete `verification_tests` module as a raw string:
    /// a `#[cfg(test)]` module that checks every specialized operation
    /// against the generic `Multivector` implementation via proptest.
    ///
    /// Emitted as text (not tokens) so the per-category generators can use
    /// `format!` templates directly.
    fn generate_verification_tests_raw(&self) -> String {
        let signature_name = self.generate_signature_name();
        let add_sub_tests = self.generate_add_sub_verification_tests_raw();
        let exterior_tests = self.generate_exterior_verification_tests_raw();
        let bulk_contraction_tests = self.generate_bulk_contraction_verification_tests_raw();
        let weight_contraction_tests = self.generate_weight_contraction_verification_tests_raw();
        let bulk_expansion_tests = self.generate_bulk_expansion_verification_tests_raw();
        let weight_expansion_tests = self.generate_weight_expansion_verification_tests_raw();
        let de_morgan_tests = self.generate_de_morgan_verification_tests_raw();
        let project_idempotency_tests = self.generate_project_idempotency_tests_raw();
        let antiproject_idempotency_tests = self.generate_antiproject_idempotency_tests_raw();
        let wrapper_equivalence_tests = self.generate_wrapper_equivalence_tests_raw();
        // A degenerate metric (r > 0) needs the extra wrapper types in scope.
        let is_degenerate = self.spec.signature.r > 0;

        let wrapper_imports = if is_degenerate {
            "Unit, Unitized, Bulk"
        } else {
            "Unit"
        };

        format!(
            r#"
// ============================================================
// Verification Tests (compare against Multivector)
// ============================================================

#[cfg(test)]
#[allow(clippy::missing_docs_in_private_items)]
mod verification_tests {{
    use super::*;
    use crate::algebra::Multivector;
    use crate::signature::{sig};
    #[allow(unused_imports)]
    use crate::wrappers::{{{wrapper_imports}}};
    #[allow(unused_imports)]
    use crate::norm::{{Normed, DegenerateNormed}};
    use approx::relative_eq;
    use proptest::prelude::*;

    /// Relative epsilon for floating-point comparisons in verification tests.
    /// Using relative comparison handles varying magnitudes better than absolute.
    const REL_EPSILON: f64 = 1e-10;
{add_sub}{exterior}{bulk_contraction}{weight_contraction}{bulk_expansion}{weight_expansion}{de_morgan}{project_idempotency}{antiproject_idempotency}{wrapper_equivalence}}}
"#,
            sig = signature_name,
            wrapper_imports = wrapper_imports,
            add_sub = add_sub_tests,
            exterior = exterior_tests,
            bulk_contraction = bulk_contraction_tests,
            weight_contraction = weight_contraction_tests,
            bulk_expansion = bulk_expansion_tests,
            weight_expansion = weight_expansion_tests,
            de_morgan = de_morgan_tests,
            project_idempotency = project_idempotency_tests,
            antiproject_idempotency = antiproject_idempotency_tests,
            wrapper_equivalence = wrapper_equivalence_tests,
        )
    }
4698
4699 fn generate_signature_name(&self) -> proc_macro2::Ident {
4704 let sig = &self.spec.signature;
4705 let (p, q, r) = (sig.p, sig.q, sig.r);
4706
4707 let sig_name = match (p, q, r) {
4709 (2, 0, 0) => "Euclidean2",
4711 (3, 0, 0) => "Euclidean3",
4712
4713 (2, 0, 1) => "Projective2",
4715 (3, 0, 1) => "Projective3",
4716
4717 (4, 1, 0) => "Conformal3",
4721
4722 (1, 3, 0) => "Minkowski4",
4724
4725 _ => {
4727 return format_ident!("Cl{}_{}_{}", p, q, r);
4728 }
4729 };
4730 format_ident!("{}", sig_name)
4731 }
4732
    /// Generate proptest suites verifying that each type's specialized `Add`,
    /// `Sub`, and `Neg` impls agree with the generic `Multivector`
    /// operations. Alias types are skipped (their canonical type is covered).
    fn generate_add_sub_verification_tests_raw(&self) -> String {
        let signature_name = self.generate_signature_name();
        self.spec
            .types
            .iter()
            .filter(|t| t.alias_of.is_none())
            .map(|ty| {
                let name = &ty.name;
                let name_lower = ty.name.to_lowercase();

                format!(
                    r#"
    proptest! {{
        #[test]
        fn {name_lower}_add_matches_multivector(a in any::<{name}<f64>>(), b in any::<{name}<f64>>()) {{
            let mv_a: Multivector<f64, {sig}> = a.into();
            let mv_b: Multivector<f64, {sig}> = b.into();

            let specialized_result = a + b;
            let generic_result = mv_a + mv_b;

            let specialized_mv: Multivector<f64, {sig}> = specialized_result.into();
            prop_assert!(
                relative_eq!(specialized_mv, generic_result, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Add mismatch: specialized={{:?}}, generic={{:?}}",
                specialized_mv, generic_result
            );
        }}

        #[test]
        fn {name_lower}_sub_matches_multivector(a in any::<{name}<f64>>(), b in any::<{name}<f64>>()) {{
            let mv_a: Multivector<f64, {sig}> = a.into();
            let mv_b: Multivector<f64, {sig}> = b.into();

            let specialized_result = a - b;
            let generic_result = mv_a - mv_b;

            let specialized_mv: Multivector<f64, {sig}> = specialized_result.into();
            prop_assert!(
                relative_eq!(specialized_mv, generic_result, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Sub mismatch: specialized={{:?}}, generic={{:?}}",
                specialized_mv, generic_result
            );
        }}

        #[test]
        fn {name_lower}_neg_matches_multivector(a in any::<{name}<f64>>()) {{
            let mv_a: Multivector<f64, {sig}> = a.into();

            let specialized_result = -a;
            let generic_result = -mv_a;

            let specialized_mv: Multivector<f64, {sig}> = specialized_result.into();
            prop_assert!(
                relative_eq!(specialized_mv, generic_result, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Neg mismatch: specialized={{:?}}, generic={{:?}}",
                specialized_mv, generic_result
            );
        }}
    }}
"#,
                    name_lower = name_lower,
                    name = name,
                    sig = signature_name,
                )
            })
            .collect()
    }
4802
    /// Generate proptest suites checking that each specialized `wedge`
    /// implementation agrees with the generic `Multivector::exterior`.
    /// Only entries whose operands are single-grade blade types are covered.
    fn generate_exterior_verification_tests_raw(&self) -> String {
        if self.spec.products.wedge.is_empty() {
            return String::new();
        }

        let signature_name = self.generate_signature_name();
        self.spec
            .products
            .wedge
            .iter()
            // Restrict to operand types that are single-grade blades; other
            // combinations are not verified here.
            .filter(|entry| {
                self.find_type(&entry.lhs)
                    .is_some_and(|t| self.is_single_grade_blade(t))
                    && self
                        .find_type(&entry.rhs)
                        .is_some_and(|t| self.is_single_grade_blade(t))
            })
            .map(|entry| {
                let lhs_lower = entry.lhs.to_lowercase();
                let rhs_lower = entry.rhs.to_lowercase();
                let out_lower = entry.output.to_lowercase();

                format!(
                    r#"
    proptest! {{
        #[test]
        fn wedge_{lhs_lower}_{rhs_lower}_{out_lower}_matches_multivector(a in any::<{lhs}<f64>>(), b in any::<{rhs}<f64>>()) {{
            use crate::ops::Wedge;
            let mv_a: Multivector<f64, {sig}> = a.into();
            let mv_b: Multivector<f64, {sig}> = b.into();

            let specialized_result: {out}<f64> = a.wedge(&b);
            let generic_result = mv_a.exterior(&mv_b);

            let specialized_mv: Multivector<f64, {sig}> = specialized_result.into();
            prop_assert!(
                relative_eq!(specialized_mv, generic_result, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Wedge product mismatch: specialized={{:?}}, generic={{:?}}",
                specialized_mv, generic_result
            );
        }}
    }}
"#,
                    lhs_lower = lhs_lower,
                    rhs_lower = rhs_lower,
                    out_lower = out_lower,
                    lhs = entry.lhs,
                    rhs = entry.rhs,
                    out = entry.output,
                    sig = signature_name,
                )
            })
            .collect()
    }
4860
    /// Generate proptest suites checking that each specialized
    /// `bulk_contract` implementation agrees with the generic
    /// `Multivector::bulk_contraction`. Only single-grade blade operands
    /// are covered.
    fn generate_bulk_contraction_verification_tests_raw(&self) -> String {
        if self.spec.products.bulk_contraction.is_empty() {
            return String::new();
        }

        let signature_name = self.generate_signature_name();
        self.spec
            .products
            .bulk_contraction
            .iter()
            // Restrict to operand types that are single-grade blades.
            .filter(|entry| {
                self.find_type(&entry.lhs)
                    .is_some_and(|t| self.is_single_grade_blade(t))
                    && self
                        .find_type(&entry.rhs)
                        .is_some_and(|t| self.is_single_grade_blade(t))
            })
            .map(|entry| {
                let lhs_lower = entry.lhs.to_lowercase();
                let rhs_lower = entry.rhs.to_lowercase();
                let out_lower = entry.output.to_lowercase();

                format!(
                    r#"
    proptest! {{
        #[test]
        fn bulk_contraction_{lhs_lower}_{rhs_lower}_{out_lower}_matches_multivector(a in any::<{lhs}<f64>>(), b in any::<{rhs}<f64>>()) {{
            use crate::ops::BulkContract;
            let mv_a: Multivector<f64, {sig}> = a.into();
            let mv_b: Multivector<f64, {sig}> = b.into();

            let specialized_result: {out}<f64> = a.bulk_contract(&b);
            let generic_result = mv_a.bulk_contraction(&mv_b);

            let specialized_mv: Multivector<f64, {sig}> = specialized_result.into();
            prop_assert!(
                relative_eq!(specialized_mv, generic_result, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Bulk contraction mismatch: specialized={{:?}}, generic={{:?}}",
                specialized_mv, generic_result
            );
        }}
    }}
"#,
                    lhs_lower = lhs_lower,
                    rhs_lower = rhs_lower,
                    out_lower = out_lower,
                    lhs = entry.lhs,
                    rhs = entry.rhs,
                    out = entry.output,
                    sig = signature_name,
                )
            })
            .collect()
    }
4917
    /// Generate proptest suites checking that each specialized
    /// `weight_contract` implementation agrees with the generic
    /// `Multivector::weight_contraction`. Only single-grade blade operands
    /// are covered.
    fn generate_weight_contraction_verification_tests_raw(&self) -> String {
        if self.spec.products.weight_contraction.is_empty() {
            return String::new();
        }

        let signature_name = self.generate_signature_name();
        self.spec
            .products
            .weight_contraction
            .iter()
            // Restrict to operand types that are single-grade blades.
            .filter(|entry| {
                self.find_type(&entry.lhs)
                    .is_some_and(|t| self.is_single_grade_blade(t))
                    && self
                        .find_type(&entry.rhs)
                        .is_some_and(|t| self.is_single_grade_blade(t))
            })
            .map(|entry| {
                let lhs_lower = entry.lhs.to_lowercase();
                let rhs_lower = entry.rhs.to_lowercase();
                let out_lower = entry.output.to_lowercase();

                format!(
                    r#"
    proptest! {{
        #[test]
        fn weight_contraction_{lhs_lower}_{rhs_lower}_{out_lower}_matches_multivector(a in any::<{lhs}<f64>>(), b in any::<{rhs}<f64>>()) {{
            use crate::ops::WeightContract;
            let mv_a: Multivector<f64, {sig}> = a.into();
            let mv_b: Multivector<f64, {sig}> = b.into();

            let specialized_result: {out}<f64> = a.weight_contract(&b);
            let generic_result = mv_a.weight_contraction(&mv_b);

            let specialized_mv: Multivector<f64, {sig}> = specialized_result.into();
            prop_assert!(
                relative_eq!(specialized_mv, generic_result, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Weight contraction mismatch: specialized={{:?}}, generic={{:?}}",
                specialized_mv, generic_result
            );
        }}
    }}
"#,
                    lhs_lower = lhs_lower,
                    rhs_lower = rhs_lower,
                    out_lower = out_lower,
                    lhs = entry.lhs,
                    rhs = entry.rhs,
                    out = entry.output,
                    sig = signature_name,
                )
            })
            .collect()
    }
4974
    /// Generate proptest suites checking that each specialized `bulk_expand`
    /// implementation agrees with the generic `Multivector::bulk_expansion`.
    /// Only single-grade blade operands are covered.
    fn generate_bulk_expansion_verification_tests_raw(&self) -> String {
        if self.spec.products.bulk_expansion.is_empty() {
            return String::new();
        }

        let signature_name = self.generate_signature_name();
        self.spec
            .products
            .bulk_expansion
            .iter()
            // Restrict to operand types that are single-grade blades.
            .filter(|entry| {
                self.find_type(&entry.lhs)
                    .is_some_and(|t| self.is_single_grade_blade(t))
                    && self
                        .find_type(&entry.rhs)
                        .is_some_and(|t| self.is_single_grade_blade(t))
            })
            .map(|entry| {
                let lhs_lower = entry.lhs.to_lowercase();
                let rhs_lower = entry.rhs.to_lowercase();
                let out_lower = entry.output.to_lowercase();

                format!(
                    r#"
    proptest! {{
        #[test]
        fn bulk_expansion_{lhs_lower}_{rhs_lower}_{out_lower}_matches_multivector(a in any::<{lhs}<f64>>(), b in any::<{rhs}<f64>>()) {{
            use crate::ops::BulkExpand;
            let mv_a: Multivector<f64, {sig}> = a.into();
            let mv_b: Multivector<f64, {sig}> = b.into();

            let specialized_result: {out}<f64> = a.bulk_expand(&b);
            let generic_result = mv_a.bulk_expansion(&mv_b);

            let specialized_mv: Multivector<f64, {sig}> = specialized_result.into();
            prop_assert!(
                relative_eq!(specialized_mv, generic_result, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Bulk expansion mismatch: specialized={{:?}}, generic={{:?}}",
                specialized_mv, generic_result
            );
        }}
    }}
"#,
                    lhs_lower = lhs_lower,
                    rhs_lower = rhs_lower,
                    out_lower = out_lower,
                    lhs = entry.lhs,
                    rhs = entry.rhs,
                    out = entry.output,
                    sig = signature_name,
                )
            })
            .collect()
    }
5031
    /// Generate proptest suites checking that each specialized
    /// `weight_expand` implementation agrees with the generic
    /// `Multivector::weight_expansion`. Only single-grade blade operands
    /// are covered.
    fn generate_weight_expansion_verification_tests_raw(&self) -> String {
        if self.spec.products.weight_expansion.is_empty() {
            return String::new();
        }

        let signature_name = self.generate_signature_name();
        self.spec
            .products
            .weight_expansion
            .iter()
            // Restrict to operand types that are single-grade blades.
            .filter(|entry| {
                self.find_type(&entry.lhs)
                    .is_some_and(|t| self.is_single_grade_blade(t))
                    && self
                        .find_type(&entry.rhs)
                        .is_some_and(|t| self.is_single_grade_blade(t))
            })
            .map(|entry| {
                let lhs_lower = entry.lhs.to_lowercase();
                let rhs_lower = entry.rhs.to_lowercase();
                let out_lower = entry.output.to_lowercase();

                format!(
                    r#"
    proptest! {{
        #[test]
        fn weight_expansion_{lhs_lower}_{rhs_lower}_{out_lower}_matches_multivector(a in any::<{lhs}<f64>>(), b in any::<{rhs}<f64>>()) {{
            use crate::ops::WeightExpand;
            let mv_a: Multivector<f64, {sig}> = a.into();
            let mv_b: Multivector<f64, {sig}> = b.into();

            let specialized_result: {out}<f64> = a.weight_expand(&b);
            let generic_result = mv_a.weight_expansion(&mv_b);

            let specialized_mv: Multivector<f64, {sig}> = specialized_result.into();
            prop_assert!(
                relative_eq!(specialized_mv, generic_result, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Weight expansion mismatch: specialized={{:?}}, generic={{:?}}",
                specialized_mv, generic_result
            );
        }}
    }}
"#,
                    lhs_lower = lhs_lower,
                    rhs_lower = rhs_lower,
                    out_lower = out_lower,
                    lhs = entry.lhs,
                    rhs = entry.rhs,
                    out = entry.output,
                    sig = signature_name,
                )
            })
            .collect()
    }
5088
    /// Generate proptest suites for the De Morgan dualities relating the
    /// geometric product, the antiproduct, and the complement:
    /// `complement(a * b) == complement(a) ⋇ complement(b)` and vice versa.
    ///
    /// NOTE(review): this filter uses `t.grades.len() == 1` while the other
    /// generators use `is_single_grade_blade` — confirm the two predicates
    /// agree for every spec, or unify them.
    fn generate_de_morgan_verification_tests_raw(&self) -> String {
        let signature_name = self.generate_signature_name();

        self.spec
            .types
            .iter()
            .filter(|t| t.alias_of.is_none() && t.grades.len() == 1)
            .map(|ty| {
                let name = &ty.name;
                let name_lower = ty.name.to_lowercase();

                format!(
                    r#"
    proptest! {{
        /// De Morgan: complement(a * b) = complement(a) ⋇ complement(b)
        #[test]
        fn de_morgan_geometric_{name_lower}(a in any::<{name}<f64>>(), b in any::<{name}<f64>>()) {{
            let mv_a: Multivector<f64, {sig}> = a.into();
            let mv_b: Multivector<f64, {sig}> = b.into();

            // LHS: complement(a * b)
            let lhs = (mv_a * mv_b).complement();

            // RHS: complement(a) ⋇ complement(b)
            let rhs = mv_a.complement().antiproduct(&mv_b.complement());

            prop_assert!(
                relative_eq!(lhs, rhs, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "De Morgan (geometric) failed: complement(a*b)={{:?}}, complement(a)⋇complement(b)={{:?}}",
                lhs, rhs
            );
        }}

        /// De Morgan: complement(a ⋇ b) = complement(a) * complement(b)
        #[test]
        fn de_morgan_antiproduct_{name_lower}(a in any::<{name}<f64>>(), b in any::<{name}<f64>>()) {{
            let mv_a: Multivector<f64, {sig}> = a.into();
            let mv_b: Multivector<f64, {sig}> = b.into();

            // LHS: complement(a ⋇ b)
            let lhs = mv_a.antiproduct(&mv_b).complement();

            // RHS: complement(a) * complement(b)
            let rhs = mv_a.complement() * mv_b.complement();

            prop_assert!(
                relative_eq!(lhs, rhs, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "De Morgan (antiproduct) failed: complement(a⋇b)={{:?}}, complement(a)*complement(b)={{:?}}",
                lhs, rhs
            );
        }}
    }}
"#,
                    name_lower = name_lower,
                    name = name,
                    sig = signature_name,
                )
            })
            .collect()
    }
5166
    /// Generate proptest suites checking that projection onto a normalized
    /// target is idempotent: `project(project(a, b), b) == project(a, b)`.
    ///
    /// Only ordered pairs with `grade(a) > grade(b) > 0` are tested, and the
    /// target is normalized via `Unitized` (degenerate metric) or `Unit`.
    fn generate_project_idempotency_tests_raw(&self) -> String {
        let sig = self.generate_signature_name().to_string();
        let is_degenerate = self.spec.signature.r > 0;
        let wrapper = if is_degenerate { "Unitized" } else { "Unit" };

        let single_grade_types: Vec<_> = self
            .spec
            .types
            .iter()
            .filter(|t| t.alias_of.is_none() && self.is_single_grade_blade(t))
            .collect();

        let mut result = String::new();
        for ty_a in &single_grade_types {
            for ty_b in &single_grade_types {
                // Grade = popcount of the first field's blade index; for
                // single-grade types every field shares that grade.
                let grade_a = ty_a
                    .fields
                    .first()
                    .map(|f| (f.blade_index as u32).count_ones() as usize)
                    .unwrap_or(0);
                let grade_b = ty_b
                    .fields
                    .first()
                    .map(|f| (f.blade_index as u32).count_ones() as usize)
                    .unwrap_or(0);

                // Only test projection from a strictly higher grade onto a
                // lower, non-scalar one.
                if grade_a <= grade_b || grade_a == 0 || grade_b == 0 {
                    continue;
                }

                let a_name = &ty_a.name;
                let b_name = &ty_b.name;
                let a_lower = ty_a.name.to_lowercase();
                let b_lower = ty_b.name.to_lowercase();

                result.push_str(&format!(
                    r#"
    proptest! {{
        /// Project idempotency with normalized target: project(project(a, unit_b), unit_b) == project(a, unit_b)
        #[test]
        fn project_idempotent_{a_lower}_{b_lower}(a in any::<{a_name}<f64>>(), unit_b in any::<{wrapper}<{b_name}<f64>>>()) {{
            let mv_a: Multivector<f64, {sig}> = a.into();
            let mv_b: Multivector<f64, {sig}> = unit_b.into_inner().into();

            let first = mv_a.project(&mv_b);
            let second = first.project(&mv_b);

            prop_assert!(
                relative_eq!(first, second, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Project idempotency failed: first={{:?}}, second={{:?}}",
                first, second
            );
        }}
    }}
"#,
                    a_lower = a_lower,
                    b_lower = b_lower,
                    a_name = a_name,
                    b_name = b_name,
                    wrapper = wrapper,
                    sig = sig,
                ));
            }
        }
        result
    }
5248
    /// Generate proptest suites checking that antiprojection onto a
    /// normalized target is idempotent:
    /// `antiproject(antiproject(a, b), b) == antiproject(a, b)`.
    ///
    /// Dual of `generate_project_idempotency_tests_raw`: only pairs with
    /// `0 < grade(a) < grade(b)` are tested.
    fn generate_antiproject_idempotency_tests_raw(&self) -> String {
        let sig = self.generate_signature_name().to_string();
        let is_degenerate = self.spec.signature.r > 0;
        let wrapper = if is_degenerate { "Unitized" } else { "Unit" };

        let single_grade_types: Vec<_> = self
            .spec
            .types
            .iter()
            .filter(|t| t.alias_of.is_none() && self.is_single_grade_blade(t))
            .collect();

        let mut result = String::new();
        for ty_a in &single_grade_types {
            for ty_b in &single_grade_types {
                // Grade = popcount of the first field's blade index; for
                // single-grade types every field shares that grade.
                let grade_a = ty_a
                    .fields
                    .first()
                    .map(|f| (f.blade_index as u32).count_ones() as usize)
                    .unwrap_or(0);
                let grade_b = ty_b
                    .fields
                    .first()
                    .map(|f| (f.blade_index as u32).count_ones() as usize)
                    .unwrap_or(0);

                // Only test antiprojection from a strictly lower, non-scalar
                // grade onto a higher one (grade_b > grade_a >= 1 here).
                if grade_a >= grade_b || grade_a == 0 {
                    continue;
                }

                let a_name = &ty_a.name;
                let b_name = &ty_b.name;
                let a_lower = ty_a.name.to_lowercase();
                let b_lower = ty_b.name.to_lowercase();

                result.push_str(&format!(
                    r#"
    proptest! {{
        /// Antiproject idempotency with normalized target: antiproject(antiproject(a, unit_b), unit_b) == antiproject(a, unit_b)
        #[test]
        fn antiproject_idempotent_{a_lower}_{b_lower}(a in any::<{a_name}<f64>>(), unit_b in any::<{wrapper}<{b_name}<f64>>>()) {{
            let mv_a: Multivector<f64, {sig}> = a.into();
            let mv_b: Multivector<f64, {sig}> = unit_b.into_inner().into();

            let first = mv_a.antiproject(&mv_b);
            let second = first.antiproject(&mv_b);

            prop_assert!(
                relative_eq!(first, second, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Antiproject idempotency failed: first={{:?}}, second={{:?}}",
                first, second
            );
        }}
    }}
"#,
                    a_lower = a_lower,
                    b_lower = b_lower,
                    a_name = a_name,
                    b_name = b_name,
                    wrapper = wrapper,
                    sig = sig,
                ));
            }
        }
        result
    }
5330
    /// Generate proptest suites checking that wrapper types delegate their
    /// norms correctly:
    /// - `Unit<T>` norms equal the inner value's norms (both 1.0) — emitted
    ///   only for positive-definite signatures (q == 0, r == 0) whose primary
    ///   involution is `Reverse`;
    /// - `Bulk<T>` bulk/weight norms match the inner value's — emitted only
    ///   for degenerate signatures (r > 0), for versor types with a nonzero
    ///   bulk norm.
    fn generate_wrapper_equivalence_tests_raw(&self) -> String {
        use crate::spec::InvolutionKind;

        let is_degenerate = self.spec.signature.r > 0;
        // `Unit` norm tests only make sense where the norm is positive
        // definite and built from the reverse involution.
        let has_positive_definite_norm = self.spec.signature.q == 0
            && self.spec.signature.r == 0
            && self.spec.norm.primary_involution == InvolutionKind::Reverse;

        let unit_tests: String = if has_positive_definite_norm {
            self.spec
                .types
                .iter()
                .filter(|t| t.alias_of.is_none())
                .filter(|t| {
                    !t.grades.is_empty()
                })
                .map(|ty| {
                    let name = &ty.name;
                    let name_lower = ty.name.to_lowercase();

                    format!(
                        r#"
    proptest! {{
        /// Unit<{name}>.norm() should equal inner's norm (both are 1.0).
        #[test]
        fn unit_{name_lower}_norm_matches_inner(u in any::<Unit<{name}<f64>>>()) {{
            // Use explicit trait syntax to specify the type
            let inner_norm = <{name}<f64> as Normed>::norm(u.as_inner());
            let wrapper_norm = <Unit<{name}<f64>> as Normed>::norm(&u);

            prop_assert!(
                relative_eq!(inner_norm, 1.0, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Inner norm should be 1.0, got {{}}", inner_norm
            );
            prop_assert!(
                relative_eq!(wrapper_norm, 1.0, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Wrapper norm should be 1.0, got {{}}", wrapper_norm
            );
            prop_assert!(
                relative_eq!(inner_norm, wrapper_norm, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Norms should match: {{}} vs {{}}", inner_norm, wrapper_norm
            );
        }}

        /// Unit<{name}>.norm_squared() should equal inner's norm_squared (both are 1.0).
        #[test]
        fn unit_{name_lower}_norm_squared_matches_inner(u in any::<Unit<{name}<f64>>>()) {{
            // Use explicit trait syntax to specify the type
            let inner_ns = <{name}<f64> as Normed>::norm_squared(u.as_inner());
            let wrapper_ns = <Unit<{name}<f64>> as Normed>::norm_squared(&u);

            prop_assert!(
                relative_eq!(inner_ns, 1.0, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Inner norm_squared should be 1.0, got {{}}", inner_ns
            );
            prop_assert!(
                relative_eq!(wrapper_ns, 1.0, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Wrapper norm_squared should be 1.0, got {{}}", wrapper_ns
            );
        }}
    }}
"#,
                        name = name,
                        name_lower = name_lower,
                    )
                })
                .collect()
        } else {
            String::new()
        };

        let bulk_tests: String = if is_degenerate {
            self.spec
                .types
                .iter()
                .filter(|t| t.alias_of.is_none())
                .filter(|t| t.versor.is_some())
                .filter(|t| self.has_nonzero_bulk_norm(t))
                .map(|ty| {
                    let name = &ty.name;
                    let name_lower = ty.name.to_lowercase();

                    format!(
                        r#"
    proptest! {{
        /// Bulk<{name}>.bulk_norm() should equal 1.0 (by definition of Bulk wrapper).
        #[test]
        fn bulk_{name_lower}_bulk_norm_matches_inner(b in any::<Bulk<{name}<f64>>>()) {{
            // Use explicit trait syntax to specify the type
            let inner_bulk = <{name}<f64> as DegenerateNormed>::bulk_norm(b.as_inner());
            let wrapper_bulk = <Bulk<{name}<f64>> as DegenerateNormed>::bulk_norm(&b);

            prop_assert!(
                relative_eq!(inner_bulk, 1.0, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Inner bulk_norm should be 1.0, got {{}}", inner_bulk
            );
            prop_assert!(
                relative_eq!(wrapper_bulk, 1.0, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Wrapper bulk_norm should be 1.0, got {{}}", wrapper_bulk
            );
        }}

        /// Bulk<{name}>.weight_norm() should match inner's weight_norm (delegation).
        #[test]
        fn bulk_{name_lower}_weight_norm_delegates(b in any::<Bulk<{name}<f64>>>()) {{
            // Use explicit trait syntax to specify the type
            let inner_weight = <{name}<f64> as DegenerateNormed>::weight_norm(b.as_inner());
            let wrapper_weight = <Bulk<{name}<f64>> as DegenerateNormed>::weight_norm(&b);

            prop_assert!(
                relative_eq!(inner_weight, wrapper_weight, epsilon = REL_EPSILON, max_relative = REL_EPSILON),
                "Weight norms should match: {{}} vs {{}}", inner_weight, wrapper_weight
            );
        }}
    }}
"#,
                        name = name,
                        name_lower = name_lower,
                    )
                })
                .collect()
        } else {
            String::new()
        };

        format!("{}{}", unit_tests, bulk_tests)
    }
5474}
5475
#[cfg(test)]
mod tests {
    use super::*;
    use crate::spec::parse_spec;

    /// Builds a `TraitsGenerator` over the shared euclidean3 fixture and
    /// returns the generated trait code rendered to a single string.
    ///
    /// Every test below previously duplicated this 5-line setup; centralizing
    /// it keeps the fixture consistent and makes each test a one-line check.
    /// Note: `TokenStream::to_string` inserts spaces between tokens, which is
    /// why the expected substrings look like `impl < T : Float > Add`.
    fn generated_code() -> String {
        let spec = parse_spec(include_str!("../../algebras/euclidean3.toml")).unwrap();
        let algebra = Algebra::euclidean(3);
        let table = ProductTable::new(&algebra);
        let generator = TraitsGenerator::new(&spec, &algebra, table);

        let (tokens, _tests) = generator.generate_traits_file();
        tokens.to_string()
    }

    #[test]
    fn symbolica_generates_add_impl() {
        assert!(generated_code().contains("impl < T : Float > Add for Vector"));
    }

    #[test]
    fn symbolica_generates_sub_impl() {
        assert!(generated_code().contains("impl < T : Float > Sub for Vector"));
    }

    #[test]
    fn symbolica_generates_neg_impl() {
        assert!(generated_code().contains("impl < T : Float > Neg for Vector"));
    }

    #[test]
    fn symbolica_generates_scalar_mul() {
        let code = generated_code();

        assert!(code.contains("Mul"));
        assert!(code.contains("for Vector"));
        assert!(code.contains("scale"));
    }

    #[test]
    fn symbolica_generates_geometric_mul() {
        let code = generated_code();

        assert!(
            code.contains("Mul") && code.contains("for Vector"),
            "Expected Mul trait impl for Vector"
        );
    }

    #[test]
    fn symbolica_generates_wedge() {
        let code = generated_code();

        assert!(
            code.contains("Wedge") && code.contains("for Vector"),
            "Expected Wedge trait impl for Vector, got:\n{}",
            // Truncate the failure dump; generated token text is ASCII, so
            // slicing at a fixed byte offset cannot split a char boundary.
            &code[..2000.min(code.len())]
        );
    }

    #[test]
    fn symbolica_generates_approx_impls() {
        let code = generated_code();

        assert!(code.contains("AbsDiffEq for Vector"));
        assert!(code.contains("RelativeEq for Vector"));
        assert!(code.contains("UlpsEq for Vector"));
    }

    #[test]
    fn symbolica_generates_arbitrary_impls() {
        assert!(generated_code().contains("impl < T : Float + Debug + 'static > Arbitrary for Vector"));
    }
}