#![allow(unused)]
#[cfg(target_arch = "x86_64")]
mod x86_tests {
#[cfg(feature = "avx512")]
use archmage::Avx512Token;
use archmage::{Avx2FmaToken, Desktop64, SimdToken, X64V3Token, arcane};
use std::arch::x86_64::*;
/// Doubles every lane of `data` by adding the vector to itself.
#[arcane]
fn double_values(token: X64V3Token, data: &[f32; 8]) -> [f32; 8] {
    let lanes = unsafe { _mm256_loadu_ps(data.as_ptr()) };
    let twice = _mm256_add_ps(lanes, lanes);
    let mut result = [0.0f32; 8];
    unsafe { _mm256_storeu_ps(result.as_mut_ptr(), twice) };
    result
}
/// A summoned X64V3 token drives `double_values` to double each lane.
#[test]
fn test_arcane_basic() {
    let Some(token) = X64V3Token::summon() else { return };
    let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
    let output = double_values(token, &input);
    assert_eq!(output, [2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0]);
}
/// Lane-wise fused multiply-add: `a * b + c`.
#[arcane]
fn fma_operation(token: Avx2FmaToken, a: &[f32; 8], b: &[f32; 8], c: &[f32; 8]) -> [f32; 8] {
    let mul_lhs = unsafe { _mm256_loadu_ps(a.as_ptr()) };
    let mul_rhs = unsafe { _mm256_loadu_ps(b.as_ptr()) };
    let addend = unsafe { _mm256_loadu_ps(c.as_ptr()) };
    let fused = _mm256_fmadd_ps(mul_lhs, mul_rhs, addend);
    let mut result = [0.0f32; 8];
    unsafe { _mm256_storeu_ps(result.as_mut_ptr(), fused) };
    result
}
/// 2 * 3 + 1 == 7 in every lane.
#[test]
fn test_arcane_fma() {
    let Some(token) = Avx2FmaToken::summon() else { return };
    let output = fma_operation(token, &[2.0f32; 8], &[3.0f32; 8], &[1.0f32; 8]);
    assert_eq!(output, [7.0f32; 8]);
}
/// Computes `v*v + v*v` (i.e. 2x^2) per lane: a mul followed by an fmadd.
#[arcane]
fn profile_token_test(token: X64V3Token, data: &[f32; 8]) -> [f32; 8] {
    let lanes = unsafe { _mm256_loadu_ps(data.as_ptr()) };
    let squares = _mm256_mul_ps(lanes, lanes);
    let doubled_squares = _mm256_fmadd_ps(lanes, lanes, squares);
    let mut result = [0.0f32; 8];
    unsafe { _mm256_storeu_ps(result.as_mut_ptr(), doubled_squares) };
    result
}
/// `profile_token_test` yields 2x^2 for each input lane.
#[test]
fn test_arcane_profile_token() {
    let Some(token) = X64V3Token::summon() else { return };
    let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
    let expected: [f32; 8] = input.map(|x| 2.0 * x * x);
    assert_eq!(profile_token_test(token, &input), expected);
}
/// Lane-wise `(a + b) * scale`.
#[arcane]
fn multi_param(token: X64V3Token, a: &[f32; 8], b: &[f32; 8], scale: f32) -> [f32; 8] {
    let lhs = unsafe { _mm256_loadu_ps(a.as_ptr()) };
    let rhs = unsafe { _mm256_loadu_ps(b.as_ptr()) };
    let factor = _mm256_set1_ps(scale);
    let scaled = _mm256_mul_ps(_mm256_add_ps(lhs, rhs), factor);
    let mut result = [0.0f32; 8];
    unsafe { _mm256_storeu_ps(result.as_mut_ptr(), scaled) };
    result
}
/// (1 + 2) * 3 == 9 in every lane.
#[test]
fn test_arcane_multi_param() {
    let Some(token) = X64V3Token::summon() else { return };
    let output = multi_param(token, &[1.0f32; 8], &[2.0f32; 8], 3.0);
    assert_eq!(output, [9.0f32; 8]);
}
/// Reduces all 8 lanes of `data` to a single f32.
///
/// `_mm256_hadd_ps` adds adjacent pairs *within each 128-bit half*, so two
/// rounds leave each half's total replicated across its lanes; the final
/// `_mm_add_ss` combines the low and high 128-bit halves into one scalar.
#[arcane]
fn horizontal_sum(token: X64V3Token, data: &[f32; 8]) -> f32 {
let v = unsafe { _mm256_loadu_ps(data.as_ptr()) };
let sum1 = _mm256_hadd_ps(v, v);
let sum2 = _mm256_hadd_ps(sum1, sum1);
// Split the 256-bit vector into its two 128-bit halves.
let low = _mm256_castps256_ps128(sum2);
let high = _mm256_extractf128_ps::<1>(sum2);
let final_sum = _mm_add_ss(low, high);
unsafe { _mm_cvtss_f32(final_sum) }
}
/// Horizontal sum of 1..=8 is 36; exercises a scalar (f32) return type.
#[test]
fn test_arcane_scalar_return() {
    let Some(token) = X64V3Token::summon() else { return };
    let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
    assert_eq!(horizontal_sum(token, &input), 36.0);
}
/// Combines `a` and `b` by value (no loads/stores): add and multiply, then
/// blend and shuffle the results.
///
/// Blend mask 0b10101010 selects `product` for odd lanes and `sum` for even
/// lanes; shuffle immediate 0b00_01_10_11 reverses the 4 lanes within each
/// 128-bit half.
#[arcane]
fn safe_value_ops(token: X64V3Token, a: __m256, b: __m256) -> __m256 {
let sum = _mm256_add_ps(a, b);
let product = _mm256_mul_ps(a, b);
let blended = _mm256_blend_ps::<0b10101010>(sum, product);
_mm256_shuffle_ps::<0b00_01_10_11>(blended, blended)
}
/// Smoke test: `__m256` values pass by value through an `#[arcane]` fn.
#[test]
fn test_arcane_value_ops() {
    let Some(token) = X64V3Token::summon() else { return };
    let lhs = unsafe { _mm256_set1_ps(1.0) };
    let rhs = unsafe { _mm256_set1_ps(2.0) };
    let _result = safe_value_ops(token, lhs, rhs);
}
/// Negates every lane (`0 - v`); the token parameter is a `_` wildcard.
#[arcane]
fn wildcard_negate(_: X64V3Token, data: &[f32; 8]) -> [f32; 8] {
    let lanes = unsafe { _mm256_loadu_ps(data.as_ptr()) };
    let negated = _mm256_sub_ps(_mm256_setzero_ps(), lanes);
    let mut result = [0.0f32; 8];
    unsafe { _mm256_storeu_ps(result.as_mut_ptr(), negated) };
    result
}
/// A wildcard token parameter still gates and runs the negation kernel.
#[test]
fn test_arcane_wildcard_token() {
    let Some(token) = X64V3Token::summon() else { return };
    let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
    let output = wildcard_negate(token, &input);
    assert_eq!(output, [-1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0]);
}
/// Doubles each of the 8 lanes (same contract as `double_values`).
#[arcane]
fn impl_trait_test(token: X64V3Token, data: &[f32; 8]) -> [f32; 8] {
    let mut result = [0.0f32; 8];
    let lanes = unsafe { _mm256_loadu_ps(data.as_ptr()) };
    let twice = _mm256_add_ps(lanes, lanes);
    unsafe { _mm256_storeu_ps(result.as_mut_ptr(), twice) };
    result
}
/// `impl_trait_test` doubles each lane under an X64V3 token.
#[test]
fn test_arcane_impl_trait() {
    let Some(token) = X64V3Token::summon() else { return };
    let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
    let output = impl_trait_test(token, &input);
    assert_eq!(output, [2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0]);
}
/// Same call as `test_arcane_impl_trait`, passing a concrete X64V3 token.
#[test]
fn test_arcane_impl_trait_accepts_x64v3() {
    let Some(token) = X64V3Token::summon() else { return };
    let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
    assert_eq!(
        impl_trait_test(token, &input),
        [2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0]
    );
}
/// Lane-wise doubling (v + v) of an 8-float array.
#[arcane]
fn generic_inline_bounds(token: X64V3Token, data: &[f32; 8]) -> [f32; 8] {
    let src = unsafe { _mm256_loadu_ps(data.as_ptr()) };
    let doubled = _mm256_add_ps(src, src);
    let mut dst = [0.0f32; 8];
    unsafe { _mm256_storeu_ps(dst.as_mut_ptr(), doubled) };
    dst
}
#[test]
fn test_arcane_generic_inline_bounds() {
    let Some(token) = X64V3Token::summon() else { return };
    let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
    let doubled = generic_inline_bounds(token, &input);
    assert_eq!(doubled, [2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0]);
}
/// Lane-wise doubling of an 8-float array.
#[arcane]
fn generic_where_clause(token: X64V3Token, data: &[f32; 8]) -> [f32; 8] {
    let mut output = [0.0f32; 8];
    let input_vec = unsafe { _mm256_loadu_ps(data.as_ptr()) };
    let sum = _mm256_add_ps(input_vec, input_vec);
    unsafe { _mm256_storeu_ps(output.as_mut_ptr(), sum) };
    output
}
#[test]
fn test_arcane_generic_where_clause() {
    let Some(token) = X64V3Token::summon() else { return };
    let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
    assert_eq!(
        generic_where_clause(token, &input),
        [2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0]
    );
}
/// Lane-wise `a * b + c` via FMA.
#[arcane]
fn impl_trait_multi_bounds(
    token: Avx2FmaToken,
    a: &[f32; 8],
    b: &[f32; 8],
    c: &[f32; 8],
) -> [f32; 8] {
    let lhs = unsafe { _mm256_loadu_ps(a.as_ptr()) };
    let rhs = unsafe { _mm256_loadu_ps(b.as_ptr()) };
    let bias = unsafe { _mm256_loadu_ps(c.as_ptr()) };
    let fused = _mm256_fmadd_ps(lhs, rhs, bias);
    let mut result = [0.0f32; 8];
    unsafe { _mm256_storeu_ps(result.as_mut_ptr(), fused) };
    result
}
/// 2 * 3 + 1 == 7 in every lane.
#[test]
fn test_arcane_impl_trait_multi_bounds() {
    let Some(token) = Avx2FmaToken::summon() else { return };
    let output = impl_trait_multi_bounds(token, &[2.0f32; 8], &[3.0f32; 8], &[1.0f32; 8]);
    assert_eq!(output, [7.0f32; 8]);
}
/// Lane-wise `a * b + c` via FMA.
#[arcane]
fn generic_multi_bounds(
    token: Avx2FmaToken,
    a: &[f32; 8],
    b: &[f32; 8],
    c: &[f32; 8],
) -> [f32; 8] {
    let mut result = [0.0f32; 8];
    let first = unsafe { _mm256_loadu_ps(a.as_ptr()) };
    let second = unsafe { _mm256_loadu_ps(b.as_ptr()) };
    let third = unsafe { _mm256_loadu_ps(c.as_ptr()) };
    let fused = _mm256_fmadd_ps(first, second, third);
    unsafe { _mm256_storeu_ps(result.as_mut_ptr(), fused) };
    result
}
/// 2 * 3 + 1 == 7 in every lane.
#[test]
fn test_arcane_generic_multi_bounds() {
    let Some(token) = Avx2FmaToken::summon() else { return };
    let output = generic_multi_bounds(token, &[2.0f32; 8], &[3.0f32; 8], &[1.0f32; 8]);
    assert_eq!(output, [7.0f32; 8]);
}
/// Lane-wise doubling of an 8-float array.
#[arcane]
fn lower_bound_test(token: X64V3Token, data: &[f32; 8]) -> [f32; 8] {
    let vec = unsafe { _mm256_loadu_ps(data.as_ptr()) };
    let twice = _mm256_add_ps(vec, vec);
    let mut buffer = [0.0f32; 8];
    unsafe { _mm256_storeu_ps(buffer.as_mut_ptr(), twice) };
    buffer
}
#[test]
fn test_arcane_lower_bound_accepts_higher_token() {
    let Some(token) = X64V3Token::summon() else { return };
    let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
    let output = lower_bound_test(token, &input);
    assert_eq!(output, [2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0]);
}
/// Computes `x*x + x` per lane with one fmadd; takes the `Desktop64` alias.
#[arcane]
fn desktop64_test(token: Desktop64, data: &[f32; 8]) -> [f32; 8] {
    let lanes = unsafe { _mm256_loadu_ps(data.as_ptr()) };
    let fused = _mm256_fmadd_ps(lanes, lanes, lanes);
    let mut result = [0.0f32; 8];
    unsafe { _mm256_storeu_ps(result.as_mut_ptr(), fused) };
    result
}
/// The `Desktop64` alias token drives `desktop64_test` (x^2 + x per lane).
#[test]
fn test_arcane_desktop64_alias() {
    let Some(token) = Desktop64::summon() else { return };
    let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
    let expected: [f32; 8] = input.map(|x| x * x + x);
    assert_eq!(desktop64_test(token, &input), expected);
}
/// A `Desktop64` token is accepted where `X64V3Token` is expected.
#[test]
fn test_desktop64_is_x64v3() {
    let Some(token) = Desktop64::summon() else { return };
    let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
    let expected: [f32; 8] = input.map(|x| 2.0 * x * x);
    assert_eq!(profile_token_test(token, &input), expected);
}
/// A `Desktop64` token also works with `impl_trait_test`.
#[test]
fn test_desktop64_with_impl_trait() {
    let Some(token) = Desktop64::summon() else { return };
    let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
    assert_eq!(
        impl_trait_test(token, &input),
        [2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0]
    );
}
/// Lane-wise doubling; only compiled when the `avx512` feature is enabled.
#[cfg(feature = "avx512")]
#[arcane]
fn server64_test(token: Avx512Token, data: &[f32; 8]) -> [f32; 8] {
    let lanes = unsafe { _mm256_loadu_ps(data.as_ptr()) };
    let twice = _mm256_add_ps(lanes, lanes);
    let mut result = [0.0f32; 8];
    unsafe { _mm256_storeu_ps(result.as_mut_ptr(), twice) };
    result
}
/// AVX-512 token gates `server64_test`; only built with the feature on.
#[cfg(feature = "avx512")]
#[test]
fn test_arcane_server64_alias() {
    let Some(token) = Avx512Token::summon() else { return };
    let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
    let output = server64_test(token, &input);
    assert_eq!(output, [2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0]);
}
/// Repeated `summon()` calls on the same token type must agree, and a
/// summoned `Desktop64` token must drive the aliased kernel correctly.
#[test]
fn test_summon_alias() {
    // BUG FIX: the original bound `via_summon` twice (the second binding
    // shadowed the first), so the assert compared a value with itself and
    // could never fail. Use two distinct bindings instead.
    let first = Desktop64::summon();
    let second = Desktop64::summon();
    assert_eq!(first.is_some(), second.is_some());
    if let Some(token) = Desktop64::summon() {
        let input = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
        let output = desktop64_test(token, &input);
        let expected: [f32; 8] = input.map(|x| x * x + x);
        assert_eq!(output, expected);
    }
}
/// Thin wrapper around 8 f32 lanes, used to exercise method receivers
/// with `#[arcane]`.
#[derive(Clone, Copy, Debug, PartialEq)]
struct SimdVec8([f32; 8]);

impl SimdVec8 {
    /// Wraps a raw lane array.
    fn new(data: [f32; 8]) -> Self {
        SimdVec8(data)
    }

    /// Borrows the underlying lane array.
    fn as_array(&self) -> &[f32; 8] {
        &self.0
    }
}
/// SIMD operations gated on an x86-64-v3 capability token.
trait SimdOps {
// Returns a doubled copy; borrows the receiver.
fn double(&self, token: X64V3Token) -> Self;
// Consumes the receiver and returns the lane-wise square.
fn square(self, token: X64V3Token) -> Self;
// Multiplies every lane by `factor` in place.
fn scale(&mut self, token: X64V3Token, factor: f32);
}
// The `#[arcane(_self = SimdVec8)]` attribute makes the receiver available
// under the name `_self` inside each body (as used below) — the macro
// appears to rewrite `self` for its expansion; see the archmage docs.
impl SimdOps for SimdVec8 {
/// Doubles each lane (&self receiver).
#[arcane(_self = SimdVec8)]
fn double(&self, _token: X64V3Token) -> Self {
let v = unsafe { _mm256_loadu_ps(_self.0.as_ptr()) };
let doubled = _mm256_add_ps(v, v);
let mut out = [0.0f32; 8];
unsafe { _mm256_storeu_ps(out.as_mut_ptr(), doubled) };
SimdVec8(out)
}
/// Squares each lane (owned `self` receiver).
#[arcane(_self = SimdVec8)]
fn square(self, _token: X64V3Token) -> Self {
let v = unsafe { _mm256_loadu_ps(_self.0.as_ptr()) };
let squared = _mm256_mul_ps(v, v);
let mut out = [0.0f32; 8];
unsafe { _mm256_storeu_ps(out.as_mut_ptr(), squared) };
SimdVec8(out)
}
/// Multiplies each lane by `factor` in place (&mut self receiver).
#[arcane(_self = SimdVec8)]
fn scale(&mut self, _token: X64V3Token, factor: f32) {
let v = unsafe { _mm256_loadu_ps(_self.0.as_ptr()) };
let scale = _mm256_set1_ps(factor);
let scaled = _mm256_mul_ps(v, scale);
unsafe { _mm256_storeu_ps(_self.0.as_mut_ptr(), scaled) };
}
}
/// `&self` receiver: `double` returns a doubled copy.
#[test]
fn test_self_receiver_ref() {
    let Some(token) = Desktop64::summon() else { return };
    let vec = SimdVec8::new([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]);
    let doubled = vec.double(token);
    assert_eq!(
        doubled.as_array(),
        &[2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0]
    );
}
/// Owned `self` receiver: `square` consumes and returns lane squares.
#[test]
fn test_self_receiver_owned() {
    let Some(token) = Desktop64::summon() else { return };
    let vec = SimdVec8::new([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]);
    let squared = vec.square(token);
    assert_eq!(
        squared.as_array(),
        &[1.0, 4.0, 9.0, 16.0, 25.0, 36.0, 49.0, 64.0]
    );
}
/// `&mut self` receiver: `scale` mutates the lanes in place.
#[test]
fn test_self_receiver_mut_ref() {
    let Some(token) = Desktop64::summon() else { return };
    let mut vec = SimdVec8::new([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]);
    vec.scale(token, 2.0);
    assert_eq!(vec.as_array(), &[2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0]);
}
/// Builds an N-element array with every slot set to `val`; exercises
/// `#[arcane]` on a function with a const generic return size.
#[arcane]
fn fill_array<const N: usize>(token: X64V3Token, val: f32) -> [f32; N] {
[val; N]
}
/// Const-generic N is inferred from the annotated binding.
#[test]
fn test_arcane_const_generic() {
    let Some(token) = X64V3Token::summon() else { return };
    let filled: [f32; 4] = fill_array(token, 3.14);
    assert_eq!(filled, [3.14; 4]);
}
/// Sums `data` while walking it in CHUNK-sized windows. Flattening the
/// chunks preserves the original element order, so the left-to-right f32
/// accumulation is identical to a plain loop over `data`.
#[arcane]
fn sum_chunks<const CHUNK: usize>(token: X64V3Token, data: &[f32]) -> f32 {
    data.chunks(CHUNK).flat_map(|block| block.iter()).copied().sum()
}
/// Sum of 1..=8 through CHUNK=4 windows is 36 (within float tolerance).
#[test]
fn test_arcane_const_generic_body_only() {
    let Some(token) = X64V3Token::summon() else { return };
    let data = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
    let result = sum_chunks::<4>(token, &data);
    assert!(
        (result - 36.0).abs() < 1e-6,
        "arcane const generic: {result}"
    );
}
/// Copies `src` row-major into a ROWS x COLS tile; positions past the end
/// of `src` are left at 0.0.
#[arcane]
fn tile_copy<const ROWS: usize, const COLS: usize>(
    token: X64V3Token,
    src: &[f32],
) -> [[f32; COLS]; ROWS] {
    let mut tiles = [[0.0f32; COLS]; ROWS];
    for (row_idx, row) in tiles.iter_mut().enumerate() {
        for (col_idx, cell) in row.iter_mut().enumerate() {
            // Linear index into the flat source slice.
            if let Some(&value) = src.get(row_idx * COLS + col_idx) {
                *cell = value;
            }
        }
    }
    tiles
}
/// Six elements fill a 2x3 tile exactly, row-major.
#[test]
fn test_arcane_const_generic_multiple() {
    let Some(token) = X64V3Token::summon() else { return };
    let data = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0];
    assert_eq!(
        tile_copy::<2, 3>(token, &data),
        [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
    );
}
/// Holds the multiplier applied to each chunk sum in `process_chunks`.
struct ChunkProcessor {
// Factor multiplied onto every per-chunk sum.
scale: f32,
}
impl ChunkProcessor {
/// Sums each CHUNK-sized window of `data` and multiplies it by the
/// stored scale. The `#[arcane(_self = ChunkProcessor)]` attribute makes
/// the receiver available as `_self` inside the body (as used below).
#[arcane(_self = ChunkProcessor)]
fn process_chunks<const CHUNK: usize>(&self, token: X64V3Token, data: &[f32]) -> Vec<f32> {
data.chunks(CHUNK)
.map(|c| c.iter().sum::<f32>() * _self.scale)
.collect()
}
}
/// Chunk sums (3, 7) scaled by 2.0 give (6, 14).
#[test]
fn test_arcane_const_generic_nested_self() {
    let Some(token) = X64V3Token::summon() else { return };
    let processor = ChunkProcessor { scale: 2.0 };
    let sums = processor.process_chunks::<2>(token, &[1.0f32, 2.0, 3.0, 4.0]);
    assert_eq!(sums, vec![6.0, 14.0]);
}
/// Copies up to N leading elements of `data` into a fixed-size array;
/// any slots beyond `data.len()` keep `T::default()`.
#[arcane]
fn first_elements<'a, const N: usize, T: Copy + Default>(
    token: X64V3Token,
    data: &'a [T],
) -> [T; N] {
    let mut head = [T::default(); N];
    // zip stops at min(N, data.len()), matching the original bound check.
    for (slot, &value) in head.iter_mut().zip(data.iter()) {
        *slot = value;
    }
    head
}
/// N=3 is inferred from the annotated binding; only the first 3 copied.
#[test]
fn test_arcane_const_generic_with_lifetime_and_type() {
    let Some(token) = X64V3Token::summon() else { return };
    let data = [10i32, 20, 30, 40, 50];
    let head: [i32; 3] = first_elements(token, &data);
    assert_eq!(head, [10, 20, 30]);
}
}
#[cfg(target_arch = "x86_64")]
mod cross_arch_cfgout_tests {
    use archmage::{NeonToken, SimdToken, arcane};

    /// An `#[arcane]` function taking an ARM NEON token, declared inside an
    /// x86_64-only module.
    #[arcane]
    fn arm_function_cfgout(_token: NeonToken, data: &[f32]) -> f32 {
        data.iter().copied().sum()
    }

    /// On x86_64 a NEON token can never be summoned.
    #[test]
    fn cfgout_function_not_callable() {
        assert!(NeonToken::summon().is_none());
    }
}
#[cfg(target_arch = "aarch64")]
mod cross_arch_cfgout_tests_arm {
    use archmage::{SimdToken, X64V3Token, arcane};

    /// An `#[arcane]` function taking an x86 token, declared inside an
    /// aarch64-only module.
    #[arcane]
    fn x86_function_cfgout(_token: X64V3Token, data: &[f32]) -> f32 {
        data.iter().copied().sum()
    }

    /// On aarch64 an x86-64-v3 token can never be summoned.
    #[test]
    fn cfgout_function_not_callable() {
        assert!(X64V3Token::summon().is_none());
    }
}
mod scalar_token_tests {
    use archmage::{ScalarToken, SimdToken};

    /// The scalar fallback token is available on every target.
    #[test]
    fn scalar_token_always_available() {
        assert!(ScalarToken::summon().is_some());
    }

    #[test]
    fn scalar_token_compiled_with() {
        assert_eq!(ScalarToken::compiled_with(), Some(true));
    }

    #[test]
    fn scalar_token_name() {
        assert_eq!(ScalarToken::NAME, "Scalar");
    }

    /// A token duplicates by plain assignment, i.e. it is `Copy`.
    #[test]
    fn scalar_token_is_copy() {
        let original = ScalarToken::summon().unwrap();
        let duplicate = original;
        let _ = original; // still usable after the copy
        let _ = duplicate;
    }

    /// ScalarToken is a unit struct and can be built without `summon`.
    #[test]
    fn scalar_token_can_be_constructed_directly() {
        let _token: ScalarToken = ScalarToken;
    }
}
// Tests for `IntoConcreteToken`: downcasting a token to exactly one
// concrete token type, with every other `as_*` accessor returning None.
mod into_concrete_token_tests {
use archmage::{IntoConcreteToken, ScalarToken, SimdToken, X64V2Token, X64V3Token};
// A scalar token downcasts only to scalar.
#[test]
fn scalar_token_as_scalar() {
let token = ScalarToken;
assert!(token.as_scalar().is_some());
assert!(token.as_x64v2().is_none());
assert!(token.as_x64v3().is_none());
assert!(token.as_neon().is_none());
assert!(token.as_wasm128().is_none());
}
// A v2 token downcasts only to v2 — not even to the "higher" v3.
#[cfg(target_arch = "x86_64")]
#[test]
fn x64v2_token_as_x64v2() {
if let Some(token) = X64V2Token::summon() {
assert!(token.as_x64v2().is_some());
assert!(token.as_x64v3().is_none());
assert!(token.as_scalar().is_none());
}
}
// Symmetrically, a v3 token downcasts only to v3.
#[cfg(target_arch = "x86_64")]
#[test]
fn x64v3_token_as_x64v3() {
if let Some(token) = X64V3Token::summon() {
assert!(token.as_x64v3().is_some());
assert!(token.as_x64v2().is_none());
assert!(token.as_scalar().is_none());
}
}
// Generic dispatch over any concrete token. Every branch computes the
// same plain sum — the point is that the cfg-gated downcast chain
// compiles and runs, not that the branches differ.
fn dispatch_sum<T: IntoConcreteToken>(token: T, data: &[f32]) -> f32 {
if token.as_scalar().is_some() {
return data.iter().sum();
}
#[cfg(target_arch = "x86_64")]
if token.as_x64v3().is_some() {
return data.iter().sum();
}
#[cfg(target_arch = "x86_64")]
if token.as_x64v2().is_some() {
return data.iter().sum();
}
data.iter().sum()
}
#[test]
fn generic_dispatch_with_scalar() {
let result = dispatch_sum(ScalarToken, &[1.0, 2.0, 3.0, 4.0]);
assert_eq!(result, 10.0);
}
#[cfg(target_arch = "x86_64")]
#[test]
fn generic_dispatch_with_x64v3() {
if let Some(token) = X64V3Token::summon() {
let result = dispatch_sum(token, &[1.0, 2.0, 3.0, 4.0]);
assert_eq!(result, 10.0);
}
}
}
// Tests for the attribute aliases (`token_target_features`,
// `token_target_features_boundary`) and the `dispatch_variant!` macro.
#[cfg(target_arch = "x86_64")]
mod alias_tests {
use archmage::{
Desktop64, ScalarToken, SimdToken, X64V3Token, dispatch_variant, token_target_features,
token_target_features_boundary,
};
use std::arch::x86_64::*;
/// Lane-wise a + b, compiled through the `_boundary` attribute alias.
#[token_target_features_boundary]
fn add_aliased(token: X64V3Token, a: &[f32; 8], b: &[f32; 8]) -> [f32; 8] {
let va = unsafe { _mm256_loadu_ps(a.as_ptr()) };
let vb = unsafe { _mm256_loadu_ps(b.as_ptr()) };
let sum = _mm256_add_ps(va, vb);
let mut out = [0.0f32; 8];
unsafe { _mm256_storeu_ps(out.as_mut_ptr(), sum) };
out
}
/// Vector doubling through the non-boundary attribute alias.
#[token_target_features]
fn helper_aliased(_token: X64V3Token, v: __m256) -> __m256 {
_mm256_add_ps(v, v)
}
// NOTE(review): this cfg is redundant — the enclosing module is already
// gated on x86_64.
#[cfg(target_arch = "x86_64")]
fn sum_aliased_v3(token: X64V3Token, data: &[f32]) -> f32 {
data.iter().sum()
}
fn sum_aliased_scalar(_token: ScalarToken, data: &[f32]) -> f32 {
data.iter().sum()
}
// `dispatch_variant!` presumably resolves variants by name suffix, i.e.
// `sum_aliased` + [v3, scalar] -> `sum_aliased_v3` / `sum_aliased_scalar`
// above — do not rename those helpers independently.
fn sum_aliased(data: &[f32]) -> f32 {
dispatch_variant!(sum_aliased(data), [v3, scalar])
}
#[test]
fn test_token_target_features_boundary_alias() {
if let Some(token) = X64V3Token::summon() {
let a = [1.0f32; 8];
let b = [2.0f32; 8];
let out = add_aliased(token, &a, &b);
assert_eq!(out, [3.0; 8]);
}
}
#[test]
fn test_dispatch_variant_alias() {
let result = sum_aliased(&[1.0, 2.0, 3.0]);
assert_eq!(result, 6.0);
}
}