// runmat_runtime/builtins/math/poly/polyder.rs
1//! MATLAB-compatible `polyder` builtin with GPU-aware semantics for RunMat.
2
3use log::trace;
4use num_complex::Complex64;
5use runmat_builtins::{ComplexTensor, Tensor, Value};
6use runmat_macros::runtime_builtin;
7
8use crate::builtins::common::random_args::complex_tensor_into_value;
9use crate::builtins::common::spec::{
10    BroadcastSemantics, BuiltinFusionSpec, BuiltinGpuSpec, ConstantStrategy, GpuOpKind,
11    ProviderHook, ReductionNaN, ResidencyPolicy, ScalarType, ShapeRequirements,
12};
13use crate::builtins::common::{tensor, tensor::tensor_into_value};
14use crate::builtins::math::poly::type_resolvers::polyder_type;
15use crate::dispatcher;
16use crate::{build_runtime_error, BuiltinResult, RuntimeError};
17
/// Magnitude below which a coefficient is treated as zero: used when trimming
/// leading zeros and when deciding whether a result collapses to a real tensor.
const EPS: f64 = 1.0e-12;
/// Builtin name attached to every `RuntimeError` raised from this module.
const BUILTIN_NAME: &str = "polyder";
20
/// GPU dispatch metadata for `polyder`: providers may expose custom hooks for
/// the single, product, and quotient forms; execution falls back to the host
/// whenever a hook is unavailable or fails.
#[runmat_macros::register_gpu_spec(builtin_path = "crate::builtins::math::poly::polyder")]
pub const GPU_SPEC: BuiltinGpuSpec = BuiltinGpuSpec {
    name: "polyder",
    op_kind: GpuOpKind::Custom("polynomial-derivative"),
    supported_precisions: &[ScalarType::F32, ScalarType::F64],
    broadcast: BroadcastSemantics::None,
    provider_hooks: &[
        ProviderHook::Custom("polyder-single"),
        ProviderHook::Custom("polyder-product"),
        ProviderHook::Custom("polyder-quotient"),
    ],
    constant_strategy: ConstantStrategy::InlineLiteral,
    // Device results come back as fresh handles rather than in-place updates.
    residency: ResidencyPolicy::NewHandle,
    nan_mode: ReductionNaN::Include,
    two_pass_threshold: None,
    workgroup_size: None,
    accepts_nan_mode: false,
    notes: "Runs on-device when providers expose polyder hooks; falls back to the host for complex coefficients or unsupported shapes.",
};
40
41fn polyder_error(message: impl Into<String>) -> RuntimeError {
42    build_runtime_error(message)
43        .with_builtin(BUILTIN_NAME)
44        .build()
45}
46
/// Fusion-planner metadata: `polyder` rewrites coefficient vectors
/// symbolically, so it offers no elementwise or reduction kernel to fuse.
#[runmat_macros::register_fusion_spec(builtin_path = "crate::builtins::math::poly::polyder")]
pub const FUSION_SPEC: BuiltinFusionSpec = BuiltinFusionSpec {
    name: "polyder",
    shape: ShapeRequirements::Any,
    constant_strategy: ConstantStrategy::InlineLiteral,
    elementwise: None,
    reduction: None,
    emits_nan: false,
    notes: "Symbolic operation on coefficient vectors; fusion bypasses this builtin.",
};
57
58#[runtime_builtin(
59    name = "polyder",
60    category = "math/poly",
61    summary = "Differentiate polynomials, products, and ratios with MATLAB-compatible coefficient vectors.",
62    keywords = "polyder,polynomial,derivative,product,quotient",
63    type_resolver(polyder_type),
64    builtin_path = "crate::builtins::math::poly::polyder"
65)]
66async fn polyder_builtin(first: Value, rest: Vec<Value>) -> crate::BuiltinResult<Value> {
67    if let Some(out_count) = crate::output_count::current_output_count() {
68        if out_count <= 1 {
69            let result = match rest.len() {
70                0 => derivative_single(first).await,
71                1 => derivative_product(first, rest.into_iter().next().unwrap()).await,
72                _ => Err(polyder_error("polyder: too many input arguments")),
73            }?;
74            if out_count == 0 {
75                return Ok(Value::OutputList(Vec::new()));
76            }
77            return Ok(Value::OutputList(vec![result]));
78        }
79        if rest.len() != 1 {
80            return Err(polyder_error(
81                "Not enough input arguments for quotient form.",
82            ));
83        }
84        let eval = evaluate_quotient(first, rest.into_iter().next().unwrap()).await?;
85        let outputs = vec![eval.numerator(), eval.denominator()];
86        return Ok(crate::output_count::output_list_with_padding(
87            out_count, outputs,
88        ));
89    }
90    match rest.len() {
91        0 => derivative_single(first).await,
92        1 => derivative_product(first, rest.into_iter().next().unwrap()).await,
93        _ => Err(polyder_error("polyder: too many input arguments")),
94    }
95}
96
97async fn try_gpu_derivative_single(value: &Value) -> BuiltinResult<Option<Value>> {
98    let Value::GpuTensor(handle) = value else {
99        return Ok(None);
100    };
101    let Some(provider) = runmat_accelerate_api::provider() else {
102        return Ok(None);
103    };
104    match provider.polyder_single(handle).await {
105        Ok(out) => Ok(Some(Value::GpuTensor(out))),
106        Err(err) => {
107            trace!("polyder: provider polyder_single fallback: {err}");
108            Ok(None)
109        }
110    }
111}
112
113async fn try_gpu_derivative_product(first: &Value, second: &Value) -> BuiltinResult<Option<Value>> {
114    match (first, second) {
115        (Value::GpuTensor(p), Value::GpuTensor(q)) => {
116            let Some(provider) = runmat_accelerate_api::provider() else {
117                return Ok(None);
118            };
119            match provider.polyder_product(p, q).await {
120                Ok(out) => Ok(Some(Value::GpuTensor(out))),
121                Err(err) => {
122                    trace!("polyder: provider polyder_product fallback: {err}");
123                    Ok(None)
124                }
125            }
126        }
127        _ => Ok(None),
128    }
129}
130
131async fn try_gpu_quotient(u: &Value, v: &Value) -> BuiltinResult<Option<PolyderEval>> {
132    match (u, v) {
133        (Value::GpuTensor(uh), Value::GpuTensor(vh)) => {
134            let Some(provider) = runmat_accelerate_api::provider() else {
135                return Ok(None);
136            };
137            match provider.polyder_quotient(uh, vh).await {
138                Ok(result) => Ok(Some(PolyderEval {
139                    numerator: Value::GpuTensor(result.numerator),
140                    denominator: Value::GpuTensor(result.denominator),
141                })),
142                Err(err) => {
143                    trace!("polyder: provider polyder_quotient fallback: {err}");
144                    Ok(None)
145                }
146            }
147        }
148        _ => Ok(None),
149    }
150}
151
152/// Evaluate the quotient rule derivative `[num, den] = polyder(u, v)`.
153pub async fn evaluate_quotient(u: Value, v: Value) -> BuiltinResult<PolyderEval> {
154    if let Some(eval) = try_gpu_quotient(&u, &v).await? {
155        return Ok(eval);
156    }
157    let u_poly = parse_polynomial("polyder", "U", u).await?;
158    let v_poly = parse_polynomial("polyder", "V", v).await?;
159    let numerator = quotient_numerator(&u_poly, &v_poly)?;
160    let denominator = quotient_denominator(&v_poly)?;
161    Ok(PolyderEval {
162        numerator,
163        denominator,
164    })
165}
166
/// Differentiated outputs for the quotient rule.
#[derive(Clone)]
pub struct PolyderEval {
    // Coefficients of `u' * v - u * v'`; host tensor or GPU handle.
    numerator: Value,
    // Coefficients of `v^2`; host tensor or GPU handle.
    denominator: Value,
}
173
impl PolyderEval {
    /// Numerator coefficients of the derivative `(u' * v - u * v')`.
    /// Returns a clone so the evaluation can be queried repeatedly.
    pub fn numerator(&self) -> Value {
        self.numerator.clone()
    }

    /// Denominator coefficients `v^2`.
    /// Returns a clone so the evaluation can be queried repeatedly.
    pub fn denominator(&self) -> Value {
        self.denominator.clone()
    }
}
185
186pub async fn derivative_single(value: Value) -> BuiltinResult<Value> {
187    if let Some(out) = try_gpu_derivative_single(&value).await? {
188        return Ok(out);
189    }
190    let poly = parse_polynomial("polyder", "P", value).await?;
191    differentiate_polynomial(&poly)
192}
193
194pub async fn derivative_product(first: Value, second: Value) -> BuiltinResult<Value> {
195    if let Some(out) = try_gpu_derivative_product(&first, &second).await? {
196        return Ok(out);
197    }
198    let p = parse_polynomial("polyder", "P", first).await?;
199    let q = parse_polynomial("polyder", "A", second).await?;
200    product_derivative(&p, &q)
201}
202
203fn quotient_numerator(u: &Polynomial, v: &Polynomial) -> BuiltinResult<Value> {
204    let du = raw_derivative(&u.coeffs);
205    let dv = raw_derivative(&v.coeffs);
206    let term1 = poly_convolve(&du, &v.coeffs);
207    let term2 = poly_convolve(&u.coeffs, &dv);
208    let mut numerator = poly_sub(&term1, &term2);
209    numerator = trim_leading_zeros(&numerator);
210    coeffs_to_value(&numerator, u.orientation)
211}
212
213fn quotient_denominator(v: &Polynomial) -> BuiltinResult<Value> {
214    let mut denominator = poly_convolve(&v.coeffs, &v.coeffs);
215    denominator = trim_leading_zeros(&denominator);
216    coeffs_to_value(&denominator, v.orientation)
217}
218
219fn differentiate_polynomial(poly: &Polynomial) -> BuiltinResult<Value> {
220    let mut coeffs = raw_derivative(&poly.coeffs);
221    coeffs = trim_leading_zeros(&coeffs);
222    coeffs_to_value(&coeffs, poly.orientation)
223}
224
225fn product_derivative(p: &Polynomial, q: &Polynomial) -> BuiltinResult<Value> {
226    let dp = raw_derivative(&p.coeffs);
227    let dq = raw_derivative(&q.coeffs);
228    let term1 = poly_convolve(&dp, &q.coeffs);
229    let term2 = poly_convolve(&p.coeffs, &dq);
230    let mut result = poly_add(&term1, &term2);
231    result = trim_leading_zeros(&result);
232    coeffs_to_value(&result, p.orientation)
233}
234
235fn raw_derivative(coeffs: &[Complex64]) -> Vec<Complex64> {
236    if coeffs.len() <= 1 {
237        return vec![Complex64::new(0.0, 0.0)];
238    }
239    let mut output = Vec::with_capacity(coeffs.len() - 1);
240    let mut power = coeffs.len() - 1;
241    for coeff in coeffs.iter().take(coeffs.len() - 1) {
242        output.push(*coeff * (power as f64));
243        power -= 1;
244    }
245    output
246}
247
248fn poly_convolve(a: &[Complex64], b: &[Complex64]) -> Vec<Complex64> {
249    if a.is_empty() || b.is_empty() {
250        return Vec::new();
251    }
252    let mut result = vec![Complex64::new(0.0, 0.0); a.len() + b.len() - 1];
253    for (i, &ai) in a.iter().enumerate() {
254        for (j, &bj) in b.iter().enumerate() {
255            result[i + j] += ai * bj;
256        }
257    }
258    result
259}
260
261fn poly_add(a: &[Complex64], b: &[Complex64]) -> Vec<Complex64> {
262    let len = a.len().max(b.len());
263    let mut result = vec![Complex64::new(0.0, 0.0); len];
264    for (idx, &value) in a.iter().enumerate() {
265        result[len - a.len() + idx] += value;
266    }
267    for (idx, &value) in b.iter().enumerate() {
268        result[len - b.len() + idx] += value;
269    }
270    result
271}
272
273fn poly_sub(a: &[Complex64], b: &[Complex64]) -> Vec<Complex64> {
274    let len = a.len().max(b.len());
275    let mut result = vec![Complex64::new(0.0, 0.0); len];
276    for (idx, &value) in a.iter().enumerate() {
277        result[len - a.len() + idx] += value;
278    }
279    for (idx, &value) in b.iter().enumerate() {
280        result[len - b.len() + idx] -= value;
281    }
282    result
283}
284
285fn trim_leading_zeros(coeffs: &[Complex64]) -> Vec<Complex64> {
286    let mut first = None;
287    for (idx, coeff) in coeffs.iter().enumerate() {
288        if coeff.norm() > EPS {
289            first = Some(idx);
290            break;
291        }
292    }
293    match first {
294        Some(idx) => coeffs[idx..].to_vec(),
295        None => vec![Complex64::new(0.0, 0.0)],
296    }
297}
298
299fn coeffs_to_value(coeffs: &[Complex64], orientation: Orientation) -> BuiltinResult<Value> {
300    if coeffs.iter().all(|c| c.im.abs() <= EPS) {
301        let data: Vec<f64> = coeffs.iter().map(|c| c.re).collect();
302        let shape = orientation.shape_for_len(data.len());
303        let tensor =
304            Tensor::new(data, shape).map_err(|e| polyder_error(format!("polyder: {e}")))?;
305        Ok(tensor_into_value(tensor))
306    } else {
307        let data: Vec<(f64, f64)> = coeffs.iter().map(|c| (c.re, c.im)).collect();
308        let shape = orientation.shape_for_len(data.len());
309        let tensor =
310            ComplexTensor::new(data, shape).map_err(|e| polyder_error(format!("polyder: {e}")))?;
311        Ok(complex_tensor_into_value(tensor))
312    }
313}
314
315async fn parse_polynomial(context: &str, label: &str, value: Value) -> BuiltinResult<Polynomial> {
316    let gathered = dispatcher::gather_if_needed_async(&value).await?;
317    let (coeffs, orientation) = match gathered {
318        Value::Tensor(tensor) => {
319            ensure_vector_shape(context, label, &tensor.shape)?;
320            let orientation = orientation_from_shape(&tensor.shape);
321            if tensor.data.is_empty() {
322                (vec![Complex64::new(0.0, 0.0)], orientation)
323            } else {
324                (
325                    tensor
326                        .data
327                        .into_iter()
328                        .map(|re| Complex64::new(re, 0.0))
329                        .collect(),
330                    orientation,
331                )
332            }
333        }
334        Value::ComplexTensor(tensor) => {
335            ensure_vector_shape(context, label, &tensor.shape)?;
336            let orientation = orientation_from_shape(&tensor.shape);
337            if tensor.data.is_empty() {
338                (vec![Complex64::new(0.0, 0.0)], orientation)
339            } else {
340                (
341                    tensor
342                        .data
343                        .into_iter()
344                        .map(|(re, im)| Complex64::new(re, im))
345                        .collect(),
346                    orientation,
347                )
348            }
349        }
350        Value::LogicalArray(logical) => {
351            let tensor = tensor::logical_to_tensor(&logical).map_err(polyder_error)?;
352            ensure_vector_shape(context, label, &tensor.shape)?;
353            let orientation = orientation_from_shape(&tensor.shape);
354            if tensor.data.is_empty() {
355                (vec![Complex64::new(0.0, 0.0)], orientation)
356            } else {
357                (
358                    tensor
359                        .data
360                        .into_iter()
361                        .map(|re| Complex64::new(re, 0.0))
362                        .collect(),
363                    orientation,
364                )
365            }
366        }
367        Value::Num(n) => (vec![Complex64::new(n, 0.0)], Orientation::Scalar),
368        Value::Int(i) => (vec![Complex64::new(i.to_f64(), 0.0)], Orientation::Scalar),
369        Value::Bool(b) => (
370            vec![Complex64::new(if b { 1.0 } else { 0.0 }, 0.0)],
371            Orientation::Scalar,
372        ),
373        Value::Complex(re, im) => (vec![Complex64::new(re, im)], Orientation::Scalar),
374        other => {
375            return Err(polyder_error(format!(
376                "{context}: expected {label} to be a numeric vector, got {other:?}"
377            )));
378        }
379    };
380
381    Ok(Polynomial {
382        coeffs,
383        orientation,
384    })
385}
386
387fn ensure_vector_shape(context: &str, label: &str, shape: &[usize]) -> BuiltinResult<()> {
388    let non_unit = shape.iter().copied().filter(|&dim| dim > 1).count();
389    if non_unit <= 1 {
390        Ok(())
391    } else {
392        Err(polyder_error(format!(
393            "{context}: {label} must be a vector of coefficients"
394        )))
395    }
396}
397
/// Orientation of the input coefficient vector, used to shape outputs.
#[derive(Clone, Copy)]
enum Orientation {
    // Scalar input (no dimension larger than 1); outputs shape like rows.
    Scalar,
    // Row vector input (first non-unit dimension is the column axis).
    Row,
    // Column vector input (first non-unit dimension is the row axis).
    Column,
}
404
405impl Orientation {
406    fn shape_for_len(self, len: usize) -> Vec<usize> {
407        if len <= 1 {
408            return vec![1, 1];
409        }
410        match self {
411            Orientation::Scalar => vec![1, len],
412            Orientation::Row => vec![1, len],
413            Orientation::Column => vec![len, 1],
414        }
415    }
416}
417
418fn orientation_from_shape(shape: &[usize]) -> Orientation {
419    for (idx, &dim) in shape.iter().enumerate() {
420        if dim != 1 {
421            return match idx {
422                0 => Orientation::Column,
423                1 => Orientation::Row,
424                _ => Orientation::Column,
425            };
426        }
427    }
428    Orientation::Scalar
429}
430
/// Parsed polynomial: coefficients in descending powers plus the input's
/// row/column orientation (so results can be shaped to match).
#[derive(Clone)]
struct Polynomial {
    coeffs: Vec<Complex64>,
    orientation: Orientation,
}
436
#[cfg(test)]
pub(crate) mod tests {
    use super::*;
    use crate::builtins::common::test_support;
    use futures::executor::block_on;
    use runmat_builtins::{IntValue, Tensor};

    // Assert that a runtime error's message mentions `needle`.
    fn assert_error_contains(err: crate::RuntimeError, needle: &str) {
        assert!(
            err.message().contains(needle),
            "expected error containing '{needle}', got '{}'",
            err.message()
        );
    }

    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn derivative_of_cubic_polynomial_is_correct() {
        // d/dx (3x^3 - 2x^2 + 5x + 7) = 9x^2 - 4x + 5.
        let tensor = Tensor::new(vec![3.0, -2.0, 5.0, 7.0], vec![1, 4]).unwrap();
        let result = derivative_single(Value::Tensor(tensor)).expect("polyder");
        match result {
            Value::Tensor(t) => {
                assert_eq!(t.shape, vec![1, 3]);
                assert!(t
                    .data
                    .iter()
                    .zip([9.0, -4.0, 5.0])
                    .all(|(lhs, rhs)| (lhs - rhs).abs() < 1e-12));
            }
            other => panic!("expected tensor result, got {other:?}"),
        }
    }

    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn derivative_of_product_matches_manual_rule() {
        // (x^2 - 2)(x + 1) differentiates to 3x^2 + 2x - 2.
        let p = Tensor::new(vec![1.0, 0.0, -2.0], vec![1, 3]).unwrap();
        let a = Tensor::new(vec![1.0, 1.0], vec![1, 2]).unwrap();
        let result =
            derivative_product(Value::Tensor(p), Value::Tensor(a)).expect("polyder product");
        match result {
            Value::Tensor(t) => {
                assert_eq!(t.shape, vec![1, 3]);
                assert!(t
                    .data
                    .iter()
                    .zip([3.0, 2.0, -2.0])
                    .all(|(lhs, rhs)| (lhs - rhs).abs() < 1e-12));
            }
            other => panic!("expected tensor result, got {other:?}"),
        }
    }

    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn quotient_rule_produces_expected_num_and_den() {
        // u = x^2 - 4, v = x - 1: numerator x^2 - 2x + 4, denominator (x-1)^2.
        let u = Tensor::new(vec![1.0, 0.0, -4.0], vec![1, 3]).unwrap();
        let v = Tensor::new(vec![1.0, -1.0], vec![1, 2]).unwrap();
        let eval = evaluate_quotient(Value::Tensor(u), Value::Tensor(v)).expect("polyder quotient");
        match eval.numerator() {
            Value::Tensor(t) => {
                assert_eq!(t.shape, vec![1, 3]);
                assert!(t
                    .data
                    .iter()
                    .zip([1.0, -2.0, 4.0])
                    .all(|(lhs, rhs)| (lhs - rhs).abs() < 1e-12));
            }
            other => panic!("expected tensor numerator, got {other:?}"),
        }
        match eval.denominator() {
            Value::Tensor(t) => {
                assert_eq!(t.shape, vec![1, 3]);
                assert!(t
                    .data
                    .iter()
                    .zip([1.0, -2.0, 1.0])
                    .all(|(lhs, rhs)| (lhs - rhs).abs() < 1e-12));
            }
            other => panic!("expected tensor denominator, got {other:?}"),
        }
    }

    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn column_vector_orientation_is_preserved() {
        // Column input should yield a column-shaped derivative.
        let tensor = Tensor::new(vec![1.0, 0.0, -3.0], vec![3, 1]).unwrap();
        let result = derivative_single(Value::Tensor(tensor)).expect("polyder column");
        match result {
            Value::Tensor(t) => {
                assert_eq!(t.shape, vec![2, 1]);
                assert!(t
                    .data
                    .iter()
                    .zip([2.0, 0.0])
                    .all(|(lhs, rhs)| (lhs - rhs).abs() < 1e-12));
            }
            other => panic!("expected column tensor, got {other:?}"),
        }
    }

    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn complex_coefficients_are_supported() {
        let tensor =
            ComplexTensor::new(vec![(1.0, 2.0), (-3.0, 0.0), (0.0, 4.0)], vec![1, 3]).unwrap();
        let result = derivative_single(Value::ComplexTensor(tensor)).expect("polyder complex");
        match result {
            Value::ComplexTensor(t) => {
                assert_eq!(t.shape, vec![1, 2]);
                let expected = [Complex64::new(2.0, 4.0), Complex64::new(-3.0, 0.0)];
                assert!(t
                    .data
                    .iter()
                    .zip(expected.iter())
                    .all(|((re, im), expected)| {
                        (re - expected.re).abs() < 1e-12 && (im - expected.im).abs() < 1e-12
                    }));
            }
            other => panic!("expected complex tensor, got {other:?}"),
        }
    }

    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn empty_polynomial_returns_zero() {
        // Empty coefficient vectors are treated as the zero polynomial.
        let tensor = Tensor::new(Vec::new(), vec![1, 0]).unwrap();
        let result = derivative_single(Value::Tensor(tensor)).expect("polyder empty");
        assert_eq!(result, Value::Num(0.0));
    }

    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn rejects_matrix_input() {
        let tensor = Tensor::new(vec![1.0, 2.0, 3.0, 4.0], vec![2, 2]).unwrap();
        let err = derivative_single(Value::Tensor(tensor)).unwrap_err();
        assert_error_contains(err, "vector of coefficients");
    }

    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn rejects_string_input() {
        let err = derivative_single(Value::String("abc".into())).unwrap_err();
        assert_error_contains(err, "numeric vector");
    }

    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn mixed_gpu_cpu_product_falls_back_to_host() {
        // One GPU operand and one host operand cannot use the provider hook,
        // so the result must match the pure-CPU computation.
        test_support::with_test_provider(|provider| {
            let p = Tensor::new(vec![1.0, 0.0, -2.0], vec![1, 3]).unwrap();
            let q = Tensor::new(vec![1.0, 1.0], vec![1, 2]).unwrap();
            let cpu_expected =
                derivative_product(Value::Tensor(p.clone()), Value::Tensor(q.clone()))
                    .expect("cpu product");
            let Value::Tensor(cpu_tensor) = cpu_expected else {
                panic!("expected tensor result");
            };

            let view_p = runmat_accelerate_api::HostTensorView {
                data: &p.data,
                shape: &p.shape,
            };
            let handle_p = provider.upload(&view_p).expect("upload p");
            let result = derivative_product(Value::GpuTensor(handle_p), Value::Tensor(q))
                .expect("mixed product");
            let Value::Tensor(host_tensor) = result else {
                panic!("expected host tensor result");
            };
            assert_eq!(host_tensor.shape, cpu_tensor.shape);
            assert!(host_tensor
                .data
                .iter()
                .zip(cpu_tensor.data.iter())
                .all(|(lhs, rhs)| (lhs - rhs).abs() < 1e-12));
        });
    }

    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn builtin_rejects_too_many_inputs() {
        let err = futures::executor::block_on(super::polyder_builtin(
            Value::Num(1.0),
            vec![Value::Num(2.0), Value::Num(3.0)],
        ))
        .unwrap_err();
        assert_error_contains(err, "too many input arguments");
    }

    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn gpu_inputs_remain_on_device() {
        // A device-resident input should produce a device-resident result.
        test_support::with_test_provider(|provider| {
            let tensor = Tensor::new(vec![2.0, 0.0, -5.0, 4.0], vec![1, 4]).unwrap();
            let view = runmat_accelerate_api::HostTensorView {
                data: &tensor.data,
                shape: &tensor.shape,
            };
            let handle = provider.upload(&view).expect("upload");
            let result = derivative_single(Value::GpuTensor(handle)).expect("polyder gpu");
            let Value::GpuTensor(out_handle) = result else {
                panic!("expected GPU tensor result");
            };
            let gathered = test_support::gather(Value::GpuTensor(out_handle)).expect("gather");
            assert_eq!(gathered.shape, vec![1, 3]);
            assert!(gathered
                .data
                .iter()
                .zip([6.0, 0.0, -5.0])
                .all(|(lhs, rhs)| (lhs - rhs).abs() < 1e-12));
        });
    }

    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn gpu_product_matches_cpu() {
        test_support::with_test_provider(|provider| {
            let p = Tensor::new(vec![1.0, 0.0, -2.0], vec![1, 3]).unwrap();
            let q = Tensor::new(vec![1.0, 1.0], vec![1, 2]).unwrap();
            let expected = derivative_product(Value::Tensor(p.clone()), Value::Tensor(q.clone()))
                .expect("cpu product");
            let Value::Tensor(expected_tensor) = expected else {
                panic!("expected tensor output");
            };

            let view_p = runmat_accelerate_api::HostTensorView {
                data: &p.data,
                shape: &p.shape,
            };
            let view_q = runmat_accelerate_api::HostTensorView {
                data: &q.data,
                shape: &q.shape,
            };
            let handle_p = provider.upload(&view_p).expect("upload p");
            let handle_q = provider.upload(&view_q).expect("upload q");
            let gpu_result =
                derivative_product(Value::GpuTensor(handle_p), Value::GpuTensor(handle_q))
                    .expect("gpu product");
            let Value::GpuTensor(gpu_handle) = gpu_result else {
                panic!("expected GPU tensor");
            };
            let gathered = test_support::gather(Value::GpuTensor(gpu_handle)).expect("gather");
            assert_eq!(gathered.shape, expected_tensor.shape);
            assert!(gathered
                .data
                .iter()
                .zip(expected_tensor.data.iter())
                .all(|(lhs, rhs)| (lhs - rhs).abs() < 1e-12));
        });
    }

    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn gpu_quotient_matches_cpu() {
        test_support::with_test_provider(|provider| {
            let u = Tensor::new(vec![1.0, 0.0, -4.0], vec![1, 3]).unwrap();
            let v = Tensor::new(vec![1.0, -1.0], vec![1, 2]).unwrap();
            let expected = evaluate_quotient(Value::Tensor(u.clone()), Value::Tensor(v.clone()))
                .expect("cpu quotient");
            let Value::Tensor(expected_num) = expected.numerator() else {
                panic!("expected tensor numerator");
            };
            let Value::Tensor(expected_den) = expected.denominator() else {
                panic!("expected tensor denominator");
            };

            let view_u = runmat_accelerate_api::HostTensorView {
                data: &u.data,
                shape: &u.shape,
            };
            let view_v = runmat_accelerate_api::HostTensorView {
                data: &v.data,
                shape: &v.shape,
            };
            let handle_u = provider.upload(&view_u).expect("upload u");
            let handle_v = provider.upload(&view_v).expect("upload v");
            let gpu_eval =
                evaluate_quotient(Value::GpuTensor(handle_u), Value::GpuTensor(handle_v))
                    .expect("gpu quotient");
            let gpu_num = test_support::gather(gpu_eval.numerator()).expect("gather num");
            let gpu_den = test_support::gather(gpu_eval.denominator()).expect("gather den");
            assert_eq!(gpu_num.shape, expected_num.shape);
            assert_eq!(gpu_den.shape, expected_den.shape);
            assert!(gpu_num
                .data
                .iter()
                .zip(expected_num.data.iter())
                .all(|(lhs, rhs)| (lhs - rhs).abs() < 1e-12));
            assert!(gpu_den
                .data
                .iter()
                .zip(expected_den.data.iter())
                .all(|(lhs, rhs)| (lhs - rhs).abs() < 1e-12));
        });
    }

    // The wgpu tests below exercise the real provider backend when the
    // `wgpu` feature is enabled; each compares against the CPU result.
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    #[cfg(feature = "wgpu")]
    fn wgpu_polyder_single_matches_cpu() {
        let _ = runmat_accelerate::backend::wgpu::provider::register_wgpu_provider(
            runmat_accelerate::backend::wgpu::provider::WgpuProviderOptions::default(),
        );
        let provider = runmat_accelerate_api::provider().expect("wgpu provider");
        let tensor = Tensor::new(vec![3.0, -2.0, 5.0, 7.0], vec![1, 4]).unwrap();
        let expected = derivative_single(Value::Tensor(tensor.clone())).expect("cpu polyder");
        let Value::Tensor(expected_tensor) = expected else {
            panic!("expected tensor");
        };
        let view = runmat_accelerate_api::HostTensorView {
            data: &tensor.data,
            shape: &tensor.shape,
        };
        let handle = provider.upload(&view).expect("upload");
        let gpu_result = derivative_single(Value::GpuTensor(handle)).expect("gpu polyder");
        let gathered = test_support::gather(gpu_result).expect("gather");
        assert_eq!(gathered.shape, expected_tensor.shape);
        assert!(gathered
            .data
            .iter()
            .zip(expected_tensor.data.iter())
            .all(|(lhs, rhs)| (lhs - rhs).abs() < 1e-12));
    }

    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    #[cfg(feature = "wgpu")]
    fn wgpu_polyder_product_matches_cpu() {
        let _ = runmat_accelerate::backend::wgpu::provider::register_wgpu_provider(
            runmat_accelerate::backend::wgpu::provider::WgpuProviderOptions::default(),
        );
        let provider = runmat_accelerate_api::provider().expect("wgpu provider");
        let p = Tensor::new(vec![1.0, 0.0, -2.0], vec![1, 3]).unwrap();
        let q = Tensor::new(vec![1.0, 1.0], vec![1, 2]).unwrap();
        let expected = derivative_product(Value::Tensor(p.clone()), Value::Tensor(q.clone()))
            .expect("cpu product");
        let Value::Tensor(expected_tensor) = expected else {
            panic!("expected tensor");
        };
        let view_p = runmat_accelerate_api::HostTensorView {
            data: &p.data,
            shape: &p.shape,
        };
        let view_q = runmat_accelerate_api::HostTensorView {
            data: &q.data,
            shape: &q.shape,
        };
        let handle_p = provider.upload(&view_p).expect("upload p");
        let handle_q = provider.upload(&view_q).expect("upload q");
        let gpu_result = derivative_product(Value::GpuTensor(handle_p), Value::GpuTensor(handle_q))
            .expect("gpu product");
        let gathered = test_support::gather(gpu_result).expect("gather");
        assert_eq!(gathered.shape, expected_tensor.shape);
        assert!(gathered
            .data
            .iter()
            .zip(expected_tensor.data.iter())
            .all(|(lhs, rhs)| (lhs - rhs).abs() < 1e-12));
    }

    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    #[cfg(feature = "wgpu")]
    fn wgpu_polyder_quotient_matches_cpu() {
        let _ = runmat_accelerate::backend::wgpu::provider::register_wgpu_provider(
            runmat_accelerate::backend::wgpu::provider::WgpuProviderOptions::default(),
        );
        let provider = runmat_accelerate_api::provider().expect("wgpu provider");
        let u = Tensor::new(vec![1.0, 0.0, -4.0], vec![1, 3]).unwrap();
        let v = Tensor::new(vec![1.0, -1.0], vec![1, 2]).unwrap();
        let expected = evaluate_quotient(Value::Tensor(u.clone()), Value::Tensor(v.clone()))
            .expect("cpu quotient");
        let expected_num = match expected.numerator() {
            Value::Tensor(t) => t,
            other => panic!("expected tensor numerator, got {other:?}"),
        };
        let expected_den = match expected.denominator() {
            Value::Tensor(t) => t,
            other => panic!("expected tensor denominator, got {other:?}"),
        };
        let view_u = runmat_accelerate_api::HostTensorView {
            data: &u.data,
            shape: &u.shape,
        };
        let view_v = runmat_accelerate_api::HostTensorView {
            data: &v.data,
            shape: &v.shape,
        };
        let handle_u = provider.upload(&view_u).expect("upload u");
        let handle_v = provider.upload(&view_v).expect("upload v");
        let gpu_eval = evaluate_quotient(Value::GpuTensor(handle_u), Value::GpuTensor(handle_v))
            .expect("gpu quotient");
        let gpu_num = test_support::gather(gpu_eval.numerator()).expect("gather num");
        let gpu_den = test_support::gather(gpu_eval.denominator()).expect("gather den");
        assert_eq!(gpu_num.shape, expected_num.shape);
        assert_eq!(gpu_den.shape, expected_den.shape);
        assert!(gpu_num
            .data
            .iter()
            .zip(expected_num.data.iter())
            .all(|(lhs, rhs)| (lhs - rhs).abs() < 1e-12));
        assert!(gpu_den
            .data
            .iter()
            .zip(expected_den.data.iter())
            .all(|(lhs, rhs)| (lhs - rhs).abs() < 1e-12));
    }

    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn derivative_promotes_integers() {
        // Integer scalars are constants, so the derivative is zero.
        let value = Value::Int(IntValue::I32(5));
        let result = derivative_single(value).expect("polyder int");
        assert_eq!(result, Value::Num(0.0));
    }

    // Synchronous wrappers so the tests above can call the async entry points.
    fn derivative_single(value: Value) -> BuiltinResult<Value> {
        block_on(super::derivative_single(value))
    }

    fn derivative_product(first: Value, second: Value) -> BuiltinResult<Value> {
        block_on(super::derivative_product(first, second))
    }

    fn evaluate_quotient(u: Value, v: Value) -> BuiltinResult<PolyderEval> {
        block_on(super::evaluate_quotient(u, v))
    }
}