oxiz_math/polynomial/extended_ops.rs
1//! Extended polynomial operations: calculus, GCD, and evaluation.
2
3use super::helpers::*;
4use super::types::*;
5#[allow(unused_imports)]
6use crate::prelude::*;
7use crate::simd::polynomial_simd::{poly_add_coeffs, poly_dot_product, poly_mul_scalar};
8use num_bigint::BigInt;
9use num_rational::BigRational;
10use num_traits::{One, Signed, Zero};
11
12type Polynomial = super::Polynomial;
13
14impl super::Polynomial {
15 /// Compute the derivative with respect to a variable.
16 pub fn derivative(&self, var: Var) -> Polynomial {
17 let terms: Vec<Term> = self
18 .terms
19 .iter()
20 .filter_map(|t| {
21 let d = t.monomial.degree(var);
22 if d == 0 {
23 return None;
24 }
25 let new_coeff = &t.coeff * BigRational::from_integer(BigInt::from(d));
26 let new_mon = if d == 1 {
27 t.monomial
28 .div(&Monomial::from_var(var))
29 .unwrap_or_else(Monomial::unit)
30 } else {
31 let new_powers: Vec<(Var, u32)> = t
32 .monomial
33 .vars()
34 .iter()
35 .map(|vp| {
36 if vp.var == var {
37 (vp.var, vp.power - 1)
38 } else {
39 (vp.var, vp.power)
40 }
41 })
42 .filter(|(_, p)| *p > 0)
43 .collect();
44 Monomial::from_powers(new_powers)
45 };
46 Some(Term::new(new_coeff, new_mon))
47 })
48 .collect();
49 Polynomial::from_terms(terms, self.order)
50 }
51
52 /// Compute the nth derivative of the polynomial with respect to a variable.
53 ///
54 /// # Arguments
55 /// * `var` - The variable to differentiate with respect to
56 /// * `n` - The order of the derivative (n = 0 returns the polynomial itself)
57 ///
58 /// # Examples
59 /// ```
60 /// use oxiz_math::polynomial::Polynomial;
61 ///
62 /// // f(x) = x^3 = x³
63 /// let f = Polynomial::from_coeffs_int(&[(1, &[(0, 3)])]);
64 ///
65 /// // f'(x) = 3x^2
66 /// let f_prime = f.nth_derivative(0, 1);
67 /// assert_eq!(f_prime.total_degree(), 2);
68 ///
69 /// // f''(x) = 6x
70 /// let f_double_prime = f.nth_derivative(0, 2);
71 /// assert_eq!(f_double_prime.total_degree(), 1);
72 ///
73 /// // f'''(x) = 6 (constant)
74 /// let f_triple_prime = f.nth_derivative(0, 3);
75 /// assert_eq!(f_triple_prime.total_degree(), 0);
76 /// assert!(!f_triple_prime.is_zero());
77 ///
78 /// // f''''(x) = 0
79 /// let f_fourth = f.nth_derivative(0, 4);
80 /// assert!(f_fourth.is_zero());
81 /// ```
82 pub fn nth_derivative(&self, var: Var, n: u32) -> Polynomial {
83 if n == 0 {
84 return self.clone();
85 }
86
87 let mut result = self.clone();
88 for _ in 0..n {
89 result = result.derivative(var);
90 if result.is_zero() {
91 break;
92 }
93 }
94 result
95 }
96
97 /// Computes the gradient (vector of partial derivatives) with respect to all variables.
98 ///
99 /// For a multivariate polynomial f(x₁, x₂, ..., xₙ), returns the vector:
100 /// ∇f = [∂f/∂x₁, ∂f/∂x₂, ..., ∂f/∂xₙ]
101 ///
102 /// # Returns
103 /// A vector of polynomials, one for each variable, ordered by variable index.
104 ///
105 /// # Example
106 /// ```
107 /// use oxiz_math::polynomial::Polynomial;
108 /// // f(x,y) = x²y + 2xy + y²
109 /// let f = Polynomial::from_coeffs_int(&[
110 /// (1, &[(0, 2), (1, 1)]), // x²y
111 /// (2, &[(0, 1), (1, 1)]), // 2xy
112 /// (1, &[(1, 2)]), // y²
113 /// ]);
114 ///
115 /// let grad = f.gradient();
116 /// // ∂f/∂x = 2xy + 2y
117 /// // ∂f/∂y = x² + 2x + 2y
118 /// assert_eq!(grad.len(), 2);
119 /// ```
120 pub fn gradient(&self) -> Vec<Polynomial> {
121 let vars = self.vars();
122 if vars.is_empty() {
123 return vec![];
124 }
125
126 let max_var = *vars.iter().max().expect("operation should succeed");
127 let mut grad = Vec::new();
128
129 // Compute partial derivative for each variable from 0 to max_var
130 for var in 0..=max_var {
131 grad.push(self.derivative(var));
132 }
133
134 grad
135 }
136
137 /// Computes the Hessian matrix (matrix of second-order partial derivatives).
138 ///
139 /// For a multivariate polynomial f(x₁, x₂, ..., xₙ), returns the symmetric matrix:
140 /// `H[i,j] = ∂²f/(∂xᵢ∂xⱼ)`
141 ///
142 /// The Hessian is useful for:
143 /// - Optimization (finding local minima/maxima)
144 /// - Convexity analysis
145 /// - Second-order Taylor approximations
146 ///
147 /// # Returns
148 /// A vector of vectors representing the Hessian matrix.
149 /// The matrix is symmetric: `H[i][j] = H[j][i]`
150 ///
151 /// # Example
152 /// ```
153 /// use oxiz_math::polynomial::Polynomial;
154 /// // f(x,y) = x² + xy + y²
155 /// let f = Polynomial::from_coeffs_int(&[
156 /// (1, &[(0, 2)]), // x²
157 /// (1, &[(0, 1), (1, 1)]), // xy
158 /// (1, &[(1, 2)]), // y²
159 /// ]);
160 ///
161 /// let hessian = f.hessian();
162 /// // H = [[2, 1],
163 /// // [1, 2]]
164 /// assert_eq!(hessian.len(), 2);
165 /// assert_eq!(hessian[0].len(), 2);
166 /// ```
167 pub fn hessian(&self) -> Vec<Vec<Polynomial>> {
168 let vars = self.vars();
169 if vars.is_empty() {
170 return vec![];
171 }
172
173 let max_var = *vars.iter().max().expect("operation should succeed");
174 let n = (max_var + 1) as usize;
175
176 let mut hessian = vec![vec![Polynomial::zero(); n]; n];
177
178 // Compute all second-order partial derivatives
179 for i in 0..=max_var {
180 for j in 0..=max_var {
181 // ∂²f/(∂xᵢ∂xⱼ) = ∂/∂xⱼ(∂f/∂xᵢ)
182 let first_deriv = self.derivative(i);
183 let second_deriv = first_deriv.derivative(j);
184 hessian[i as usize][j as usize] = second_deriv;
185 }
186 }
187
188 hessian
189 }
190
191 /// Computes the Jacobian matrix for a vector of polynomials.
192 ///
193 /// For a vector of polynomials f = (f₁, f₂, ..., fₘ) each depending on variables
194 /// x = (x₁, x₂, ..., xₙ), the Jacobian is the m×n matrix:
195 /// `J[i,j] = ∂fᵢ/∂xⱼ`
196 ///
197 /// # Arguments
198 /// * `polys` - Vector of polynomials representing the function components
199 ///
200 /// # Returns
201 /// A matrix where each row i contains the gradient of polynomial i
202 ///
203 /// # Example
204 /// ```
205 /// use oxiz_math::polynomial::Polynomial;
206 /// // f₁(x,y) = x² + y
207 /// // f₂(x,y) = x + y²
208 /// let f1 = Polynomial::from_coeffs_int(&[(1, &[(0, 2)]), (1, &[(1, 1)])]);
209 /// let f2 = Polynomial::from_coeffs_int(&[(1, &[(0, 1)]), (1, &[(1, 2)])]);
210 ///
211 /// let jacobian = Polynomial::jacobian(&[f1, f2]);
212 /// // J = [[2x, 1 ],
213 /// // [1, 2y]]
214 /// assert_eq!(jacobian.len(), 2); // 2 functions
215 /// ```
216 pub fn jacobian(polys: &[Polynomial]) -> Vec<Vec<Polynomial>> {
217 if polys.is_empty() {
218 return vec![];
219 }
220
221 // Find the maximum variable across all polynomials
222 let max_var = polys.iter().flat_map(|p| p.vars()).max().unwrap_or(0);
223
224 let n_vars = (max_var + 1) as usize;
225 let mut jacobian = Vec::with_capacity(polys.len());
226
227 for poly in polys {
228 let mut row = Vec::with_capacity(n_vars);
229 for var in 0..=max_var {
230 row.push(poly.derivative(var));
231 }
232 jacobian.push(row);
233 }
234
235 jacobian
236 }
237
    /// Compute the indefinite integral (antiderivative) of the polynomial with respect to a variable.
    ///
    /// For a polynomial p(x), returns ∫p(x)dx. The constant of integration is implicitly zero.
    ///
    /// # Arguments
    /// * `var` - The variable to integrate with respect to
    ///
    /// # Examples
    /// ```
    /// use oxiz_math::polynomial::Polynomial;
    /// use num_bigint::BigInt;
    /// use num_rational::BigRational;
    ///
    /// // f(x) = 3x^2
    /// let f = Polynomial::from_coeffs_int(&[(3, &[(0, 2)])]);
    ///
    /// // ∫f(x)dx = x^3
    /// let integral = f.integrate(0);
    ///
    /// // Verify: derivative of integral should be original
    /// let derivative = integral.derivative(0);
    /// assert_eq!(derivative, f);
    /// ```
    pub fn integrate(&self, var: Var) -> Polynomial {
        // Reverse power rule applied per term:
        //   c * var^d  ->  (c / (d+1)) * var^(d+1)
        let terms: Vec<Term> = self
            .terms
            .iter()
            .map(|t| {
                let d = t.monomial.degree(var);
                let new_power = d + 1;

                // Divide coefficient by (power + 1)
                let new_coeff = &t.coeff / BigRational::from_integer(BigInt::from(new_power));

                // Increment the power of var
                let new_powers: Vec<(Var, u32)> = if d == 0 {
                    // Constant term (w.r.t. `var`): append var^1 and re-sort so
                    // the power list stays ordered by variable index.
                    let mut powers = t
                        .monomial
                        .vars()
                        .iter()
                        .map(|vp| (vp.var, vp.power))
                        .collect::<Vec<_>>();
                    powers.push((var, 1));
                    powers.sort_by_key(|(v, _)| *v);
                    powers
                } else {
                    // Variable already exists: increment power in place; the
                    // ordering of the existing power list is preserved.
                    t.monomial
                        .vars()
                        .iter()
                        .map(|vp| {
                            if vp.var == var {
                                (vp.var, vp.power + 1)
                            } else {
                                (vp.var, vp.power)
                            }
                        })
                        .collect()
                };

                let new_mon = Monomial::from_powers(new_powers);
                Term::new(new_coeff, new_mon)
            })
            .collect();
        Polynomial::from_terms(terms, self.order)
    }
305
306 /// Compute the definite integral of a univariate polynomial over an interval (a, b).
307 ///
308 /// For a univariate polynomial p(x), returns ∫ₐᵇ p(x)dx = F(b) - F(a)
309 /// where F is the antiderivative of p.
310 ///
311 /// # Arguments
312 /// * `var` - The variable to integrate with respect to
313 /// * `lower` - Lower bound of integration
314 /// * `upper` - Upper bound of integration
315 ///
316 /// # Returns
317 /// The definite integral value, or None if the polynomial is not univariate in the given variable
318 ///
319 /// # Examples
320 /// ```
321 /// use oxiz_math::polynomial::Polynomial;
322 /// use num_bigint::BigInt;
323 /// use num_rational::BigRational;
324 ///
325 /// // ∫[0,2] x^2 dx = [x^3/3] from 0 to 2 = 8/3
326 /// let f = Polynomial::from_coeffs_int(&[(1, &[(0, 2)])]);
327 /// let result = f.definite_integral(0, &BigRational::from_integer(BigInt::from(0)),
328 /// &BigRational::from_integer(BigInt::from(2)));
329 /// assert_eq!(result, Some(BigRational::new(BigInt::from(8), BigInt::from(3))));
330 /// ```
331 pub fn definite_integral(
332 &self,
333 var: Var,
334 lower: &BigRational,
335 upper: &BigRational,
336 ) -> Option<BigRational> {
337 // Get the antiderivative
338 let antideriv = self.integrate(var);
339
340 // Evaluate at upper and lower bounds
341 let mut upper_assignment = crate::prelude::FxHashMap::default();
342 upper_assignment.insert(var, upper.clone());
343 let upper_val = antideriv.eval(&upper_assignment);
344
345 let mut lower_assignment = crate::prelude::FxHashMap::default();
346 lower_assignment.insert(var, lower.clone());
347 let lower_val = antideriv.eval(&lower_assignment);
348
349 // Return F(b) - F(a)
350 Some(upper_val - lower_val)
351 }
352
353 /// Find critical points of a univariate polynomial by solving f'(x) = 0.
354 ///
355 /// Critical points are values where the derivative equals zero, which correspond
356 /// to local maxima, minima, or saddle points.
357 ///
358 /// # Arguments
359 /// * `var` - The variable to find critical points for
360 ///
361 /// # Returns
362 /// A vector of isolating intervals containing the critical points. Each interval
363 /// contains exactly one root of the derivative.
364 ///
365 /// # Examples
366 /// ```
367 /// use oxiz_math::polynomial::Polynomial;
368 ///
369 /// // f(x) = x^3 - 3x = x(x^2 - 3)
370 /// // f'(x) = 3x^2 - 3 = 3(x^2 - 1) = 3(x-1)(x+1)
371 /// // Critical points at x = -1 and x = 1
372 /// let f = Polynomial::from_coeffs_int(&[
373 /// (1, &[(0, 3)]), // x^3
374 /// (-3, &[(0, 1)]), // -3x
375 /// ]);
376 ///
377 /// let critical_points = f.find_critical_points(0);
378 /// assert_eq!(critical_points.len(), 2); // Two critical points
379 /// ```
380 pub fn find_critical_points(&self, var: Var) -> Vec<(BigRational, BigRational)> {
381 // Compute the derivative
382 let deriv = self.derivative(var);
383
384 // Find roots of the derivative (where f'(x) = 0)
385 deriv.isolate_roots(var)
386 }
387
388 /// Numerically integrate using the trapezoidal rule.
389 ///
390 /// Approximates ∫ₐᵇ f(x)dx using the trapezoidal rule with n subintervals.
391 /// The trapezoidal rule approximates the integral by summing the areas of trapezoids.
392 ///
393 /// # Arguments
394 /// * `var` - The variable to integrate with respect to
395 /// * `lower` - Lower bound of integration
396 /// * `upper` - Upper bound of integration
397 /// * `n` - Number of subintervals (more subintervals = higher accuracy)
398 ///
399 /// # Examples
400 /// ```
401 /// use oxiz_math::polynomial::Polynomial;
402 /// use num_bigint::BigInt;
403 /// use num_rational::BigRational;
404 ///
405 /// // ∫[0,1] x^2 dx = 1/3
406 /// let f = Polynomial::from_coeffs_int(&[(1, &[(0, 2)])]);
407 /// let approx = f.trapezoidal_rule(0,
408 /// &BigRational::from_integer(BigInt::from(0)),
409 /// &BigRational::from_integer(BigInt::from(1)),
410 /// 100);
411 ///
412 /// let exact = BigRational::new(BigInt::from(1), BigInt::from(3));
413 /// // With 100 intervals, approximation should be very close
414 /// ```
415 pub fn trapezoidal_rule(
416 &self,
417 var: Var,
418 lower: &BigRational,
419 upper: &BigRational,
420 n: u32,
421 ) -> BigRational {
422 if n == 0 {
423 return BigRational::zero();
424 }
425
426 // Step size h = (b - a) / n
427 let h = (upper - lower) / BigRational::from_integer(BigInt::from(n));
428
429 let mut sum = BigRational::zero();
430
431 // First and last terms: f(a)/2 + f(b)/2
432 let mut assignment = crate::prelude::FxHashMap::default();
433 assignment.insert(var, lower.clone());
434 sum += &self.eval(&assignment) / BigRational::from_integer(BigInt::from(2));
435
436 assignment.insert(var, upper.clone());
437 sum += &self.eval(&assignment) / BigRational::from_integer(BigInt::from(2));
438
439 // Middle terms: sum of f(x_i) for i = 1 to n-1
440 for i in 1..n {
441 let x_i = lower + &h * BigRational::from_integer(BigInt::from(i));
442 assignment.insert(var, x_i);
443 sum += &self.eval(&assignment);
444 }
445
446 // Multiply by step size
447 sum * h
448 }
449
450 /// Numerically integrate using Simpson's rule.
451 ///
452 /// Approximates ∫ₐᵇ f(x)dx using Simpson's rule with n subintervals (n must be even).
453 /// Simpson's rule uses parabolic approximation and is generally more accurate than
454 /// the trapezoidal rule for smooth functions.
455 ///
456 /// # Arguments
457 /// * `var` - The variable to integrate with respect to
458 /// * `lower` - Lower bound of integration
459 /// * `upper` - Upper bound of integration
460 /// * `n` - Number of subintervals (must be even for Simpson's rule)
461 ///
462 /// # Examples
463 /// ```
464 /// use oxiz_math::polynomial::Polynomial;
465 /// use num_bigint::BigInt;
466 /// use num_rational::BigRational;
467 ///
468 /// // ∫[0,1] x^2 dx = 1/3
469 /// let f = Polynomial::from_coeffs_int(&[(1, &[(0, 2)])]);
470 /// let approx = f.simpsons_rule(0,
471 /// &BigRational::from_integer(BigInt::from(0)),
472 /// &BigRational::from_integer(BigInt::from(1)),
473 /// 100);
474 ///
475 /// let exact = BigRational::new(BigInt::from(1), BigInt::from(3));
476 /// // Simpson's rule should give very accurate results for polynomials
477 /// ```
478 pub fn simpsons_rule(
479 &self,
480 var: Var,
481 lower: &BigRational,
482 upper: &BigRational,
483 n: u32,
484 ) -> BigRational {
485 if n == 0 {
486 return BigRational::zero();
487 }
488
489 // Ensure n is even for Simpson's rule
490 let n = if n % 2 == 1 { n + 1 } else { n };
491
492 // Step size h = (b - a) / n
493 let h = (upper - lower) / BigRational::from_integer(BigInt::from(n));
494
495 let mut sum = BigRational::zero();
496 let mut assignment = crate::prelude::FxHashMap::default();
497
498 // First and last terms: f(a) + f(b)
499 assignment.insert(var, lower.clone());
500 sum += &self.eval(&assignment);
501
502 assignment.insert(var, upper.clone());
503 sum += &self.eval(&assignment);
504
505 // Odd-indexed terms (multiplied by 4): i = 1, 3, 5, ..., n-1
506 for i in (1..n).step_by(2) {
507 let x_i = lower + &h * BigRational::from_integer(BigInt::from(i));
508 assignment.insert(var, x_i);
509 sum += BigRational::from_integer(BigInt::from(4)) * &self.eval(&assignment);
510 }
511
512 // Even-indexed terms (multiplied by 2): i = 2, 4, 6, ..., n-2
513 for i in (2..n).step_by(2) {
514 let x_i = lower + &h * BigRational::from_integer(BigInt::from(i));
515 assignment.insert(var, x_i);
516 sum += BigRational::from_integer(BigInt::from(2)) * &self.eval(&assignment);
517 }
518
519 // Multiply by h/3
520 sum * h / BigRational::from_integer(BigInt::from(3))
521 }
522
523 /// Evaluate the polynomial at a point (substituting a value for a variable).
524 pub fn eval_at(&self, var: Var, value: &BigRational) -> Polynomial {
525 let terms: Vec<Term> = self
526 .terms
527 .iter()
528 .map(|t| {
529 let d = t.monomial.degree(var);
530 if d == 0 {
531 t.clone()
532 } else {
533 let new_coeff = &t.coeff * value.pow(d as i32);
534 let new_mon = t
535 .monomial
536 .div(&Monomial::from_var_power(var, d))
537 .unwrap_or_else(Monomial::unit);
538 Term::new(new_coeff, new_mon)
539 }
540 })
541 .collect();
542 Polynomial::from_terms(terms, self.order)
543 }
544
545 /// Evaluate the polynomial completely (all variables assigned).
546 pub fn eval(&self, assignment: &FxHashMap<Var, BigRational>) -> BigRational {
547 let mut result = BigRational::zero();
548
549 for term in &self.terms {
550 let mut val = term.coeff.clone();
551 for vp in term.monomial.vars() {
552 if let Some(v) = assignment.get(&vp.var) {
553 val *= v.pow(vp.power as i32);
554 } else {
555 panic!("Variable x{} not in assignment", vp.var);
556 }
557 }
558 result += val;
559 }
560
561 result
562 }
563
    /// Evaluate a univariate polynomial using Horner's method.
    ///
    /// Horner's method evaluates a polynomial p(x) = a_n x^n + ... + a_1 x + a_0
    /// as (((...((a_n) x + a_{n-1}) x + ...) x + a_1) x + a_0), which requires
    /// only n multiplications instead of n(n+1)/2 for the naive method.
    ///
    /// This method is more efficient than eval() for univariate polynomials.
    ///
    /// # Arguments
    /// * `var` - The variable to evaluate
    /// * `value` - The value to substitute for the variable
    ///
    /// # Returns
    /// The evaluated value
    ///
    /// # Panics
    /// Panics if the polynomial is not univariate in the given variable
    pub fn eval_horner(&self, var: Var, value: &BigRational) -> BigRational {
        if self.is_zero() {
            return BigRational::zero();
        }

        // For multivariate polynomials, fall back to partial substitution.
        if !self.is_univariate() || self.max_var() != var {
            let result = self.eval_at(var, value);
            // If substitution collapsed everything to a constant, return it.
            if result.is_constant() {
                return result.constant_value();
            }
            panic!("Polynomial is not univariate in variable x{}", var);
        }

        let deg = self.degree(var);
        if deg == 0 {
            return self.constant_value();
        }

        // Collect coefficients indexed by degree, ascending:
        // coeffs[k] is the coefficient of var^k.
        let mut coeffs = vec![BigRational::zero(); (deg + 1) as usize];
        for k in 0..=deg {
            coeffs[k as usize] = self.univ_coeff(var, k);
        }

        // Apply Horner's method: seed with the leading coefficient and work
        // down toward degree 0.
        let mut result = coeffs[deg as usize].clone();
        for k in (0..deg).rev() {
            result = &result * value + &coeffs[k as usize];
        }

        result
    }
615
616 /// Substitute a polynomial for a variable.
617 pub fn substitute(&self, var: Var, replacement: &Polynomial) -> Polynomial {
618 let mut result = Polynomial::zero();
619
620 for term in &self.terms {
621 let d = term.monomial.degree(var);
622 if d == 0 {
623 result = Polynomial::add(
624 &result,
625 &Polynomial::from_terms(vec![term.clone()], self.order),
626 );
627 } else {
628 let remainder = term
629 .monomial
630 .div(&Monomial::from_var_power(var, d))
631 .unwrap_or_else(Monomial::unit);
632 let coeff_poly = Polynomial::from_terms(
633 vec![Term::new(term.coeff.clone(), remainder)],
634 self.order,
635 );
636 let rep_pow = replacement.pow(d);
637 result = Polynomial::add(&result, &Polynomial::mul(&coeff_poly, &rep_pow));
638 }
639 }
640
641 result
642 }
643
644 /// Integer content: GCD of all coefficients (as integers).
645 /// Assumes all coefficients are integers.
646 pub fn integer_content(&self) -> BigInt {
647 if self.terms.is_empty() {
648 return BigInt::one();
649 }
650
651 let mut gcd: Option<BigInt> = None;
652 for term in &self.terms {
653 let num = term.coeff.numer().clone();
654 gcd = Some(match gcd {
655 None => num.abs(),
656 Some(g) => gcd_bigint(g, num.abs()),
657 });
658 }
659 gcd.unwrap_or_else(BigInt::one)
660 }
661
662 /// Make the polynomial primitive (divide by integer content).
663 pub fn primitive(&self) -> Polynomial {
664 let content = self.integer_content();
665 if content.is_one() {
666 return self.clone();
667 }
668 let c = BigRational::from_integer(content);
669 self.scale(&(BigRational::one() / c))
670 }
671
    /// GCD of two polynomials (univariate, using Euclidean algorithm).
    ///
    /// Uses a primitive pseudo-remainder sequence: each remainder is reduced to
    /// its primitive part to keep coefficients small. The result is returned in
    /// primitive form, so the GCD is determined only up to a constant factor.
    pub fn gcd_univariate(&self, other: &Polynomial) -> Polynomial {
        // gcd(0, q) = q and gcd(p, 0) = p (taken primitive).
        if self.is_zero() {
            return other.primitive();
        }
        if other.is_zero() {
            return self.primitive();
        }

        let var = self.max_var().max(other.max_var());
        if var == NULL_VAR {
            // Both are constants, GCD is 1
            return Polynomial::one();
        }

        let mut a = self.primitive();
        let mut b = other.primitive();

        // Ensure deg(a) >= deg(b)
        if a.degree(var) < b.degree(var) {
            core::mem::swap(&mut a, &mut b);
        }

        // Limit iterations for safety (the degree strictly decreases each
        // step, so this bound is generous).
        let mut iter_count = 0;
        let max_iters = a.degree(var) as usize + b.degree(var) as usize + 10;

        // Euclidean loop: (a, b) <- (b, prem(a, b)), keeping remainders primitive.
        while !b.is_zero() && iter_count < max_iters {
            iter_count += 1;
            let r = a.pseudo_remainder(&b, var);
            a = b;
            b = if r.is_zero() { r } else { r.primitive() };
        }

        a.primitive()
    }
708
    /// Pseudo-remainder for univariate polynomials.
    /// Returns r such that lc(b)^d * a = q * b + r for some q,
    /// where d = max(deg(a) - deg(b) + 1, 0).
    ///
    /// Scaling by the divisor's leading coefficient at each step keeps all
    /// arithmetic denominator-free (no exact coefficient division needed).
    ///
    /// # Panics
    /// Panics if `divisor` is the zero polynomial.
    pub fn pseudo_remainder(&self, divisor: &Polynomial, var: Var) -> Polynomial {
        if divisor.is_zero() {
            panic!("Division by zero polynomial");
        }

        if self.is_zero() {
            return Polynomial::zero();
        }

        let deg_a = self.degree(var);
        let deg_b = divisor.degree(var);

        // Lower-degree dividend: nothing to eliminate, remainder is `self`.
        if deg_a < deg_b {
            return self.clone();
        }

        let lc_b = divisor.univ_coeff(var, deg_b);
        let mut r = self.clone();

        // Limit iterations (each step removes at least one leading term).
        let max_iters = (deg_a - deg_b + 2) as usize;
        let mut iters = 0;

        while !r.is_zero() && r.degree(var) >= deg_b && iters < max_iters {
            iters += 1;
            let deg_r = r.degree(var);
            let lc_r = r.univ_coeff(var, deg_r);
            let shift = deg_r - deg_b;

            // r = lc_b * r - lc_r * x^shift * divisor
            // (cancels r's leading term without dividing by lc_b).
            r = r.scale(&lc_b);
            let subtractor = divisor
                .scale(&lc_r)
                .mul_monomial(&Monomial::from_var_power(var, shift));
            r = Polynomial::sub(&r, &subtractor);
        }

        r
    }
751
    /// Pseudo-division for univariate polynomials.
    /// Returns (quotient, remainder) such that lc(b)^d * a = q * b + r
    /// where d = deg(a) - deg(b) + 1.
    ///
    /// Like [`Self::pseudo_remainder`], but also accumulates the quotient.
    /// Both q and r are rescaled by lc(b) on every elimination step so the
    /// invariant above holds without exact coefficient division.
    ///
    /// # Panics
    /// Panics if `divisor` is the zero polynomial.
    pub fn pseudo_div_univariate(&self, divisor: &Polynomial) -> (Polynomial, Polynomial) {
        if divisor.is_zero() {
            panic!("Division by zero polynomial");
        }

        if self.is_zero() {
            return (Polynomial::zero(), Polynomial::zero());
        }

        let var = self.max_var().max(divisor.max_var());
        if var == NULL_VAR {
            // Both are constants: quotient 0, remainder = self.
            return (Polynomial::zero(), self.clone());
        }

        let deg_a = self.degree(var);
        let deg_b = divisor.degree(var);

        if deg_a < deg_b {
            return (Polynomial::zero(), self.clone());
        }

        let lc_b = divisor.univ_coeff(var, deg_b);
        let mut q = Polynomial::zero();
        let mut r = self.clone();

        // Limit iterations (degree of r strictly decreases each step).
        let max_iters = (deg_a - deg_b + 2) as usize;
        let mut iters = 0;

        while !r.is_zero() && r.degree(var) >= deg_b && iters < max_iters {
            iters += 1;
            let deg_r = r.degree(var);
            let lc_r = r.univ_coeff(var, deg_r);
            let shift = deg_r - deg_b;

            // Elimination term: lc(r) * x^shift.
            let term = Polynomial::from_terms(
                vec![Term::new(
                    lc_r.clone(),
                    Monomial::from_var_power(var, shift),
                )],
                self.order,
            );

            // Keep q consistent with the rescaled r: q = lc_b * q + term.
            q = q.scale(&lc_b);
            q = Polynomial::add(&q, &term);

            // r = lc_b * r - term * divisor (cancels r's leading term).
            r = r.scale(&lc_b);
            let subtractor = Polynomial::mul(divisor, &term);
            r = Polynomial::sub(&r, &subtractor);
        }

        (q, r)
    }
809
    /// Compute the subresultant polynomial remainder sequence (PRS).
    ///
    /// The subresultant PRS is a more efficient variant of the pseudo-remainder sequence
    /// that avoids coefficient explosion through careful normalization. It's useful for
    /// GCD computation and other polynomial algorithms.
    ///
    /// Returns a sequence of polynomials [p0, p1, ..., pk] where:
    /// - p0 = self
    /// - p1 = other
    /// - Each subsequent polynomial is derived using the subresultant algorithm
    ///
    /// NOTE(review): g and h are tracked as constant polynomials and read via
    /// `constant_value()`; this presumes the leading coefficients w.r.t. `var`
    /// are constants (i.e. effectively univariate input) — confirm for
    /// multivariate callers.
    ///
    /// Reference: "Algorithms for Computer Algebra" by Geddes, Czapor, Labahn
    pub fn subresultant_prs(&self, other: &Polynomial, var: Var) -> Vec<Polynomial> {
        if self.is_zero() || other.is_zero() {
            return vec![];
        }

        let mut prs = Vec::new();
        let mut a = self.clone();
        let mut b = other.clone();

        // Ensure deg(a) >= deg(b)
        if a.degree(var) < b.degree(var) {
            core::mem::swap(&mut a, &mut b);
        }

        prs.push(a.clone());
        prs.push(b.clone());

        // g: previous leading coefficient; h: running subresultant factor.
        let mut g = Polynomial::one();
        let mut h = Polynomial::one();

        // Safety bound: degrees strictly decrease, so this is generous.
        let max_iters = a.degree(var) as usize + b.degree(var) as usize + 10;
        let mut iter_count = 0;

        while !b.is_zero() && iter_count < max_iters {
            iter_count += 1;

            // delta = deg(a) - deg(b): the degree gap for this step.
            let delta = a.degree(var) as i32 - b.degree(var) as i32;
            if delta < 0 {
                break;
            }

            // Compute pseudo-remainder
            let prem = a.pseudo_remainder(&b, var);

            if prem.is_zero() {
                break;
            }

            // Subresultant normalization to prevent coefficient explosion
            let normalized = if delta == 0 {
                // No adjustment needed for delta = 0
                prem.scale(&(BigRational::one() / &h.constant_value()))
            } else {
                // For delta > 0, divide by g^delta * h
                let g_pow = g.constant_value().pow(delta);
                let divisor = &g_pow * &h.constant_value();
                prem.scale(&(BigRational::one() / divisor))
            };

            // Update g and h for next iteration
            let lc_b = b.leading_coeff_wrt(var);
            g = lc_b;

            // h <- g^delta / h^(delta-1) (h unchanged when delta == 0).
            h = if delta == 0 {
                Polynomial::one()
            } else {
                let g_val = g.constant_value();
                let h_val = h.constant_value();
                let new_h = g_val.pow(delta) / h_val.pow(delta - 1);
                Polynomial::constant(new_h)
            };

            // Move to next iteration
            a = b;
            b = normalized.primitive(); // Make primitive to keep coefficients manageable
            prs.push(b.clone());
        }

        prs
    }
892
    /// Resultant of two univariate polynomials with respect to a variable.
    ///
    /// Uses a subresultant-style PRS with pseudo-division. NOTE(review): as the
    /// inline comments state, exact division by g^(delta+1)*h^delta is not
    /// performed (only `primitive()` is applied), so the returned value may
    /// differ from the exact mathematical resultant by a constant factor —
    /// callers relying on the exact value should verify.
    pub fn resultant(&self, other: &Polynomial, var: Var) -> Polynomial {
        // res(0, q) = res(p, 0) = 0 by convention here.
        if self.is_zero() || other.is_zero() {
            return Polynomial::zero();
        }

        let deg_p = self.degree(var);
        let deg_q = other.degree(var);

        // Constant cases: res(c, q) = c^deg(q) and symmetrically.
        if deg_p == 0 {
            return self.pow(deg_q);
        }
        if deg_q == 0 {
            return other.pow(deg_p);
        }

        // Use subresultant PRS for efficiency
        let mut a = self.clone();
        let mut b = other.clone();
        let mut g = Polynomial::one();
        let mut h = Polynomial::one();
        // Swapping arguments flips the sign when both degrees are odd.
        let mut sign = if (deg_p & 1 == 1) && (deg_q & 1 == 1) {
            -1i32
        } else {
            1i32
        };

        // Add iteration limit to prevent infinite loops when exact division is not available
        let max_iters = (deg_p + deg_q) * 10;
        let mut iter_count = 0;

        while !b.is_zero() && iter_count < max_iters {
            iter_count += 1;
            let delta = a.degree(var) as i32 - b.degree(var) as i32;
            if delta < 0 {
                // Restore deg(a) >= deg(b), tracking the sign flip.
                core::mem::swap(&mut a, &mut b);
                if (a.degree(var) & 1 == 1) && (b.degree(var) & 1 == 1) {
                    sign = -sign;
                }
                continue;
            }

            let (_, r) = a.pseudo_div_univariate(&b);

            if r.is_zero() {
                // b divides a: resultant is zero unless b is a constant.
                if b.degree(var) > 0 {
                    return Polynomial::zero();
                } else {
                    let d = a.degree(var);
                    return b.pow(d);
                }
            }

            a = b;
            let g_pow = g.pow((delta + 1) as u32);
            let h_pow = h.pow(delta as u32);
            b = r;

            // Simplify b by dividing out common factors
            // Since exact division is not implemented, use primitive() to prevent growth
            if delta > 0 {
                let denom = Polynomial::mul(&g_pow, &h_pow);
                // Try to cancel common content by making primitive
                b = b.primitive();
                // Note: This is an approximation and may not give the exact mathematical resultant
                let _ = denom; // Acknowledge we should divide by this
            }

            g = a.leading_coeff_wrt(var);
            let g_delta = g.pow(delta as u32);
            // h update: unchanged for delta == 0, g for delta == 1, g^delta otherwise.
            let h_new = if delta == 0 {
                h.clone()
            } else if delta == 1 {
                g.clone()
            } else {
                g_delta
            };
            h = h_new;
        }

        // The resultant is in a (last non-zero remainder), up to sign.
        if sign < 0 { a.neg() } else { a }
    }
976
    /// Discriminant of a polynomial with respect to a variable.
    ///
    /// Mathematically, disc(p) = ± resultant(p, dp/dx) / lc(p). This
    /// implementation returns resultant(p, dp/dx) directly — the division by
    /// the leading coefficient is NOT performed, so the result may differ from
    /// the textbook discriminant by a constant factor (NOTE(review): confirm
    /// callers only need the discriminant up to a constant, e.g. for zero /
    /// sign tests).
    pub fn discriminant(&self, var: Var) -> Polynomial {
        let deriv = self.derivative(var);
        self.resultant(&deriv, var)
    }
983
984 /// Check if the polynomial has a positive sign for all variable assignments.
985 /// This is an incomplete check that only handles obvious cases.
986 pub fn is_definitely_positive(&self) -> bool {
987 // All terms with even total degree and positive coefficients
988 if self.terms.is_empty() {
989 return false;
990 }
991
992 // Check if all monomials are even powers and coefficients are positive
993 self.terms
994 .iter()
995 .all(|t| t.coeff.is_positive() && t.monomial.vars().iter().all(|vp| vp.power % 2 == 0))
996 }
997
    /// Check if the polynomial has a negative sign for all variable assignments.
    /// This is an incomplete check.
    pub fn is_definitely_negative(&self) -> bool {
        // p < 0 everywhere iff -p > 0 everywhere; reuse the positive check.
        self.neg().is_definitely_positive()
    }
1003
1004 /// Make the polynomial monic (leading coefficient = 1).
1005 pub fn make_monic(&self) -> Polynomial {
1006 if self.is_zero() {
1007 return self.clone();
1008 }
1009 let lc = self.leading_coeff();
1010 if lc.is_one() {
1011 return self.clone();
1012 }
1013 self.scale(&(BigRational::one() / lc))
1014 }
1015
    /// Compute the square-free part of a polynomial (removes repeated factors).
    ///
    /// Uses the classical identity: the square-free part of p is p / gcd(p, p').
    /// The recursion handles the case where one division does not remove all
    /// repeated factors. Results are returned in primitive form.
    pub fn square_free(&self) -> Polynomial {
        if self.is_zero() || self.is_constant() {
            return self.clone();
        }

        let var = self.max_var();
        if var == NULL_VAR {
            return self.clone();
        }

        // Square-free: p / gcd(p, p')
        let deriv = self.derivative(var);
        if deriv.is_zero() {
            return self.clone();
        }

        let g = self.gcd_univariate(&deriv);
        if g.is_constant() {
            // gcd is trivial: p already has no repeated factors.
            self.primitive()
        } else {
            let (q, r) = self.pseudo_div_univariate(&g);
            if r.is_zero() {
                // Exact division succeeded; recurse in case factors remain.
                q.primitive().square_free()
            } else {
                // Pseudo-division was not exact; fall back to the primitive part.
                self.primitive()
            }
        }
    }
1045
    /// Compute the Sturm sequence for a univariate polynomial.
    /// The Sturm sequence is used for counting real roots in an interval.
    ///
    /// The sequence is p₀ = p, p₁ = p', and pᵢ₊₁ = -rem(pᵢ₋₁, pᵢ) until the
    /// remainder vanishes. Pseudo-remainders are used instead of exact
    /// remainders; NOTE(review): pseudo-remainders can differ from exact
    /// remainders by a positive OR negative constant factor depending on the
    /// sign of lc(b) — confirm sign-variation counts downstream account for this.
    pub fn sturm_sequence(&self, var: Var) -> Vec<Polynomial> {
        // Constants (and zero) have a trivial one-element sequence.
        if self.is_zero() || self.degree(var) == 0 {
            return vec![self.clone()];
        }

        let mut seq = Vec::new();
        seq.push(self.clone());
        seq.push(self.derivative(var));

        // Build Sturm sequence: p_i+1 = -rem(p_i-1, p_i)
        let max_iterations = self.degree(var) as usize + 5;
        let mut iterations = 0;

        while !seq
            .last()
            .expect("collection should not be empty")
            .is_zero()
            && iterations < max_iterations
        {
            iterations += 1;
            let n = seq.len();
            let rem = seq[n - 2].pseudo_remainder(&seq[n - 1], var);
            if rem.is_zero() {
                break;
            }
            seq.push(rem.neg());
        }

        seq
    }
1078
1079 /// Count the number of real roots in an interval using Sturm's theorem.
1080 /// Returns the number of distinct real roots in (a, b).
1081 pub fn count_roots_in_interval(&self, var: Var, a: &BigRational, b: &BigRational) -> usize {
1082 if self.is_zero() {
1083 return 0;
1084 }
1085
1086 let sturm_seq = self.sturm_sequence(var);
1087 if sturm_seq.is_empty() {
1088 return 0;
1089 }
1090
1091 // Count sign variations at a and b
1092 let var_a = count_sign_variations(&sturm_seq, var, a);
1093 let var_b = count_sign_variations(&sturm_seq, var, b);
1094
1095 // Number of roots = var_a - var_b
1096 var_a.saturating_sub(var_b)
1097 }
1098
    /// Compute Cauchy's root bound for a univariate polynomial.
    /// Returns B such that all roots have absolute value <= B.
    ///
    /// Cauchy bound: 1 + max(|a_i| / |a_n|) for i < n
    ///
    /// This is a simple, conservative bound that's fast to compute.
    pub fn cauchy_bound(&self, var: Var) -> BigRational {
        // Thin method-style wrapper over the free helper (from super::helpers).
        cauchy_root_bound(self, var)
    }
1108
    /// Compute Fujiwara's root bound for a univariate polynomial.
    /// Returns B such that all roots have absolute value <= B.
    ///
    /// Fujiwara bound: 2 * max(|a_i/a_n|^(1/(n-i))) for i < n
    ///
    /// This bound is generally tighter than Cauchy's bound but more expensive to compute.
    ///
    /// Reference: Fujiwara, "Über die obere Schranke des absoluten Betrages
    /// der Wurzeln einer algebraischen Gleichung" (1916)
    pub fn fujiwara_bound(&self, var: Var) -> BigRational {
        // Thin method-style wrapper over the free helper (from super::helpers).
        fujiwara_root_bound(self, var)
    }
1121
    /// Compute Lagrange's bound for positive roots of a univariate polynomial.
    /// Returns B such that all positive roots are <= B.
    ///
    /// This can provide a tighter bound than general root bounds when analyzing
    /// only positive roots, useful for root isolation optimization.
    pub fn lagrange_positive_bound(&self, var: Var) -> BigRational {
        // Thin method-style wrapper over the free helper (from super::helpers).
        lagrange_positive_root_bound(self, var)
    }
1130
    /// Isolate real roots of a univariate polynomial.
    /// Returns a list of intervals, each containing exactly one root.
    /// Uses Descartes' rule of signs to optimize the search.
    ///
    /// Exact rational roots found during bisection are reported as
    /// degenerate intervals `(r, r)`.
    pub fn isolate_roots(&self, var: Var) -> Vec<(BigRational, BigRational)> {
        if self.is_zero() || self.is_constant() {
            return vec![];
        }

        // Make square-free first
        // (Sturm-based counting below assumes all roots are simple).
        let p = self.square_free();
        if p.is_constant() {
            return vec![];
        }

        // Find a bound for all real roots using Cauchy's bound
        let bound = cauchy_root_bound(&p, var);

        // Use Descartes' rule to check if there are any roots at all
        // (an upper bound of 0 lets us skip an entire half-line).
        let (_pos_lower, pos_upper) = descartes_positive_roots(&p, var);
        let (_neg_lower, neg_upper) = descartes_negative_roots(&p, var);

        // Use interval bisection with Sturm's theorem
        let mut intervals = Vec::new();
        let mut queue = Vec::new();

        // Only search in positive interval if there might be positive roots
        if pos_upper > 0 {
            queue.push((BigRational::zero(), bound.clone()));
        }

        // Only search in negative interval if there might be negative roots
        if neg_upper > 0 {
            queue.push((-bound, BigRational::zero()));
        }

        // Safety cap on bisection steps. NOTE(review): when the cap is hit,
        // intervals still in the queue are dropped silently, so roots may be
        // missed for pathological inputs — confirm callers tolerate this.
        let max_iterations = 1000;
        let mut iterations = 0;

        while let Some((a, b)) = queue.pop() {
            iterations += 1;
            if iterations > max_iterations {
                break;
            }

            let num_roots = p.count_roots_in_interval(var, &a, &b);

            if num_roots == 0 {
                // No roots in this interval
                continue;
            } else if num_roots == 1 {
                // Exactly one root
                intervals.push((a, b));
            } else {
                // Multiple roots, bisect
                let mid = (&a + &b) / BigRational::from_integer(BigInt::from(2));

                // Check if mid is a root
                // (eval_at returns a polynomial; a root makes it the zero constant).
                let val_mid = p.eval_at(var, &mid);
                if val_mid.constant_term().is_zero() {
                    // Found an exact root
                    intervals.push((mid.clone(), mid.clone()));
                    // Don't add intervals that would contain this root again
                    let left_roots = p.count_roots_in_interval(var, &a, &mid);
                    let right_roots = p.count_roots_in_interval(var, &mid, &b);
                    if left_roots > 0 {
                        queue.push((a, mid.clone()));
                    }
                    if right_roots > 0 {
                        queue.push((mid, b));
                    }
                } else {
                    queue.push((a, mid.clone()));
                    queue.push((mid, b));
                }
            }
        }

        intervals
    }
1210
1211 /// Refine a root approximation using Newton-Raphson iteration.
1212 ///
1213 /// Given an initial approximation and bounds, refines the root using the formula:
1214 /// x_{n+1} = x_n - f(x_n) / f'(x_n)
1215 ///
1216 /// # Arguments
1217 ///
1218 /// * `var` - The variable to solve for
1219 /// * `initial` - Initial approximation of the root
1220 /// * `lower` - Lower bound for the root (for verification)
1221 /// * `upper` - Upper bound for the root (for verification)
1222 /// * `max_iterations` - Maximum number of iterations
1223 /// * `tolerance` - Convergence tolerance (stop when |f(x)| < tolerance)
1224 ///
1225 /// # Returns
1226 ///
1227 /// The refined root approximation, or None if the method fails to converge
1228 /// or if the derivative is zero.
1229 ///
1230 /// # Examples
1231 ///
1232 /// ```
1233 /// use num_bigint::BigInt;
1234 /// use num_rational::BigRational;
1235 /// use oxiz_math::polynomial::Polynomial;
1236 ///
1237 /// // Solve x^2 - 2 = 0 (find sqrt(2) ≈ 1.414...)
1238 /// let p = Polynomial::from_coeffs_int(&[
1239 /// (1, &[(0, 2)]), // x^2
1240 /// (-2, &[]), // -2
1241 /// ]);
1242 ///
1243 /// let initial = BigRational::new(BigInt::from(3), BigInt::from(2)); // 1.5
1244 /// let lower = BigRational::from_integer(BigInt::from(1));
1245 /// let upper = BigRational::from_integer(BigInt::from(2));
1246 ///
1247 /// let root = p.newton_raphson(0, initial, lower, upper, 10, &BigRational::new(BigInt::from(1), BigInt::from(1000000)));
1248 /// assert!(root.is_some());
1249 /// ```
1250 pub fn newton_raphson(
1251 &self,
1252 var: Var,
1253 initial: BigRational,
1254 lower: BigRational,
1255 upper: BigRational,
1256 max_iterations: usize,
1257 tolerance: &BigRational,
1258 ) -> Option<BigRational> {
1259 use num_traits::{Signed, Zero};
1260
1261 let derivative = self.derivative(var);
1262
1263 let mut x = initial;
1264
1265 for _ in 0..max_iterations {
1266 // Evaluate f(x)
1267 let mut assignment = FxHashMap::default();
1268 assignment.insert(var, x.clone());
1269 let fx = self.eval(&assignment);
1270
1271 // Check if we're close enough
1272 if fx.abs() < *tolerance {
1273 return Some(x);
1274 }
1275
1276 // Evaluate f'(x)
1277 let fpx = derivative.eval(&assignment);
1278
1279 // Check for zero derivative
1280 if fpx.is_zero() {
1281 return None;
1282 }
1283
1284 // Newton-Raphson update: x_new = x - f(x)/f'(x)
1285 let x_new = x - (fx / fpx);
1286
1287 // Verify the new point is within bounds
1288 if x_new < lower || x_new > upper {
1289 // If out of bounds, use bisection fallback
1290 return None;
1291 }
1292
1293 x = x_new;
1294 }
1295
1296 // Return the result even if not fully converged
1297 Some(x)
1298 }
1299
1300 // ── Dense i64 coefficient fast paths (SIMD-accelerated) ─────────────────
1301
1302 /// Extract the dense integer coefficient vector for a univariate polynomial.
1303 ///
1304 /// Returns `Some(coeffs)` where `coeffs[k]` is the coefficient of `var^k`,
1305 /// if and only if every coefficient is an integer that fits in `i64`.
1306 /// Returns `None` for multivariate, non-integer, or out-of-range coefficients.
1307 pub fn as_dense_i64(&self, var: Var) -> Option<Vec<i64>> {
1308 if self.is_zero() {
1309 return Some(vec![0i64]);
1310 }
1311 // Allow univariate-in-var polynomials
1312 if !self.is_univariate() && self.max_var() != var {
1313 return None;
1314 }
1315
1316 let deg = self.degree(var) as usize;
1317 let mut coeffs = vec![0i64; deg + 1];
1318
1319 for term in &self.terms {
1320 let d = term.monomial.degree(var) as usize;
1321 if !term.coeff.is_integer() {
1322 return None;
1323 }
1324 let numer = term.coeff.numer();
1325 match i64::try_from(numer.clone()) {
1326 Ok(v) => coeffs[d] = v,
1327 Err(_) => return None,
1328 }
1329 }
1330
1331 Some(coeffs)
1332 }
1333
1334 /// Add two univariate polynomials using the SIMD-accelerated i64 fast path.
1335 ///
1336 /// Falls back to the generic [`Polynomial::add`] when coefficients do not fit
1337 /// in `i64` or the polynomials are not univariate in the same variable.
1338 pub fn add_fast_i64(&self, other: &Polynomial, var: Var) -> Polynomial {
1339 let a_opt = self.as_dense_i64(var);
1340 let b_opt = other.as_dense_i64(var);
1341
1342 match (a_opt, b_opt) {
1343 (Some(a), Some(b)) => {
1344 // poly_add_coeffs pads the shorter slice to max(a.len(), b.len())
1345 let out = poly_add_coeffs(&a, &b);
1346 Polynomial::from_dense_i64(&out, var, self.order)
1347 }
1348 _ => Polynomial::add(self, other),
1349 }
1350 }
1351
1352 /// Scale a univariate polynomial by an integer scalar using the SIMD fast path.
1353 ///
1354 /// Falls back to generic scalar-multiply when coefficients do not fit in `i64`.
1355 pub fn scale_fast_i64(&self, scalar: i64, var: Var) -> Polynomial {
1356 match self.as_dense_i64(var) {
1357 Some(coeffs) => {
1358 let scaled = poly_mul_scalar(&coeffs, scalar);
1359 Polynomial::from_dense_i64(&scaled, var, self.order)
1360 }
1361 None => {
1362 let s = BigRational::from_integer(BigInt::from(scalar));
1363 let scaled_terms: Vec<Term> = self
1364 .terms
1365 .iter()
1366 .map(|t| Term::new(&t.coeff * &s, t.monomial.clone()))
1367 .collect();
1368 Polynomial::from_terms(scaled_terms, self.order)
1369 }
1370 }
1371 }
1372
1373 /// Compute the i64 dot product between two same-degree dense coefficient arrays.
1374 ///
1375 /// Returns `Some(value)` when both polynomials are expressible as dense univariate
1376 /// `i64` coefficient arrays of equal length, `None` otherwise.
1377 pub fn dot_product_i64(&self, other: &Polynomial, var: Var) -> Option<i64> {
1378 let a = self.as_dense_i64(var)?;
1379 let b = other.as_dense_i64(var)?;
1380 if a.len() != b.len() {
1381 return None;
1382 }
1383 poly_dot_product(&a, &b).ok()
1384 }
1385
1386 /// Reconstruct a [`Polynomial`] from a dense `i64` coefficient array.
1387 ///
1388 /// `coeffs[k]` is the coefficient of `var^k`. Zero coefficients are omitted.
1389 fn from_dense_i64(coeffs: &[i64], var: Var, order: super::types::MonomialOrder) -> Polynomial {
1390 let terms: Vec<Term> = coeffs
1391 .iter()
1392 .enumerate()
1393 .filter(|(_, c)| **c != 0)
1394 .map(|(k, c)| {
1395 let coeff = BigRational::from_integer(BigInt::from(*c));
1396 let mon = if k == 0 {
1397 Monomial::unit()
1398 } else {
1399 Monomial::from_var_power(var, k as u32)
1400 };
1401 Term::new(coeff, mon)
1402 })
1403 .collect();
1404 Polynomial::from_terms(terms, order)
1405 }
1406}