use ndarray::{ArrayView1, ArrayView2};
use ndarray_linalg::Scalar;
use num_traits::Float;
use std::marker::PhantomData;
use crate::problem::{Constraint, Covariance, Problem, ScoringStrategy};
use crate::utils::{form_rescaled_variables, Rescaled};
use crate::PolyCalError;
/// Typestate marker: the corresponding builder slot has been populated.
#[derive(Default)]
pub struct Set {}
/// Typestate marker: the corresponding builder slot is still empty.
#[derive(Default)]
pub struct Unset {}
/// Typestate builder for a calibration [`Problem`].
///
/// The marker parameters track which optional inputs have been supplied:
/// `DU`/`IU` become [`Set`] once dependent/independent variances are given,
/// `DC`/`IC` once dependent/independent covariance matrices are given, and
/// `C` is the constraint type ([`Unset`] until one is attached). Each setter
/// consumes the builder and returns one in the next state, so invalid
/// combinations are unrepresentable at compile time.
#[allow(clippy::module_name_repetitions)]
pub struct ProblemBuilder<'a, E, DU, IU, DC, IC, C> {
    // Observed responses, one per observation.
    dependent: ArrayView1<'a, E>,
    // Stimulus values paired element-wise with `dependent`.
    independent: ArrayView1<'a, E>,
    // Per-observation variances (diagonal uncertainty), when supplied.
    dependent_variance: Option<ArrayView1<'a, E>>,
    independent_variance: Option<ArrayView1<'a, E>>,
    // Full covariance matrices, when supplied.
    dependent_covariance: Option<ArrayView2<'a, E>>,
    independent_covariance: Option<ArrayView2<'a, E>>,
    // Constraint payload; `None` until `with_constraint` is called.
    constraint: Option<C>,
    // Model-selection strategy; defaults to chi-square in `new`.
    strategy: ScoringStrategy,
    // Zero-sized typestate record; note `C` also appears in `constraint`.
    typestate: PhantomData<(DU, IU, DC, IC, C)>,
}
impl<'a, E: Float> ProblemBuilder<'a, E, Unset, Unset, Unset, Unset, Unset> {
    /// Starts building a calibration problem from paired observations.
    ///
    /// `independent` holds the stimulus values and `dependent` the observed
    /// responses; the two must have equal lengths. The arguments may now be
    /// of *different* concrete types, as long as each converts into an
    /// [`ArrayView1`] — callers passing the same type are unaffected.
    ///
    /// # Errors
    ///
    /// Returns [`PolyCalError::InvalidData`] when the lengths differ or when
    /// either input contains a NaN or infinite value.
    pub fn new<VI, VD>(independent: VI, dependent: VD) -> Result<Self, PolyCalError<E>>
    where
        VI: Into<ArrayView1<'a, E>>,
        VD: Into<ArrayView1<'a, E>>,
    {
        let dependent: ArrayView1<'a, E> = dependent.into();
        let independent: ArrayView1<'a, E> = independent.into();
        if independent.len() != dependent.len() {
            return Err(PolyCalError::InvalidData(
                "dependent and independent data must contain equal numbers of observations".into(),
            ));
        }
        if dependent.iter().any(|each| !each.is_finite()) {
            return Err(PolyCalError::InvalidData(
                "dependent values cannot contain NaN or infinity".into(),
            ));
        }
        if independent.iter().any(|each| !each.is_finite()) {
            return Err(PolyCalError::InvalidData(
                "independent values cannot contain NaN or infinity".into(),
            ));
        }
        let builder = Self {
            dependent,
            independent,
            dependent_variance: None,
            independent_variance: None,
            dependent_covariance: None,
            independent_covariance: None,
            constraint: None,
            // Chi-square scoring is the default until overridden.
            strategy: ScoringStrategy::ChiSquare,
            typestate: PhantomData,
        };
        Ok(builder)
    }
}
impl<E, DU, IU, DC, IC, C> ProblemBuilder<'_, E, DU, IU, DC, IC, C> {
    /// Selects the model-selection scoring strategy used when solving.
    ///
    /// Available in every builder state; overrides the default
    /// ([`ScoringStrategy::ChiSquare`], set in `new`).
    #[must_use]
    pub const fn with_scoring_strategy(mut self, scoring_strategy: ScoringStrategy) -> Self {
        self.strategy = scoring_strategy;
        self
    }
}
impl<'a, E: Float, C> ProblemBuilder<'a, E, Unset, Unset, Unset, Unset, C> {
    /// Supplies per-observation variances for the dependent (response) data.
    ///
    /// # Errors
    ///
    /// Returns [`PolyCalError::InvalidData`] when the variance vector length
    /// does not match the number of observations, or when any entry is NaN,
    /// infinite, or exactly zero.
    // NOTE(review): negative variances are accepted — confirm this is
    // intended (existing tests build fixtures that may produce them).
    #[allow(clippy::type_complexity)]
    pub fn with_dependent_variance(
        self,
        dependent_variance: impl Into<ArrayView1<'a, E>>,
    ) -> Result<ProblemBuilder<'a, E, Set, Unset, Unset, Unset, C>, PolyCalError<E>> {
        let dependent_variance: ArrayView1<'a, E> = dependent_variance.into();
        // Each observation needs exactly one variance entry.
        if dependent_variance.len() != self.dependent.len() {
            return Err(PolyCalError::InvalidData(
                "dependent variance must contain one value per observation".into(),
            ));
        }
        // Zero variances would yield infinite weights downstream.
        if dependent_variance
            .iter()
            .any(|each| !each.is_finite() || (*each == E::zero()))
        {
            return Err(PolyCalError::InvalidData(
                "dependent variance cannot contain NaN, infinity, or zeroes".into(),
            ));
        }
        Ok(ProblemBuilder {
            dependent: self.dependent,
            independent: self.independent,
            dependent_variance: Some(dependent_variance),
            independent_variance: self.independent_variance,
            dependent_covariance: self.dependent_covariance,
            independent_covariance: self.independent_covariance,
            strategy: self.strategy,
            constraint: self.constraint,
            typestate: PhantomData,
        })
    }
}
impl<'a, E: Float, C> ProblemBuilder<'a, E, Unset, Set, Unset, Unset, C> {
    /// Supplies per-observation variances for the dependent (response) data
    /// when the independent variance has already been set.
    ///
    /// # Errors
    ///
    /// Returns [`PolyCalError::InvalidData`] when the variance vector length
    /// does not match the number of observations, or when any entry is NaN,
    /// infinite, or exactly zero.
    // NOTE(review): negative variances are accepted — confirm this is intended.
    #[allow(clippy::type_complexity)]
    pub fn with_dependent_variance(
        self,
        dependent_variance: impl Into<ArrayView1<'a, E>>,
    ) -> Result<ProblemBuilder<'a, E, Set, Set, Unset, Unset, C>, PolyCalError<E>> {
        let dependent_variance: ArrayView1<'a, E> = dependent_variance.into();
        // Each observation needs exactly one variance entry.
        if dependent_variance.len() != self.dependent.len() {
            return Err(PolyCalError::InvalidData(
                "dependent variance must contain one value per observation".into(),
            ));
        }
        // Zero variances would yield infinite weights downstream.
        if dependent_variance
            .iter()
            .any(|each| !each.is_finite() || (*each == E::zero()))
        {
            return Err(PolyCalError::InvalidData(
                "dependent variance cannot contain NaN, infinity, or zeroes".into(),
            ));
        }
        Ok(ProblemBuilder {
            dependent: self.dependent,
            independent: self.independent,
            dependent_variance: Some(dependent_variance),
            independent_variance: self.independent_variance,
            dependent_covariance: self.dependent_covariance,
            independent_covariance: self.independent_covariance,
            strategy: self.strategy,
            constraint: self.constraint,
            typestate: PhantomData,
        })
    }
}
impl<'a, E: Float, C> ProblemBuilder<'a, E, Unset, Unset, Unset, Unset, C> {
    /// Supplies per-observation variances for the independent (stimulus) data.
    ///
    /// Crate-internal for now (not `pub`), matching the original visibility.
    ///
    /// # Errors
    ///
    /// Returns [`PolyCalError::InvalidData`] when the variance vector length
    /// does not match the number of observations, or when any entry is NaN,
    /// infinite, or exactly zero.
    // NOTE(review): negative variances are accepted — confirm this is intended.
    #[allow(clippy::type_complexity)]
    fn with_independent_variance(
        self,
        independent_variance: impl Into<ArrayView1<'a, E>>,
    ) -> Result<ProblemBuilder<'a, E, Unset, Set, Unset, Unset, C>, PolyCalError<E>> {
        let independent_variance: ArrayView1<'a, E> = independent_variance.into();
        // Each observation needs exactly one variance entry.
        if independent_variance.len() != self.independent.len() {
            return Err(PolyCalError::InvalidData(
                "independent variance must contain one value per observation".into(),
            ));
        }
        // Zero variances would yield infinite weights downstream.
        if independent_variance
            .iter()
            .any(|each| !each.is_finite() || (*each == E::zero()))
        {
            return Err(PolyCalError::InvalidData(
                "independent variance cannot contain NaN, infinity, or zeroes".into(),
            ));
        }
        Ok(ProblemBuilder {
            dependent: self.dependent,
            independent: self.independent,
            dependent_variance: self.dependent_variance,
            independent_variance: Some(independent_variance),
            dependent_covariance: self.dependent_covariance,
            independent_covariance: self.independent_covariance,
            strategy: self.strategy,
            constraint: self.constraint,
            typestate: PhantomData,
        })
    }
}
impl<'a, E: Float, C> ProblemBuilder<'a, E, Set, Unset, Unset, Unset, C> {
    /// Supplies per-observation variances for the independent (stimulus) data
    /// when the dependent variance has already been set.
    ///
    /// # Errors
    ///
    /// Returns [`PolyCalError::InvalidData`] when the variance vector length
    /// does not match the number of observations, or when any entry is NaN,
    /// infinite, or exactly zero.
    // NOTE(review): negative variances are accepted — confirm this is intended.
    #[allow(clippy::type_complexity)]
    fn with_independent_variance(
        self,
        independent_variance: impl Into<ArrayView1<'a, E>>,
    ) -> Result<ProblemBuilder<'a, E, Set, Set, Unset, Unset, C>, PolyCalError<E>> {
        let independent_variance: ArrayView1<'a, E> = independent_variance.into();
        // Each observation needs exactly one variance entry.
        if independent_variance.len() != self.independent.len() {
            return Err(PolyCalError::InvalidData(
                "independent variance must contain one value per observation".into(),
            ));
        }
        // Zero variances would yield infinite weights downstream.
        if independent_variance
            .iter()
            .any(|each| !each.is_finite() || (*each == E::zero()))
        {
            return Err(PolyCalError::InvalidData(
                "independent variance cannot contain NaN, infinity, or zeroes".into(),
            ));
        }
        Ok(ProblemBuilder {
            dependent: self.dependent,
            independent: self.independent,
            dependent_variance: self.dependent_variance,
            independent_variance: Some(independent_variance),
            dependent_covariance: self.dependent_covariance,
            independent_covariance: self.independent_covariance,
            strategy: self.strategy,
            constraint: self.constraint,
            typestate: PhantomData,
        })
    }
}
impl<'a, E, C> ProblemBuilder<'a, E, Unset, Unset, Unset, Unset, C> {
    /// Attaches a full covariance matrix for the dependent observations.
    ///
    /// NOTE(review): unlike the variance setters, no validation is performed
    /// here — confirm callers guarantee a well-formed `n`-by-`n` matrix.
    pub(crate) fn with_dependent_covariance(
        self,
        dependent_covariance: impl Into<ArrayView2<'a, E>>,
    ) -> ProblemBuilder<'a, E, Unset, Unset, Set, Unset, C> {
        // Destructure once instead of repeating `self.` field moves.
        let Self {
            dependent,
            independent,
            dependent_variance,
            independent_variance,
            independent_covariance,
            strategy,
            constraint,
            ..
        } = self;
        ProblemBuilder {
            dependent,
            independent,
            dependent_variance,
            independent_variance,
            dependent_covariance: Some(dependent_covariance.into()),
            independent_covariance,
            strategy,
            constraint,
            typestate: PhantomData,
        }
    }
}
impl<'a, E, C> ProblemBuilder<'a, E, Unset, Unset, Unset, Set, C> {
    /// Attaches a full covariance matrix for the dependent observations when
    /// the independent covariance has already been set.
    ///
    /// NOTE(review): no shape validation is performed — confirm callers
    /// guarantee a well-formed `n`-by-`n` matrix.
    pub(crate) fn with_dependent_covariance(
        self,
        dependent_covariance: impl Into<ArrayView2<'a, E>>,
    ) -> ProblemBuilder<'a, E, Unset, Unset, Set, Set, C> {
        // Destructure once instead of repeating `self.` field moves.
        let Self {
            dependent,
            independent,
            dependent_variance,
            independent_variance,
            independent_covariance,
            strategy,
            constraint,
            ..
        } = self;
        ProblemBuilder {
            dependent,
            independent,
            dependent_variance,
            independent_variance,
            dependent_covariance: Some(dependent_covariance.into()),
            independent_covariance,
            strategy,
            constraint,
            typestate: PhantomData,
        }
    }
}
impl<'a, E, C> ProblemBuilder<'a, E, Unset, Unset, Unset, Unset, C> {
    /// Attaches a full covariance matrix for the independent observations.
    ///
    /// NOTE(review): no shape validation is performed — confirm callers
    /// guarantee a well-formed `n`-by-`n` matrix.
    fn with_independent_covariance(
        self,
        independent_covariance: impl Into<ArrayView2<'a, E>>,
    ) -> ProblemBuilder<'a, E, Unset, Unset, Unset, Set, C> {
        // Destructure once instead of repeating `self.` field moves.
        let Self {
            dependent,
            independent,
            dependent_variance,
            independent_variance,
            dependent_covariance,
            strategy,
            constraint,
            ..
        } = self;
        ProblemBuilder {
            dependent,
            independent,
            dependent_variance,
            independent_variance,
            dependent_covariance,
            independent_covariance: Some(independent_covariance.into()),
            strategy,
            constraint,
            typestate: PhantomData,
        }
    }
}
impl<'a, E, C> ProblemBuilder<'a, E, Unset, Unset, Set, Unset, C> {
    /// Attaches a full covariance matrix for the independent observations when
    /// the dependent covariance has already been set.
    ///
    /// NOTE(review): no shape validation is performed — confirm callers
    /// guarantee a well-formed `n`-by-`n` matrix.
    fn with_independent_covariance(
        self,
        independent_covariance: impl Into<ArrayView2<'a, E>>,
    ) -> ProblemBuilder<'a, E, Unset, Unset, Set, Set, C> {
        // Destructure once instead of repeating `self.` field moves.
        let Self {
            dependent,
            independent,
            dependent_variance,
            independent_variance,
            dependent_covariance,
            strategy,
            constraint,
            ..
        } = self;
        ProblemBuilder {
            dependent,
            independent,
            dependent_variance,
            independent_variance,
            dependent_covariance,
            independent_covariance: Some(independent_covariance.into()),
            strategy,
            constraint,
            typestate: PhantomData,
        }
    }
}
impl<'a, E, DU, IU, DC, IC> ProblemBuilder<'a, E, DU, IU, DC, IC, Unset> {
    /// Attaches a constraint, moving the builder into the constrained state.
    ///
    /// Available in any uncertainty state, but only while no constraint has
    /// been attached yet (`C = Unset`).
    pub const fn with_constraint<C>(
        self,
        constraint: C,
    ) -> ProblemBuilder<'a, E, DU, IU, DC, IC, C> {
        ProblemBuilder {
            constraint: Some(constraint),
            dependent: self.dependent,
            independent: self.independent,
            dependent_variance: self.dependent_variance,
            independent_variance: self.independent_variance,
            dependent_covariance: self.dependent_covariance,
            independent_covariance: self.independent_covariance,
            strategy: self.strategy,
            typestate: PhantomData,
        }
    }
}
impl<'a, E: PartialOrd + Scalar> ProblemBuilder<'a, E, Unset, Unset, Unset, Unset, Unset> {
    /// Builds an unweighted, unconstrained [`Problem`].
    #[must_use]
    pub fn build(self) -> Problem<'a, E> {
        // Map the independent variable onto the canonical fitting domain.
        let rescaled = form_rescaled_variables(self.independent);
        Problem {
            t: rescaled.t,
            y: self.dependent,
            uncertainties: Covariance::None,
            domain: rescaled.domain,
            strategy: self.strategy,
            constraint: None,
        }
    }
}
impl<'a, E: PartialOrd + Scalar> ProblemBuilder<'a, E, Set, Unset, Unset, Unset, Unset> {
    /// Builds a [`Problem`] weighted by the dependent variances, unconstrained.
    #[must_use]
    pub fn build(self) -> Problem<'a, E> {
        // Map the independent variable onto the canonical fitting domain.
        let Rescaled { t, domain } = form_rescaled_variables(self.independent);
        Problem {
            t,
            y: self.dependent,
            uncertainties: Covariance::Diagonal {
                ux: None,
                // The `Set` marker in this impl's typestate proves the
                // variance was supplied, so this cannot panic.
                uy: self
                    .dependent_variance
                    .expect("typestate guarantees dependent variance is set"),
            },
            domain,
            strategy: self.strategy,
            constraint: None,
        }
    }
}
impl<'a, E: PartialOrd + Scalar> ProblemBuilder<'a, E, Set, Set, Unset, Unset, Unset> {
    /// Builds a [`Problem`] weighted by both variances, unconstrained.
    #[must_use]
    fn build(self) -> Problem<'a, E> {
        // Map the independent variable onto the canonical fitting domain.
        let Rescaled { t, domain } = form_rescaled_variables(self.independent);
        Problem {
            t,
            y: self.dependent,
            uncertainties: Covariance::Diagonal {
                // The `Set` markers in this impl's typestate prove both
                // variances were supplied, so these cannot panic.
                ux: Some(
                    self.independent_variance
                        .expect("typestate guarantees independent variance is set"),
                ),
                uy: self
                    .dependent_variance
                    .expect("typestate guarantees dependent variance is set"),
            },
            domain,
            strategy: self.strategy,
            constraint: None,
        }
    }
}
impl<'a, E: PartialOrd + Scalar> ProblemBuilder<'a, E, Unset, Unset, Set, Unset, Unset> {
    /// Builds a [`Problem`] weighted by a full dependent covariance matrix.
    #[must_use]
    pub(crate) fn build(self) -> Problem<'a, E> {
        // Map the independent variable onto the canonical fitting domain.
        let Rescaled { t, domain } = form_rescaled_variables(self.independent);
        Problem {
            t,
            y: self.dependent,
            uncertainties: Covariance::Matrix {
                vx: None,
                // The `Set` marker in this impl's typestate proves the
                // covariance was supplied, so this cannot panic.
                vy: self
                    .dependent_covariance
                    .expect("typestate guarantees dependent covariance is set"),
            },
            domain,
            strategy: self.strategy,
            constraint: None,
        }
    }
}
impl<'a, E: PartialOrd + Scalar> ProblemBuilder<'a, E, Unset, Unset, Set, Set, Unset> {
    /// Builds a [`Problem`] weighted by both covariance matrices.
    #[must_use]
    fn build(self) -> Problem<'a, E> {
        // Map the independent variable onto the canonical fitting domain.
        let Rescaled { t, domain } = form_rescaled_variables(self.independent);
        Problem {
            t,
            y: self.dependent,
            uncertainties: Covariance::Matrix {
                // The `Set` markers in this impl's typestate prove both
                // covariances were supplied, so these cannot panic.
                vx: Some(
                    self.independent_covariance
                        .expect("typestate guarantees independent covariance is set"),
                ),
                vy: self
                    .dependent_covariance
                    .expect("typestate guarantees dependent covariance is set"),
            },
            domain,
            strategy: self.strategy,
            constraint: None,
        }
    }
}
impl<'a, E: PartialOrd + Scalar, C: Into<Constraint<E>>>
    ProblemBuilder<'a, E, Unset, Unset, Unset, Unset, C>
{
    /// Builds an unweighted [`Problem`] carrying the attached constraint.
    #[must_use]
    pub fn build(self) -> Problem<'a, E> {
        // Map the independent variable onto the canonical fitting domain.
        let Rescaled { t, domain } = form_rescaled_variables(self.independent);
        Problem {
            t,
            y: self.dependent,
            uncertainties: Covariance::None,
            domain,
            strategy: self.strategy,
            // Convert the caller's constraint type into the canonical form.
            constraint: self.constraint.map(std::convert::Into::into),
        }
    }
}
impl<'a, E: PartialOrd + Scalar, C: Into<Constraint<E>>>
    ProblemBuilder<'a, E, Set, Unset, Unset, Unset, C>
{
    /// Builds a constrained [`Problem`] weighted by the dependent variances.
    #[must_use]
    pub fn build(self) -> Problem<'a, E> {
        // Map the independent variable onto the canonical fitting domain.
        let Rescaled { t, domain } = form_rescaled_variables(self.independent);
        Problem {
            t,
            y: self.dependent,
            uncertainties: Covariance::Diagonal {
                ux: None,
                // The `Set` marker in this impl's typestate proves the
                // variance was supplied, so this cannot panic.
                uy: self
                    .dependent_variance
                    .expect("typestate guarantees dependent variance is set"),
            },
            domain,
            strategy: self.strategy,
            // Convert the caller's constraint type into the canonical form.
            constraint: self.constraint.map(std::convert::Into::into),
        }
    }
}
impl<'a, E: PartialOrd + Scalar, C: Into<Constraint<E>>>
    ProblemBuilder<'a, E, Set, Set, Unset, Unset, C>
{
    /// Builds a constrained [`Problem`] weighted by both variances.
    #[must_use]
    fn build(self) -> Problem<'a, E> {
        // Map the independent variable onto the canonical fitting domain.
        let Rescaled { t, domain } = form_rescaled_variables(self.independent);
        Problem {
            t,
            y: self.dependent,
            uncertainties: Covariance::Diagonal {
                // The `Set` markers in this impl's typestate prove both
                // variances were supplied, so these cannot panic.
                ux: Some(
                    self.independent_variance
                        .expect("typestate guarantees independent variance is set"),
                ),
                uy: self
                    .dependent_variance
                    .expect("typestate guarantees dependent variance is set"),
            },
            domain,
            strategy: self.strategy,
            // Convert the caller's constraint type into the canonical form.
            constraint: self.constraint.map(std::convert::Into::into),
        }
    }
}
impl<'a, E: PartialOrd + Scalar, C: Into<Constraint<E>>>
    ProblemBuilder<'a, E, Unset, Unset, Set, Unset, C>
{
    /// Builds a constrained [`Problem`] weighted by a full dependent
    /// covariance matrix.
    #[must_use]
    fn build(self) -> Problem<'a, E> {
        // Map the independent variable onto the canonical fitting domain.
        let Rescaled { t, domain } = form_rescaled_variables(self.independent);
        Problem {
            t,
            y: self.dependent,
            uncertainties: Covariance::Matrix {
                vx: None,
                // The `Set` marker in this impl's typestate proves the
                // covariance was supplied, so this cannot panic.
                vy: self
                    .dependent_covariance
                    .expect("typestate guarantees dependent covariance is set"),
            },
            domain,
            strategy: self.strategy,
            // Convert the caller's constraint type into the canonical form.
            constraint: self.constraint.map(std::convert::Into::into),
        }
    }
}
impl<'a, E: PartialOrd + Scalar, C: Into<Constraint<E>>>
    ProblemBuilder<'a, E, Unset, Unset, Set, Set, C>
{
    /// Builds a constrained [`Problem`] weighted by both covariance matrices.
    #[must_use]
    fn build(self) -> Problem<'a, E> {
        // Map the independent variable onto the canonical fitting domain.
        let Rescaled { t, domain } = form_rescaled_variables(self.independent);
        Problem {
            t,
            y: self.dependent,
            uncertainties: Covariance::Matrix {
                // The `Set` markers in this impl's typestate prove both
                // covariances were supplied, so these cannot panic.
                vx: Some(
                    self.independent_covariance
                        .expect("typestate guarantees independent covariance is set"),
                ),
                vy: self
                    .dependent_covariance
                    .expect("typestate guarantees dependent covariance is set"),
            },
            domain,
            strategy: self.strategy,
            // Convert the caller's constraint type into the canonical form.
            constraint: self.constraint.map(std::convert::Into::into),
        }
    }
}
#[cfg(test)]
mod test {
use crate::{ChebyshevBuilder, Constraint, PolynomialSeries, Series};
use super::ProblemBuilder;
use cert::{AbsUncertainty, Uncertainty};
use ndarray::Array1;
use ndarray_rand::rand::{Rng, SeedableRng};
use num_traits::Float;
use rand_isaac::Isaac64Rng;
#[test]
fn fit_with_dependent_variance_works_in_direct_evaluation() {
let state = 40;
let mut rng = Isaac64Rng::seed_from_u64(state);
let order = 5;
let domain_max = rng.gen::<f64>().abs();
let domain_min = -domain_max;
let num_calibration_points = rng.gen_range(50..200);
#[allow(clippy::cast_precision_loss)]
let x = (0..num_calibration_points)
.map(|m| {
domain_min
+ m as f64 * (domain_max - domain_min) / (num_calibration_points as f64 - 1.0)
})
.collect::<Array1<_>>();
let mut series = None;
loop {
let coeff = (0..order).map(|_| rng.gen()).collect::<Vec<f64>>();
let this = Series::from_coeff(coeff.clone(), x.as_slice().unwrap());
if this.is_monotonic().unwrap() {
let _ = series.replace(this);
break;
}
}
let series = series.unwrap();
let y = x.iter().map(|x| series.evaluate(*x)).collect::<Array1<_>>();
let uy = y
.iter()
.map(|y| rng.gen_range(1e-5..1e-3) * y)
.collect::<Array1<_>>();
let builder = ProblemBuilder::new(x.view(), y.view())
.unwrap()
.with_dependent_variance(uy.view())
.unwrap()
.with_scoring_strategy(crate::ScoringStrategy::Aicc);
let problem = builder.build();
let solution = problem.solve(4 * order).unwrap();
let num_tests = 10;
for _ in 0..num_tests {
let idx = rng.gen_range(0..num_calibration_points);
let x0 = x[idx];
let y0 = y[idx];
let predicted_y = solution
.response(AbsUncertainty::new(x0, x0 / 100.0))
.unwrap();
assert!((y0 - predicted_y.mean()).abs() < predicted_y.standard_deviation());
}
}
#[test]
fn fit_with_additive_constraints_respects_constraints() {
let state = 40;
let mut rng = Isaac64Rng::seed_from_u64(state);
let order = 5;
let domain_max = rng.gen::<f64>().abs();
let domain_min = -domain_max;
let domain = domain_min..domain_max;
let num_calibration_points = rng.gen_range(10..15);
#[allow(clippy::cast_precision_loss)]
let x = (0..num_calibration_points)
.map(|m| {
domain_min
+ m as f64 * (domain_max - domain_min) / (num_calibration_points as f64 - 1.0)
})
.collect::<Array1<_>>();
let intercept_at_t = 0.05;
let constraint = Constraint {
additive: ChebyshevBuilder::new(0)
.with_coefficients(vec![intercept_at_t])
.on_domain(domain.clone())
.build(),
multiplicative: ChebyshevBuilder::new(0)
.with_coefficients(vec![1.0])
.on_domain(domain.clone())
.build(),
};
let mut series = None;
loop {
let coeff = (0..order).map(|_| rng.gen()).collect::<Vec<f64>>();
let this = ChebyshevBuilder::new(order - 1)
.with_coefficients(coeff.clone())
.on_domain(domain.clone())
.build();
let constrained =
this.clone() * constraint.multiplicative.clone() + constraint.additive.clone();
if constrained.is_monotonic().unwrap() {
let _ = series.replace(this);
break;
}
}
let series =
series.unwrap() * constraint.multiplicative.clone() + constraint.additive.clone();
#[allow(clippy::cast_precision_loss)]
let t = (0..num_calibration_points)
.map(|m| -1.0 + m as f64 * (2.0) / (num_calibration_points as f64 - 1.0))
.collect::<Array1<_>>();
let y = t.iter().map(|t| series.evaluate(*t)).collect::<Array1<_>>();
let uy = y
.iter()
.map(|y| rng.gen_range(1e-5..1e-3).mul_add(*y, 1e-7)) .collect::<Array1<_>>();
let builder = ProblemBuilder::new(x.view(), y.view())
.unwrap()
.with_dependent_variance(uy.view())
.unwrap()
.with_constraint(constraint)
.with_scoring_strategy(crate::ScoringStrategy::Aicc);
let problem = builder.build();
let solution = problem.solve(4 * order).unwrap();
let num_tests = 10;
for _ in 0..num_tests {
let idx = rng.gen_range(1..(num_calibration_points - 1));
let x0 = x[idx];
let y0 = y[idx];
let predicted_y = solution
.response(AbsUncertainty::new(x0, x0 / 100.0))
.unwrap();
assert!((y0 - predicted_y.mean()).abs() < predicted_y.standard_deviation());
}
}
#[test]
fn fit_with_full_constraints_respects_constraints() {
let state = 40;
let mut rng = Isaac64Rng::seed_from_u64(state);
let order = 3;
let domain_max = 2.0;
let domain_min = -2.0;
let domain = domain_min..domain_max;
let num_calibration_points = rng.gen_range(20..25);
#[allow(clippy::cast_precision_loss)]
let x = (0..num_calibration_points)
.map(|m| {
domain_min
+ m as f64 * (domain_max - domain_min) / (num_calibration_points as f64 - 1.0)
})
.collect::<Array1<_>>();
let intercept_at_t = 0.25;
let constraint = Constraint {
additive: ChebyshevBuilder::new(0)
.with_coefficients(vec![intercept_at_t])
.on_domain(domain.clone())
.build(),
multiplicative: ChebyshevBuilder::new(1)
.with_coefficients(vec![0.0, 1.0])
.on_domain(domain.clone())
.build(),
};
let mut series = None;
loop {
let coeff = (0..order).map(|_| rng.gen()).collect::<Vec<f64>>();
let this = ChebyshevBuilder::new(order - 1)
.with_coefficients(coeff.clone())
.on_domain(domain.clone())
.build();
let constrained =
this.clone() * constraint.multiplicative.clone() + constraint.additive.clone();
if constrained.is_monotonic().unwrap() {
let _ = series.replace(this);
break;
}
}
let series =
series.unwrap() * constraint.multiplicative.clone() + constraint.additive.clone();
#[allow(clippy::cast_precision_loss)]
let t = (0..num_calibration_points)
.map(|m| -1.0 + m as f64 * (2.0) / (num_calibration_points as f64 - 1.0))
.collect::<Array1<_>>();
let y = t.iter().map(|t| series.evaluate(*t)).collect::<Array1<_>>();
let uy = y
.iter()
.map(|y| rng.gen_range(1e-5..1e-3).mul_add(y.abs(), 1e-7)) .collect::<Array1<_>>();
let builder = ProblemBuilder::new(x.view(), y.view())
.unwrap()
.with_dependent_variance(uy.view())
.unwrap()
.with_constraint(constraint)
.with_scoring_strategy(crate::ScoringStrategy::Aicc);
let problem = builder.build();
let solution = problem.solve(4 * order).unwrap();
let num_tests = 10;
for _ in 0..num_tests {
let idx = rng.gen_range(1..(num_calibration_points - 1));
let x0 = x[idx];
let y0 = y[idx];
let predicted_y = solution
.response(AbsUncertainty::new(x0, x0 / 100.0))
.unwrap();
approx::assert_relative_eq!(y0, predicted_y.mean(), epsilon = 1e-8);
}
}
#[test]
fn fit_with_full_constraints_respects_constraints_for_non_centered_domain() {
let state = 40;
let mut rng = Isaac64Rng::seed_from_u64(state);
let order = 1;
let domain_max = 2.01;
let domain_min = -1.0;
let domain = domain_min..domain_max;
let num_calibration_points = rng.gen_range(20..25);
#[allow(clippy::cast_precision_loss)]
let x = (0..num_calibration_points)
.map(|m| {
domain_min
+ m as f64 * (domain_max - domain_min) / (num_calibration_points as f64 - 1.0)
})
.collect::<Array1<_>>();
let intercept_at_t = -0.0;
let constraint = Constraint {
additive: ChebyshevBuilder::new(0)
.with_coefficients(vec![intercept_at_t + (domain_min + domain_max) / 2.0])
.on_domain(domain.clone())
.build(),
multiplicative: ChebyshevBuilder::new(1)
.with_coefficients(vec![0.0, 1.0 * (domain_max - domain_min) / 2.0])
.on_domain(domain.clone())
.build(),
};
let mut series = None;
loop {
let coeff = (0..order).map(|_| rng.gen()).collect::<Vec<f64>>();
let this = ChebyshevBuilder::new(order - 1)
.with_coefficients(coeff.clone())
.on_domain(domain.clone())
.build();
let constrained =
this.clone() * constraint.multiplicative.clone() + constraint.additive.clone();
if constrained.is_monotonic().unwrap() {
let _ = series.replace(this);
break;
}
}
let series =
series.unwrap() * constraint.multiplicative.clone() + constraint.additive.clone();
#[allow(clippy::cast_precision_loss)]
let t = (0..num_calibration_points)
.map(|m| -1.0 + m as f64 * (2.0) / (num_calibration_points as f64 - 1.0))
.collect::<Array1<_>>();
let y = t.iter().map(|t| series.evaluate(*t)).collect::<Array1<_>>();
let uy = y
.iter()
.map(|y| rng.gen_range(1e-5..1e-3).mul_add(y.abs(), 1e-7)) .collect::<Array1<_>>();
let builder = ProblemBuilder::new(x.view(), y.view())
.unwrap()
.with_dependent_variance(uy.view())
.unwrap()
.with_constraint(constraint)
.with_scoring_strategy(crate::ScoringStrategy::Aicc);
let problem = builder.build();
let solution = problem.solve(4 * order).unwrap();
let num_tests = 10;
for _ in 0..num_tests {
let idx = rng.gen_range(1..(num_calibration_points - 1));
let x0 = x[idx];
let y0 = y[idx];
let predicted_y = solution
.response(AbsUncertainty::new(x0, x0 / 100.0))
.unwrap();
approx::assert_relative_eq!(y0, predicted_y.mean(), epsilon = 1e-8);
}
}
#[test]
fn test_problem_iso() {
use ndarray::arr1;
let x = arr1(&[
0.0, 65.0, 130.0, 195.0, 260.0, 325.0, 390.0, 455.0, 520.0, 585.0, 650.0, 715.0,
]);
let y = arr1(&[
0.0004, 0.0812, 0.1440, 0.1957, 0.2437, 0.2840, 0.3201, 0.3499, 0.3829, 0.4100, 0.4353,
0.4543,
]);
let u2y = vec![
0.0017, 0.0016, 0.0017, 0.0020, 0.0020, 0.0024, 0.0024, 0.0026, 0.0026, 0.0029, 0.0029,
0.0031,
]
.into_iter()
.map(|x| x * x)
.collect::<ndarray::Array1<_>>();
let builder = ProblemBuilder::new(x.view(), y.view())
.unwrap()
.with_dependent_variance(u2y.view())
.unwrap()
.with_scoring_strategy(crate::ScoringStrategy::Bic);
let problem = builder.build();
let solution = problem.solve(6).unwrap();
dbg!(&solution);
let guess = solution
.stimulus(AbsUncertainty::new(0.3905, 0.0027), None, None)
.unwrap();
dbg!(&guess);
}
}