mathru 0.6.3

Simple mathematics library written in Rust

Minimum supported Rust version: 1.38.0

mathru is a numeric library containing algorithms for linear algebra, analysis and statistics, written in pure Rust with optional BLAS/LAPACK support.

Features

- Linear algebra (see the short usage sketch after this feature list)
    - Vector
    - Matrix
        - Basic matrix operations (+, -, *)
        - Transposition
        - LU decomposition (native/lapack)
        - QR decomposition (native/lapack)
        - Hessenberg decomposition (native/lapack)
        - Singular value decomposition
        - Inverse (native/lapack)
        - Pseudo inverse (native/lapack)
        - Determinant (native/lapack)
        - Trace
        - Eigenvalue (native/lapack)

- Ordinary differential equation (ODE)
    - Explicit methods
        - Heun's method
        - Euler method
        - Midpoint method
        - Ralston's method
        - Kutta 3rd order
        - Runge-Kutta 4th order
        - Runge-Kutta-Fehlberg 4(5)
        - Dormand-Prince 4(5)
        - Cash-Karp 4(5)
        - Tsitouras 4(5)
        - Bogacki-Shampine 2(3)
        - Adams-Bashforth
    - Automatic step size control with a configurable starting step size (see the initial value problem example below)
    
- Optimization
    - Gauss-Newton algorithm
    - Gradient descent
    - Newton method
    - Levenberg-Marquardt algorithm
    - Conjugate gradient method

- Statistics
    - Probability distributions
        - Bernoulli
        - Beta
        - Binomial
        - Chi-squared
        - Exponential
        - Gamma
        - Normal
        - Poisson
        - Raised cosine
        - Student-t
        - Uniform
    - Tests
        - Chi-squared 
        - G
        - Student-t

- Elementary functions
    - Trigonometric functions
    - Hyperbolic functions
    - Exponential functions

- Special functions
    - Gamma functions
    - Beta functions
    - Hypergeometric functions
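
A short usage sketch for the linear algebra features listed above. It sticks to constructors and operators that also appear in the examples further down (Matrix::new, Vector::new_column, the vector! macro, dotp and the arithmetic operators); the Matrix transpose call and the reference-based operator overloads are assumed to mirror the Vector usage shown below, so treat this as a sketch rather than exact API documentation:

use mathru::*;
use mathru::algebra::linear::{Matrix, Vector};

fn main()
{
	// Two 2x2 matrices
	let a: Matrix<f64> = Matrix::new(2, 2, vec![1.0, 2.0, 3.0, 4.0]);
	let b: Matrix<f64> = Matrix::new(2, 2, vec![0.0, 1.0, 1.0, 0.0]);

	// Basic matrix operations (+, -, *)
	let sum: Matrix<f64> = &a + &b;
	let difference: Matrix<f64> = &a - &b;
	let product: Matrix<f64> = &a * &b;

	// Transposition
	let a_t: Matrix<f64> = a.transpose();

	// Column vectors and their dot product
	let x: Vector<f64> = vector![1.0; 2.0; 3.0];
	let y: Vector<f64> = Vector::new_column(3, vec![4.0, 5.0, 6.0]);
	let dot: f64 = x.dotp(&y);

	println!("{:?}\n{:?}\n{:?}\n{:?}\n{}", sum, difference, product, a_t, dot);
}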

Usage

Add this to your Cargo.toml for the native Rust implementation:

[dependencies.mathru]
version = "0.6"

Add the following lines to Cargo.toml if the BLAS/LAPACK backend should be used:

[dependencies.mathru]
version = "0.6"
default-features = false
features = ["blaslapack"]

Then import the required modules and the library is ready to use.

Example

use mathru::algebra::linear::{Matrix};

// Compute the LU decomposition of a 2x2 matrix
let a: Matrix<f64> = Matrix::new(2, 2, vec![1.0, -2.0, 3.0, -7.0]);
let l_ref: Matrix<f64> = Matrix::new(2, 2, vec![1.0, 0.0, 1.0 / 3.0, 1.0]);

let (l, u, p): (Matrix<f64>, Matrix<f64>, Matrix<f64>) = a.dec_lu();

assert_eq!(l_ref, l);

Solve an initial value problem with the Dormand-Prince method:

use mathru::*;
use mathru::algebra::linear::{Vector};
use mathru::analysis::ode::{DormandPrince54};
use mathru::analysis::ode::ExplicitODE;

// Define the ODE
// x' = (5t^2 - x) / e^(t + x)
// x(0) = 1
pub struct ExplicitODE1
{
	time_span: (f64, f64),
	init_cond: Vector<f64>
}

impl Default for ExplicitODE1
{
	fn default() -> ExplicitODE1
	{
		ExplicitODE1
		{
			time_span: (0.0, 10.0),
			init_cond: vector![1.0],
		}
	}
}

impl ExplicitODE<f64> for ExplicitODE1
{
	fn func(self: &Self, t: &f64, x: &Vector<f64>) -> Vector<f64>
	{
		return vector!((5.0 * t * t - *x.get(0)) / (t + *x.get(0)).exp());
	}

	// Returns the time span
	fn time_span(self: &Self) -> (f64, f64)
	{
		return self.time_span;
	}

	// Initial value at x(0)
	fn init_cond(self: &Self) -> Vector<f64>
	{
		return self.init_cond.clone();
	}
}


fn main() {
	let h_0: f64 = 0.001;
	let n_max: u32 = 300;
	let abs_tol: f64 = 10e-7;

	let solver: DormandPrince54<f64> = DormandPrince54::new(abs_tol, h_0, n_max);

	// The initial value problem defined above
	let problem: ExplicitODE1 = ExplicitODE1::default();

	let (t, x): (Vec<f64>, Vec<Vector<f64>>) = solver.solve(&problem).unwrap();

	// Print the approximated solution at the end of the time span
	println!("x({}) = {}", t[t.len() - 1], x[x.len() - 1]);
}


Fit a model to noisy samples with the Levenberg-Marquardt algorithm:

// Note: the module paths for Jacobian, LevenbergMarquardt and Normal below are
// assumptions and may differ between mathru versions; check the crate docs.
use mathru::*;
use mathru::algebra::linear::{Matrix, Vector};
use mathru::optimization::{Jacobian, LevenbergMarquardt};
use mathru::statistics::distrib::{Distribution, Normal};

/// y = a + b * exp(c * t) = f(t)
pub struct Example
{
	x: Vector<f64>,
	y: Vector<f64>
}

impl Example
{
	pub fn new(x: Vector<f64>, y: Vector<f64>) -> Example
	{
		Example
		{
			x: x,
			y: y
		}
	}

	pub fn function(x: f64, beta: &Vector<f64>) -> f64
	{
		let beta_0: f64 = *beta.get(0);
		let beta_1: f64 = *beta.get(1);
		let beta_2: f64 = *beta.get(2);
		let f_x: f64 = beta_0 + beta_1 * (beta_2 * x).exp();

		return f_x;
	}
}

impl Jacobian<f64> for Example
{
	fn eval(self: &Self, beta: &Vector<f64>) -> Vector<f64>
	{
		let f_x = self.x.clone().apply(&|x: &f64| Example::function(*x, beta));
		let r: Vector<f64> = &self.y - &f_x;
		return vector![r.dotp(&r)]
	}

	fn jacobian(self: &Self, beta: &Vector<f64>) -> Matrix<f64>
	{
		let (x_m, _x_n) = self.x.dim();
		let (beta_m, _beta_n) = beta.dim();

		let mut jacobian_f: Matrix<f64> = Matrix::zero(x_m, beta_m);

		let f_x = self.x.clone().apply(&|x: &f64| Example::function(*x, beta));
		let residual: Vector<f64> = &self.y - &f_x;

		for i in 0..x_m
		{
			let beta_1: f64 = *beta.get(1);
			let beta_2: f64 = *beta.get(2);

			let x_i: f64 = *self.x.get(i);

			*jacobian_f.get_mut(i, 0) = 1.0;
			*jacobian_f.get_mut(i, 1) = (beta_2 * x_i).exp();
			*jacobian_f.get_mut(i, 2) = beta_1 * x_i * (beta_2 * x_i).exp();
		}

		return (residual.transpose() * jacobian_f * -2.0).into();
	}
}

fn main()
{
	let num_samples: usize = 100;

	let noise: Normal = Normal::new(0.0, 0.05);

	let mut t_vec: Vec<f64> = Vec::with_capacity(num_samples);
	// Start time
	let t_0 = 0.0f64;
	// End time
	let t_1 = 5.0f64;

	let mut y_vec: Vec<f64> = Vec::with_capacity(num_samples);

	// True function parameters
	let beta: Vector<f64> = vector![0.5; 5.0; -1.0];

	// Create samples with noise
	for i in 0..num_samples
	{
		let t_i: f64 = (t_1 - t_0) / (num_samples as f64) * (i as f64);

		// Add some noise
		y_vec.push(Example::function(t_i, &beta) + noise.random());

		t_vec.push(t_i);
	}
    	
	let t: Vector<f64> = Vector::new_column(num_samples, t_vec.clone());
	let y: Vector<f64> = Vector::new_column(num_samples, y_vec.clone());

	let example_function = Example::new(t, y);
     
	// Optimise the fitting parameters with the Levenberg-Marquardt algorithm
	let optim: LevenbergMarquardt<f64> = LevenbergMarquardt::new(100, 0.3, 0.95);
	let beta_opt: Vector<f64> = optim.minimize(&example_function, &vector![-1.5; 1.0; -2.0]).arg();

	println!("{}", beta_opt);
}


License

Licensed under either of

- Apache License, Version 2.0
- MIT license

at your option.

Contribution

Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.

Any contribution is welcome!