use num_complex::Complex;
use serde::Deserialize;
use serde::Serialize;
/// A 2-D point as an `(x, y)` pair of `f64` coordinates.
pub type Point2D = (f64, f64);
/// A 3-D point as an `(x, y, z)` triple of `f64` coordinates.
pub type Point3D = (f64, f64, f64);
/// A rendered escape-time fractal stored as a flat, row-major grid of
/// per-pixel iteration counts.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FractalData {
/// Grid width in pixels.
pub width: usize,
/// Grid height in pixels.
pub height: usize,
/// Row-major iteration counts; pixel (x, y) lives at index `y * width + x`.
pub data: Vec<u32>,
/// Iteration cap used when the grid was generated.
pub max_iter: u32,
}
impl FractalData {
    /// Allocates a `width * height` grid with every count set to zero.
    #[must_use]
    pub fn new(width: usize, height: usize, max_iter: u32) -> Self {
        let data = vec![0; width * height];
        Self {
            width,
            height,
            data,
            max_iter,
        }
    }

    /// Row-major flat index of `(x, y)`; callers must bounds-check first.
    fn index(&self, x: usize, y: usize) -> usize {
        y * self.width + x
    }

    /// Returns the stored count at `(x, y)`, or `None` when the coordinates
    /// fall outside the grid.
    #[must_use]
    pub fn get(&self, x: usize, y: usize) -> Option<u32> {
        // `then` (not `then_some`) keeps the indexing lazy, so an
        // out-of-bounds request yields None instead of panicking.
        (x < self.width && y < self.height).then(|| self.data[self.index(x, y)])
    }

    /// Stores `value` at `(x, y)`; writes outside the grid are ignored.
    pub fn set(&mut self, x: usize, y: usize, value: u32) {
        if x < self.width && y < self.height {
            let i = self.index(x, y);
            self.data[i] = value;
        }
    }
}
/// Renders the Mandelbrot set over a `width` x `height` grid spanning
/// `x_range` x `y_range`, returning per-pixel escape-iteration counts
/// (row-major, `max_iter` for points that never escape).
///
/// Complex squaring is expanded onto `f64` pairs in exactly the operation
/// order `Complex` multiplication uses, so counts are bit-identical to the
/// previous `num_complex`-based implementation.
#[must_use]
pub fn generate_mandelbrot_set(
    width: usize,
    height: usize,
    x_range: (f64, f64),
    y_range: (f64, f64),
    max_iter: u32,
) -> Vec<Vec<u32>> {
    let step_x = (x_range.1 - x_range.0) / width as f64;
    let step_y = (y_range.1 - y_range.0) / height as f64;
    let mut grid = vec![vec![0; width]; height];
    for (row_idx, row) in grid.iter_mut().enumerate() {
        // The imaginary part of c is constant across a row; hoist it.
        let ci = (row_idx as f64).mul_add(step_y, y_range.0);
        for (col_idx, cell) in row.iter_mut().enumerate() {
            let cr = (col_idx as f64).mul_add(step_x, x_range.0);
            let (mut zr, mut zi) = (0.0_f64, 0.0_f64);
            let mut n = 0;
            while zr * zr + zi * zi <= 4.0 && n < max_iter {
                let next_re = zr * zr - zi * zi + cr;
                let m = zr * zi;
                // m + m matches the zr*zi + zi*zr of complex squaring exactly.
                zi = m + m + ci;
                zr = next_re;
                n += 1;
            }
            *cell = n;
        }
    }
    grid
}
/// Number of iterations of z -> z^2 + c (with z0 = 0) before |z|^2 exceeds 4,
/// capped at `max_iter` for points that never escape.
///
/// The complex square is expanded on `f64` pairs in the same operation order
/// as `Complex` multiplication, so the result is bit-identical.
#[must_use]
pub fn mandelbrot_escape_time(
    c_real: f64,
    c_imag: f64,
    max_iter: u32,
) -> u32 {
    let mut re = 0.0_f64;
    let mut im = 0.0_f64;
    let mut n = 0;
    loop {
        if re * re + im * im > 4.0 || n >= max_iter {
            return n;
        }
        let next_re = re * re - im * im + c_real;
        let m = re * im;
        // m + m matches the re*im + im*re of complex squaring exactly.
        im = m + m + c_imag;
        re = next_re;
        n += 1;
    }
}
/// Renders the Julia set of z -> z^2 + c (fixed `c`) over a grid spanning
/// `x_range` x `y_range`; each pixel seeds z and the count is the number of
/// iterations before |z|^2 exceeds 4 (capped at `max_iter`).
///
/// Complex squaring is expanded onto `f64` pairs in the operation order of
/// `Complex` multiplication, so counts are bit-identical to the previous
/// implementation.
#[must_use]
pub fn generate_julia_set(
    width: usize,
    height: usize,
    x_range: (f64, f64),
    y_range: (f64, f64),
    c: (f64, f64),
    max_iter: u32,
) -> Vec<Vec<u32>> {
    let (cr, ci) = c;
    let step_x = (x_range.1 - x_range.0) / width as f64;
    let step_y = (y_range.1 - y_range.0) / height as f64;
    let mut grid = vec![vec![0; width]; height];
    for (row_idx, row) in grid.iter_mut().enumerate() {
        // The seed's imaginary part is constant across a row; hoist it.
        let seed_im = (row_idx as f64).mul_add(step_y, y_range.0);
        for (col_idx, cell) in row.iter_mut().enumerate() {
            let mut zr = (col_idx as f64).mul_add(step_x, x_range.0);
            let mut zi = seed_im;
            let mut n = 0;
            while zr * zr + zi * zi <= 4.0 && n < max_iter {
                let next_re = zr * zr - zi * zi + cr;
                let m = zr * zi;
                // m + m matches zr*zi + zi*zr of complex squaring exactly.
                zi = m + m + ci;
                zr = next_re;
                n += 1;
            }
            *cell = n;
        }
    }
    grid
}
/// Iterations of z -> z^2 + c, starting from the given z, before |z|^2
/// exceeds 4; capped at `max_iter`.
///
/// The complex square is expanded on `f64` pairs in the same operation order
/// as `Complex` multiplication, so the result is bit-identical.
#[must_use]
pub fn julia_escape_time(
    z_real: f64,
    z_imag: f64,
    c_real: f64,
    c_imag: f64,
    max_iter: u32,
) -> u32 {
    let (mut re, mut im) = (z_real, z_imag);
    let mut n = 0;
    while re * re + im * im <= 4.0 && n < max_iter {
        let next_re = re * re - im * im + c_real;
        let m = re * im;
        // m + m matches the re*im + im*re of complex squaring exactly.
        im = m + m + c_imag;
        re = next_re;
        n += 1;
    }
    n
}
/// Renders the Burning Ship fractal: the Mandelbrot recurrence with the real
/// and imaginary parts of z replaced by their absolute values before each
/// squaring. Returns per-pixel escape counts, `max_iter` for bounded points.
#[must_use]
pub fn generate_burning_ship(
    width: usize,
    height: usize,
    x_range: (f64, f64),
    y_range: (f64, f64),
    max_iter: u32,
) -> Vec<Vec<u32>> {
    let step_x = (x_range.1 - x_range.0) / width as f64;
    let step_y = (y_range.1 - y_range.0) / height as f64;
    let mut grid = vec![vec![0; width]; height];
    for (row_idx, row) in grid.iter_mut().enumerate() {
        // The imaginary part of c is constant across a row; hoist it.
        let cy = (row_idx as f64).mul_add(step_y, y_range.0);
        for (col_idx, cell) in row.iter_mut().enumerate() {
            let cx = (col_idx as f64).mul_add(step_x, x_range.0);
            let (mut re, mut im) = (0.0_f64, 0.0_f64);
            let mut n = 0;
            while re * re + im * im <= 4.0 && n < max_iter {
                // |re|^2 = re^2, so abs only matters in the cross term.
                let next_re = re * re - im * im + cx;
                im = (2.0 * re.abs()).mul_add(im.abs(), cy);
                re = next_re;
                n += 1;
            }
            *cell = n;
        }
    }
    grid
}
/// Renders the degree-`d` multibrot set (z -> z^d + c) over a grid spanning
/// `x_range` x `y_range`, returning per-pixel escape counts (`max_iter` for
/// bounded points).
///
/// The per-pixel bailout radius is max(2, |c|^(1/(d-1))), the usual escape
/// bound for the degree-d map.
#[must_use]
pub fn generate_multibrot(
    width: usize,
    height: usize,
    x_range: (f64, f64),
    y_range: (f64, f64),
    d: f64,
    max_iter: u32,
) -> Vec<Vec<u32>> {
    let step_x = (x_range.1 - x_range.0) / width as f64;
    let step_y = (y_range.1 - y_range.0) / height as f64;
    let mut grid = vec![vec![0; width]; height];
    for (row_idx, row) in grid.iter_mut().enumerate() {
        // The imaginary part of c is constant across a row; hoist it.
        let im0 = (row_idx as f64).mul_add(step_y, y_range.0);
        for (col_idx, cell) in row.iter_mut().enumerate() {
            let re0 = (col_idx as f64).mul_add(step_x, x_range.0);
            let c = Complex::new(re0, im0);
            let bailout = 2.0_f64.max(c.norm().powf(1.0 / (d - 1.0)));
            let mut z = Complex::new(0.0, 0.0);
            let mut n = 0;
            while z.norm() <= bailout && n < max_iter {
                z = z.powf(d) + c;
                n += 1;
            }
            *cell = n;
        }
    }
    grid
}
/// Renders the Newton fractal of f(z) = z^3 - 1: each pixel seeds Newton's
/// iteration and is labeled 0..=2 for the cube root of unity it converges to
/// (within `tolerance`), or 3 if no root is reached within `max_iter` steps.
#[must_use]
pub fn generate_newton_fractal(
    width: usize,
    height: usize,
    x_range: (f64, f64),
    y_range: (f64, f64),
    max_iter: u32,
    tolerance: f64,
) -> Vec<Vec<u32>> {
    // The three cube roots of unity.
    let roots = [
        Complex::new(1.0, 0.0),
        Complex::new(-0.5, 3.0_f64.sqrt() / 2.0),
        Complex::new(-0.5, -3.0_f64.sqrt() / 2.0),
    ];
    let step_x = (x_range.1 - x_range.0) / width as f64;
    let step_y = (y_range.1 - y_range.0) / height as f64;
    let mut grid = vec![vec![0; width]; height];
    for (row_idx, row) in grid.iter_mut().enumerate() {
        // The seed's imaginary part is constant across a row; hoist it.
        let im0 = (row_idx as f64).mul_add(step_y, y_range.0);
        for (col_idx, cell) in row.iter_mut().enumerate() {
            let re0 = (col_idx as f64).mul_add(step_x, x_range.0);
            let mut z = Complex::new(re0, im0);
            // 3 is the sentinel for "no root reached".
            let mut label = 3u32;
            'newton: for _ in 0..max_iter {
                let z2 = z * z;
                let f = z2 * z - Complex::new(1.0, 0.0);
                let df = Complex::new(3.0, 0.0) * z2;
                // Bail out near the derivative's zero to avoid a blow-up.
                if df.norm() < 1e-10 {
                    break;
                }
                z -= f / df;
                for (k, &root) in roots.iter().enumerate() {
                    if (z - root).norm() < tolerance {
                        label = k as u32;
                        break 'newton;
                    }
                }
            }
            *cell = label;
        }
    }
    grid
}
/// Integrates the Lorenz system with the classic chaotic parameters from
/// Lorenz (1963) and returns the trajectory. See
/// `generate_lorenz_attractor_custom` for arbitrary parameters.
#[must_use]
pub fn generate_lorenz_attractor(
    start_point: (f64, f64, f64),
    dt: f64,
    num_steps: usize,
) -> Vec<(f64, f64, f64)> {
    const SIGMA: f64 = 10.0;
    const RHO: f64 = 28.0;
    const BETA: f64 = 8.0 / 3.0;
    generate_lorenz_attractor_custom(start_point, dt, num_steps, SIGMA, RHO, BETA)
}
/// Forward-Euler integration of the Lorenz system
/// x' = sigma (y - x), y' = x (rho - z) - y, z' = x y - beta z,
/// returning `num_steps` points after the start point (the seed itself is not
/// included in the output).
#[must_use]
pub fn generate_lorenz_attractor_custom(
    start_point: (f64, f64, f64),
    dt: f64,
    num_steps: usize,
    sigma: f64,
    rho: f64,
    beta: f64,
) -> Vec<(f64, f64, f64)> {
    let (mut x, mut y, mut z) = start_point;
    (0..num_steps)
        .map(|_| {
            let dx = sigma * (y - x);
            let dy = x.mul_add(rho - z, -y);
            let dz = x.mul_add(y, -(beta * z));
            x += dx * dt;
            y += dy * dt;
            z += dz * dt;
            (x, y, z)
        })
        .collect()
}
/// Forward-Euler integration of the Rossler system
/// x' = -y - z, y' = x + a y, z' = b + z (x - c),
/// returning `num_steps` points after the start point (the seed itself is not
/// included in the output).
#[must_use]
pub fn generate_rossler_attractor(
    start_point: (f64, f64, f64),
    dt: f64,
    num_steps: usize,
    a: f64,
    b: f64,
    c: f64,
) -> Vec<(f64, f64, f64)> {
    let (mut x, mut y, mut z) = start_point;
    (0..num_steps)
        .map(|_| {
            let vx = -y - z;
            let vy = a.mul_add(y, x);
            let vz = z.mul_add(x - c, b);
            x += vx * dt;
            y += vy * dt;
            z += vz * dt;
            (x, y, z)
        })
        .collect()
}
/// Iterates the Henon map x' = 1 - a x^2 + y, y' = b x and returns the orbit
/// after the start point (the seed itself is not included in the output).
#[must_use]
pub fn generate_henon_map(
    start_point: (f64, f64),
    num_steps: usize,
    a: f64,
    b: f64,
) -> Vec<(f64, f64)> {
    let (mut x, mut y) = start_point;
    (0..num_steps)
        .map(|_| {
            // mul_add keeps the same fused 1 - a*x^2 evaluation as before.
            let next = ((a * x).mul_add(-x, 1.0) + y, b * x);
            x = next.0;
            y = next.1;
            next
        })
        .collect()
}
/// Iterates the Tinkerbell map
/// x' = x^2 - y^2 + a x + b y, y' = 2 x y + c x + d y,
/// returning the orbit after the start point (the seed itself is not included
/// in the output).
#[must_use]
pub fn generate_tinkerbell_map(
    start_point: (f64, f64),
    num_steps: usize,
    a: f64,
    b: f64,
    c: f64,
    d: f64,
) -> Vec<(f64, f64)> {
    let (mut px, mut py) = start_point;
    (0..num_steps)
        .map(|_| {
            // Nested mul_adds evaluate the polynomials exactly as before.
            let nx = b.mul_add(py, a.mul_add(px, px.mul_add(px, -(py * py))));
            let ny = d.mul_add(py, (2.0 * px).mul_add(py, c * px));
            px = nx;
            py = ny;
            (px, py)
        })
        .collect()
}
/// Orbit of the logistic map x -> r x (1 - x) starting from `x0`, including
/// the seed, so the result has `num_steps + 1` entries.
#[must_use]
pub fn logistic_map_iterate(
    x0: f64,
    r: f64,
    num_steps: usize,
) -> Vec<f64> {
    std::iter::successors(Some(x0), |&x| Some(r * x * (1.0 - x)))
        .take(num_steps + 1)
        .collect()
}
/// Samples the logistic map x -> r x (1 - x) at `num_r_values` evenly spaced
/// values of r across `r_range`. For each r, the first `transient` iterations
/// are discarded and the next `num_points` are recorded as `(r, x)` pairs —
/// the raw data of a bifurcation diagram.
#[must_use]
pub fn logistic_bifurcation(
    r_range: (f64, f64),
    num_r_values: usize,
    transient: usize,
    num_points: usize,
    x0: f64,
) -> Vec<(f64, f64)> {
    let mut data = Vec::with_capacity(num_r_values * num_points);
    // Guard the step: with num_r_values <= 1 the old formula divided by zero,
    // and mul_add(0.0, inf, r0) then produced NaN r for every emitted point.
    // A single r value now maps to r_range.0 exactly.
    let r_step = if num_r_values > 1 {
        (r_range.1 - r_range.0) / (num_r_values as f64 - 1.0)
    } else {
        0.0
    };
    for i in 0..num_r_values {
        let r = (i as f64).mul_add(r_step, r_range.0);
        let mut x = x0;
        // Burn in so the recorded points sit on the attractor.
        for _ in 0..transient {
            x = r * x * (1.0 - x);
        }
        for _ in 0..num_points {
            x = r * x * (1.0 - x);
            data.push((r, x));
        }
    }
    data
}
/// Lyapunov exponent of the logistic map f(x) = r x (1 - x): the average of
/// ln|f'(x)| along the orbit of `x0`, after discarding `transient` iterations.
///
/// Steps where the derivative is exactly zero are skipped so the sum never
/// picks up ln(0) = -inf.
#[must_use]
pub fn lyapunov_exponent_logistic(
    r: f64,
    x0: f64,
    transient: usize,
    num_iterations: usize,
) -> f64 {
    // Burn in to reach the attractor before averaging.
    let mut x = (0..transient).fold(x0, |x, _| r * x * (1.0 - x));
    let mut total = 0.0;
    for _ in 0..num_iterations {
        // f'(x) = r (1 - 2x), evaluated with the same fused multiply-add.
        let slope = r * 2.0f64.mul_add(-x, 1.0);
        if slope.abs() > 0.0 {
            total += slope.abs().ln();
        }
        x = r * x * (1.0 - x);
    }
    total / num_iterations as f64
}
/// Estimates the largest Lyapunov exponent of the Lorenz system by evolving a
/// small perturbation (dx, dy, dz) alongside the main trajectory with Euler
/// steps and averaging the log of its per-step growth.
///
/// The perturbation is renormalized back to length `eps` after every step, so
/// each recorded `norm` is roughly `eps * growth`; the `eps.ln() / dt` term
/// subtracted at the end removes the constant ln(eps) contribution per step.
#[must_use]
pub fn lyapunov_exponent_lorenz(
start_point: (f64, f64, f64),
dt: f64,
num_steps: usize,
sigma: f64,
rho: f64,
beta: f64,
) -> f64 {
let (mut x, mut y, mut z) = start_point;
let mut sum = 0.0;
// Initial perturbation: length eps along the x axis.
let eps = 1e-8;
let mut dx = eps;
let mut dy = 0.0;
let mut dz = 0.0;
for _ in 0..num_steps {
// Euler step of the Lorenz equations for the main trajectory.
let dx_main = sigma * (y - x);
let dy_main = x.mul_add(rho - z, -y);
let dz_main = x.mul_add(y, -(beta * z));
x += dx_main * dt;
y += dy_main * dt;
z += dz_main * dt;
// Euler step of the linearized (tangent-space) dynamics for the
// perturbation; note it is evaluated at the already-updated state.
let ddx = sigma * (dy - dx);
let ddy = dx * (rho - z) - x * dz - dy;
let ddz = beta.mul_add(-dz, dx * y + x * dy);
dx += ddx * dt;
dy += ddy * dt;
dz += ddz * dt;
let norm = (dx * dx + dy * dy + dz * dz).sqrt();
if norm > 0.0 {
// Record ln|perturbation|, then rescale it back to length eps so the
// tangent vector stays in the linear regime.
sum += norm.ln();
dx /= norm;
dy /= norm;
dz /= norm;
dx *= eps;
dy *= eps;
dz *= eps;
}
}
sum / (num_steps as f64 * dt) - eps.ln() / dt
}
/// Box-counting (Minkowski) dimension estimate for a 2-D point set: the slope
/// of ln N(eps) against ln(1/eps), where N(eps) is the number of grid boxes of
/// side eps touched by the points. Returns 0.0 for degenerate input (empty
/// set, fewer than 2 scales, or zero spatial extent).
#[must_use]
pub fn box_counting_dimension(
    points: &[(f64, f64)],
    num_scales: usize,
) -> f64 {
    if points.is_empty() || num_scales < 2 {
        return 0.0;
    }
    // Axis-aligned bounding box of the point set.
    let seed = (f64::MAX, f64::MIN, f64::MAX, f64::MIN);
    let (x_min, x_max, y_min, y_max) = points.iter().fold(seed, |acc, &(px, py)| {
        (acc.0.min(px), acc.1.max(px), acc.2.min(py), acc.3.max(py))
    });
    let extent = (x_max - x_min).max(y_max - y_min);
    if extent <= 0.0 {
        return 0.0;
    }
    let mut log_inv_eps = Vec::with_capacity(num_scales);
    let mut log_counts = Vec::with_capacity(num_scales);
    for level in 0..num_scales {
        // 2, 4, 8, ... boxes per side at successive scales.
        let boxes_per_side = 1 << (level + 1);
        let eps = extent / f64::from(boxes_per_side);
        let mut occupied = std::collections::HashSet::new();
        for &(px, py) in points {
            let cell = (
                ((px - x_min) / eps).floor() as i64,
                ((py - y_min) / eps).floor() as i64,
            );
            occupied.insert(cell);
        }
        if !occupied.is_empty() && eps > 0.0 {
            log_inv_eps.push((1.0 / eps).ln());
            log_counts.push((occupied.len() as f64).ln());
        }
    }
    if log_inv_eps.len() < 2 {
        return 0.0;
    }
    linear_regression_slope(&log_inv_eps, &log_counts)
}
/// Grassberger–Procaccia correlation-dimension estimate: the slope of
/// ln C(r) against ln r, where C(r) is the fraction of point pairs closer
/// than r, sampled at `num_radii` log-spaced radii between the smallest and
/// largest pairwise distances. Returns 0.0 for degenerate input (fewer than
/// 2 points or radii, or all points coincident).
///
/// Note: computes all O(n^2) pairwise distances.
#[must_use]
pub fn correlation_dimension(
    points: &[(f64, f64)],
    num_radii: usize,
) -> f64 {
    if points.len() < 2 || num_radii < 2 {
        return 0.0;
    }
    let n = points.len();
    let mut distances: Vec<f64> = Vec::with_capacity(n * (n - 1) / 2);
    for i in 0..n {
        for j in (i + 1)..n {
            let dx = points[i].0 - points[j].0;
            let dy = points[i].1 - points[j].1;
            distances.push(dx.hypot(dy));
        }
    }
    if distances.is_empty() {
        return 0.0;
    }
    // total_cmp is a total order over f64 (NaN-safe); the previous
    // partial_cmp().unwrap() panicked on NaN coordinates. Unstable sort also
    // avoids the stable sort's allocation; equal distances are interchangeable.
    distances.sort_unstable_by(f64::total_cmp);
    // Floor r_min so ln(r_min) is finite even with duplicate points.
    let r_min = distances[0].max(1e-10);
    let r_max = distances[distances.len() - 1];
    if r_max <= r_min {
        return 0.0;
    }
    let mut log_r = Vec::with_capacity(num_radii);
    let mut log_c = Vec::with_capacity(num_radii);
    for i in 0..num_radii {
        let log_r_val =
            r_min.ln() + (r_max.ln() - r_min.ln()) * (i as f64) / (num_radii as f64 - 1.0);
        let r = log_r_val.exp();
        // Sorted distances: pairs closer than r via binary partition point.
        let count = distances.partition_point(|&d| d < r);
        let c_r = (2.0 * count as f64) / ((n * (n - 1)) as f64);
        if c_r > 0.0 {
            log_r.push(r.ln());
            log_c.push(c_r.ln());
        }
    }
    if log_r.len() < 2 {
        return 0.0;
    }
    linear_regression_slope(&log_r, &log_c)
}
/// Ordinary least-squares slope of `y` against `x`; returns 0.0 when the
/// x values are (numerically) all identical.
pub(crate) fn linear_regression_slope(
    x: &[f64],
    y: &[f64],
) -> f64 {
    let n = x.len() as f64;
    let sum_x: f64 = x.iter().copied().sum();
    let sum_y: f64 = y.iter().copied().sum();
    let sum_xy: f64 = x.iter().zip(y).map(|(&a, &b)| a * b).sum();
    let sum_xx: f64 = x.iter().map(|&a| a * a).sum();
    let denominator = n.mul_add(sum_xx, -(sum_x * sum_x));
    if denominator.abs() < 1e-10 {
        0.0
    } else {
        n.mul_add(sum_xy, -(sum_x * sum_y)) / denominator
    }
}
/// 2-D histogram of orbit points: counts how many points land in each bin of
/// an `x_bins` x `y_bins` grid over the given ranges. Points on or beyond the
/// upper bounds (or below the lower bounds) are discarded.
#[must_use]
pub fn orbit_density(
    points: &[(f64, f64)],
    x_bins: usize,
    y_bins: usize,
    x_range: (f64, f64),
    y_range: (f64, f64),
) -> Vec<Vec<usize>> {
    let bin_w = (x_range.1 - x_range.0) / x_bins as f64;
    let bin_h = (y_range.1 - y_range.0) / y_bins as f64;
    let mut counts = vec![vec![0usize; x_bins]; y_bins];
    let in_window = points
        .iter()
        .filter(|p| p.0 >= x_range.0 && p.0 < x_range.1 && p.1 >= y_range.0 && p.1 < y_range.1);
    for &(px, py) in in_window {
        let col: usize = (((px - x_range.0) / bin_w) as i64).try_into().unwrap_or(0);
        let row: usize = (((py - y_range.0) / bin_h) as i64).try_into().unwrap_or(0);
        if col < x_bins && row < y_bins {
            counts[row][col] += 1;
        }
    }
    counts
}
/// Shannon entropy (in nats) of the normalized bin-count distribution of a
/// density grid; 0.0 for an empty or all-zero grid.
#[must_use]
pub fn orbit_entropy(density: &[Vec<usize>]) -> f64 {
    let total: usize = density.iter().flat_map(|row| row.iter()).sum();
    if total == 0 {
        return 0.0;
    }
    let norm = total as f64;
    density
        .iter()
        .flatten()
        .filter(|&&count| count > 0)
        .map(|&count| {
            let p = count as f64 / norm;
            -(p * p.ln())
        })
        .sum()
}
/// A 2-D affine map (x, y) -> (a x + b y + e, c x + d y + f), i.e. the linear
/// part [[a, b], [c, d]] followed by the translation (e, f).
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct AffineTransform2D {
pub a: f64,
pub b: f64,
pub c: f64,
pub d: f64,
/// x-translation.
pub e: f64,
/// y-translation.
pub f: f64,
}
impl AffineTransform2D {
    /// Builds a transform with linear part [[a, b], [c, d]] and
    /// translation (e, f).
    #[must_use]
    pub const fn new(a: f64, b: f64, c: f64, d: f64, e: f64, f: f64) -> Self {
        Self { a, b, c, d, e, f }
    }

    /// Maps `point` through the transform:
    /// (x, y) -> (a x + b y + e, c x + d y + f).
    #[must_use]
    pub fn apply(&self, point: (f64, f64)) -> (f64, f64) {
        let Self { a, b, c, d, e, f } = *self;
        let (x, y) = point;
        let mapped_x = a.mul_add(x, b * y) + e;
        let mapped_y = c.mul_add(x, d * y) + f;
        (mapped_x, mapped_y)
    }
}
/// Chaos-game rendering of an iterated function system: repeatedly applies a
/// randomly chosen transform (weighted by `probabilities`) and records the
/// orbit after a burn-in of `skip` points. Returns an empty vector when the
/// inputs are unusable (no transforms, mismatched lengths, or a weight sum
/// that is not positive).
#[must_use]
pub fn generate_ifs_fractal(
    transforms: &[AffineTransform2D],
    probabilities: &[f64],
    start_point: (f64, f64),
    num_points: usize,
    skip: usize,
) -> Vec<(f64, f64)> {
    if transforms.is_empty() || transforms.len() != probabilities.len() {
        return vec![];
    }
    let total: f64 = probabilities.iter().sum();
    // A non-positive (or NaN) weight sum cannot be normalized into a
    // distribution; previously this silently produced NaN cumulative bounds.
    if !(total > 0.0) {
        return vec![];
    }
    // Normalized cumulative distribution; the last entry is 1.0.
    let mut cumulative = Vec::with_capacity(probabilities.len());
    let mut running = 0.0;
    for &p in probabilities {
        running += p;
        cumulative.push(running / total);
    }
    let mut points = Vec::with_capacity(num_points);
    let mut current = start_point;
    let mut rng_state = 12345u64;
    // Knuth's MMIX LCG; the top 31 bits scaled by 2^31 give r uniform in
    // [0, 1). The previous code divided those 31 bits by u32::MAX, confining
    // r to [0, 0.5) and permanently starving every transform whose cumulative
    // interval lay above 0.5 (e.g. the third Sierpinski map).
    let lcg_next = |state: &mut u64| -> f64 {
        *state = state
            .wrapping_mul(6_364_136_223_846_793_005)
            .wrapping_add(1);
        (*state >> 33) as f64 / (1u64 << 31) as f64
    };
    for i in 0..(num_points + skip) {
        let r = lcg_next(&mut rng_state);
        let idx = cumulative.iter().position(|&c| r <= c).unwrap_or(0);
        current = transforms[idx].apply(current);
        if i >= skip {
            points.push(current);
        }
    }
    points
}
/// The Sierpinski-triangle IFS: three half-scale contractions that map the
/// triangle onto its three corners, all chosen with equal probability.
#[must_use]
pub fn sierpinski_triangle_ifs() -> (Vec<AffineTransform2D>, Vec<f64>) {
    let half = 0.5;
    let maps = vec![
        AffineTransform2D::new(half, 0.0, 0.0, half, 0.0, 0.0),
        AffineTransform2D::new(half, 0.0, 0.0, half, half, 0.0),
        AffineTransform2D::new(half, 0.0, 0.0, half, 0.25, half),
    ];
    let weights = vec![1.0 / 3.0; 3];
    (maps, weights)
}
/// Barnsley's classic fern IFS: the four standard affine maps with their
/// usual selection weights.
#[must_use]
pub fn barnsley_fern_ifs() -> (Vec<AffineTransform2D>, Vec<f64>) {
    let stem = AffineTransform2D::new(0.0, 0.0, 0.0, 0.16, 0.0, 0.0);
    let frond = AffineTransform2D::new(0.85, 0.04, -0.04, 0.85, 0.0, 1.6);
    let left_leaflet = AffineTransform2D::new(0.2, -0.26, 0.23, 0.22, 0.0, 1.6);
    let right_leaflet = AffineTransform2D::new(-0.15, 0.28, 0.26, 0.24, 0.0, 0.44);
    let maps = vec![stem, frond, left_leaflet, right_leaflet];
    let weights = vec![0.01, 0.85, 0.07, 0.07];
    (maps, weights)
}