use core::{
f32,
marker::PhantomData,
ops::{AddAssign, Div, Mul},
};
use crate::{
NoiseFunction,
cells::WithGradient,
lengths::{DifferentiableLengthFunction, LengthFunction},
rng::NoiseRng,
};
use bevy_math::{Curve, VectorSpace, WithDerivative, curve::derivatives::SampleDerivative};
/// Context built ahead of time (during [`LayeredNoise`] construction) that tracks
/// how much weight the layers are expected to contribute.
pub trait LayerResultContext {
    /// Records that layers will contribute `weight` worth of values during evaluation.
    fn expect_weight(&mut self, weight: f32);
}
/// A [`LayerResultContext`] that can begin accumulating results for inputs of type `I`.
pub trait LayerResultContextFor<I>: LayerResultContext {
    /// The accumulator type handed to layers during one evaluation.
    type Result: LayerResult;
    /// Creates a fresh result accumulator for a single evaluation.
    fn start_result(&self) -> Self::Result;
}
/// An in-progress accumulation of layer values that is eventually finalized
/// into an output.
pub trait LayerResult {
    /// The finalized output type.
    type Output;
    /// Adds `weight` that was not accounted for by [`LayerResultContext::expect_weight`]
    /// during preparation.
    fn add_unexpected_weight_to_total(&mut self, weight: f32);
    /// Finalizes the accumulation into the output value.
    fn finish(self, rng: &mut NoiseRng) -> Self::Output;
}
/// A [`LayerResult`] that can accept values of type `V` from layers.
pub trait LayerResultFor<V>: LayerResult {
    /// Folds `value`, scaled by `weight`, into the accumulator.
    fn include_value(&mut self, value: V, weight: f32);
}
/// Settings that describe how per-layer weights evolve; produces a fresh
/// [`LayerWeights`] generator per evaluation.
pub trait LayerWeightsSettings {
    /// The weight generator type.
    type Weights: LayerWeights;
    /// Creates a weight generator starting from its initial state.
    fn start_weights(&self) -> Self::Weights;
}
/// A stateful generator of per-layer weights.
pub trait LayerWeights {
    /// Returns the weight for the next layer, advancing internal state.
    fn next_weight(&mut self) -> f32;
}
/// A layer that can pre-register the weights it will contribute, so the result
/// context can account for them before any evaluation happens.
pub trait LayerOperation<R: LayerResultContext, W: LayerWeights> {
    /// Registers expected weights with `result_context`, consuming from `weights`
    /// exactly as `do_noise_op` will at evaluation time.
    fn prepare(&self, result_context: &mut R, weights: &mut W);
}
/// A layer that can run its noise operation for inputs of type `I`.
pub trait LayerOperationFor<I: VectorSpace<Scalar = f32>, R: LayerResult, W: LayerWeights> {
    /// Performs this layer's operation: it may sample noise into `result`,
    /// mutate `working_loc` for subsequent layers, and draw from `weights`.
    fn do_noise_op(
        &self,
        seeds: &mut NoiseRng,
        working_loc: &mut I,
        result: &mut R,
        weights: &mut W,
    );
}
// Implements `LayerOperation` and `LayerOperationFor` for tuples of layers.
// Each macro step implements the traits for the tuple of all remaining
// identifiers, then recurses on the tail, covering every tuple arity from 16
// down to 1.
//
// NOTE(review): in the tuple type `($i, $($ni),*)` the head identifier (e.g.
// `T15`) is field 0, while `$f` (e.g. `15`) indexes the *last* field. The
// bodies therefore run `self.15`, `self.14`, ..., `self.0` — i.e. tuple
// elements execute from the last position to the first. `prepare` and
// `do_noise_op` are at least consistent with each other; confirm this
// ordering is intended.
macro_rules! impl_all_operation_tuples {
    // Base case: empty input, nothing left to implement.
    () => { };
    ($i:ident=$f:tt, $($ni:ident=$nf:tt),* $(,)?) => {
        impl<R: LayerResultContext, W: LayerWeights, $i: LayerOperation<R, W>, $($ni: LayerOperation<R, W>),* > LayerOperation<R, W> for ($i, $($ni),*) {
            #[inline]
            fn prepare(&self, result_context: &mut R, weights: &mut W) {
                // Prepare every element, sharing one context and weight stream.
                self.$f.prepare(result_context, weights);
                $(self.$nf.prepare(result_context, weights);)*
            }
        }
        impl<I: VectorSpace<Scalar = f32>, R: LayerResult, W: LayerWeights, $i: LayerOperationFor<I, R, W>, $($ni: LayerOperationFor<I, R, W>),* > LayerOperationFor<I, R, W> for ($i, $($ni),*) {
            #[inline]
            fn do_noise_op(
                &self,
                seeds: &mut NoiseRng,
                working_loc: &mut I,
                result: &mut R,
                weights: &mut W,
            ) {
                // Run every element against the same rng, location, result,
                // and weight stream, in the same order as `prepare`.
                self.$f.do_noise_op(seeds, working_loc, result, weights);
                $(self.$nf.do_noise_op(seeds, working_loc, result, weights);)*
            }
        }
        // Recurse to implement the next shorter tuple.
        impl_all_operation_tuples!($($ni=$nf,)*);
    };
}
impl_all_operation_tuples!(
    T15 = 15,
    T14 = 14,
    T13 = 13,
    T12 = 12,
    T11 = 11,
    T10 = 10,
    T9 = 9,
    T8 = 8,
    T7 = 7,
    T6 = 6,
    T5 = 5,
    T4 = 4,
    T3 = 3,
    T2 = 2,
    T1 = 1,
    T0 = 0,
);
/// A [`NoiseFunction`] that composes layers `N` of noise, accumulated via
/// result context `R` and weighted per-layer by `W`.
///
/// When `DONT_FINISH` is `true`, evaluation returns the raw accumulator
/// instead of calling [`LayerResult::finish`].
#[derive(PartialEq, Eq, Clone, Copy)]
#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "debug", derive(Debug))]
pub struct LayeredNoise<R, W, N, const DONT_FINISH: bool = false> {
    // Context pre-populated by `new` with the weights the layers will contribute.
    result_context: R,
    // Settings that spawn a fresh weight generator per evaluation.
    weight_settings: W,
    // The layer (or tuple of layers) to run.
    noise: N,
}
impl<
R: LayerResultContext + Default,
W: LayerWeightsSettings + Default,
N: LayerOperation<R, W::Weights> + Default,
> Default for LayeredNoise<R, W, N>
{
fn default() -> Self {
Self::new(Default::default(), Default::default(), Default::default())
}
}
impl<R: LayerResultContext, W: LayerWeightsSettings, N: LayerOperation<R, W::Weights>>
    LayeredNoise<R, W, N>
{
    /// Constructs the layered noise, dry-running the layers' `prepare` pass so
    /// the result context can account for every weight they will contribute
    /// at evaluation time.
    pub fn new(result_settings: R, weight_settings: W, noise: N) -> Self {
        let mut context = result_settings;
        let mut planned_weights = weight_settings.start_weights();
        noise.prepare(&mut context, &mut planned_weights);
        Self {
            result_context: context,
            weight_settings,
            noise,
        }
    }
}
impl<
    I: VectorSpace<Scalar = f32>,
    R: LayerResultContextFor<I>,
    W: LayerWeightsSettings,
    N: LayerOperationFor<I, R::Result, W::Weights>,
> NoiseFunction<I> for LayeredNoise<R, W, N, false>
{
    type Output = <R::Result as LayerResult>::Output;

    /// Runs every layer over `input` and finalizes the accumulated result.
    #[inline]
    fn evaluate(&self, mut input: I, seeds: &mut NoiseRng) -> Self::Output {
        let mut layer_weights = self.weight_settings.start_weights();
        let mut accumulator = self.result_context.start_result();
        self.noise
            .do_noise_op(seeds, &mut input, &mut accumulator, &mut layer_weights);
        accumulator.finish(seeds)
    }
}
impl<
    I: VectorSpace<Scalar = f32>,
    R: LayerResultContextFor<I>,
    W: LayerWeightsSettings,
    N: LayerOperationFor<I, R::Result, W::Weights>,
> NoiseFunction<I> for LayeredNoise<R, W, N, true>
{
    type Output = R::Result;

    /// Runs every layer over `input` and returns the raw accumulator without
    /// calling [`LayerResult::finish`], letting the caller finalize it.
    #[inline]
    fn evaluate(&self, mut input: I, seeds: &mut NoiseRng) -> Self::Output {
        let mut layer_weights = self.weight_settings.start_weights();
        let mut accumulator = self.result_context.start_result();
        self.noise
            .do_noise_op(seeds, &mut input, &mut accumulator, &mut layer_weights);
        accumulator
    }
}
/// A layer that samples the wrapped [`NoiseFunction`] once at the current
/// working location and contributes its value with the next weight.
#[derive(Default, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "debug", derive(Debug))]
pub struct Octave<T>(pub T);
impl<T, R: LayerResultContext, W: LayerWeights> LayerOperation<R, W> for Octave<T> {
    /// Registers exactly one weight, mirroring the single value `do_noise_op`
    /// will contribute.
    #[inline]
    fn prepare(&self, result_context: &mut R, weights: &mut W) {
        result_context.expect_weight(weights.next_weight());
    }
}
impl<
    T: NoiseFunction<I>,
    I: VectorSpace<Scalar = f32>,
    R: LayerResultFor<T::Output>,
    W: LayerWeights,
> LayerOperationFor<I, R, W> for Octave<T>
{
    /// Samples the inner noise at the working location and folds the value
    /// into the result with this layer's weight.
    #[inline]
    fn do_noise_op(
        &self,
        seeds: &mut NoiseRng,
        working_loc: &mut I,
        result: &mut R,
        weights: &mut W,
    ) {
        // Drawing the weight is independent of sampling, so do it up front.
        let weight = weights.next_weight();
        let sampled = self.0.evaluate(*working_loc, seeds);
        result.include_value(sampled, weight);
        // Advance the rng so subsequent layers sample with fresh seeds.
        seeds.re_seed();
    }
}
/// A layer that warps the working location using the output of another noise
/// function, affecting all subsequent layers. Contributes no value itself.
#[derive(Clone, Copy, PartialEq)]
#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "debug", derive(Debug))]
pub struct DomainWarp<T> {
    /// The noise function whose output offsets the location.
    pub warper: T,
    /// Scale applied to the warper's output before offsetting.
    pub strength: f32,
}
impl<T: Default> Default for DomainWarp<T> {
fn default() -> Self {
Self {
warper: T::default(),
strength: 1.0,
}
}
}
impl<T, R: LayerResultContext, W: LayerWeights> LayerOperation<R, W> for DomainWarp<T> {
    /// A warp contributes no value, so it registers no weight.
    #[inline]
    fn prepare(&self, _result_context: &mut R, _weights: &mut W) {}
}
impl<T: NoiseFunction<I, Output = I>, I: VectorSpace<Scalar = f32>, R: LayerResult, W: LayerWeights>
    LayerOperationFor<I, R, W> for DomainWarp<T>
{
    /// Offsets the working location by the warper's output scaled by
    /// `strength`, shifting where all subsequent layers sample.
    #[inline]
    fn do_noise_op(
        &self,
        seeds: &mut NoiseRng,
        working_loc: &mut I,
        _result: &mut R,
        _weights: &mut W,
    ) {
        let warp_by = self.warper.evaluate(*working_loc, seeds) * self.strength;
        // Bug fix: this previously assigned `warp_by + warp_by`, which threw
        // away the current location and replaced it with twice the warp
        // vector. A domain warp must *offset* the existing location.
        *working_loc = *working_loc + warp_by;
    }
}
/// A layer wrapper that scales the [`PersistenceWeights`] state by `config`
/// while running the inner layers, restoring it afterwards.
#[derive(Clone, Copy, PartialEq)]
#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "debug", derive(Debug))]
pub struct PersistenceConfig<T> {
    /// The inner layer(s) to run under the scaled persistence.
    pub configured: T,
    /// Multiplier applied to both the persistence factor and the next weight.
    pub config: f32,
}
impl<T: Default> Default for PersistenceConfig<T> {
fn default() -> Self {
Self {
configured: T::default(),
config: 2.0,
}
}
}
impl<T: LayerOperation<R, PersistenceWeights>, R: LayerResultContext>
    LayerOperation<R, PersistenceWeights> for PersistenceConfig<T>
{
    /// Scales the persistence state by `config` around the inner `prepare`,
    /// mirroring exactly what `do_noise_op` does at evaluation time.
    #[inline]
    fn prepare(&self, result_context: &mut R, weights: &mut PersistenceWeights) {
        weights.persistence.0 *= self.config;
        weights.next *= self.config;
        self.configured.prepare(result_context, weights);
        // NOTE(review): restoring by division is only approximate in floating
        // point; any rounding applies identically in `do_noise_op`, so prepare
        // and evaluation stay in sync — confirm this is acceptable.
        weights.persistence.0 /= self.config;
        weights.next /= self.config;
    }
}
impl<T: LayerOperationFor<I, R, PersistenceWeights>, I: VectorSpace<Scalar = f32>, R: LayerResult>
    LayerOperationFor<I, R, PersistenceWeights> for PersistenceConfig<T>
{
    /// Scales the persistence state by `config` around the inner noise op,
    /// then restores it so later sibling layers are unaffected.
    #[inline]
    fn do_noise_op(
        &self,
        seeds: &mut NoiseRng,
        working_loc: &mut I,
        result: &mut R,
        weights: &mut PersistenceWeights,
    ) {
        weights.persistence.0 *= self.config;
        weights.next *= self.config;
        self.configured
            .do_noise_op(seeds, working_loc, result, weights);
        // Restore; matches the multiply/divide pairing used in `prepare`.
        weights.persistence.0 /= self.config;
        weights.next /= self.config;
    }
}
/// A layer that repeats the inner layer `amount` times, scaling the working
/// location by `lacunarity` between repetitions (classic fractal octaves).
#[derive(Clone, Copy, PartialEq)]
#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "debug", derive(Debug))]
pub struct FractalLayers<T> {
    /// The layer to repeat.
    pub layer: T,
    /// Frequency multiplier applied between repetitions.
    pub lacunarity: f32,
    /// Number of repetitions.
    pub amount: u32,
}
impl<T: Default> Default for FractalLayers<T> {
fn default() -> Self {
Self {
layer: T::default(),
lacunarity: 2.0,
amount: 8,
}
}
}
impl<T: LayerOperation<R, W>, R: LayerResultContext, W: LayerWeights> LayerOperation<R, W>
    for FractalLayers<T>
{
    /// Registers the inner layer's weights once per repetition.
    #[inline]
    fn prepare(&self, result_context: &mut R, weights: &mut W) {
        (0..self.amount).for_each(|_| self.layer.prepare(result_context, weights));
    }
}
impl<
    I: VectorSpace<Scalar = f32>,
    T: for<'a> LayerOperationFor<I, FractalLayeredResult<'a, R>, W>,
    R: LayerResult,
    W: LayerWeights,
> LayerOperationFor<I, R, W> for FractalLayers<T>
{
    /// Runs the inner layer `amount` times, scaling the working location and
    /// the tracked artificial frequency by `lacunarity` between repetitions.
    #[inline]
    fn do_noise_op(
        &self,
        seeds: &mut NoiseRng,
        working_loc: &mut I,
        result: &mut R,
        weights: &mut W,
    ) {
        // Fix: `prepare` registers the inner layer's weights exactly `amount`
        // times, but the first repetition used to run unconditionally, so
        // `amount == 0` contributed an unregistered value and desynchronized
        // weight accounting. Bail out early to keep both paths in agreement.
        if self.amount == 0 {
            return;
        }
        // Wrap the result so fractal-aware accumulators can scale gradients
        // by the artificial frequency introduced by lacunarity.
        let mut result = FractalLayeredResult {
            result,
            artificial_frequency: 1.0,
        };
        self.layer
            .do_noise_op(seeds, working_loc, &mut result, weights);
        for _ in 1..self.amount {
            *working_loc = *working_loc * self.lacunarity;
            result.artificial_frequency *= self.lacunarity;
            self.layer
                .do_noise_op(seeds, working_loc, &mut result, weights);
        }
    }
}
/// A [`LayerResultFor`] that also understands values produced inside
/// [`FractalLayers`], where the location has been artificially scaled.
pub trait FractalLayerResultCompatible<T>: LayerResultFor<T> {
    /// Folds `value` in with `weight`; `artificial_frequency` is the cumulative
    /// lacunarity scaling applied to the location when the value was sampled.
    fn include_fractal_value(&mut self, value: T, weight: f32, artificial_frequency: f32);
}
/// A view over an inner result used inside [`FractalLayers`] that carries the
/// current cumulative lacunarity scaling alongside the accumulator.
pub struct FractalLayeredResult<'a, R> {
    // The wrapped accumulator; finalization is left to the caller.
    result: &'a mut R,
    // Cumulative lacunarity applied to the working location so far.
    artificial_frequency: f32,
}
impl<'a, R: LayerResult> LayerResult for FractalLayeredResult<'a, R> {
    type Output = &'a mut R;
    /// Delegates weight bookkeeping to the wrapped accumulator.
    #[inline]
    fn add_unexpected_weight_to_total(&mut self, weight: f32) {
        self.result.add_unexpected_weight_to_total(weight);
    }
    /// Hands back the wrapped accumulator without finalizing it; the owner of
    /// the inner result is responsible for calling its `finish`.
    #[inline]
    fn finish(self, _rng: &mut NoiseRng) -> Self::Output {
        self.result
    }
}
impl<'a, T, R: FractalLayerResultCompatible<T>> LayerResultFor<T> for FractalLayeredResult<'a, R> {
    /// Forwards the value to the inner accumulator, attaching the current
    /// artificial frequency so gradients can be rescaled correctly.
    #[inline]
    fn include_value(&mut self, value: T, weight: f32) {
        self.result
            .include_fractal_value(value, weight, self.artificial_frequency);
    }
}
/// Weight settings where each successive layer's weight is the previous one
/// multiplied by this factor (fractal persistence).
#[derive(Clone, Copy, PartialEq)]
#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "debug", derive(Debug))]
pub struct Persistence(pub f32);
impl Default for Persistence {
fn default() -> Self {
Self(0.5)
}
}
impl Persistence {
    /// A persistence of 1: every layer receives the same weight.
    pub const CONSTANT: Self = Self(1.0);
}
/// The running weight generator produced by [`Persistence`].
#[derive(Clone, Copy, PartialEq)]
pub struct PersistenceWeights {
    // The decay factor applied after each drawn weight.
    persistence: Persistence,
    // The weight the next layer will receive.
    next: f32,
}
impl LayerWeights for PersistenceWeights {
    /// Hands out the current weight, then decays it by the persistence factor
    /// for the layer after this one.
    #[inline]
    fn next_weight(&mut self) -> f32 {
        let decayed = self.next * self.persistence.0;
        core::mem::replace(&mut self.next, decayed)
    }
}
impl LayerWeightsSettings for Persistence {
    type Weights = PersistenceWeights;

    /// The first layer starts at full weight; decay applies per weight drawn.
    #[inline]
    fn start_weights(&self) -> Self::Weights {
        PersistenceWeights {
            next: 1.0,
            persistence: *self,
        }
    }
}
/// A result context that normalizes the weighted sum of layer values of type
/// `T` by the total expected weight.
#[derive(Clone, Copy, PartialEq)]
#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "debug", derive(Debug))]
pub struct Normed<T> {
    // `T` only parameterizes the produced `NormedResult<T>`.
    marker: PhantomData<T>,
    // Sum of weights registered during `prepare`.
    total_weights: f32,
}
impl<T> Default for Normed<T> {
fn default() -> Self {
Self {
marker: PhantomData,
total_weights: 0.0,
}
}
}
impl<T> LayerResultContext for Normed<T>
where
    NormedResult<T>: LayerResult,
{
    /// Accumulates expected weight; used later as the normalization divisor.
    #[inline]
    fn expect_weight(&mut self, weight: f32) {
        self.total_weights += weight;
    }
}
impl<T: Default, I> LayerResultContextFor<I> for Normed<T>
where
    NormedResult<T>: LayerResult,
{
    type Result = NormedResult<T>;
    /// Starts a zeroed accumulator carrying the pre-computed total weight.
    #[inline]
    fn start_result(&self) -> Self::Result {
        NormedResult {
            total_weights: self.total_weights,
            running_total: T::default(),
        }
    }
}
/// The accumulator produced by [`Normed`]: a weighted running total that is
/// divided by the total weight on finish.
#[derive(Clone, Copy, PartialEq)]
pub struct NormedResult<T> {
    // Divisor used at `finish`; expected plus any unexpected weight.
    total_weights: f32,
    // Weighted sum of included values.
    running_total: T,
}
impl<T: Div<f32>> LayerResult for NormedResult<T> {
    type Output = T::Output;
    #[inline]
    fn add_unexpected_weight_to_total(&mut self, weight: f32) {
        self.total_weights += weight;
    }
    /// Normalizes the running total by the total weight.
    // NOTE(review): if no weights were ever registered this divides by zero;
    // presumably layered configurations always register at least one weight —
    // confirm.
    #[inline]
    fn finish(self, _rng: &mut NoiseRng) -> Self::Output {
        self.running_total / self.total_weights
    }
}
impl<T: AddAssign + Mul<f32, Output = T>, I: Into<T>> LayerResultFor<I> for NormedResult<T>
where
    Self: LayerResult,
{
    /// Adds the weighted value into the running total.
    #[inline]
    fn include_value(&mut self, value: I, weight: f32) {
        self.running_total += value.into() * weight;
    }
}
impl<T: VectorSpace<Scalar = f32>, I: Into<T>> FractalLayerResultCompatible<I> for NormedResult<T>
where
    Self: LayerResultFor<I>,
{
    /// Plain values carry no gradient, so the artificial frequency from
    /// fractal scaling is irrelevant and ignored here.
    #[inline]
    fn include_fractal_value(&mut self, value: I, weight: f32, _artificial_frequency: f32) {
        self.running_total = self.running_total + value.into() * weight;
    }
}
impl<
    T: AddAssign + Mul<f32, Output = T>,
    G: AddAssign + Mul<f32, Output = G>,
    IT: Into<T>,
    IG: Into<G>,
> FractalLayerResultCompatible<WithGradient<IT, IG>> for NormedResult<WithGradient<T, G>>
where
    Self: LayerResultFor<WithGradient<IT, IG>>,
{
    /// Accumulates value and gradient separately; the gradient is additionally
    /// scaled by the artificial frequency (chain rule: the location was scaled
    /// by lacunarity, so the gradient w.r.t. the original input scales up).
    #[inline]
    fn include_fractal_value(
        &mut self,
        value: WithGradient<IT, IG>,
        weight: f32,
        artificial_frequency: f32,
    ) {
        self.running_total.value += value.value.into() * weight;
        self.running_total.gradient += value.gradient.into() * weight * artificial_frequency;
    }
}
/// A result context like [`Normed`], but each layer's contribution is damped
/// based on the accumulated derivative so far (commonly used for erosion-like
/// fractal noise).
#[derive(Clone, Copy, PartialEq)]
#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "debug", derive(Debug))]
pub struct NormedByDerivative<T, L, C> {
    /// Computes the magnitude of the accumulated derivative.
    pub derivative_calculator: L,
    /// Curve mapping derivative magnitude to a damping factor.
    pub derivative_contribution: C,
    /// Scales the derivative magnitude before sampling the curve.
    pub derivative_falloff: f32,
    // `T` only parameterizes the produced result type.
    marker: PhantomData<T>,
    // Sum of weights registered during `prepare`.
    total_weights: f32,
}
impl<T, L: Default, C: Default> Default for NormedByDerivative<T, L, C> {
fn default() -> Self {
Self {
marker: PhantomData,
total_weights: 0.0,
derivative_calculator: L::default(),
derivative_contribution: C::default(),
derivative_falloff: 0.25,
}
}
}
impl<T, L, C> NormedByDerivative<T, L, C> {
    /// Builder-style setter for the derivative falloff factor.
    pub fn with_falloff(mut self, derivative_falloff: f32) -> Self {
        self.derivative_falloff = derivative_falloff;
        self
    }
}
impl<T, L: Copy, C: Copy> LayerResultContext for NormedByDerivative<T, L, C>
where
    NormedResult<T>: LayerResult,
{
    /// Accumulates expected weight; used later as the normalization divisor.
    #[inline]
    fn expect_weight(&mut self, weight: f32) {
        self.total_weights += weight;
    }
}
impl<T: Default + Div<f32>, I: VectorSpace<Scalar = f32>, L: Copy, C: Copy> LayerResultContextFor<I>
    for NormedByDerivative<T, L, C>
where
    NormedByDerivativeResult<T, I, L, C>: LayerResult,
{
    type Result = NormedByDerivativeResult<T, I, L, C>;
    /// Starts a zeroed accumulator (including a zero running derivative in the
    /// input space `I`) carrying this context's settings.
    #[inline]
    fn start_result(&self) -> Self::Result {
        NormedByDerivativeResult {
            total_weights: self.total_weights,
            running_total: T::default(),
            running_derivative: I::ZERO,
            derivative_calculator: self.derivative_calculator,
            derivative_contribution: self.derivative_contribution,
            derivative_falloff: self.derivative_falloff,
        }
    }
}
/// The accumulator produced by [`NormedByDerivative`]: a weighted running
/// total plus a running derivative used to damp later contributions.
#[derive(Clone, Copy, PartialEq)]
pub struct NormedByDerivativeResult<T, G, L, C> {
    // Divisor used at `finish`.
    total_weights: f32,
    // Weighted, derivative-damped sum of included values.
    running_total: T,
    // Sum of the gradients included so far.
    running_derivative: G,
    // Computes the magnitude of `running_derivative`.
    derivative_calculator: L,
    // Curve from derivative magnitude to damping factor.
    derivative_contribution: C,
    // Scales the magnitude before sampling the curve.
    derivative_falloff: f32,
}
impl<T: Div<f32>, G, L, C> LayerResult for NormedByDerivativeResult<T, G, L, C> {
    type Output = T::Output;
    #[inline]
    fn add_unexpected_weight_to_total(&mut self, weight: f32) {
        self.total_weights += weight;
    }
    /// Normalizes the running total by the total registered weight.
    #[inline]
    fn finish(self, _rng: &mut NoiseRng) -> Self::Output {
        self.running_total / self.total_weights
    }
}
impl<I, T, G, L, C> LayerResultFor<I> for NormedByDerivativeResult<T, G, L, C>
where
    Self: FractalLayerResultCompatible<I> + LayerResult,
{
    /// A non-fractal inclusion is just a fractal inclusion with no artificial
    /// frequency scaling (factor of 1).
    #[inline]
    fn include_value(&mut self, value: I, weight: f32) {
        self.include_fractal_value(value, weight, 1.0);
    }
}
impl<
    T: VectorSpace<Scalar = f32> + AddAssign + Mul<f32, Output = T>,
    I: Into<T>,
    IG: Into<G> + Copy,
    G: VectorSpace<Scalar = f32> + AddAssign + Mul<f32, Output = G>,
    L: LengthFunction<G>,
    C: Curve<f32>,
> FractalLayerResultCompatible<WithGradient<I, IG>> for NormedByDerivativeResult<T, G, L, C>
{
    /// Damps this layer's value by a factor derived from the magnitude of the
    /// derivative accumulated from *previous* layers, then folds the layer's
    /// gradient into the running derivative.
    #[inline]
    fn include_fractal_value(
        &mut self,
        value: WithGradient<I, IG>,
        weight: f32,
        artificial_frequency: f32,
    ) {
        // Chain rule: the location was scaled by `artificial_frequency`.
        let gradient: G = value.gradient.into() * artificial_frequency * weight;
        let value = value.value.into() * weight;
        // Magnitude of the derivative accumulated so far (this layer excluded).
        let total_derivative = self
            .derivative_calculator
            .length_of(self.running_derivative);
        let additional_weight = self
            .derivative_contribution
            .sample_unchecked(total_derivative * self.derivative_falloff);
        // Order matters: update the derivative only after computing the damping.
        self.running_derivative += gradient;
        self.running_total += value * additional_weight;
    }
}
impl<
    IT: Into<f32>,
    IG: Into<G> + Copy,
    G: VectorSpace<Scalar = f32> + AddAssign + Mul<G, Output = G>,
    L: DifferentiableLengthFunction<G>,
    C: SampleDerivative<f32>,
> FractalLayerResultCompatible<WithGradient<IT, IG>>
    for NormedByDerivativeResult<WithGradient<f32, G>, G, L, C>
{
    /// Like the scalar impl, but also tracks the gradient of the damped output
    /// itself via the product rule, so the final result carries a derivative.
    #[inline]
    fn include_fractal_value(
        &mut self,
        value: WithGradient<IT, IG>,
        weight: f32,
        artificial_frequency: f32,
    ) {
        // Chain rule for the lacunarity scaling of the location.
        let gradient: G = value.gradient.into() * artificial_frequency * weight;
        let value = value.value.into() * weight;
        // Length of the running derivative plus its gradient w.r.t. that vector.
        let total_derivative = self
            .derivative_calculator
            .length_and_gradient_of(self.running_derivative);
        // Damping factor and its scalar derivative at the sampled point.
        let additional_weight = self
            .derivative_contribution
            .sample_with_derivative_unchecked(total_derivative.value * self.derivative_falloff);
        // Order matters: damping uses the derivative from previous layers only.
        self.running_derivative += gradient;
        // Chain rule for the damping factor's dependence on the input.
        // NOTE(review): `total_derivative.gradient * gradient` is a
        // component-wise product via `Mul<G, Output = G>` — confirm this is the
        // intended composition (rather than a dot product).
        let d_additional_weight = total_derivative.gradient
            * gradient
            * additional_weight.derivative
            * self.derivative_falloff;
        self.running_total.value += value * additional_weight.value;
        // Product rule: d(value * w) = dvalue * w + value * dw.
        self.running_total.gradient +=
            gradient * additional_weight.value + d_additional_weight * value;
    }
}
/// A derivative-contribution curve `1 / (1 + t)`: full contribution at zero
/// derivative, hyperbolically decreasing as the derivative grows.
#[derive(Default, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "debug", derive(Debug))]
pub struct PeakDerivativeContribution;
impl Curve<f32> for PeakDerivativeContribution {
    /// Domain is `[0, +inf)`; derivative magnitudes are never negative.
    #[inline]
    fn domain(&self) -> bevy_math::curve::Interval {
        // SAFETY: 0.0 < +inf, so this interval is always valid and `new`
        // cannot return an error.
        unsafe { bevy_math::curve::Interval::new(0.0, f32::INFINITY).unwrap_unchecked() }
    }

    /// `1 / (1 + t)`: 1 at `t = 0`, approaching 0 as `t` grows.
    #[inline]
    fn sample_unchecked(&self, t: f32) -> f32 {
        (1.0 + t).recip()
    }
}
impl SampleDerivative<f32> for PeakDerivativeContribution {
    /// Value `1 / (1 + t)` with its analytic derivative `-1 / (1 + t)^2`.
    #[inline]
    fn sample_with_derivative_unchecked(&self, t: f32) -> WithDerivative<f32> {
        let denom = 1.0 + t;
        WithDerivative {
            value: denom.recip(),
            derivative: -(denom * denom).recip(),
        }
    }
}
/// A derivative-contribution curve `e^{-t}`: full contribution at zero
/// derivative, smoothly (exponentially) decaying as the derivative grows.
#[derive(Default, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "debug", derive(Debug))]
pub struct SmoothDerivativeContribution;
impl Curve<f32> for SmoothDerivativeContribution {
    /// Domain is `[0, +inf)`; derivative magnitudes are never negative.
    #[inline]
    fn domain(&self) -> bevy_math::curve::Interval {
        // SAFETY: 0.0 < +inf, so this interval is always valid and `new`
        // cannot return an error.
        let half_line = bevy_math::curve::Interval::new(0.0, f32::INFINITY);
        unsafe { half_line.unwrap_unchecked() }
    }

    /// `e^{-t}`: 1 at `t = 0`, smoothly approaching 0.
    #[inline]
    fn sample_unchecked(&self, t: f32) -> f32 {
        bevy_math::ops::exp(-t)
    }
}
impl SampleDerivative<f32> for SmoothDerivativeContribution {
    /// Value `e^{-t}` with its analytic derivative `-e^{-t}`; the single
    /// exponential evaluation is shared between the two.
    #[inline]
    fn sample_with_derivative_unchecked(&self, t: f32) -> WithDerivative<f32> {
        let falloff = bevy_math::ops::exp(-t);
        WithDerivative {
            value: falloff,
            derivative: -falloff,
        }
    }
}