use std::sync::atomic::{AtomicU32, AtomicUsize, Ordering};
use std::sync::Arc;

use crate::analysis::Analyser;
use crate::buffer::{ChannelConfig, ChannelConfigOptions, ChannelInterpretation};
use crate::context::{AsBaseAudioContext, AudioContextRegistration};
use crate::process::{AudioParamValues, AudioProcessor};
use crate::SampleRate;

use crossbeam_channel::{self, Receiver, Sender};

use super::AudioNode;

/// Options for constructing an AnalyserNode
pub struct AnalyserOptions {
    pub fft_size: usize,
    pub smoothing_time_constant: f32,
    /*
    pub max_decibels: f32,
    pub min_decibels: f32,
    */
    pub channel_config: ChannelConfigOptions,
}

impl Default for AnalyserOptions {
    fn default() -> Self {
        Self {
            fft_size: 2048,
            smoothing_time_constant: 0.8,
            /*
            max_decibels: -30.,
            min_decibels: -100.,
            */
            channel_config: ChannelConfigOptions::default(),
        }
    }
}

/// Request sent from the control thread to the render thread. The node hands over a
/// buffer, the renderer fills it with the requested data and returns it via the
/// enclosed one-shot `sender`.
enum AnalyserRequest {
    FloatTime {
        sender: Sender<Vec<f32>>,
        buffer: Vec<f32>,
    },
    FloatFrequency {
        sender: Sender<Vec<f32>>,
        buffer: Vec<f32>,
    },
}

/// Provides real-time frequency and time-domain analysis information
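///
/// A minimal construction sketch; the surrounding audio `context` setup is assumed
/// and depends on the crate version, so it is not shown here:
///
/// ```ignore
/// let options = AnalyserOptions {
///     fft_size: 1024,
///     smoothing_time_constant: 0.5,
///     ..AnalyserOptions::default()
/// };
/// let analyser = AnalyserNode::new(&context, options);
///
/// assert_eq!(analyser.fft_size(), 1024);
/// assert_eq!(analyser.frequency_bin_count(), 512);
/// ```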
pub struct AnalyserNode {
    registration: AudioContextRegistration,
    channel_config: ChannelConfig,
    fft_size: Arc<AtomicUsize>,
    smoothing_time_constant: Arc<AtomicU32>,
    sender: Sender<AnalyserRequest>,
    /*
    max_decibels: f32,
    min_decibels: f32,
    */
}

impl AudioNode for AnalyserNode {
    fn registration(&self) -> &AudioContextRegistration {
        &self.registration
    }

    fn channel_config_raw(&self) -> &ChannelConfig {
        &self.channel_config
    }

    fn number_of_inputs(&self) -> u32 {
        1
    }
    fn number_of_outputs(&self) -> u32 {
        1
    }
}

impl AnalyserNode {
    pub fn new<C: AsBaseAudioContext>(context: &C, options: AnalyserOptions) -> Self {
        context.base().register(move |registration| {
            let fft_size = Arc::new(AtomicUsize::new(options.fft_size));
            let smoothing_time_constant = Arc::new(AtomicU32::new(
                (options.smoothing_time_constant * 100.) as u32,
            ));

            let (sender, receiver) = crossbeam_channel::bounded(0);

            let render = AnalyserRenderer {
                analyser: Analyser::new(options.fft_size),
                fft_size: fft_size.clone(),
                smoothing_time_constant: smoothing_time_constant.clone(),
                receiver,
            };

            let node = AnalyserNode {
                registration,
                channel_config: options.channel_config.into(),
                fft_size,
                smoothing_time_constant,
                sender,
            };

            (node, Box::new(render))
        })
    }

    /// The number of frequency bins used for the analysis (half the FFT size)
    pub fn frequency_bin_count(&self) -> usize {
        self.fft_size.load(Ordering::SeqCst) / 2
    }

    /// The size of the FFT used for frequency-domain analysis (in sample-frames)
    pub fn fft_size(&self) -> usize {
        self.fft_size.load(Ordering::SeqCst)
    }

    /// Set the FFT size. This MUST be a power of two in the range 32 to 32768
    pub fn set_fft_size(&self, fft_size: usize) {
        // enforce the documented constraint: a power of two within [32, 32768]
        assert!(
            fft_size.is_power_of_two() && (32..=32768).contains(&fft_size),
            "fft_size must be a power of two between 32 and 32768"
        );
        self.fft_size.store(fft_size, Ordering::SeqCst);
    }

    /// Time averaging parameter with the last analysis frame: 0 means no averaging,
    /// values closer to 1 weight previous analysis frames more heavily
    pub fn smoothing_time_constant(&self) -> f32 {
        self.smoothing_time_constant.load(Ordering::SeqCst) as f32 / 100.
    }

    /// Set the smoothing time constant. This MUST be a value between 0 and 1
    pub fn set_smoothing_time_constant(&self, v: f32) {
        // enforce the documented constraint: a value within [0, 1]
        assert!(
            (0.0..=1.0).contains(&v),
            "smoothing time constant must be between 0 and 1"
        );
        self.smoothing_time_constant
            .store((v * 100.) as u32, Ordering::SeqCst);
    }

    /// Copies the current time domain data (waveform data) into the provided buffer.
    ///
    /// This call blocks until the render thread has picked up the request and sent
    /// back the filled buffer.
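    ///
    /// A minimal sketch, assuming an audio `context` for this crate has already been
    /// set up elsewhere (the context construction is not shown here):
    ///
    /// ```ignore
    /// let analyser = AnalyserNode::new(&context, AnalyserOptions::default());
    /// // the time domain buffer is typically sized to the full FFT size
    /// let samples = vec![0.; analyser.fft_size()];
    /// let samples = analyser.get_float_time_domain_data(samples);
    /// ```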
    pub fn get_float_time_domain_data(&self, buffer: Vec<f32>) -> Vec<f32> {
        let (sender, receiver) = crossbeam_channel::bounded(0);
        let request = AnalyserRequest::FloatTime { sender, buffer };
        self.sender.send(request).unwrap();
        receiver.recv().unwrap()
    }

    /// Copies the current frequency data into the provided buffer.
    ///
    /// This call blocks until the render thread has picked up the request and sent
    /// back the filled buffer.
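    ///
    /// A minimal sketch, assuming an audio `context` for this crate has already been
    /// set up elsewhere:
    ///
    /// ```ignore
    /// let analyser = AnalyserNode::new(&context, AnalyserOptions::default());
    /// // the frequency buffer is typically sized to the number of frequency bins
    /// let bins = vec![0.; analyser.frequency_bin_count()];
    /// let bins = analyser.get_float_frequency_data(bins);
    /// ```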
    pub fn get_float_frequency_data(&self, buffer: Vec<f32>) -> Vec<f32> {
        let (sender, receiver) = crossbeam_channel::bounded(0);
        let request = AnalyserRequest::FloatFrequency { sender, buffer };
        self.sender.send(request).unwrap();
        receiver.recv().unwrap()
    }
}

/// Render-thread counterpart of [`AnalyserNode`]: feeds the incoming audio into the
/// [`Analyser`] ring buffer, recomputes the frequency data, and answers data requests
/// coming from the control thread.
struct AnalyserRenderer {
    pub analyser: Analyser,
    pub fft_size: Arc<AtomicUsize>,
    pub smoothing_time_constant: Arc<AtomicU32>,
    pub receiver: Receiver<AnalyserRequest>,
}

// SAFETY:
// AudioBuffer is not Send, but the buffer Vec is empty when we move it to the render thread.
unsafe impl Send for AnalyserRenderer {}

impl AudioProcessor for AnalyserRenderer {
    fn process(
        &mut self,
        inputs: &[crate::alloc::AudioBuffer],
        outputs: &mut [crate::alloc::AudioBuffer],
        _params: AudioParamValues,
        _timestamp: f64,
        _sample_rate: SampleRate,
    ) -> bool {
        // single input/output node
        let input = &inputs[0];
        let output = &mut outputs[0];

        // pass through input
        *output = input.clone();

        // add current input to ring buffer
        let mut mono = input.clone();
        mono.mix(1, ChannelInterpretation::Speakers);
        let mono_data = mono.channel_data(0).clone();
        self.analyser.add_data(mono_data);

        // calculate frequency domain every `fft_size` samples
        let fft_size = self.fft_size.load(Ordering::Relaxed);
        let resized = self.analyser.current_fft_size() != fft_size;
        let complete_cycle = self.analyser.check_complete_cycle(fft_size);
        if resized || complete_cycle {
            let smoothing_time_constant =
                self.smoothing_time_constant.load(Ordering::Relaxed) as f32 / 100.;
            self.analyser
                .calculate_float_frequency(fft_size, smoothing_time_constant);
        }

        // check if any information was requested from the control thread
        if let Ok(request) = self.receiver.try_recv() {
            match request {
                AnalyserRequest::FloatTime { sender, mut buffer } => {
                    self.analyser.get_float_time(&mut buffer[..], fft_size);

                    // the send is allowed to fail if the receiver has been disconnected
                    let _ = sender.send(buffer);
                }
                AnalyserRequest::FloatFrequency { sender, mut buffer } => {
                    self.analyser.get_float_frequency(&mut buffer[..]);

                    // the send is allowed to fail if the receiver has been disconnected
                    let _ = sender.send(buffer);
                }
            }
        }

        false
    }
}