use crate::analysis::{
Analyser, AnalyserRingBuffer, DEFAULT_FFT_SIZE, DEFAULT_MAX_DECIBELS, DEFAULT_MIN_DECIBELS,
DEFAULT_SMOOTHING_TIME_CONSTANT,
};
use crate::context::{AudioContextRegistration, BaseAudioContext};
use crate::render::{
AudioParamValues, AudioProcessor, AudioRenderQuantum, AudioWorkletGlobalScope,
};
use super::{AudioNode, AudioNodeOptions, ChannelConfig, ChannelInterpretation};
/// Options for constructing an [`AnalyserNode`].
///
/// All fields default to the spec-defined analyser defaults
/// (see the `DEFAULT_*` constants imported from `crate::analysis`).
#[derive(Clone, Debug)]
pub struct AnalyserOptions {
    // Length of the FFT window used for frequency-domain analysis
    pub fft_size: usize,
    // Upper bound (dB) used when scaling frequency data to bytes
    pub max_decibels: f64,
    // Lower bound (dB) used when scaling frequency data to bytes
    pub min_decibels: f64,
    // Time-averaging factor applied between successive FFT frames
    pub smoothing_time_constant: f64,
    // Channel count/mode/interpretation for the underlying AudioNode
    pub audio_node_options: AudioNodeOptions,
}
impl Default for AnalyserOptions {
fn default() -> Self {
Self {
fft_size: DEFAULT_FFT_SIZE,
max_decibels: DEFAULT_MAX_DECIBELS,
min_decibels: DEFAULT_MIN_DECIBELS,
smoothing_time_constant: DEFAULT_SMOOTHING_TIME_CONSTANT,
audio_node_options: AudioNodeOptions::default(),
}
}
}
/// Node providing real-time frequency and time-domain analysis information.
///
/// The node passes audio through unchanged; analysis data is read on the
/// control thread via the `get_*_data` methods.
#[derive(Debug)]
pub struct AnalyserNode {
    // Handle tying this node to its owning audio context
    registration: AudioContextRegistration,
    // Channel count/mode/interpretation configuration
    channel_config: ChannelConfig,
    // Control-thread side of the analysis engine (FFT state, settings)
    analyser: Analyser,
}
impl AudioNode for AnalyserNode {
    fn registration(&self) -> &AudioContextRegistration {
        &self.registration
    }

    fn channel_config(&self) -> &ChannelConfig {
        &self.channel_config
    }

    // AnalyserNode has exactly one input and one output (pass-through).
    fn number_of_inputs(&self) -> usize {
        1
    }

    fn number_of_outputs(&self) -> usize {
        1
    }
}
impl AnalyserNode {
    /// Construct a new `AnalyserNode` registered on the given context.
    pub fn new<C: BaseAudioContext>(context: &C, options: AnalyserOptions) -> Self {
        context.base().register(move |registration| {
            // Configure the analysis engine from the supplied options.
            let mut analyser = Analyser::new();
            analyser.set_fft_size(options.fft_size);
            analyser.set_smoothing_time_constant(options.smoothing_time_constant);
            analyser.set_decibels(options.min_decibels, options.max_decibels);

            // The renderer only needs a handle on the ring buffer: the render
            // thread writes samples into it, the control thread reads them.
            let renderer = AnalyserRenderer {
                ring_buffer: analyser.get_ring_buffer_clone(),
            };

            let node = AnalyserNode {
                registration,
                channel_config: options.audio_node_options.into(),
                analyser,
            };

            (node, Box::new(renderer))
        })
    }

    /// Current FFT window size.
    pub fn fft_size(&self) -> usize {
        self.analyser.fft_size()
    }

    /// Change the FFT window size.
    pub fn set_fft_size(&mut self, fft_size: usize) {
        self.analyser.set_fft_size(fft_size);
    }

    /// Current smoothing time constant.
    pub fn smoothing_time_constant(&self) -> f64 {
        self.analyser.smoothing_time_constant()
    }

    /// Change the smoothing time constant.
    pub fn set_smoothing_time_constant(&mut self, value: f64) {
        self.analyser.set_smoothing_time_constant(value);
    }

    /// Current lower bound (dB) for byte-scaled frequency data.
    pub fn min_decibels(&self) -> f64 {
        self.analyser.min_decibels()
    }

    /// Change the lower decibel bound, keeping the upper bound as-is.
    pub fn set_min_decibels(&mut self, value: f64) {
        let max = self.max_decibels();
        self.analyser.set_decibels(value, max);
    }

    /// Current upper bound (dB) for byte-scaled frequency data.
    pub fn max_decibels(&self) -> f64 {
        self.analyser.max_decibels()
    }

    /// Change the upper decibel bound, keeping the lower bound as-is.
    pub fn set_max_decibels(&mut self, value: f64) {
        let min = self.min_decibels();
        self.analyser.set_decibels(min, value);
    }

    /// Number of frequency bins (half the FFT size).
    pub fn frequency_bin_count(&self) -> usize {
        self.analyser.frequency_bin_count()
    }

    /// Copy the current time-domain waveform into `buffer` as f32 samples.
    pub fn get_float_time_domain_data(&mut self, buffer: &mut [f32]) {
        self.analyser.get_float_time_domain_data(buffer);
    }

    /// Copy the current time-domain waveform into `buffer` as unsigned bytes.
    pub fn get_byte_time_domain_data(&mut self, buffer: &mut [u8]) {
        self.analyser.get_byte_time_domain_data(buffer);
    }

    /// Copy the current frequency data (dB) into `buffer` as f32 values.
    ///
    /// The context time is passed along so the analyser can decide whether a
    /// fresh FFT frame is needed.
    pub fn get_float_frequency_data(&mut self, buffer: &mut [f32]) {
        let now = self.registration.context().current_time();
        self.analyser.get_float_frequency_data(buffer, now);
    }

    /// Copy the current frequency data into `buffer`, scaled to bytes using
    /// the min/max decibel bounds.
    pub fn get_byte_frequency_data(&mut self, buffer: &mut [u8]) {
        let now = self.registration.context().current_time();
        self.analyser.get_byte_frequency_data(buffer, now);
    }
}
/// Render-thread counterpart of [`AnalyserNode`]: captures the audio stream
/// into a ring buffer shared with the control-thread `Analyser`.
struct AnalyserRenderer {
    // Shared buffer written on the render thread, read on the control thread
    ring_buffer: AnalyserRingBuffer,
}
impl AudioProcessor for AnalyserRenderer {
    fn process(
        &mut self,
        inputs: &[AudioRenderQuantum],
        outputs: &mut [AudioRenderQuantum],
        _params: AudioParamValues<'_>,
        _scope: &AudioWorkletGlobalScope,
    ) -> bool {
        // Single input, single output: the audio passes through untouched.
        outputs[0] = inputs[0].clone();

        // Capture a mono downmix of the input into the shared ring buffer so
        // the control thread can derive time/frequency-domain data from it.
        let mut downmix = inputs[0].clone();
        downmix.mix(1, ChannelInterpretation::Speakers);
        self.ring_buffer.write(downmix.channel_data(0).as_ref());

        // No tail-time: nothing left to emit once the input goes silent.
        false
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::context::{
        AudioContext, AudioContextOptions, BaseAudioContext, OfflineAudioContext,
    };
    use crate::node::{AudioNode, AudioScheduledSourceNode};
    use float_eq::assert_float_eq;

    /// Reading analyser data after the context is closed must still succeed
    /// (returning the last captured samples) rather than panic or block.
    #[test]
    fn test_analyser_after_closed() {
        // "none" sink: render without a physical audio output device.
        let options = AudioContextOptions {
            sink_id: "none".into(),
            ..AudioContextOptions::default()
        };
        let context = AudioContext::new(options);
        let mut src = context.create_constant_source();
        src.start();
        let mut analyser = context.create_analyser();
        src.connect(&analyser);
        // Let the render thread produce at least one render quantum.
        // NOTE(review): timing-based — assumes 20 ms is enough on CI machines.
        std::thread::sleep(std::time::Duration::from_millis(20));
        let mut buffer = vec![0.; 128];
        analyser.get_float_time_domain_data(&mut buffer);
        // A constant source emits 1.0, so the captured waveform is all ones.
        assert_float_eq!(&buffer[..], &[1.; 128][..], abs_all <= 0.);
        context.close_sync();
        // Give the render thread time to actually shut down.
        std::thread::sleep(std::time::Duration::from_millis(50));
        // The ring buffer is no longer written, but reads keep working and
        // return the previously captured data.
        let mut buffer = vec![0.; 128];
        analyser.get_float_time_domain_data(&mut buffer);
        assert_float_eq!(&buffer[..], &[1.; 128][..], abs_all <= 0.);
    }

    /// Constructing with custom (valid) min/max decibel bounds is accepted.
    #[test]
    fn test_construct_decibels() {
        let context = OfflineAudioContext::new(1, 128, 44_100.);
        let options = AnalyserOptions {
            min_decibels: -10.,
            max_decibels: 20.,
            ..AnalyserOptions::default()
        };
        let _ = AnalyserNode::new(&context, options);
    }
}