use bevy_ecs::component::Component;
use bevy_math::Vec3;
use delay_line::DelayLine;
use firewheel::{
channel_config::{ChannelConfig, NonZeroChannelCount},
diff::{Diff, Patch},
event::ProcEvents,
node::{
AudioNode, AudioNodeInfo, AudioNodeProcessor, ProcBuffers, ProcExtra, ProcInfo,
ProcStreamCtx, ProcessStatus,
},
};
mod delay_line;
// Speed of sound in air, meters per second. Combined with the inter-ear
// distance (meters) this yields the maximum interaural delay in seconds.
const SPEED_OF_SOUND: f32 = 343.0;
/// An audio node that applies an interaural time difference (ITD):
/// each output ear is delayed according to [`direction`](Self::direction),
/// producing a lateralization cue for spatial audio.
#[derive(Debug, Default, Clone, Component, Diff, Patch)]
#[cfg_attr(feature = "reflect", derive(bevy_reflect::Reflect))]
pub struct ItdNode {
    // Direction from the listener toward the sound source. It is normalized
    // by the processor; a zero vector disables the delay on both ears.
    pub direction: Vec3,
}
/// Static configuration for [`ItdNode`], fixed at processor construction.
#[derive(Debug, Clone, Component, PartialEq)]
#[cfg_attr(feature = "reflect", derive(bevy_reflect::Reflect))]
pub struct ItdConfig {
    // Distance between the listener's ears, in meters (divided by
    // SPEED_OF_SOUND in m/s to size the delay lines). Defaults to 0.22.
    pub inter_ear_distance: f32,
    // How the node's input channels are interpreted (stereo pass-through
    // vs. downmix-to-mono).
    pub input_config: InputConfig,
}
impl Default for ItdConfig {
    /// A 0.22 m inter-ear distance with a stereo input layout.
    fn default() -> Self {
        let inter_ear_distance = 0.22;
        let input_config = InputConfig::Stereo;
        Self {
            inter_ear_distance,
            input_config,
        }
    }
}
/// Input channel layout for [`ItdNode`].
#[derive(Debug, Clone, Copy, PartialEq)]
#[cfg_attr(feature = "reflect", derive(bevy_reflect::Reflect))]
pub enum InputConfig {
    // Two input channels, each delayed independently.
    Stereo,
    // The given number of input channels, averaged to mono before the
    // per-ear delays are applied.
    Downmixed(NonZeroChannelCount),
}
impl InputConfig {
    /// The number of input channels this configuration expects.
    pub fn input_channels(&self) -> NonZeroChannelCount {
        match *self {
            Self::Stereo => NonZeroChannelCount::STEREO,
            Self::Downmixed(count) => count,
        }
    }
}
// Real-time processor state for [`ItdNode`].
struct ItdProcessor {
    // Per-ear delay lines; their read heads encode the current ITD.
    left: DelayLine,
    right: DelayLine,
    // Cached from the config so delay lines can be resized on stream changes.
    inter_ear_distance: f32,
    input_config: InputConfig,
}
impl AudioNode for ItdNode {
    type Configuration = ItdConfig;

    /// Describes the node: configured input channel count, always two outputs
    /// (one per ear).
    fn info(&self, config: &Self::Configuration) -> AudioNodeInfo {
        let num_inputs = config.input_config.input_channels().get();
        AudioNodeInfo::new()
            .debug_name("itd node")
            .channel_config(ChannelConfig::new(num_inputs, 2))
    }

    /// Builds the real-time processor, sizing each ear's delay line to cover
    /// the maximum interaural delay at the stream's sample rate.
    fn construct_processor(
        &self,
        configuration: &Self::Configuration,
        cx: firewheel::node::ConstructProcessorContext,
    ) -> impl firewheel::node::AudioNodeProcessor {
        let sample_rate = cx.stream_info.sample_rate.get() as f32;
        let delay_capacity = maximum_samples(configuration.inter_ear_distance, sample_rate);
        ItdProcessor {
            left: DelayLine::new(delay_capacity),
            right: DelayLine::new(delay_capacity),
            inter_ear_distance: configuration.inter_ear_distance,
            input_config: configuration.input_config,
        }
    }
}
/// Number of samples needed to hold the longest possible interaural delay:
/// the time sound takes to travel `distance` meters, rounded up to whole
/// samples at `sample_rate` Hz.
fn maximum_samples(distance: f32, sample_rate: f32) -> usize {
    let delay_seconds = distance / SPEED_OF_SOUND;
    (delay_seconds * sample_rate).ceil() as usize
}
impl AudioNodeProcessor for ItdProcessor {
    /// Runs one audio block: applies pending direction patches, then routes
    /// the input through the per-ear delay lines.
    fn process(
        &mut self,
        proc_info: &ProcInfo,
        ProcBuffers { inputs, outputs }: ProcBuffers,
        events: &mut ProcEvents,
        _: &mut ProcExtra,
    ) -> ProcessStatus {
        // Apply parameter patches before touching the audio for this block.
        for patch in events.drain_patches::<ItdNode>() {
            let ItdNodePatch::Direction(direction) = patch;
            let direction = direction.normalize_or_zero();
            if direction.length_squared() == 0.0 {
                // No usable direction: remove the delay from both ears.
                self.left.set_read_head(0.0);
                self.right.set_read_head(0.0);
                continue;
            }
            // Each ear is delayed in proportion to how far the source sits
            // toward the opposite ear along the X axis. The dot products are
            // in [-1, 1]; negative values presumably clamp to "no delay"
            // inside `DelayLine::set_read_head` — TODO confirm.
            self.left.set_read_head(Vec3::X.dot(direction));
            self.right.set_read_head(Vec3::NEG_X.dot(direction));
        }
        // Fix: check *all* input channels for silence, not just the first
        // two — `InputConfig::Downmixed` may supply more than two inputs,
        // and the old hard-coded `2` cleared the outputs even when channels
        // beyond the first two still carried audio.
        // NOTE(review): clearing here also drops whatever tail remains in
        // the delay lines (sub-millisecond at the default ear distance),
        // which is assumed acceptable.
        if proc_info.in_silence_mask.all_channels_silent(inputs.len()) {
            return ProcessStatus::ClearAllOutputs;
        }
        match self.input_config {
            InputConfig::Stereo => {
                // Stereo: delay each channel independently through its ear's
                // delay line.
                let in_left = &inputs[0][..proc_info.frames];
                let in_right = &inputs[1][..proc_info.frames];
                let (out_left, rest) = outputs.split_first_mut().unwrap();
                let out_left = &mut out_left[..proc_info.frames];
                let out_right = &mut rest[0][..proc_info.frames];
                for frame in 0..proc_info.frames {
                    self.left.write(in_left[frame]);
                    self.right.write(in_right[frame]);
                    out_left[frame] = self.left.read();
                    out_right[frame] = self.right.read();
                }
            }
            InputConfig::Downmixed(_) => {
                // Average all inputs to mono, then feed that signal through
                // both delay lines. The channel count is non-zero by
                // construction, so the division cannot be by zero.
                for frame in 0..proc_info.frames {
                    let mut downmixed = 0.0;
                    for channel in inputs {
                        downmixed += channel[frame];
                    }
                    downmixed /= inputs.len() as f32;
                    self.left.write(downmixed);
                    self.right.write(downmixed);
                    outputs[0][frame] = self.left.read();
                    outputs[1][frame] = self.right.read();
                }
            }
        }
        ProcessStatus::OutputsModified
    }

    /// Resizes the delay lines when the sample rate changes so the maximum
    /// delay still covers the full inter-ear travel time.
    fn new_stream(&mut self, stream_info: &firewheel::StreamInfo, _: &mut ProcStreamCtx) {
        if stream_info.sample_rate != stream_info.prev_sample_rate {
            let new_size = maximum_samples(
                self.inter_ear_distance,
                stream_info.sample_rate.get() as f32,
            );
            self.left.resize(new_size);
            self.right.resize(new_size);
        }
    }
}