use std::slice::{Iter, IterMut};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use crate::context::AudioContextRegistration;
use crate::node::{
AudioNode, ChannelConfig, ChannelConfigOptions, ChannelCountMode, ChannelInterpretation,
};
use crate::render::{AudioParamValues, AudioProcessor, AudioRenderQuantum, RenderScope};
use crate::{AtomicF32, RENDER_QUANTUM_SIZE};
use crossbeam_channel::{Receiver, Sender};
use lazy_static::lazy_static;
// Threshold under which a `SetTargetAtTime` automation is snapped to its target
// value: the exponential approach never reaches the target exactly, so once the
// distance falls below this value the param is pinned to the target.
const SNAP_TO_TARGET: f32 = 1e-10;
/// Panics with a `RangeError`-style message when a timing value is negative.
///
/// Note: NaN deliberately passes the check (it is not `< 0.`), matching the
/// comparison used by the original guard.
#[track_caller]
fn assert_non_negative(value: f64) {
    assert!(
        !(value < 0.),
        "RangeError - timing value ({:?}) should not be negative",
        value
    );
}
/// Panics with a `RangeError`-style message when a duration is zero or negative.
///
/// Note: NaN deliberately passes the check (it is not `<= 0.`), matching the
/// comparison used by the original guard.
#[track_caller]
fn assert_strictly_positive(value: f64) {
    assert!(
        !(value <= 0.),
        "RangeError - duration ({:?}) should be strictly positive",
        value
    );
}
/// Panics with a `RangeError`-style message when the value equals zero.
///
/// Both `+0.0` and `-0.0` compare equal to zero and are rejected; NaN is not
/// equal to zero and passes.
#[track_caller]
fn assert_not_zero(value: f32) {
    assert!(
        value != 0.,
        "RangeError - value ({:?}) should not be equal to zero",
        value,
    )
}
/// Panics with an `InvalidStateError`-style message when a value curve holds
/// fewer than two points (interpolation needs at least a start and an end).
#[track_caller]
fn assert_sequence_length(values: &[f32]) {
    assert!(
        values.len() >= 2,
        "InvalidStateError - sequence length ({:?}) should not be less than 2",
        values.len()
    )
}
/// Precision of the AudioParam value calculation per render quantum.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AutomationRate {
    /// Audio rate: a value is computed for every sample frame.
    A,
    /// Control rate: a single value is computed per render quantum.
    K,
}
/// Options for constructing an `AudioParam` / `AudioParamProcessor` pair.
#[derive(Clone, Debug)]
pub struct AudioParamDescriptor {
    /// Requested automation rate (a-rate or k-rate).
    pub automation_rate: AutomationRate,
    /// Initial value of the param.
    pub default_value: f32,
    /// Lower bound the output value is clamped to.
    pub min_value: f32,
    /// Upper bound the output value is clamped to.
    pub max_value: f32,
}
/// Discriminates the automation methods of `AudioParam`; one variant per
/// scheduling method (plus `SetValue` for immediate `set_value` calls).
#[derive(PartialEq, Eq, Debug)]
enum AudioParamEventType {
    SetValue,
    SetValueAtTime,
    LinearRampToValueAtTime,
    ExponentialRampToValueAtTime,
    CancelScheduledValues,
    SetTargetAtTime,
    CancelAndHoldAtTime,
    SetValueCurveAtTime,
}
/// A scheduled automation event, sent from the control thread to the renderer.
#[derive(Debug)]
pub(crate) struct AudioParamEvent {
    event_type: AudioParamEventType,
    // Target value of the automation (zero for cancel events).
    value: f32,
    // Time at which the event takes effect, in seconds.
    time: f64,
    // Only set for `SetTargetAtTime`.
    time_constant: Option<f64>,
    // Set by `CancelAndHoldAtTime` on the event being truncated.
    cancel_time: Option<f64>,
    // Only set for `SetValueCurveAtTime`.
    duration: Option<f64>,
    // Curve points, only set for `SetValueCurveAtTime`.
    values: Option<Box<[f32]>>,
}
/// Time-ordered queue of automation events.
///
/// The queue is flagged `dirty` after each insertion and must be `sort()`ed
/// again before `peek()` / `next()` may be called.
#[derive(Debug)]
struct AudioParamEventTimeline {
    inner: Vec<AudioParamEvent>,
    dirty: bool,
}
impl AudioParamEventTimeline {
    fn new() -> Self {
        Self {
            inner: Vec::new(),
            dirty: false,
        }
    }

    /// Append an event; the queue must be `sort()`ed again before
    /// `peek()` / `next()` can be used.
    fn push(&mut self, item: AudioParamEvent) {
        self.dirty = true;
        self.inner.push(item);
    }

    /// Remove and return the earliest event, if any.
    // `remove(0)` is O(n), which is acceptable for the short timelines
    // handled here.
    fn pop(&mut self) -> Option<AudioParamEvent> {
        if self.inner.is_empty() {
            None
        } else {
            Some(self.inner.remove(0))
        }
    }

    fn retain<F>(&mut self, func: F)
    where
        F: Fn(&AudioParamEvent) -> bool,
    {
        self.inner.retain(func);
    }

    /// Replace the earliest event in place.
    ///
    /// # Panics
    /// Panics if the queue is empty.
    fn replace_peek(&mut self, item: AudioParamEvent) {
        self.inner[0] = item;
    }

    fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }

    /// Peek the first event without requiring the queue to be sorted.
    fn unsorted_peek(&self) -> Option<&AudioParamEvent> {
        self.inner.first()
    }

    /// Peek the earliest event.
    ///
    /// # Panics
    /// Panics if events were pushed since the last `sort()`.
    fn peek(&self) -> Option<&AudioParamEvent> {
        if self.dirty {
            panic!("`AudioParamEventTimeline`: Invalid `.peek()` call, the queue is dirty");
        }
        self.inner.first()
    }

    /// Peek the second-earliest event.
    ///
    /// # Panics
    /// Panics if events were pushed since the last `sort()`.
    fn next(&self) -> Option<&AudioParamEvent> {
        if self.dirty {
            panic!("`AudioParamEventTimeline`: Invalid `.next()` call, the queue is dirty");
        }
        self.inner.get(1)
    }

    /// Sort events by time (stable sort, so insertion order breaks ties).
    // `partial_cmp().unwrap()` is sound here: event times are validated to be
    // non-negative (never NaN-producing) before they are scheduled.
    fn sort(&mut self) {
        self.inner
            .sort_by(|a, b| a.time.partial_cmp(&b.time).unwrap());
        self.dirty = false;
    }

    // Read-only iteration only needs a shared borrow.
    fn iter(&self) -> Iter<'_, AudioParamEvent> {
        self.inner.iter()
    }

    fn iter_mut(&mut self) -> IterMut<'_, AudioParamEvent> {
        self.inner.iter_mut()
    }
}
/// Control-thread handle to an audio parameter; state is shared with the
/// render-thread `AudioParamProcessor` through atomics and a channel.
pub struct AudioParam {
    registration: AudioContextRegistration,
    // `true` for a-rate, `false` for k-rate (shared with the renderer).
    is_a_rate: Arc<AtomicBool>,
    // When `true`, `set_automation_rate` panics on an actual rate change.
    automation_rate_constrained: bool,
    default_value: f32,
    min_value: f32,
    max_value: f32,
    // Last clamped value, observable through `value()`.
    current_value: Arc<AtomicF32>,
    // Automation events travel to the render thread through this channel.
    sender: Sender<AudioParamEvent>,
}
/// The shareable parts of an `AudioParam`, detached from its context
/// registration (see `into_raw_parts` / `from_raw_parts`).
#[derive(Clone)]
pub(crate) struct AudioParamRaw {
    is_a_rate: Arc<AtomicBool>,
    automation_rate_constrained: bool,
    default_value: f32,
    min_value: f32,
    max_value: f32,
    current_value: Arc<AtomicF32>,
    sender: Sender<AudioParamEvent>,
}
lazy_static! {
    // All AudioParams share one immutable channel configuration: mono,
    // explicit count mode, discrete interpretation. The setters on the
    // `AudioNode` impl below panic, so this never changes.
    // NOTE(review): `std::sync::OnceLock`/`LazyLock` could replace
    // `lazy_static` on recent toolchains — verify the crate's MSRV first.
    static ref AUDIO_PARAM_CHANNEL_CONFIG: ChannelConfig = ChannelConfigOptions {
        count: 1,
        count_mode: ChannelCountMode::Explicit,
        interpretation: ChannelInterpretation::Discrete,
    }
    .into();
}
impl AudioNode for AudioParam {
    fn registration(&self) -> &AudioContextRegistration {
        &self.registration
    }

    // Fixed, shared configuration — see AUDIO_PARAM_CHANNEL_CONFIG above.
    fn channel_config(&self) -> &ChannelConfig {
        &AUDIO_PARAM_CHANNEL_CONFIG
    }

    // An AudioParam has exactly one input (modulation signal) and one output.
    fn number_of_inputs(&self) -> usize {
        1
    }

    fn number_of_outputs(&self) -> usize {
        1
    }

    // The channel attributes of an AudioParam are constrained and cannot be
    // changed; all three setters therefore panic.
    fn set_channel_count(&self, _v: usize) {
        panic!("AudioParam has channel count constraints");
    }

    fn set_channel_count_mode(&self, _v: ChannelCountMode) {
        panic!("AudioParam has channel count mode constraints");
    }

    fn set_channel_interpretation(&self, _v: ChannelInterpretation) {
        panic!("AudioParam has channel interpretation constraints");
    }
}
impl AudioParam {
pub fn automation_rate(&self) -> AutomationRate {
if self.is_a_rate.load(Ordering::SeqCst) {
AutomationRate::A
} else {
AutomationRate::K
}
}
pub fn set_automation_rate(&self, value: AutomationRate) {
if self.automation_rate_constrained && value != self.automation_rate() {
panic!("InvalidStateError: automation rate cannot be changed for this param");
}
let is_a_rate = value == AutomationRate::A;
self.is_a_rate.store(is_a_rate, Ordering::SeqCst);
}
pub(crate) fn set_automation_rate_constrained(&mut self, value: bool) {
self.automation_rate_constrained = value;
}
pub fn default_value(&self) -> f32 {
self.default_value
}
pub fn min_value(&self) -> f32 {
self.min_value
}
pub fn max_value(&self) -> f32 {
self.max_value
}
pub fn value(&self) -> f32 {
self.current_value.load(Ordering::SeqCst)
}
pub fn set_value(&self, value: f32) -> &Self {
let clamped = value.clamp(self.min_value, self.max_value);
self.current_value.store(clamped, Ordering::SeqCst);
let event = AudioParamEvent {
event_type: AudioParamEventType::SetValue,
value,
time: 0.,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
};
self.send_event(event);
self
}
pub fn set_value_at_time(&self, value: f32, start_time: f64) -> &Self {
assert_non_negative(start_time);
let event = AudioParamEvent {
event_type: AudioParamEventType::SetValueAtTime,
value,
time: start_time,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
};
self.send_event(event);
self
}
pub fn linear_ramp_to_value_at_time(&self, value: f32, end_time: f64) -> &Self {
assert_non_negative(end_time);
let event = AudioParamEvent {
event_type: AudioParamEventType::LinearRampToValueAtTime,
value,
time: end_time,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
};
self.send_event(event);
self
}
pub fn exponential_ramp_to_value_at_time(&self, value: f32, end_time: f64) -> &Self {
assert_not_zero(value);
assert_non_negative(end_time);
let event = AudioParamEvent {
event_type: AudioParamEventType::ExponentialRampToValueAtTime,
value,
time: end_time,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
};
self.send_event(event);
self
}
pub fn set_target_at_time(&self, value: f32, start_time: f64, time_constant: f64) -> &Self {
assert_non_negative(start_time);
assert_non_negative(time_constant);
let event = if time_constant == 0. {
AudioParamEvent {
event_type: AudioParamEventType::SetValueAtTime,
value,
time: start_time,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
}
} else {
AudioParamEvent {
event_type: AudioParamEventType::SetTargetAtTime,
value,
time: start_time,
time_constant: Some(time_constant),
cancel_time: None,
duration: None,
values: None,
}
};
self.send_event(event);
self
}
pub fn cancel_scheduled_values(&self, cancel_time: f64) -> &Self {
assert_non_negative(cancel_time);
let event = AudioParamEvent {
event_type: AudioParamEventType::CancelScheduledValues,
value: 0., time: cancel_time,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
};
self.send_event(event);
self
}
pub fn cancel_and_hold_at_time(&self, cancel_time: f64) -> &Self {
assert_non_negative(cancel_time);
let event = AudioParamEvent {
event_type: AudioParamEventType::CancelAndHoldAtTime,
value: 0., time: cancel_time,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
};
self.send_event(event);
self
}
pub fn set_value_curve_at_time(&self, values: &[f32], start_time: f64, duration: f64) -> &Self {
assert_sequence_length(values);
assert_non_negative(start_time);
assert_strictly_positive(duration);
let copy = values.to_vec();
let boxed_copy = copy.into_boxed_slice();
let event = AudioParamEvent {
event_type: AudioParamEventType::SetValueCurveAtTime,
value: 0., time: start_time,
time_constant: None,
cancel_time: None,
duration: Some(duration),
values: Some(boxed_copy),
};
self.send_event(event);
self
}
pub(crate) fn into_raw_parts(self) -> AudioParamRaw {
AudioParamRaw {
is_a_rate: self.is_a_rate,
automation_rate_constrained: self.automation_rate_constrained,
default_value: self.default_value,
min_value: self.min_value,
max_value: self.max_value,
current_value: self.current_value,
sender: self.sender,
}
}
pub(crate) fn from_raw_parts(
registration: AudioContextRegistration,
parts: AudioParamRaw,
) -> Self {
Self {
registration,
is_a_rate: parts.is_a_rate,
automation_rate_constrained: parts.automation_rate_constrained,
default_value: parts.default_value,
min_value: parts.min_value,
max_value: parts.max_value,
current_value: parts.current_value,
sender: parts.sender,
}
}
fn send_event(&self, event: AudioParamEvent) {
if cfg!(test) {
self.sender.send(event).unwrap();
} else {
self.context().pass_audio_param_event(&self.sender, event);
}
}
}
/// Render-thread counterpart of an `AudioParam`: consumes scheduled automation
/// events and computes the intrinsic param value for every render quantum.
#[derive(Debug)]
pub(crate) struct AudioParamProcessor {
    // Current intrinsic (unclamped) value.
    // NOTE(review): "intrisic" is a typo for "intrinsic", kept as-is because
    // the field name is used throughout the crate.
    intrisic_value: f32,
    // Clamped value shared back to the control thread (`AudioParam::value()`).
    current_value: Arc<AtomicF32>,
    // Incoming automation events from the control thread.
    receiver: Receiver<AudioParamEvent>,
    is_a_rate: Arc<AtomicBool>,
    default_value: f32,
    min_value: f32,
    max_value: f32,
    // Pending automation events, ordered by time once sorted.
    event_timeline: AudioParamEventTimeline,
    // Most recently consumed event; anchors ramps and set-target segments.
    last_event: Option<AudioParamEvent>,
    // Intrinsic values for the current block (length 1 or RENDER_QUANTUM_SIZE).
    buffer: Vec<f32>,
}
impl AudioProcessor for AudioParamProcessor {
    /// Render one quantum: compute the intrinsic values for this block and
    /// mix them with the (optional) modulation input into the output.
    fn process(
        &mut self,
        inputs: &[AudioRenderQuantum],
        outputs: &mut [AudioRenderQuantum],
        _params: AudioParamValues,
        scope: &RenderScope,
    ) -> bool {
        // Duration of a single sample frame, in seconds.
        let dt = 1. / scope.sample_rate as f64;

        // An AudioParam has a single input and a single output.
        let input = &inputs[0];
        let output = &mut outputs[0];

        self.compute_intrisic_values(scope.current_time, dt, RENDER_QUANTUM_SIZE);
        self.mix_to_output(input, output);

        true
    }
}
impl AudioParamProcessor {
/// Compute the intrinsic values for the current block and return them.
///
/// `block_time` is the start time of the block, `dt` the sample period and
/// `count` the number of frames to render.
fn compute_intrisic_values(&mut self, block_time: f64, dt: f64, count: usize) -> &[f32] {
    // Only drain the channel when events were actually scheduled.
    if !self.receiver.is_empty() {
        self.handle_incoming_events();
    }
    self.compute_buffer(block_time, dt, count);
    self.buffer.as_slice()
}
/// Mix the computed intrinsic values with the modulation `input` into
/// `output`, sanitizing NaN to the default value and clamping to the
/// param's `[min_value, max_value]` range.
fn mix_to_output(&mut self, input: &AudioRenderQuantum, output: &mut AudioRenderQuantum) {
    // Invariant: the buffer holds either one value (constant / k-rate
    // block) or one value per frame.
    #[cfg(test)]
    assert!(self.buffer.len() == 1 || self.buffer.len() == RENDER_QUANTUM_SIZE);

    if self.buffer.len() == 1 && input.is_silent() {
        // Constant block with no modulation input: emit a single value.
        let mut value = self.buffer[0];
        if value.is_nan() {
            value = self.default_value;
        }
        output.set_single_valued(true);
        let output_channel = output.channel_data_mut(0);
        output_channel[0] = value.clamp(self.min_value, self.max_value);
    } else {
        // Sum the modulation input with the param values; `cycle()` repeats
        // the single value when the buffer is in its k-rate/constant form.
        *output = input.clone();
        output.set_single_valued(false);
        output
            .channel_data_mut(0)
            .iter_mut()
            .zip(self.buffer.iter().cycle())
            .for_each(|(o, p)| {
                *o += p;
                if o.is_nan() {
                    *o = self.default_value;
                }
                *o = o.clamp(self.min_value, self.max_value)
            });
    }
}
/// Value of a linear ramp at `time`: interpolates from `start_value` over
/// `duration` seconds starting at `start_time` (`diff` = end - start).
#[inline(always)]
fn compute_linear_ramp_sample(
    &self,
    start_time: f64,
    duration: f64,
    start_value: f32,
    diff: f32,
    time: f64,
) -> f32 {
    // Normalized progress through the ramp, in [0, 1] while it is active.
    let progress = ((time - start_time) / duration) as f32;
    diff.mul_add(progress, start_value)
}
/// Value of an exponential ramp at `time`:
/// `v(t) = v0 * (v1 / v0)^((t - t0) / duration)` where `ratio = v1 / v0`.
#[inline(always)]
fn compute_exponential_ramp_sample(
    &self,
    start_time: f64,
    duration: f64,
    start_value: f32,
    ratio: f32,
    time: f64,
) -> f32 {
    let progress = ((time - start_time) / duration) as f32;
    start_value * ratio.powf(progress)
}
/// Value of a set-target automation at `time`:
/// `v(t) = v1 + (v0 - v1) * exp(-(t - t0) / tc)` where `diff = v0 - v1`
/// and `end_value = v1`.
#[inline(always)]
fn compute_set_target_sample(
    &self,
    start_time: f64,
    time_constant: f64,
    end_value: f32,
    diff: f32,
    time: f64,
) -> f32 {
    let exponent = -((time - start_time) / time_constant);
    diff.mul_add(exponent.exp() as f32, end_value)
}
/// Sample a value curve at `time` by linear interpolation between the
/// curve points in `values` (at least 2 points, enforced at scheduling).
///
/// Once `time` reaches `start_time + duration` the last point is held.
#[inline(always)]
fn compute_set_value_curve_sample(
    &self,
    start_time: f64,
    duration: f64,
    values: &[f32],
    time: f64,
) -> f32 {
    if time - start_time >= duration {
        values[values.len() - 1]
    } else {
        let position = (values.len() - 1) as f64 * (time - start_time) / duration;
        // Clamp the segment index to `len - 2`: float rounding of the
        // multiplication above can land `position` exactly on the last
        // point even though `time - start_time < duration`, and indexing
        // `values[k + 1]` would then be out of bounds.
        let k = (position as usize).min(values.len() - 2);
        // Fractional progress within segment k (identical to
        // `position - position.floor()` whenever k was not clamped).
        let phase = (position - k as f64) as f32;
        (values[k + 1] - values[k]).mul_add(phase, values[k])
    }
}
/// Drain the control-thread channel and insert the received automation
/// events into the timeline, applying the cancellation and validation
/// rules before insertion.
fn handle_incoming_events(&mut self) {
    for event in self.receiver.try_iter() {
        // `CancelScheduledValues` removes every event scheduled at or after
        // its time; the cancel event itself is never inserted.
        if event.event_type == AudioParamEventType::CancelScheduledValues {
            let some_current_event = self.event_timeline.unsorted_peek();

            match some_current_event {
                None => (),
                Some(current_event) => {
                    match current_event.event_type {
                        AudioParamEventType::LinearRampToValueAtTime
                        | AudioParamEventType::ExponentialRampToValueAtTime => {
                            // Cancelling an ongoing ramp rolls the intrinsic
                            // value back to the previous event's value.
                            if current_event.time >= event.time {
                                let last_event = self.last_event.as_ref().unwrap();
                                self.intrisic_value = last_event.value;
                            }
                        }
                        _ => (),
                    }
                }
            }

            self.event_timeline
                .retain(|queued| queued.time < event.time);
            continue;
        }

        // `CancelAndHoldAtTime` truncates the automation at `event.time`,
        // holding whatever value is reached at that point.
        if event.event_type == AudioParamEventType::CancelAndHoldAtTime {
            // e1: latest event at or before the hold time
            // e2: earliest event strictly after the hold time
            let mut e1: Option<&mut AudioParamEvent> = None;
            let mut e2: Option<&mut AudioParamEvent> = None;
            let mut t1 = f64::MIN;
            let mut t2 = f64::MAX;

            self.event_timeline.sort();

            for queued in self.event_timeline.iter_mut() {
                if queued.time >= t1 && queued.time <= event.time {
                    t1 = queued.time;
                    e1 = Some(queued);
                } else if queued.time < t2 && queued.time > event.time {
                    t2 = queued.time;
                    e2 = Some(queued);
                }
            }

            // A ramp ending after the hold time is truncated at the hold
            // time...
            if let Some(matched) = e2 {
                if matched.event_type == AudioParamEventType::LinearRampToValueAtTime
                    || matched.event_type == AudioParamEventType::ExponentialRampToValueAtTime
                {
                    matched.cancel_time = Some(event.time);
                }
            // ...otherwise an ongoing SetTarget / SetValueCurve is truncated.
            } else if let Some(matched) = e1 {
                if matched.event_type == AudioParamEventType::SetTargetAtTime {
                    matched.cancel_time = Some(event.time);
                } else if matched.event_type == AudioParamEventType::SetValueCurveAtTime {
                    let start_time = matched.time;
                    let duration = matched.duration.unwrap();

                    if event.time <= start_time + duration {
                        matched.cancel_time = Some(event.time);
                    }
                }
            }

            // Drop everything scheduled after the hold time; events that
            // just received a `cancel_time` are kept (they end at it).
            self.event_timeline.retain(|queued| {
                let mut time = queued.time;

                if let Some(cancel_time) = queued.cancel_time {
                    time = cancel_time;
                }

                time <= event.time
            });
            continue;
        }

        // A value curve may not overlap any other scheduled event.
        if event.event_type == AudioParamEventType::SetValueCurveAtTime {
            let start_time = event.time;
            let end_time = start_time + event.duration.unwrap();

            for queued in self.event_timeline.iter() {
                if queued.time > start_time && queued.time < end_time {
                    panic!(
                        "NotSupportedError: scheduling SetValueCurveAtTime ({:?}) at
time of another automation event ({:?})",
                        event, queued,
                    );
                }
            }
        }

        // Conversely, no automation event may fall inside the time span of
        // an already scheduled value curve.
        if event.event_type == AudioParamEventType::SetValueAtTime
            || event.event_type == AudioParamEventType::SetValue
            || event.event_type == AudioParamEventType::LinearRampToValueAtTime
            || event.event_type == AudioParamEventType::ExponentialRampToValueAtTime
            || event.event_type == AudioParamEventType::SetTargetAtTime
        {
            for queued in self.event_timeline.iter() {
                if queued.event_type == AudioParamEventType::SetValueCurveAtTime {
                    let start_time = queued.time;
                    let end_time = start_time + queued.duration.unwrap();

                    if event.time > start_time && event.time < end_time {
                        panic!(
                            "NotSupportedError: scheduling automation event ({:?})
during SetValueCurveAtTime ({:?})",
                            event, queued,
                        );
                    }
                }
            }
        }

        // `SetValue` takes effect immediately.
        if event.event_type == AudioParamEventType::SetValue {
            self.intrisic_value = event.value;
        }

        // A ramp without any preceding event ramps from the current value:
        // insert a synthetic `SetValue` event to anchor it.
        if self.event_timeline.is_empty()
            && self.last_event.is_none()
            && (event.event_type == AudioParamEventType::LinearRampToValueAtTime
                || event.event_type == AudioParamEventType::ExponentialRampToValueAtTime)
        {
            let set_value_event = AudioParamEvent {
                event_type: AudioParamEventType::SetValue,
                value: self.intrisic_value,
                time: 0.,
                time_constant: None,
                cancel_time: None,
                duration: None,
                values: None,
            };

            self.event_timeline.push(set_value_event);
        }

        // Likewise `SetTargetAtTime` needs a start value to approach from.
        if self.event_timeline.is_empty()
            && event.event_type == AudioParamEventType::SetTargetAtTime
        {
            let set_value_event = AudioParamEvent {
                event_type: AudioParamEventType::SetValue,
                value: self.intrisic_value,
                time: 0.,
                time_constant: None,
                cancel_time: None,
                duration: None,
                values: None,
            };

            self.event_timeline.push(set_value_event);
        }

        self.event_timeline.push(event);
    }

    // Keep the timeline time-ordered so `peek()` / `next()` are valid.
    self.event_timeline.sort();
}
/// Fill `self.buffer` with the intrinsic param values for the current block
/// (`count` frames starting at `block_time`, sample period `dt`).
///
/// For k-rate params and constant blocks the buffer holds a single value;
/// otherwise it holds one value per frame. The timeline is advanced as
/// events complete inside the block.
fn compute_buffer(&mut self, block_time: f64, dt: f64, count: usize) {
    // Publish the clamped intrinsic value at the start of the block so the
    // control thread observes it through `AudioParam::value()`.
    let clamped = self.intrisic_value.clamp(self.min_value, self.max_value);
    self.current_value.store(clamped, Ordering::SeqCst);

    self.buffer.clear();

    let is_a_rate = self.is_a_rate.load(Ordering::SeqCst);
    let is_k_rate = !is_a_rate;

    let next_block_time = dt.mul_add(count as f64, block_time);

    // The block is constant when no event intersects it; pending ramps
    // always make the block non-constant (they are already in progress
    // from the previous event's time).
    let is_constant_block = match self.event_timeline.peek() {
        None => true,
        Some(event) => {
            if event.event_type != AudioParamEventType::LinearRampToValueAtTime
                && event.event_type != AudioParamEventType::ExponentialRampToValueAtTime
            {
                event.time >= next_block_time
            } else {
                false
            }
        }
    };

    if is_k_rate || is_constant_block {
        self.buffer.push(self.intrisic_value);
        // k-rate params still run the loop below (with a full buffer already
        // represented by the single value) to advance the timeline.
        if is_constant_block {
            return;
        }
    }

    loop {
        let some_event = self.event_timeline.peek();

        match some_event {
            None => {
                // No more events: pad the block with the current value.
                if is_a_rate {
                    self.buffer.resize(count, self.intrisic_value);
                }
                break;
            }
            Some(event) => {
                match event.event_type {
                    AudioParamEventType::SetValue | AudioParamEventType::SetValueAtTime => {
                        let value = event.value;
                        let mut time = event.time;

                        // A zero time means "now", i.e. the start of block.
                        if time == 0. {
                            time = block_time;
                        }

                        if is_a_rate {
                            // Fill up to the event time with the current value.
                            let end_index = ((time - block_time).max(0.) / dt) as usize;
                            let end_index_clipped = end_index.min(count);

                            for _ in self.buffer.len()..end_index_clipped {
                                self.buffer.push(self.intrisic_value);
                            }
                        }

                        if time > next_block_time {
                            // The event lies beyond this block: done for now.
                            break;
                        } else {
                            self.intrisic_value = value;

                            // If the time was remapped to the block start,
                            // store the effective time on the consumed event.
                            #[allow(clippy::float_cmp)]
                            if time != event.time {
                                let mut event = self.event_timeline.pop().unwrap();
                                event.time = time;

                                self.last_event = Some(event);
                            } else {
                                self.last_event = self.event_timeline.pop();
                            }
                        }
                    }
                    AudioParamEventType::LinearRampToValueAtTime => {
                        // The ramp starts at the previous event.
                        let last_event = self.last_event.as_ref().unwrap();

                        let start_time = last_event.time;
                        let mut end_time = event.time;
                        // Duration is computed on the original end time, a
                        // cancel only truncates where sampling stops.
                        let duration = end_time - start_time;

                        if let Some(cancel_time) = event.cancel_time {
                            end_time = cancel_time;
                        }

                        let start_value = last_event.value;
                        let end_value = event.value;
                        let diff = end_value - start_value;

                        if is_a_rate {
                            let start_index = self.buffer.len();
                            let end_index =
                                ((end_time - block_time).max(0.) / dt).ceil() as usize;
                            let end_index_clipped = end_index.min(count);

                            if end_index_clipped > start_index {
                                let mut time = (start_index as f64).mul_add(dt, block_time);

                                for _ in start_index..end_index_clipped {
                                    let value = self.compute_linear_ramp_sample(
                                        start_time,
                                        duration,
                                        start_value,
                                        diff,
                                        time,
                                    );
                                    self.buffer.push(value);

                                    time += dt;
                                    self.intrisic_value = value;
                                }
                            }
                        }

                        if end_time >= next_block_time {
                            // Ramp continues into the next block: leave the
                            // event queued and store the boundary value.
                            let value = self.compute_linear_ramp_sample(
                                start_time,
                                duration,
                                start_value,
                                diff,
                                next_block_time,
                            );

                            self.intrisic_value = value;
                            break;
                        } else if event.cancel_time.is_some() {
                            // Truncated ramp: consume it at the cancel time
                            // with the value reached there.
                            let value = self.compute_linear_ramp_sample(
                                start_time,
                                duration,
                                start_value,
                                diff,
                                end_time,
                            );

                            self.intrisic_value = value;

                            let mut last_event = self.event_timeline.pop().unwrap();
                            last_event.time = end_time;
                            last_event.value = value;

                            self.last_event = Some(last_event);
                        } else {
                            // Ramp completed inside this block.
                            self.intrisic_value = end_value;
                            self.last_event = self.event_timeline.pop();
                        }
                    }
                    AudioParamEventType::ExponentialRampToValueAtTime => {
                        // The ramp starts at the previous event.
                        let last_event = self.last_event.as_ref().unwrap();

                        let start_time = last_event.time;
                        let mut end_time = event.time;
                        let duration = end_time - start_time;

                        if let Some(cancel_time) = event.cancel_time {
                            end_time = cancel_time;
                        }

                        let start_value = last_event.value;
                        let end_value = event.value;
                        let ratio = end_value / start_value;

                        // An exponential ramp is undefined when start and end
                        // values differ in sign (or start is zero): fall back
                        // to a step at the end time.
                        if start_value == 0. || start_value * end_value < 0. {
                            let event = AudioParamEvent {
                                event_type: AudioParamEventType::SetValueAtTime,
                                time: end_time,
                                value: end_value,
                                time_constant: None,
                                cancel_time: None,
                                duration: None,
                                values: None,
                            };

                            self.event_timeline.replace_peek(event);
                        } else {
                            if is_a_rate {
                                let start_index = self.buffer.len();
                                let end_index =
                                    ((end_time - block_time).max(0.) / dt).ceil() as usize;
                                let end_index_clipped = end_index.min(count);

                                if end_index_clipped > start_index {
                                    let mut time = (start_index as f64).mul_add(dt, block_time);

                                    for _ in start_index..end_index_clipped {
                                        let value = self.compute_exponential_ramp_sample(
                                            start_time,
                                            duration,
                                            start_value,
                                            ratio,
                                            time,
                                        );

                                        self.buffer.push(value);
                                        self.intrisic_value = value;

                                        time += dt;
                                    }
                                }
                            }

                            if end_time >= next_block_time {
                                // Ramp continues into the next block.
                                let value = self.compute_exponential_ramp_sample(
                                    start_time,
                                    duration,
                                    start_value,
                                    ratio,
                                    next_block_time,
                                );
                                self.intrisic_value = value;
                                break;
                            } else if event.cancel_time.is_some() {
                                // Truncated ramp: consume it at cancel time.
                                let value = self.compute_exponential_ramp_sample(
                                    start_time,
                                    duration,
                                    start_value,
                                    ratio,
                                    end_time,
                                );

                                self.intrisic_value = value;

                                let mut last_event = self.event_timeline.pop().unwrap();
                                last_event.time = end_time;
                                last_event.value = value;

                                self.last_event = Some(last_event);
                            } else {
                                // Ramp completed inside this block.
                                self.intrisic_value = end_value;
                                self.last_event = self.event_timeline.pop();
                            }
                        }
                    }
                    AudioParamEventType::SetTargetAtTime => {
                        let mut end_time = next_block_time;
                        let mut ended = false;

                        // A following event interrupts the approach: ramps
                        // take over immediately, other events at their time.
                        let some_next_event = self.event_timeline.next();

                        if let Some(next_event) = some_next_event {
                            match next_event.event_type {
                                AudioParamEventType::LinearRampToValueAtTime
                                | AudioParamEventType::ExponentialRampToValueAtTime => {
                                    end_time = block_time;
                                    ended = true;
                                }
                                _ => {
                                    if next_event.time < next_block_time {
                                        end_time = next_event.time;
                                        ended = true;
                                    }
                                }
                            }
                        }

                        // A `cancel_and_hold` also terminates the approach.
                        if let Some(cancel_time) = event.cancel_time {
                            if cancel_time < next_block_time {
                                end_time = cancel_time;
                                ended = true;
                            }
                        }

                        let start_time = event.time;
                        let start_value = self.last_event.as_ref().unwrap().value;
                        let end_value = event.value;
                        let diff = start_value - end_value;
                        let time_constant = event.time_constant.unwrap();

                        if is_a_rate {
                            let start_index = self.buffer.len();
                            let end_index =
                                ((end_time - block_time).max(0.) / dt).ceil() as usize;
                            let end_index_clipped = end_index.min(count);

                            if end_index_clipped > start_index {
                                let mut time = (start_index as f64).mul_add(dt, block_time);

                                for _ in start_index..end_index_clipped {
                                    // Hold the current value before the
                                    // automation actually starts.
                                    let value = if time - start_time < 0. {
                                        self.intrisic_value
                                    } else {
                                        self.compute_set_target_sample(
                                            start_time,
                                            time_constant,
                                            end_value,
                                            diff,
                                            time,
                                        )
                                    };

                                    self.buffer.push(value);
                                    self.intrisic_value = value;

                                    time += dt;
                                }
                            }
                        }

                        if !ended {
                            let value = self.compute_set_target_sample(
                                start_time,
                                time_constant,
                                end_value,
                                diff,
                                next_block_time,
                            );

                            // The approach never reaches the target exactly:
                            // snap to it once close enough and replace the
                            // event with a plain step.
                            let diff = (end_value - value).abs();

                            if diff < SNAP_TO_TARGET {
                                self.intrisic_value = end_value;

                                // Flush subnormals when approaching zero so
                                // they do not linger in the output.
                                if end_value == 0. {
                                    for v in self.buffer.iter_mut() {
                                        if v.is_subnormal() {
                                            *v = 0.;
                                        }
                                    }
                                }

                                let event = AudioParamEvent {
                                    event_type: AudioParamEventType::SetValueAtTime,
                                    time: next_block_time,
                                    value: end_value,
                                    time_constant: None,
                                    cancel_time: None,
                                    duration: None,
                                    values: None,
                                };

                                self.event_timeline.replace_peek(event);
                            } else {
                                self.intrisic_value = value;
                            }
                            break;
                        } else {
                            // Interrupted: consume the event at `end_time`
                            // with the value reached there, anchoring any
                            // following automation.
                            let value = self.compute_set_target_sample(
                                start_time,
                                time_constant,
                                end_value,
                                diff,
                                end_time,
                            );

                            self.intrisic_value = value;

                            let mut event = self.event_timeline.pop().unwrap();
                            event.time = end_time;
                            event.value = value;

                            self.last_event = Some(event);
                        }
                    }
                    AudioParamEventType::SetValueCurveAtTime => {
                        let start_time = event.time;
                        let duration = event.duration.unwrap();
                        let values = event.values.as_ref().unwrap();

                        let mut end_time = start_time + duration;

                        // A `cancel_and_hold` truncates the curve.
                        if let Some(cancel_time) = event.cancel_time {
                            end_time = cancel_time;
                        }

                        if is_a_rate {
                            let start_index = self.buffer.len();
                            let end_index =
                                ((end_time - block_time).max(0.) / dt).ceil() as usize;
                            let end_index_clipped = end_index.min(count);

                            if end_index_clipped > start_index {
                                let mut time = (start_index as f64).mul_add(dt, block_time);

                                for _ in start_index..end_index_clipped {
                                    // Hold the current value before the curve
                                    // actually starts.
                                    let value = if time - start_time < 0. {
                                        self.intrisic_value
                                    } else {
                                        self.compute_set_value_curve_sample(
                                            start_time, duration, values, time,
                                        )
                                    };

                                    self.buffer.push(value);
                                    self.intrisic_value = value;

                                    time += dt;
                                }
                            }
                        }

                        if end_time >= next_block_time {
                            // Curve continues into the next block.
                            let value = self.compute_set_value_curve_sample(
                                start_time,
                                duration,
                                values,
                                next_block_time,
                            );

                            self.intrisic_value = value;
                            break;
                        } else {
                            if event.cancel_time.is_some() {
                                // Truncated curve: consume it at cancel time.
                                let value = self.compute_set_value_curve_sample(
                                    start_time, duration, values, end_time,
                                );

                                self.intrisic_value = value;

                                let mut last_event = self.event_timeline.pop().unwrap();
                                last_event.time = end_time;
                                last_event.value = value;

                                self.last_event = Some(last_event);
                            } else {
                                // Curve completed: hold the last point.
                                let value = values[values.len() - 1];

                                let mut last_event = self.event_timeline.pop().unwrap();
                                last_event.time = end_time;
                                last_event.value = value;

                                self.intrisic_value = value;
                                self.last_event = Some(last_event);
                            }
                        }
                    }
                    // Cancel events are consumed in `handle_incoming_events`
                    // and must never reach the timeline.
                    _ => panic!(
                        "AudioParamEvent {:?} should not appear in AudioParamEventTimeline",
                        event.event_type
                    ),
                }
            }
        }
    }
}
}
pub(crate) fn audio_param_pair(
opts: AudioParamDescriptor,
registration: AudioContextRegistration,
) -> (AudioParam, AudioParamProcessor) {
let (sender, receiver) = crossbeam_channel::unbounded();
let current_value = Arc::new(AtomicF32::new(opts.default_value));
let is_a_rate = Arc::new(AtomicBool::new(opts.automation_rate == AutomationRate::A));
let param = AudioParam {
registration,
is_a_rate: is_a_rate.clone(),
automation_rate_constrained: false,
default_value: opts.default_value,
min_value: opts.min_value,
max_value: opts.max_value,
current_value: current_value.clone(),
sender,
};
let render = AudioParamProcessor {
intrisic_value: opts.default_value,
current_value,
receiver,
is_a_rate,
default_value: opts.default_value,
min_value: opts.min_value,
max_value: opts.max_value,
event_timeline: AudioParamEventTimeline::new(),
last_event: None,
buffer: Vec::with_capacity(RENDER_QUANTUM_SIZE),
};
(param, render)
}
#[cfg(test)]
mod tests {
use float_eq::assert_float_eq;
use crate::context::{BaseAudioContext, OfflineAudioContext};
use crate::render::Alloc;
use super::*;
#[test]
#[should_panic]
fn test_assert_non_negative_fail() {
assert_non_negative(-1.);
}
#[test]
fn test_assert_non_negative() {
assert_non_negative(0.);
}
#[test]
#[should_panic]
fn test_assert_strictly_positive_fail() {
assert_strictly_positive(0.);
}
#[test]
fn test_assert_strictly_positive() {
assert_strictly_positive(0.1);
}
#[test]
#[should_panic]
fn test_assert_not_zero_fail() {
assert_not_zero(0.);
}
#[test]
fn test_assert_not_zero() {
assert_not_zero(-0.1);
assert_not_zero(0.1);
}
#[test]
#[should_panic]
fn test_assert_sequence_length_fail() {
assert_sequence_length(&[0.; 1]);
}
#[test]
fn test_assert_sequence_length() {
assert_sequence_length(&[0.; 2]);
}
#[test]
fn test_default_and_accessors() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, _render) = audio_param_pair(opts, context.mock_registration());
assert_eq!(param.automation_rate(), AutomationRate::A);
assert_float_eq!(param.default_value(), 0., abs_all <= 0.);
assert_float_eq!(param.min_value(), -10., abs_all <= 0.);
assert_float_eq!(param.max_value(), 10., abs_all <= 0.);
assert_float_eq!(param.value(), 0., abs_all <= 0.);
}
#[test]
fn test_set_value() {
{
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
param.set_value(2.);
assert_float_eq!(param.value(), 2., abs_all <= 0.);
let vs = render.compute_intrisic_values(0., 1., 10);
assert_float_eq!(param.value(), 2., abs_all <= 0.);
assert_float_eq!(vs, &[2.; 10][..], abs_all <= 0.);
}
{
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 1.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
param.set_value(2.);
assert_float_eq!(param.value(), 1., abs_all <= 0.);
let vs = render.compute_intrisic_values(0., 1., 10);
assert_float_eq!(param.value(), 1., abs_all <= 0.);
assert_float_eq!(vs, &[2.; 10][..], abs_all <= 0.);
}
}
#[test]
fn test_steps_a_rate() {
let context = OfflineAudioContext::new(1, 0, 48000.);
{
let opts = AudioParamDescriptor {
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
param.set_value_at_time(5., 2.0);
param.set_value_at_time(12., 8.0); param.set_value_at_time(8., 10.0);
let vs = render.compute_intrisic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 0., 5., 5., 5., 5., 5., 5., 12., 12.][..],
abs_all <= 0.
);
let vs = render.compute_intrisic_values(10., 1., 10);
assert_float_eq!(vs, &[8.; 1][..], abs_all <= 0.);
}
{
let opts = AudioParamDescriptor {
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
param.set_value_at_time(5., 2.0);
param.set_value_at_time(8., 12.0);
let vs = render.compute_intrisic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 0., 5., 5., 5., 5., 5., 5., 5., 5.][..],
abs_all <= 0.
);
let vs = render.compute_intrisic_values(10., 1., 10);
assert_float_eq!(
vs,
&[5., 5., 8., 8., 8., 8., 8., 8., 8., 8.][..],
abs_all <= 0.
);
}
}
#[test]
fn test_steps_k_rate() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
automation_rate: AutomationRate::K,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
param.set_value_at_time(5., 2.0);
param.set_value_at_time(12., 8.0); param.set_value_at_time(8., 10.0); param.set_value_at_time(3., 14.0);
let vs = render.compute_intrisic_values(0., 1., 10);
assert_float_eq!(vs, &[0.; 1][..], abs_all <= 0.);
let vs = render.compute_intrisic_values(10., 1., 10);
assert_float_eq!(vs, &[8.; 1][..], abs_all <= 0.);
let vs = render.compute_intrisic_values(20., 1., 10);
assert_float_eq!(vs, &[3.; 1][..], abs_all <= 0.);
}
#[test]
fn test_linear_ramp_arate() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
param.set_value_at_time(5., 2.0);
param.linear_ramp_to_value_at_time(8.0, 5.0);
param.linear_ramp_to_value_at_time(0., 13.0);
let vs = render.compute_intrisic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 0., 5., 6., 7., 8., 7., 6., 5., 4.][..],
abs_all <= 0.
);
}
#[test]
fn test_linear_ramp_arate_end_of_block() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
param.set_value_at_time(0., 0.);
param.linear_ramp_to_value_at_time(9.0, 9.0);
let vs = render.compute_intrisic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.][..],
abs_all <= 0.
);
}
#[test]
fn test_linear_ramp_arate_implicit_set_value() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let vs = render.compute_intrisic_values(0., 1., 10);
assert_float_eq!(vs, &[0.; 1][..], abs_all <= 0.);
param.linear_ramp_to_value_at_time(10.0, 20.0);
let vs = render.compute_intrisic_values(10., 1., 10);
assert_float_eq!(
vs,
&[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.][..],
abs_all <= 0.
);
let vs = render.compute_intrisic_values(20., 1., 10);
assert_float_eq!(vs, &[10.; 10][..], abs_all <= 0.);
}
#[test]
fn test_linear_ramp_arate_multiple_blocks() {
    // A-rate linear ramp spanning two full render blocks; also checks that
    // param.value() reflects the first frame of each computed block.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let descriptor = AudioParamDescriptor {
        automation_rate: AutomationRate::A,
        default_value: 0.,
        min_value: -20.,
        max_value: 20.,
    };
    let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());

    param.linear_ramp_to_value_at_time(20.0, 20.0);

    // First block: frames 0..10.
    let values = render.compute_intrisic_values(0., 1., 10);
    let expected: Vec<f32> = (0..10).map(|frame| frame as f32).collect();
    assert_float_eq!(values, &expected[..], abs_all <= 0.);
    assert_float_eq!(param.value(), 0., abs <= 0.);

    // Second block: frames 10..20.
    let values = render.compute_intrisic_values(10., 1., 10);
    let expected: Vec<f32> = (10..20).map(|frame| frame as f32).collect();
    assert_float_eq!(values, &expected[..], abs_all <= 0.);
    assert_float_eq!(param.value(), 10., abs <= 0.);

    // Past the ramp end: constant target value.
    let values = render.compute_intrisic_values(20., 1., 10);
    assert_float_eq!(values, &[20.0; 10][..], abs_all <= 0.);
    assert_float_eq!(param.value(), 20., abs <= 0.);
}
#[test]
fn test_linear_ramp_krate_multiple_blocks() {
    let context = OfflineAudioContext::new(1, 0, 48000.);
    // Case 1: ramp end coincides with a block boundary.
    {
        let descriptor = AudioParamDescriptor {
            automation_rate: AutomationRate::K,
            default_value: 0.,
            min_value: -20.,
            max_value: 20.,
        };
        let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());
        param.linear_ramp_to_value_at_time(20.0, 20.0);

        // K-rate: a single intrinsic value per block, sampled at block start.
        let values = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(values, &[0.; 1][..], abs_all <= 0.);
        assert_float_eq!(param.value(), 0., abs <= 0.);

        let values = render.compute_intrisic_values(10., 1., 10);
        assert_float_eq!(values, &[10.; 1][..], abs_all <= 0.);
        assert_float_eq!(param.value(), 10., abs <= 0.);

        let values = render.compute_intrisic_values(20., 1., 10);
        assert_float_eq!(values, &[20.0; 1][..], abs_all <= 0.);
        assert_float_eq!(param.value(), 20., abs <= 0.);
    }
    // Case 2: ramp end falls inside a block.
    {
        let descriptor = AudioParamDescriptor {
            automation_rate: AutomationRate::K,
            default_value: 0.,
            min_value: -20.,
            max_value: 20.,
        };
        let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());
        param.linear_ramp_to_value_at_time(15.0, 15.0);

        let values = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(values, &[0.; 1][..], abs_all <= 0.);
        assert_float_eq!(param.value(), 0., abs <= 0.);

        let values = render.compute_intrisic_values(10., 1., 10);
        assert_float_eq!(values, &[10.; 1][..], abs_all <= 0.);
        assert_float_eq!(param.value(), 10., abs <= 0.);

        // The ramp ended mid-block; the block start samples the settled target.
        let values = render.compute_intrisic_values(20., 1., 10);
        assert_float_eq!(values, &[15.0; 1][..], abs_all <= 0.);
        assert_float_eq!(param.value(), 15., abs <= 0.);
    }
}
#[test]
fn test_linear_ramp_start_time() {
    // Verifies the implicit start time of a linear ramp: a ramp inserted after
    // a gap starts from the end of the previous event, not from insertion time.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let descriptor = AudioParamDescriptor {
        automation_rate: AutomationRate::A,
        default_value: 0.,
        min_value: -10.,
        max_value: 10.,
    };
    let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());

    // Descend from 1 to -1 over frames 0..10.
    param.set_value_at_time(1., 0.);
    param.linear_ramp_to_value_at_time(-1., 10.);
    let values = render.compute_intrisic_values(0., 1., 10);
    assert_float_eq!(
        values,
        &[1., 0.8, 0.6, 0.4, 0.2, 0., -0.2, -0.4, -0.6, -0.8][..],
        abs_all <= 1e-7
    );

    // Flat at the ramp target during frames 10..20.
    let values = render.compute_intrisic_values(10., 1., 10);
    assert_float_eq!(values, &[-1.; 10][..], abs_all <= 0.);

    // New ramp to 1 at time 30: it starts at time 10 (previous event end),
    // so frames 20..30 already sit halfway along the ramp.
    param.linear_ramp_to_value_at_time(1., 30.);
    let values = render.compute_intrisic_values(20., 1., 10);
    assert_float_eq!(
        values,
        &[0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9][..],
        abs_all <= 1e-7
    );
}
#[test]
fn test_exponential_ramp_a_rate() {
    // A-rate exponential ramp: v(t) = v0 * (v1 / v0)^((t - t0) / (t1 - t0)).
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let descriptor = AudioParamDescriptor {
        automation_rate: AutomationRate::A,
        default_value: 0.,
        min_value: 0.,
        max_value: 1.,
    };
    let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());

    param.set_value_at_time(0.0001, 0.);
    param.exponential_ramp_to_value_at_time(1.0, 10.);

    // Expected values follow the same closed-form expression as the renderer.
    let start: f32 = 0.0001;
    let end: f32 = 1.;
    let expected: Vec<f32> = (0..10)
        .map(|t| start * (end / start).powf(t as f32 / 10.))
        .collect();

    let values = render.compute_intrisic_values(0., 1., 10);
    assert_float_eq!(values, &expected[..], abs_all <= 0.);

    // After the ramp end the target value holds.
    let values = render.compute_intrisic_values(10., 1., 10);
    assert_float_eq!(values, &[1.0; 10][..], abs_all <= 0.);
}
#[test]
fn test_exponential_ramp_a_rate_multiple_blocks() {
    // Exponential ramp from frame 3 to frame 13, i.e. spanning a block boundary.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let opts = AudioParamDescriptor {
        automation_rate: AutomationRate::A,
        default_value: 0.,
        min_value: 0.,
        max_value: 1.,
    };
    let (param, mut render) = audio_param_pair(opts, context.mock_registration());
    let start: f32 = 0.0001; let end: f32 = 1.;
    param.set_value_at_time(start, 3.);
    param.exponential_ramp_to_value_at_time(end, 13.);
    // Expected sequence: default value (0.) for frames 0..3, then
    // v(t) = v0 * (v1/v0)^((t - t0)/(t1 - t0)) for the 10-frame ramp,
    // then the target value (1.) for the remaining frames.
    let mut res = vec![0.; 3];
    for t in 0..10 {
        let value = start * (end / start).powf(t as f32 / 10.);
        res.push(value);
    }
    res.append(&mut vec![1.; 7]);
    let vs = render.compute_intrisic_values(0., 1., 10);
    assert_float_eq!(vs, &res[0..10], abs_all <= 0.);
    // param.value() mirrors the first frame of the last computed block.
    assert_float_eq!(param.value(), res[0], abs <= 0.);
    let vs = render.compute_intrisic_values(10., 1., 10);
    assert_float_eq!(vs, &res[10..20], abs_all <= 0.);
    assert_float_eq!(param.value(), res[10], abs <= 0.);
}
#[test]
fn test_exponential_ramp_a_rate_zero_and_opposite_target() {
    // An exponential ramp is undefined when the start value is zero or has the
    // opposite sign of the target: the value must jump to the target at end time.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    // Case 1: start value is exactly zero.
    {
        let descriptor = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 1.,
        };
        let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());
        param.set_value_at_time(0., 0.);
        param.exponential_ramp_to_value_at_time(1.0, 5.);

        let values = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(
            values,
            &[0., 0., 0., 0., 0., 1., 1., 1., 1., 1.][..],
            abs_all <= 0.
        );
    }
    // Case 2: start value has the opposite sign of the target.
    {
        let descriptor = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: -1.,
            max_value: 1.,
        };
        let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());
        param.set_value_at_time(-1., 0.);
        param.exponential_ramp_to_value_at_time(1.0, 5.);

        let values = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(
            values,
            &[-1., -1., -1., -1., -1., 1., 1., 1., 1., 1.][..],
            abs_all <= 0.
        );
    }
}
#[test]
#[should_panic]
fn test_exponential_ramp_to_zero() {
    // A target of exactly zero is invalid for an exponential ramp
    // (the ratio v1/v0 would be zero) and must panic at scheduling time.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let descriptor = AudioParamDescriptor {
        automation_rate: AutomationRate::A,
        default_value: 1.,
        min_value: 0.,
        max_value: 1.,
    };
    let (param, mut _render) = audio_param_pair(descriptor, context.mock_registration());
    param.exponential_ramp_to_value_at_time(0.0, 10.);
}
#[test]
fn test_exponential_ramp_k_rate_multiple_blocks() {
    // K-rate version of the exponential ramp: only the value at each block
    // start is observed (single-valued slices).
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let opts = AudioParamDescriptor {
        automation_rate: AutomationRate::K,
        default_value: 0.,
        min_value: 0.,
        max_value: 1.,
    };
    let (param, mut render) = audio_param_pair(opts, context.mock_registration());
    let start: f32 = 0.0001; let end: f32 = 1.;
    param.set_value_at_time(start, 3.);
    param.exponential_ramp_to_value_at_time(end, 13.);
    // Build the full A-rate expectation, then sample it at frames 0, 10, 20.
    let mut res = vec![0.; 3];
    for t in 0..10 {
        let value = start * (end / start).powf(t as f32 / 10.);
        res.push(value);
    }
    res.append(&mut vec![1.; 7]);
    let vs = render.compute_intrisic_values(0., 1., 10);
    assert_float_eq!(vs, &[res[0]; 1][..], abs_all <= 0.);
    let vs = render.compute_intrisic_values(10., 1., 10);
    assert_float_eq!(vs, &[res[10]; 1][..], abs_all <= 0.);
    let vs = render.compute_intrisic_values(20., 1., 10);
    assert_float_eq!(vs, &[1.; 1][..], abs_all <= 0.);
}
#[test]
fn test_exponential_ramp_k_rate_zero_and_opposite_target() {
    // K-rate counterpart of the zero / opposite-sign exponential ramp cases:
    // the value holds until the ramp end time, then jumps to the target.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    // Case 1: implicit start value is zero.
    {
        let descriptor = AudioParamDescriptor {
            automation_rate: AutomationRate::K,
            default_value: 0.,
            min_value: 0.,
            max_value: 1.,
        };
        let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());
        param.exponential_ramp_to_value_at_time(1.0, 5.);

        let values = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(values, &[0.; 1][..], abs_all <= 0.);
        let values = render.compute_intrisic_values(10., 1., 10);
        assert_float_eq!(values, &[1.; 1][..], abs_all <= 0.);
    }
    // Case 2: implicit start value has the opposite sign of the target.
    {
        let descriptor = AudioParamDescriptor {
            automation_rate: AutomationRate::K,
            default_value: -1.,
            min_value: -1.,
            max_value: 1.,
        };
        let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());
        param.exponential_ramp_to_value_at_time(1.0, 5.);

        let values = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(values, &[-1.; 1][..], abs_all <= 0.);
        let values = render.compute_intrisic_values(10., 1., 10);
        assert_float_eq!(values, &[1.; 1][..], abs_all <= 0.);
    }
}
#[test]
fn test_exponential_ramp_start_time() {
    // Verifies the implicit start time of an exponential ramp inserted after a
    // gap: it starts at the end of the previous event (time 10 here), not at
    // insertion time.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let opts = AudioParamDescriptor {
        automation_rate: AutomationRate::A,
        default_value: 0.,
        min_value: -10.,
        max_value: 10.,
    };
    let (param, mut render) = audio_param_pair(opts, context.mock_registration());
    // Lead-in: linear ramp from 0 to 1 over frames 0..10.
    param.set_value_at_time(0., 0.);
    param.linear_ramp_to_value_at_time(1., 10.);
    let vs = render.compute_intrisic_values(0., 1., 10);
    assert_float_eq!(
        vs,
        &[0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9][..],
        abs_all <= 1e-7
    );
    let vs = render.compute_intrisic_values(10., 1., 10);
    assert_float_eq!(vs, &[1.; 10][..], abs_all <= 0.);
    // Exponential ramp inserted now (current time 20) but its implicit start
    // is time 10, so frames 20..30 match the second half of a 20-frame curve.
    param.exponential_ramp_to_value_at_time(0.0001, 30.);
    let vs = render.compute_intrisic_values(20., 1., 10);
    let start: f32 = 1.;
    let end: f32 = 0.0001;
    let mut res = [0.; 20];
    for (t, v) in res.iter_mut().enumerate() {
        *v = start * (end / start).powf(t as f32 / 20.);
    }
    assert_float_eq!(vs, &res[10..], abs_all <= 1e-7);
}
#[test]
fn test_set_target_at_time_a_rate() {
    // set_target_at_time follows v(t) = v1 + (v0 - v1) * exp(-(t - t0) / tau).
    let context = OfflineAudioContext::new(1, 0, 48000.);
    // Case 1: explicit start value via set_value_at_time.
    {
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 1.,
        };
        let (param, mut render) = audio_param_pair(opts, context.mock_registration());
        let v0: f32 = 0.;
        let v1: f32 = 1.;
        let t0: f64 = 0.;
        let time_constant: f64 = 1.;
        param.set_value_at_time(v0, t0);
        param.set_target_at_time(v1, t0, time_constant);
        let vs = render.compute_intrisic_values(0., 1., 10);
        let mut res = Vec::<f32>::with_capacity(10);
        for t in 0..10 {
            let val = v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32;
            res.push(val);
        }
        assert_float_eq!(vs, &res[..], abs_all <= 0.);
    }
    // Case 2: same curve with an implicit start value (the default, 0.).
    {
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 1.,
        };
        let (param, mut render) = audio_param_pair(opts, context.mock_registration());
        let v0: f32 = 0.; let v1: f32 = 1.;
        let t0: f64 = 0.;
        let time_constant: f64 = 1.;
        param.set_target_at_time(v1, t0, time_constant);
        let vs = render.compute_intrisic_values(0., 1., 10);
        let mut res = Vec::<f32>::with_capacity(10);
        for t in 0..10 {
            let val = v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32;
            res.push(val);
        }
        assert_float_eq!(vs, &res[..], abs_all <= 0.);
    }
    // Case 3: non-trivial v0 / t0 / tau; before t0 the default value applies.
    {
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 100.,
        };
        let (param, mut render) = audio_param_pair(opts, context.mock_registration());
        let v0: f32 = 1.;
        let v1: f32 = 42.;
        let t0: f64 = 1.;
        let time_constant: f64 = 2.1;
        param.set_value_at_time(v0, t0);
        param.set_target_at_time(v1, t0, time_constant);
        let mut res = Vec::<f32>::with_capacity(10);
        for t in 0..10 {
            let val = v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32;
            res.push(val);
        }
        // Frame 0 precedes t0 = 1, so it keeps the default value.
        res[0] = 0.;
        let vs = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(vs, &res[..], abs_all <= 0.);
    }
    // Case 4: a zero time constant jumps straight to the target at t0.
    {
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 100.,
        };
        let (param, mut render) = audio_param_pair(opts, context.mock_registration());
        param.set_target_at_time(1., 1., 0.);
        let mut res = [1.; 10];
        res[0] = 0.;
        let vs = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(vs, &res[..], abs_all <= 0.);
    }
}
#[test]
fn test_set_target_at_time_a_rate_multiple_blocks() {
    // set_target_at_time exponential approach continuing across a block boundary.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    {
        let descriptor = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 2.,
        };
        let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());
        let v0: f32 = 0.;
        let v1: f32 = 2.;
        let t0: f64 = 0.;
        let time_constant: f64 = 1.;
        param.set_value_at_time(v0, t0);
        param.set_target_at_time(v1, t0, time_constant);

        // v(t) = v1 + (v0 - v1) * exp(-(t - t0) / tau) for 20 frames.
        let expected: Vec<f32> = (0..20)
            .map(|t| v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32)
            .collect();

        let values = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(values, &expected[0..10], abs_all <= 0.);
        let values = render.compute_intrisic_values(10., 1., 10);
        assert_float_eq!(values, &expected[10..20], abs_all <= 0.);
    }
}
#[test]
fn test_set_target_at_time_a_rate_followed_by_set_value() {
    // A set_value_at_time scheduled during a set_target_at_time truncates the
    // exponential approach at its time.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    {
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 2.,
        };
        let (param, mut render) = audio_param_pair(opts, context.mock_registration());
        let v0: f32 = 0.;
        let v1: f32 = 2.;
        let t0: f64 = 0.;
        let time_constant: f64 = 1.;
        param.set_value_at_time(v0, t0);
        param.set_target_at_time(v1, t0, time_constant);
        // Interrupt the approach at frame 15 with a constant value.
        param.set_value_at_time(0.5, 15.);
        // Exponential curve for frames 0..15, then 0.5 for frames 15..20.
        let mut res = Vec::<f32>::with_capacity(20);
        for t in 0..15 {
            let val = v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32;
            res.push(val);
        }
        res.resize(20, 0.5);
        let vs = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(vs, &res[0..10], abs_all <= 0.);
        let vs = render.compute_intrisic_values(10., 1., 10);
        assert_float_eq!(vs, &res[10..20], abs_all <= 0.);
    }
}
#[test]
fn test_set_target_at_time_ends_at_threshold() {
    // When the exponential approach gets close enough to its target, the
    // renderer must snap to the target (avoiding subnormal values) and replace
    // the event with an equivalent SetValueAtTime on the timeline.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let opts = AudioParamDescriptor {
        automation_rate: AutomationRate::A,
        default_value: 0.,
        min_value: 0.,
        max_value: 2.,
    };
    let (param, mut render) = audio_param_pair(opts, context.mock_registration());
    param.set_value_at_time(1., 0.);
    // Small time constant: the value decays toward 0 well within this block.
    param.set_target_at_time(0., 1., 0.2);
    let vs = render.compute_intrisic_values(0., 1., 128);
    // No subnormal values must leak out of the snap-to-target logic.
    for v in vs.iter() {
        assert!(!v.is_subnormal());
    }
    // Once converged, the SetTarget event is rewritten as a SetValueAtTime.
    let peek = render.event_timeline.peek();
    assert_eq!(
        peek.unwrap().event_type,
        AudioParamEventType::SetValueAtTime
    );
    let vs = render.compute_intrisic_values(10., 1., 128);
    assert_float_eq!(vs[..], [0.; 128], abs_all <= 0.);
}
#[test]
fn test_set_target_at_time_waits_for_start_time() {
    // Before the scheduled start time of a set_target_at_time event, the
    // parameter keeps its previous value.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let descriptor = AudioParamDescriptor {
        automation_rate: AutomationRate::A,
        default_value: 0.,
        min_value: 0.,
        max_value: 2.,
    };
    let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());

    param.set_value_at_time(1., 0.);
    // Target event only starts at frame 5.
    param.set_target_at_time(0., 5., 1.);

    let values = render.compute_intrisic_values(0., 1., 10);
    // Frames 0..=5 (up to and including the start frame) hold the prior value.
    for &v in values.iter().take(6) {
        assert_float_eq!(v, 1., abs <= 0.);
    }
}
#[test]
fn test_set_target_at_time_a_rate_followed_by_ramp() {
    // A linear ramp scheduled while a set_target_at_time is in progress takes
    // over from the value the target curve has reached at the ramp start.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    {
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 10.,
        };
        let (param, mut render) = audio_param_pair(opts, context.mock_registration());
        let v0: f32 = 0.;
        let v1: f32 = 2.;
        let t0: f64 = 0.;
        let time_constant: f64 = 10.;
        param.set_value_at_time(v0, t0);
        param.set_target_at_time(v1, t0, time_constant);
        // Expected exponential values for frames 0..=10; the 11th value is the
        // curve value at frame 10, where the ramp will take over.
        let mut res = Vec::<f32>::with_capacity(20);
        for t in 0..11 {
            let val = v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32;
            res.push(val);
        }
        let vs = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(vs, &res[0..10], abs_all <= 0.);
        // Shadowed: the ramp starts from the popped curve value at frame 10.
        let v0 = res.pop().unwrap(); let v1 = 10.;
        let t0 = 10.;
        let t1 = 20.;
        param.linear_ramp_to_value_at_time(v1, t1);
        for t in 10..20 {
            let time = t as f64;
            let value = v0 + (v1 - v0) * (time - t0) as f32 / (t1 - t0) as f32;
            res.push(value);
        }
        let vs = render.compute_intrisic_values(10., 1., 10);
        assert_float_eq!(vs, &res[10..20], abs_all <= 1.0e-6);
        let vs = render.compute_intrisic_values(20., 1., 10);
        assert_float_eq!(vs, &[v1; 10][..], abs_all <= 0.);
    }
}
#[test]
fn test_set_target_at_time_k_rate_multiple_blocks() {
    // K-rate set_target_at_time: only the block-start sample of the
    // exponential approach is observed.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    {
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::K,
            default_value: 0.,
            min_value: 0.,
            max_value: 2.,
        };
        let (param, mut render) = audio_param_pair(opts, context.mock_registration());
        let v0: f32 = 0.;
        let v1: f32 = 2.;
        let t0: f64 = 0.;
        let time_constant: f64 = 1.;
        param.set_value_at_time(v0, t0);
        param.set_target_at_time(v1, t0, time_constant);
        // Full A-rate curve, sampled at frames 0 and 10 below.
        let mut res = Vec::<f32>::with_capacity(20);
        for t in 0..20 {
            let val = v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32;
            res.push(val);
        }
        let vs = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(vs, &[res[0]; 1][..], abs_all <= 0.);
        let vs = render.compute_intrisic_values(10., 1., 10);
        assert_float_eq!(vs, &[res[10]; 1][..], abs_all <= 0.);
    }
}
#[test]
fn test_set_target_at_time_snap_to_value() {
    // After enough time constants the exponential approach must snap exactly
    // to the target value (0. here) rather than decaying forever.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let opts = AudioParamDescriptor {
        automation_rate: AutomationRate::A,
        default_value: 0.,
        min_value: 0.,
        max_value: 1.,
    };
    let (param, mut render) = audio_param_pair(opts, context.mock_registration());
    let v0: f32 = 1.;
    let v1: f32 = 0.;
    let t0: f64 = 0.;
    let time_constant: f64 = 1.;
    param.set_value_at_time(v0, t0);
    param.set_target_at_time(v1, t0, time_constant);
    // Analytic expectation for the first 30 frames.
    let mut res = [0.; 30];
    res.iter_mut().enumerate().for_each(|(t, r)| {
        *r = v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32;
    });
    let vs = render.compute_intrisic_values(0., 1., 10);
    assert_float_eq!(vs, &res[..10], abs_all <= 0.);
    let vs = render.compute_intrisic_values(10., 1., 10);
    assert_float_eq!(vs, &res[10..20], abs_all <= 0.);
    let vs = render.compute_intrisic_values(20., 1., 10);
    assert_float_eq!(vs, &res[20..30], abs_all <= 0.);
    // By frame 30 the curve has converged: exactly the target value.
    let vs = render.compute_intrisic_values(30., 1., 10);
    assert_float_eq!(vs, &[0.; 10][..], abs_all <= 0.);
}
#[test]
fn test_cancel_scheduled_values() {
    // cancel_scheduled_values(t) removes every event scheduled at or after t;
    // the last surviving event's value then holds.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let descriptor = AudioParamDescriptor {
        automation_rate: AutomationRate::A,
        default_value: 0.,
        min_value: 0.,
        max_value: 10.,
    };
    let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());

    // One set_value_at_time per frame: value n at time n.
    for step in 0..10 {
        param.set_value_at_time(step as f32, step as f64);
    }
    // Cancel everything from time 5 onward; the value 4 (set at time 4) holds.
    param.cancel_scheduled_values(5.);

    let values = render.compute_intrisic_values(0., 1., 10);
    assert_float_eq!(
        values,
        &[0., 1., 2., 3., 4., 4., 4., 4., 4., 4.][..],
        abs_all <= 0.
    );
}
#[test]
fn test_cancel_scheduled_values_ramp() {
    // Cancelling a ramp event removes the whole ramp, even if cancellation
    // happens after rendering has already consumed part of it.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    // Case 1: ramp cancelled before any rendering -> never takes effect.
    {
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 10.,
        };
        let (param, mut render) = audio_param_pair(opts, context.mock_registration());
        param.set_value_at_time(0., 0.);
        param.linear_ramp_to_value_at_time(10., 10.);
        param.cancel_scheduled_values(10.);
        let vs = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(vs, &[0.; 10][..], abs_all <= 0.);
    }
    // Case 2: first block renders part of the ramp, then the ramp is
    // cancelled -> the value falls back to the preceding event's value (0.).
    {
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 20.,
        };
        let (param, mut render) = audio_param_pair(opts, context.mock_registration());
        param.set_value_at_time(0., 0.);
        param.linear_ramp_to_value_at_time(20., 20.);
        let vs = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(
            vs,
            &[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.][..],
            abs_all <= 0.
        );
        param.cancel_scheduled_values(10.);
        let vs = render.compute_intrisic_values(10., 1., 10);
        assert_float_eq!(vs, &[0.; 1][..], abs_all <= 0.);
    }
    // Case 3: same as case 1 but with an implicit start value.
    {
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 10.,
        };
        let (param, mut render) = audio_param_pair(opts, context.mock_registration());
        param.linear_ramp_to_value_at_time(10., 10.);
        param.cancel_scheduled_values(10.);
        let vs = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(vs, &[0.; 10][..], abs_all <= 0.);
    }
    // Case 4: same as case 2 but with an implicit start value.
    {
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 20.,
        };
        let (param, mut render) = audio_param_pair(opts, context.mock_registration());
        param.linear_ramp_to_value_at_time(20., 20.);
        let vs = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(
            vs,
            &[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.][..],
            abs_all <= 0.
        );
        param.cancel_scheduled_values(10.);
        let vs = render.compute_intrisic_values(10., 1., 10);
        assert_float_eq!(vs, &[0.; 1][..], abs_all <= 0.);
    }
}
#[test]
fn test_cancel_and_hold() {
    // cancel_and_hold_at_time(t) drops events after t but holds the value the
    // timeline had reached at t (here: the value 2 set at time 2).
    let context = OfflineAudioContext::new(1, 0, 48000.);
    {
        let descriptor = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 10.,
        };
        let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());

        for step in 1..=4 {
            param.set_value_at_time(step as f32, step as f64);
        }
        // Hold at 2.5: events at times 3 and 4 are cancelled.
        param.cancel_and_hold_at_time(2.5);

        let values = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(
            values,
            &[0., 1., 2., 2., 2., 2., 2., 2., 2., 2.][..],
            abs_all <= 0.
        );
    }
}
#[test]
fn test_cancel_and_hold_during_set_target() {
    // cancel_and_hold during a set_target_at_time freezes the exponential
    // curve at its value at hold time.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    {
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 2.,
        };
        let (param, mut render) = audio_param_pair(opts, context.mock_registration());
        let v0: f32 = 0.;
        let v1: f32 = 2.;
        let t0: f64 = 0.;
        let time_constant: f64 = 1.;
        param.set_value_at_time(v0, t0);
        param.set_target_at_time(v1, t0, time_constant);
        param.cancel_and_hold_at_time(15.);
        // Exponential curve for frames 0..15; the value at frame 15 is the
        // hold value for the rest.
        let mut res = Vec::<f32>::with_capacity(20);
        for t in 0..16 {
            let val = v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32;
            res.push(val);
        }
        let hold_value = res.pop().unwrap();
        res.resize(20, hold_value);
        let vs = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(vs, &res[0..10], abs_all <= 0.);
        let vs = render.compute_intrisic_values(10., 1., 10);
        assert_float_eq!(vs, &res[10..20], abs_all <= 0.);
    }
}
#[test]
fn test_cancel_and_hold_during_linear_ramp() {
    // cancel_and_hold during a linear ramp freezes the ramp at its
    // interpolated value at hold time.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    // Case 1: hold time on an exact frame.
    {
        let descriptor = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 10.,
        };
        let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());
        param.linear_ramp_to_value_at_time(10., 10.);
        param.cancel_and_hold_at_time(5.);

        let values = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(
            values,
            &[0., 1., 2., 3., 4., 5., 5., 5., 5., 5.][..],
            abs_all <= 0.
        );
    }
    // Case 2: hold time between frames -> hold value is interpolated.
    {
        let descriptor = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 10.,
        };
        let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());
        param.linear_ramp_to_value_at_time(10., 10.);
        param.cancel_and_hold_at_time(4.5);

        let values = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(
            values,
            &[0., 1., 2., 3., 4., 4.5, 4.5, 4.5, 4.5, 4.5][..],
            abs_all <= 0.
        );
    }
}
#[test]
fn test_cancel_and_hold_during_exponential_ramp() {
    // cancel_and_hold during an exponential ramp freezes the curve at its
    // value at hold time (interpolated on the exponential, not linearly).
    let context = OfflineAudioContext::new(1, 0, 48000.);
    // Case 1: hold time on an exact frame.
    {
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 10.,
        };
        let (param, mut render) = audio_param_pair(opts, context.mock_registration());
        param.set_value_at_time(0.0001, 0.);
        param.exponential_ramp_to_value_at_time(1.0, 10.);
        param.cancel_and_hold_at_time(5.);
        let mut res = Vec::<f32>::with_capacity(10);
        let start: f32 = 0.0001;
        let end: f32 = 1.;
        for t in 0..6 {
            let value = start * (end / start).powf(t as f32 / 10.);
            res.push(value);
        }
        // The curve value at frame 5 becomes the hold value.
        let hold_value = res.pop().unwrap();
        res.resize(10, hold_value);
        let vs = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(vs, &res[..], abs_all <= 0.);
    }
    // Case 2: hold time between frames -> evaluate the curve at t = 4.5.
    {
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 10.,
        };
        let (param, mut render) = audio_param_pair(opts, context.mock_registration());
        param.set_value_at_time(0.0001, 0.);
        param.exponential_ramp_to_value_at_time(1.0, 10.);
        param.cancel_and_hold_at_time(4.5);
        let mut res = Vec::<f32>::with_capacity(10);
        let start: f32 = 0.0001;
        let end: f32 = 1.;
        for t in 0..5 {
            let value = start * (end / start).powf(t as f32 / 10.);
            res.push(value);
        }
        let hold_value = start * (end / start).powf(4.5 / 10.);
        res.resize(10, hold_value);
        let vs = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(vs, &res[..], abs_all <= 0.);
    }
}
#[test]
fn test_cancel_and_hold_during_set_value_curve() {
    // cancel_and_hold during set_value_curve_at_time freezes the curve at its
    // (linearly interpolated) value at hold time.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    // Case 1: hold at the curve's midpoint frame.
    {
        let descriptor = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 2.,
        };
        let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());
        let curve = [0., 0.5, 1., 0.5, 0.];
        param.set_value_curve_at_time(&curve[..], 0., 10.);
        param.cancel_and_hold_at_time(5.);

        let values = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(
            values,
            &[0., 0.2, 0.4, 0.6, 0.8, 1., 1., 1., 1., 1.][..],
            abs_all <= 1e-7
        );
    }
    // Case 2: hold between frames -> interpolated hold value 0.9.
    {
        let descriptor = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 2.,
        };
        let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());
        let curve = [0., 0.5, 1., 0.5, 0.];
        param.set_value_curve_at_time(&curve[..], 0., 10.);
        param.cancel_and_hold_at_time(4.5);

        let values = render.compute_intrisic_values(0., 1., 10);
        assert_float_eq!(
            values,
            &[0., 0.2, 0.4, 0.6, 0.8, 0.9, 0.9, 0.9, 0.9, 0.9][..],
            abs_all <= 1e-7
        );
    }
}
#[test]
fn test_set_value_curve_at_time_a_rate() {
    // set_value_curve_at_time linearly interpolates the given curve over the
    // duration, then holds the curve's final value.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let descriptor = AudioParamDescriptor {
        automation_rate: AutomationRate::A,
        default_value: 0.,
        min_value: 0.,
        max_value: 10.,
    };
    let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());

    let curve = [0., 0.5, 1., 0.5, 0.];
    // 5 control points stretched over 10 frames -> one point every 2.5 frames.
    param.set_value_curve_at_time(&curve[..], 0., 10.);

    let values = render.compute_intrisic_values(0., 1., 10);
    assert_float_eq!(
        values,
        &[0., 0.2, 0.4, 0.6, 0.8, 1., 0.8, 0.6, 0.4, 0.2][..],
        abs_all <= 1e-7
    );

    // After the curve's duration its last value holds.
    let values = render.compute_intrisic_values(10., 1., 10);
    assert_float_eq!(values, &[0.; 10][..], abs_all <= 0.);
}
#[test]
fn test_set_value_curve_at_time_a_rate_multiple_frames() {
    // Same curve as the single-block test, but stretched over 20 frames so it
    // spans two render blocks.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let descriptor = AudioParamDescriptor {
        automation_rate: AutomationRate::A,
        default_value: 0.,
        min_value: 0.,
        max_value: 10.,
    };
    let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());

    let curve = [0., 0.5, 1., 0.5, 0.];
    param.set_value_curve_at_time(&curve[..], 0., 20.);

    // Rising half of the triangle.
    let values = render.compute_intrisic_values(0., 1., 10);
    assert_float_eq!(
        values,
        &[0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9][..],
        abs_all <= 1e-7
    );
    // Falling half.
    let values = render.compute_intrisic_values(10., 1., 10);
    assert_float_eq!(
        values,
        &[1., 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1][..],
        abs_all <= 1e-7
    );
    // Past the duration: last curve value holds.
    let values = render.compute_intrisic_values(20., 1., 10);
    assert_float_eq!(values, &[0.; 10][..], abs_all <= 0.);
}
#[test]
#[should_panic]
fn test_set_value_curve_at_time_insert_while_another_event() {
    // Scheduling a value curve that overlaps an already-scheduled event must
    // panic (NotSupportedError per the Web Audio spec).
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let descriptor = AudioParamDescriptor {
        automation_rate: AutomationRate::A,
        default_value: 1.,
        min_value: 0.,
        max_value: 1.,
    };
    let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());

    // Event at time 5 falls inside the curve's [0, 10) interval.
    param.set_value_at_time(0.0, 5.);
    let curve = [0., 0.5, 1., 0.5, 0.];
    param.set_value_curve_at_time(&curve[..], 0., 10.);
    let _vs = render.compute_intrisic_values(0., 1., 10);
}
#[test]
#[should_panic]
fn test_set_value_curve_at_time_insert_another_event_inside() {
    // Mirror of the previous test: inserting an event inside an
    // already-scheduled curve's interval must panic as well.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let descriptor = AudioParamDescriptor {
        automation_rate: AutomationRate::A,
        default_value: 1.,
        min_value: 0.,
        max_value: 1.,
    };
    let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());

    let curve = [0., 0.5, 1., 0.5, 0.];
    param.set_value_curve_at_time(&curve[..], 0., 10.);
    // Time 5 lies inside the curve's [0, 10) interval.
    param.set_value_at_time(0.0, 5.);
    let _vs = render.compute_intrisic_values(0., 1., 10);
}
#[test]
fn test_set_value_curve_waits_for_start_time() {
    // Before a curve's start time the parameter keeps its previous value; the
    // interpolation only begins at the scheduled start.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let descriptor = AudioParamDescriptor {
        automation_rate: AutomationRate::A,
        default_value: 0.,
        min_value: 0.,
        max_value: 10.,
    };
    let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());

    let curve = [0., 0.5, 1., 0.5, 0.];
    // Curve starts at frame 5 and runs for 10 frames.
    param.set_value_curve_at_time(&curve[..], 5., 10.);

    let values = render.compute_intrisic_values(0., 1., 10);
    assert_float_eq!(
        values,
        &[0., 0., 0., 0., 0., 0., 0.2, 0.4, 0.6, 0.8][..],
        abs_all <= 0.
    );
}
#[test]
fn test_update_automation_rate_to_k() {
    // Switching an A-rate param to K-rate makes the renderer emit a single
    // value per block (sampled at block start, before the scheduled event).
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let descriptor = AudioParamDescriptor {
        automation_rate: AutomationRate::A,
        default_value: 0.,
        min_value: -10.,
        max_value: 10.,
    };
    let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());

    param.set_value_at_time(2., 0.000001);
    param.set_automation_rate(AutomationRate::K);

    let values = render.compute_intrisic_values(0., 1., 10);
    assert_float_eq!(values, &[0.; 1][..], abs_all <= 0.);
}
#[test]
fn test_update_automation_rate_to_a() {
    // Switching a K-rate param to A-rate makes the renderer emit one value per
    // frame; the event at t ~ 0 applies across the whole block.
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let descriptor = AudioParamDescriptor {
        automation_rate: AutomationRate::K,
        default_value: 0.,
        min_value: -10.,
        max_value: 10.,
    };
    let (param, mut render) = audio_param_pair(descriptor, context.mock_registration());

    param.set_value_at_time(2., 0.000001);
    param.set_automation_rate(AutomationRate::A);

    let values = render.compute_intrisic_values(0., 1., 10);
    assert_float_eq!(values, &[2.; 10][..], abs_all <= 0.);
}
#[test]
fn test_varying_param_size() {
    // The intrinsic-values slice may shrink to length 1 when the param is
    // constant over a block and grow back to full size when automation is
    // active; this must work whether events are scheduled lazily or upfront.
    // Case 1: events scheduled between render calls.
    {
        let context = OfflineAudioContext::new(1, 0, 48000.);
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 10.,
        };
        let (param, mut render) = audio_param_pair(opts, context.mock_registration());
        param.set_value_at_time(0., 0.);
        param.linear_ramp_to_value_at_time(9., 9.);
        // Active ramp: full-size block.
        let vs = render.compute_intrisic_values(0., 1., 10);
        let expected = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9.];
        assert_float_eq!(vs, &expected[..], abs_all <= 0.);
        // No pending events: single-valued block.
        let vs = render.compute_intrisic_values(10., 1., 10);
        let expected = [9.; 1];
        assert_float_eq!(vs, &expected[..], abs_all <= 0.);
        param.set_value_at_time(1., 25.);
        // Event inside the block: back to full size.
        let vs = render.compute_intrisic_values(20., 1., 10);
        let expected = [9., 9., 9., 9., 9., 1., 1., 1., 1., 1.];
        assert_float_eq!(vs, &expected[..], abs_all <= 0.);
        // Constant again: single-valued.
        let vs = render.compute_intrisic_values(30., 1., 10);
        let expected = [1.; 1];
        assert_float_eq!(vs, &expected[..], abs_all <= 0.);
    }
    // Case 2: all events scheduled upfront; same observable sequence.
    {
        let context = OfflineAudioContext::new(1, 0, 48000.);
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 10.,
        };
        let (param, mut render) = audio_param_pair(opts, context.mock_registration());
        param.set_value_at_time(0., 0.);
        param.linear_ramp_to_value_at_time(9., 9.);
        param.set_value_at_time(1., 25.);
        let vs = render.compute_intrisic_values(0., 1., 10);
        let expected = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9.];
        assert_float_eq!(vs, &expected[..], abs_all <= 0.);
        let vs = render.compute_intrisic_values(10., 1., 10);
        let expected = [9.; 1];
        assert_float_eq!(vs, &expected[..], abs_all <= 0.);
        let vs = render.compute_intrisic_values(20., 1., 10);
        let expected = [9., 9., 9., 9., 9., 1., 1., 1., 1., 1.];
        assert_float_eq!(vs, &expected[..], abs_all <= 0.);
        let vs = render.compute_intrisic_values(30., 1., 10);
        let expected = [1.; 1];
        assert_float_eq!(vs, &expected[..], abs_all <= 0.);
    }
}
#[test]
fn test_varying_param_size_modulated() {
    // When an input signal modulates the param, mixing must expand a
    // single-valued intrinsic block to full size iff the input is non-silent.
    let alloc = Alloc::with_capacity(1);
    // Case 1: silent input -> output stays single-valued.
    {
        let context = OfflineAudioContext::new(1, 0, 48000.);
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 10.,
        };
        let (_param, mut render) = audio_param_pair(opts, context.mock_registration());
        let vs = render.compute_intrisic_values(0., 1., 128);
        assert_float_eq!(vs, &[0.; 1][..], abs_all <= 0.);
        let signal = alloc.silence();
        let input = AudioRenderQuantum::from(signal);
        let signal = alloc.silence();
        let mut output = AudioRenderQuantum::from(signal);
        render.mix_to_output(&input, &mut output);
        assert!(output.single_valued());
        assert_float_eq!(output.channel_data(0)[0], 0., abs <= 0.);
    }
    // Case 2: non-silent input -> output is expanded and the modulation
    // signal is added sample-wise.
    {
        let context = OfflineAudioContext::new(1, 0, 48000.);
        let opts = AudioParamDescriptor {
            automation_rate: AutomationRate::A,
            default_value: 0.,
            min_value: 0.,
            max_value: 10.,
        };
        let (_param, mut render) = audio_param_pair(opts, context.mock_registration());
        let vs = render.compute_intrisic_values(0., 1., 128);
        assert_float_eq!(vs, &[0.; 1][..], abs_all <= 0.);
        let signal = alloc.silence();
        let mut input = AudioRenderQuantum::from(signal);
        input.channel_data_mut(0)[0] = 1.;
        let signal = alloc.silence();
        let mut output = AudioRenderQuantum::from(signal);
        render.mix_to_output(&input, &mut output);
        let mut expected = [0.; 128];
        expected[0] = 1.;
        assert!(!output.single_valued());
        assert_float_eq!(output.channel_data(0)[..], &expected[..], abs_all <= 0.);
    }
}
#[test]
fn test_full_render_chain() {
    // End-to-end check: intrinsic values are computed unclamped, then the mix
    // stage adds the modulation input and clamps to [min_value, max_value];
    // NaN samples in the input must collapse to the default value.
    let alloc = Alloc::with_capacity(1);
    let context = OfflineAudioContext::new(1, 0, 48000.);
    let min = 2.;
    let max = 42.;
    let default = 2.;
    let opts = AudioParamDescriptor {
        automation_rate: AutomationRate::A,
        default_value: default,
        min_value: min,
        max_value: max,
    };
    let (param, mut render) = audio_param_pair(opts, context.mock_registration());
    // set_value(128.) is outside [min, max]: intrinsic values stay unclamped.
    param.set_value(128.);
    param.linear_ramp_to_value_at_time(0., 128.);
    let intrisic_values = render.compute_intrisic_values(0., 1., 128);
    let mut expected = [0.; 128];
    for (i, v) in expected.iter_mut().enumerate() {
        *v = 128. - i as f32;
    }
    assert_float_eq!(intrisic_values, &expected[..], abs_all <= 0.);
    // Inject a NaN modulation sample at frame 0.
    let signal = alloc.silence();
    let mut input = AudioRenderQuantum::from(signal);
    input.channel_data_mut(0)[0] = f32::NAN;
    let signal = alloc.silence();
    let mut output = AudioRenderQuantum::from(signal);
    render.mix_to_output(&input, &mut output);
    // Output is clamped to [min, max]; the NaN frame yields the default value.
    expected.iter_mut().for_each(|v| *v = v.clamp(min, max));
    expected[0] = 2.;
    assert_float_eq!(output.channel_data(0)[..], &expected[..], abs_all <= 0.);
}
}