use std::any::Any;
use std::slice::{Iter, IterMut};
use std::sync::atomic::Ordering;
use std::sync::{Arc, Mutex, OnceLock};
use arrayvec::ArrayVec;
use crate::context::AudioContextRegistration;
use crate::node::{
AudioNode, ChannelConfig, ChannelConfigOptions, ChannelCountMode, ChannelInterpretation,
};
use crate::render::{AudioParamValues, AudioProcessor, AudioRenderQuantum, RenderScope};
use crate::{assert_valid_time_value, AtomicF32, RENDER_QUANTUM_SIZE};
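// Threshold under which the difference between a `SetTargetAtTime` value and its target is
// considered negligible; once reached, the automation snaps to the target value
// (cf. `compute_set_target_automation`).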
const SNAP_TO_TARGET: f32 = 1e-10;
#[track_caller]
fn assert_is_finite(value: f32) {
assert!(
value.is_finite(),
"TypeError - The provided value is non-finite."
);
}
#[track_caller]
fn assert_strictly_positive(value: f64) {
assert!(
value.is_finite(),
"TypeError - The provided value is non-finite."
);
assert!(
value > 0.,
"RangeError - duration ({:?}) should be strictly positive",
value
);
}
#[track_caller]
fn assert_not_zero(value: f32) {
assert_is_finite(value);
assert_ne!(
value, 0.,
"RangeError - value ({:?}) should not be equal to zero",
value
);
}
#[track_caller]
fn assert_sequence_length(values: &[f32]) {
assert!(
values.len() >= 2,
"InvalidStateError - sequence length ({:?}) should not be less than 2",
values.len()
);
}
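// Linear ramp interpolation between the previous event value `v0` at `start_time` and the
// target value `v1`, i.e. v(t) = v0 + (v1 - v0) * (t - t0) / (t1 - t0), with
// `diff = v1 - v0` and `duration = t1 - t0` precomputed by the caller.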
#[inline(always)]
fn compute_linear_ramp_sample(
start_time: f64,
duration: f64,
start_value: f32,
diff: f32,
time: f64,
) -> f32 {
let phase = (time - start_time) / duration;
diff.mul_add(phase as f32, start_value)
}
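// Exponential ramp interpolation, i.e. v(t) = v0 * (v1 / v0)^((t - t0) / (t1 - t0)), with
// `ratio = v1 / v0` precomputed by the caller. The caller guarantees that start and end
// values are non-zero and of the same sign.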
#[inline(always)]
fn compute_exponential_ramp_sample(
start_time: f64,
duration: f64,
start_value: f32,
ratio: f32,
time: f64,
) -> f32 {
let phase = (time - start_time) / duration;
start_value * ratio.powf(phase as f32)
}
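// Exponential approach towards `end_value`, i.e. v(t) = v1 + (v0 - v1) * exp(-(t - t0) / tau),
// with `diff = v0 - v1` precomputed by the caller and `tau` the time constant.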
#[inline(always)]
fn compute_set_target_sample(
start_time: f64,
time_constant: f64,
end_value: f32,
diff: f32,
time: f64,
) -> f32 {
let exponent = -1. * ((time - start_time) / time_constant);
diff.mul_add(exponent.exp() as f32, end_value)
}
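// Piecewise linear interpolation over the user-provided curve: the curve spans
// `[start_time, start_time + duration]`, and any time past the end of the curve holds the
// last point.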
#[inline(always)]
fn compute_set_value_curve_sample(
start_time: f64,
duration: f64,
values: &[f32],
time: f64,
) -> f32 {
if time - start_time >= duration {
return values[values.len() - 1];
}
let position = (values.len() - 1) as f64 * (time - start_time) / duration;
let k = position as usize;
let phase = (position - position.floor()) as f32;
(values[k + 1] - values[k]).mul_add(phase, values[k])
}
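/// Precision of the [`AudioParam`] value computation per render quantum: `A` (a-rate)
/// computes one value per sample frame, `K` (k-rate) computes a single value for the
/// whole render quantum.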
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum AutomationRate {
A,
K,
}
impl AutomationRate {
fn is_a_rate(self) -> bool {
match self {
AutomationRate::A => true,
AutomationRate::K => false,
}
}
}
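/// Options for constructing an [`AudioParam`]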
#[derive(Clone, Debug)]
pub struct AudioParamDescriptor {
pub name: String,
pub automation_rate: AutomationRate,
pub default_value: f32,
pub min_value: f32,
pub max_value: f32,
}
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
enum AudioParamEventType {
SetValue,
SetValueAtTime,
LinearRampToValueAtTime,
ExponentialRampToValueAtTime,
CancelScheduledValues,
SetTargetAtTime,
CancelAndHoldAtTime,
SetValueCurveAtTime,
}
#[derive(Debug)]
pub(crate) struct AudioParamEvent {
event_type: AudioParamEventType,
value: f32,
time: f64,
time_constant: Option<f64>,
cancel_time: Option<f64>,
duration: Option<f64>,
values: Option<Box<[f32]>>,
}
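// Time-ordered list of automation events; the `dirty` flag guards against peeking into an
// unsorted queue.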
#[derive(Debug, Default)]
struct AudioParamEventTimeline {
inner: Vec<AudioParamEvent>,
dirty: bool,
}
impl AudioParamEventTimeline {
fn new() -> Self {
Self {
inner: Vec::with_capacity(32),
dirty: false,
}
}
fn push(&mut self, item: AudioParamEvent) {
self.dirty = true;
self.inner.push(item);
}
fn pop(&mut self) -> Option<AudioParamEvent> {
if !self.inner.is_empty() {
Some(self.inner.remove(0))
} else {
None
}
}
fn retain<F>(&mut self, func: F)
where
F: Fn(&AudioParamEvent) -> bool,
{
self.inner.retain(func);
}
fn replace_peek(&mut self, item: AudioParamEvent) {
self.inner[0] = item;
}
fn is_empty(&self) -> bool {
self.inner.is_empty()
}
fn unsorted_peek(&self) -> Option<&AudioParamEvent> {
self.inner.first()
}
fn peek(&self) -> Option<&AudioParamEvent> {
assert!(
!self.dirty,
"`AudioParamEventTimeline`: Invalid `.peek()` call, the queue is dirty"
);
self.inner.first()
}
fn next(&self) -> Option<&AudioParamEvent> {
assert!(
!self.dirty,
"`AudioParamEventTimeline`: Invalid `.next()` call, the queue is dirty"
);
self.inner.get(1)
}
fn sort(&mut self) {
self.inner
.sort_by(|a, b| a.time.partial_cmp(&b.time).unwrap());
self.dirty = false;
}
fn iter(&mut self) -> Iter<'_, AudioParamEvent> {
self.inner.iter()
}
fn iter_mut(&mut self) -> IterMut<'_, AudioParamEvent> {
self.inner.iter_mut()
}
}
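/// Handle to an audio-rate parameter that can be scheduled from the control thread.
///
/// All automation methods return `&Self` so that calls can be chained. A minimal
/// scheduling sketch, assuming `param` is an `AudioParam` borrowed from some node and
/// `t` is the current context time (both obtained elsewhere):
///
/// ```ignore
/// param
///     .set_value_at_time(0.0, t)
///     .linear_ramp_to_value_at_time(1.0, t + 1.0)
///     .exponential_ramp_to_value_at_time(0.001, t + 2.0);
/// ```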
#[derive(Clone)]
pub struct AudioParam {
registration: Arc<AudioContextRegistration>,
raw_parts: AudioParamInner,
}
#[derive(Clone)]
pub(crate) struct AudioParamInner {
default_value: f32,
min_value: f32,
max_value: f32,
automation_rate_constrained: bool,
automation_rate: Arc<Mutex<AutomationRate>>,
current_value: Arc<AtomicF32>,
}
impl AudioNode for AudioParam {
fn registration(&self) -> &AudioContextRegistration {
&self.registration
}
fn channel_config(&self) -> &'static ChannelConfig {
static INSTANCE: OnceLock<ChannelConfig> = OnceLock::new();
INSTANCE.get_or_init(|| {
ChannelConfigOptions {
count: 1,
count_mode: ChannelCountMode::Explicit,
interpretation: ChannelInterpretation::Discrete,
}
.into()
})
}
fn number_of_inputs(&self) -> usize {
1
}
fn number_of_outputs(&self) -> usize {
1
}
fn set_channel_count(&self, _v: usize) {
panic!("NotSupportedError - AudioParam has channel count constraints");
}
fn set_channel_count_mode(&self, _v: ChannelCountMode) {
panic!("NotSupportedError - AudioParam has channel count mode constraints");
}
fn set_channel_interpretation(&self, _v: ChannelInterpretation) {
panic!("NotSupportedError - AudioParam has channel interpretation constraints");
}
}
impl AudioParam {
#[allow(clippy::missing_panics_doc)]
pub fn automation_rate(&self) -> AutomationRate {
*self.raw_parts.automation_rate.lock().unwrap()
}
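/// Change the automation rate of this `AudioParam`.
///
/// # Panics
///
/// Panics (`InvalidStateError`) if the automation rate is constrained for this param,
/// cf. `set_automation_rate_constrained`.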
pub fn set_automation_rate(&self, value: AutomationRate) {
assert!(
!self.raw_parts.automation_rate_constrained || value == self.automation_rate(),
"InvalidStateError - automation rate cannot be changed for this param"
);
let mut guard = self.raw_parts.automation_rate.lock().unwrap();
*guard = value;
self.registration().post_message(value);
drop(guard);
}
pub(crate) fn set_automation_rate_constrained(&mut self, value: bool) {
self.raw_parts.automation_rate_constrained = value;
}
pub fn default_value(&self) -> f32 {
self.raw_parts.default_value
}
pub fn min_value(&self) -> f32 {
self.raw_parts.min_value
}
pub fn max_value(&self) -> f32 {
self.raw_parts.max_value
}
pub fn value(&self) -> f32 {
self.raw_parts.current_value.load(Ordering::Acquire)
}
pub fn set_value(&self, value: f32) -> &Self {
self.send_event(self.set_value_raw(value))
}
fn set_value_raw(&self, value: f32) -> AudioParamEvent {
assert_is_finite(value);
let clamped = value.clamp(self.raw_parts.min_value, self.raw_parts.max_value);
self.raw_parts
.current_value
.store(clamped, Ordering::Release);
AudioParamEvent {
event_type: AudioParamEventType::SetValue,
value,
time: 0.,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
}
}
pub fn set_value_at_time(&self, value: f32, start_time: f64) -> &Self {
self.send_event(self.set_value_at_time_raw(value, start_time))
}
fn set_value_at_time_raw(&self, value: f32, start_time: f64) -> AudioParamEvent {
assert_is_finite(value);
assert_valid_time_value(start_time);
AudioParamEvent {
event_type: AudioParamEventType::SetValueAtTime,
value,
time: start_time,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
}
}
pub fn linear_ramp_to_value_at_time(&self, value: f32, end_time: f64) -> &Self {
self.send_event(self.linear_ramp_to_value_at_time_raw(value, end_time))
}
fn linear_ramp_to_value_at_time_raw(&self, value: f32, end_time: f64) -> AudioParamEvent {
assert_is_finite(value);
assert_valid_time_value(end_time);
AudioParamEvent {
event_type: AudioParamEventType::LinearRampToValueAtTime,
value,
time: end_time,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
}
}
pub fn exponential_ramp_to_value_at_time(&self, value: f32, end_time: f64) -> &Self {
self.send_event(self.exponential_ramp_to_value_at_time_raw(value, end_time))
}
fn exponential_ramp_to_value_at_time_raw(&self, value: f32, end_time: f64) -> AudioParamEvent {
assert_not_zero(value);
assert_valid_time_value(end_time);
AudioParamEvent {
event_type: AudioParamEventType::ExponentialRampToValueAtTime,
value,
time: end_time,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
}
}
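/// Schedule an exponential approach to `value`, starting at `start_time` with the given
/// `time_constant` (cf. `compute_set_target_sample`). A `time_constant` of zero
/// degenerates to `set_value_at_time(value, start_time)`.
///
/// # Panics
///
/// Panics if `value` is not finite or if `start_time` or `time_constant` is not a valid
/// time value (cf. `assert_valid_time_value`).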
pub fn set_target_at_time(&self, value: f32, start_time: f64, time_constant: f64) -> &Self {
self.send_event(self.set_target_at_time_raw(value, start_time, time_constant))
}
fn set_target_at_time_raw(
&self,
value: f32,
start_time: f64,
time_constant: f64,
) -> AudioParamEvent {
assert_is_finite(value);
assert_valid_time_value(start_time);
assert_valid_time_value(time_constant);
if time_constant == 0. {
AudioParamEvent {
event_type: AudioParamEventType::SetValueAtTime,
value,
time: start_time,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
}
} else {
AudioParamEvent {
event_type: AudioParamEventType::SetTargetAtTime,
value,
time: start_time,
time_constant: Some(time_constant),
cancel_time: None,
duration: None,
values: None,
}
}
}
pub fn cancel_scheduled_values(&self, cancel_time: f64) -> &Self {
self.send_event(self.cancel_scheduled_values_raw(cancel_time))
}
fn cancel_scheduled_values_raw(&self, cancel_time: f64) -> AudioParamEvent {
assert_valid_time_value(cancel_time);
AudioParamEvent {
event_type: AudioParamEventType::CancelScheduledValues,
value: 0.,
time: cancel_time,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
}
}
pub fn cancel_and_hold_at_time(&self, cancel_time: f64) -> &Self {
self.send_event(self.cancel_and_hold_at_time_raw(cancel_time))
}
fn cancel_and_hold_at_time_raw(&self, cancel_time: f64) -> AudioParamEvent {
assert_valid_time_value(cancel_time);
AudioParamEvent {
event_type: AudioParamEventType::CancelAndHoldAtTime,
value: 0.,
time: cancel_time,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
}
}
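/// Schedule a curve of values to be applied over `duration` seconds starting at
/// `start_time`; the curve is copied internally and linearly interpolated between points.
///
/// # Panics
///
/// Panics if `values` has fewer than 2 entries, if `start_time` is not a valid time
/// value, or if `duration` is not strictly positive.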
pub fn set_value_curve_at_time(&self, values: &[f32], start_time: f64, duration: f64) -> &Self {
self.send_event(self.set_value_curve_at_time_raw(values, start_time, duration))
}
fn set_value_curve_at_time_raw(
&self,
values: &[f32],
start_time: f64,
duration: f64,
) -> AudioParamEvent {
assert_sequence_length(values);
assert_valid_time_value(start_time);
assert_strictly_positive(duration);
let copy = values.to_vec();
let boxed_copy = copy.into_boxed_slice();
AudioParamEvent {
event_type: AudioParamEventType::SetValueCurveAtTime,
value: 0.,
time: start_time,
time_constant: None,
cancel_time: None,
duration: Some(duration),
values: Some(boxed_copy),
}
}
pub(crate) fn into_raw_parts(self) -> AudioParamInner {
let Self {
registration: _,
raw_parts,
} = self;
raw_parts
}
pub(crate) fn from_raw_parts(
registration: AudioContextRegistration,
raw_parts: AudioParamInner,
) -> Self {
Self {
registration: registration.into(),
raw_parts,
}
}
fn send_event(&self, event: AudioParamEvent) -> &Self {
self.registration().post_message(event);
self
}
}
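// Per-block constants passed to the automation computation helpers.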
struct BlockInfos {
block_time: f64,
dt: f64,
count: usize,
is_a_rate: bool,
next_block_time: f64,
}
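/// Rendering-thread counterpart of an [`AudioParam`]; computes the intrinsic values of
/// each render quantum from the scheduled automation events.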
#[derive(Debug)]
pub(crate) struct AudioParamProcessor {
default_value: f32,
min_value: f32,
max_value: f32,
intrinsic_value: f32,
automation_rate: AutomationRate,
current_value: Arc<AtomicF32>,
event_timeline: AudioParamEventTimeline,
last_event: Option<AudioParamEvent>,
buffer: ArrayVec<f32, RENDER_QUANTUM_SIZE>,
}
impl AudioProcessor for AudioParamProcessor {
fn process(
&mut self,
inputs: &[AudioRenderQuantum],
outputs: &mut [AudioRenderQuantum],
_params: AudioParamValues<'_>,
scope: &RenderScope,
) -> bool {
let period = 1. / scope.sample_rate as f64;
let input = &inputs[0];
let output = &mut outputs[0];
self.compute_intrinsic_values(scope.current_time, period, RENDER_QUANTUM_SIZE);
self.mix_to_output(input, output);
true
}
fn onmessage(&mut self, msg: &mut dyn Any) {
if let Some(automation_rate) = msg.downcast_ref::<AutomationRate>() {
self.automation_rate = *automation_rate;
return;
}
if let Some(event) = msg.downcast_mut::<AudioParamEvent>() {
let tombstone_event = AudioParamEvent {
event_type: AudioParamEventType::SetValue,
value: Default::default(),
time: Default::default(),
time_constant: None,
cancel_time: None,
duration: None,
values: None,
};
let event = std::mem::replace(event, tombstone_event);
self.handle_incoming_event(event);
return;
};
log::warn!("AudioParamProcessor: Dropping incoming message {msg:?}");
}
}
impl AudioParamProcessor {
fn compute_intrinsic_values(&mut self, block_time: f64, dt: f64, count: usize) -> &[f32] {
self.compute_buffer(block_time, dt, count);
self.buffer.as_slice()
}
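// Mix the computed intrinsic values with the input signal (values fed by other nodes
// connected to this param). When the intrinsic buffer holds a single value and the input
// is silent, the output is marked as single valued; otherwise the intrinsic values are
// added sample-wise to the input. NaNs are replaced by the default value and the result
// is clamped to the param's nominal range.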
fn mix_to_output(&mut self, input: &AudioRenderQuantum, output: &mut AudioRenderQuantum) {
#[cfg(test)]
assert!(self.buffer.len() == 1 || self.buffer.len() == RENDER_QUANTUM_SIZE);
if self.buffer.len() == 1 && input.is_silent() {
let mut value = self.buffer[0];
if value.is_nan() {
value = self.default_value;
}
output.set_single_valued(true);
let output_channel = output.channel_data_mut(0);
output_channel[0] = value.clamp(self.min_value, self.max_value);
} else {
*output = input.clone();
output.set_single_valued(false);
output
.channel_data_mut(0)
.iter_mut()
.zip(self.buffer.iter().cycle())
.for_each(|(o, p)| {
*o += p;
if o.is_nan() {
*o = self.default_value;
}
*o = o.clamp(self.min_value, self.max_value)
});
}
}
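// Insert an incoming automation event into the timeline, applying the special handling
// required by `CancelScheduledValues`, `CancelAndHoldAtTime` and `SetValueCurveAtTime`
// (overlap checks), and inserting an implicit `SetValue` event when a ramp or
// `SetTargetAtTime` is scheduled on an empty timeline.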
fn handle_incoming_event(&mut self, event: AudioParamEvent) {
if event.event_type == AudioParamEventType::CancelScheduledValues {
let some_current_event = self.event_timeline.unsorted_peek();
match some_current_event {
None => (),
Some(current_event) => {
match current_event.event_type {
AudioParamEventType::LinearRampToValueAtTime
| AudioParamEventType::ExponentialRampToValueAtTime => {
if current_event.time >= event.time {
let last_event = self.last_event.as_ref().unwrap();
self.intrinsic_value = last_event.value;
}
}
_ => (),
}
}
}
self.event_timeline
.retain(|queued| queued.time < event.time);
return;
}
if event.event_type == AudioParamEventType::CancelAndHoldAtTime {
let mut e1: Option<&mut AudioParamEvent> = None;
let mut e2: Option<&mut AudioParamEvent> = None;
let mut t1 = f64::MIN;
let mut t2 = f64::MAX;
self.event_timeline.sort();
for queued in self.event_timeline.iter_mut() {
if queued.time >= t1 && queued.time <= event.time {
t1 = queued.time;
e1 = Some(queued);
} else if queued.time < t2 && queued.time > event.time {
t2 = queued.time;
e2 = Some(queued);
}
}
if let Some(matched) = e2 {
if matched.event_type == AudioParamEventType::LinearRampToValueAtTime
|| matched.event_type == AudioParamEventType::ExponentialRampToValueAtTime
{
matched.cancel_time = Some(event.time);
}
} else if let Some(matched) = e1 {
if matched.event_type == AudioParamEventType::SetTargetAtTime {
matched.cancel_time = Some(event.time);
} else if matched.event_type == AudioParamEventType::SetValueCurveAtTime {
let start_time = matched.time;
let duration = matched.duration.unwrap();
if event.time <= start_time + duration {
matched.cancel_time = Some(event.time);
}
}
}
self.event_timeline.retain(|queued| {
let mut time = queued.time;
if let Some(cancel_time) = queued.cancel_time {
time = cancel_time;
}
time <= event.time
});
return;
}
if event.event_type == AudioParamEventType::SetValueCurveAtTime {
let start_time = event.time;
let end_time = start_time + event.duration.unwrap();
for queued in self.event_timeline.iter() {
assert!(
queued.time <= start_time || queued.time >= end_time,
"NotSupportedError - scheduling SetValueCurveAtTime ({:?}) at time of another automation event ({:?})",
event, queued,
);
}
}
if event.event_type == AudioParamEventType::SetValueAtTime
|| event.event_type == AudioParamEventType::SetValue
|| event.event_type == AudioParamEventType::LinearRampToValueAtTime
|| event.event_type == AudioParamEventType::ExponentialRampToValueAtTime
|| event.event_type == AudioParamEventType::SetTargetAtTime
{
for queued in self.event_timeline.iter() {
if queued.event_type == AudioParamEventType::SetValueCurveAtTime {
let start_time = queued.time;
let end_time = start_time + queued.duration.unwrap();
assert!(
event.time <= start_time || event.time >= end_time,
"NotSupportedError - scheduling automation event ({:?}) during SetValueCurveAtTime ({:?})",
event, queued,
);
}
}
}
if event.event_type == AudioParamEventType::SetValue {
self.intrinsic_value = event.value;
}
if self.event_timeline.is_empty()
&& self.last_event.is_none()
&& (event.event_type == AudioParamEventType::LinearRampToValueAtTime
|| event.event_type == AudioParamEventType::ExponentialRampToValueAtTime)
{
let set_value_event = AudioParamEvent {
event_type: AudioParamEventType::SetValue,
value: self.intrinsic_value,
time: 0.,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
};
self.event_timeline.push(set_value_event);
}
if self.event_timeline.is_empty()
&& event.event_type == AudioParamEventType::SetTargetAtTime
{
let set_value_event = AudioParamEvent {
event_type: AudioParamEventType::SetValue,
value: self.intrinsic_value,
time: 0.,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
};
self.event_timeline.push(set_value_event);
}
self.event_timeline.push(event);
self.event_timeline.sort();
}
fn compute_set_value_automation(&mut self, infos: &BlockInfos) -> bool {
let event = self.event_timeline.peek().unwrap();
let mut time = event.time;
if time == 0. {
time = infos.block_time;
}
if infos.is_a_rate {
let end_index = ((time - infos.block_time).max(0.) / infos.dt) as usize;
let end_index_clipped = end_index.min(infos.count);
for _ in self.buffer.len()..end_index_clipped {
self.buffer.push(self.intrinsic_value);
}
}
if time > infos.next_block_time {
return true;
}
self.intrinsic_value = event.value;
#[allow(clippy::float_cmp)]
if time != event.time {
let mut event = self.event_timeline.pop().unwrap();
event.time = time;
self.last_event = Some(event);
} else {
self.last_event = self.event_timeline.pop();
}
false
}
fn compute_linear_ramp_automation(&mut self, infos: &BlockInfos) -> bool {
let event = self.event_timeline.peek().unwrap();
let last_event = self.last_event.as_ref().unwrap();
let start_time = last_event.time;
let mut end_time = event.time;
let duration = end_time - start_time;
if let Some(cancel_time) = event.cancel_time {
end_time = cancel_time;
}
let start_value = last_event.value;
let end_value = event.value;
let diff = end_value - start_value;
if infos.is_a_rate {
let start_index = self.buffer.len();
let end_index = ((end_time - infos.block_time).max(0.) / infos.dt).ceil() as usize;
let end_index_clipped = end_index.min(infos.count);
if end_index_clipped > start_index {
let mut time = (start_index as f64).mul_add(infos.dt, infos.block_time);
for _ in start_index..end_index_clipped {
let value =
compute_linear_ramp_sample(start_time, duration, start_value, diff, time);
self.buffer.push(value);
time += infos.dt;
self.intrinsic_value = value;
}
}
}
if end_time >= infos.next_block_time {
let value = compute_linear_ramp_sample(
start_time,
duration,
start_value,
diff,
infos.next_block_time,
);
self.intrinsic_value = value;
return true;
}
if event.cancel_time.is_some() {
let value =
compute_linear_ramp_sample(start_time, duration, start_value, diff, end_time);
self.intrinsic_value = value;
let mut last_event = self.event_timeline.pop().unwrap();
last_event.time = end_time;
last_event.value = value;
self.last_event = Some(last_event);
} else {
self.intrinsic_value = end_value;
self.last_event = self.event_timeline.pop();
}
false
}
fn compute_exponential_ramp_automation(&mut self, infos: &BlockInfos) -> bool {
let event = self.event_timeline.peek().unwrap();
let last_event = self.last_event.as_ref().unwrap();
let start_time = last_event.time;
let mut end_time = event.time;
let duration = end_time - start_time;
if let Some(cancel_time) = event.cancel_time {
end_time = cancel_time;
}
let start_value = last_event.value;
let end_value = event.value;
let ratio = end_value / start_value;
if start_value == 0. || start_value * end_value < 0. {
let event = AudioParamEvent {
event_type: AudioParamEventType::SetValueAtTime,
time: end_time,
value: end_value,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
};
self.event_timeline.replace_peek(event);
return false;
}
if infos.is_a_rate {
let start_index = self.buffer.len();
let end_index = ((end_time - infos.block_time).max(0.) / infos.dt).ceil() as usize;
let end_index_clipped = end_index.min(infos.count);
if end_index_clipped > start_index {
let mut time = (start_index as f64).mul_add(infos.dt, infos.block_time);
for _ in start_index..end_index_clipped {
let value = compute_exponential_ramp_sample(
start_time,
duration,
start_value,
ratio,
time,
);
self.buffer.push(value);
self.intrinsic_value = value;
time += infos.dt;
}
}
}
if end_time >= infos.next_block_time {
let value = compute_exponential_ramp_sample(
start_time,
duration,
start_value,
ratio,
infos.next_block_time,
);
self.intrinsic_value = value;
return true;
}
if event.cancel_time.is_some() {
let value =
compute_exponential_ramp_sample(start_time, duration, start_value, ratio, end_time);
self.intrinsic_value = value;
let mut last_event = self.event_timeline.pop().unwrap();
last_event.time = end_time;
last_event.value = value;
self.last_event = Some(last_event);
} else {
self.intrinsic_value = end_value;
self.last_event = self.event_timeline.pop();
}
false
}
fn compute_set_target_automation(&mut self, infos: &BlockInfos) -> bool {
let event = self.event_timeline.peek().unwrap();
let mut end_time = infos.next_block_time;
let mut ended = false;
let some_next_event = self.event_timeline.next();
if let Some(next_event) = some_next_event {
match next_event.event_type {
AudioParamEventType::LinearRampToValueAtTime
| AudioParamEventType::ExponentialRampToValueAtTime => {
end_time = infos.block_time;
ended = true;
}
_ => {
if next_event.time < infos.next_block_time {
end_time = next_event.time;
ended = true;
}
}
}
}
if let Some(cancel_time) = event.cancel_time {
if cancel_time < infos.next_block_time {
end_time = cancel_time;
ended = true;
}
}
let start_time = event.time;
let start_value = self.last_event.as_ref().unwrap().value;
let end_value = event.value;
let diff = start_value - end_value;
let time_constant = event.time_constant.unwrap();
if infos.is_a_rate {
let start_index = self.buffer.len();
let end_index = ((end_time - infos.block_time).max(0.) / infos.dt).ceil() as usize;
let end_index_clipped = end_index.min(infos.count);
if end_index_clipped > start_index {
let mut time = (start_index as f64).mul_add(infos.dt, infos.block_time);
for _ in start_index..end_index_clipped {
let value = if time - start_time < 0. {
self.intrinsic_value
} else {
compute_set_target_sample(start_time, time_constant, end_value, diff, time)
};
self.buffer.push(value);
self.intrinsic_value = value;
time += infos.dt;
}
}
}
if !ended {
let value = compute_set_target_sample(
start_time,
time_constant,
end_value,
diff,
infos.next_block_time,
);
let diff = (end_value - value).abs();
if diff < SNAP_TO_TARGET {
self.intrinsic_value = end_value;
if end_value == 0. {
for v in self.buffer.iter_mut() {
if v.is_subnormal() {
*v = 0.;
}
}
}
let event = AudioParamEvent {
event_type: AudioParamEventType::SetValueAtTime,
time: infos.next_block_time,
value: end_value,
time_constant: None,
cancel_time: None,
duration: None,
values: None,
};
self.event_timeline.replace_peek(event);
} else {
self.intrinsic_value = value;
}
return true;
}
let value = compute_set_target_sample(start_time, time_constant, end_value, diff, end_time);
self.intrinsic_value = value;
let mut event = self.event_timeline.pop().unwrap();
event.time = end_time;
event.value = value;
self.last_event = Some(event);
false
}
fn compute_set_value_curve_automation(&mut self, infos: &BlockInfos) -> bool {
let event = self.event_timeline.peek().unwrap();
let start_time = event.time;
let duration = event.duration.unwrap();
let values = event.values.as_ref().unwrap();
let mut end_time = start_time + duration;
if let Some(cancel_time) = event.cancel_time {
end_time = cancel_time;
}
if infos.is_a_rate {
let start_index = self.buffer.len();
let end_index = ((end_time - infos.block_time).max(0.) / infos.dt).ceil() as usize;
let end_index_clipped = end_index.min(infos.count);
if end_index_clipped > start_index {
let mut time = (start_index as f64).mul_add(infos.dt, infos.block_time);
for _ in start_index..end_index_clipped {
let value = if time - start_time < 0. {
self.intrinsic_value
} else {
compute_set_value_curve_sample(start_time, duration, values, time)
};
self.buffer.push(value);
self.intrinsic_value = value;
time += infos.dt;
}
}
}
if end_time >= infos.next_block_time {
let value =
compute_set_value_curve_sample(start_time, duration, values, infos.next_block_time);
self.intrinsic_value = value;
return true;
}
if event.cancel_time.is_some() {
let value = compute_set_value_curve_sample(start_time, duration, values, end_time);
self.intrinsic_value = value;
let mut last_event = self.event_timeline.pop().unwrap();
last_event.time = end_time;
last_event.value = value;
self.last_event = Some(last_event);
} else {
let value = values[values.len() - 1];
let mut last_event = self.event_timeline.pop().unwrap();
last_event.time = end_time;
last_event.value = value;
self.intrinsic_value = value;
self.last_event = Some(last_event);
}
false
}
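// Fill `self.buffer` with the intrinsic values for the current render quantum: a single
// value for k-rate params (or when no automation is active in this block), or `count`
// values for a-rate params, consuming the event timeline one event type at a time.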
fn compute_buffer(&mut self, block_time: f64, dt: f64, count: usize) {
let clamped = self.intrinsic_value.clamp(self.min_value, self.max_value);
self.current_value.store(clamped, Ordering::Release);
self.buffer.clear();
let is_a_rate = self.automation_rate.is_a_rate();
let next_block_time = dt.mul_add(count as f64, block_time);
let is_constant_block = match self.event_timeline.peek() {
None => true,
Some(event) => {
if event.event_type != AudioParamEventType::LinearRampToValueAtTime
&& event.event_type != AudioParamEventType::ExponentialRampToValueAtTime
{
event.time >= next_block_time
} else {
false
}
}
};
if !is_a_rate || is_constant_block {
self.buffer.push(self.intrinsic_value);
if is_constant_block {
return;
}
}
let block_infos = BlockInfos {
block_time,
dt,
count,
is_a_rate,
next_block_time,
};
loop {
let next_event_type = self.event_timeline.peek().map(|e| e.event_type);
let exit_loop = match next_event_type {
None => {
if is_a_rate {
for _ in self.buffer.len()..count {
self.buffer.push(self.intrinsic_value);
}
}
true
}
Some(AudioParamEventType::SetValue) | Some(AudioParamEventType::SetValueAtTime) => {
self.compute_set_value_automation(&block_infos)
}
Some(AudioParamEventType::LinearRampToValueAtTime) => {
self.compute_linear_ramp_automation(&block_infos)
}
Some(AudioParamEventType::ExponentialRampToValueAtTime) => {
self.compute_exponential_ramp_automation(&block_infos)
}
Some(AudioParamEventType::SetTargetAtTime) => {
self.compute_set_target_automation(&block_infos)
}
Some(AudioParamEventType::SetValueCurveAtTime) => {
self.compute_set_value_curve_automation(&block_infos)
}
_ => panic!(
"AudioParamEvent {:?} should not appear in AudioParamEventTimeline",
next_event_type.unwrap()
),
};
if exit_loop {
break;
}
}
}
}
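/// Build an `(AudioParam, AudioParamProcessor)` pair from a descriptor: the former lives
/// on the control thread, the latter on the render thread; both share the observable
/// `current_value`.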
pub(crate) fn audio_param_pair(
descriptor: AudioParamDescriptor,
registration: AudioContextRegistration,
) -> (AudioParam, AudioParamProcessor) {
let AudioParamDescriptor {
automation_rate,
default_value,
max_value,
min_value,
..
} = descriptor;
let current_value = Arc::new(AtomicF32::new(default_value));
let param = AudioParam {
registration: registration.into(),
raw_parts: AudioParamInner {
default_value,
max_value,
min_value,
automation_rate_constrained: false,
automation_rate: Arc::new(Mutex::new(automation_rate)),
current_value: Arc::clone(&current_value),
},
};
let processor = AudioParamProcessor {
intrinsic_value: default_value,
current_value,
default_value,
min_value,
max_value,
automation_rate,
event_timeline: AudioParamEventTimeline::new(),
last_event: None,
buffer: ArrayVec::new(),
};
(param, processor)
}
#[cfg(test)]
mod tests {
use float_eq::assert_float_eq;
use crate::context::{BaseAudioContext, OfflineAudioContext};
use crate::render::Alloc;
use super::*;
#[test]
#[should_panic]
fn test_assert_strictly_positive_fail() {
assert_strictly_positive(0.);
}
#[test]
fn test_assert_strictly_positive() {
assert_strictly_positive(0.1);
}
#[test]
#[should_panic]
fn test_assert_not_zero_fail() {
assert_not_zero(0.);
}
#[test]
fn test_assert_not_zero() {
assert_not_zero(-0.1);
assert_not_zero(0.1);
}
#[test]
#[should_panic]
fn test_assert_sequence_length_fail() {
assert_sequence_length(&[0.; 1]);
}
#[test]
fn test_assert_sequence_length() {
assert_sequence_length(&[0.; 2]);
}
#[test]
fn test_default_and_accessors() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, _render) = audio_param_pair(opts, context.mock_registration());
assert_eq!(param.automation_rate(), AutomationRate::A);
assert_float_eq!(param.default_value(), 0., abs_all <= 0.);
assert_float_eq!(param.min_value(), -10., abs_all <= 0.);
assert_float_eq!(param.max_value(), 10., abs_all <= 0.);
assert_float_eq!(param.value(), 0., abs_all <= 0.);
}
#[test]
fn test_automation_rate_synchronicity_on_control_thread() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 1.,
};
let (param, _render) = audio_param_pair(opts, context.mock_registration());
param.set_automation_rate(AutomationRate::K);
assert_eq!(param.automation_rate(), AutomationRate::K);
}
#[test]
fn test_audioparam_clones_in_sync() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param1, mut render) = audio_param_pair(opts, context.mock_registration());
let param2 = param1.clone();
param1.set_automation_rate(AutomationRate::K);
assert_eq!(param2.automation_rate(), AutomationRate::K);
render.handle_incoming_event(param1.set_value_raw(2.));
assert_float_eq!(param1.value(), 2., abs_all <= 0.);
assert_float_eq!(param2.value(), 2., abs_all <= 0.);
render.handle_incoming_event(param2.set_value_raw(3.));
assert_float_eq!(param1.value(), 3., abs_all <= 0.);
assert_float_eq!(param2.value(), 3., abs_all <= 0.);
}
#[test]
fn test_set_value() {
{
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_raw(2.));
assert_float_eq!(param.value(), 2., abs_all <= 0.);
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(param.value(), 2., abs_all <= 0.);
assert_float_eq!(vs, &[2.; 10][..], abs_all <= 0.);
}
{
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 1.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_raw(2.));
assert_float_eq!(param.value(), 1., abs_all <= 0.);
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(param.value(), 1., abs_all <= 0.);
assert_float_eq!(vs, &[2.; 10][..], abs_all <= 0.);
}
}
#[test]
fn test_steps_a_rate() {
let context = OfflineAudioContext::new(1, 0, 48000.);
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(5., 2.0));
render.handle_incoming_event(param.set_value_at_time_raw(12., 8.0));
render.handle_incoming_event(param.set_value_at_time_raw(8., 10.0));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 0., 5., 5., 5., 5., 5., 5., 12., 12.][..],
abs_all <= 0.
);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &[8.; 1][..], abs_all <= 0.);
}
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(5., 2.0));
render.handle_incoming_event(param.set_value_at_time_raw(8., 12.0));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 0., 5., 5., 5., 5., 5., 5., 5., 5.][..],
abs_all <= 0.
);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(
vs,
&[5., 5., 8., 8., 8., 8., 8., 8., 8., 8.][..],
abs_all <= 0.
);
}
}
#[test]
fn test_steps_k_rate() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::K,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(5., 2.0));
render.handle_incoming_event(param.set_value_at_time_raw(12., 8.0));
render.handle_incoming_event(param.set_value_at_time_raw(8., 10.0));
render.handle_incoming_event(param.set_value_at_time_raw(3., 14.0));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &[0.; 1][..], abs_all <= 0.);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &[8.; 1][..], abs_all <= 0.);
let vs = render.compute_intrinsic_values(20., 1., 10);
assert_float_eq!(vs, &[3.; 1][..], abs_all <= 0.);
}
#[test]
fn test_linear_ramp_arate() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(5., 2.0));
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(8.0, 5.0));
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(0., 13.0));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 0., 5., 6., 7., 8., 7., 6., 5., 4.][..],
abs_all <= 0.
);
}
#[test]
fn test_linear_ramp_arate_end_of_block() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(0., 0.));
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(9.0, 9.0));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.][..],
abs_all <= 0.
);
}
#[test]
fn test_linear_ramp_arate_implicit_set_value() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &[0.; 1][..], abs_all <= 0.);
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(10.0, 20.0));
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(
vs,
&[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.][..],
abs_all <= 0.
);
let vs = render.compute_intrinsic_values(20., 1., 10);
assert_float_eq!(vs, &[10.; 10][..], abs_all <= 0.);
}
#[test]
fn test_linear_ramp_arate_multiple_blocks() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -20.,
max_value: 20.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(20.0, 20.0));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.][..],
abs_all <= 0.
);
assert_float_eq!(param.value(), 0., abs <= 0.);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(
vs,
&[10., 11., 12., 13., 14., 15., 16., 17., 18., 19.][..],
abs_all <= 0.
);
assert_float_eq!(param.value(), 10., abs <= 0.);
let vs = render.compute_intrinsic_values(20., 1., 10);
assert_float_eq!(vs, &[20.0; 10][..], abs_all <= 0.);
assert_float_eq!(param.value(), 20., abs <= 0.);
}
#[test]
fn test_linear_ramp_krate_multiple_blocks() {
let context = OfflineAudioContext::new(1, 0, 48000.);
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::K,
default_value: 0.,
min_value: -20.,
max_value: 20.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(20.0, 20.0));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &[0.; 1][..], abs_all <= 0.);
assert_float_eq!(param.value(), 0., abs <= 0.);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &[10.; 1][..], abs_all <= 0.);
assert_float_eq!(param.value(), 10., abs <= 0.);
let vs = render.compute_intrinsic_values(20., 1., 10);
assert_float_eq!(vs, &[20.0; 1][..], abs_all <= 0.);
assert_float_eq!(param.value(), 20., abs <= 0.);
}
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::K,
default_value: 0.,
min_value: -20.,
max_value: 20.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(15.0, 15.0));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &[0.; 1][..], abs_all <= 0.);
assert_float_eq!(param.value(), 0., abs <= 0.);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &[10.; 1][..], abs_all <= 0.);
assert_float_eq!(param.value(), 10., abs <= 0.);
let vs = render.compute_intrinsic_values(20., 1., 10);
assert_float_eq!(vs, &[15.0; 1][..], abs_all <= 0.);
assert_float_eq!(param.value(), 15., abs <= 0.);
}
}
#[test]
fn test_linear_ramp_start_time() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(1., 0.));
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(-1., 10.));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[1., 0.8, 0.6, 0.4, 0.2, 0., -0.2, -0.4, -0.6, -0.8][..],
abs_all <= 1e-7
);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &[-1.; 10][..], abs_all <= 0.);
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(1., 30.));
let vs = render.compute_intrinsic_values(20., 1., 10);
assert_float_eq!(
vs,
&[0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9][..],
abs_all <= 1e-7
);
}
#[test]
fn test_exponential_ramp_a_rate() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 1.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(0.0001, 0.));
render.handle_incoming_event(param.exponential_ramp_to_value_at_time_raw(1.0, 10.));
let mut res = Vec::<f32>::with_capacity(10);
let start: f32 = 0.0001;
let end: f32 = 1.;
for t in 0..10 {
let value = start * (end / start).powf(t as f32 / 10.);
res.push(value);
}
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &res[..], abs_all <= 0.);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &[1.0; 10][..], abs_all <= 0.);
}
#[test]
fn test_exponential_ramp_a_rate_multiple_blocks() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 1.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let start: f32 = 0.0001;
let end: f32 = 1.;
render.handle_incoming_event(param.set_value_at_time_raw(start, 3.));
render.handle_incoming_event(param.exponential_ramp_to_value_at_time_raw(end, 13.));
let mut res = vec![0.; 3];
for t in 0..10 {
let value = start * (end / start).powf(t as f32 / 10.);
res.push(value);
}
res.append(&mut vec![1.; 7]);
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &res[0..10], abs_all <= 0.);
assert_float_eq!(param.value(), res[0], abs <= 0.);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &res[10..20], abs_all <= 0.);
assert_float_eq!(param.value(), res[10], abs <= 0.);
}
#[test]
fn test_exponential_ramp_a_rate_zero_and_opposite_target() {
let context = OfflineAudioContext::new(1, 0, 48000.);
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 1.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(0., 0.));
render.handle_incoming_event(param.exponential_ramp_to_value_at_time_raw(1.0, 5.));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 0., 0., 0., 0., 1., 1., 1., 1., 1.][..],
abs_all <= 0.
);
}
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -1.,
max_value: 1.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(-1., 0.));
render.handle_incoming_event(param.exponential_ramp_to_value_at_time_raw(1.0, 5.));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[-1., -1., -1., -1., -1., 1., 1., 1., 1., 1.][..],
abs_all <= 0.
);
}
}
#[test]
#[should_panic]
fn test_exponential_ramp_to_zero() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 1.,
min_value: 0.,
max_value: 1.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.exponential_ramp_to_value_at_time_raw(0.0, 10.));
}
#[test]
fn test_exponential_ramp_k_rate_multiple_blocks() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::K,
default_value: 0.,
min_value: 0.,
max_value: 1.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let start: f32 = 0.0001;
let end: f32 = 1.;
render.handle_incoming_event(param.set_value_at_time_raw(start, 3.));
render.handle_incoming_event(param.exponential_ramp_to_value_at_time_raw(end, 13.));
let mut res = vec![0.; 3];
for t in 0..10 {
let value = start * (end / start).powf(t as f32 / 10.);
res.push(value);
}
res.append(&mut vec![1.; 7]);
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &[res[0]; 1][..], abs_all <= 0.);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &[res[10]; 1][..], abs_all <= 0.);
let vs = render.compute_intrinsic_values(20., 1., 10);
assert_float_eq!(vs, &[1.; 1][..], abs_all <= 0.);
}
#[test]
fn test_exponential_ramp_k_rate_zero_and_opposite_target() {
let context = OfflineAudioContext::new(1, 0, 48000.);
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::K,
default_value: 0.,
min_value: 0.,
max_value: 1.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.exponential_ramp_to_value_at_time_raw(1.0, 5.));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &[0.; 1][..], abs_all <= 0.);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &[1.; 1][..], abs_all <= 0.);
}
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::K,
default_value: -1.,
min_value: -1.,
max_value: 1.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.exponential_ramp_to_value_at_time_raw(1.0, 5.));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &[-1.; 1][..], abs_all <= 0.);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &[1.; 1][..], abs_all <= 0.);
}
}
#[test]
fn test_exponential_ramp_start_time() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(0., 0.));
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(1., 10.));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9][..],
abs_all <= 1e-7
);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &[1.; 10][..], abs_all <= 0.);
render.handle_incoming_event(param.exponential_ramp_to_value_at_time_raw(0.0001, 30.));
let vs = render.compute_intrinsic_values(20., 1., 10);
let start: f32 = 1.;
let end: f32 = 0.0001;
let mut res = [0.; 20];
for (t, v) in res.iter_mut().enumerate() {
*v = start * (end / start).powf(t as f32 / 20.);
}
assert_float_eq!(vs, &res[10..], abs_all <= 1e-7);
}
#[test]
fn test_set_target_at_time_a_rate() {
let context = OfflineAudioContext::new(1, 0, 48000.);
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 1.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let v0: f32 = 0.;
let v1: f32 = 1.;
let t0: f64 = 0.;
let time_constant: f64 = 1.;
render.handle_incoming_event(param.set_value_at_time_raw(v0, t0));
render.handle_incoming_event(param.set_target_at_time_raw(v1, t0, time_constant));
let vs = render.compute_intrinsic_values(0., 1., 10);
let mut res = Vec::<f32>::with_capacity(10);
for t in 0..10 {
let val = v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32;
res.push(val);
}
assert_float_eq!(vs, &res[..], abs_all <= 0.);
}
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 1.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let v0: f32 = 0.;
let v1: f32 = 1.;
let t0: f64 = 0.;
let time_constant: f64 = 1.;
render.handle_incoming_event(param.set_target_at_time_raw(v1, t0, time_constant));
let vs = render.compute_intrinsic_values(0., 1., 10);
let mut res = Vec::<f32>::with_capacity(10);
for t in 0..10 {
let val = v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32;
res.push(val);
}
assert_float_eq!(vs, &res[..], abs_all <= 0.);
}
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 100.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let v0: f32 = 1.;
let v1: f32 = 42.;
let t0: f64 = 1.;
let time_constant: f64 = 2.1;
render.handle_incoming_event(param.set_value_at_time_raw(v0, t0));
render.handle_incoming_event(param.set_target_at_time_raw(v1, t0, time_constant));
let mut res = Vec::<f32>::with_capacity(10);
for t in 0..10 {
let val = v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32;
res.push(val);
}
res[0] = 0.;
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &res[..], abs_all <= 0.);
}
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 100.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_target_at_time_raw(1., 1., 0.));
let mut res = [1.; 10];
res[0] = 0.;
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &res[..], abs_all <= 0.);
}
}
#[test]
fn test_set_target_at_time_a_rate_multiple_blocks() {
let context = OfflineAudioContext::new(1, 0, 48000.);
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 2.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let v0: f32 = 0.;
let v1: f32 = 2.;
let t0: f64 = 0.;
let time_constant: f64 = 1.;
render.handle_incoming_event(param.set_value_at_time_raw(v0, t0));
render.handle_incoming_event(param.set_target_at_time_raw(v1, t0, time_constant));
let mut res = Vec::<f32>::with_capacity(20);
for t in 0..20 {
let val = v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32;
res.push(val);
}
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &res[0..10], abs_all <= 0.);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &res[10..20], abs_all <= 0.);
}
}
#[test]
fn test_set_target_at_time_a_rate_followed_by_set_value() {
let context = OfflineAudioContext::new(1, 0, 48000.);
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 2.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let v0: f32 = 0.;
let v1: f32 = 2.;
let t0: f64 = 0.;
let time_constant: f64 = 1.;
render.handle_incoming_event(param.set_value_at_time_raw(v0, t0));
render.handle_incoming_event(param.set_target_at_time_raw(v1, t0, time_constant));
render.handle_incoming_event(param.set_value_at_time_raw(0.5, 15.));
let mut res = Vec::<f32>::with_capacity(20);
for t in 0..15 {
let val = v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32;
res.push(val);
}
res.resize(20, 0.5);
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &res[0..10], abs_all <= 0.);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &res[10..20], abs_all <= 0.);
}
}
#[test]
fn test_set_target_at_time_ends_at_threshold() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 2.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(1., 0.));
render.handle_incoming_event(param.set_target_at_time_raw(0., 1., 0.2));
let vs = render.compute_intrinsic_values(0., 1., 128);
for v in vs.iter() {
assert!(!v.is_subnormal());
}
let peek = render.event_timeline.peek();
assert_eq!(
peek.unwrap().event_type,
AudioParamEventType::SetValueAtTime
);
let vs = render.compute_intrinsic_values(10., 1., 128);
assert_float_eq!(vs[..], [0.; 128], abs_all <= 0.);
}
#[test]
fn test_set_target_at_time_waits_for_start_time() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 2.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(1., 0.));
render.handle_incoming_event(param.set_target_at_time_raw(0., 5., 1.));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs[0], 1., abs <= 0.);
assert_float_eq!(vs[1], 1., abs <= 0.);
assert_float_eq!(vs[2], 1., abs <= 0.);
assert_float_eq!(vs[3], 1., abs <= 0.);
assert_float_eq!(vs[4], 1., abs <= 0.);
assert_float_eq!(vs[5], 1., abs <= 0.);
}
#[test]
fn test_set_target_at_time_a_rate_followed_by_ramp() {
let context = OfflineAudioContext::new(1, 0, 48000.);
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let v0: f32 = 0.;
let v1: f32 = 2.;
let t0: f64 = 0.;
let time_constant: f64 = 10.;
render.handle_incoming_event(param.set_value_at_time_raw(v0, t0));
render.handle_incoming_event(param.set_target_at_time_raw(v1, t0, time_constant));
let mut res = Vec::<f32>::with_capacity(20);
for t in 0..11 {
let val = v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32;
res.push(val);
}
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &res[0..10], abs_all <= 0.);
let v0 = res.pop().unwrap();
let v1 = 10.;
let t0 = 10.;
let t1 = 20.;
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(v1, t1));
for t in 10..20 {
let time = t as f64;
let value = v0 + (v1 - v0) * (time - t0) as f32 / (t1 - t0) as f32;
res.push(value);
}
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &res[10..20], abs_all <= 1.0e-6);
let vs = render.compute_intrinsic_values(20., 1., 10);
assert_float_eq!(vs, &[v1; 10][..], abs_all <= 0.);
}
}
#[test]
fn test_set_target_at_time_k_rate_multiple_blocks() {
let context = OfflineAudioContext::new(1, 0, 48000.);
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::K,
default_value: 0.,
min_value: 0.,
max_value: 2.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let v0: f32 = 0.;
let v1: f32 = 2.;
let t0: f64 = 0.;
let time_constant: f64 = 1.;
render.handle_incoming_event(param.set_value_at_time_raw(v0, t0));
render.handle_incoming_event(param.set_target_at_time_raw(v1, t0, time_constant));
let mut res = Vec::<f32>::with_capacity(20);
for t in 0..20 {
let val = v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32;
res.push(val);
}
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &[res[0]; 1][..], abs_all <= 0.);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &[res[10]; 1][..], abs_all <= 0.);
}
}
#[test]
fn test_set_target_at_time_snap_to_value() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 1.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let v0: f32 = 1.;
let v1: f32 = 0.;
let t0: f64 = 0.;
let time_constant: f64 = 1.;
render.handle_incoming_event(param.set_value_at_time_raw(v0, t0));
render.handle_incoming_event(param.set_target_at_time_raw(v1, t0, time_constant));
let mut res = [0.; 30];
res.iter_mut().enumerate().for_each(|(t, r)| {
*r = v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32;
});
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &res[..10], abs_all <= 0.);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &res[10..20], abs_all <= 0.);
let vs = render.compute_intrinsic_values(20., 1., 10);
assert_float_eq!(vs, &res[20..30], abs_all <= 0.);
let vs = render.compute_intrinsic_values(30., 1., 10);
assert_float_eq!(vs, &[0.; 10][..], abs_all <= 0.);
}
#[test]
fn test_cancel_scheduled_values() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
for t in 0..10 {
render.handle_incoming_event(param.set_value_at_time_raw(t as f32, t as f64));
}
render.handle_incoming_event(param.cancel_scheduled_values_raw(5.));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 1., 2., 3., 4., 4., 4., 4., 4., 4.][..],
abs_all <= 0.
);
}
#[test]
fn test_cancel_scheduled_values_ramp() {
let context = OfflineAudioContext::new(1, 0, 48000.);
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(0., 0.));
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(10., 10.));
render.handle_incoming_event(param.cancel_scheduled_values_raw(10.));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &[0.; 10][..], abs_all <= 0.);
}
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 20.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(0., 0.));
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(20., 20.));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.][..],
abs_all <= 0.
);
render.handle_incoming_event(param.cancel_scheduled_values_raw(10.));
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &[0.; 1][..], abs_all <= 0.);
}
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(10., 10.));
render.handle_incoming_event(param.cancel_scheduled_values_raw(10.));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &[0.; 10][..], abs_all <= 0.);
}
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 20.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(20., 20.));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.][..],
abs_all <= 0.
);
render.handle_incoming_event(param.cancel_scheduled_values_raw(10.));
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &[0.; 1][..], abs_all <= 0.);
}
}
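// cancelAndHoldAtTime(2.5) keeps the events scheduled before the cancel time
// and holds the value the param has at that time: setValueAtTime(2., 2.) is
// kept, later events are dropped, and the value holds at 2.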
#[test]
fn test_cancel_and_hold() {
let context = OfflineAudioContext::new(1, 0, 48000.);
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(1., 1.));
render.handle_incoming_event(param.set_value_at_time_raw(2., 2.));
render.handle_incoming_event(param.set_value_at_time_raw(3., 3.));
render.handle_incoming_event(param.set_value_at_time_raw(4., 4.));
render.handle_incoming_event(param.cancel_and_hold_at_time_raw(2.5));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 1., 2., 2., 2., 2., 2., 2., 2., 2.][..],
abs_all <= 0.
);
}
}
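// When the cancel time falls inside a setTargetAtTime event, the exponential
// approach keeps running until the cancel time (t = 15) and the value reached
// there is then held.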
#[test]
fn test_cancel_and_hold_during_set_target() {
let context = OfflineAudioContext::new(1, 0, 48000.);
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 2.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let v0: f32 = 0.;
let v1: f32 = 2.;
let t0: f64 = 0.;
let time_constant: f64 = 1.;
render.handle_incoming_event(param.set_value_at_time_raw(v0, t0));
render.handle_incoming_event(param.set_target_at_time_raw(v1, t0, time_constant));
render.handle_incoming_event(param.cancel_and_hold_at_time_raw(15.));
let mut res = Vec::<f32>::with_capacity(20);
for t in 0..16 {
let val = v1 + (v0 - v1) * (-1. * ((t as f64 - t0) / time_constant)).exp() as f32;
res.push(val);
}
let hold_value = res.pop().unwrap();
res.resize(20, hold_value);
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &res[0..10], abs_all <= 0.);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &res[10..20], abs_all <= 0.);
}
}
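// When the cancel time falls inside a linear ramp, the ramp is truncated and
// the interpolated value at the cancel time is held (5. at t = 5, 4.5 at t = 4.5).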
#[test]
fn test_cancel_and_hold_during_linear_ramp() {
let context = OfflineAudioContext::new(1, 0, 48000.);
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(10., 10.));
render.handle_incoming_event(param.cancel_and_hold_at_time_raw(5.));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 1., 2., 3., 4., 5., 5., 5., 5., 5.][..],
abs_all <= 0.
);
}
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(10., 10.));
render.handle_incoming_event(param.cancel_and_hold_at_time_raw(4.5));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 1., 2., 3., 4., 4.5, 4.5, 4.5, 4.5, 4.5][..],
abs_all <= 0.
);
}
}
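// Same for an exponential ramp, v(t) = V0 * (V1 / V0)^((t - T0) / (T1 - T0)):
// the ramp is truncated at the cancel time and the value reached there is held.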
#[test]
fn test_cancel_and_hold_during_exponential_ramp() {
let context = OfflineAudioContext::new(1, 0, 48000.);
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(0.0001, 0.));
render.handle_incoming_event(param.exponential_ramp_to_value_at_time_raw(1.0, 10.));
render.handle_incoming_event(param.cancel_and_hold_at_time_raw(5.));
let mut res = Vec::<f32>::with_capacity(10);
let start: f32 = 0.0001;
let end: f32 = 1.;
for t in 0..6 {
let value = start * (end / start).powf(t as f32 / 10.);
res.push(value);
}
let hold_value = res.pop().unwrap();
res.resize(10, hold_value);
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &res[..], abs_all <= 0.);
}
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(0.0001, 0.));
render.handle_incoming_event(param.exponential_ramp_to_value_at_time_raw(1.0, 10.));
render.handle_incoming_event(param.cancel_and_hold_at_time_raw(4.5));
let mut res = Vec::<f32>::with_capacity(10);
let start: f32 = 0.0001;
let end: f32 = 1.;
for t in 0..5 {
let value = start * (end / start).powf(t as f32 / 10.);
res.push(value);
}
let hold_value = start * (end / start).powf(4.5 / 10.);
res.resize(10, hold_value);
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &res[..], abs_all <= 0.);
}
}
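// Cancelling inside a setValueCurveAtTime truncates the curve: the linearly
// interpolated curve value at the cancel time (1. at t = 5, 0.9 at t = 4.5) is
// held afterwards.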
#[test]
fn test_cancel_and_hold_during_set_value_curve() {
let context = OfflineAudioContext::new(1, 0, 48000.);
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 2.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let curve = [0., 0.5, 1., 0.5, 0.];
render.handle_incoming_event(param.set_value_curve_at_time_raw(&curve[..], 0., 10.));
render.handle_incoming_event(param.cancel_and_hold_at_time_raw(5.));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 0.2, 0.4, 0.6, 0.8, 1., 1., 1., 1., 1.][..],
abs_all <= 1e-7
);
}
{
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 2.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let curve = [0., 0.5, 1., 0.5, 0.];
render.handle_incoming_event(param.set_value_curve_at_time_raw(&curve[..], 0., 10.));
render.handle_incoming_event(param.cancel_and_hold_at_time_raw(4.5));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 0.2, 0.4, 0.6, 0.8, 0.9, 0.9, 0.9, 0.9, 0.9][..],
abs_all <= 1e-7
);
}
}
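// setValueCurveAtTime linearly interpolates between the curve points over the
// given duration; once the curve has ended the last curve value is held.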
#[test]
fn test_set_value_curve_at_time_a_rate() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let curve = [0., 0.5, 1., 0.5, 0.];
render.handle_incoming_event(param.set_value_curve_at_time_raw(&curve[..], 0., 10.));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 0.2, 0.4, 0.6, 0.8, 1., 0.8, 0.6, 0.4, 0.2][..],
abs_all <= 1e-7
);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(vs, &[0.; 10][..], abs_all <= 0.);
}
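// Same curve spread over two render blocks (duration 20.): the interpolation
// must stay consistent across block boundaries.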
#[test]
fn test_set_value_curve_at_time_a_rate_multiple_frames() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let curve = [0., 0.5, 1., 0.5, 0.];
render.handle_incoming_event(param.set_value_curve_at_time_raw(&curve[..], 0., 20.));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9][..],
abs_all <= 1e-7
);
let vs = render.compute_intrinsic_values(10., 1., 10);
assert_float_eq!(
vs,
&[1., 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1][..],
abs_all <= 1e-7
);
let vs = render.compute_intrinsic_values(20., 1., 10);
assert_float_eq!(vs, &[0.; 10][..], abs_all <= 0.);
}
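// Scheduling a setValueCurveAtTime whose time range overlaps an already
// scheduled event is invalid, hence the expected panic.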
#[test]
#[should_panic]
fn test_set_value_curve_at_time_insert_while_another_event() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 1.,
min_value: 0.,
max_value: 1.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(0.0, 5.));
let curve = [0., 0.5, 1., 0.5, 0.];
render.handle_incoming_event(param.set_value_curve_at_time_raw(&curve[..], 0., 10.));
let _vs = render.compute_intrinsic_values(0., 1., 10);
}
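// Conversely, scheduling any event inside the time range covered by an
// existing setValueCurveAtTime is invalid as well.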
#[test]
#[should_panic]
fn test_set_value_curve_at_time_insert_another_event_inside() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 1.,
min_value: 0.,
max_value: 1.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let curve = [0., 0.5, 1., 0.5, 0.];
render.handle_incoming_event(param.set_value_curve_at_time_raw(&curve[..], 0., 10.));
render.handle_incoming_event(param.set_value_at_time_raw(0.0, 5.));
let _vs = render.compute_intrinsic_values(0., 1., 10);
}
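// A curve scheduled at t = 5 leaves the param at its current value until the
// start time is reached, then the interpolation begins.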
#[test]
fn test_set_value_curve_waits_for_start_time() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
let curve = [0., 0.5, 1., 0.5, 0.];
render.handle_incoming_event(param.set_value_curve_at_time_raw(&curve[..], 5., 10.));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(
vs,
&[0., 0., 0., 0., 0., 0., 0.2, 0.4, 0.6, 0.8][..],
abs_all <= 0.
);
}
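// After switching an a-rate param to k-rate, a single value per block is
// produced, sampled at the block's first frame (t = 0); this precedes the
// event scheduled at t = 0.000001, so the default value is returned.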
#[test]
fn test_update_automation_rate_to_k() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.onmessage(&mut AutomationRate::K);
render.handle_incoming_event(param.set_value_at_time_raw(2., 0.000001));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &[0.; 1][..], abs_all <= 0.);
}
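// After switching a k-rate param to a-rate, a value is computed for every
// frame; the event scheduled just after t = 0 is reflected from the first
// frame of the block, so the whole block reads 2.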
#[test]
fn test_update_automation_rate_to_a() {
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::K,
default_value: 0.,
min_value: -10.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.onmessage(&mut AutomationRate::A);
render.handle_incoming_event(param.set_value_at_time_raw(2., 0.000001));
let vs = render.compute_intrinsic_values(0., 1., 10);
assert_float_eq!(vs, &[2.; 10][..], abs_all <= 0.);
}
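// When the computed values are constant over a whole render block, the param
// is allowed to collapse its output to a single value (length 1); it grows
// back to a full block as soon as the values vary again.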
#[test]
fn test_varying_param_size() {
{
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(0., 0.));
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(9., 9.));
let vs = render.compute_intrinsic_values(0., 1., 10);
let expected = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9.];
assert_float_eq!(vs, &expected[..], abs_all <= 0.);
let vs = render.compute_intrinsic_values(10., 1., 10);
let expected = [9.; 1];
assert_float_eq!(vs, &expected[..], abs_all <= 0.);
render.handle_incoming_event(param.set_value_at_time_raw(1., 25.));
let vs = render.compute_intrinsic_values(20., 1., 10);
let expected = [9., 9., 9., 9., 9., 1., 1., 1., 1., 1.];
assert_float_eq!(vs, &expected[..], abs_all <= 0.);
let vs = render.compute_intrinsic_values(30., 1., 10);
let expected = [1.; 1];
assert_float_eq!(vs, &expected[..], abs_all <= 0.);
}
{
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 10.,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_at_time_raw(0., 0.));
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(9., 9.));
render.handle_incoming_event(param.set_value_at_time_raw(1., 25.));
let vs = render.compute_intrinsic_values(0., 1., 10);
let expected = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9.];
assert_float_eq!(vs, &expected[..], abs_all <= 0.);
let vs = render.compute_intrinsic_values(10., 1., 10);
let expected = [9.; 1];
assert_float_eq!(vs, &expected[..], abs_all <= 0.);
let vs = render.compute_intrinsic_values(20., 1., 10);
let expected = [9., 9., 9., 9., 9., 1., 1., 1., 1., 1.];
assert_float_eq!(vs, &expected[..], abs_all <= 0.);
let vs = render.compute_intrinsic_values(30., 1., 10);
let expected = [1.; 1];
assert_float_eq!(vs, &expected[..], abs_all <= 0.);
}
}
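// Same single-valued optimization with an audio input modulating the param:
// a silent input keeps the output single-valued, while any non-silent sample
// forces a full-length output buffer.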
#[test]
fn test_varying_param_size_modulated() {
let alloc = Alloc::with_capacity(1);
{
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 10.,
};
let (_param, mut render) = audio_param_pair(opts, context.mock_registration());
let vs = render.compute_intrinsic_values(0., 1., 128);
assert_float_eq!(vs, &[0.; 1][..], abs_all <= 0.);
let signal = alloc.silence();
let input = AudioRenderQuantum::from(signal);
let signal = alloc.silence();
let mut output = AudioRenderQuantum::from(signal);
render.mix_to_output(&input, &mut output);
assert!(output.single_valued());
assert_float_eq!(output.channel_data(0)[0], 0., abs <= 0.);
}
{
let context = OfflineAudioContext::new(1, 0, 48000.);
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: 0.,
min_value: 0.,
max_value: 10.,
};
let (_param, mut render) = audio_param_pair(opts, context.mock_registration());
let vs = render.compute_intrinsic_values(0., 1., 128);
assert_float_eq!(vs, &[0.; 1][..], abs_all <= 0.);
let signal = alloc.silence();
let mut input = AudioRenderQuantum::from(signal);
input.channel_data_mut(0)[0] = 1.;
let signal = alloc.silence();
let mut output = AudioRenderQuantum::from(signal);
render.mix_to_output(&input, &mut output);
let mut expected = [0.; 128];
expected[0] = 1.;
assert!(!output.single_valued());
assert_float_eq!(output.channel_data(0)[..], &expected[..], abs_all <= 0.);
}
}
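// End-to-end check: intrinsic values from the automation events are summed
// with the modulating input and clamped to [min_value, max_value]; the NaN
// input sample is expected to fall back to the default value (2.).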
#[test]
fn test_full_render_chain() {
let alloc = Alloc::with_capacity(1);
let context = OfflineAudioContext::new(1, 0, 48000.);
let min = 2.;
let max = 42.;
let default = 2.;
let opts = AudioParamDescriptor {
name: String::new(),
automation_rate: AutomationRate::A,
default_value: default,
min_value: min,
max_value: max,
};
let (param, mut render) = audio_param_pair(opts, context.mock_registration());
render.handle_incoming_event(param.set_value_raw(128.));
render.handle_incoming_event(param.linear_ramp_to_value_at_time_raw(0., 128.));
let intrinsic_values = render.compute_intrinsic_values(0., 1., 128);
let mut expected = [0.; 128];
for (i, v) in expected.iter_mut().enumerate() {
*v = 128. - i as f32;
}
assert_float_eq!(intrinsic_values, &expected[..], abs_all <= 0.);
let signal = alloc.silence();
let mut input = AudioRenderQuantum::from(signal);
input.channel_data_mut(0)[0] = f32::NAN;
let signal = alloc.silence();
let mut output = AudioRenderQuantum::from(signal);
render.mix_to_output(&input, &mut output);
expected.iter_mut().for_each(|v| *v = v.clamp(min, max));
expected[0] = 2.;
assert_float_eq!(output.channel_data(0)[..], &expected[..], abs_all <= 0.);
}
}