1#![allow(unused_variables)]
2use smallvec::SmallVec;
9
// Host-provided probe: reports the version of the host's vector-op ABI.
#[cfg(target_arch = "wasm32")]
extern "C" {
    /// Returns the host vector-op ABI version, or 0 when no accelerated
    /// implementation is available.
    fn sesh_vec_version() -> u32;
}
21
// Cached result of the `sesh_vec_version()` probe. 0 means "not queried
// yet"; `u32::MAX` is a sentinel meaning "queried, host reported 0
// (unavailable)", so the cache slot is never left at 0 after a probe.
#[cfg(target_arch = "wasm32")]
static HOST_VEC_VERSION: AtomicU32 = AtomicU32::new(0);
25
/// Returns the host vector-op version, caching the first FFI probe.
///
/// The cache encodes "probed but unavailable" as `u32::MAX` (0 is reserved
/// for "not probed yet"). Note that the *raw* probe result `v` is returned
/// on the first call (possibly 0), while later calls return the cached
/// sentinel; `use_host_ops` treats both 0 and `u32::MAX` as unavailable,
/// so callers see consistent behavior either way.
#[cfg(target_arch = "wasm32")]
fn host_version() -> u32 {
    let v = HOST_VEC_VERSION.load(Ordering::Relaxed);
    if v != 0 {
        return v;
    }
    // SAFETY: host import with no preconditions; returns a plain u32.
    let v = unsafe { sesh_vec_version() };
    // Map "unavailable" (0) to the MAX sentinel so the cache is non-zero.
    let store = if v == 0 { u32::MAX } else { v };
    HOST_VEC_VERSION.store(store, Ordering::Relaxed);
    v
}
38
/// Whether the host-accelerated vector ops should be used.
///
/// True only on wasm32 when the host reported a usable version (non-zero
/// and not the `u32::MAX` "unavailable" sentinel). Always false on native
/// targets, where only the pure-Rust fallbacks exist.
#[inline]
#[allow(dead_code)]
fn use_host_ops() -> bool {
    #[cfg(target_arch = "wasm32")]
    {
        // Query once instead of twice — each call is at least an atomic load.
        let v = host_version();
        v > 0 && v != u32::MAX
    }
    #[cfg(not(target_arch = "wasm32"))]
    { false }
}
47
/// Selects between the host-accelerated path and the pure-Rust fallback.
///
/// On wasm32 the choice is made at runtime via `use_host_ops()`; on every
/// other target the `$host` expression is compiled out entirely and only
/// `$fallback` remains, so non-wasm builds pay no dispatch cost.
macro_rules! dispatch {
    ($host:expr, $fallback:expr) => {{
        #[cfg(target_arch = "wasm32")]
        {
            if use_host_ops() { $host } else { $fallback }
        }
        #[cfg(not(target_arch = "wasm32"))]
        { $fallback }
    }};
}
62
// Host-side implementations of the vector ops below. All pointers refer to
// caller-owned slices; `len`/`n` are element counts. State pointers
// (`pos`, `phase`, filter state, …) are read and written in place so the
// host and fallback paths stay interchangeable.
#[cfg(target_arch = "wasm32")]
extern "C" {
    // --- element-wise arithmetic ---
    fn sesh_vec_copy_host(dst: *mut f32, src: *const f32, len: u32);
    fn sesh_vec_fill_host(dst: *mut f32, value: f32, len: u32);
    fn sesh_vec_add_host(dst: *mut f32, a: *const f32, b: *const f32, len: u32);
    fn sesh_vec_add_scalar_host(dst: *mut f32, value: f32, len: u32);
    fn sesh_vec_mul_host(dst: *mut f32, a: *const f32, b: *const f32, len: u32);
    fn sesh_vec_mul_scalar_host(dst: *mut f32, value: f32, len: u32);
    fn sesh_vec_mul_add_host(dst: *mut f32, src: *const f32, gain: f32, len: u32);
    fn sesh_vec_clamp_host(dst: *mut f32, src: *const f32, min: f32, max: f32, len: u32);
    // --- ring buffer / delay primitives ---
    fn sesh_vec_ring_write_host(
        buf: *mut f32, buf_len: u32, pos: *mut u32, src: *const f32, len: u32,
    );
    fn sesh_vec_ring_read_host(
        buf: *const f32, buf_len: u32, pos: u32, dst: *mut f32, offset: u32, len: u32,
    );
    fn sesh_vec_delay_read_host(
        buf: *const f32, buf_len: u32, pos: u32, dst: *mut f32, time: *const f32, len: u32,
    );
    // --- generators / filters (stateful) ---
    fn sesh_vec_osc_host(
        phase: *mut f32, dst: *mut f32, freq: f32, waveform: u32, sample_rate: f32, len: u32,
    );
    fn sesh_vec_biquad_host(
        state: *mut f32, dst: *mut f32, src: *const f32,
        cutoff: *const f32, q: *const f32, gain: *const f32,
        filter_type: u32, sample_rate: f32, len: u32,
    );
    fn sesh_vec_envelope_host(
        state: *mut f32, dst: *mut f32, src: *const f32,
        attack: *const f32, release: *const f32,
        mode: u32, sample_rate: f32, len: u32,
    );
    // --- waveshaping / element-wise math ---
    fn sesh_vec_tanh_host(dst: *mut f32, src: *const f32, drive: *const f32, len: u32);
    fn sesh_vec_hard_clip_host(dst: *mut f32, src: *const f32, threshold: *const f32, len: u32);
    fn sesh_vec_abs_host(dst: *mut f32, src: *const f32, len: u32);
    fn sesh_vec_neg_host(dst: *mut f32, src: *const f32, len: u32);
    fn sesh_vec_sqrt_host(dst: *mut f32, src: *const f32, len: u32);
    fn sesh_vec_recip_host(dst: *mut f32, src: *const f32, len: u32);
    fn sesh_vec_div_host(dst: *mut f32, a: *const f32, b: *const f32, len: u32);
    fn sesh_vec_pow_host(dst: *mut f32, src: *const f32, exp: *const f32, len: u32);
    // --- reverb building blocks ---
    fn sesh_vec_schroeder_allpass_host(
        buf: *mut f32, buf_len: u32, pos: *mut u32,
        dst: *mut f32, src: *const f32,
        delay: u32, g: f32, len: u32,
    );
    fn sesh_vec_one_pole_host(
        state: *mut f32, dst: *mut f32, src: *const f32,
        coefficient: f32, len: u32,
    );
    fn sesh_vec_comb_host(
        buf: *mut f32, buf_len: u32, pos: *mut u32, damp: *mut f32,
        dst: *mut f32, src: *const f32, time: *const f32,
        feedback: f32, damping: f32, len: u32,
    );
    fn sesh_vec_comb_parallel_host(
        bufs: *const *mut f32, buf_lens: *const u32, positions: *mut u32, damp: *mut f32,
        dst: *const *mut f32, src: *const f32, time: *const *const f32,
        feedback: f32, damping: f32, n: u32, len: u32,
    );
    fn sesh_vec_comb_coupled_host(
        bufs: *const *mut f32, buf_lens: *const u32, positions: *mut u32, damp: *mut f32,
        dst: *const *mut f32, src: *const *const f32, time: *const *const f32,
        matrix: *const f32, damping: f32, n: u32, len: u32,
    );
}
132
/// Oscillator waveform selector. Discriminants are part of the host ABI
/// (passed as `u32` to `sesh_vec_osc_host`) — do not renumber.
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Waveform {
    Sine = 0,
    Triangle = 1,
    Saw = 2,
    Square = 3,
}
146
/// Biquad filter response type. Discriminants are part of the host ABI
/// (passed as `u32` to `sesh_vec_biquad_host`) — do not renumber.
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FilterType {
    Lowpass = 0,
    Highpass = 1,
    Bandpass = 2,
    Notch = 3,
    Peak = 4,
    LowShelf = 5,
    HighShelf = 6,
    Allpass = 7,
}
164
/// Direct-form-I biquad history: the two most recent inputs (`x1`, `x2`)
/// and outputs (`y1`, `y2`).
///
/// `#[repr(C)]` because the host path reinterprets this as four
/// consecutive `f32`s. `Default` matches `new()` (all zeros).
#[derive(Clone, Copy, Debug, Default, PartialEq)]
#[repr(C)]
pub struct BiquadState {
    pub x1: f32,
    pub x2: f32,
    pub y1: f32,
    pub y2: f32,
}

impl BiquadState {
    /// All-zero (silent) initial state.
    pub const fn new() -> Self {
        Self { x1: 0.0, x2: 0.0, y1: 0.0, y2: 0.0 }
    }
}
180
/// Envelope-follower detection mode. Discriminants are part of the host
/// ABI (passed as `u32` to `sesh_vec_envelope_host`) — do not renumber.
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EnvelopeMode {
    Peak = 0,
    Rms = 1,
}
190
/// Envelope-follower state: the current smoothed level (|x| in Peak mode,
/// x² in RMS mode).
///
/// `#[repr(C)]` because the host path passes this as a `*mut f32`.
/// `Default` matches `new()` (zero).
#[derive(Clone, Copy, Debug, Default, PartialEq)]
#[repr(C)]
pub struct EnvelopeState {
    pub current: f32,
}

impl EnvelopeState {
    /// Zero (silent) initial state.
    pub const fn new() -> Self {
        Self { current: 0.0 }
    }
}
203
/// One-pole filter state: the previous output sample.
///
/// `#[repr(C)]` because the host path passes this as a `*mut f32`.
/// `Default` matches `new()` (zero).
#[derive(Clone, Copy, Debug, Default, PartialEq)]
#[repr(C)]
pub struct OnePoleState {
    pub y1: f32,
}

impl OnePoleState {
    /// Zero (silent) initial state.
    pub const fn new() -> Self {
        Self { y1: 0.0 }
    }
}
216
217pub fn vec_copy(dst: &mut [f32], src: &[f32]) {
223 let len = dst.len().min(src.len());
224 dispatch!(
225 unsafe { sesh_vec_copy_host(dst.as_mut_ptr(), src.as_ptr(), len as u32) },
226 dst[..len].copy_from_slice(&src[..len])
227 );
228}
229
230pub fn vec_fill(dst: &mut [f32], value: f32) {
232 let len = dst.len();
233 dispatch!(
234 unsafe { sesh_vec_fill_host(dst.as_mut_ptr(), value, len as u32) },
235 for s in dst.iter_mut() { *s = value; }
236 );
237}
238
239pub fn vec_add(dst: &mut [f32], a: &[f32], b: &[f32]) {
241 let len = dst.len().min(a.len()).min(b.len());
242 dispatch!(
243 unsafe { sesh_vec_add_host(dst.as_mut_ptr(), a.as_ptr(), b.as_ptr(), len as u32) },
244 for i in 0..len { dst[i] = a[i] + b[i]; }
245 );
246}
247
248pub fn vec_add_scalar(dst: &mut [f32], value: f32) {
250 let len = dst.len();
251 dispatch!(
252 unsafe { sesh_vec_add_scalar_host(dst.as_mut_ptr(), value, len as u32) },
253 for s in dst.iter_mut() { *s += value; }
254 );
255}
256
257pub fn vec_mul(dst: &mut [f32], a: &[f32], b: &[f32]) {
259 let len = dst.len().min(a.len()).min(b.len());
260 dispatch!(
261 unsafe { sesh_vec_mul_host(dst.as_mut_ptr(), a.as_ptr(), b.as_ptr(), len as u32) },
262 for i in 0..len { dst[i] = a[i] * b[i]; }
263 );
264}
265
266pub fn vec_mul_scalar(dst: &mut [f32], value: f32) {
268 let len = dst.len();
269 dispatch!(
270 unsafe { sesh_vec_mul_scalar_host(dst.as_mut_ptr(), value, len as u32) },
271 for s in dst.iter_mut() { *s *= value; }
272 );
273}
274
275pub fn vec_mul_add(dst: &mut [f32], src: &[f32], gain: f32) {
277 let len = dst.len().min(src.len());
278 dispatch!(
279 unsafe { sesh_vec_mul_add_host(dst.as_mut_ptr(), src.as_ptr(), gain, len as u32) },
280 for i in 0..len { dst[i] += src[i] * gain; }
281 );
282}
283
284pub fn vec_clamp(dst: &mut [f32], src: &[f32], min: f32, max: f32) {
286 let len = dst.len().min(src.len());
287 dispatch!(
288 unsafe { sesh_vec_clamp_host(dst.as_mut_ptr(), src.as_ptr(), min, max, len as u32) },
289 for i in 0..len { dst[i] = src[i].clamp(min, max); }
290 );
291}
292
293pub fn vec_clamp_assign(dst: &mut [f32], min: f32, max: f32) {
295 let len = dst.len();
296 dispatch!(
297 unsafe { sesh_vec_clamp_host(dst.as_mut_ptr(), dst.as_ptr(), min, max, len as u32) },
298 for i in 0..len { dst[i] = dst[i].clamp(min, max); }
299 );
300}
301
302pub fn vec_ring_write(buf: &mut [f32], pos: &mut usize, src: &[f32]) {
309 let buf_len = buf.len();
310 let frames = src.len();
311 dispatch!(
312 {
313 let mut pos32 = *pos as u32;
314 unsafe {
315 sesh_vec_ring_write_host(
316 buf.as_mut_ptr(), buf_len as u32, &mut pos32, src.as_ptr(), frames as u32,
317 );
318 }
319 *pos = pos32 as usize;
320 },
321 {
322 for i in 0..frames {
323 buf[(*pos + i) % buf_len] = src[i];
324 }
325 *pos = (*pos + frames) % buf_len;
326 }
327 );
328}
329
/// Read `dst.len()` consecutive samples from ring buffer `buf`, starting
/// `offset` samples behind the write position `pos`.
///
/// NOTE(review): the fallback assumes `offset <= pos + buf.len()`
/// (otherwise the index arithmetic underflows) and a non-empty `buf` when
/// `dst` is non-empty (`% 0` panics) — confirm callers uphold this.
pub fn vec_ring_read(buf: &[f32], pos: usize, dst: &mut [f32], offset: usize) {
    let buf_len = buf.len();
    let frames = dst.len();
    dispatch!(
        unsafe {
            sesh_vec_ring_read_host(
                buf.as_ptr(), buf_len as u32, pos as u32,
                dst.as_mut_ptr(), offset as u32, frames as u32,
            );
        },
        {
            // First sample is `offset` behind `pos`, modulo the buffer length.
            let start = (pos + buf_len - offset) % buf_len;
            for i in 0..frames {
                dst[i] = buf[(start + i) % buf_len];
            }
        }
    );
}
349
/// Read delayed samples with linear interpolation.
///
/// `pos` is the ring-buffer write position *after* the most recent block of
/// `dst.len()` writes; `time[i]` is the delay in (fractional) samples for
/// output sample `i`, measured from where the write head was when sample
/// `i` was written.
///
/// NOTE(review): the fallback assumes `frames <= buf.len()` and
/// `time[i] < buf.len()` — larger values underflow the usize arithmetic.
/// Confirm callers uphold this.
pub fn vec_delay_read(buf: &[f32], pos: usize, dst: &mut [f32], time: &[f32]) {
    let buf_len = buf.len();
    let frames = dst.len().min(time.len());
    dispatch!(
        unsafe {
            sesh_vec_delay_read_host(
                buf.as_ptr(), buf_len as u32, pos as u32,
                dst.as_mut_ptr(), time.as_ptr(), frames as u32,
            );
        },
        {
            for i in 0..frames {
                // Reconstruct the write-head position at frame i of the block.
                let write_pos_at_i = (pos + buf_len - frames + i) % buf_len;

                // Split the delay into whole samples + fractional remainder.
                let delay_int = time[i] as usize;
                let delay_frac = time[i] - delay_int as f32;

                // idx2 is one sample older than idx1; lerp between them.
                let idx1 = (write_pos_at_i + buf_len - delay_int) % buf_len;
                let idx2 = (idx1 + buf_len - 1) % buf_len;

                dst[i] = buf[idx1] + delay_frac * (buf[idx2] - buf[idx1]);
            }
        }
    );
}
385
/// Schroeder allpass section over one block.
///
/// Per sample: `v = x + g * delayed`, output `delayed - g * v`, and `v` is
/// written into the delay line. `buf` is the ring delay line and `*pos`
/// its write head, advanced by the number of frames processed.
///
/// NOTE(review): assumes `delay <= buf.len()` and a non-empty `buf` —
/// confirm callers uphold this.
pub fn vec_schroeder_allpass(
    buf: &mut [f32],
    pos: &mut usize,
    dst: &mut [f32],
    src: &[f32],
    delay: usize,
    g: f32,
) {
    let buf_len = buf.len();
    let frames = dst.len().min(src.len());
    dispatch!(
        {
            let mut pos32 = *pos as u32;
            unsafe {
                sesh_vec_schroeder_allpass_host(
                    buf.as_mut_ptr(), buf_len as u32, &mut pos32,
                    dst.as_mut_ptr(), src.as_ptr(),
                    delay as u32, g, frames as u32,
                );
            }
            *pos = pos32 as usize;
        },
        {
            let mut wp = *pos;
            for i in 0..frames {
                // Tap the delay line `delay` samples behind the write head.
                let read_idx = (wp + buf_len - delay) % buf_len;
                let buf_out = buf[read_idx];

                // Standard Schroeder allpass structure.
                let v = src[i] + g * buf_out;
                dst[i] = buf_out - g * v;

                buf[wp] = v;
                wp = (wp + 1) % buf_len;
            }
            *pos = wp;
        }
    );
}
435
436pub fn vec_one_pole(
446 state: &mut OnePoleState,
447 dst: &mut [f32],
448 src: &[f32],
449 coefficient: f32,
450) {
451 let frames = dst.len().min(src.len());
452 dispatch!(
453 {
454 unsafe {
455 sesh_vec_one_pole_host(
456 &mut state.y1 as *mut f32, dst.as_mut_ptr(), src.as_ptr(),
457 coefficient, frames as u32,
458 );
459 }
460 },
461 {
462 let mut y = state.y1;
463 for i in 0..frames {
464 y = src[i] + coefficient * (y - src[i]);
465 dst[i] = y;
466 }
467 state.y1 = y;
468 }
469 );
470}
471
/// Single feedback comb filter with a linearly-interpolated (modulatable)
/// delay tap and one-pole damping in the feedback path.
///
/// `time[i]` is the delay in fractional samples for frame `i`; `feedback`
/// scales the damped tap that is written back into the delay line together
/// with the input. The damped tap is also the output.
///
/// NOTE(review): assumes `time[i] < buf.len()` and a non-empty `buf` —
/// confirm callers uphold this.
pub fn vec_comb(
    buf: &mut [f32],
    pos: &mut usize,
    damp: &mut OnePoleState,
    dst: &mut [f32],
    src: &[f32],
    time: &[f32],
    feedback: f32,
    damping: f32,
) {
    let buf_len = buf.len();
    let frames = dst.len().min(src.len()).min(time.len());
    dispatch!(
        {
            let mut pos32 = *pos as u32;
            unsafe {
                sesh_vec_comb_host(
                    buf.as_mut_ptr(), buf_len as u32, &mut pos32, &mut damp.y1 as *mut f32,
                    dst.as_mut_ptr(), src.as_ptr(), time.as_ptr(),
                    feedback, damping, frames as u32,
                );
            }
            *pos = pos32 as usize;
        },
        {
            let mut wp = *pos;
            let mut y = damp.y1;
            for i in 0..frames {
                // Split the delay into integer + fractional parts and lerp
                // between the two neighbouring (older) samples.
                let delay_int = time[i] as usize;
                let delay_frac = time[i] - delay_int as f32;
                let idx1 = (wp + buf_len - delay_int) % buf_len;
                let idx2 = (idx1 + buf_len - 1) % buf_len;
                let tap = buf[idx1] + delay_frac * (buf[idx2] - buf[idx1]);

                // One-pole damping of the tap (`y` is the smoother state).
                y = tap + damping * (y - tap);

                dst[i] = y;
                buf[wp] = src[i] + feedback * y;
                wp = (wp + 1) % buf_len;
            }
            *pos = wp;
            damp.y1 = y;
        }
    );
}
546
/// N parallel (independent) comb filters sharing one input `src`.
///
/// The host path flattens per-line pointers and state into arrays for a
/// single FFI call; the fallback simply runs `vec_comb` per line.
///
/// NOTE(review): `positions`, `damp`, `dst` and `time` are assumed to have
/// at least `bufs.len()` entries — confirm callers uphold this.
pub fn vec_comb_parallel(
    bufs: &mut [&mut [f32]],
    positions: &mut [usize],
    damp: &mut [OnePoleState],
    dst: &mut [&mut [f32]],
    src: &[f32],
    time: &[&[f32]],
    feedback: f32,
    damping: f32,
) {
    let n = bufs.len();
    let frames = src.len();
    dispatch!(
        {
            // Marshal slice-of-slices into flat pointer arrays for the FFI
            // call; SmallVec keeps the common case (n <= 16) off the heap.
            let mut buf_ptrs: SmallVec<[*mut f32; 16]> = SmallVec::with_capacity(n);
            let mut buf_lens: SmallVec<[u32; 16]> = SmallVec::with_capacity(n);
            let mut pos32: SmallVec<[u32; 16]> = SmallVec::with_capacity(n);
            let mut damp_vals: SmallVec<[f32; 16]> = SmallVec::with_capacity(n);
            let mut dst_ptrs: SmallVec<[*mut f32; 16]> = SmallVec::with_capacity(n);
            let mut time_ptrs: SmallVec<[*const f32; 16]> = SmallVec::with_capacity(n);
            for i in 0..n {
                buf_ptrs.push(bufs[i].as_mut_ptr());
                buf_lens.push(bufs[i].len() as u32);
                pos32.push(positions[i] as u32);
                damp_vals.push(damp[i].y1);
                dst_ptrs.push(dst[i].as_mut_ptr());
                time_ptrs.push(time[i].as_ptr());
            }
            unsafe {
                sesh_vec_comb_parallel_host(
                    buf_ptrs.as_ptr(), buf_lens.as_ptr(), pos32.as_mut_ptr(), damp_vals.as_mut_ptr(),
                    dst_ptrs.as_ptr(), src.as_ptr(), time_ptrs.as_ptr(),
                    feedback, damping, n as u32, frames as u32,
                );
            }
            // Copy host-mutated state (write heads, damping) back out.
            for i in 0..n {
                positions[i] = pos32[i] as usize;
                damp[i].y1 = damp_vals[i];
            }
        },
        {
            for line in 0..n {
                vec_comb(
                    bufs[line], &mut positions[line], &mut damp[line],
                    dst[line], src, time[line], feedback, damping,
                );
            }
        }
    );
}
605
606pub fn vec_comb_coupled(
622 bufs: &mut [&mut [f32]],
623 positions: &mut [usize],
624 damp: &mut [OnePoleState],
625 dst: &mut [&mut [f32]],
626 src: &[&[f32]],
627 time: &[&[f32]],
628 matrix: &[f32],
629 damping: f32,
630) {
631 let n = bufs.len();
632 assert!(matrix.len() >= n * n, "vec_comb_coupled: matrix must be N×N");
633
634 let frames = dst[0].len();
635 dispatch!(
636 {
637 let mut buf_ptrs: SmallVec<[*mut f32; 16]> = SmallVec::with_capacity(n);
638 let mut buf_lens: SmallVec<[u32; 16]> = SmallVec::with_capacity(n);
639 let mut pos32: SmallVec<[u32; 16]> = SmallVec::with_capacity(n);
640 let mut damp_vals: SmallVec<[f32; 16]> = SmallVec::with_capacity(n);
641 let mut dst_ptrs: SmallVec<[*mut f32; 16]> = SmallVec::with_capacity(n);
642 let mut src_ptrs: SmallVec<[*const f32; 16]> = SmallVec::with_capacity(n);
643 let mut time_ptrs: SmallVec<[*const f32; 16]> = SmallVec::with_capacity(n);
644 for i in 0..n {
645 buf_ptrs.push(bufs[i].as_mut_ptr());
646 buf_lens.push(bufs[i].len() as u32);
647 pos32.push(positions[i] as u32);
648 damp_vals.push(damp[i].y1);
649 dst_ptrs.push(dst[i].as_mut_ptr());
650 src_ptrs.push(src[i].as_ptr());
651 time_ptrs.push(time[i].as_ptr());
652 }
653 unsafe {
654 sesh_vec_comb_coupled_host(
655 buf_ptrs.as_ptr(), buf_lens.as_ptr(), pos32.as_mut_ptr(), damp_vals.as_mut_ptr(),
656 dst_ptrs.as_ptr(), src_ptrs.as_ptr(), time_ptrs.as_ptr(),
657 matrix.as_ptr(), damping, n as u32, frames as u32,
658 );
659 }
660 for i in 0..n {
661 positions[i] = pos32[i] as usize;
662 damp[i].y1 = damp_vals[i];
663 }
664 },
665 {
666 for i in 0..frames {
667 let mut taps = [0.0f32; 16];
668 let mut mixed = [0.0f32; 16];
669
670 for line in 0..n {
671 let buf_len = bufs[line].len();
672 let wp = positions[line];
673 let t = time[line][i];
674 let delay_int = t as usize;
675 let delay_frac = t - delay_int as f32;
676 let idx1 = (wp + buf_len - delay_int) % buf_len;
677 let idx2 = (idx1 + buf_len - 1) % buf_len;
678 taps[line] = bufs[line][idx1] + delay_frac * (bufs[line][idx2] - bufs[line][idx1]);
679 }
680
681 for line in 0..n {
682 damp[line].y1 = taps[line] + damping * (damp[line].y1 - taps[line]);
683 taps[line] = damp[line].y1;
684 }
685
686 for row in 0..n {
687 let mut sum = 0.0;
688 for col in 0..n {
689 sum += matrix[row * n + col] * taps[col];
690 }
691 mixed[row] = sum;
692 }
693
694 for line in 0..n {
695 dst[line][i] = taps[line];
696 bufs[line][positions[line]] = src[line][i] + mixed[line];
697 positions[line] = (positions[line] + 1) % bufs[line].len();
698 }
699 }
700 }
701 );
702}
703
704pub fn vec_osc(
710 phase: &mut f32,
711 dst: &mut [f32],
712 freq: f32,
713 waveform: Waveform,
714 sample_rate: f32,
715) {
716 let frames = dst.len();
717 dispatch!(
718 unsafe {
719 sesh_vec_osc_host(
720 phase as *mut f32, dst.as_mut_ptr(),
721 freq, waveform as u32, sample_rate, frames as u32,
722 );
723 },
724 {
725 let phase_inc = freq / sample_rate;
726 for i in 0..frames {
727 dst[i] = match waveform {
728 Waveform::Sine => (*phase * std::f32::consts::TAU).sin(),
729 Waveform::Triangle => 4.0 * (*phase - (*phase + 0.5).floor()).abs() - 1.0,
730 Waveform::Saw => 2.0 * (*phase - (*phase + 0.5).floor()),
731 Waveform::Square => if *phase % 1.0 < 0.5 { 1.0 } else { -1.0 },
732 };
733 *phase += phase_inc;
734 if *phase >= 1.0 {
735 *phase -= 1.0;
736 }
737 }
738 }
739 );
740}
741
/// Per-sample biquad filter (RBJ "Audio EQ Cookbook" coefficient formulas)
/// with sample-accurate parameter modulation.
///
/// `cutoff` is in Hz, `q` is the resonance, and `gain` is interpreted as
/// dB (used by Peak/LowShelf/HighShelf via the `10^(dB/40)` mapping).
/// Coefficients are recomputed every sample from the parameter slices;
/// `state` carries the direct-form-I history across blocks.
pub fn vec_biquad(
    state: &mut BiquadState,
    dst: &mut [f32],
    src: &[f32],
    cutoff: &[f32],
    q: &[f32],
    gain: &[f32],
    filter_type: FilterType,
    sample_rate: f32,
) {
    let frames = dst.len().min(src.len()).min(cutoff.len()).min(q.len()).min(gain.len());
    dispatch!(
        unsafe {
            sesh_vec_biquad_host(
                // BiquadState is #[repr(C)] with four f32 fields, so the
                // host receives it as four consecutive f32s.
                state as *mut BiquadState as *mut f32,
                dst.as_mut_ptr(), src.as_ptr(),
                cutoff.as_ptr(), q.as_ptr(), gain.as_ptr(),
                filter_type as u32, sample_rate, frames as u32,
            );
        },
        {
            for i in 0..frames {
                // Intermediate terms shared by all filter types.
                let w0 = std::f32::consts::TAU * cutoff[i] / sample_rate;
                let cos_w0 = w0.cos();
                let sin_w0 = w0.sin();
                let alpha = sin_w0 / (2.0 * q[i]);
                let a_db = gain[i];
                // dB -> linear amplitude (the cookbook's "A" term).
                let a_lin = 10.0f32.powf(a_db / 40.0);

                // Unnormalized coefficients per the cookbook.
                let (b0, b1, b2, a0, a1, a2) = match filter_type {
                    FilterType::Lowpass => {
                        let b1 = 1.0 - cos_w0;
                        let b0 = b1 / 2.0;
                        (b0, b1, b0, 1.0 + alpha, -2.0 * cos_w0, 1.0 - alpha)
                    }
                    FilterType::Highpass => {
                        let b1 = -(1.0 + cos_w0);
                        let b0 = (1.0 + cos_w0) / 2.0;
                        (b0, b1, b0, 1.0 + alpha, -2.0 * cos_w0, 1.0 - alpha)
                    }
                    FilterType::Bandpass => {
                        (alpha, 0.0, -alpha, 1.0 + alpha, -2.0 * cos_w0, 1.0 - alpha)
                    }
                    FilterType::Notch => {
                        (1.0, -2.0 * cos_w0, 1.0, 1.0 + alpha, -2.0 * cos_w0, 1.0 - alpha)
                    }
                    FilterType::Peak => {
                        (
                            1.0 + alpha * a_lin,
                            -2.0 * cos_w0,
                            1.0 - alpha * a_lin,
                            1.0 + alpha / a_lin,
                            -2.0 * cos_w0,
                            1.0 - alpha / a_lin,
                        )
                    }
                    FilterType::LowShelf => {
                        let two_sqrt_a_alpha = 2.0 * a_lin.sqrt() * alpha;
                        (
                            a_lin * ((a_lin + 1.0) - (a_lin - 1.0) * cos_w0 + two_sqrt_a_alpha),
                            2.0 * a_lin * ((a_lin - 1.0) - (a_lin + 1.0) * cos_w0),
                            a_lin * ((a_lin + 1.0) - (a_lin - 1.0) * cos_w0 - two_sqrt_a_alpha),
                            (a_lin + 1.0) + (a_lin - 1.0) * cos_w0 + two_sqrt_a_alpha,
                            -2.0 * ((a_lin - 1.0) + (a_lin + 1.0) * cos_w0),
                            (a_lin + 1.0) + (a_lin - 1.0) * cos_w0 - two_sqrt_a_alpha,
                        )
                    }
                    FilterType::HighShelf => {
                        let two_sqrt_a_alpha = 2.0 * a_lin.sqrt() * alpha;
                        (
                            a_lin * ((a_lin + 1.0) + (a_lin - 1.0) * cos_w0 + two_sqrt_a_alpha),
                            -2.0 * a_lin * ((a_lin - 1.0) + (a_lin + 1.0) * cos_w0),
                            a_lin * ((a_lin + 1.0) + (a_lin - 1.0) * cos_w0 - two_sqrt_a_alpha),
                            (a_lin + 1.0) - (a_lin - 1.0) * cos_w0 + two_sqrt_a_alpha,
                            2.0 * ((a_lin - 1.0) - (a_lin + 1.0) * cos_w0),
                            (a_lin + 1.0) - (a_lin - 1.0) * cos_w0 - two_sqrt_a_alpha,
                        )
                    }
                    FilterType::Allpass => {
                        (1.0 - alpha, -2.0 * cos_w0, 1.0 + alpha, 1.0 + alpha, -2.0 * cos_w0, 1.0 - alpha)
                    }
                };

                // Normalize by a0 so the difference equation needs no divide.
                let b0 = b0 / a0;
                let b1 = b1 / a0;
                let b2 = b2 / a0;
                let a1 = a1 / a0;
                let a2 = a2 / a0;

                // Direct form I difference equation.
                let x0 = src[i];
                let y0 = b0 * x0 + b1 * state.x1 + b2 * state.x2
                    - a1 * state.y1 - a2 * state.y2;

                // Shift the input/output history.
                state.x2 = state.x1;
                state.x1 = x0;
                state.y2 = state.y1;
                state.y1 = y0;

                dst[i] = y0;
            }
        }
    );
}
854
/// Envelope follower with per-sample attack/release times (in seconds).
///
/// Peak mode tracks |x|; RMS mode tracks x² internally and outputs its
/// square root. The one-pole smoother uses the attack coefficient while
/// the detected level is rising and the release coefficient otherwise.
pub fn vec_envelope(
    state: &mut EnvelopeState,
    dst: &mut [f32],
    src: &[f32],
    attack: &[f32],
    release: &[f32],
    mode: EnvelopeMode,
    sample_rate: f32,
) {
    let frames = dst.len().min(src.len()).min(attack.len()).min(release.len());
    dispatch!(
        unsafe {
            sesh_vec_envelope_host(
                // EnvelopeState is #[repr(C)] with a single f32 field.
                state as *mut EnvelopeState as *mut f32,
                dst.as_mut_ptr(), src.as_ptr(),
                attack.as_ptr(), release.as_ptr(),
                mode as u32, sample_rate, frames as u32,
            );
        },
        {
            for i in 0..frames {
                // Detector: rectified or squared input depending on mode.
                let input_level = match mode {
                    EnvelopeMode::Peak => src[i].abs(),
                    EnvelopeMode::Rms => src[i] * src[i],
                };

                // One-pole coefficients from time constants in seconds.
                // A time of 0 yields exp(-inf) = 0, i.e. an instant jump.
                let att_coeff = (-1.0 / (attack[i] * sample_rate)).exp();
                let rel_coeff = (-1.0 / (release[i] * sample_rate)).exp();

                let coeff = if input_level > state.current { att_coeff } else { rel_coeff };
                state.current = coeff * state.current + (1.0 - coeff) * input_level;

                dst[i] = match mode {
                    EnvelopeMode::Peak => state.current,
                    EnvelopeMode::Rms => state.current.sqrt(),
                };
            }
        }
    );
}
903
904pub fn vec_tanh(dst: &mut [f32], src: &[f32], drive: &[f32]) {
910 let len = dst.len().min(src.len()).min(drive.len());
911 dispatch!(
912 unsafe { sesh_vec_tanh_host(dst.as_mut_ptr(), src.as_ptr(), drive.as_ptr(), len as u32) },
913 for i in 0..len { dst[i] = (src[i] * drive[i]).tanh(); }
914 );
915}
916
917pub fn vec_hard_clip(dst: &mut [f32], src: &[f32], threshold: &[f32]) {
919 let len = dst.len().min(src.len()).min(threshold.len());
920 dispatch!(
921 unsafe { sesh_vec_hard_clip_host(dst.as_mut_ptr(), src.as_ptr(), threshold.as_ptr(), len as u32) },
922 for i in 0..len { dst[i] = src[i].clamp(-threshold[i], threshold[i]); }
923 );
924}
925
926pub fn vec_abs(dst: &mut [f32], src: &[f32]) {
932 let len = dst.len().min(src.len());
933 dispatch!(
934 unsafe { sesh_vec_abs_host(dst.as_mut_ptr(), src.as_ptr(), len as u32) },
935 for i in 0..len { dst[i] = src[i].abs(); }
936 );
937}
938
939pub fn vec_neg(dst: &mut [f32], src: &[f32]) {
941 let len = dst.len().min(src.len());
942 dispatch!(
943 unsafe { sesh_vec_neg_host(dst.as_mut_ptr(), src.as_ptr(), len as u32) },
944 for i in 0..len { dst[i] = -src[i]; }
945 );
946}
947
948pub fn vec_sqrt(dst: &mut [f32], src: &[f32]) {
950 let len = dst.len().min(src.len());
951 dispatch!(
952 unsafe { sesh_vec_sqrt_host(dst.as_mut_ptr(), src.as_ptr(), len as u32) },
953 for i in 0..len { dst[i] = src[i].sqrt(); }
954 );
955}
956
957pub fn vec_recip(dst: &mut [f32], src: &[f32]) {
959 let len = dst.len().min(src.len());
960 dispatch!(
961 unsafe { sesh_vec_recip_host(dst.as_mut_ptr(), src.as_ptr(), len as u32) },
962 for i in 0..len { dst[i] = 1.0 / src[i]; }
963 );
964}
965
966pub fn vec_div(dst: &mut [f32], a: &[f32], b: &[f32]) {
968 let len = dst.len().min(a.len()).min(b.len());
969 dispatch!(
970 unsafe { sesh_vec_div_host(dst.as_mut_ptr(), a.as_ptr(), b.as_ptr(), len as u32) },
971 for i in 0..len { dst[i] = a[i] / b[i]; }
972 );
973}
974
975pub fn vec_pow(dst: &mut [f32], src: &[f32], exp: &[f32]) {
977 let len = dst.len().min(src.len()).min(exp.len());
978 dispatch!(
979 unsafe { sesh_vec_pow_host(dst.as_mut_ptr(), src.as_ptr(), exp.as_ptr(), len as u32) },
980 for i in 0..len { dst[i] = src[i].powf(exp[i]); }
981 );
982}
983
984pub fn vec_add_assign(dst: &mut [f32], src: &[f32]) {
994 let len = dst.len().min(src.len());
995 dispatch!(
996 unsafe { sesh_vec_add_host(dst.as_mut_ptr(), dst.as_ptr(), src.as_ptr(), len as u32) },
997 for i in 0..len { dst[i] += src[i]; }
998 );
999}
1000
1001pub fn vec_mul_assign(dst: &mut [f32], src: &[f32]) {
1003 let len = dst.len().min(src.len());
1004 dispatch!(
1005 unsafe { sesh_vec_mul_host(dst.as_mut_ptr(), dst.as_ptr(), src.as_ptr(), len as u32) },
1006 for i in 0..len { dst[i] *= src[i]; }
1007 );
1008}
1009
1010pub fn vec_tanh_assign(dst: &mut [f32], drive: &[f32]) {
1012 let len = dst.len().min(drive.len());
1013 dispatch!(
1014 unsafe { sesh_vec_tanh_host(dst.as_mut_ptr(), dst.as_ptr(), drive.as_ptr(), len as u32) },
1015 for i in 0..len { dst[i] = (dst[i] * drive[i]).tanh(); }
1016 );
1017}
1018
1019pub fn vec_hard_clip_assign(dst: &mut [f32], threshold: &[f32]) {
1021 let len = dst.len().min(threshold.len());
1022 dispatch!(
1023 unsafe { sesh_vec_hard_clip_host(dst.as_mut_ptr(), dst.as_ptr(), threshold.as_ptr(), len as u32) },
1024 for i in 0..len { dst[i] = dst[i].clamp(-threshold[i], threshold[i]); }
1025 );
1026}
1027
1028pub fn vec_abs_assign(dst: &mut [f32]) {
1030 let len = dst.len();
1031 dispatch!(
1032 unsafe { sesh_vec_abs_host(dst.as_mut_ptr(), dst.as_ptr(), len as u32) },
1033 for i in 0..len { dst[i] = dst[i].abs(); }
1034 );
1035}
1036
1037pub fn vec_neg_assign(dst: &mut [f32]) {
1039 let len = dst.len();
1040 dispatch!(
1041 unsafe { sesh_vec_neg_host(dst.as_mut_ptr(), dst.as_ptr(), len as u32) },
1042 for i in 0..len { dst[i] = -dst[i]; }
1043 );
1044}
1045
1046pub fn vec_sqrt_assign(dst: &mut [f32]) {
1048 let len = dst.len();
1049 dispatch!(
1050 unsafe { sesh_vec_sqrt_host(dst.as_mut_ptr(), dst.as_ptr(), len as u32) },
1051 for i in 0..len { dst[i] = dst[i].sqrt(); }
1052 );
1053}
1054
1055pub fn vec_recip_assign(dst: &mut [f32]) {
1057 let len = dst.len();
1058 dispatch!(
1059 unsafe { sesh_vec_recip_host(dst.as_mut_ptr(), dst.as_ptr(), len as u32) },
1060 for i in 0..len { dst[i] = 1.0 / dst[i]; }
1061 );
1062}
1063
1064pub fn vec_div_assign(dst: &mut [f32], src: &[f32]) {
1066 let len = dst.len().min(src.len());
1067 dispatch!(
1068 unsafe { sesh_vec_div_host(dst.as_mut_ptr(), dst.as_ptr(), src.as_ptr(), len as u32) },
1069 for i in 0..len { dst[i] /= src[i]; }
1070 );
1071}
1072
1073pub fn vec_pow_assign(dst: &mut [f32], exp: &[f32]) {
1075 let len = dst.len().min(exp.len());
1076 dispatch!(
1077 unsafe { sesh_vec_pow_host(dst.as_mut_ptr(), dst.as_ptr(), exp.as_ptr(), len as u32) },
1078 for i in 0..len { dst[i] = dst[i].powf(exp[i]); }
1079 );
1080}