1use std::sync::atomic::{AtomicU32, Ordering};
8
// FFI probe supplied by the host environment (linked in externally).
extern "C" {
    /// Reports the host's vector-op ABI version; by the caching logic in
    /// `host_version`, a return of 0 means "host vector ops unavailable".
    fn sesh_vec_version() -> u32;
}
16
// Cached result of `sesh_vec_version()`: 0 = not yet probed,
// `u32::MAX` = probed and unavailable, any other value = host ABI version.
static HOST_VEC_VERSION: AtomicU32 = AtomicU32::new(0);
19
20fn host_version() -> u32 {
21 let v = HOST_VEC_VERSION.load(Ordering::Relaxed);
22 if v != 0 {
23 return v;
24 }
25 let v = unsafe { sesh_vec_version() };
26 let store = if v == 0 { u32::MAX } else { v };
28 HOST_VEC_VERSION.store(store, Ordering::Relaxed);
29 v
30}
31
32#[inline]
33fn use_host_ops() -> bool {
34 host_version() > 0 && host_version() != u32::MAX
35}
36
// Host-accelerated (presumably SIMD) implementations of the vector ops in
// this module. Each is expected to mirror the scalar fallback in the
// correspondingly named `vec_*` function below — confirm against the host
// implementation. `len` is always an element count already clamped to the
// shortest slice involved by the Rust-side wrapper.
extern "C" {
    fn sesh_vec_copy_host(dst: *mut f32, src: *const f32, len: u32);
    fn sesh_vec_fill_host(dst: *mut f32, value: f32, len: u32);
    fn sesh_vec_add_host(dst: *mut f32, a: *const f32, b: *const f32, len: u32);
    fn sesh_vec_add_scalar_host(dst: *mut f32, value: f32, len: u32);
    fn sesh_vec_mul_host(dst: *mut f32, a: *const f32, b: *const f32, len: u32);
    fn sesh_vec_mul_scalar_host(dst: *mut f32, value: f32, len: u32);
    fn sesh_vec_mul_add_host(dst: *mut f32, src: *const f32, gain: f32, len: u32);
    fn sesh_vec_clamp_host(dst: *mut f32, src: *const f32, min: f32, max: f32, len: u32);
    // Ring-buffer write advances the cursor through `pos` (in/out).
    fn sesh_vec_ring_write_host(
        buf: *mut f32, buf_len: u32, pos: *mut u32, src: *const f32, len: u32,
    );
    // Ring-buffer read starting `offset` samples behind `pos`.
    fn sesh_vec_ring_read_host(
        buf: *const f32, buf_len: u32, pos: u32, dst: *mut f32, offset: u32, len: u32,
    );
    // Fractional delay-line read; `time` is a per-sample delay in samples.
    fn sesh_vec_delay_read_host(
        buf: *const f32, buf_len: u32, pos: u32, dst: *mut f32, time: *const f32, len: u32,
    );
    // Stateful kernels: `phase`/`state` point at the same repr(C) state the
    // scalar fallbacks mutate (BiquadState = 4 f32s, EnvelopeState = 1 f32).
    fn sesh_vec_osc_host(
        phase: *mut f32, dst: *mut f32, freq: f32, waveform: u32, sample_rate: f32, len: u32,
    );
    fn sesh_vec_biquad_host(
        state: *mut f32, dst: *mut f32, src: *const f32,
        cutoff: *const f32, q: *const f32, gain: *const f32,
        filter_type: u32, sample_rate: f32, len: u32,
    );
    fn sesh_vec_envelope_host(
        state: *mut f32, dst: *mut f32, src: *const f32,
        attack: *const f32, release: *const f32,
        mode: u32, sample_rate: f32, len: u32,
    );
    // Element-wise waveshaping / math kernels.
    fn sesh_vec_tanh_host(dst: *mut f32, src: *const f32, drive: *const f32, len: u32);
    fn sesh_vec_hard_clip_host(dst: *mut f32, src: *const f32, threshold: *const f32, len: u32);
    fn sesh_vec_abs_host(dst: *mut f32, src: *const f32, len: u32);
    fn sesh_vec_neg_host(dst: *mut f32, src: *const f32, len: u32);
    fn sesh_vec_sqrt_host(dst: *mut f32, src: *const f32, len: u32);
    fn sesh_vec_recip_host(dst: *mut f32, src: *const f32, len: u32);
    fn sesh_vec_div_host(dst: *mut f32, a: *const f32, b: *const f32, len: u32);
    fn sesh_vec_pow_host(dst: *mut f32, src: *const f32, exp: *const f32, len: u32);
}
81
/// Oscillator waveform selector for `vec_osc`.
///
/// `repr(u32)` so the discriminant can be passed across the C ABI to
/// `sesh_vec_osc_host` unchanged.
#[repr(u32)]
#[derive(Clone, Copy)]
pub enum Waveform {
    Sine = 0,
    Triangle = 1,
    Saw = 2,
    Square = 3,
}
95
/// Biquad filter response selector for `vec_biquad`.
///
/// `repr(u32)` so the discriminant can be passed across the C ABI to
/// `sesh_vec_biquad_host` unchanged. In the scalar fallback, the `gain`
/// parameter only affects `Peak`, `LowShelf`, and `HighShelf`.
#[repr(u32)]
#[derive(Clone, Copy)]
pub enum FilterType {
    Lowpass = 0,
    Highpass = 1,
    Bandpass = 2,
    Notch = 3,
    Peak = 4,
    LowShelf = 5,
    HighShelf = 6,
    Allpass = 7,
}
113
/// Direct Form I biquad history, as used by `vec_biquad`.
///
/// `repr(C)` because it is reinterpreted as a run of four `f32`s when passed
/// to `sesh_vec_biquad_host`.
#[repr(C)]
pub struct BiquadState {
    // x1/x2: previous two input samples.
    pub x1: f32,
    pub x2: f32,
    // y1/y2: previous two output samples.
    pub y1: f32,
    pub y2: f32,
}
122
123impl BiquadState {
124 pub const fn new() -> Self {
125 Self { x1: 0.0, x2: 0.0, y1: 0.0, y2: 0.0 }
126 }
127}
128
/// Envelope detector mode for `vec_envelope`.
///
/// `repr(u32)` so the discriminant can be passed across the C ABI to
/// `sesh_vec_envelope_host` unchanged.
#[repr(u32)]
#[derive(Clone, Copy)]
pub enum EnvelopeMode {
    // Track the rectified input |x|.
    Peak = 0,
    // Track the smoothed square x^2; the output is its square root.
    Rms = 1,
}
138
/// Running envelope level for `vec_envelope`.
///
/// `repr(C)` because it is reinterpreted as a single `f32` when passed to
/// `sesh_vec_envelope_host`. In RMS mode `current` holds the *squared* level.
#[repr(C)]
pub struct EnvelopeState {
    pub current: f32,
}
144
145impl EnvelopeState {
146 pub const fn new() -> Self {
147 Self { current: 0.0 }
148 }
149}
150
151pub fn vec_copy(dst: &mut [f32], src: &[f32]) {
157 let len = dst.len().min(src.len());
158 if use_host_ops() {
159 unsafe { sesh_vec_copy_host(dst.as_mut_ptr(), src.as_ptr(), len as u32) }
160 } else {
161 dst[..len].copy_from_slice(&src[..len]);
162 }
163}
164
165pub fn vec_fill(dst: &mut [f32], value: f32) {
167 let len = dst.len();
168 if use_host_ops() {
169 unsafe { sesh_vec_fill_host(dst.as_mut_ptr(), value, len as u32) }
170 } else {
171 for s in dst.iter_mut() {
172 *s = value;
173 }
174 }
175}
176
177pub fn vec_add(dst: &mut [f32], a: &[f32], b: &[f32]) {
179 let len = dst.len().min(a.len()).min(b.len());
180 if use_host_ops() {
181 unsafe { sesh_vec_add_host(dst.as_mut_ptr(), a.as_ptr(), b.as_ptr(), len as u32) }
182 } else {
183 for i in 0..len {
184 dst[i] = a[i] + b[i];
185 }
186 }
187}
188
/// In-place accumulate: `dst[i] += src[i]` over the common prefix.
/// Always scalar — no dedicated host op is declared for this operation.
pub fn vec_add_buf(dst: &mut [f32], src: &[f32]) {
    for (acc, &s) in dst.iter_mut().zip(src) {
        *acc += s;
    }
}
196
197pub fn vec_add_scalar(dst: &mut [f32], value: f32) {
199 let len = dst.len();
200 if use_host_ops() {
201 unsafe { sesh_vec_add_scalar_host(dst.as_mut_ptr(), value, len as u32) }
202 } else {
203 for s in dst.iter_mut() {
204 *s += value;
205 }
206 }
207}
208
209pub fn vec_mul(dst: &mut [f32], a: &[f32], b: &[f32]) {
211 let len = dst.len().min(a.len()).min(b.len());
212 if use_host_ops() {
213 unsafe { sesh_vec_mul_host(dst.as_mut_ptr(), a.as_ptr(), b.as_ptr(), len as u32) }
214 } else {
215 for i in 0..len {
216 dst[i] = a[i] * b[i];
217 }
218 }
219}
220
221pub fn vec_mul_scalar(dst: &mut [f32], value: f32) {
223 let len = dst.len();
224 if use_host_ops() {
225 unsafe { sesh_vec_mul_scalar_host(dst.as_mut_ptr(), value, len as u32) }
226 } else {
227 for s in dst.iter_mut() {
228 *s *= value;
229 }
230 }
231}
232
233pub fn vec_mul_add(dst: &mut [f32], src: &[f32], gain: f32) {
235 let len = dst.len().min(src.len());
236 if use_host_ops() {
237 unsafe { sesh_vec_mul_add_host(dst.as_mut_ptr(), src.as_ptr(), gain, len as u32) }
238 } else {
239 for i in 0..len {
240 dst[i] += src[i] * gain;
241 }
242 }
243}
244
245pub fn vec_clamp(dst: &mut [f32], src: &[f32], min: f32, max: f32) {
247 let len = dst.len().min(src.len());
248 if use_host_ops() {
249 unsafe { sesh_vec_clamp_host(dst.as_mut_ptr(), src.as_ptr(), min, max, len as u32) }
250 } else {
251 for i in 0..len {
252 dst[i] = src[i].clamp(min, max);
253 }
254 }
255}
256
257pub fn vec_ring_write(buf: &mut [f32], pos: &mut usize, src: &[f32]) {
264 let buf_len = buf.len();
265 let frames = src.len();
266 if use_host_ops() {
267 let mut pos32 = *pos as u32;
268 unsafe {
269 sesh_vec_ring_write_host(
270 buf.as_mut_ptr(), buf_len as u32, &mut pos32, src.as_ptr(), frames as u32,
271 );
272 }
273 *pos = pos32 as usize;
274 } else {
275 for i in 0..frames {
276 buf[(*pos + i) % buf_len] = src[i];
277 }
278 *pos = (*pos + frames) % buf_len;
279 }
280}
281
282pub fn vec_ring_read(buf: &[f32], pos: usize, dst: &mut [f32], offset: usize) {
284 let buf_len = buf.len();
285 let frames = dst.len();
286 if use_host_ops() {
287 unsafe {
288 sesh_vec_ring_read_host(
289 buf.as_ptr(), buf_len as u32, pos as u32,
290 dst.as_mut_ptr(), offset as u32, frames as u32,
291 );
292 }
293 } else {
294 let start = (pos + buf_len - offset) % buf_len;
295 for i in 0..frames {
296 dst[i] = buf[(start + i) % buf_len];
297 }
298 }
299}
300
/// Fractional delay-line read with linear interpolation.
///
/// For each output sample `i`, `time[i]` is the delay in samples (integer
/// part selects the tap, fractional part interpolates toward the next-older
/// sample). Processes `min(dst.len(), time.len())` samples.
///
/// NOTE(review): the indexing implies `pos` is the ring write cursor *after*
/// the current block of `frames` samples was written, so output `i` is
/// delayed relative to the sample written at `pos - frames + i` — confirm
/// against callers. Negative or oversized `time` values are not guarded.
pub fn vec_delay_read(buf: &[f32], pos: usize, dst: &mut [f32], time: &[f32]) {
    let buf_len = buf.len();
    let frames = dst.len().min(time.len());
    if use_host_ops() {
        unsafe {
            sesh_vec_delay_read_host(
                buf.as_ptr(), buf_len as u32, pos as u32,
                dst.as_mut_ptr(), time.as_ptr(), frames as u32,
            );
        }
    } else {
        for i in 0..frames {
            // Ring position where this output sample's "now" was written.
            let write_pos_at_i = (pos + buf_len - frames + i) % buf_len;

            // Split the delay into whole samples + fractional remainder.
            let delay_int = time[i] as usize;
            let delay_frac = time[i] - delay_int as f32;

            // idx1 = tap `delay_int` samples back; idx2 = one sample older.
            let idx1 = (write_pos_at_i + buf_len - delay_int) % buf_len;
            let idx2 = (idx1 + buf_len - 1) % buf_len;

            // Interpolate toward the older sample, giving an effective delay
            // of delay_int + delay_frac samples.
            dst[i] = buf[idx1] + delay_frac * (buf[idx2] - buf[idx1]);
        }
    }
}
335
336pub fn vec_osc(
342 phase: &mut f32,
343 dst: &mut [f32],
344 freq: f32,
345 waveform: Waveform,
346 sample_rate: f32,
347) {
348 let frames = dst.len();
349 if use_host_ops() {
350 unsafe {
351 sesh_vec_osc_host(
352 phase as *mut f32, dst.as_mut_ptr(),
353 freq, waveform as u32, sample_rate, frames as u32,
354 );
355 }
356 } else {
357 let phase_inc = freq / sample_rate;
358 for i in 0..frames {
359 dst[i] = match waveform {
360 Waveform::Sine => (*phase * std::f32::consts::TAU).sin(),
361 Waveform::Triangle => 4.0 * (*phase - (*phase + 0.5).floor()).abs() - 1.0,
362 Waveform::Saw => 2.0 * (*phase - (*phase + 0.5).floor()),
363 Waveform::Square => if *phase % 1.0 < 0.5 { 1.0 } else { -1.0 },
364 };
365 *phase += phase_inc;
366 if *phase >= 1.0 {
367 *phase -= 1.0;
368 }
369 }
370 }
371}
372
/// Per-sample-modulated biquad filter, Direct Form I.
///
/// Coefficients are recomputed every sample from `cutoff[i]` (Hz, given the
/// `TAU * f / sample_rate` mapping), `q[i]`, and `gain[i]`, using the Audio
/// EQ Cookbook (R. Bristow-Johnson) formulas. The `10^(gain/40)` mapping
/// matches the cookbook's `A`, which implies `gain` is in decibels — confirm
/// against callers. `gain` only affects Peak and the two Shelf types.
/// Processes `min` of all five slice lengths; `state` carries the filter
/// history across calls.
pub fn vec_biquad(
    state: &mut BiquadState,
    dst: &mut [f32],
    src: &[f32],
    cutoff: &[f32],
    q: &[f32],
    gain: &[f32],
    filter_type: FilterType,
    sample_rate: f32,
) {
    let frames = dst.len().min(src.len()).min(cutoff.len()).min(q.len()).min(gain.len());
    if use_host_ops() {
        unsafe {
            sesh_vec_biquad_host(
                // BiquadState is repr(C) with four f32 fields, so it can be
                // handed to the host as a run of 4 floats.
                state as *mut BiquadState as *mut f32,
                dst.as_mut_ptr(), src.as_ptr(),
                cutoff.as_ptr(), q.as_ptr(), gain.as_ptr(),
                filter_type as u32, sample_rate, frames as u32,
            );
        }
    } else {
        for i in 0..frames {
            // Cookbook intermediates: w0 = angular frequency per sample.
            let w0 = std::f32::consts::TAU * cutoff[i] / sample_rate;
            let cos_w0 = w0.cos();
            let sin_w0 = w0.sin();
            let alpha = sin_w0 / (2.0 * q[i]);
            let a_db = gain[i];
            // Cookbook "A": 10^(dB/40) == sqrt(10^(dB/20)).
            let a_lin = 10.0f32.powf(a_db / 40.0);

            // Unnormalized transfer-function coefficients per filter type.
            let (b0, b1, b2, a0, a1, a2) = match filter_type {
                FilterType::Lowpass => {
                    let b1 = 1.0 - cos_w0;
                    let b0 = b1 / 2.0;
                    (b0, b1, b0, 1.0 + alpha, -2.0 * cos_w0, 1.0 - alpha)
                }
                FilterType::Highpass => {
                    let b1 = -(1.0 + cos_w0);
                    let b0 = (1.0 + cos_w0) / 2.0;
                    (b0, b1, b0, 1.0 + alpha, -2.0 * cos_w0, 1.0 - alpha)
                }
                FilterType::Bandpass => {
                    // Constant-skirt-gain bandpass (peak gain = Q per cookbook).
                    (alpha, 0.0, -alpha, 1.0 + alpha, -2.0 * cos_w0, 1.0 - alpha)
                }
                FilterType::Notch => {
                    (1.0, -2.0 * cos_w0, 1.0, 1.0 + alpha, -2.0 * cos_w0, 1.0 - alpha)
                }
                FilterType::Peak => {
                    (
                        1.0 + alpha * a_lin,
                        -2.0 * cos_w0,
                        1.0 - alpha * a_lin,
                        1.0 + alpha / a_lin,
                        -2.0 * cos_w0,
                        1.0 - alpha / a_lin,
                    )
                }
                FilterType::LowShelf => {
                    let two_sqrt_a_alpha = 2.0 * a_lin.sqrt() * alpha;
                    (
                        a_lin * ((a_lin + 1.0) - (a_lin - 1.0) * cos_w0 + two_sqrt_a_alpha),
                        2.0 * a_lin * ((a_lin - 1.0) - (a_lin + 1.0) * cos_w0),
                        a_lin * ((a_lin + 1.0) - (a_lin - 1.0) * cos_w0 - two_sqrt_a_alpha),
                        (a_lin + 1.0) + (a_lin - 1.0) * cos_w0 + two_sqrt_a_alpha,
                        -2.0 * ((a_lin - 1.0) + (a_lin + 1.0) * cos_w0),
                        (a_lin + 1.0) + (a_lin - 1.0) * cos_w0 - two_sqrt_a_alpha,
                    )
                }
                FilterType::HighShelf => {
                    let two_sqrt_a_alpha = 2.0 * a_lin.sqrt() * alpha;
                    (
                        a_lin * ((a_lin + 1.0) + (a_lin - 1.0) * cos_w0 + two_sqrt_a_alpha),
                        -2.0 * a_lin * ((a_lin - 1.0) + (a_lin + 1.0) * cos_w0),
                        a_lin * ((a_lin + 1.0) + (a_lin - 1.0) * cos_w0 - two_sqrt_a_alpha),
                        (a_lin + 1.0) - (a_lin - 1.0) * cos_w0 + two_sqrt_a_alpha,
                        2.0 * ((a_lin - 1.0) - (a_lin + 1.0) * cos_w0),
                        (a_lin + 1.0) - (a_lin - 1.0) * cos_w0 - two_sqrt_a_alpha,
                    )
                }
                FilterType::Allpass => {
                    (1.0 - alpha, -2.0 * cos_w0, 1.0 + alpha, 1.0 + alpha, -2.0 * cos_w0, 1.0 - alpha)
                }
            };

            // Normalize by a0 so the difference equation needs only five
            // coefficients.
            let b0 = b0 / a0;
            let b1 = b1 / a0;
            let b2 = b2 / a0;
            let a1 = a1 / a0;
            let a2 = a2 / a0;

            // Direct Form I difference equation.
            let x0 = src[i];
            let y0 = b0 * x0 + b1 * state.x1 + b2 * state.x2
                - a1 * state.y1 - a2 * state.y2;

            // Shift the input/output history.
            state.x2 = state.x1;
            state.x1 = x0;
            state.y2 = state.y1;
            state.y1 = y0;

            dst[i] = y0;
        }
    }
}
484
/// Envelope follower with per-sample attack/release times.
///
/// Peak mode tracks the rectified input |x|; RMS mode tracks the smoothed
/// square and outputs its square root. Smoothing is a one-pole filter with
/// coefficient `exp(-1 / (t * sample_rate))` — the `t * sample_rate`
/// seconds-to-samples conversion implies `attack`/`release` are in seconds.
/// The attack coefficient is used while the input exceeds the envelope, the
/// release coefficient otherwise. `state.current` carries the running level
/// (the *squared* level in RMS mode) across calls. Processes `min` of all
/// four slice lengths.
pub fn vec_envelope(
    state: &mut EnvelopeState,
    dst: &mut [f32],
    src: &[f32],
    attack: &[f32],
    release: &[f32],
    mode: EnvelopeMode,
    sample_rate: f32,
) {
    let frames = dst.len().min(src.len()).min(attack.len()).min(release.len());
    if use_host_ops() {
        unsafe {
            sesh_vec_envelope_host(
                // EnvelopeState is repr(C) with a single f32 field.
                state as *mut EnvelopeState as *mut f32,
                dst.as_mut_ptr(), src.as_ptr(),
                attack.as_ptr(), release.as_ptr(),
                mode as u32, sample_rate, frames as u32,
            );
        }
    } else {
        for i in 0..frames {
            // Detector: rectified input (Peak) or instantaneous power (Rms).
            let input_level = match mode {
                EnvelopeMode::Peak => src[i].abs(),
                EnvelopeMode::Rms => src[i] * src[i],
            };

            // One-pole coefficients; a time of 0 gives exp(-inf) = 0, i.e.
            // instantaneous tracking.
            let att_coeff = (-1.0 / (attack[i] * sample_rate)).exp();
            let rel_coeff = (-1.0 / (release[i] * sample_rate)).exp();

            // Rising input uses the attack time, falling input the release.
            let coeff = if input_level > state.current { att_coeff } else { rel_coeff };
            state.current = coeff * state.current + (1.0 - coeff) * input_level;

            dst[i] = match mode {
                EnvelopeMode::Peak => state.current,
                // Undo the squaring for the RMS output.
                EnvelopeMode::Rms => state.current.sqrt(),
            };
        }
    }
}
532
533pub fn vec_tanh(dst: &mut [f32], src: &[f32], drive: &[f32]) {
539 let len = dst.len().min(src.len()).min(drive.len());
540 if use_host_ops() {
541 unsafe { sesh_vec_tanh_host(dst.as_mut_ptr(), src.as_ptr(), drive.as_ptr(), len as u32) }
542 } else {
543 for i in 0..len {
544 dst[i] = (src[i] * drive[i]).tanh();
545 }
546 }
547}
548
549pub fn vec_hard_clip(dst: &mut [f32], src: &[f32], threshold: &[f32]) {
551 let len = dst.len().min(src.len()).min(threshold.len());
552 if use_host_ops() {
553 unsafe {
554 sesh_vec_hard_clip_host(dst.as_mut_ptr(), src.as_ptr(), threshold.as_ptr(), len as u32)
555 }
556 } else {
557 for i in 0..len {
558 dst[i] = src[i].clamp(-threshold[i], threshold[i]);
559 }
560 }
561}
562
563pub fn vec_abs(dst: &mut [f32], src: &[f32]) {
569 let len = dst.len().min(src.len());
570 if use_host_ops() {
571 unsafe { sesh_vec_abs_host(dst.as_mut_ptr(), src.as_ptr(), len as u32) }
572 } else {
573 for i in 0..len {
574 dst[i] = src[i].abs();
575 }
576 }
577}
578
579pub fn vec_neg(dst: &mut [f32], src: &[f32]) {
581 let len = dst.len().min(src.len());
582 if use_host_ops() {
583 unsafe { sesh_vec_neg_host(dst.as_mut_ptr(), src.as_ptr(), len as u32) }
584 } else {
585 for i in 0..len {
586 dst[i] = -src[i];
587 }
588 }
589}
590
591pub fn vec_sqrt(dst: &mut [f32], src: &[f32]) {
593 let len = dst.len().min(src.len());
594 if use_host_ops() {
595 unsafe { sesh_vec_sqrt_host(dst.as_mut_ptr(), src.as_ptr(), len as u32) }
596 } else {
597 for i in 0..len {
598 dst[i] = src[i].sqrt();
599 }
600 }
601}
602
603pub fn vec_recip(dst: &mut [f32], src: &[f32]) {
605 let len = dst.len().min(src.len());
606 if use_host_ops() {
607 unsafe { sesh_vec_recip_host(dst.as_mut_ptr(), src.as_ptr(), len as u32) }
608 } else {
609 for i in 0..len {
610 dst[i] = 1.0 / src[i];
611 }
612 }
613}
614
615pub fn vec_div(dst: &mut [f32], a: &[f32], b: &[f32]) {
617 let len = dst.len().min(a.len()).min(b.len());
618 if use_host_ops() {
619 unsafe { sesh_vec_div_host(dst.as_mut_ptr(), a.as_ptr(), b.as_ptr(), len as u32) }
620 } else {
621 for i in 0..len {
622 dst[i] = a[i] / b[i];
623 }
624 }
625}
626
627pub fn vec_pow(dst: &mut [f32], src: &[f32], exp: &[f32]) {
629 let len = dst.len().min(src.len()).min(exp.len());
630 if use_host_ops() {
631 unsafe { sesh_vec_pow_host(dst.as_mut_ptr(), src.as_ptr(), exp.as_ptr(), len as u32) }
632 } else {
633 for i in 0..len {
634 dst[i] = src[i].powf(exp[i]);
635 }
636 }
637}