//! synpad 0.1.0
//!
//! A full-featured Matrix chat client built with Dioxus
#[cfg(any(feature = "desktop", feature = "mobile"))]
mod platform_voice_recorder {
    use std::io::Cursor;
    use std::sync::{Arc, Mutex};
    use std::time::Instant;

    use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
    use cpal::{FromSample, Sample, SampleFormat, SizedSample, Stream};
    use dioxus::prelude::*;

    const WAVEFORM_BUCKETS: usize = 48;

    /// A finished voice recording, ready to be sent as an audio message.
    pub struct RecordedVoiceMessage {
        /// Encoded audio bytes (WAV, 16-bit PCM — see `encode_wav`).
        pub data: Vec<u8>,
        /// Total recording length in milliseconds.
        pub duration_ms: u64,
        /// Generated file name, `voice-message-<uuid>.wav`.
        pub file_name: String,
        /// MIME type of `data` (`audio/wav` for recordings made here).
        pub mime_type: String,
        /// Normalized per-bucket peak amplitudes in `[0.0, 1.0]` for UI display.
        pub waveform: Vec<f32>,
    }

    /// Live capture state held while the microphone is recording.
    struct RecordingSession {
        /// Active cpal input stream; kept alive so capture continues.
        stream: Stream,
        /// Interleaved f32 samples, appended by the audio callback thread.
        samples: Arc<Mutex<Vec<f32>>>,
        /// Sample rate of the captured audio, in Hz.
        sample_rate: u32,
        /// Number of interleaved channels in `samples`.
        channels: u16,
        /// When capture started; used for the elapsed-time display.
        started_at: Instant,
    }

    fn build_recording_stream<T>(
        device: &cpal::Device,
        config: &cpal::StreamConfig,
        samples: Arc<Mutex<Vec<f32>>>,
    ) -> Result<Stream, String>
    where
        T: Sample + SizedSample,
        f32: FromSample<T>,
    {
        device
            .build_input_stream(
                config,
                move |data: &[T], _| {
                    if let Ok(mut buffer) = samples.lock() {
                        buffer.extend(data.iter().copied().map(f32::from_sample));
                    }
                },
                move |err| {
                    tracing::error!("Voice recorder stream error: {err}");
                },
                None,
            )
            .map_err(|e| format!("Failed to start microphone capture: {e}"))
    }

    /// Open the default microphone and begin capturing samples.
    ///
    /// Uses the device's default input configuration, builds a stream for its
    /// native sample format (converted to f32), and starts capture
    /// immediately. The returned session owns the stream; dropping it stops
    /// capture.
    fn start_recording() -> Result<RecordingSession, String> {
        let host = cpal::default_host();
        let device = host
            .default_input_device()
            .ok_or_else(|| "No microphone input device is available".to_string())?;
        let supported_config = device
            .default_input_config()
            .map_err(|e| format!("Failed to read microphone configuration: {e}"))?;
        let config = supported_config.config();
        let samples = Arc::new(Mutex::new(Vec::new()));

        // Dispatch on the device's native sample format; each arm captures as
        // that type and converts to f32 inside the stream callback.
        let stream = match supported_config.sample_format() {
            SampleFormat::F32 => build_recording_stream::<f32>(&device, &config, samples.clone())?,
            SampleFormat::I16 => build_recording_stream::<i16>(&device, &config, samples.clone())?,
            SampleFormat::U16 => build_recording_stream::<u16>(&device, &config, samples.clone())?,
            other => {
                return Err(format!("Unsupported microphone sample format: {other:?}"));
            }
        };

        stream
            .play()
            .map_err(|e| format!("Failed to access microphone input: {e}"))?;

        Ok(RecordingSession {
            stream,
            samples,
            // `sample_rate()` returns `cpal::SampleRate`, a newtype over u32;
            // unwrap it to match the field type.
            sample_rate: supported_config.sample_rate().0,
            channels: supported_config.channels(),
            started_at: Instant::now(),
        })
    }

    /// Serialize `samples` as an in-memory 16-bit PCM WAV file.
    ///
    /// Samples are expected in `[-1.0, 1.0]`; values outside that range are
    /// clamped before quantization.
    fn encode_wav(samples: &[f32], sample_rate: u32, channels: u16) -> Result<Vec<u8>, String> {
        let spec = hound::WavSpec {
            channels,
            sample_rate,
            bits_per_sample: 16,
            sample_format: hound::SampleFormat::Int,
        };

        let mut cursor = Cursor::new(Vec::new());
        let mut writer = hound::WavWriter::new(&mut cursor, spec)
            .map_err(|e| format!("Failed to encode voice message: {e}"))?;

        for &value in samples {
            // Clamp, then scale to the full i16 range for 16-bit PCM.
            let quantized = (value.clamp(-1.0, 1.0) * i16::MAX as f32) as i16;
            writer
                .write_sample(quantized)
                .map_err(|e| format!("Failed to encode voice message: {e}"))?;
        }

        // `finalize` consumes the writer, releasing the borrow of `cursor`.
        writer
            .finalize()
            .map_err(|e| format!("Failed to finalize voice message: {e}"))?;

        Ok(cursor.into_inner())
    }

    /// Reduce interleaved `samples` to at most `WAVEFORM_BUCKETS` normalized
    /// peak values in `[0.0, 1.0]` for rendering a waveform preview.
    ///
    /// Multichannel input is mixed down by averaging the absolute value of
    /// each frame. Returns an empty vector for empty input.
    fn build_waveform(samples: &[f32], channels: u16) -> Vec<f32> {
        if samples.is_empty() {
            return Vec::new();
        }

        // Guard against a (bogus) zero channel count before dividing.
        let channels = usize::from(channels.max(1));
        let mono: Vec<f32> = if channels == 1 {
            samples.iter().map(|sample| sample.abs()).collect()
        } else {
            samples
                .chunks(channels)
                .map(|frame| {
                    // Average over the actual frame length so a partial
                    // trailing frame is not diluted by missing channels.
                    frame.iter().map(|sample| sample.abs()).sum::<f32>() / frame.len() as f32
                })
                .collect()
        };

        // Partition the mono signal into exactly `bucket_count` contiguous,
        // near-equal ranges. Chunking by a `div_ceil`-sized chunk instead
        // often yields far fewer buckets than requested (e.g. 100 samples
        // would give only 34 buckets), so compute each bucket's bounds.
        let bucket_count = mono.len().min(WAVEFORM_BUCKETS).max(1);
        let mut waveform = Vec::with_capacity(bucket_count);
        for bucket in 0..bucket_count {
            let start = bucket * mono.len() / bucket_count;
            // `bucket_count <= mono.len()` guarantees start < end <= len.
            let end = (bucket + 1) * mono.len() / bucket_count;
            let peak = mono[start..end]
                .iter()
                .copied()
                .fold(0.0f32, |acc, sample| acc.max(sample));
            waveform.push(peak);
        }

        // Normalize so the loudest bucket is exactly 1.0; skip for silence
        // to avoid dividing by zero.
        let max_peak = waveform
            .iter()
            .copied()
            .fold(0.0f32, |acc, sample| acc.max(sample));
        if max_peak > 0.0 {
            waveform.iter_mut().for_each(|sample| *sample /= max_peak);
        }

        waveform
    }

    /// Stop capture and package the session into a [`RecordedVoiceMessage`].
    ///
    /// Consumes the session (the stream is paused, then dropped), encodes the
    /// captured samples as WAV, and derives the duration plus a display
    /// waveform. Errors if nothing was captured or encoding fails.
    fn finish_recording(session: RecordingSession) -> Result<RecordedVoiceMessage, String> {
        // Best-effort pause; the stream stops for good when `session` drops.
        let _ = session.stream.pause();

        // Move the buffer out instead of cloning it: the session is being
        // consumed, so nothing needs the shared sample buffer afterwards and
        // a full copy of the recording would be wasted work.
        let samples = {
            let mut guard = session
                .samples
                .lock()
                .map_err(|_| "Voice recorder data is unavailable".to_string())?;
            std::mem::take(&mut *guard)
        };

        if samples.is_empty() {
            return Err("No audio was captured from the microphone".to_string());
        }

        // frames = samples / channels; duration = frames / sample_rate.
        let duration_ms = ((samples.len() as f64 / f64::from(session.channels.max(1)))
            / f64::from(session.sample_rate)
            * 1000.0)
            .round() as u64;
        let waveform = build_waveform(&samples, session.channels);
        let data = encode_wav(&samples, session.sample_rate, session.channels)?;

        Ok(RecordedVoiceMessage {
            data,
            duration_ms,
            file_name: format!("voice-message-{}.wav", uuid::Uuid::new_v4()),
            mime_type: "audio/wav".to_string(),
            waveform,
        })
    }

    /// Microphone recording widget.
    ///
    /// Renders a start button; while recording, shows a pulsing dot, the
    /// elapsed time, and stop/cancel controls. `on_send` is called with the
    /// finished [`RecordedVoiceMessage`] when the user stops; `on_cancel`
    /// fires when the user discards the recording. Errors are surfaced in a
    /// dismissable banner.
    #[component]
    pub fn VoiceRecorder(
        on_send: EventHandler<RecordedVoiceMessage>,
        on_cancel: EventHandler<()>,
    ) -> Element {
        let mut is_recording = use_signal(|| false);
        let mut elapsed_seconds = use_signal(|| 0u64);
        let mut error = use_signal(|| Option::<String>::None);
        let mut recording_started_at = use_signal(|| Option::<Instant>::None);
        // Bumped for each new recording; lets a stale timer task notice it
        // belongs to an earlier session and stop itself.
        let mut recording_generation = use_signal(|| 0u64);
        let mut session = use_signal(|| Option::<RecordingSession>::None);

        // Timer task: while this generation is still recording, refresh the
        // elapsed-seconds display every 250 ms.
        use_effect(move || {
            let generation = *recording_generation.read();
            // Generation 0 means no recording has ever started.
            if generation == 0 || !*is_recording.read() {
                return;
            }

            spawn(async move {
                loop {
                    tokio::time::sleep(std::time::Duration::from_millis(250)).await;
                    // Exit when recording stops or a newer session takes over.
                    if !*is_recording.read() || *recording_generation.read() != generation {
                        break;
                    }
                    if let Some(started_at) = *recording_started_at.read() {
                        elapsed_seconds.set(started_at.elapsed().as_secs());
                    }
                }
            });
        });

        // Split elapsed time into mm:ss for display.
        let elapsed = *elapsed_seconds.read();
        let minutes = elapsed / 60;
        let seconds = elapsed % 60;

        rsx! {
            div {
                class: "voice-recorder",

                // Dismissable error banner (microphone/encoding failures).
                if let Some(message) = error.read().as_ref() {
                    div {
                        class: "message-input__error",
                        span { "{message}" }
                        button {
                            onclick: move |_| error.set(None),
                            ""
                        }
                    }
                }

                if *is_recording.read() {
                    div {
                        class: "voice-recorder__recording",
                        span { class: "voice-recorder__dot" }
                        span { class: "voice-recorder__time", "{minutes}:{seconds:02}" }
                        // Stop: finish the capture and hand the result to `on_send`.
                        button {
                            class: "voice-recorder__stop-btn",
                            title: "Stop recording",
                            onclick: move |_| {
                                is_recording.set(false);
                                recording_started_at.set(None);
                                if let Some(recording_session) = session.write().take() {
                                    match finish_recording(recording_session) {
                                        Ok(message) => {
                                            elapsed_seconds.set(0);
                                            on_send.call(message);
                                        }
                                        Err(err) => {
                                            elapsed_seconds.set(0);
                                            error.set(Some(err));
                                        }
                                    }
                                }
                            },
                            ""
                        }
                        // Cancel: drop the session (stopping the stream) without sending.
                        button {
                            class: "voice-recorder__cancel-btn",
                            title: "Cancel",
                            onclick: move |_| {
                                is_recording.set(false);
                                elapsed_seconds.set(0);
                                recording_started_at.set(None);
                                session.write().take();
                                on_cancel.call(());
                            },
                            ""
                        }
                    }
                } else {
                    // Idle state: a single button that starts a new recording.
                    button {
                        class: "voice-recorder__start-btn",
                        title: "Record voice message",
                        onclick: move |_| {
                            error.set(None);
                            match start_recording() {
                                Ok(recording_session) => {
                                    let next_generation = *recording_generation.read() + 1;
                                    elapsed_seconds.set(0);
                                    recording_started_at.set(Some(recording_session.started_at));
                                    session.set(Some(recording_session));
                                    is_recording.set(true);
                                    // Set generation last so the timer effect
                                    // sees a fully initialized session.
                                    recording_generation.set(next_generation);
                                }
                                Err(err) => {
                                    error.set(Some(err));
                                }
                            }
                        },
                        "🎤"
                    }
                }
            }
        }
    }
}

#[cfg(not(any(feature = "desktop", feature = "mobile")))]
mod platform_voice_recorder {
    use dioxus::prelude::*;

    /// Mirror of the desktop/mobile `RecordedVoiceMessage` so code naming this
    /// type still compiles on platforms without recording support.
    pub struct RecordedVoiceMessage {
        /// Encoded audio bytes.
        pub data: Vec<u8>,
        /// Total recording length in milliseconds.
        pub duration_ms: u64,
        /// File name for the uploaded recording.
        pub file_name: String,
        /// MIME type of `data`.
        pub mime_type: String,
        /// Normalized waveform amplitudes for UI display.
        pub waveform: Vec<f32>,
    }

    /// Stub recorder for builds without the `desktop`/`mobile` features:
    /// shows an explanatory message with a dismiss button that triggers
    /// `on_cancel`. `_on_send` is never called.
    #[component]
    pub fn VoiceRecorder(
        _on_send: EventHandler<RecordedVoiceMessage>,
        on_cancel: EventHandler<()>,
    ) -> Element {
        rsx! {
            div {
                class: "voice-recorder",
                div {
                    class: "message-input__error",
                    span { "Voice recording is only available in desktop and mobile builds." }
                    // Dismiss the notice by cancelling the recorder.
                    button {
                        onclick: move |_| on_cancel.call(()),
                        ""
                    }
                }
            }
        }
    }
}

pub use platform_voice_recorder::{RecordedVoiceMessage, VoiceRecorder};