use futures::StreamExt;
use livekit::track::RemoteAudioTrack;
use livekit::webrtc::audio_stream::native::NativeAudioStream;
use crate::audio::{AudioChunk, AudioFormat, SmartAudioBuffer};
use crate::error::Result;
use crate::runner::RealtimeRunner;
/// Sample rate (Hz) used by the default realtime input bridge.
const DEFAULT_SAMPLE_RATE: i32 = 24000;
/// Sample rate (Hz) used by the Gemini input bridge — presumably Gemini's
/// expected realtime input rate; confirm against the Gemini Live API docs.
const GEMINI_SAMPLE_RATE: i32 = 16000;
/// Both bridges open mono (single-channel) streams.
const DEFAULT_NUM_CHANNELS: i32 = 1;
/// Window length the `SmartAudioBuffer` accumulates before a flush yields a chunk.
const BUFFER_DURATION_MS: u32 = 40;
/// Forwards remote LiveKit audio into the realtime runner as 24 kHz PCM16.
///
/// Incoming frames are staged in a [`SmartAudioBuffer`]; whenever a full
/// `BUFFER_DURATION_MS` window is available it is base64-encoded and sent
/// through `runner.send_audio`. Once the track's stream ends, any leftover
/// samples are shipped as a final (possibly short) chunk.
///
/// # Errors
///
/// Propagates any error returned by `runner.send_audio`.
pub async fn bridge_input(track: RemoteAudioTrack, runner: &RealtimeRunner) -> Result<()> {
    let mut frames =
        NativeAudioStream::new(track.rtc_track(), DEFAULT_SAMPLE_RATE, DEFAULT_NUM_CHANNELS);
    let mut pending = SmartAudioBuffer::new(DEFAULT_SAMPLE_RATE as u32, BUFFER_DURATION_MS);

    // Shared encode-to-base64 step for both the steady-state and final flush.
    let encode =
        |samples| AudioChunk::from_i16_samples(samples, AudioFormat::pcm16_24khz()).to_base64();

    while let Some(frame) = frames.next().await {
        pending.push(&frame.data);
        if let Some(window) = pending.flush() {
            runner.send_audio(&encode(&window)).await?;
        }
    }

    // The stream is exhausted; drain whatever is still buffered.
    if let Some(leftover) = pending.flush_remaining() {
        runner.send_audio(&encode(&leftover)).await?;
    }
    Ok(())
}
/// Forwards remote LiveKit audio into the realtime runner as 16 kHz PCM16,
/// the rate configured for the Gemini input path.
///
/// Frames are staged in a [`SmartAudioBuffer`]; each full
/// `BUFFER_DURATION_MS` window is base64-encoded and handed to
/// `runner.send_audio`, and a trailing partial window is sent once the
/// track's stream ends.
///
/// # Errors
///
/// Propagates any error returned by `runner.send_audio`.
pub async fn bridge_gemini_input(track: RemoteAudioTrack, runner: &RealtimeRunner) -> Result<()> {
    let mut source =
        NativeAudioStream::new(track.rtc_track(), GEMINI_SAMPLE_RATE, DEFAULT_NUM_CHANNELS);
    let mut staged = SmartAudioBuffer::new(GEMINI_SAMPLE_RATE as u32, BUFFER_DURATION_MS);

    while let Some(frame) = source.next().await {
        staged.push(&frame.data);
        if let Some(ready) = staged.flush() {
            let payload =
                AudioChunk::from_i16_samples(&ready, AudioFormat::pcm16_16khz()).to_base64();
            runner.send_audio(&payload).await?;
        }
    }

    // Track closed: ship any partial window still sitting in the buffer.
    if let Some(tail) = staged.flush_remaining() {
        let payload =
            AudioChunk::from_i16_samples(&tail, AudioFormat::pcm16_16khz()).to_base64();
        runner.send_audio(&payload).await?;
    }
    Ok(())
}