use crate::FrameCount;
use crate::{
BackendSpecificError, BufferSize, Data, DefaultStreamConfigError, DeviceNameError,
DevicesError, InputCallbackInfo, OutputCallbackInfo, SampleFormat, SampleRate, StreamConfig,
SupportedBufferSize, SupportedStreamConfig, SupportedStreamConfigRange,
SupportedStreamConfigsError, COMMON_SAMPLE_RATES,
};
use once_cell::sync::Lazy;
use std;
use std::ffi::OsString;
use std::fmt;
use std::mem;
use std::ops::{Deref, DerefMut};
use std::os::windows::ffi::OsStringExt;
use std::ptr;
use std::slice;
use std::sync::{Arc, Mutex, MutexGuard};
use super::com;
use super::{windows_err_to_cpal_err, windows_err_to_cpal_err_message};
use std::ffi::c_void;
use windows::core::Interface;
use windows::core::GUID;
use windows::Win32::Devices::Properties;
use windows::Win32::Foundation;
use windows::Win32::Media::Audio::IAudioRenderClient;
use windows::Win32::Media::{Audio, KernelStreaming, Multimedia};
use windows::Win32::System::Com;
use windows::Win32::System::Com::StructuredStorage;
use windows::Win32::System::Ole;
use windows::Win32::System::Threading;
use super::stream::{AudioClientFlow, Stream, StreamInner};
use crate::{traits::DeviceTrait, BuildStreamError, StreamError};
/// Iterator over the stream-config ranges supported by an input device.
pub type SupportedInputConfigs = std::vec::IntoIter<SupportedStreamConfigRange>;
/// Iterator over the stream-config ranges supported by an output device.
pub type SupportedOutputConfigs = std::vec::IntoIter<SupportedStreamConfigRange>;
/// Wrapper around `IAudioClient` so it can be stored in `Arc<Mutex<Option<_>>>`.
#[derive(Clone)]
struct IAudioClientWrapper(Audio::IAudioClient);
// SAFETY: the wrapped COM interface pointer is only ever accessed through the
// surrounding Mutex in `Device::future_audio_client`.
// NOTE(review): cross-thread use of the interface also assumes the COM
// initialization performed by `com::com_initialized` permits it — confirm.
unsafe impl Send for IAudioClientWrapper {}
unsafe impl Sync for IAudioClientWrapper {}
/// A WASAPI endpoint device (either capture or render).
#[derive(Clone)]
pub struct Device {
    // Underlying multimedia device handle obtained from the enumerator.
    device: Audio::IMMDevice,
    // Lazily-activated audio client, cached so activation happens once and
    // can be handed off to a stream via `build_audioclient`.
    future_audio_client: Arc<Mutex<Option<IAudioClientWrapper>>>,
}
/// Trait implementation that forwards every call to the inherent methods on
/// `Device` below; no logic of its own.
impl DeviceTrait for Device {
    type SupportedInputConfigs = SupportedInputConfigs;
    type SupportedOutputConfigs = SupportedOutputConfigs;
    type Stream = Stream;

    fn name(&self) -> Result<String, DeviceNameError> {
        Device::name(self)
    }

    fn supported_input_configs(
        &self,
    ) -> Result<Self::SupportedInputConfigs, SupportedStreamConfigsError> {
        Device::supported_input_configs(self)
    }

    fn supported_output_configs(
        &self,
    ) -> Result<Self::SupportedOutputConfigs, SupportedStreamConfigsError> {
        Device::supported_output_configs(self)
    }

    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_input_config(self)
    }

    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_output_config(self)
    }

    /// Build the WASAPI capture stream, then wrap it with the callbacks.
    fn build_input_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        data_callback: D,
        error_callback: E,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        let stream_inner = self.build_input_stream_raw_inner(config, sample_format)?;
        Ok(Stream::new_input(
            stream_inner,
            data_callback,
            error_callback,
        ))
    }

    /// Build the WASAPI render stream, then wrap it with the callbacks.
    fn build_output_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        data_callback: D,
        error_callback: E,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        let stream_inner = self.build_output_stream_raw_inner(config, sample_format)?;
        Ok(Stream::new_output(
            stream_inner,
            data_callback,
            error_callback,
        ))
    }
}
/// Thin wrapper over `IMMEndpoint`, used only to query a device's data-flow
/// direction (capture vs. render).
struct Endpoint {
    endpoint: Audio::IMMEndpoint,
}
/// Owned copy of a wave-format description, in either the basic
/// `WAVEFORMATEX` layout or the larger `WAVEFORMATEXTENSIBLE` layout.
enum WaveFormat {
    Ex(Audio::WAVEFORMATEX),
    Extensible(Audio::WAVEFORMATEXTENSIBLE),
}
/// RAII guard for a `WAVEFORMATEX` allocated by WASAPI (e.g. `GetMixFormat`),
/// which must be released with `CoTaskMemFree`.
struct WaveFormatExPtr(*mut Audio::WAVEFORMATEX);

impl Drop for WaveFormatExPtr {
    fn drop(&mut self) {
        // SAFETY: the pointer came from a COM API that allocates with
        // CoTaskMemAlloc, so CoTaskMemFree is the matching deallocator.
        unsafe {
            Com::CoTaskMemFree(self.0 as *mut _);
        }
    }
}
impl WaveFormat {
    /// Copy the format pointed to by `ptr` into an owned `WaveFormat`,
    /// selecting the variant from `wFormatTag`. Returns `None` for tags other
    /// than plain PCM, IEEE float, or the extensible tag.
    ///
    /// `ptr` must be valid; for the extensible tag the allocation must hold a
    /// full `WAVEFORMATEXTENSIBLE`.
    pub fn copy_from_waveformatex_ptr(ptr: *const Audio::WAVEFORMATEX) -> Option<Self> {
        unsafe {
            match (*ptr).wFormatTag as u32 {
                Audio::WAVE_FORMAT_PCM | Multimedia::WAVE_FORMAT_IEEE_FLOAT => {
                    Some(WaveFormat::Ex(*ptr))
                }
                KernelStreaming::WAVE_FORMAT_EXTENSIBLE => {
                    // The extensible layout starts with the same header, so
                    // the pointer can be reinterpreted before copying.
                    let extensible_ptr = ptr as *const Audio::WAVEFORMATEXTENSIBLE;
                    Some(WaveFormat::Extensible(*extensible_ptr))
                }
                _ => None,
            }
        }
    }

    /// Raw pointer to the common `WAVEFORMATEX` header, for passing to WASAPI.
    pub fn as_ptr(&self) -> *const Audio::WAVEFORMATEX {
        self.deref() as *const _
    }
}
impl Deref for WaveFormat {
    type Target = Audio::WAVEFORMATEX;

    /// Borrow the common `WAVEFORMATEX` header shared by both layouts.
    fn deref(&self) -> &Self::Target {
        match self {
            WaveFormat::Ex(format) => format,
            WaveFormat::Extensible(extensible) => &extensible.Format,
        }
    }
}
impl DerefMut for WaveFormat {
    /// Mutably borrow the common `WAVEFORMATEX` header shared by both layouts.
    fn deref_mut(&mut self) -> &mut Self::Target {
        match self {
            WaveFormat::Ex(format) => format,
            WaveFormat::Extensible(extensible) => &mut extensible.Format,
        }
    }
}
/// Query the `IMMEndpoint` interface from an `IMMDevice`.
/// Panics if the cast fails, which is not expected for endpoint devices
/// produced by the device enumerator.
unsafe fn immendpoint_from_immdevice(device: Audio::IMMDevice) -> Audio::IMMEndpoint {
    device
        .cast::<Audio::IMMEndpoint>()
        .expect("could not query IMMDevice interface for IMMEndpoint")
}
/// Ask the endpoint whether it is a capture or render device.
/// Panics if the query fails.
unsafe fn data_flow_from_immendpoint(endpoint: &Audio::IMMEndpoint) -> Audio::EDataFlow {
    endpoint
        .GetDataFlow()
        .expect("could not get endpoint data_flow")
}
/// Ask WASAPI whether `waveformatex_ptr` is usable in shared mode on `client`.
///
/// Returns `Ok(false)` when the device rejects the format (`S_FALSE` or any
/// other failing HRESULT) and for unrecognized format tags; only device
/// invalidation is surfaced as an error.
///
/// # Safety
/// `waveformatex_ptr` must point to a valid format whose allocation size
/// matches its `wFormatTag` (a full `WAVEFORMATEXTENSIBLE` for the
/// extensible tag).
pub unsafe fn is_format_supported(
    client: &Audio::IAudioClient,
    waveformatex_ptr: *const Audio::WAVEFORMATEX,
) -> Result<bool, SupportedStreamConfigsError> {
    // Probe the format and classify the HRESULT. WASAPI may write a
    // "closest match" format through the second pointer; callers only need
    // to supply valid storage for it — the suggestion itself is discarded.
    let is_supported = |waveformatex_ptr, mut closest_waveformatex_ptr| {
        let result = client.IsFormatSupported(
            Audio::AUDCLNT_SHAREMODE_SHARED,
            waveformatex_ptr,
            &mut closest_waveformatex_ptr,
        );
        match result {
            Audio::AUDCLNT_E_DEVICE_INVALIDATED => {
                Err(SupportedStreamConfigsError::DeviceNotAvailable)
            }
            // Any other failure (or S_FALSE) simply means "not supported".
            r if r.is_err() => Ok(false),
            Foundation::S_FALSE => Ok(false),
            _ => Ok(true),
        }
    };
    // Stack storage for the "closest match" must be as large as the input
    // format, hence the two branches below.
    match (*waveformatex_ptr).wFormatTag as u32 {
        Audio::WAVE_FORMAT_PCM | Multimedia::WAVE_FORMAT_IEEE_FLOAT => {
            let mut closest_waveformatex = *waveformatex_ptr;
            let closest_waveformatex_ptr = &mut closest_waveformatex as *mut _;
            is_supported(waveformatex_ptr, closest_waveformatex_ptr)
        }
        KernelStreaming::WAVE_FORMAT_EXTENSIBLE => {
            let waveformatextensible_ptr = waveformatex_ptr as *const Audio::WAVEFORMATEXTENSIBLE;
            let mut closest_waveformatextensible = *waveformatextensible_ptr;
            let closest_waveformatextensible_ptr = &mut closest_waveformatextensible as *mut _;
            let closest_waveformatex_ptr =
                closest_waveformatextensible_ptr as *mut Audio::WAVEFORMATEX;
            is_supported(waveformatex_ptr, closest_waveformatex_ptr)
        }
        _ => Ok(false),
    }
}
/// Build a `SupportedStreamConfig` describing `waveformatex_ptr`, or `None`
/// if the tag / bit-depth combination is not one cpal supports here
/// (16-bit integer PCM or 32-bit IEEE float).
///
/// Buffer-size limits are queried through `IAudioClient2` when available;
/// otherwise an unbounded range is reported.
///
/// # Safety
/// `waveformatex_ptr` must point to a valid format whose allocation size
/// matches its `wFormatTag`.
unsafe fn format_from_waveformatex_ptr(
    waveformatex_ptr: *const Audio::WAVEFORMATEX,
    audio_client: &Audio::IAudioClient,
) -> Option<SupportedStreamConfig> {
    // Field-wise GUID comparison helper.
    fn cmp_guid(a: &GUID, b: &GUID) -> bool {
        (a.data1, a.data2, a.data3, a.data4) == (b.data1, b.data2, b.data3, b.data4)
    }
    let sample_format = match (
        (*waveformatex_ptr).wBitsPerSample,
        (*waveformatex_ptr).wFormatTag as u32,
    ) {
        (16, Audio::WAVE_FORMAT_PCM) => SampleFormat::I16,
        (32, Multimedia::WAVE_FORMAT_IEEE_FLOAT) => SampleFormat::F32,
        (n_bits, KernelStreaming::WAVE_FORMAT_EXTENSIBLE) => {
            // For extensible formats the real sample type is in SubFormat.
            let waveformatextensible_ptr = waveformatex_ptr as *const Audio::WAVEFORMATEXTENSIBLE;
            let sub = (*waveformatextensible_ptr).SubFormat;
            if n_bits == 16 && cmp_guid(&sub, &KernelStreaming::KSDATAFORMAT_SUBTYPE_PCM) {
                SampleFormat::I16
            } else if n_bits == 32 && cmp_guid(&sub, &Multimedia::KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) {
                SampleFormat::F32
            } else {
                return None;
            }
        }
        _ => return None,
    };
    let sample_rate = SampleRate((*waveformatex_ptr).nSamplesPerSec);
    // Durations are reported in 100-ns units; they stay 0 if the query fails.
    let (mut min_buffer_duration, mut max_buffer_duration) = (0, 0);
    let buffer_size_is_limited = audio_client
        .cast::<Audio::IAudioClient2>()
        .and_then(|audio_client| {
            audio_client.GetBufferSizeLimits(
                waveformatex_ptr,
                true,
                &mut min_buffer_duration,
                &mut max_buffer_duration,
            )
        })
        .is_ok();
    let buffer_size = if buffer_size_is_limited {
        SupportedBufferSize::Range {
            min: buffer_duration_to_frames(min_buffer_duration, sample_rate.0),
            max: buffer_duration_to_frames(max_buffer_duration, sample_rate.0),
        }
    } else {
        // No IAudioClient2 (or query failed): report an unbounded range.
        SupportedBufferSize::Range {
            min: 0,
            max: u32::max_value(),
        }
    };
    let format = SupportedStreamConfig {
        channels: (*waveformatex_ptr).nChannels as _,
        sample_rate,
        buffer_size,
        sample_format,
    };
    Some(format)
}
// SAFETY: `Device` holds a COM interface pointer plus a Mutex-guarded cache;
// all mutable state is serialized by that Mutex.
// NOTE(review): cross-thread use of `IMMDevice` also assumes the COM setup in
// `com::com_initialized` permits it — confirm.
unsafe impl Send for Device {}
unsafe impl Sync for Device {}
impl Device {
    /// Read the device's human-readable name from its property store
    /// (`DEVPKEY_Device_FriendlyName`).
    pub fn name(&self) -> Result<String, DeviceNameError> {
        unsafe {
            let property_store = self
                .device
                .OpenPropertyStore(StructuredStorage::STGM_READ)
                .expect("could not open property store");
            let mut property_value = property_store
                .GetValue(&Properties::DEVPKEY_Device_FriendlyName as *const _ as *const _)
                .map_err(|err| {
                    let description =
                        format!("failed to retrieve name from property store: {}", err);
                    let err = BackendSpecificError { description };
                    DeviceNameError::from(err)
                })?;
            let prop_variant = &property_value.Anonymous.Anonymous;
            // The friendly name must come back as a wide string.
            if prop_variant.vt != Ole::VT_LPWSTR.0 as _ {
                let description = format!(
                    "property store produced invalid data: {:?}",
                    prop_variant.vt
                );
                let err = BackendSpecificError { description };
                // NOTE(review): `property_value` is not PropVariantClear'd on
                // this early-return path — possible small leak; confirm.
                return Err(err.into());
            }
            // Measure the NUL-terminated UTF-16 string by hand.
            let ptr_utf16 = *(&prop_variant.Anonymous as *const _ as *const *const u16);
            let mut len = 0;
            while *ptr_utf16.offset(len) != 0 {
                len += 1;
            }
            let name_slice = slice::from_raw_parts(ptr_utf16, len as usize);
            let name_os_string: OsString = OsStringExt::from_wide(name_slice);
            // Fall back to lossy conversion for non-Unicode names.
            let name_string = match name_os_string.into_string() {
                Ok(string) => string,
                Err(os_string) => os_string.to_string_lossy().into(),
            };
            // Release the PROPVARIANT's allocation; a failure here is ignored.
            StructuredStorage::PropVariantClear(&mut property_value).ok();
            Ok(name_string)
        }
    }

    /// Wrap an `IMMDevice` with an empty audio-client cache.
    #[inline]
    fn from_immdevice(device: Audio::IMMDevice) -> Self {
        Device {
            device,
            future_audio_client: Arc::new(Mutex::new(None)),
        }
    }

    /// Lazily activate the `IAudioClient` for this device, caching it in
    /// `future_audio_client`. On success the returned guard holds `Some`.
    fn ensure_future_audio_client(
        &self,
    ) -> Result<MutexGuard<Option<IAudioClientWrapper>>, windows::core::Error> {
        let mut lock = self.future_audio_client.lock().unwrap();
        if lock.is_some() {
            return Ok(lock);
        }
        let audio_client: Audio::IAudioClient = unsafe {
            let mut audio_client = ptr::null_mut();
            self.device.Activate(
                &Audio::IAudioClient::IID,
                Com::CLSCTX_ALL,
                ptr::null_mut(),
                &mut audio_client,
            )?;
            // Activate succeeded, so the out-pointer must be non-null.
            assert!(!audio_client.is_null());
            // Adopt the raw interface pointer as an owned IAudioClient.
            mem::transmute::<_, Audio::IAudioClient>(audio_client as *mut _)
        };
        *lock = Some(IAudioClientWrapper(audio_client));
        Ok(lock)
    }

    /// Take ownership of the cached audio client (activating one if needed).
    /// This empties the cache, so a subsequent call activates a fresh client.
    #[inline]
    pub(crate) fn build_audioclient(&self) -> Result<Audio::IAudioClient, windows::core::Error> {
        let mut lock = self.ensure_future_audio_client()?;
        Ok(lock.take().unwrap().0)
    }

    /// Probe the device's shared-mode formats: start from the mix format and
    /// test each of `COMMON_SAMPLE_RATES` against it, yielding one
    /// single-rate config range per supported rate.
    fn supported_formats(&self) -> Result<SupportedInputConfigs, SupportedStreamConfigsError> {
        com::com_initialized();
        let lock = match self.ensure_future_audio_client() {
            Ok(lock) => lock,
            Err(ref e) if e.code() == Audio::AUDCLNT_E_DEVICE_INVALIDATED => {
                return Err(SupportedStreamConfigsError::DeviceNotAvailable)
            }
            Err(e) => {
                let description = format!("{}", e);
                let err = BackendSpecificError { description };
                return Err(err.into());
            }
        };
        let client = &lock.as_ref().unwrap().0;
        unsafe {
            // Mix format = device's preferred shared-mode format; the
            // wrapper frees it with CoTaskMemFree on drop.
            let default_waveformatex_ptr = client
                .GetMixFormat()
                .map(WaveFormatExPtr)
                .map_err(windows_err_to_cpal_err::<SupportedStreamConfigsError>)?;
            // Sanity check: the device must accept its own mix format.
            assert_eq!(
                is_format_supported(client, default_waveformatex_ptr.0)?,
                true
            );
            // Mutable copy used to vary the sample rate while probing.
            let mut test_format = {
                match WaveFormat::copy_from_waveformatex_ptr(default_waveformatex_ptr.0) {
                    Some(f) => f,
                    None => return Ok(vec![].into_iter()),
                }
            };
            let mut supported_sample_rates: Vec<u32> = Vec::new();
            for &rate in COMMON_SAMPLE_RATES {
                let rate = rate.0;
                test_format.nSamplesPerSec = rate;
                test_format.nAvgBytesPerSec =
                    rate * u32::from((*default_waveformatex_ptr.0).nBlockAlign);
                if is_format_supported(client, test_format.as_ptr())? {
                    supported_sample_rates.push(rate);
                }
            }
            // The default rate may not be in COMMON_SAMPLE_RATES; always
            // include it.
            let default_sr = (*default_waveformatex_ptr.0).nSamplesPerSec as _;
            if !supported_sample_rates.iter().any(|&r| r == default_sr) {
                supported_sample_rates.push(default_sr);
            }
            // Restore the original rate fields after probing.
            test_format.nSamplesPerSec = (*default_waveformatex_ptr.0).nSamplesPerSec;
            test_format.nAvgBytesPerSec = (*default_waveformatex_ptr.0).nAvgBytesPerSec;
            // Template config describing channels / buffer size / format.
            let format = match format_from_waveformatex_ptr(default_waveformatex_ptr.0, &client) {
                Some(fmt) => fmt,
                None => {
                    let description =
                        "could not create a `cpal::SupportedStreamConfig` from a `WAVEFORMATEX`"
                            .to_string();
                    let err = BackendSpecificError { description };
                    return Err(err.into());
                }
            };
            let mut supported_formats = Vec::with_capacity(supported_sample_rates.len());
            for rate in supported_sample_rates {
                supported_formats.push(SupportedStreamConfigRange {
                    channels: format.channels.clone(),
                    min_sample_rate: SampleRate(rate as _),
                    max_sample_rate: SampleRate(rate as _),
                    buffer_size: format.buffer_size.clone(),
                    sample_format: format.sample_format.clone(),
                })
            }
            Ok(supported_formats.into_iter())
        }
    }

    /// Supported configs if this is a capture device; empty otherwise.
    pub fn supported_input_configs(
        &self,
    ) -> Result<SupportedInputConfigs, SupportedStreamConfigsError> {
        if self.data_flow() == Audio::eCapture {
            self.supported_formats()
        } else {
            Ok(vec![].into_iter())
        }
    }

    /// Supported configs if this is a render device; empty otherwise.
    pub fn supported_output_configs(
        &self,
    ) -> Result<SupportedOutputConfigs, SupportedStreamConfigsError> {
        if self.data_flow() == Audio::eRender {
            self.supported_formats()
        } else {
            Ok(vec![].into_iter())
        }
    }

    /// The device's shared-mode mix format as a `SupportedStreamConfig`.
    fn default_format(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        com::com_initialized();
        let lock = match self.ensure_future_audio_client() {
            Ok(lock) => lock,
            Err(ref e) if e.code() == Audio::AUDCLNT_E_DEVICE_INVALIDATED => {
                return Err(DefaultStreamConfigError::DeviceNotAvailable)
            }
            Err(e) => {
                let description = format!("{}", e);
                let err = BackendSpecificError { description };
                return Err(err.into());
            }
        };
        let client = &lock.as_ref().unwrap().0;
        unsafe {
            let format_ptr = client
                .GetMixFormat()
                .map(WaveFormatExPtr)
                .map_err(windows_err_to_cpal_err::<DefaultStreamConfigError>)?;
            format_from_waveformatex_ptr(format_ptr.0, client)
                .ok_or(DefaultStreamConfigError::StreamTypeNotSupported)
        }
    }

    /// Whether this device is a capture or render endpoint.
    pub(crate) fn data_flow(&self) -> Audio::EDataFlow {
        let endpoint = Endpoint::from(self.device.clone());
        endpoint.data_flow()
    }

    pub fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        if self.data_flow() == Audio::eCapture {
            self.default_format()
        } else {
            Err(DefaultStreamConfigError::StreamTypeNotSupported)
        }
    }

    pub fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        let data_flow = self.data_flow();
        if data_flow == Audio::eRender {
            self.default_format()
        } else {
            Err(DefaultStreamConfigError::StreamTypeNotSupported)
        }
    }

    /// Initialize a shared-mode, event-driven capture stream and collect the
    /// pieces (`client`, capture service, event, clock) into a `StreamInner`.
    pub(crate) fn build_input_stream_raw_inner(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
    ) -> Result<StreamInner, BuildStreamError> {
        unsafe {
            com::com_initialized();
            let audio_client = match self.build_audioclient() {
                Ok(client) => client,
                Err(ref e) if e.code() == Audio::AUDCLNT_E_DEVICE_INVALIDATED => {
                    return Err(BuildStreamError::DeviceNotAvailable)
                }
                Err(e) => {
                    let description = format!("{}", e);
                    let err = BackendSpecificError { description };
                    return Err(err.into());
                }
            };
            // Requested buffer size, in WASAPI's 100-ns REFERENCE_TIME units.
            let buffer_duration =
                buffer_size_to_duration(&config.buffer_size, config.sample_rate.0);
            let mut stream_flags = Audio::AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
            // Capturing from a render endpoint is loopback recording.
            if self.data_flow() == Audio::eRender {
                stream_flags |= Audio::AUDCLNT_STREAMFLAGS_LOOPBACK;
            }
            let waveformatex = {
                let format_attempt = config_to_waveformatextensible(config, sample_format)
                    .ok_or(BuildStreamError::StreamConfigNotSupported)?;
                let share_mode = Audio::AUDCLNT_SHAREMODE_SHARED;
                // Reject early rather than letting Initialize fail opaquely.
                match super::device::is_format_supported(&audio_client, &format_attempt.Format) {
                    Ok(false) => return Err(BuildStreamError::StreamConfigNotSupported),
                    Err(_) => return Err(BuildStreamError::DeviceNotAvailable),
                    _ => (),
                }
                let hresult = audio_client.Initialize(
                    share_mode,
                    stream_flags,
                    buffer_duration,
                    0,
                    &format_attempt.Format,
                    ptr::null(),
                );
                match hresult {
                    Err(ref e) if e.code() == Audio::AUDCLNT_E_DEVICE_INVALIDATED => {
                        return Err(BuildStreamError::DeviceNotAvailable);
                    }
                    Err(e) => {
                        let description = format!("{}", e);
                        let err = BackendSpecificError { description };
                        return Err(err.into());
                    }
                    Ok(()) => (),
                };
                format_attempt.Format
            };
            // Actual buffer size granted by WASAPI (may differ from request).
            let max_frames_in_buffer = audio_client
                .GetBufferSize()
                .map_err(windows_err_to_cpal_err::<BuildStreamError>)?;
            // Auto-reset event signalled by WASAPI when a period is ready.
            let event = {
                let event = Threading::CreateEventA(
                    ptr::null_mut(),
                    false,
                    false,
                    windows::core::PCSTR(ptr::null()),
                )
                .map_err(|e| {
                    let description = format!("failed to create event: {}", e);
                    let err = BackendSpecificError { description };
                    BuildStreamError::from(err)
                })?;
                if let Err(e) = audio_client.SetEventHandle(event) {
                    let description = format!("failed to call SetEventHandle: {}", e);
                    let err = BackendSpecificError { description };
                    return Err(err.into());
                }
                event
            };
            let capture_client = audio_client
                .GetService::<Audio::IAudioCaptureClient>()
                .map_err(|e| {
                    windows_err_to_cpal_err_message::<BuildStreamError>(
                        e,
                        "failed to build capture client: ",
                    )
                })?;
            let client_flow = AudioClientFlow::Capture { capture_client };
            let audio_clock = get_audio_clock(&audio_client)?;
            Ok(StreamInner {
                audio_client,
                audio_clock,
                client_flow,
                event,
                playing: false,
                max_frames_in_buffer,
                bytes_per_frame: waveformatex.nBlockAlign,
                config: config.clone(),
                sample_format,
            })
        }
    }

    /// Initialize a shared-mode, event-driven render stream and collect the
    /// pieces (`client`, render service, event, clock) into a `StreamInner`.
    pub(crate) fn build_output_stream_raw_inner(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
    ) -> Result<StreamInner, BuildStreamError> {
        unsafe {
            com::com_initialized();
            let audio_client = self
                .build_audioclient()
                .map_err(windows_err_to_cpal_err::<BuildStreamError>)?;
            // Requested buffer size, in WASAPI's 100-ns REFERENCE_TIME units.
            let buffer_duration =
                buffer_size_to_duration(&config.buffer_size, config.sample_rate.0);
            let waveformatex = {
                let format_attempt = config_to_waveformatextensible(config, sample_format)
                    .ok_or(BuildStreamError::StreamConfigNotSupported)?;
                let share_mode = Audio::AUDCLNT_SHAREMODE_SHARED;
                // Reject early rather than letting Initialize fail opaquely.
                match super::device::is_format_supported(&audio_client, &format_attempt.Format) {
                    Ok(false) => return Err(BuildStreamError::StreamConfigNotSupported),
                    Err(_) => return Err(BuildStreamError::DeviceNotAvailable),
                    _ => (),
                }
                audio_client
                    .Initialize(
                        share_mode,
                        Audio::AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
                        buffer_duration,
                        0,
                        &format_attempt.Format,
                        ptr::null(),
                    )
                    .map_err(windows_err_to_cpal_err::<BuildStreamError>)?;
                format_attempt.Format
            };
            // Auto-reset event signalled by WASAPI when the buffer needs data.
            let event = {
                let event = Threading::CreateEventA(
                    ptr::null_mut(),
                    false,
                    false,
                    windows::core::PCSTR(ptr::null()),
                )
                .map_err(|e| {
                    let description = format!("failed to create event: {}", e);
                    let err = BackendSpecificError { description };
                    BuildStreamError::from(err)
                })?;
                if let Err(e) = audio_client.SetEventHandle(event) {
                    let description = format!("failed to call SetEventHandle: {}", e);
                    let err = BackendSpecificError { description };
                    return Err(err.into());
                }
                event
            };
            // Actual buffer size granted by WASAPI (may differ from request).
            let max_frames_in_buffer = audio_client.GetBufferSize().map_err(|e| {
                windows_err_to_cpal_err_message::<BuildStreamError>(
                    e,
                    "failed to obtain buffer size: ",
                )
            })?;
            let render_client = audio_client
                .GetService::<IAudioRenderClient>()
                .map_err(|e| {
                    windows_err_to_cpal_err_message::<BuildStreamError>(
                        e,
                        "failed to build render client: ",
                    )
                })?;
            let client_flow = AudioClientFlow::Render { render_client };
            let audio_clock = get_audio_clock(&audio_client)?;
            Ok(StreamInner {
                audio_client,
                audio_clock,
                client_flow,
                event,
                playing: false,
                max_frames_in_buffer,
                bytes_per_frame: waveformatex.nBlockAlign,
                config: config.clone(),
                sample_format,
            })
        }
    }
}
impl PartialEq for Device {
    /// Two devices are equal when their WASAPI endpoint ID strings match.
    #[inline]
    fn eq(&self, other: &Device) -> bool {
        unsafe {
            // Guard freeing the ID string allocated by GetId on every path.
            struct IdRAII(windows::core::PWSTR);
            impl Drop for IdRAII {
                fn drop(&mut self) {
                    unsafe { Com::CoTaskMemFree(self.0 .0 as *mut c_void) }
                }
            }
            let id1 = self.device.GetId().expect("cpal: GetId failure");
            let id1 = IdRAII(id1);
            let id2 = other.device.GetId().expect("cpal: GetId failure");
            let id2 = IdRAII(id2);
            // Compare the two NUL-terminated wide strings unit by unit.
            let mut offset = 0;
            loop {
                let w1: u16 = *(id1.0).0.offset(offset);
                let w2: u16 = *(id2.0).0.offset(offset);
                if w1 == 0 && w2 == 0 {
                    return true;
                }
                if w1 != w2 {
                    return false;
                }
                offset += 1;
            }
        }
    }
}

impl Eq for Device {}
impl fmt::Debug for Device {
    /// Show the raw device handle alongside its resolved friendly name.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut dbg = f.debug_struct("Device");
        dbg.field("device", &self.device);
        dbg.field("name", &self.name());
        dbg.finish()
    }
}
impl From<Audio::IMMDevice> for Endpoint {
    fn from(device: Audio::IMMDevice) -> Self {
        // SAFETY: `device` is a valid IMMDevice, as the helper requires.
        unsafe {
            let endpoint = immendpoint_from_immdevice(device);
            Endpoint { endpoint }
        }
    }
}
impl Endpoint {
    /// Whether the endpoint is a capture (`eCapture`) or render (`eRender`)
    /// device.
    fn data_flow(&self) -> Audio::EDataFlow {
        unsafe { data_flow_from_immendpoint(&self.endpoint) }
    }
}
/// Process-wide device enumerator, created on first use.
/// Panics (via `unwrap`) if the COM object cannot be created.
static ENUMERATOR: Lazy<Enumerator> = Lazy::new(|| {
    com::com_initialized();
    unsafe {
        let enumerator = Com::CoCreateInstance::<_, Audio::IMMDeviceEnumerator>(
            &Audio::MMDeviceEnumerator,
            None,
            Com::CLSCTX_ALL,
        )
        .unwrap();
        Enumerator(enumerator)
    }
});
/// Wrapper so the COM enumerator can live in the global `Lazy` static.
struct Enumerator(Audio::IMMDeviceEnumerator);
// SAFETY: required for the global static; only read-style enumerator calls
// are made through it.
// NOTE(review): confirm against the MMDevice API's threading rules.
unsafe impl Send for Enumerator {}
unsafe impl Sync for Enumerator {}
/// Iterator over all active audio endpoints (capture and render).
pub struct Devices {
    collection: Audio::IMMDeviceCollection,
    // Total number of devices, captured once at construction.
    total_count: u32,
    // Index of the next device to yield.
    next_item: u32,
}
impl Devices {
    /// Enumerate all active endpoints through the global `ENUMERATOR`.
    pub fn new() -> Result<Self, DevicesError> {
        unsafe {
            let collection = ENUMERATOR
                .0
                .EnumAudioEndpoints(Audio::eAll, Audio::DEVICE_STATE_ACTIVE)
                .map_err(BackendSpecificError::from)?;
            let count = collection.GetCount().map_err(BackendSpecificError::from)?;
            Ok(Devices {
                collection,
                total_count: count,
                next_item: 0,
            })
        }
    }
}
// SAFETY: the collection is only queried via Item()/GetCount(); iteration
// state is plain integers owned by this struct.
// NOTE(review): cross-thread COM use — confirm apartment assumptions.
unsafe impl Send for Devices {}
unsafe impl Sync for Devices {}
impl Iterator for Devices {
    type Item = Device;

    /// Yield the next device from the collection.
    /// Panics (via `unwrap`) if the item cannot be retrieved.
    fn next(&mut self) -> Option<Device> {
        if self.next_item >= self.total_count {
            return None;
        }
        unsafe {
            let device = self.collection.Item(self.next_item).unwrap();
            self.next_item += 1;
            Some(Device::from_immdevice(device))
        }
    }

    /// Exact count of devices remaining.
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let num = self.total_count - self.next_item;
        let num = num as usize;
        (num, Some(num))
    }
}
/// The system default endpoint for `data_flow` and the `eConsole` role, or
/// `None` if none exists (e.g. no audio hardware).
fn default_device(data_flow: Audio::EDataFlow) -> Option<Device> {
    unsafe {
        let device = ENUMERATOR
            .0
            .GetDefaultAudioEndpoint(data_flow, Audio::eConsole)
            .ok()?;
        Some(Device::from_immdevice(device))
    }
}
/// The system default capture device, if any.
pub fn default_input_device() -> Option<Device> {
    default_device(Audio::eCapture)
}
/// The system default render device, if any.
pub fn default_output_device() -> Option<Device> {
    default_device(Audio::eRender)
}
/// Obtain the `IAudioClock` service from an initialized audio client; the
/// stream uses it for timestamping.
unsafe fn get_audio_clock(
    audio_client: &Audio::IAudioClient,
) -> Result<Audio::IAudioClock, BuildStreamError> {
    audio_client
        .GetService::<Audio::IAudioClock>()
        .map_err(|e| {
            windows_err_to_cpal_err_message::<BuildStreamError>(e, "failed to build audio clock: ")
        })
}
/// Translate a cpal `StreamConfig` + `SampleFormat` into the
/// `WAVEFORMATEXTENSIBLE` WASAPI expects, or `None` for sample formats with
/// no mapping here (U16).
///
/// I16 uses the plain PCM tag; F32 uses the extensible layout with an IEEE
/// float sub-format.
fn config_to_waveformatextensible(
    config: &StreamConfig,
    sample_format: SampleFormat,
) -> Option<Audio::WAVEFORMATEXTENSIBLE> {
    let format_tag = match sample_format {
        SampleFormat::I16 => Audio::WAVE_FORMAT_PCM,
        SampleFormat::F32 => KernelStreaming::WAVE_FORMAT_EXTENSIBLE,
        SampleFormat::U16 => return None,
    } as u16;
    let channels = config.channels;
    let sample_rate = config.sample_rate.0;
    let sample_bytes = sample_format.sample_size() as u16;
    let avg_bytes_per_sec = u32::from(channels) * sample_rate * u32::from(sample_bytes);
    let block_align = channels * sample_bytes;
    let bits_per_sample = 8 * sample_bytes;
    // cbSize counts the bytes following the WAVEFORMATEX header: zero for
    // plain PCM, the extensible tail size otherwise.
    let cb_size = match sample_format {
        SampleFormat::I16 => 0,
        SampleFormat::F32 => {
            let extensible_size = mem::size_of::<Audio::WAVEFORMATEXTENSIBLE>();
            let ex_size = mem::size_of::<Audio::WAVEFORMATEX>();
            (extensible_size - ex_size) as u16
        }
        SampleFormat::U16 => return None,
    };
    let waveformatex = Audio::WAVEFORMATEX {
        wFormatTag: format_tag,
        nChannels: channels,
        nSamplesPerSec: sample_rate,
        nAvgBytesPerSec: avg_bytes_per_sec,
        nBlockAlign: block_align,
        wBitsPerSample: bits_per_sample,
        cbSize: cb_size,
    };
    // DIRECTOUT: no speaker positions are claimed for the channels.
    let channel_mask = KernelStreaming::KSAUDIO_SPEAKER_DIRECTOUT;
    let sub_format = match sample_format {
        SampleFormat::I16 => KernelStreaming::KSDATAFORMAT_SUBTYPE_PCM,
        SampleFormat::F32 => Multimedia::KSDATAFORMAT_SUBTYPE_IEEE_FLOAT,
        SampleFormat::U16 => return None,
    };
    let waveformatextensible = Audio::WAVEFORMATEXTENSIBLE {
        Format: waveformatex,
        Samples: Audio::WAVEFORMATEXTENSIBLE_0 {
            wSamplesPerBlock: bits_per_sample,
        },
        dwChannelMask: channel_mask,
        SubFormat: sub_format,
    };
    Some(waveformatextensible)
}
/// Convert a cpal `BufferSize` into a WASAPI buffer duration expressed in
/// REFERENCE_TIME (100-nanosecond) units, as taken by `IAudioClient::Initialize`.
/// `BufferSize::Default` maps to 0, which lets WASAPI pick its own size.
fn buffer_size_to_duration(buffer_size: &BufferSize, sample_rate: u32) -> i64 {
    // Number of 100-ns ticks in one second.
    const TICKS_PER_SECOND: i64 = 1_000_000_000 / 100;
    match buffer_size {
        // frames / sample_rate seconds, scaled to ticks.
        BufferSize::Fixed(frames) => *frames as i64 * TICKS_PER_SECOND / sample_rate as i64,
        BufferSize::Default => 0,
    }
}
/// Inverse of `buffer_size_to_duration`: convert a REFERENCE_TIME duration
/// (100-ns units) into a frame count at the given sample rate.
fn buffer_duration_to_frames(buffer_duration: i64, sample_rate: u32) -> FrameCount {
    let frames = buffer_duration * sample_rate as i64 * 100 / 1_000_000_000;
    frames as FrameCount
}