aaudio/
lib.rs

extern crate libc;
extern crate aaudio_sys;

use std::ffi::c_void;
use std::fmt;
use std::mem::MaybeUninit;

use aaudio_sys as ffi;
use ffi::{AAudioStream as AAudioStreamRaw, AAudioStreamBuilder as AAudioStreamBuilderRaw};

/// These values are returned from AAudio functions to indicate failure.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Error {
    /// AAudio returned an error code that is not part of this enum.
    Unknown(i32),

    /// The audio device was disconnected. This could occur, for example, when headphones
    /// are plugged in or unplugged. The stream cannot be used after the device is disconnected.
    /// Applications should stop and close the stream.
    /// If this error is received in an error callback then another thread should be
    /// used to stop and close the stream.
    Disconnected,

    /// An invalid parameter was passed to AAudio.
    IllegalArgument,

    /// The requested operation is not appropriate for the current state of AAudio.
    InvalidState,

    /// The server rejected the handle used to identify the stream.
    InvalidHandle,

    /// The function is not implemented for this stream.
    Unimplemented,

    /// A resource or information is unavailable.
    /// This could occur when an application tries to open too many streams,
    /// or a timestamp is not available.
    Unavailable,

    /// No free handles were available.
    NoFreeHandles,

    /// Memory could not be allocated.
    NoMemory,

    /// A NULL pointer was passed to AAudio,
    /// or a NULL pointer was detected internally.
    Null,

    /// An operation took longer than expected.
    Timeout,

    /// A blocking operation was invoked where no blocking was expected.
    WouldBlock,

    /// The requested data format is not supported.
    InvalidFormat,

    /// A requested value was out of range.
    OutOfRange,

    /// The audio service was not available.
    NoService,

    /// The requested sample rate was not supported.
    InvalidRate,
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Self::Unknown(code) => write!(f, "Error code {}", code),
            Self::Disconnected => f.write_str("The audio device was disconnected."),
            Self::IllegalArgument => f.write_str("An invalid parameter was passed to AAudio."),
            Self::InvalidState => f.write_str(
                "The requested operation is not appropriate for the current state of AAudio.",
            ),
            Self::InvalidHandle => {
                f.write_str("The server rejected the handle used to identify the stream.")
            }
            Self::Unimplemented => f.write_str("The function is not implemented for this stream."),
            Self::Unavailable => f.write_str("A resource or information is unavailable."),
            Self::NoFreeHandles => f.write_str("No free handles were available."),
            Self::NoMemory => f.write_str("Memory could not be allocated."),
            Self::Null => f.write_str("A NULL pointer was passed to AAudio."),
            Self::Timeout => f.write_str("An operation took longer than expected."),
            Self::WouldBlock => {
                f.write_str("A blocking operation was invoked where no blocking was expected.")
            }
            Self::InvalidFormat => f.write_str("The requested data format is not supported."),
            Self::OutOfRange => f.write_str("A requested value was out of range."),
            Self::NoService => f.write_str("The audio service was not available."),
            Self::InvalidRate => f.write_str("The requested sample rate was not supported."),
        }
    }
}

impl Error {
    fn from_code(code: i32) -> Self {
        match code {
            -899 => Self::Disconnected,
            -898 => Self::IllegalArgument,
            -895 => Self::InvalidState,
            -892 => Self::InvalidHandle,
            -890 => Self::Unimplemented,
            -889 => Self::Unavailable,
            -888 => Self::NoFreeHandles,
            -887 => Self::NoMemory,
            -886 => Self::Null,
            -885 => Self::Timeout,
            -884 => Self::WouldBlock,
            -883 => Self::InvalidFormat,
            -882 => Self::OutOfRange,
            -881 => Self::NoService,
            -880 => Self::InvalidRate,
            code => Self::Unknown(code),
        }
    }
}

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Direction {
    /// Audio data will travel out of the device, for example through a speaker.
    Output,

    /// Audio data will travel into the device, for example from a microphone.
    Input,
}

impl Direction {
    fn from_i32(val: i32) -> Self {
        match val {
            0 => Self::Output,
            1 => Self::Input,
            direction => panic!("Unexpected direction: {}", direction),
        }
    }
}

/// A sample format.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Format {
    Unspecified = 0,

    /// This format uses the i16 data type.
    /// The maximum range of the data is -32768 to 32767.
    I16,

    /// This format uses the float data type.
    /// The nominal range of the data is [-1.0f32, 1.0f32).
    /// Values outside that range may be clipped.
    ///
    /// See also 'floatData' at
    /// https://developer.android.com/reference/android/media/AudioTrack#write(float[],%20int,%20int,%20int)
    F32,
}

impl Format {
    fn from_i32(val: i32) -> Self {
        match val {
            0 => Self::Unspecified,
            1 => Self::I16,
            2 => Self::F32,
            format => panic!("Unexpected format: {}", format),
        }
    }

    fn sample_size(&self) -> i32 {
        match self {
            Self::Unspecified => 0,
            Self::I16 => 2,
            Self::F32 => 4,
        }
    }
}

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum SharingMode {
    /// This will be the only stream using a particular source or sink.
    /// This mode will provide the lowest possible latency.
    /// You should close Exclusive streams immediately when you are not using them.
    Exclusive,
    /// Multiple applications will be mixed by the AAudio Server.
    /// This will have higher latency than the Exclusive mode.
    Shared,
}

impl SharingMode {
    fn from_i32(val: i32) -> Self {
        match val {
            0 => Self::Exclusive,
            1 => Self::Shared,
            mode => panic!("Unexpected sharing mode: {}", mode),
        }
    }
}

/// The Usage attribute expresses "why" you are playing a sound, i.e. what the sound is used for.
/// This information is used by certain platforms or routing policies
/// to make more refined volume or routing decisions.
///
/// Note that these match the equivalent values in android.media.AudioAttributes
/// in the Android Java API.
///
/// Added in API level 28.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Usage {
    /// Use this for streaming media, music performance, video, podcasts, etcetera.
    Media = 1,

    /// Use this for voice over IP, telephony, etcetera.
    VoiceCommunication = 2,

    /// Use this for sounds associated with telephony such as busy tones, DTMF, etcetera.
    VoiceCommunicationSignalling = 3,

    /// Use this to demand the user's attention.
    Alarm = 4,

    /// Use this for notifying the user when a message has arrived or some
    /// other background event has occurred.
    Notification = 5,

    /// Use this when the phone rings.
    NotificationRingtone = 6,

    /// Use this to attract the user's attention when, for example, the battery is low.
    NotificationEvent = 10,

    /// Use this for screen readers, etcetera.
    AssistanceAccessibility = 11,

    /// Use this for driving or navigation directions.
    AssistanceNavigationGuidance = 12,

    /// Use this for user interface sounds, beeps, etcetera.
    AssistanceSonification = 13,

    /// Use this for game audio and sound effects.
    Game = 14,

    /// Use this for audio responses to user queries, audio instructions or help utterances.
    Assistant = 16,

    /// Use this in case of playing sounds in an emergency.
    /// Privileged MODIFY_AUDIO_ROUTING permission required.
    Emergency = 1000,

    /// Use this for safety sounds and alerts, for example backup camera obstacle detection.
    /// Privileged MODIFY_AUDIO_ROUTING permission required.
    Safety = 1001,

    /// Use this for vehicle status alerts and information, for example the check engine light.
    /// Privileged MODIFY_AUDIO_ROUTING permission required.
    VehicleStatus = 1002,

    /// Use this for traffic announcements, etc.
    /// Privileged MODIFY_AUDIO_ROUTING permission required.
    Announcement = 1003,
}

/// Defines the audio source.
/// An audio source defines both a default physical source of audio signal, and a recording
/// configuration.
///
/// Note that these match the equivalent values in MediaRecorder.AudioSource in the Android Java API.
///
/// Added in API level 28.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum InputPreset {
    /// Use this preset when other presets do not apply.
    Generic = 1,

    /// Use this preset when recording video.
    Camcorder = 5,

    /// Use this preset when doing speech recognition.
    VoiceRecognition = 6,

    /// Use this preset when doing telephony or voice messaging.
    VoiceCommunication = 7,

    /// Use this preset to obtain an input with no effects.
    /// Note that this input will not have automatic gain control
    /// so the recorded volume may be very low.
    Unprocessed = 9,

    /// Use this preset for capturing audio meant to be processed in real time
    /// and played back for live performance (e.g. karaoke).
    /// The capture path will minimize latency and coupling with the playback path.
    /// Available since API level 29.
    VoicePerformance = 10,
}

/// The ContentType attribute describes "what" you are playing.
/// It expresses the general category of the content. This information is optional.
/// But in case it is known (for instance `Movie` for a movie streaming service
/// or `Speech` for an audio book application) this information might be used by
/// the audio framework to enforce audio focus.
///
/// Note that these match the equivalent values in android.media.AudioAttributes
/// in the Android Java API.
///
/// Added in API level 28.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum ContentType {
    /// Use this for spoken voice, audio books, etcetera.
    Speech = 1,

    /// Use this for pre-recorded or live music.
    Music = 2,

    /// Use this for a movie or video soundtrack.
    Movie = 3,

    /// Use this for a sound designed to accompany a user action,
    /// such as a click or beep sound made when the user presses a button.
    Sonification = 4,
}

/// Specifies whether audio may or may not be captured by other apps or the system.
///
/// Note that these match the equivalent values in android.media.AudioAttributes
/// in the Android Java API.
///
/// Added in API level 29.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum AllowedCapturePolicy {
    /// Indicates that the audio may be captured by any app.
    ///
    /// For privacy, the following usages cannot be recorded: `VoiceCommunication*`,
    /// `Notification*`, `Assistance*` and `Assistant`.
    ///
    /// On Android Q, this means only `Media` and `Game` may be captured.
    ///
    /// See android.media.AudioAttributes#ALLOW_CAPTURE_BY_ALL.
    AllowCaptureByAll = 1,

    /// Indicates that the audio may only be captured by system apps.
    ///
    /// System apps can capture for many purposes like accessibility, user guidance...
    /// but have strong restrictions. See
    /// android.media.AudioAttributes#ALLOW_CAPTURE_BY_SYSTEM for what the system apps
    /// can do with the captured audio.
    AllowCaptureBySystem = 2,

    /// Indicates that the audio may not be recorded by any app, even if it is a system app.
    ///
    /// It is encouraged to use `AllowCaptureBySystem` instead of this value as system apps
    /// provide significant and useful features for the user (e.g. accessibility).
    /// See android.media.AudioAttributes#ALLOW_CAPTURE_BY_NONE.
    AllowCaptureByNone = 3,
}

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum PerformanceMode {
    /// No particular performance needs. Default.
    None = 10,

    /// Extending battery life is more important than low latency.
    ///
    /// This mode is not supported in input streams.
    /// For input streams, `None` will be used if this is requested.
    PowerSaving = 11,

    /// Reducing latency is more important than battery life.
    LowLatency = 12,
}

impl PerformanceMode {
    fn from_i32(val: i32) -> Self {
        match val {
            11 => Self::PowerSaving,
            12 => Self::LowLatency,
            _ => Self::None,
        }
    }
}

/// Value returned by the data callback function.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum CallbackResult {
    /// Continue calling the callback.
    Continue,

    /// Stop calling the callback.
    ///
    /// The application will still need to call `AAudioStream::request_pause()`
    /// or `AAudioStream::request_stop()`.
    Stop,
}

fn wrap_result(result: i32) -> Result<(), Error> {
    if result < 0 {
        Err(Error::from_code(result))
    } else {
        Ok(())
    }
}

struct StreamCallbacks {
    _data_callback:
        Box<Box<dyn FnMut(&AAudioStreamInfo, &mut [u8], i32) -> CallbackResult + Send + 'static>>,
    _error_callback: Box<Box<dyn FnMut(&AAudioStreamInfo, Error) + Send + 'static>>,
}

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum StreamState {
    Uninitialized,
    Unknown,
    Open,
    Starting,
    Started,
    Pausing,
    Paused,
    Flushing,
    Flushed,
    Stopping,
    Stopped,
    Closing,
    Closed,
    Disconnected,
}

impl StreamState {
    fn from_i32(val: i32) -> Self {
        match val {
            0 => Self::Uninitialized,
            1 => Self::Unknown,
            2 => Self::Open,
            3 => Self::Starting,
            4 => Self::Started,
            5 => Self::Pausing,
            6 => Self::Paused,
            7 => Self::Flushing,
            8 => Self::Flushed,
            9 => Self::Stopping,
            10 => Self::Stopped,
            11 => Self::Closing,
            12 => Self::Closed,
            13 => Self::Disconnected,
            state => panic!("Unexpected stream state: {}", state),
        }
    }
}

/// A frame position and the time at which that frame was presented.
pub struct Timestamp {
    /// The sample frame position.
    pub frame_position: i64,
    /// The time at which that frame was presented, in nanoseconds (CLOCK_MONOTONIC).
    pub time_nanos: i64,
}

pub struct AAudioStream {
    raw: *mut AAudioStreamRaw,
    _callbacks: Option<StreamCallbacks>,
}

unsafe impl Send for AAudioStream {}

fn get_timestamp_monotonic(raw: *mut AAudioStreamRaw) -> Result<Timestamp, Error> {
    let mut frame_position = MaybeUninit::uninit();
    let mut time_nanos = MaybeUninit::uninit();
    let result = unsafe {
        ffi::AAudioStream_getTimestamp(
            raw,
            libc::CLOCK_MONOTONIC,
            frame_position.as_mut_ptr(),
            time_nanos.as_mut_ptr(),
        )
    };
    wrap_result(result)?;
    Ok(unsafe {
        Timestamp {
            frame_position: frame_position.assume_init(),
            time_nanos: time_nanos.assume_init(),
        }
    })
}

impl AAudioStream {
    /// Returns the actual sample rate.
    ///
    /// Available since API level 26.
    pub fn get_sample_rate(&self) -> i32 {
        unsafe { ffi::AAudioStream_getSampleRate(self.raw) }
    }

    /// A stream has one or more channels of data.
    /// A frame will contain one sample for each channel.
    ///
    /// Available since API level 26.
    pub fn get_channel_count(&self) -> i32 {
        unsafe { ffi::AAudioStream_getChannelCount(self.raw) }
    }

    /// Query the maximum number of frames that can be filled without blocking.
    ///
    /// Available since API level 26.
    pub fn get_buffer_size_in_frames(&self) -> i32 {
        unsafe { ffi::AAudioStream_getBufferSizeInFrames(self.raw) }
    }

    /// Query the number of frames that the application should read or write at
    /// one time for optimal performance. It is OK if an application writes
    /// a different number of frames. But the buffer size may need to be larger
    /// in order to avoid underruns or overruns.
    ///
    /// Note that this may or may not match the actual device burst size.
    /// For some endpoints, the burst size can vary dynamically.
    /// But these tend to be devices with high latency.
    ///
    /// Available since API level 26.
    pub fn get_frames_per_burst(&self) -> i32 {
        unsafe { ffi::AAudioStream_getFramesPerBurst(self.raw) }
    }

    /// Query maximum buffer capacity in frames.
    ///
    /// Available since API level 26.
    pub fn get_buffer_capacity_in_frames(&self) -> i32 {
        unsafe { ffi::AAudioStream_getBufferCapacityInFrames(self.raw) }
    }

    /// Query the size of the buffer that will be passed to the dataProc callback
    /// in the numFrames parameter.
    ///
    /// This call can be used if the application needs to know the value of numFrames before
    /// the stream is started. This is not normally necessary.
    ///
    /// If a specific size was requested by calling
    /// `AAudioStreamBuilder::set_frames_per_data_callback()` then this will be the same size.
    ///
    /// If `AAudioStreamBuilder::set_frames_per_data_callback()` was not called then this will
    /// return the size chosen by AAudio, or 0.
    ///
    /// 0 indicates that the callback buffer size for this stream
    /// may vary from one dataProc callback to the next.
    ///
    /// Available since API level 26.
    pub fn get_frames_per_data_callback(&self) -> i32 {
        unsafe { ffi::AAudioStream_getFramesPerDataCallback(self.raw) }
    }

    /// An XRun is an Underrun or an Overrun.
    /// During playing, an underrun will occur if the stream is not written in time
    /// and the system runs out of valid data.
    /// During recording, an overrun will occur if the stream is not read in time
    /// and there is no place to put the incoming data so it is discarded.
    ///
    /// An underrun or overrun can cause an audible "pop" or "glitch".
    ///
    /// Note that some INPUT devices may not support this function.
    /// In that case a 0 will always be returned.
    ///
    /// Available since API level 26.
    pub fn get_x_run_count(&self) -> i32 {
        unsafe { ffi::AAudioStream_getXRunCount(self.raw) }
    }

    /// Returns the actual device ID.
    ///
    /// Available since API level 26.
    pub fn get_device_id(&self) -> i32 {
        unsafe { ffi::AAudioStream_getDeviceId(self.raw) }
    }

    /// Returns the actual data format.
    ///
    /// Available since API level 26.
    pub fn get_format(&self) -> Format {
        let val = unsafe { ffi::AAudioStream_getFormat(self.raw) };
        Format::from_i32(val)
    }

    /// Provide actual sharing mode.
    ///
    /// Available since API level 26.
    pub fn get_sharing_mode(&self) -> SharingMode {
        let val = unsafe { ffi::AAudioStream_getSharingMode(self.raw) };
        SharingMode::from_i32(val)
    }

    /// Get the performance mode used by the stream.
    ///
    /// Available since API level 26.
    pub fn get_performance_mode(&self) -> PerformanceMode {
        let val = unsafe { ffi::AAudioStream_getPerformanceMode(self.raw) };
        PerformanceMode::from_i32(val)
    }

    /// Available since API level 26.
    pub fn get_direction(&self) -> Direction {
        let val = unsafe { ffi::AAudioStream_getDirection(self.raw) };
        Direction::from_i32(val)
    }

    /// Returns the number of frames that have been written since the stream was created.
    /// For an output stream, this will be advanced by the application calling `write()`
    /// or by a data callback.
    /// For an input stream, this will be advanced by the endpoint.
    ///
    /// The frame position is monotonically increasing.
    ///
    /// Available since API level 26.
    pub fn get_frames_written(&self) -> i64 {
        unsafe { ffi::AAudioStream_getFramesWritten(self.raw) }
    }

    /// Returns the number of frames that have been read since the stream was created.
    /// For an output stream, this will be advanced by the endpoint.
    /// For an input stream, this will be advanced by the application calling `read()`
    /// or by a data callback.
    ///
    /// The frame position is monotonically increasing.
    ///
    /// Available since API level 26.
    pub fn get_frames_read(&self) -> i64 {
        unsafe { ffi::AAudioStream_getFramesRead(self.raw) }
    }

    /// Passes back the session ID associated with this stream.
    ///
    /// The session ID can be used to associate a stream with effects processors.
    /// The effects are controlled using the Android AudioEffect Java API.
    ///
    /// If `AAudioStreamBuilder::set_session_id()` was called with 0
    /// then a new session ID should be allocated once when the stream is opened.
    ///
    /// If `AAudioStreamBuilder::set_session_id()` was called with a previously allocated
    /// session ID then that value should be returned.
    ///
    /// If `AAudioStreamBuilder::set_session_id()` was not called then this function should
    /// return -1.
    ///
    /// The sessionID for a stream should not change once the stream has been opened.
    ///
    /// Available since API level 28.
    pub fn get_session_id(&self) -> i32 {
        unsafe { ffi::AAudioStream_getSessionId(self.raw) }
    }

    /// Returns the time at which a particular frame was presented.
    /// This can be used to synchronize audio with video or MIDI.
    /// It can also be used to align a recorded stream with a playback stream.
    ///
    /// Timestamps are only valid when the stream is in `Started` state.
    /// `InvalidState` will be returned if the stream is not started.
    /// Note that because request_start() is asynchronous, timestamps will not be valid until
    /// a short time after calling request_start().
    /// So `InvalidState` should not be considered a fatal error.
    /// Just try calling again later.
    ///
    /// If an error occurs, then the position and time will not be modified.
    ///
    /// The position and time passed back are monotonically increasing.
    ///
    /// Available since API level 26.
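    ///
    /// A rough sketch of estimating output latency from a timestamp (an
    /// approximation for illustration only, not part of the AAudio API; it
    /// ignores the small interval elapsed since `time_nanos`, and it assumes
    /// the crate is named `aaudio`):
    ///
    /// ```no_run
    /// fn estimated_output_latency_nanos(stream: &aaudio::AAudioStream) -> Option<i64> {
    ///     let ts = stream.get_timestamp_monotonic().ok()?;
    ///     let rate = stream.get_sample_rate() as i64;
    ///     // Frames still queued between the application's write position
    ///     // and the frame that was just presented.
    ///     let pending_frames = stream.get_frames_written() - ts.frame_position;
    ///     Some(pending_frames * 1_000_000_000 / rate)
    /// }
    /// ```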
    pub fn get_timestamp_monotonic(&self) -> Result<Timestamp, Error> {
        get_timestamp_monotonic(self.raw)
    }

    /// Query the current state of the client, e.g. `Pausing`.
    ///
    /// This function will immediately return the state without updating the state.
    /// If you want to update the client state based on the server state then
    /// call `AAudioStream::wait_for_state_change()` with currentState
    /// set to `Unknown` and a zero timeout.
    ///
    /// Available since API level 26.
    pub fn get_state(&self) -> StreamState {
        let val = unsafe { ffi::AAudioStream_getState(self.raw) };
        StreamState::from_i32(val)
    }

    /// Free the audio resources associated with the stream.
    ///
    /// After this call, the stream will be in `Closing` state.
    ///
    /// This function is useful if you want to release the audio resources immediately,
    /// but still allow queries to the stream to occur from other threads. This often
    /// happens if you are monitoring stream progress from a UI thread.
    ///
    /// Available since API level 30.
    pub fn release(&mut self) -> Result<(), Error> {
        let val = unsafe { ffi::AAudioStream_release(self.raw) };
        wrap_result(val)
    }

    /// Asynchronously request to start playing the stream. For output streams, one should
    /// write to the stream to fill the buffer before starting.
    /// Otherwise it will underflow.
    /// After this call the state will be in `Starting` or `Started`.
    ///
    /// Returns `Ok(())` on success or an `Error` on failure.
    ///
    /// Available since API level 26.
    pub fn request_start(&mut self) -> Result<(), Error> {
        let val = unsafe { ffi::AAudioStream_requestStart(self.raw) };
        wrap_result(val)
    }

    /// Asynchronous request for the stream to pause.
    /// Pausing a stream will freeze the data flow but not flush any buffers.
    /// Use `AAudioStream::request_start()` to resume playback after a pause.
    /// After this call the state will be in `Pausing` or `Paused`.
    ///
    /// This will return `Unimplemented` for input streams.
    /// For input streams use `AAudioStream::request_stop()`.
    ///
    /// Available since API level 26.
    pub fn request_pause(&mut self) -> Result<(), Error> {
        let val = unsafe { ffi::AAudioStream_requestPause(self.raw) };
        wrap_result(val)
    }

    /// Asynchronous request for the stream to flush.
    /// Flushing will discard any pending data.
    /// This call only works if the stream is pausing or paused.
    /// Frame counters are not reset by a flush. They may be advanced.
    /// After this call the state will be in `Flushing` or `Flushed`.
    ///
    /// This will return `Unimplemented` for input streams.
    ///
    /// Available since API level 26.
    pub fn request_flush(&mut self) -> Result<(), Error> {
        let val = unsafe { ffi::AAudioStream_requestFlush(self.raw) };
        wrap_result(val)
    }

    /// Asynchronous request for the stream to stop.
    /// The stream will stop after all of the data currently buffered has been played.
    /// After this call the state will be in `Stopping` or `Stopped`.
    ///
    /// Available since API level 26.
    pub fn request_stop(&mut self) -> Result<(), Error> {
        let val = unsafe { ffi::AAudioStream_requestStop(self.raw) };
        wrap_result(val)
    }

    /// Wait until the current state no longer matches the input state.
    ///
    /// This will update the current client state.
    ///
    /// Returns the new state.
    ///
    /// Available since API level 26.
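    ///
    /// A minimal sketch of pausing and then waiting for the transition to
    /// finish (assuming the crate is named `aaudio`; the one-second timeout is
    /// an arbitrary illustration value):
    ///
    /// ```no_run
    /// use aaudio::{AAudioStream, StreamState};
    ///
    /// fn pause_and_wait(stream: &mut AAudioStream) -> Result<(), aaudio::Error> {
    ///     stream.request_pause()?;
    ///     // Wait up to one second for the state to leave `Pausing`.
    ///     let next = stream.wait_for_state_change(StreamState::Pausing, 1_000_000_000)?;
    ///     assert_ne!(next, StreamState::Pausing);
    ///     Ok(())
    /// }
    /// ```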
    pub fn wait_for_state_change(
        &mut self,
        input_state: StreamState,
        timeout_nanos: i64,
    ) -> Result<StreamState, Error> {
        let mut new_state = MaybeUninit::uninit();
        let result = unsafe {
            ffi::AAudioStream_waitForStateChange(
                self.raw,
                input_state as i32,
                new_state.as_mut_ptr(),
                timeout_nanos,
            )
        };
        wrap_result(result)?;
        Ok(unsafe { StreamState::from_i32(new_state.assume_init()) })
    }

    /// Read data from the stream.
    /// Returns the number of frames actually read.
    ///
    /// The call will wait until the read is complete or until it runs out of time.
    /// If `timeout_nanoseconds` is zero then this call will not wait.
    ///
    /// Note that `timeout_nanoseconds` is a relative duration in wall clock time.
    /// Time will not stop if the thread is asleep.
    /// So it will be implemented using CLOCK_BOOTTIME.
    ///
    /// This call is "strong non-blocking" unless it has to wait for data.
    ///
    /// If the call times out then zero or a partial frame count will be returned.
    ///
    /// Available since API level 26.
    ///
    /// # Arguments
    ///
    /// * `buffer` - The slice to read the samples into.
    /// * `num_frames` - Number of frames to read. Only complete frames will be transferred.
    /// * `timeout_nanoseconds` - Maximum number of nanoseconds to wait for completion.
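    ///
    /// A minimal read sketch (assuming the crate is named `aaudio` and that the
    /// stream was opened as an input stream with `Format::I16`, so each sample
    /// is 2 bytes; the 100 ms timeout is arbitrary):
    ///
    /// ```no_run
    /// use aaudio::AAudioStream;
    ///
    /// fn read_one_burst(stream: &mut AAudioStream) -> Result<Vec<u8>, aaudio::Error> {
    ///     let frames = stream.get_frames_per_burst();
    ///     let bytes_per_frame = stream.get_channel_count() as usize * 2;
    ///     let mut buf = vec![0u8; frames as usize * bytes_per_frame];
    ///     // Wait up to 100 ms for one burst of input data.
    ///     let frames_read = stream.read(&mut buf, frames, 100_000_000)?;
    ///     buf.truncate(frames_read as usize * bytes_per_frame);
    ///     Ok(buf)
    /// }
    /// ```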
    pub fn read(
        &mut self,
        buffer: &mut [u8],
        num_frames: i32,
        timeout_nanoseconds: i64,
    ) -> Result<u32, Error> {
        let result = unsafe {
            ffi::AAudioStream_read(
                self.raw,
                buffer.as_mut_ptr() as *mut c_void,
                num_frames,
                timeout_nanoseconds,
            )
        };
        wrap_result(result)?;
        Ok(result as u32)
    }

    /// Write data to the stream.
    /// Returns the number of frames actually written.
    ///
    /// The call will wait until the write is complete or until it runs out of time.
    /// If `timeout_nanoseconds` is zero then this call will not wait.
    ///
    /// Note that `timeout_nanoseconds` is a relative duration in wall clock time.
    /// Time will not stop if the thread is asleep.
    /// So it will be implemented using CLOCK_BOOTTIME.
    ///
    /// This call is "strong non-blocking" unless it has to wait for room in the buffer.
    ///
    /// If the call times out then zero or a partial frame count will be returned.
    ///
    /// Available since API level 26.
    ///
    /// # Arguments
    ///
    /// * `buffer` - The slice with the samples to write.
    /// * `num_frames` - Number of frames to write. Only complete frames will be written.
    /// * `timeout_nanoseconds` - Maximum number of nanoseconds to wait for completion.
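    ///
    /// A minimal write sketch (assuming the crate is named `aaudio` and an
    /// output stream using `Format::I16`, so each sample is 2 bytes; the
    /// 100 ms timeout is arbitrary):
    ///
    /// ```no_run
    /// use aaudio::AAudioStream;
    ///
    /// fn write_silence(stream: &mut AAudioStream) -> Result<(), aaudio::Error> {
    ///     let frames = stream.get_frames_per_burst();
    ///     let bytes_per_frame = stream.get_channel_count() as usize * 2;
    ///     let buf = vec![0u8; frames as usize * bytes_per_frame];
    ///     // Write one burst of silence, waiting up to 100 ms for room in the buffer.
    ///     let frames_written = stream.write(&buf, frames, 100_000_000)?;
    ///     assert!(frames_written as i32 <= frames);
    ///     Ok(())
    /// }
    /// ```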
    pub fn write(
        &mut self,
        buffer: &[u8],
        num_frames: i32,
        timeout_nanoseconds: i64,
    ) -> Result<u32, Error> {
        let result = unsafe {
            ffi::AAudioStream_write(
                self.raw,
                buffer.as_ptr() as *const c_void,
                num_frames,
                timeout_nanoseconds,
            )
        };
        wrap_result(result)?;
        Ok(result as u32)
    }

    /// This can be used to adjust the latency of the buffer by changing
    /// the threshold where blocking will occur.
    /// By combining this with `AAudioStream::get_x_run_count()`, the latency can be tuned
    /// at run-time for each device.
    ///
    /// This cannot be set higher than `AAudioStream::get_buffer_capacity_in_frames()`.
    ///
    /// Note that you will probably not get the exact size you request.
    /// You can call `AAudioStream::get_buffer_size_in_frames()`
    /// to see what the actual final size is.
    ///
    /// Available since API level 26.
    ///
    /// # Arguments
    ///
    /// * `num_frames` - requested number of frames that can be filled without blocking
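    ///
    /// A sketch of one simple tuning strategy (not prescribed by AAudio, just an
    /// illustration; it assumes the crate is named `aaudio`): grow the buffer by
    /// one burst whenever a new underrun is observed.
    ///
    /// ```no_run
    /// use aaudio::AAudioStream;
    ///
    /// fn grow_buffer_on_xrun(stream: &mut AAudioStream, last_xruns: &mut i32) -> Result<(), aaudio::Error> {
    ///     let xruns = stream.get_x_run_count();
    ///     if xruns > *last_xruns {
    ///         *last_xruns = xruns;
    ///         // Raise the blocking threshold by one burst, up to the capacity.
    ///         let target = (stream.get_buffer_size_in_frames() + stream.get_frames_per_burst())
    ///             .min(stream.get_buffer_capacity_in_frames());
    ///         stream.set_buffer_size_in_frames(target)?;
    ///     }
    ///     Ok(())
    /// }
    /// ```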
    pub fn set_buffer_size_in_frames(&mut self, num_frames: i32) -> Result<(), Error> {
        let result = unsafe { ffi::AAudioStream_setBufferSizeInFrames(self.raw, num_frames) };
        wrap_result(result)
    }
}

impl Drop for AAudioStream {
    fn drop(&mut self) {
        unsafe {
            ffi::AAudioStream_close(self.raw);
        }
    }
}

/// Passed as a callback parameter, providing the operations that are safe
/// to perform from within a callback invocation.
pub struct AAudioStreamInfo {
    raw: *mut AAudioStreamRaw,
}

unsafe impl Send for AAudioStreamInfo {}

impl AAudioStreamInfo {
    /// Returns the actual sample rate.
    ///
    /// Available since API level 26.
    pub fn get_sample_rate(&self) -> i32 {
        unsafe { ffi::AAudioStream_getSampleRate(self.raw) }
    }

    /// A stream has one or more channels of data.
    /// A frame will contain one sample for each channel.
    ///
    /// Available since API level 26.
    pub fn get_channel_count(&self) -> i32 {
        unsafe { ffi::AAudioStream_getChannelCount(self.raw) }
    }

    /// Query the maximum number of frames that can be filled without blocking.
    ///
    /// Available since API level 26.
    pub fn get_buffer_size_in_frames(&self) -> i32 {
        unsafe { ffi::AAudioStream_getBufferSizeInFrames(self.raw) }
    }

    /// Query the number of frames that the application should read or write at
    /// one time for optimal performance. It is OK if an application writes
    /// a different number of frames. But the buffer size may need to be larger
    /// in order to avoid underruns or overruns.
    ///
    /// Note that this may or may not match the actual device burst size.
    /// For some endpoints, the burst size can vary dynamically.
    /// But these tend to be devices with high latency.
    ///
    /// Available since API level 26.
    pub fn get_frames_per_burst(&self) -> i32 {
        unsafe { ffi::AAudioStream_getFramesPerBurst(self.raw) }
    }

    /// Query maximum buffer capacity in frames.
    ///
    /// Available since API level 26.
    pub fn get_buffer_capacity_in_frames(&self) -> i32 {
        unsafe { ffi::AAudioStream_getBufferCapacityInFrames(self.raw) }
    }

    /// Query the size of the buffer that will be passed to the dataProc callback
    /// in the numFrames parameter.
    ///
    /// This call can be used if the application needs to know the value of numFrames before
    /// the stream is started. This is not normally necessary.
    ///
    /// If a specific size was requested by calling
    /// `AAudioStreamBuilder::set_frames_per_data_callback()` then this will be the same size.
    ///
    /// If `AAudioStreamBuilder::set_frames_per_data_callback()` was not called then this will
    /// return the size chosen by AAudio, or 0.
    ///
    /// 0 indicates that the callback buffer size for this stream
    /// may vary from one dataProc callback to the next.
    ///
    /// Available since API level 26.
    pub fn get_frames_per_data_callback(&self) -> i32 {
        unsafe { ffi::AAudioStream_getFramesPerDataCallback(self.raw) }
    }

    /// An XRun is an Underrun or an Overrun.
    /// During playing, an underrun will occur if the stream is not written in time
    /// and the system runs out of valid data.
    /// During recording, an overrun will occur if the stream is not read in time
    /// and there is no place to put the incoming data so it is discarded.
    ///
    /// An underrun or overrun can cause an audible "pop" or "glitch".
    ///
    /// Note that some INPUT devices may not support this function.
    /// In that case a 0 will always be returned.
    ///
    /// Available since API level 26.
    pub fn get_x_run_count(&self) -> i32 {
        unsafe { ffi::AAudioStream_getXRunCount(self.raw) }
    }

    /// Returns the actual device ID.
    ///
    /// Available since API level 26.
    pub fn get_device_id(&self) -> i32 {
        unsafe { ffi::AAudioStream_getDeviceId(self.raw) }
    }

    /// Returns the actual data format.
    ///
    /// Available since API level 26.
    pub fn get_format(&self) -> Format {
        let val = unsafe { ffi::AAudioStream_getFormat(self.raw) };
        Format::from_i32(val)
    }

    /// Provide actual sharing mode.
    ///
    /// Available since API level 26.
    pub fn get_sharing_mode(&self) -> SharingMode {
        let val = unsafe { ffi::AAudioStream_getSharingMode(self.raw) };
        SharingMode::from_i32(val)
    }

    /// Get the performance mode used by the stream.
    ///
    /// Available since API level 26.
    pub fn get_performance_mode(&self) -> PerformanceMode {
        let val = unsafe { ffi::AAudioStream_getPerformanceMode(self.raw) };
        PerformanceMode::from_i32(val)
    }

    /// Available since API level 26.
    pub fn get_direction(&self) -> Direction {
        let val = unsafe { ffi::AAudioStream_getDirection(self.raw) };
        Direction::from_i32(val)
    }

    /// Returns the number of frames that have been written since the stream was created.
    /// For an output stream, this will be advanced by the application calling `write()`
    /// or by a data callback.
    /// For an input stream, this will be advanced by the endpoint.
    ///
    /// The frame position is monotonically increasing.
    ///
    /// Available since API level 26.
    pub fn get_frames_written(&self) -> i64 {
        unsafe { ffi::AAudioStream_getFramesWritten(self.raw) }
    }

    /// Returns the number of frames that have been read since the stream was created.
    /// For an output stream, this will be advanced by the endpoint.
    /// For an input stream, this will be advanced by the application calling `read()`
    /// or by a data callback.
    ///
    /// The frame position is monotonically increasing.
    ///
    /// Available since API level 26.
    pub fn get_frames_read(&self) -> i64 {
        unsafe { ffi::AAudioStream_getFramesRead(self.raw) }
    }

    /// Passes back the session ID associated with this stream.
    ///
    /// The session ID can be used to associate a stream with effects processors.
    /// The effects are controlled using the Android AudioEffect Java API.
    ///
    /// If `AAudioStreamBuilder::set_session_id()` was called with 0
    /// then a new session ID should be allocated once when the stream is opened.
    ///
    /// If `AAudioStreamBuilder::set_session_id()` was called with a previously allocated
    /// session ID then that value should be returned.
    ///
    /// If `AAudioStreamBuilder::set_session_id()` was not called then this function should
    /// return -1.
    ///
    /// The sessionID for a stream should not change once the stream has been opened.
    ///
    /// Available since API level 28.
    pub fn get_session_id(&self) -> i32 {
        unsafe { ffi::AAudioStream_getSessionId(self.raw) }
    }

    /// Returns the time at which a particular frame was presented.
    /// This can be used to synchronize audio with video or MIDI.
    /// It can also be used to align a recorded stream with a playback stream.
    ///
    /// Timestamps are only valid when the stream is in `Started` state.
    /// `InvalidState` will be returned if the stream is not started.
    /// Note that because request_start() is asynchronous, timestamps will not be valid until
    /// a short time after calling request_start().
    /// So `InvalidState` should not be considered a fatal error.
    /// Just try calling again later.
    ///
    /// If an error occurs, then the position and time will not be modified.
    ///
    /// The position and time passed back are monotonically increasing.
    ///
    /// Available since API level 26.
    pub fn get_timestamp_monotonic(&self) -> Result<Timestamp, Error> {
        get_timestamp_monotonic(self.raw)
    }

    /// Query the current state of the client, e.g. `Pausing`.
    ///
    /// This function will immediately return the state without updating the state.
    /// If you want to update the client state based on the server state then
    /// call `AAudioStream::wait_for_state_change()` with currentState
    /// set to `Unknown` and a zero timeout.
    ///
    /// Available since API level 26.
    pub fn get_state(&self) -> StreamState {
        let val = unsafe { ffi::AAudioStream_getState(self.raw) };
        StreamState::from_i32(val)
    }
}

pub struct AAudioStreamBuilder {
    raw: *mut AAudioStreamBuilderRaw,
    callbacks: Option<StreamCallbacks>,
}

unsafe extern "C" fn raw_data_callback(
    stream: *mut AAudioStreamRaw,
    user_data: *mut c_void,
    audio_data: *mut c_void,
    num_frames: i32,
) -> i32 {
    match std::panic::catch_unwind(|| {
        let stream = AAudioStreamInfo { raw: stream };
        // One frame holds one sample per channel, so the callback buffer is
        // num_frames * channel_count * bytes_per_sample bytes long.
        let data: &mut [u8] = std::slice::from_raw_parts_mut(
            audio_data as *mut u8,
            (num_frames * stream.get_channel_count() * stream.get_format().sample_size()) as usize,
        );
        let callback =
            user_data as *mut Box<dyn FnMut(&AAudioStreamInfo, &mut [u8], i32) -> CallbackResult>;
        let callback = &mut *callback;
        callback(&stream, data, num_frames) as i32
    }) {
        Ok(r) => r,
        Err(e) => {
            // Never unwind across the FFI boundary; abort instead.
            eprintln!("{:?}", e);
            std::process::abort();
        }
    }
}

unsafe extern "C" fn raw_error_callback(
    stream: *mut AAudioStreamRaw,
    user_data: *mut c_void,
    error: i32,
) {
    if let Err(e) = std::panic::catch_unwind(|| {
        let stream = AAudioStreamInfo { raw: stream };
        let callback = user_data as *mut Box<dyn FnMut(&AAudioStreamInfo, Error)>;
        let callback = &mut *callback;
        callback(&stream, Error::from_code(error));
    }) {
        // Never unwind across the FFI boundary; abort instead.
        eprintln!("{:?}", e);
        std::process::abort();
    }
}

impl AAudioStreamBuilder {
    pub fn new() -> Result<Self, Error> {
        let mut raw = MaybeUninit::<*mut AAudioStreamBuilderRaw>::uninit();
        let result = unsafe { ffi::AAudio_createStreamBuilder(raw.as_mut_ptr()) };
        wrap_result(result)?;
        Ok(Self {
            raw: unsafe { raw.assume_init() },
            callbacks: None,
        })
    }

    /// Request that AAudio call the `data_callback` when the stream is running and the
    /// `error_callback` if any error occurs or the stream is disconnected.
    ///
    /// Note that when using a data callback, the audio data will be passed in or out
    /// of the function as an argument.
    /// So you cannot call `AAudioStream::write()` or `AAudioStream::read()`
    /// on the same stream that has an active data callback.
    ///
    /// The data callback function will start being called after `AAudioStream::request_start()`
    /// is called.
    /// It will stop being called after `AAudioStream::request_pause()` or
    /// `AAudioStream::request_stop()` is called.
    ///
    /// The `data_callback` function will be called on a real-time thread owned by AAudio.
    /// Note that the number of frames passed to the callback can vary unless
    /// `AAudioStreamBuilder::set_frames_per_data_callback()` is called.
    ///
    /// Also note that this callback function should be considered a "real-time" function.
    /// It must not do anything that could cause an unbounded delay because that can cause the
    /// audio to glitch or pop.
    ///
    /// These are things the function should NOT do:
    /// * allocate memory using, for example, malloc() or new
    /// * any file operations such as opening, closing, reading or writing
    /// * any network operations such as streaming
    /// * use any mutexes or other synchronization primitives
    /// * sleep
    /// * stop or close the stream
    /// * `AAudioStream::read()`
    /// * `AAudioStream::write()`
    ///
    /// If you need to move data, e.g. MIDI commands, in or out of the callback function then
    /// we recommend the use of non-blocking techniques such as an atomic FIFO.
    ///
    /// The `error_callback` will be called, for example, if a headset or a USB device is unplugged causing the stream's
    /// device to be unavailable or "disconnected".
    /// Another possible cause of error would be a timeout or an unanticipated internal error.
    ///
    /// In response, this function should signal or create another thread to stop
    /// and close this stream. The other thread could then reopen a stream on another device.
    /// Do not stop or close the stream, or reopen the new stream, directly from this callback.
    ///
    /// The `error_callback` will not be called because of actions by the application, such as stopping
    /// or closing a stream.
    ///
    /// Note that the AAudio callbacks will never be called simultaneously from multiple threads.
    ///
    /// Available since API level 26.
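    ///
    /// A minimal sketch of a callback-driven output stream (assuming the crate
    /// is named `aaudio`; the 440 Hz tone, the 48 kHz rate and the 0.2 gain are
    /// arbitrary illustration values, and `Format::F32` with one channel is
    /// requested so that every 4 bytes of `data` hold one sample):
    ///
    /// ```no_run
    /// use aaudio::{AAudioStreamBuilder, CallbackResult, Format};
    ///
    /// fn start_tone() -> Result<aaudio::AAudioStream, aaudio::Error> {
    ///     let mut phase = 0.0f32;
    ///     let mut stream = AAudioStreamBuilder::new()?
    ///         .set_format(Format::F32)
    ///         .set_channel_count(1)
    ///         .set_sample_rate(48_000)
    ///         .set_callbacks(
    ///             move |_info, data, _num_frames| {
    ///                 // `data` is a raw byte buffer; write one f32 sample per frame.
    ///                 for frame in data.chunks_exact_mut(4) {
    ///                     let sample = (phase * std::f32::consts::TAU).sin() * 0.2;
    ///                     frame.copy_from_slice(&sample.to_ne_bytes());
    ///                     phase = (phase + 440.0 / 48_000.0) % 1.0;
    ///                 }
    ///                 CallbackResult::Continue
    ///             },
    ///             |_info, error| eprintln!("stream error: {}", error),
    ///         )
    ///         .open_stream()?;
    ///     stream.request_start()?;
    ///     Ok(stream)
    /// }
    /// ```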
    pub fn set_callbacks<D, E>(mut self, data_callback: D, error_callback: E) -> Self
    where
        D: FnMut(&AAudioStreamInfo, &mut [u8], i32) -> CallbackResult + Send + 'static,
        E: FnMut(&AAudioStreamInfo, Error) + Send + 'static,
    {
        // Double-box the closures so that a thin pointer can be passed through the
        // FFI `user_data` argument. The boxes are stored in `StreamCallbacks` and
        // kept alive for as long as the stream exists.
        let data_callback: Box<
            Box<dyn FnMut(&AAudioStreamInfo, &mut [u8], i32) -> CallbackResult + Send + 'static>,
        > = Box::new(Box::new(data_callback));
        let error_callback: Box<Box<dyn FnMut(&AAudioStreamInfo, Error) + Send + 'static>> =
            Box::new(Box::new(error_callback));
        let data_callback_raw = Box::into_raw(data_callback);
        let error_callback_raw = Box::into_raw(error_callback);
        let callbacks = StreamCallbacks {
            _data_callback: unsafe { Box::from_raw(data_callback_raw) },
            _error_callback: unsafe { Box::from_raw(error_callback_raw) },
        };
        unsafe {
            ffi::AAudioStreamBuilder_setDataCallback(
                self.raw,
                Some(raw_data_callback),
                data_callback_raw as *mut c_void,
            );
            ffi::AAudioStreamBuilder_setErrorCallback(
                self.raw,
                Some(raw_error_callback),
                error_callback_raw as *mut c_void,
            );
        }
        self.callbacks = Some(callbacks);
        self
    }

    /// Request an audio device identified by an ID.
    /// On Android, for example, the ID could be obtained from the Java AudioManager.
    ///
    /// The default, if you do not call this function, is 0,
    /// in which case the primary device will be used.
    ///
    /// Available since API level 26.
    ///
    /// # Arguments
    ///
    /// * `device_id` - device identifier or 0 for unspecified
    pub fn set_device_id(self, device_id: i32) -> Self {
        unsafe {
            ffi::AAudioStreamBuilder_setDeviceId(self.raw, device_id);
        }
        self
    }

    /// Request a sample rate in Hertz.
    ///
    /// The default, if you do not call this function, is 0 (unspecified).
    /// An optimal value will then be chosen when the stream is opened.
    /// After opening a stream with an unspecified value, the application must
    /// query for the actual value, which may vary by device.
    ///
    /// If an exact value is specified then an opened stream will use that value.
    /// If a stream cannot be opened with the specified value then the open will fail.
    ///
    /// Available since API level 26.
    ///
    /// # Arguments
    ///
    /// * `sample_rate` - frames per second. Common rates include 44100 and 48000 Hz.
    pub fn set_sample_rate(self, sample_rate: i32) -> Self {
        unsafe {
            ffi::AAudioStreamBuilder_setSampleRate(self.raw, sample_rate);
        }
        self
    }

    /// Request a number of channels for the stream.
    ///
    /// The default, if you do not call this function, is unspecified.
    /// An optimal value will then be chosen when the stream is opened.
    /// After opening a stream with an unspecified value, the application must
    /// query for the actual value, which may vary by device.
    ///
    /// If an exact value is specified then an opened stream will use that value.
    /// If a stream cannot be opened with the specified value then the open will fail.
    ///
    /// Available since API level 26.
    ///
    /// # Arguments
    ///
    /// * `channel_count` - Number of channels desired.
    pub fn set_channel_count(self, channel_count: i32) -> Self {
        unsafe {
            ffi::AAudioStreamBuilder_setChannelCount(self.raw, channel_count);
        }
        self
    }

    /// Request a sample data format, for example `Format::I16`.
    ///
    /// The default, if you do not call this function, is `Unspecified`.
    /// An optimal value will then be chosen when the stream is opened.
    /// After opening a stream with an unspecified value, the application must
    /// query for the actual value, which may vary by device.
    ///
    /// If an exact value is specified then an opened stream will use that value.
    /// If a stream cannot be opened with the specified value then the open will fail.
    ///
    /// Available since API level 26.
    ///
    /// # Arguments
    ///
    /// * `format` - the sample data format.
    pub fn set_format(self, format: Format) -> Self {
        unsafe { ffi::AAudioStreamBuilder_setFormat(self.raw, format as i32) }
        self
    }

    /// Request a mode for sharing the device.
    ///
    /// The default, if you do not call this function, is `SharingMode::Shared`.
    ///
    /// The requested sharing mode may not be available.
    /// The application can query for the actual mode after the stream is opened.
    ///
    /// Available since API level 26.
    ///
    /// # Arguments
    ///
    /// * `sharing_mode` - `SharingMode::Shared` or `SharingMode::Exclusive`
    pub fn set_sharing_mode(self, sharing_mode: SharingMode) -> Self {
        unsafe { ffi::AAudioStreamBuilder_setSharingMode(self.raw, sharing_mode as i32) }
        self
    }

    /// Request the direction for a stream.
    ///
    /// The default, if you do not call this function, is `Direction::Output`.
    ///
    /// Available since API level 26.
    ///
    /// # Arguments
    ///
    /// * `direction` - `Direction::Output` or `Direction::Input`
    pub fn set_direction(self, direction: Direction) -> Self {
        unsafe { ffi::AAudioStreamBuilder_setDirection(self.raw, direction as i32) }
        self
    }

    /// Set the requested buffer capacity in frames.
    /// The final AAudioStream capacity may differ, but will probably be at least this big.
    ///
    /// The default, if you do not call this function, is unspecified.
    ///
    /// Available since API level 26.
    ///
    /// # Arguments
    ///
    /// * `num_frames` - the desired buffer capacity in frames or 0 for unspecified
    pub fn set_buffer_capacity_in_frames(self, num_frames: i32) -> Self {
        unsafe { ffi::AAudioStreamBuilder_setBufferCapacityInFrames(self.raw, num_frames) }
        self
    }

    /// Set the requested performance mode.
    ///
    /// Supported modes are None, PowerSaving and LowLatency.
    ///
    /// The default, if you do not call this function, is None.
    ///
    /// You may not get the mode you requested.
    /// You can call `AAudioStream::get_performance_mode()`
    /// to find out the final mode for the stream.
    ///
    /// Available since API level 26.
    ///
    /// # Arguments
    ///
    /// * `mode` - the desired performance mode, e.g. `PerformanceMode::LowLatency`
    pub fn set_performance_mode(self, mode: PerformanceMode) -> Self {
        unsafe { ffi::AAudioStreamBuilder_setPerformanceMode(self.raw, mode as i32) }
        self
    }

    /// Set the intended use case for the stream.
    ///
    /// The AAudio system will use this information to optimize the
    /// behavior of the stream.
    /// This could, for example, affect how volume and focus are handled for the stream.
    ///
    /// The default, if you do not call this function, is `Usage::Media`.
    ///
    /// Available since API level 28.
    ///
    /// # Arguments
    ///
    /// * `usage` - the desired usage, e.g. `Usage::Game`
    pub fn set_usage(self, usage: Usage) -> Self {
        unsafe { ffi::AAudioStreamBuilder_setUsage(self.raw, usage as i32) }
        self
    }

    /// Set the type of audio data that the stream will carry.
    ///
    /// The AAudio system will use this information to optimize the
    /// behavior of the stream.
    /// This could, for example, affect whether a stream is paused when a notification occurs.
    ///
    /// The default, if you do not call this function, is `ContentType::Music`.
    ///
    /// Available since API level 28.
    ///
    /// # Arguments
    ///
    /// * `content_type` - the type of audio data, e.g. `ContentType::Speech`
    pub fn set_content_type(self, content_type: ContentType) -> Self {
        unsafe { ffi::AAudioStreamBuilder_setContentType(self.raw, content_type as i32) }
        self
    }

    /// Set the input (capture) preset for the stream.
    ///
    /// The AAudio system will use this information to optimize the
    /// behavior of the stream.
    /// This could, for example, affect which microphones are used and how the
    /// recorded data is processed.
    ///
    /// The default, if you do not call this function, is `InputPreset::VoiceRecognition`.
    /// That is because `InputPreset::VoiceRecognition` is the preset with the lowest latency
    /// on many platforms.
    ///
    /// Available since API level 28.
    ///
    /// # Arguments
    ///
    /// * `input_preset` - the desired configuration for recording
    pub fn set_input_preset(self, input_preset: InputPreset) -> Self {
        unsafe { ffi::AAudioStreamBuilder_setInputPreset(self.raw, input_preset as i32) }
        self
    }

    /// Specify whether this stream's audio may or may not be captured by other apps or the system.
    ///
    /// The default is `AllowedCapturePolicy::AllowCaptureByAll`.
    ///
    /// Note that an application can also set its global policy, in which case the most restrictive
    /// policy is always applied. See android.media.AudioAttributes#setAllowedCapturePolicy(int)
    ///
    /// Available since API level 29.
    ///
    /// # Arguments
    ///
    /// * `policy` - the desired level of opt-out from being captured.
    pub fn set_allowed_capture_policy(self, policy: AllowedCapturePolicy) -> Self {
        unsafe { ffi::AAudioStreamBuilder_setAllowedCapturePolicy(self.raw, policy as i32) }
        self
    }

    /// Equivalent to invoking `AAudioStreamBuilder::set_session_id` with a 0 argument.
    pub fn allocate_session_id(self) -> Self {
        unsafe { ffi::AAudioStreamBuilder_setSessionId(self.raw, 0) }
        self
    }

    /// Equivalent to invoking `AAudioStreamBuilder::set_session_id` with a -1 argument.
    pub fn remove_session_id(self) -> Self {
        unsafe { ffi::AAudioStreamBuilder_setSessionId(self.raw, -1) }
        self
    }

    /// The session ID can be used to associate a stream with effects processors.
    /// The effects are controlled using the Android AudioEffect Java API.
    ///
    /// The default, if you do not call this function, is -1 (none).
    ///
    /// If set to 0 then a session ID will be allocated
    /// when the stream is opened.
    ///
    /// The allocated session ID can be obtained by calling `AAudioStream::get_session_id()`
    /// and then used with this function when opening another stream.
    /// This allows effects to be shared between streams.
    ///
    /// Session IDs from AAudio can be used with the Android Java APIs and vice versa.
    /// So a session ID from an AAudio stream can be passed to Java
    /// and effects applied using the Java AudioEffect API.
    ///
    /// Note that allocating or setting a session ID may result in a stream with higher latency.
    ///
    /// Allocated session IDs will always be positive and nonzero.
    ///
    /// Available since API level 28.
    ///
    /// # Arguments
    ///
    /// * `session_id` - an allocated sessionID or 0 to allocate a new sessionID
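    ///
    /// A minimal sketch of sharing one session ID between two streams
    /// (assuming the crate is named `aaudio`; attaching effects then happens
    /// through the Java AudioEffect API, which is not shown):
    ///
    /// ```no_run
    /// use aaudio::AAudioStreamBuilder;
    ///
    /// fn open_pair() -> Result<(aaudio::AAudioStream, aaudio::AAudioStream), aaudio::Error> {
    ///     let first = AAudioStreamBuilder::new()?
    ///         .allocate_session_id()
    ///         .open_stream()?;
    ///     let second = AAudioStreamBuilder::new()?
    ///         .set_session_id(first.get_session_id())
    ///         .open_stream()?;
    ///     Ok((first, second))
    /// }
    /// ```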
    pub fn set_session_id(self, session_id: i32) -> Self {
        unsafe { ffi::AAudioStreamBuilder_setSessionId(self.raw, session_id) }
        self
    }

    /// Indicates whether this input stream must be marked as privacy sensitive or not.
    ///
    /// When true, this input stream is privacy sensitive and any concurrent capture
    /// is not permitted.
    ///
    /// This is off (false) by default except when the input preset is `InputPreset::VoiceCommunication`
    /// or `InputPreset::Camcorder`.
    ///
    /// Always takes precedence over the default from the input preset when set explicitly.
    ///
    /// Only relevant if the stream direction is `Direction::Input`.
    ///
    /// Added in API level 30.
    ///
    /// # Arguments
    ///
    /// * `privacy_sensitive` - `true` if capture from this stream must be marked as privacy sensitive, `false` otherwise.
    pub fn set_privacy_sensitive(self, privacy_sensitive: bool) -> Self {
        unsafe { ffi::AAudioStreamBuilder_setPrivacySensitive(self.raw, privacy_sensitive) }
        self
    }

    /// Set the requested data callback buffer size in frames.
    /// See [`set_callbacks`].
    ///
    /// The default, if you do not call this function, is unspecified.
    ///
    /// For the lowest possible latency, do not call this function. AAudio will then
    /// call the dataProc callback function with whatever size is optimal.
    /// That size may vary from one callback to another.
    ///
    /// Only use this function if the application requires a specific number of frames for processing.
    /// The application might, for example, be using an FFT that requires
    /// a specific power-of-two sized buffer.
    ///
    /// AAudio may need to add additional buffering in order to adapt between the internal
    /// buffer size and the requested buffer size.
    ///
    /// If you do call this function then the requested size should be less than
    /// half the buffer capacity, to allow double buffering.
    ///
    /// Available since API level 26.
    ///
    /// # Arguments
    ///
    /// * `num_frames` - the desired buffer size in frames or 0 for unspecified
    ///
    /// [`set_callbacks`]: AAudioStreamBuilder::set_callbacks
    pub fn set_frames_per_data_callback(self, num_frames: i32) -> Self {
        unsafe { ffi::AAudioStreamBuilder_setFramesPerDataCallback(self.raw, num_frames) }
        self
    }

    /// Open a stream based on the options in the AAudioStreamBuilder.
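    ///
    /// A minimal sketch of opening a low-latency mono recording stream
    /// (assuming the crate is named `aaudio`; the parameter values are only
    /// illustrative):
    ///
    /// ```no_run
    /// use aaudio::{AAudioStreamBuilder, Direction, Format, PerformanceMode};
    ///
    /// fn open_recorder() -> Result<aaudio::AAudioStream, aaudio::Error> {
    ///     AAudioStreamBuilder::new()?
    ///         .set_direction(Direction::Input)
    ///         .set_format(Format::I16)
    ///         .set_channel_count(1)
    ///         .set_sample_rate(48_000)
    ///         .set_performance_mode(PerformanceMode::LowLatency)
    ///         .open_stream()
    /// }
    /// ```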
    pub fn open_stream(mut self) -> Result<AAudioStream, Error> {
        let mut raw = MaybeUninit::<*mut AAudioStreamRaw>::uninit();
        let result = unsafe { ffi::AAudioStreamBuilder_openStream(self.raw, raw.as_mut_ptr()) };
        wrap_result(result)?;
        let stream = AAudioStream {
            raw: unsafe { raw.assume_init() },
            _callbacks: self.callbacks.take(),
        };
        Ok(stream)
    }
}

impl Drop for AAudioStreamBuilder {
    fn drop(&mut self) {
        unsafe {
            ffi::AAudioStreamBuilder_delete(self.raw);
        }
    }
}