// objc2_speech/generated/SFSpeechRecognitionRequest.rs

1//! This file has been automatically generated by `objc2`'s `header-translator`.
2//! DO NOT EDIT
3use core::ffi::*;
4use core::ptr::NonNull;
5use objc2::__framework_prelude::*;
6#[cfg(feature = "objc2-avf-audio")]
7use objc2_avf_audio::*;
8#[cfg(feature = "objc2-core-media")]
9use objc2_core_media::*;
10use objc2_foundation::*;
11
12use crate::*;
13
extern_class!(
    /// An abstract class that represents a request to recognize speech from an audio source.
    ///
    /// Don't create ``SFSpeechRecognitionRequest`` objects directly. Create an ``SFSpeechURLRecognitionRequest`` or ``SFSpeechAudioBufferRecognitionRequest`` object instead. Use the properties of this class to configure various aspects of your request object before you start the speech recognition process. For example, use the ``shouldReportPartialResults`` property to specify whether you want partial results or only the final result of speech recognition.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/speech/sfspeechrecognitionrequest?language=objc)
    // Direct subclass of `NSObject`; the struct body is empty because instances
    // are Objective-C objects managed by the runtime, not plain Rust values.
    #[unsafe(super(NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct SFSpeechRecognitionRequest;
);
24
// `SFSpeechRecognitionRequest` inherits from `NSObject`, so it conforms to
// `NSObjectProtocol`.
extern_conformance!(
    unsafe impl NSObjectProtocol for SFSpeechRecognitionRequest {}
);
28
impl SFSpeechRecognitionRequest {
    extern_methods!(
        #[cfg(feature = "SFSpeechRecognitionTaskHint")]
        /// A value that indicates the type of speech recognition being performed.
        ///
        /// The default value of this property is ``SFSpeechRecognitionTaskHint/unspecified``. For a valid list of values, see ``SFSpeechRecognitionTaskHint``.
        #[unsafe(method(taskHint))]
        #[unsafe(method_family = none)]
        pub unsafe fn taskHint(&self) -> SFSpeechRecognitionTaskHint;

        #[cfg(feature = "SFSpeechRecognitionTaskHint")]
        /// Setter for [`taskHint`][Self::taskHint].
        #[unsafe(method(setTaskHint:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setTaskHint(&self, task_hint: SFSpeechRecognitionTaskHint);

        /// A Boolean value that indicates whether you want intermediate results returned for each utterance.
        ///
        /// The default value of this property is `true`. If you want only final results (and you don't care about intermediate results), set this property to `false` to prevent the system from doing extra work.
        #[unsafe(method(shouldReportPartialResults))]
        #[unsafe(method_family = none)]
        pub unsafe fn shouldReportPartialResults(&self) -> bool;

        /// Setter for [`shouldReportPartialResults`][Self::shouldReportPartialResults].
        #[unsafe(method(setShouldReportPartialResults:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setShouldReportPartialResults(&self, should_report_partial_results: bool);

        /// An array of phrases that should be recognized, even if they are not in the system vocabulary.
        ///
        /// Use this property to specify short custom phrases that are unique to your app. You might include phrases with the names of characters, products, or places that are specific to your app. You might also include domain-specific terminology or unusual or made-up words. Assigning custom phrases to this property improves the likelihood of those phrases being recognized.
        ///
        /// Keep phrases relatively brief, limiting them to one or two words whenever possible. Lengthy phrases are less likely to be recognized. In addition, try to limit each phrase to something the user can say without pausing.
        ///
        /// Limit the total number of phrases to no more than 100.
        #[unsafe(method(contextualStrings))]
        #[unsafe(method_family = none)]
        pub unsafe fn contextualStrings(&self) -> Retained<NSArray<NSString>>;

        /// Setter for [`contextualStrings`][Self::contextualStrings].
        ///
        /// This is [copied][objc2_foundation::NSCopying::copy] when set.
        #[unsafe(method(setContextualStrings:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setContextualStrings(&self, contextual_strings: &NSArray<NSString>);

        /// An identifier string that you use to describe the type of interaction associated with the speech recognition request.
        ///
        /// If different parts of your app have different speech recognition needs, you can use this property to identify the part of your app that is making each request. For example, if one part of your app lets users speak phone numbers and another part lets users speak street addresses, consistently identifying the part of the app that makes a recognition request may help improve the accuracy of the results.
        #[deprecated = "Not used anymore"]
        #[unsafe(method(interactionIdentifier))]
        #[unsafe(method_family = none)]
        pub unsafe fn interactionIdentifier(&self) -> Option<Retained<NSString>>;

        /// Setter for [`interactionIdentifier`][Self::interactionIdentifier].
        ///
        /// This is [copied][objc2_foundation::NSCopying::copy] when set.
        #[deprecated = "Not used anymore"]
        #[unsafe(method(setInteractionIdentifier:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setInteractionIdentifier(&self, interaction_identifier: Option<&NSString>);

        /// A Boolean value that determines whether a request must keep its audio data on the device.
        ///
        /// Set this property to `true` to prevent an ``SFSpeechRecognitionRequest`` from sending audio over the network. However, on-device requests won't be as accurate.
        ///
        /// > Note:
        /// > The request only honors this setting if the ``SFSpeechRecognizer/supportsOnDeviceRecognition`` (``SFSpeechRecognizer``) property is also `true`.
        #[unsafe(method(requiresOnDeviceRecognition))]
        #[unsafe(method_family = none)]
        pub unsafe fn requiresOnDeviceRecognition(&self) -> bool;

        /// Setter for [`requiresOnDeviceRecognition`][Self::requiresOnDeviceRecognition].
        #[unsafe(method(setRequiresOnDeviceRecognition:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setRequiresOnDeviceRecognition(&self, requires_on_device_recognition: bool);

        /// A Boolean value that indicates whether to add punctuation to speech recognition results.
        ///
        /// Set this property to `true` for the speech framework to automatically include punctuation in the recognition results. Punctuation includes a period or question mark at the end of a sentence, and a comma within a sentence.
        #[unsafe(method(addsPunctuation))]
        #[unsafe(method_family = none)]
        pub unsafe fn addsPunctuation(&self) -> bool;

        /// Setter for [`addsPunctuation`][Self::addsPunctuation].
        #[unsafe(method(setAddsPunctuation:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setAddsPunctuation(&self, adds_punctuation: bool);

        #[cfg(feature = "SFSpeechLanguageModel")]
        /// The configuration for a customized language model, if one has been set.
        ///
        /// Returns `None` when no customized language model is configured.
        /// See [`setCustomizedLanguageModel`][Self::setCustomizedLanguageModel].
        #[unsafe(method(customizedLanguageModel))]
        #[unsafe(method_family = none)]
        pub unsafe fn customizedLanguageModel(
            &self,
        ) -> Option<Retained<SFSpeechLanguageModelConfiguration>>;

        #[cfg(feature = "SFSpeechLanguageModel")]
        /// Setter for [`customizedLanguageModel`][Self::customizedLanguageModel].
        ///
        /// This is [copied][objc2_foundation::NSCopying::copy] when set.
        #[unsafe(method(setCustomizedLanguageModel:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setCustomizedLanguageModel(
            &self,
            customized_language_model: Option<&SFSpeechLanguageModelConfiguration>,
        );
    );
}
137
/// Methods declared on superclass `NSObject`.
impl SFSpeechRecognitionRequest {
    extern_methods!(
        /// Initializes an allocated instance (Objective-C `-init`).
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        /// Allocates and initializes a new instance (Objective-C `+new`).
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}
150
extern_class!(
    /// A request to recognize speech in a recorded audio file.
    ///
    /// Use this object to perform speech recognition on the contents of an audio file.
    ///
    /// The following example shows a method that performs recognition on an audio file based on the user's default language and prints out the transcription.
    ///
    /// Listing 1. Getting a speech recognizer and making a recognition request
    ///
    /// ```swift
    /// func recognizeFile(url: URL) {
    ///     // Create a speech recognizer associated with the user's default language.
    ///     guard let myRecognizer = SFSpeechRecognizer() else {
    ///         // The system doesn't support the user's default language.
    ///         return
    ///     }
    ///
    ///     guard myRecognizer.isAvailable else {
    ///         // The recognizer isn't available.
    ///         return
    ///     }
    ///
    ///     // Create and execute a speech recognition request for the audio file at the URL.
    ///     let request = SFSpeechURLRecognitionRequest(url: url)
    ///     myRecognizer.recognitionTask(with: request) { (result, error) in
    ///         guard let result else {
    ///             // Recognition failed, so check the error for details and handle it.
    ///             return
    ///         }
    ///
    ///         // Print the speech transcription with the highest confidence that the
    ///         // system recognized.
    ///         if result.isFinal {
    ///             print(result.bestTranscription.formattedString)
    ///         }
    ///     }
    /// }
    /// ```
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/speech/sfspeechurlrecognitionrequest?language=objc)
    #[unsafe(super(SFSpeechRecognitionRequest, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct SFSpeechURLRecognitionRequest;
);
195
// `SFSpeechURLRecognitionRequest` inherits from `NSObject`, so it conforms to
// `NSObjectProtocol`.
extern_conformance!(
    unsafe impl NSObjectProtocol for SFSpeechURLRecognitionRequest {}
);
199
impl SFSpeechURLRecognitionRequest {
    extern_methods!(
        /// Initializes an allocated request (Objective-C `-init`).
        ///
        /// Prefer [`initWithURL`][Self::initWithURL] so the request has an
        /// audio file to recognize.
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        /// Creates a speech recognition request, initialized with the specified URL.
        ///
        /// Use this method to create a request to recognize speech in a recorded audio file that resides at the specified URL. Pass the request to the recognizer's ``SFSpeechRecognizer/recognitionTask(with:delegate:)`` method to start recognition.
        #[unsafe(method(initWithURL:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithURL(this: Allocated<Self>, url: &NSURL) -> Retained<Self>;

        /// The URL of the audio file.
        #[unsafe(method(URL))]
        #[unsafe(method_family = none)]
        pub unsafe fn URL(&self) -> Retained<NSURL>;
    );
}
219
/// Methods declared on superclass `NSObject`.
impl SFSpeechURLRecognitionRequest {
    extern_methods!(
        /// Allocates and initializes a new instance (Objective-C `+new`).
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}
228
extern_class!(
    /// A request to recognize speech from captured audio content, such as audio from the device's microphone.
    ///
    /// Use an ``SFSpeechAudioBufferRecognitionRequest`` object to perform speech recognition on live audio, or on a set of existing audio buffers. For example, use this request object to route audio from a device's microphone to the speech recognizer.
    ///
    /// The request object contains no audio initially. As you capture audio, call ``append(_:)`` or ``appendAudioSampleBuffer(_:)`` to add audio samples to the request object. The speech recognizer continuously analyzes the audio you appended, stopping only when you call the ``endAudio()`` method. You must call ``endAudio()`` explicitly to stop the speech recognition process.
    ///
    /// For a complete example of how to use audio buffers with speech recognition, see [SpeakToMe: Using Speech Recognition with AVAudioEngine](https://developer.apple.com/library/archive/samplecode/SpeakToMe/Introduction/Intro.html#//apple_ref/doc/uid/TP40017110).
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/speech/sfspeechaudiobufferrecognitionrequest?language=objc)
    #[unsafe(super(SFSpeechRecognitionRequest, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct SFSpeechAudioBufferRecognitionRequest;
);
243
// `SFSpeechAudioBufferRecognitionRequest` inherits from `NSObject`, so it
// conforms to `NSObjectProtocol`.
extern_conformance!(
    unsafe impl NSObjectProtocol for SFSpeechAudioBufferRecognitionRequest {}
);
247
impl SFSpeechAudioBufferRecognitionRequest {
    extern_methods!(
        // Methods taking AVFoundation / CoreMedia types are gated on the
        // corresponding optional framework-binding features.
        #[cfg(feature = "objc2-avf-audio")]
        /// The preferred audio format for optimal speech recognition.
        ///
        /// Use the audio format in this property as a hint for optimal recording, but don't depend on the value remaining unchanged.
        #[unsafe(method(nativeAudioFormat))]
        #[unsafe(method_family = none)]
        pub unsafe fn nativeAudioFormat(&self) -> Retained<AVAudioFormat>;

        #[cfg(feature = "objc2-avf-audio")]
        /// Appends audio in the PCM format to the end of the recognition request.
        ///
        /// The audio must be in a native format and uncompressed.
        ///
        /// - Parameters:
        /// - audioPCMBuffer: An audio buffer that contains audio in the PCM format.
        #[unsafe(method(appendAudioPCMBuffer:))]
        #[unsafe(method_family = none)]
        pub unsafe fn appendAudioPCMBuffer(&self, audio_pcm_buffer: &AVAudioPCMBuffer);

        #[cfg(feature = "objc2-core-media")]
        /// Appends audio to the end of the recognition request.
        ///
        /// The audio must be in a native format.
        ///
        /// - Parameters:
        /// - sampleBuffer: A buffer of audio.
        #[unsafe(method(appendAudioSampleBuffer:))]
        #[unsafe(method_family = none)]
        pub unsafe fn appendAudioSampleBuffer(&self, sample_buffer: &CMSampleBuffer);

        /// Marks the end of audio input for the recognition request.
        ///
        /// Call this method explicitly to let the speech recognizer know that no more audio input is coming.
        #[unsafe(method(endAudio))]
        #[unsafe(method_family = none)]
        pub unsafe fn endAudio(&self);
    );
}
288
/// Methods declared on superclass `NSObject`.
impl SFSpeechAudioBufferRecognitionRequest {
    extern_methods!(
        /// Initializes an allocated request (Objective-C `-init`).
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        /// Allocates and initializes a new instance (Objective-C `+new`).
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}