// objc2_speech/generated/SFSpeechRecognizer.rs

1//! This file has been automatically generated by `objc2`'s `header-translator`.
2//! DO NOT EDIT
3use core::ffi::*;
4use core::ptr::NonNull;
5use objc2::__framework_prelude::*;
6use objc2_foundation::*;
7
8use crate::*;
9
/// The app's authorization to perform speech recognition.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/speech/sfspeechrecognizerauthorizationstatus?language=objc)
// NS_ENUM — modeled as a transparent newtype over `NSInteger` rather than a
// Rust `enum`, so values outside the listed constants (e.g. from a newer OS)
// remain representable without undefined behavior.
#[repr(transparent)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct SFSpeechRecognizerAuthorizationStatus(pub NSInteger);
impl SFSpeechRecognizerAuthorizationStatus {
    /// The app's authorization status has not yet been determined.
    ///
    /// When your app's status is not determined, calling the ``SFSpeechRecognizer/requestAuthorization(_:)`` method prompts the user to grant or deny authorization.
    #[doc(alias = "SFSpeechRecognizerAuthorizationStatusNotDetermined")]
    pub const NotDetermined: Self = Self(0);
    /// The user denied your app's request to perform speech recognition.
    #[doc(alias = "SFSpeechRecognizerAuthorizationStatusDenied")]
    pub const Denied: Self = Self(1);
    /// The device prevents your app from performing speech recognition.
    #[doc(alias = "SFSpeechRecognizerAuthorizationStatusRestricted")]
    pub const Restricted: Self = Self(2);
    /// The user granted your app's request to perform speech recognition.
    #[doc(alias = "SFSpeechRecognizerAuthorizationStatusAuthorized")]
    pub const Authorized: Self = Self(3);
}
33
// SAFETY: the type is `#[repr(transparent)]` over `NSInteger`, so its
// Objective-C type encoding is exactly that of `NSInteger`.
unsafe impl Encode for SFSpeechRecognizerAuthorizationStatus {
    const ENCODING: Encoding = NSInteger::ENCODING;
}
37
// SAFETY: a reference to this type is encoded as a pointer to its
// `Encode::ENCODING` (the standard encoding for `NSInteger *`).
unsafe impl RefEncode for SFSpeechRecognizerAuthorizationStatus {
    const ENCODING_REF: Encoding = Encoding::Pointer(&Self::ENCODING);
}
41
extern_class!(
    /// An object you use to check for the availability of the speech recognition service, and to initiate the speech recognition process.
    ///
    /// An ``SFSpeechRecognizer`` object is the central object for managing the speech recognizer process. Use this object to:
    ///
    /// - Request authorization to use speech recognition services.
    /// - Specify the language to use during the recognition process.
    /// - Initiate new speech recognition tasks.
    ///
    /// ### Set up speech recognition
    ///
    /// Each speech recognizer supports only one language, which you specify at creation time. The successful creation of a speech recognizer does not guarantee that speech recognition services are available. For some languages, the recognizer might require an Internet connection. Use the ``isAvailable`` property to find out if speech recognition services are available for the current language.
    ///
    /// To initiate the speech recognition process, do the following:
    ///
    /// 1. Request authorization to use speech recognition. See
    /// <doc:asking-permission-to-use-speech-recognition>.
    /// 2. Create an ``SFSpeechRecognizer`` object.
    /// 3. Verify the availability of services using the ``isAvailable`` property of your speech recognizer object.
    /// 4. Prepare your audio content.
    /// 5. Create a recognition request object—an object that descends from ``SFSpeechRecognitionRequest``.
    /// 6. Call the ``recognitionTask(with:delegate:)`` or ``recognitionTask(with:resultHandler:)`` method to begin the recognition process.
    ///
    /// The type of recognition request object you create depends on whether you are processing an existing audio file or an incoming stream of audio. For existing audio files, create a ``SFSpeechURLRecognitionRequest`` object. For audio streams, create a ``SFSpeechAudioBufferRecognitionRequest`` object.
    ///
    /// ### Create a great user experience for speech recognition
    ///
    /// Here are some tips to consider when adding speech recognition support to your app.
    ///
    /// - **Be prepared to handle failures caused by speech recognition limits.** Because speech recognition is a network-based service, limits are enforced so that the service can remain freely available to all apps. Individual devices may be limited in the number of recognitions that can be performed per day, and each app may be throttled globally based on the number of requests it makes per day. If a recognition request fails quickly (within a second or two of starting), check to see if the recognition service became unavailable. If it is, you may want to ask users to try again later.
    /// - **Plan for a one-minute limit on audio duration.** Speech recognition places a relatively high burden on battery life and network usage. To minimize this burden, the framework stops speech recognition tasks that last longer than one minute. This limit is similar to the one for keyboard-related dictation.
    /// - **Remind the user when your app is recording.** For example, display a visual indicator and play sounds at the beginning and end of speech recognition to help users understand that they're being actively recorded. You can also display speech as it is being recognized so that users understand what your app is doing and see any mistakes made during the recognition process.
    /// - **Do not perform speech recognition on private or sensitive information.** Some speech is not appropriate for recognition. Don't send passwords, health or financial data, and other sensitive speech for recognition.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/speech/sfspeechrecognizer?language=objc)
    #[unsafe(super(NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct SFSpeechRecognizer;
);
82
// SAFETY: `SFSpeechRecognizer` is declared above as an `NSObject` subclass,
// and all `NSObject` subclasses conform to `NSObjectProtocol`.
extern_conformance!(
    unsafe impl NSObjectProtocol for SFSpeechRecognizer {}
);
86
impl SFSpeechRecognizer {
    extern_methods!(
        /// Returns the set of locales that are supported by the speech recognizer.
        ///
        /// This method returns the locales for which speech recognition is supported. Support for a locale does not guarantee that speech recognition is currently possible for that locale. For some locales, the speech recognizer requires an active Internet connection to communicate with Apple's servers. If the speech recognizer is currently unable to process requests, ``isAvailable`` returns `false`.
        ///
        /// Speech recognition supports the same locales that are supported by the keyboard's dictation feature. For a list of these locales, see [QuickType Keyboard: Dictation](https://www.apple.com/ios/feature-availability/#quicktype-keyboard-dictation).
        ///
        /// - Returns: A set of locales that support speech recognition.
        #[unsafe(method(supportedLocales))]
        #[unsafe(method_family = none)]
        pub unsafe fn supportedLocales() -> Retained<NSSet<NSLocale>>;

        /// Returns your app's current authorization to perform speech recognition.
        ///
        /// The user can reject your app's request to perform speech recognition, but your request can also be denied if speech recognition is not supported on the device. The app can also change your app's authorization status at any time from the Settings app.
        ///
        /// - Returns: The app's current authorization status value. For a list of values, see ``SFSpeechRecognizerAuthorizationStatus``.
        #[unsafe(method(authorizationStatus))]
        #[unsafe(method_family = none)]
        pub unsafe fn authorizationStatus() -> SFSpeechRecognizerAuthorizationStatus;

        #[cfg(feature = "block2")]
        /// Asks the user to allow your app to perform speech recognition.
        ///
        /// Call this method before performing any other tasks associated with speech recognition. This method executes asynchronously, returning shortly after you call it. At some point later, the system calls the provided `handler` block with the results.
        ///
        /// When your app's authorization status is ``SFSpeechRecognizerAuthorizationStatus/notDetermined``, this method causes the system to prompt the user to grant or deny permission for your app to use speech recognition. The prompt includes the custom message you specify in the `NSSpeechRecognitionUsageDescription` key of your app's `Info.plist` file. The user's response is saved so that future calls to this method do not prompt the user again.
        ///
        /// > Important:
        /// > Your app's `Info.plist` file must contain the `NSSpeechRecognitionUsageDescription` key with a valid usage description. If this key is not present, your app will crash when you call this method.
        ///
        /// For more information about requesting authorization, see
        /// <doc:asking-permission-to-use-speech-recognition>.
        ///
        /// - Parameters:
        /// - handler: The block to execute when your app's authorization status is known. The status parameter of the block contains your app's authorization status. The system does not guarantee the execution of this block on your app's main dispatch queue.
        #[unsafe(method(requestAuthorization:))]
        #[unsafe(method_family = none)]
        pub unsafe fn requestAuthorization(
            handler: &block2::DynBlock<dyn Fn(SFSpeechRecognizerAuthorizationStatus)>,
        );

        /// Creates a speech recognizer associated with the user's default language settings.
        ///
        /// If the user's default language is not supported for speech recognition, this method attempts to fall back to the language used by the keyboard for dictation. If that fails, this method returns `nil`.
        ///
        /// Even if this method returns a valid speech recognizer object, the speech recognition services may be temporarily unavailable. To determine whether speech recognition services are available, check the ``isAvailable`` property.
        ///
        /// - Returns: An initialized speech recognizer object, or `nil` if there was a problem creating the object.
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Option<Retained<Self>>;

        /// Creates a speech recognizer associated with the specified locale.
        ///
        /// If you specify a language that is not supported by the speech recognizer, this method attempts to fall back to the language used by the keyboard for dictation. If that fails, this method returns `nil`.
        ///
        /// Even if this method returns a valid speech recognizer object, the speech recognition services may be temporarily unavailable. To determine whether speech recognition services are available, check the ``isAvailable`` property.
        ///
        /// - Parameters:
        /// - locale: The locale object representing the language you want to use for speech recognition. For a list of languages supported by the speech recognizer, see ``supportedLocales()``.
        ///
        /// - Returns: An initialized speech recognizer object, or `nil` if the specified language was not supported.
        #[unsafe(method(initWithLocale:))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithLocale(
            this: Allocated<Self>,
            locale: &NSLocale,
        ) -> Option<Retained<Self>>;

        /// A Boolean value that indicates whether the speech recognizer is currently available.
        ///
        /// When the value of this property is `true`, you may create new speech recognition tasks. When value of this property is `false`, speech recognition services are not available.
        #[unsafe(method(isAvailable))]
        #[unsafe(method_family = none)]
        pub unsafe fn isAvailable(&self) -> bool;

        /// The locale of the speech recognizer.
        ///
        /// The locale of the speech recognizer is an `NSLocale` object. The default value of this property is the system locale (that is, `+[NSLocale systemLocale]`).
        #[unsafe(method(locale))]
        #[unsafe(method_family = none)]
        pub unsafe fn locale(&self) -> Retained<NSLocale>;

        /// A Boolean value that indicates whether the speech recognizer can operate without network access.
        ///
        /// An ``SFSpeechRecognitionRequest`` can only honor its ``SFSpeechRecognitionRequest/requiresOnDeviceRecognition`` property if ``supportsOnDeviceRecognition`` is `true`. If ``supportsOnDeviceRecognition`` is `false`, the ``SFSpeechRecognizer`` requires a network in order to recognize speech.
        #[unsafe(method(supportsOnDeviceRecognition))]
        #[unsafe(method_family = none)]
        pub unsafe fn supportsOnDeviceRecognition(&self) -> bool;

        /// Setter for [`supportsOnDeviceRecognition`][Self::supportsOnDeviceRecognition].
        #[unsafe(method(setSupportsOnDeviceRecognition:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setSupportsOnDeviceRecognition(&self, supports_on_device_recognition: bool);

        /// The delegate object that handles changes to the availability of speech recognition services.
        ///
        /// Provide a delegate object when you want to monitor changes to the availability of speech recognition services. Your delegate object must conform to the ``SFSpeechRecognizerDelegate`` protocol.
        #[unsafe(method(delegate))]
        #[unsafe(method_family = none)]
        pub unsafe fn delegate(
            &self,
        ) -> Option<Retained<ProtocolObject<dyn SFSpeechRecognizerDelegate>>>;

        /// Setter for [`delegate`][Self::delegate].
        ///
        /// This is a [weak property][objc2::topics::weak_property].
        #[unsafe(method(setDelegate:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setDelegate(
            &self,
            delegate: Option<&ProtocolObject<dyn SFSpeechRecognizerDelegate>>,
        );

        #[cfg(feature = "SFSpeechRecognitionTaskHint")]
        /// A hint that indicates the type of speech recognition being requested.
        ///
        /// By default, the value of this property overrides the ``SFSpeechRecognitionTaskHint/unspecified`` value for requests. For possible values, see ``SFSpeechRecognitionTaskHint``.
        #[unsafe(method(defaultTaskHint))]
        #[unsafe(method_family = none)]
        pub unsafe fn defaultTaskHint(&self) -> SFSpeechRecognitionTaskHint;

        #[cfg(feature = "SFSpeechRecognitionTaskHint")]
        /// Setter for [`defaultTaskHint`][Self::defaultTaskHint].
        #[unsafe(method(setDefaultTaskHint:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setDefaultTaskHint(&self, default_task_hint: SFSpeechRecognitionTaskHint);

        #[cfg(all(
            feature = "SFSpeechRecognitionRequest",
            feature = "SFSpeechRecognitionResult",
            feature = "SFSpeechRecognitionTask",
            feature = "block2"
        ))]
        /// Executes the speech recognition request and delivers the results to the specified handler block.
        ///
        /// Use this method to initiate the speech recognition process on the audio contained in the request object. This method executes asynchronously and returns a ``SFSpeechRecognitionTask`` object that you can use to cancel or finalize the recognition process later. As results become available, the method calls the block in the `resultHandler` parameter.
        ///
        /// - Parameters:
        /// - request: A request (in an ``SFSpeechRecognitionRequest`` object) to recognize speech from an audio source.
        /// - resultHandler: The block to call when partial or final results are available, or when an error occurs. If the ``SFSpeechRecognitionRequest/shouldReportPartialResults`` property is `true`, this block may be called multiple times to deliver the partial and final results. The block has no return value and takes the following parameters:
        ///
        /// - term result: A ``SFSpeechRecognitionResult`` containing the partial or final transcriptions of the audio content.
        /// - term error: An error object if a problem occurred. This parameter is `nil` if speech recognition was successful.
        ///
        /// - Returns: The task object you can use to manage an in-progress recognition request.
        ///
        /// NOTE(review): the block receives raw `*mut` pointers for the result and
        /// error; per the Objective-C contract described above, either may be null —
        /// callers must null-check before dereferencing.
        #[unsafe(method(recognitionTaskWithRequest:resultHandler:))]
        #[unsafe(method_family = none)]
        pub unsafe fn recognitionTaskWithRequest_resultHandler(
            &self,
            request: &SFSpeechRecognitionRequest,
            result_handler: &block2::DynBlock<dyn Fn(*mut SFSpeechRecognitionResult, *mut NSError)>,
        ) -> Retained<SFSpeechRecognitionTask>;

        #[cfg(all(
            feature = "SFSpeechRecognitionRequest",
            feature = "SFSpeechRecognitionTask"
        ))]
        /// Recognizes speech from the audio source associated with the specified request, using the specified delegate to manage the results.
        ///
        /// Use this method to initiate the speech recognition process on the audio contained in the request object. This method executes asynchronously and returns a ``SFSpeechRecognitionTask`` object that you can use to cancel or finalize the recognition process later. As results become available, the method calls the methods of the provided `delegate` object.
        ///
        /// Note that the ``SFSpeechRecognitionTask`` object returned by this method does not retain your delegate object. You must maintain a strong reference to your delegate while speech recognition is in progress.
        ///
        /// - Parameters:
        /// - request: A request (encapsulated in an ``SFSpeechRecognitionRequest`` object) to recognize speech from an audio source.
        /// - delegate: An object that can handle results from the speech recognition task. This object must conform to the ``SFSpeechRecognitionTaskDelegate`` protocol.
        ///
        /// - Returns: The task object you can use to manage an in-progress recognition request.
        #[unsafe(method(recognitionTaskWithRequest:delegate:))]
        #[unsafe(method_family = none)]
        pub unsafe fn recognitionTaskWithRequest_delegate(
            &self,
            request: &SFSpeechRecognitionRequest,
            delegate: &ProtocolObject<dyn SFSpeechRecognitionTaskDelegate>,
        ) -> Retained<SFSpeechRecognitionTask>;

        /// The queue on which to execute recognition task handlers and delegate methods.
        ///
        /// The default value of this property is the app's main queue. Assign a different queue if you want delegate methods and handlers to be executed on a background queue.
        ///
        /// The handler you pass to the ``requestAuthorization(_:)`` method does not use this queue.
        #[unsafe(method(queue))]
        #[unsafe(method_family = none)]
        pub unsafe fn queue(&self) -> Retained<NSOperationQueue>;

        /// Setter for [`queue`][Self::queue].
        ///
        /// # Safety
        ///
        /// `queue` possibly has additional threading requirements.
        #[unsafe(method(setQueue:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setQueue(&self, queue: &NSOperationQueue);
    );
}
286
/// Methods declared on superclass `NSObject`.
impl SFSpeechRecognizer {
    extern_methods!(
        /// Convenience constructor; sends `+[SFSpeechRecognizer new]`
        /// (equivalent to `alloc` followed by `init`).
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}
295
extern_protocol!(
    /// A protocol that you adopt in your objects to track the availability of a speech recognizer.
    ///
    /// A speech recognizer's availability can change due to the device's Internet connection or other factors. Use this protocol's optional method to track those changes and provide an appropriate response. For example, when speech recognition becomes unavailable, you might disable related features in your app.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/speech/sfspeechrecognizerdelegate?language=objc)
    pub unsafe trait SFSpeechRecognizerDelegate: NSObjectProtocol {
        /// Tells the delegate that the availability of its associated speech recognizer changed.
        ///
        /// This method is optional; conforming types are not required to implement it.
        ///
        /// - Parameters:
        /// - speechRecognizer: The ``SFSpeechRecognizer`` object whose availability changed.
        /// - available: A Boolean value that indicates the new availability of the speech recognizer.
        #[optional]
        #[unsafe(method(speechRecognizer:availabilityDidChange:))]
        #[unsafe(method_family = none)]
        unsafe fn speechRecognizer_availabilityDidChange(
            &self,
            speech_recognizer: &SFSpeechRecognizer,
            available: bool,
        );
    }
);