1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
//! This file has been automatically generated by `objc2`'s `header-translator`.
//! DO NOT EDIT
use core::ffi::*;
use core::ptr::NonNull;
use objc2::__framework_prelude::*;
#[cfg(feature = "objc2-avf-audio")]
use objc2_avf_audio::*;
#[cfg(feature = "objc2-core-media")]
use objc2_core_media::*;
use objc2_foundation::*;
use crate::*;
extern_class!(
// Declares the Rust-side class binding. The superclass chain in
// `#[unsafe(super(...))]` must match the Objective-C runtime's actual
// hierarchy — do not edit by hand; this file is generated.
/// An abstract class that represents a request to recognize speech from an audio source.
///
/// Don't create ``SFSpeechRecognitionRequest`` objects directly. Create an ``SFSpeechURLRecognitionRequest`` or ``SFSpeechAudioBufferRecognitionRequest`` object instead. Use the properties of this class to configure various aspects of your request object before you start the speech recognition process. For example, use the ``shouldReportPartialResults`` property to specify whether you want partial results or only the final result of speech recognition.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/speech/sfspeechrecognitionrequest?language=objc)
#[unsafe(super(NSObject))]
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct SFSpeechRecognitionRequest;
);
extern_conformance!(
// Safe because SFSpeechRecognitionRequest is declared above with
// `super(NSObject)`, so it conforms to NSObjectProtocol.
unsafe impl NSObjectProtocol for SFSpeechRecognitionRequest {}
);
// Accessors for the request's configuration properties. Each method maps
// 1:1 onto an Objective-C selector via `#[unsafe(method(...))]`; the
// selector strings must not be altered.
impl SFSpeechRecognitionRequest {
extern_methods!(
#[cfg(feature = "SFSpeechRecognitionTaskHint")]
/// A value that indicates the type of speech recognition being performed.
///
/// The default value of this property is ``SFSpeechRecognitionTaskHint/unspecified``. For a valid list of values, see ``SFSpeechRecognitionTaskHint``.
#[unsafe(method(taskHint))]
#[unsafe(method_family = none)]
pub unsafe fn taskHint(&self) -> SFSpeechRecognitionTaskHint;
#[cfg(feature = "SFSpeechRecognitionTaskHint")]
/// Setter for [`taskHint`][Self::taskHint].
#[unsafe(method(setTaskHint:))]
#[unsafe(method_family = none)]
pub unsafe fn setTaskHint(&self, task_hint: SFSpeechRecognitionTaskHint);
/// A Boolean value that indicates whether you want intermediate results returned for each utterance.
///
/// The default value of this property is `true`. If you want only final results (and you don't care about intermediate results), set this property to `false` to prevent the system from doing extra work.
#[unsafe(method(shouldReportPartialResults))]
#[unsafe(method_family = none)]
pub unsafe fn shouldReportPartialResults(&self) -> bool;
/// Setter for [`shouldReportPartialResults`][Self::shouldReportPartialResults].
#[unsafe(method(setShouldReportPartialResults:))]
#[unsafe(method_family = none)]
pub unsafe fn setShouldReportPartialResults(&self, should_report_partial_results: bool);
/// An array of phrases that should be recognized, even if they are not in the system vocabulary.
///
/// Use this property to specify short custom phrases that are unique to your app. You might include phrases with the names of characters, products, or places that are specific to your app. You might also include domain-specific terminology or unusual or made-up words. Assigning custom phrases to this property improves the likelihood of those phrases being recognized.
///
/// Keep phrases relatively brief, limiting them to one or two words whenever possible. Lengthy phrases are less likely to be recognized. In addition, try to limit each phrase to something the user can say without pausing.
///
/// Limit the total number of phrases to no more than 100.
#[unsafe(method(contextualStrings))]
#[unsafe(method_family = none)]
pub unsafe fn contextualStrings(&self) -> Retained<NSArray<NSString>>;
/// Setter for [`contextualStrings`][Self::contextualStrings].
///
/// This is [copied][objc2_foundation::NSCopying::copy] when set.
#[unsafe(method(setContextualStrings:))]
#[unsafe(method_family = none)]
pub unsafe fn setContextualStrings(&self, contextual_strings: &NSArray<NSString>);
/// An identifier string that you use to describe the type of interaction associated with the speech recognition request.
///
/// If different parts of your app have different speech recognition needs, you can use this property to identify the part of your app that is making each request. For example, if one part of your app lets users speak phone numbers and another part lets users speak street addresses, consistently identifying the part of the app that makes a recognition request may help improve the accuracy of the results.
#[deprecated = "Not used anymore"]
#[unsafe(method(interactionIdentifier))]
#[unsafe(method_family = none)]
pub unsafe fn interactionIdentifier(&self) -> Option<Retained<NSString>>;
/// Setter for [`interactionIdentifier`][Self::interactionIdentifier].
///
/// This is [copied][objc2_foundation::NSCopying::copy] when set.
#[deprecated = "Not used anymore"]
#[unsafe(method(setInteractionIdentifier:))]
#[unsafe(method_family = none)]
pub unsafe fn setInteractionIdentifier(&self, interaction_identifier: Option<&NSString>);
/// A Boolean value that determines whether a request must keep its audio data on the device.
///
/// Set this property to `true` to prevent an ``SFSpeechRecognitionRequest`` from sending audio over the network. However, on-device requests won't be as accurate.
///
/// > Note:
/// > The request only honors this setting if the ``SFSpeechRecognizer/supportsOnDeviceRecognition`` (``SFSpeechRecognizer``) property is also `true`.
#[unsafe(method(requiresOnDeviceRecognition))]
#[unsafe(method_family = none)]
pub unsafe fn requiresOnDeviceRecognition(&self) -> bool;
/// Setter for [`requiresOnDeviceRecognition`][Self::requiresOnDeviceRecognition].
#[unsafe(method(setRequiresOnDeviceRecognition:))]
#[unsafe(method_family = none)]
pub unsafe fn setRequiresOnDeviceRecognition(&self, requires_on_device_recognition: bool);
/// A Boolean value that indicates whether to add punctuation to speech recognition results.
///
/// Set this property to `true` for the speech framework to automatically include punctuation in the recognition results. Punctuation includes a period or question mark at the end of a sentence, and a comma within a sentence.
#[unsafe(method(addsPunctuation))]
#[unsafe(method_family = none)]
pub unsafe fn addsPunctuation(&self) -> bool;
/// Setter for [`addsPunctuation`][Self::addsPunctuation].
#[unsafe(method(setAddsPunctuation:))]
#[unsafe(method_family = none)]
pub unsafe fn setAddsPunctuation(&self, adds_punctuation: bool);
#[cfg(feature = "SFSpeechLanguageModel")]
/// The custom language model configuration for this request, if one has been set.
///
/// NOTE(review): no upstream documentation was generated for this property;
/// returns `None` when no configuration is set — confirm detailed semantics
/// against Apple's `customizedLanguageModel` docs.
#[unsafe(method(customizedLanguageModel))]
#[unsafe(method_family = none)]
pub unsafe fn customizedLanguageModel(
&self,
) -> Option<Retained<SFSpeechLanguageModelConfiguration>>;
#[cfg(feature = "SFSpeechLanguageModel")]
/// Setter for [`customizedLanguageModel`][Self::customizedLanguageModel].
///
/// This is [copied][objc2_foundation::NSCopying::copy] when set.
#[unsafe(method(setCustomizedLanguageModel:))]
#[unsafe(method_family = none)]
pub unsafe fn setCustomizedLanguageModel(
&self,
customized_language_model: Option<&SFSpeechLanguageModelConfiguration>,
);
);
}
/// Methods declared on superclass `NSObject`.
impl SFSpeechRecognitionRequest {
extern_methods!(
// `init` consumes the `Allocated` cell produced by `alloc` (the `init`
// method family) and yields a retained instance.
#[unsafe(method(init))]
#[unsafe(method_family = init)]
pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;
// `new` combines alloc+init in one call (the `new` method family).
#[unsafe(method(new))]
#[unsafe(method_family = new)]
pub unsafe fn new() -> Retained<Self>;
);
}
extern_class!(
// Concrete subclass of SFSpeechRecognitionRequest for file-based audio;
// the full superclass chain is declared in `#[unsafe(super(...))]` below.
/// A request to recognize speech in a recorded audio file.
///
/// Use this object to perform speech recognition on the contents of an audio file.
///
/// The following example shows a method that performs recognition on an audio file based on the user's default language and prints out the transcription.
///
/// Listing 1. Getting a speech recognizer and making a recognition request
///
/// ```swift
/// func recognizeFile(url: URL) {
/// // Create a speech recognizer associated with the user's default language.
/// guard let myRecognizer = SFSpeechRecognizer() else {
/// // The system doesn't support the user's default language.
/// return
/// }
///
/// guard myRecognizer.isAvailable else {
/// // The recognizer isn't available.
/// return
/// }
///
/// // Create and execute a speech recognition request for the audio file at the URL.
/// let request = SFSpeechURLRecognitionRequest(url: url)
/// myRecognizer.recognitionTask(with: request) { (result, error) in
/// guard let result else {
/// // Recognition failed, so check the error for details and handle it.
/// return
/// }
///
/// // Print the speech transcription with the highest confidence that the
/// // system recognized.
/// if result.isFinal {
/// print(result.bestTranscription.formattedString)
/// }
/// }
/// }
/// ```
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/speech/sfspeechurlrecognitionrequest?language=objc)
#[unsafe(super(SFSpeechRecognitionRequest, NSObject))]
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct SFSpeechURLRecognitionRequest;
);
extern_conformance!(
// Safe because SFSpeechURLRecognitionRequest's declared superclass chain
// above terminates in NSObject.
unsafe impl NSObjectProtocol for SFSpeechURLRecognitionRequest {}
);
// Initializers and accessors specific to URL-based recognition requests.
impl SFSpeechURLRecognitionRequest {
extern_methods!(
#[unsafe(method(init))]
#[unsafe(method_family = init)]
pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;
/// Creates a speech recognition request, initialized with the specified URL.
///
/// Use this method to create a request to recognize speech in a recorded audio file that resides at the specified URL. Pass the request to the recognizer's ``SFSpeechRecognizer/recognitionTask(with:delegate:)`` method to start recognition.
#[unsafe(method(initWithURL:))]
#[unsafe(method_family = init)]
pub unsafe fn initWithURL(this: Allocated<Self>, url: &NSURL) -> Retained<Self>;
/// The URL of the audio file.
#[unsafe(method(URL))]
#[unsafe(method_family = none)]
pub unsafe fn URL(&self) -> Retained<NSURL>;
);
}
/// Methods declared on superclass `NSObject`.
impl SFSpeechURLRecognitionRequest {
extern_methods!(
// `new` combines alloc+init; `init` itself is declared in the impl block
// for this class's own methods.
#[unsafe(method(new))]
#[unsafe(method_family = new)]
pub unsafe fn new() -> Retained<Self>;
);
}
extern_class!(
// Concrete subclass of SFSpeechRecognitionRequest for streamed/live audio;
// the full superclass chain is declared in `#[unsafe(super(...))]` below.
/// A request to recognize speech from captured audio content, such as audio from the device's microphone.
///
/// Use an ``SFSpeechAudioBufferRecognitionRequest`` object to perform speech recognition on live audio, or on a set of existing audio buffers. For example, use this request object to route audio from a device's microphone to the speech recognizer.
///
/// The request object contains no audio initially. As you capture audio, call ``append(_:)`` or ``appendAudioSampleBuffer(_:)`` to add audio samples to the request object. The speech recognizer continuously analyzes the audio you appended, stopping only when you call the ``endAudio()`` method. You must call ``endAudio()`` explicitly to stop the speech recognition process.
///
/// For a complete example of how to use audio buffers with speech recognition, see [SpeakToMe: Using Speech Recognition with AVAudioEngine](https://developer.apple.com/library/archive/samplecode/SpeakToMe/Introduction/Intro.html#//apple_ref/doc/uid/TP40017110).
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/speech/sfspeechaudiobufferrecognitionrequest?language=objc)
#[unsafe(super(SFSpeechRecognitionRequest, NSObject))]
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct SFSpeechAudioBufferRecognitionRequest;
);
extern_conformance!(
// Safe because SFSpeechAudioBufferRecognitionRequest's declared superclass
// chain above terminates in NSObject.
unsafe impl NSObjectProtocol for SFSpeechAudioBufferRecognitionRequest {}
);
// Audio-ingestion methods. The AVFAudio- and CoreMedia-typed methods are
// feature-gated because their argument types come from optional crates.
impl SFSpeechAudioBufferRecognitionRequest {
extern_methods!(
#[cfg(feature = "objc2-avf-audio")]
/// The preferred audio format for optimal speech recognition.
///
/// Use the audio format in this property as a hint for optimal recording, but don't depend on the value remaining unchanged.
#[unsafe(method(nativeAudioFormat))]
#[unsafe(method_family = none)]
pub unsafe fn nativeAudioFormat(&self) -> Retained<AVAudioFormat>;
#[cfg(feature = "objc2-avf-audio")]
/// Appends audio in the PCM format to the end of the recognition request.
///
/// The audio must be in a native format and uncompressed.
///
/// - Parameters:
/// - audioPCMBuffer: An audio buffer that contains audio in the PCM format.
#[unsafe(method(appendAudioPCMBuffer:))]
#[unsafe(method_family = none)]
pub unsafe fn appendAudioPCMBuffer(&self, audio_pcm_buffer: &AVAudioPCMBuffer);
#[cfg(feature = "objc2-core-media")]
/// Appends audio to the end of the recognition request.
///
/// The audio must be in a native format.
///
/// - Parameters:
/// - sampleBuffer: A buffer of audio.
#[unsafe(method(appendAudioSampleBuffer:))]
#[unsafe(method_family = none)]
pub unsafe fn appendAudioSampleBuffer(&self, sample_buffer: &CMSampleBuffer);
/// Marks the end of audio input for the recognition request.
///
/// Call this method explicitly to let the speech recognizer know that no more audio input is coming.
#[unsafe(method(endAudio))]
#[unsafe(method_family = none)]
pub unsafe fn endAudio(&self);
);
}
/// Methods declared on superclass `NSObject`.
impl SFSpeechAudioBufferRecognitionRequest {
extern_methods!(
// `init` consumes the `Allocated` cell produced by `alloc` (the `init`
// method family) and yields a retained instance.
#[unsafe(method(init))]
#[unsafe(method_family = init)]
pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;
// `new` combines alloc+init in one call (the `new` method family).
#[unsafe(method(new))]
#[unsafe(method_family = new)]
pub unsafe fn new() -> Retained<Self>;
);
}