objc2_sound_analysis/generated/SNClassifySoundRequest.rs

//! This file has been automatically generated by `objc2`'s `header-translator`.
//! DO NOT EDIT
use core::ffi::*;
use core::ptr::NonNull;
use objc2::__framework_prelude::*;
#[cfg(feature = "objc2-core-media")]
use objc2_core_media::*;
#[cfg(feature = "objc2-core-ml")]
use objc2_core_ml::*;
use objc2_foundation::*;

use crate::*;

extern_class!(
    /// Configure an analyzer to perform sound classification using the provided MLModel.
    ///
    /// When a new classification result is produced, the results observer will be called with an SNClassificationResult. Audio buffers provided to SNAudioStreamAnalyzer may vary in size, and the analyzer will reblock the audio data to the block size expected by the MLModel. By default, analysis will occur on the first audio channel in the audio stream, and the analyzer will apply sample rate conversion if the provided audio does not match the sample rate required by the MLModel.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/soundanalysis/snclassifysoundrequest?language=objc)
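    ///
    /// # Example
    ///
    /// A minimal sketch of creating a request (assumptions: the crate's `SNTypes`
    /// feature is enabled, `SNClassifierIdentifierVersion1` is the constant exported
    /// by that module, and `alloc()` is provided by objc2's allocation traits):
    ///
    /// ```ignore
    /// // Build a request that uses the built-in Version 1 sound classifier.
    /// let request = unsafe {
    ///     SNClassifySoundRequest::initWithClassifierIdentifier_error(
    ///         SNClassifySoundRequest::alloc(),
    ///         SNClassifierIdentifierVersion1,
    ///     )
    /// }
    /// .expect("sound classifier unavailable on this OS");
    ///
    /// // The request is then added to an `SNAudioStreamAnalyzer` together with
    /// // an observer that receives `SNClassificationResult`s (not shown here).
    /// ```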
    #[unsafe(super(NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct SNClassifySoundRequest;
);

extern_conformance!(
    unsafe impl NSObjectProtocol for SNClassifySoundRequest {}
);

#[cfg(feature = "SNRequest")]
extern_conformance!(
    unsafe impl SNRequest for SNClassifySoundRequest {}
);

impl SNClassifySoundRequest {
    extern_methods!(
        /// The overlap factor of the windows of audio data provided to the classifier, if the model operates on fixed audio block sizes.
        ///
        /// When performing audio analysis on fixed audio block sizes, it is common for the analysis windows to overlap by some factor. Without overlapping the analysis windows (when the overlap factor is 0.0), a sound might be split across two analysis windows, which could negatively affect classification performance. Overlapping the analysis windows by 50% ensures each sound will fall near the center of at least one analysis window. The supported range is [0.0, 1.0), and the default value is 0.5. Increasing the overlap factor increases computational complexity, so values greater than 0.5 should be used with care.
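        ///
        /// A small sketch (assuming `request` is an already-initialized
        /// `SNClassifySoundRequest`):
        ///
        /// ```ignore
        /// // 0.75 keeps each sound closer to a window center, at extra CPU cost.
        /// unsafe { request.setOverlapFactor(0.75) };
        /// assert!((unsafe { request.overlapFactor() } - 0.75).abs() < 1e-9);
        /// ```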
        #[unsafe(method(overlapFactor))]
        #[unsafe(method_family = none)]
        pub unsafe fn overlapFactor(&self) -> c_double;

        /// Setter for [`overlapFactor`][Self::overlapFactor].
        #[unsafe(method(setOverlapFactor:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setOverlapFactor(&self, overlap_factor: c_double);

        #[cfg(feature = "objc2-core-media")]
        /// The duration of a single analysis window.
        ///
        /// When performing classification over an audio stream, a classifier computes each classification result based on a single 'analysis window' of audio. Analysis windows are uniformly-sized time intervals, where the size of any given window is considered that window's 'duration'. Some classifiers can operate over analysis windows which conform to one of several different duration options. Larger window durations allow classification to execute less frequently over larger contexts of audio, potentially improving classification performance. Smaller window durations allow classification to execute more frequently over smaller contexts of audio, producing results with sharper time resolution. Depending on the use-case, a larger or smaller window may be preferable. When configuring the window duration, it is important to respect the capabilities of the classifier. A classifier's supported window durations can be discovered using the `windowDurationConstraint` property. If an unsupported window duration is selected, the window duration will be automatically rounded down to the nearest supported value if possible, else rounded up.
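        ///
        /// A read-only sketch (assuming `request` is an already-initialized
        /// `SNClassifySoundRequest` and the `objc2-core-media` and
        /// `SNTimeDurationConstraint` features are enabled); changing the window
        /// requires constructing a `CMTime`, for example via Core Media's
        /// `CMTimeMakeWithSeconds`:
        ///
        /// ```ignore
        /// // Inspect the current analysis window and the durations the
        /// // classifier actually supports.
        /// let duration = unsafe { request.windowDuration() };
        /// let _constraint = unsafe { request.windowDurationConstraint() };
        /// println!(
        ///     "window seconds: {}",
        ///     duration.value as f64 / duration.timescale as f64,
        /// );
        /// ```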
        #[unsafe(method(windowDuration))]
        #[unsafe(method_family = none)]
        pub unsafe fn windowDuration(&self) -> CMTime;

        #[cfg(feature = "objc2-core-media")]
        /// Setter for [`windowDuration`][Self::windowDuration].
        #[unsafe(method(setWindowDuration:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setWindowDuration(&self, window_duration: CMTime);

        #[cfg(feature = "SNTimeDurationConstraint")]
        /// The constraints governing permitted analysis window durations.
        ///
        /// The analysis window duration is controlled using the `windowDuration` property. If an analysis window duration is selected which does not meet the necessary constraints, it will automatically be adjusted to meet these constraints (see `windowDuration` for more information regarding how this adjustment will be applied).
        #[unsafe(method(windowDurationConstraint))]
        #[unsafe(method_family = none)]
        pub unsafe fn windowDurationConstraint(&self) -> Retained<SNTimeDurationConstraint>;

        /// Lists all labels that can be produced by this request.
        ///
        /// - Returns: An array of strings containing all sound identifiers which can be produced by this request.
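        ///
        /// A sketch (assuming `request` is an already-initialized
        /// `SNClassifySoundRequest`):
        ///
        /// ```ignore
        /// // Each element is a sound identifier that classification results may report.
        /// let labels = unsafe { request.knownClassifications() };
        /// println!("this request can produce {} labels", labels.count());
        /// ```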
        #[unsafe(method(knownClassifications))]
        #[unsafe(method_family = none)]
        pub unsafe fn knownClassifications(&self) -> Retained<NSArray<NSString>>;

        #[cfg(feature = "objc2-core-ml")]
        /// Initializes a sound classification request with the provided MLModel
        ///
        /// - Parameter mlModel: The CoreML audio classification model to be used with this request
        ///
        /// The provided model must accept audio data as input, and output a classification dictionary containing the probability of each category.
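        ///
        /// A sketch of loading a custom model (assumptions: the `objc2-core-ml`
        /// feature is enabled, the model path is hypothetical, and
        /// `modelWithContentsOfURL_error` is objc2-core-ml's binding for
        /// `+[MLModel modelWithContentsOfURL:error:]`):
        ///
        /// ```ignore
        /// // Load a compiled Core ML sound classifier and wrap it in a request.
        /// let path = NSString::from_str("/path/to/MySoundClassifier.mlmodelc");
        /// let url = unsafe { NSURL::fileURLWithPath(&path) };
        /// let model = unsafe { MLModel::modelWithContentsOfURL_error(&url) }
        ///     .expect("failed to load model");
        /// let request = unsafe {
        ///     SNClassifySoundRequest::initWithMLModel_error(
        ///         SNClassifySoundRequest::alloc(),
        ///         &model,
        ///     )
        /// }
        /// .expect("model is not a valid audio classifier");
        /// ```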
        #[unsafe(method(initWithMLModel:error:_))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithMLModel_error(
            this: Allocated<Self>,
            ml_model: &MLModel,
        ) -> Result<Retained<Self>, Retained<NSError>>;

        #[cfg(feature = "SNTypes")]
        /// Initializes a sound classification request with a known classifier.
        ///
        /// - Parameters:
        ///
        /// - classifierIdentifier: An identifier identifying the particular classifier to use for labeling sounds.
        ///
        /// - error: An output parameter which, in the case of an error, will be populated with details about that error. Upon success, the contents of this output parameter are undefined. Please use the return value of this method to determine whether or not an error occurred before using the value assigned to this output parameter.
        ///
        /// - Returns: Upon failure, `nil`; upon success, an `SNClassifySoundRequest` instance which can be added to an analyzer to classify sounds using a recognized classifier.
        ///
        /// This initializer may be used to classify sounds using Apple-provided sound classifiers. Note that Apple may add new classifiers in the future, but it commits to ensuring the consistent performance of existing classifiers.
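        ///
        /// A sketch of handling the error case (assuming the `SNTypes` feature is
        /// enabled and `SNClassifierIdentifierVersion1` is the constant exported by
        /// that module):
        ///
        /// ```ignore
        /// match unsafe {
        ///     SNClassifySoundRequest::initWithClassifierIdentifier_error(
        ///         SNClassifySoundRequest::alloc(),
        ///         SNClassifierIdentifierVersion1,
        ///     )
        /// } {
        ///     Ok(_request) => { /* hand the request to an `SNAudioStreamAnalyzer` */ }
        ///     Err(error) => eprintln!("cannot create classify request: {error:?}"),
        /// }
        /// ```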
        #[unsafe(method(initWithClassifierIdentifier:error:_))]
        #[unsafe(method_family = init)]
        pub unsafe fn initWithClassifierIdentifier_error(
            this: Allocated<Self>,
            classifier_identifier: &SNClassifierIdentifier,
        ) -> Result<Retained<Self>, Retained<NSError>>;

        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}