//! objc2-ml-compute 0.3.2
//!
//! Bindings to the MLCompute framework.
//! Documentation:
//! This file has been automatically generated by `objc2`'s `header-translator`.
//! DO NOT EDIT
use core::ptr::NonNull;
use objc2::__framework_prelude::*;
use objc2_foundation::*;

use crate::*;

extern_class!(
    /// A multi-head attention layer
    ///
    /// A multi-head "Scaled Dot-Product Attention" layer which attends to one or more entries in the input key-value pairs
    /// N=Batch, S=source length, L=target length, E = model(embedding) dimension, K = Key dimension, V = value
    /// dimension H = headCount. The sources to this layer are of shapes: Query:(N,L,E), Key:(N,S,K), Value:(N,S,V),
    /// KeyMask:(N,S), AttentionMask:(1,L,S) or (NxH,L,S). KeyMask and AttentionMask are optional and either, both
    /// or none of them can be passed. KeyMask is of Boolean type and AttentionMask can be of Float or Boolean type.
    /// Output is of shape:(N,L,E).
    /// For details refer to: <https://pytorch.org/docs/stable/nn.html#multiheadattention>
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/mlcompute/mlcmultiheadattentionlayer?language=objc)
    // Objective-C inheritance chain: MLCMultiheadAttentionLayer -> MLCLayer -> NSObject.
    #[unsafe(super(MLCLayer, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    // Only available when the `MLCLayer` superclass binding is compiled in.
    #[cfg(feature = "MLCLayer")]
    // Marked deprecated in the upstream headers this file was generated from.
    #[deprecated]
    pub struct MLCMultiheadAttentionLayer;
);

#[cfg(feature = "MLCLayer")]
extern_conformance!(
    // The class conforms to the `NSObject` protocol (as every NSObject subclass does);
    // this makes protocol methods like `isEqual:`/`hash` available through objc2.
    unsafe impl NSObjectProtocol for MLCMultiheadAttentionLayer {}
);

#[cfg(feature = "MLCLayer")]
impl MLCMultiheadAttentionLayer {
    // Generated accessors for the Objective-C properties of this class, plus the
    // class-method constructor. Every method is a raw message send and therefore
    // `unsafe`; each is feature-gated on the bindings for the types it mentions.
    extern_methods!(
        #[cfg(feature = "MLCMultiheadAttentionDescriptor")]
        /// The multi-head attention descriptor
        #[deprecated]
        #[unsafe(method(descriptor))]
        #[unsafe(method_family = none)]
        pub unsafe fn descriptor(&self) -> Retained<MLCMultiheadAttentionDescriptor>;

        #[cfg(feature = "MLCTensor")]
        /// The weights of query, key, value and output projections
        #[deprecated]
        #[unsafe(method(weights))]
        #[unsafe(method_family = none)]
        pub unsafe fn weights(&self) -> Retained<NSArray<MLCTensor>>;

        #[cfg(feature = "MLCTensor")]
        /// The biases of query, key, value and output projections
        // Returns `Option` because the underlying property is nullable.
        #[deprecated]
        #[unsafe(method(biases))]
        #[unsafe(method_family = none)]
        pub unsafe fn biases(&self) -> Option<Retained<NSArray<MLCTensor>>>;

        #[cfg(feature = "MLCTensor")]
        /// The biases added to key and value
        // Nullable: attention biases are an optional part of the layer configuration.
        #[deprecated]
        #[unsafe(method(attentionBiases))]
        #[unsafe(method_family = none)]
        pub unsafe fn attentionBiases(&self) -> Option<Retained<NSArray<MLCTensor>>>;

        #[cfg(feature = "MLCTensorParameter")]
        /// The weights tensor parameters used for optimizer update
        #[deprecated]
        #[unsafe(method(weightsParameters))]
        #[unsafe(method_family = none)]
        pub unsafe fn weightsParameters(&self) -> Retained<NSArray<MLCTensorParameter>>;

        #[cfg(feature = "MLCTensorParameter")]
        /// The biases tensor parameters used for optimizer update
        #[deprecated]
        #[unsafe(method(biasesParameters))]
        #[unsafe(method_family = none)]
        pub unsafe fn biasesParameters(&self) -> Option<Retained<NSArray<MLCTensorParameter>>>;

        #[cfg(all(feature = "MLCMultiheadAttentionDescriptor", feature = "MLCTensor"))]
        /// Create a multi-head attention layer
        ///
        /// Parameter `weights`: weights corresponding to query, key, value and output projections for all heads
        ///
        /// Parameter `biases`: Optional, biases corresponding to query, key, value and output projections for all heads
        ///
        /// Parameter `attentionBiases`: Optional, An array of biases added to the key and value respectively
        ///
        /// Returns: A new MultiheadAttention layer
        // Class method (no `&self`): maps to `+layerWithDescriptor:weights:biases:attentionBiases:`.
        // Returns `Option` because the Objective-C constructor can return nil.
        #[deprecated]
        #[unsafe(method(layerWithDescriptor:weights:biases:attentionBiases:))]
        #[unsafe(method_family = none)]
        pub unsafe fn layerWithDescriptor_weights_biases_attentionBiases(
            descriptor: &MLCMultiheadAttentionDescriptor,
            weights: &NSArray<MLCTensor>,
            biases: Option<&NSArray<MLCTensor>>,
            attention_biases: Option<&NSArray<MLCTensor>>,
        ) -> Option<Retained<Self>>;
    );
}

/// Methods declared on superclass `MLCLayer`.
#[cfg(feature = "MLCLayer")]
impl MLCMultiheadAttentionLayer {
    // Inherited `NSObject`-style constructors, re-declared here so they return
    // `Retained<Self>` with the correct concrete type. The `method_family`
    // attributes (`new`/`init`) tell objc2 the Objective-C ownership rules.
    extern_methods!(
        #[deprecated]
        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;

        #[deprecated]
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;
    );
}