objc2_metal_performance_shaders_graph/generated/MPSGraphOptimizerOps.rs

//! This file has been automatically generated by `objc2`'s `header-translator`.
//! DO NOT EDIT
use core::ptr::NonNull;
use objc2::__framework_prelude::*;
use objc2_foundation::*;

use crate::*;

/// MPSGraphOptimizerOps.
#[cfg(all(feature = "MPSGraph", feature = "MPSGraphCore"))]
impl MPSGraph {
    extern_methods!(
        #[cfg(feature = "MPSGraphTensor")]
        /// Stochastic gradient descent performs a single gradient descent step:
        ///
        /// `variable = variable - (learningRate * g)`
        ///
        /// where `g` is the gradient of the error with respect to the variable.
        ///
        /// - Parameters:
        /// - learningRateTensor: scalar tensor indicating the learning rate to use with the optimizer
        /// - valuesTensor: values tensor, usually representing the trainable parameters
        /// - gradientTensor: partial gradient of the trainable parameters with respect to the loss
        /// - name: name for the operation
        /// - Returns: A valid MPSGraphTensor object.
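        ///
        /// A minimal usage sketch (not a doctest): it assumes `graph` is an existing
        /// `MPSGraph` and that `learning_rate`, `values`, and `gradient` are tensors
        /// already created on that graph (for example via placeholders); it only shows
        /// how the call is wired up.
        ///
        /// ```ignore
        /// let updated_values = unsafe {
        ///     graph.stochasticGradientDescentWithLearningRateTensor_valuesTensor_gradientTensor_name(
        ///         &learning_rate,
        ///         &values,
        ///         &gradient,
        ///         Some(ns_string!("sgd_update")),
        ///     )
        /// };
        /// // `updated_values` holds `values - learning_rate * gradient`.
        /// ```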
        #[unsafe(method(stochasticGradientDescentWithLearningRateTensor:valuesTensor:gradientTensor:name:))]
        #[unsafe(method_family = none)]
        pub unsafe fn stochasticGradientDescentWithLearningRateTensor_valuesTensor_gradientTensor_name(
            &self,
            learning_rate_tensor: &MPSGraphTensor,
            values_tensor: &MPSGraphTensor,
            gradient_tensor: &MPSGraphTensor,
            name: Option<&NSString>,
        ) -> Retained<MPSGraphTensor>;

        #[cfg(all(
            feature = "MPSGraphMemoryOps",
            feature = "MPSGraphOperation",
            feature = "MPSGraphTensor"
        ))]
        /// Stochastic gradient descent performs a single gradient descent step:
        ///
        /// `variable = variable - (learningRate * g)`
        ///
        /// where `g` is the gradient of the error with respect to the variable.
        /// This operation writes the result directly to the variable.
        ///
        /// - Parameters:
        /// - learningRateTensor: scalar tensor indicating the learning rate to use with the optimizer
        /// - variable: variable operation with trainable parameters
        /// - gradientTensor: partial gradient of the trainable parameters with respect to the loss
        /// - name: name for the operation
        /// - Returns: A valid MPSGraphOperation object.
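        ///
        /// A minimal usage sketch (not a doctest): it assumes `graph`, `learning_rate`,
        /// and `gradient` exist as above, and that `variable_op` is an
        /// `MPSGraphVariableOp` obtained from a variable created on the graph; the
        /// returned operation performs the in-place update when the graph runs.
        ///
        /// ```ignore
        /// let update_op = unsafe {
        ///     graph.applyStochasticGradientDescentWithLearningRateTensor_variable_gradientTensor_name(
        ///         &learning_rate,
        ///         &variable_op,
        ///         &gradient,
        ///         Some(ns_string!("apply_sgd")),
        ///     )
        /// };
        /// // Pass `update_op` as a target operation when running the graph so the
        /// // variable is updated in place.
        /// ```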
        #[unsafe(method(applyStochasticGradientDescentWithLearningRateTensor:variable:gradientTensor:name:))]
        #[unsafe(method_family = none)]
        pub unsafe fn applyStochasticGradientDescentWithLearningRateTensor_variable_gradientTensor_name(
            &self,
            learning_rate_tensor: &MPSGraphTensor,
            variable: &MPSGraphVariableOp,
            gradient_tensor: &MPSGraphTensor,
            name: Option<&NSString>,
        ) -> Retained<MPSGraphOperation>;

        #[cfg(feature = "MPSGraphTensor")]
        /// Creates operations to apply Adam optimization.
        ///
        /// The Adam update operations are added, with the current learning rate
        /// computed from the beta power tensors:
        /// ```md
        /// lr[t] = learningRate * sqrt(1 - beta2^t) / (1 - beta1^t)
        /// m[t] = beta1 * m[t-1] + (1 - beta1) * g
        /// v[t] = beta2 * v[t-1] + (1 - beta2) * (g ^ 2)
        /// maxVel[t] = max(maxVel[t-1], v[t])
        /// variable = variable - lr[t] * m[t] / (sqrt(maxVel) + epsilon)
        /// ```
        /// - Parameters:
        /// - learningRateTensor: scalar tensor indicating the learning rate to use with the optimizer
        /// - beta1Tensor: beta1 tensor
        /// - beta2Tensor: beta2 tensor
        /// - epsilonTensor: epsilon tensor
        /// - beta1PowerTensor: `beta1^t` beta1 power tensor
        /// - beta2PowerTensor: `beta2^t` beta2 power tensor
        /// - valuesTensor: values to update with optimization
        /// - momentumTensor: momentum tensor
        /// - velocityTensor: velocity tensor
        /// - maximumVelocityTensor: optional maximum velocity tensor
        /// - gradientTensor: partial gradient of the trainable parameters with respect to the loss
        /// - name: name for the operation
        /// - Returns: if maximumVelocity is nil, an array of 3 tensors (update, newMomentum, newVelocity); otherwise an array of 4 tensors (update, newMomentum, newVelocity, newMaximumVelocity)
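        ///
        /// A minimal usage sketch (not a doctest): it assumes the learning-rate,
        /// beta, epsilon, beta-power, values, momentum, velocity, and gradient
        /// tensors already exist on `graph`; the local binding names are
        /// illustrative only.
        ///
        /// ```ignore
        /// let results = unsafe {
        ///     graph.adamWithLearningRateTensor_beta1Tensor_beta2Tensor_epsilonTensor_beta1PowerTensor_beta2PowerTensor_valuesTensor_momentumTensor_velocityTensor_maximumVelocityTensor_gradientTensor_name(
        ///         &learning_rate,
        ///         &beta1,
        ///         &beta2,
        ///         &epsilon,
        ///         &beta1_power,
        ///         &beta2_power,
        ///         &values,
        ///         &momentum,
        ///         &velocity,
        ///         None, // no maximum velocity: 3 result tensors instead of 4
        ///         &gradient,
        ///         Some(ns_string!("adam_update")),
        ///     )
        /// };
        /// // results[0] = updated values, results[1] = new momentum, results[2] = new velocity
        /// ```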
        #[unsafe(method(adamWithLearningRateTensor:beta1Tensor:beta2Tensor:epsilonTensor:beta1PowerTensor:beta2PowerTensor:valuesTensor:momentumTensor:velocityTensor:maximumVelocityTensor:gradientTensor:name:))]
        #[unsafe(method_family = none)]
        pub unsafe fn adamWithLearningRateTensor_beta1Tensor_beta2Tensor_epsilonTensor_beta1PowerTensor_beta2PowerTensor_valuesTensor_momentumTensor_velocityTensor_maximumVelocityTensor_gradientTensor_name(
            &self,
            learning_rate_tensor: &MPSGraphTensor,
            beta1_tensor: &MPSGraphTensor,
            beta2_tensor: &MPSGraphTensor,
            epsilon_tensor: &MPSGraphTensor,
            beta1_power_tensor: &MPSGraphTensor,
            beta2_power_tensor: &MPSGraphTensor,
            values_tensor: &MPSGraphTensor,
            momentum_tensor: &MPSGraphTensor,
            velocity_tensor: &MPSGraphTensor,
            maximum_velocity_tensor: Option<&MPSGraphTensor>,
            gradient_tensor: &MPSGraphTensor,
            name: Option<&NSString>,
        ) -> Retained<NSArray<MPSGraphTensor>>;

        #[cfg(feature = "MPSGraphTensor")]
        /// Creates operations to apply Adam optimization.
        ///
        /// The Adam update operations are added, using the current learning rate
        /// supplied directly as a tensor:
        /// ```md
        /// m[t] = beta1 * m[t-1] + (1 - beta1) * g
        /// v[t] = beta2 * v[t-1] + (1 - beta2) * (g ^ 2)
        /// maxVel[t] = max(maxVel[t-1], v[t])
        /// variable = variable - lr[t] * m[t] / (sqrt(maxVel) + epsilon)
        /// ```
        /// - Parameters:
        /// - currentLearningRateTensor: scalar tensor indicating the current learning rate to use with the optimizer
        /// - beta1Tensor: beta1 tensor
        /// - beta2Tensor: beta2 tensor
        /// - epsilonTensor: epsilon tensor
        /// - valuesTensor: values to update with optimization
        /// - momentumTensor: momentum tensor
        /// - velocityTensor: velocity tensor
        /// - maximumVelocityTensor: optional maximum velocity tensor
        /// - gradientTensor: partial gradient of the trainable parameters with respect to the loss
        /// - name: name for the operation
        /// - Returns: if maximumVelocity is nil, an array of 3 tensors (update, newMomentum, newVelocity); otherwise an array of 4 tensors (update, newMomentum, newVelocity, newMaximumVelocity)
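        ///
        /// A minimal usage sketch (not a doctest): it assumes the caller has already
        /// computed the current learning rate `lr[t]` as a tensor (together with the
        /// beta, epsilon, values, momentum, velocity, and gradient tensors); binding
        /// names are illustrative only.
        ///
        /// ```ignore
        /// let results = unsafe {
        ///     graph.adamWithCurrentLearningRateTensor_beta1Tensor_beta2Tensor_epsilonTensor_valuesTensor_momentumTensor_velocityTensor_maximumVelocityTensor_gradientTensor_name(
        ///         &current_learning_rate,
        ///         &beta1,
        ///         &beta2,
        ///         &epsilon,
        ///         &values,
        ///         &momentum,
        ///         &velocity,
        ///         Some(&maximum_velocity), // with a maximum velocity: 4 result tensors
        ///         &gradient,
        ///         Some(ns_string!("adam_current_lr_update")),
        ///     )
        /// };
        /// // results[3] holds the new maximum velocity when one is supplied.
        /// ```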
        #[unsafe(method(adamWithCurrentLearningRateTensor:beta1Tensor:beta2Tensor:epsilonTensor:valuesTensor:momentumTensor:velocityTensor:maximumVelocityTensor:gradientTensor:name:))]
        #[unsafe(method_family = none)]
        pub unsafe fn adamWithCurrentLearningRateTensor_beta1Tensor_beta2Tensor_epsilonTensor_valuesTensor_momentumTensor_velocityTensor_maximumVelocityTensor_gradientTensor_name(
            &self,
            current_learning_rate_tensor: &MPSGraphTensor,
            beta1_tensor: &MPSGraphTensor,
            beta2_tensor: &MPSGraphTensor,
            epsilon_tensor: &MPSGraphTensor,
            values_tensor: &MPSGraphTensor,
            momentum_tensor: &MPSGraphTensor,
            velocity_tensor: &MPSGraphTensor,
            maximum_velocity_tensor: Option<&MPSGraphTensor>,
            gradient_tensor: &MPSGraphTensor,
            name: Option<&NSString>,
        ) -> Retained<NSArray<MPSGraphTensor>>;
    );
}