1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
//! This file has been automatically generated by `objc2`'s `header-translator`.
//! DO NOT EDIT
use core::ffi::*;
use core::ptr::NonNull;
use objc2::__framework_prelude::*;
use objc2_foundation::*;
use crate::*;
// Declares the Rust type bridging the Objective-C `MLCAdamWOptimizer` class.
// `#[unsafe(super(...))]` records the superclass chain (MLCOptimizer -> NSObject);
// the whole item is gated on the "MLCOptimizer" cargo feature and marked
// `#[deprecated]` to mirror the upstream header's deprecation attribute.
extern_class!(
/// The MLCAdamWOptimizer specifies the AdamW optimizer.
///
/// See also [Apple's documentation](https://developer.apple.com/documentation/mlcompute/mlcadamwoptimizer?language=objc)
#[unsafe(super(MLCOptimizer, NSObject))]
#[derive(Debug, PartialEq, Eq, Hash)]
#[cfg(feature = "MLCOptimizer")]
#[deprecated]
pub struct MLCAdamWOptimizer;
);
// Records that the Objective-C class conforms to `NSCopying`
// (instances can be copied via `-copy`/`-copyWithZone:`).
#[cfg(feature = "MLCOptimizer")]
extern_conformance!(
unsafe impl NSCopying for MLCAdamWOptimizer {}
);
// `Result = Self` tells objc2's typed copy helper that copying an
// `MLCAdamWOptimizer` yields another `MLCAdamWOptimizer` (not a different class).
#[cfg(feature = "MLCOptimizer")]
unsafe impl CopyingHelper for MLCAdamWOptimizer {
type Result = Self;
}
// Records conformance to the base `NSObject` protocol, giving access to
// standard Objective-C object methods (`isEqual:`, `hash`, `description`, ...).
#[cfg(feature = "MLCOptimizer")]
extern_conformance!(
unsafe impl NSObjectProtocol for MLCAdamWOptimizer {}
);
// Property accessors and class-method constructors for `MLCAdamWOptimizer`.
// Each `#[unsafe(method(...))]` entry generates an FFI shim that sends the
// named Objective-C selector; `method_family = none` means no special
// ownership semantics beyond the default for non-init/new selectors.
#[cfg(feature = "MLCOptimizer")]
impl MLCAdamWOptimizer {
extern_methods!(
/// Coefficient used for computing running averages of gradient.
///
/// The default is 0.9.
#[deprecated]
#[unsafe(method(beta1))]
#[unsafe(method_family = none)]
pub unsafe fn beta1(&self) -> c_float;
/// Coefficient used for computing running averages of square of gradient.
///
/// The default is 0.999.
#[deprecated]
#[unsafe(method(beta2))]
#[unsafe(method_family = none)]
pub unsafe fn beta2(&self) -> c_float;
/// A term added to improve numerical stability.
///
/// The default is 1e-8.
#[deprecated]
#[unsafe(method(epsilon))]
#[unsafe(method_family = none)]
pub unsafe fn epsilon(&self) -> c_float;
/// Whether to use the AMSGrad variant of this algorithm
///
/// The default is false
#[deprecated]
#[unsafe(method(usesAMSGrad))]
#[unsafe(method_family = none)]
pub unsafe fn usesAMSGrad(&self) -> bool;
/// The current timestep used for the update.
///
/// The default is 1.
#[deprecated]
#[unsafe(method(timeStep))]
#[unsafe(method_family = none)]
pub unsafe fn timeStep(&self) -> NSUInteger;
#[cfg(feature = "MLCOptimizerDescriptor")]
/// Create an MLCAdamWOptimizer object with defaults
///
/// Returns: A new MLCAdamWOptimizer object.
#[deprecated]
#[unsafe(method(optimizerWithDescriptor:))]
#[unsafe(method_family = none)]
pub unsafe fn optimizerWithDescriptor(
optimizer_descriptor: &MLCOptimizerDescriptor,
) -> Retained<Self>;
#[cfg(feature = "MLCOptimizerDescriptor")]
/// Create an MLCAdamWOptimizer object
///
/// Parameter `optimizerDescriptor`: The optimizer descriptor object
///
/// Parameter `beta1`: The beta1 value
///
/// Parameter `beta2`: The beta2 value
///
/// Parameter `epsilon`: The epsilon value to use to improve numerical stability
///
/// Parameter `usesAMSGrad`: Whether to use the AMSGrad variant of this algorithm from the paper (https://arxiv.org/abs/1904.09237)
///
/// Parameter `timeStep`: The initial timestep to use for the update
///
/// Returns: A new MLCAdamWOptimizer object.
#[deprecated]
#[unsafe(method(optimizerWithDescriptor:beta1:beta2:epsilon:usesAMSGrad:timeStep:))]
#[unsafe(method_family = none)]
pub unsafe fn optimizerWithDescriptor_beta1_beta2_epsilon_usesAMSGrad_timeStep(
optimizer_descriptor: &MLCOptimizerDescriptor,
beta1: c_float,
beta2: c_float,
epsilon: c_float,
uses_ams_grad: bool,
time_step: NSUInteger,
) -> Retained<Self>;
);
}
/// Methods declared on superclass `MLCOptimizer`.
// Re-declared here so callers get `MLCAdamWOptimizer`-typed returns from the
// inherited `new`/`init` selectors. `method_family = new`/`init` encode the
// Objective-C ownership conventions (+1 retained result) for these selectors.
#[cfg(feature = "MLCOptimizer")]
impl MLCAdamWOptimizer {
extern_methods!(
#[deprecated]
#[unsafe(method(new))]
#[unsafe(method_family = new)]
pub unsafe fn new() -> Retained<Self>;
#[deprecated]
#[unsafe(method(init))]
#[unsafe(method_family = init)]
pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;
);
}