objc2_ml_compute/generated/MLCTrainingGraph.rs
//! This file has been automatically generated by `objc2`'s `header-translator`.
//! DO NOT EDIT
use core::ffi::*;
use core::ptr::NonNull;
use objc2::__framework_prelude::*;
use objc2_foundation::*;

use crate::*;

extern_class!(
    /// A training graph created from one or more MLCGraph objects
    /// plus additional layers added directly to the training graph.
    ///
    /// See also [Apple's documentation](https://developer.apple.com/documentation/mlcompute/mlctraininggraph?language=objc)
    #[unsafe(super(MLCGraph, NSObject))]
    #[derive(Debug, PartialEq, Eq, Hash)]
    #[cfg(feature = "MLCGraph")]
    #[deprecated]
    pub struct MLCTrainingGraph;
);

#[cfg(feature = "MLCGraph")]
extern_conformance!(
    unsafe impl NSObjectProtocol for MLCTrainingGraph {}
);

#[cfg(feature = "MLCGraph")]
impl MLCTrainingGraph {
    extern_methods!(
        #[cfg(feature = "MLCOptimizer")]
        /// The optimizer to be used with the training graph
        #[deprecated]
        #[unsafe(method(optimizer))]
        #[unsafe(method_family = none)]
        pub unsafe fn optimizer(&self) -> Option<Retained<MLCOptimizer>>;

        /// Returns the total size in bytes of device memory used for all intermediate tensors
        /// in the forward and gradient passes and the optimizer update, for all layers in the training graph.
        /// We recommend executing an iteration before checking the device memory size, as
        /// the buffers needed are allocated when the corresponding pass, such as the
        /// gradient or optimizer update, is executed.
        ///
        /// Returns: An NSUInteger value
        #[deprecated]
        #[unsafe(method(deviceMemorySize))]
        #[unsafe(method_family = none)]
        pub unsafe fn deviceMemorySize(&self) -> NSUInteger;

        #[cfg(all(feature = "MLCLayer", feature = "MLCOptimizer"))]
        /// Create a training graph
        ///
        /// Parameter `graphObjects`: The layers from these graph objects will be added to the training graph
        ///
        /// Parameter `lossLayer`: The loss layer to use. The loss layer can also be added to the training graph
        /// using nodeWithLayer:sources:lossLabels
        ///
        /// Parameter `optimizer`: The optimizer to use
        ///
        /// Returns: A new training graph object
        #[deprecated]
        #[unsafe(method(graphWithGraphObjects:lossLayer:optimizer:))]
        #[unsafe(method_family = none)]
        pub unsafe fn graphWithGraphObjects_lossLayer_optimizer(
            graph_objects: &NSArray<MLCGraph>,
            loss_layer: Option<&MLCLayer>,
            optimizer: Option<&MLCOptimizer>,
        ) -> Retained<Self>;
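
        // Illustrative usage sketch, not part of the generated bindings: building a
        // training graph from pre-existing objects. `graph_objects`, `loss_layer` and
        // `optimizer` are assumed to have been created with the MLCGraph, MLCLayer and
        // MLCOptimizer APIs respectively.
        //
        //     let training_graph = unsafe {
        //         MLCTrainingGraph::graphWithGraphObjects_lossLayer_optimizer(
        //             &graph_objects,
        //             Some(&loss_layer),
        //             Some(&optimizer),
        //         )
        //     };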

        #[cfg(feature = "MLCTensor")]
        /// Add the list of inputs to the training graph
        ///
        /// Parameter `inputs`: The inputs
        ///
        /// Parameter `lossLabels`: The loss label inputs
        ///
        /// Returns: A boolean indicating success or failure
        #[deprecated]
        #[unsafe(method(addInputs:lossLabels:))]
        #[unsafe(method_family = none)]
        pub unsafe fn addInputs_lossLabels(
            &self,
            inputs: &NSDictionary<NSString, MLCTensor>,
            loss_labels: Option<&NSDictionary<NSString, MLCTensor>>,
        ) -> bool;

        #[cfg(feature = "MLCTensor")]
        /// Add the list of inputs to the training graph
        ///
        /// Each input, loss label or label weights tensor is identified by an NSString.
        /// When the training graph is executed, this NSString is used to identify which data object
        /// should be used as input data for each tensor whose device memory needs to be updated
        /// before the graph is executed.
        ///
        /// Parameter `inputs`: The inputs
        ///
        /// Parameter `lossLabels`: The loss label inputs
        ///
        /// Parameter `lossLabelWeights`: The loss label weights
        ///
        /// Returns: A boolean indicating success or failure
        #[deprecated]
        #[unsafe(method(addInputs:lossLabels:lossLabelWeights:))]
        #[unsafe(method_family = none)]
        pub unsafe fn addInputs_lossLabels_lossLabelWeights(
            &self,
            inputs: &NSDictionary<NSString, MLCTensor>,
            loss_labels: Option<&NSDictionary<NSString, MLCTensor>>,
            loss_label_weights: Option<&NSDictionary<NSString, MLCTensor>>,
        ) -> bool;
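
        // Illustrative usage sketch, not part of the generated bindings: registering
        // named inputs and loss labels. `inputs` and `loss_labels` are assumed to be
        // pre-built `NSDictionary<NSString, MLCTensor>` values whose keys match the
        // keys of the data dictionaries passed later to the execute methods.
        //
        //     let added = unsafe {
        //         training_graph.addInputs_lossLabels_lossLabelWeights(
        //             &inputs,
        //             Some(&loss_labels),
        //             None,
        //         )
        //     };
        //     assert!(added);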

        #[cfg(feature = "MLCTensor")]
        /// Add the list of outputs to the training graph
        ///
        /// Parameter `outputs`: The outputs
        ///
        /// Returns: A boolean indicating success or failure
        #[deprecated]
        #[unsafe(method(addOutputs:))]
        #[unsafe(method_family = none)]
        pub unsafe fn addOutputs(&self, outputs: &NSDictionary<NSString, MLCTensor>) -> bool;

        #[cfg(feature = "MLCTensor")]
        /// Add the list of tensors whose contributions are not to be taken into account when computing gradients during the gradient pass
        ///
        /// Parameter `tensors`: The list of tensors
        ///
        /// Returns: A boolean indicating success or failure
        #[deprecated]
        #[unsafe(method(stopGradientForTensors:))]
        #[unsafe(method_family = none)]
        pub unsafe fn stopGradientForTensors(&self, tensors: &NSArray<MLCTensor>) -> bool;

        #[cfg(all(feature = "MLCDevice", feature = "MLCTypes"))]
        /// Compile the training graph for a device.
        ///
        /// Parameter `options`: The compiler options to use when compiling the training graph
        ///
        /// Parameter `device`: The MLCDevice object
        ///
        /// Returns: A boolean indicating success or failure
        #[deprecated]
        #[unsafe(method(compileWithOptions:device:))]
        #[unsafe(method_family = none)]
        pub unsafe fn compileWithOptions_device(
            &self,
            options: MLCGraphCompilationOptions,
            device: &MLCDevice,
        ) -> bool;
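
        // Illustrative usage sketch, not part of the generated bindings: compiling the
        // graph for a device. `device` is assumed to be a `Retained<MLCDevice>` obtained
        // from the MLCDevice API, and `options` an `MLCGraphCompilationOptions` value
        // from MLCTypes.
        //
        //     let compiled = unsafe {
        //         training_graph.compileWithOptions_device(options, &device)
        //     };
        //     assert!(compiled);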

        #[cfg(all(
            feature = "MLCDevice",
            feature = "MLCTensor",
            feature = "MLCTensorData",
            feature = "MLCTypes"
        ))]
        /// Compile the training graph for a device.
        ///
        /// Specifying the list of constant tensors when we compile the graph allows MLCompute to perform additional optimizations at compile time.
        ///
        /// Parameter `options`: The compiler options to use when compiling the training graph
        ///
        /// Parameter `device`: The MLCDevice object
        ///
        /// Parameter `inputTensors`: The list of input tensors that are constants
        ///
        /// Parameter `inputTensorsData`: The tensor data to be used with these constant input tensors
        ///
        /// Returns: A boolean indicating success or failure
        #[unsafe(method(compileWithOptions:device:inputTensors:inputTensorsData:))]
        #[unsafe(method_family = none)]
        pub unsafe fn compileWithOptions_device_inputTensors_inputTensorsData(
            &self,
            options: MLCGraphCompilationOptions,
            device: &MLCDevice,
            input_tensors: Option<&NSDictionary<NSString, MLCTensor>>,
            input_tensors_data: Option<&NSDictionary<NSString, MLCTensorData>>,
        ) -> bool;

        #[cfg(feature = "MLCOptimizer")]
        /// Compile the optimizer to be used with a training graph.
        ///
        /// Typically the optimizer to be used with a training graph is specified when the training graph is created using
        /// graphWithGraphObjects:lossLayer:optimizer. The optimizer will be compiled in when compileWithOptions:device
        /// is called if an optimizer is specified with the training graph. In the case where the optimizer to be used is not known
        /// when the graph is created or compiled, this method can be used to associate and compile a training graph with an optimizer.
        ///
        /// Parameter `optimizer`: The MLCOptimizer object
        ///
        /// Returns: A boolean indicating success or failure
        #[deprecated]
        #[unsafe(method(compileOptimizer:))]
        #[unsafe(method_family = none)]
        pub unsafe fn compileOptimizer(&self, optimizer: &MLCOptimizer) -> bool;

        /// Link multiple training graphs
        ///
        /// This is used to link subsequent training graphs with the first training sub-graph.
        /// This method should be used when tensors are shared by one or more layers in multiple sub-graphs.
        ///
        /// Parameter `graphs`: The list of training graphs to link
        ///
        /// Returns: A boolean indicating success or failure
        #[deprecated]
        #[unsafe(method(linkWithGraphs:))]
        #[unsafe(method_family = none)]
        pub unsafe fn linkWithGraphs(&self, graphs: &NSArray<MLCTrainingGraph>) -> bool;

        #[cfg(feature = "MLCTensor")]
        /// Get the gradient tensor for an input tensor
        ///
        /// Parameter `input`: The input tensor
        ///
        /// Returns: The gradient tensor
        #[deprecated]
        #[unsafe(method(gradientTensorForInput:))]
        #[unsafe(method_family = none)]
        pub unsafe fn gradientTensorForInput(
            &self,
            input: &MLCTensor,
        ) -> Option<Retained<MLCTensor>>;

        #[cfg(all(feature = "MLCLayer", feature = "MLCTensor"))]
        /// Get the source gradient tensors for a layer in the training graph
        ///
        /// Parameter `layer`: A layer in the training graph
        ///
        /// Returns: A list of tensors
        #[deprecated]
        #[unsafe(method(sourceGradientTensorsForLayer:))]
        #[unsafe(method_family = none)]
        pub unsafe fn sourceGradientTensorsForLayer(
            &self,
            layer: &MLCLayer,
        ) -> Retained<NSArray<MLCTensor>>;

        #[cfg(all(feature = "MLCLayer", feature = "MLCTensor"))]
        /// Get the result gradient tensors for a layer in the training graph
        ///
        /// Parameter `layer`: A layer in the training graph
        ///
        /// Returns: A list of tensors
        #[deprecated]
        #[unsafe(method(resultGradientTensorsForLayer:))]
        #[unsafe(method_family = none)]
        pub unsafe fn resultGradientTensorsForLayer(
            &self,
            layer: &MLCLayer,
        ) -> Retained<NSArray<MLCTensor>>;

        #[cfg(all(feature = "MLCLayer", feature = "MLCTensor"))]
        /// Get the gradient data for a trainable parameter associated with a layer
        ///
        /// This can be used to get the gradient data for weights or biases parameters associated with a convolution,
        /// fully connected or convolution transpose layer
        ///
        /// Parameter `parameter`: The updatable parameter associated with the layer
        ///
        /// Parameter `layer`: A layer in the training graph. Must be one of the following:
        /// - MLCConvolutionLayer
        /// - MLCFullyConnectedLayer
        /// - MLCBatchNormalizationLayer
        /// - MLCInstanceNormalizationLayer
        /// - MLCGroupNormalizationLayer
        /// - MLCLayerNormalizationLayer
        /// - MLCEmbeddingLayer
        /// - MLCMultiheadAttentionLayer
        ///
        /// Returns: The gradient data. Will return nil if the layer is marked as not trainable or if the
        /// training graph is not executed with separate calls to the forward and gradient passes.
        #[deprecated]
        #[unsafe(method(gradientDataForParameter:layer:))]
        #[unsafe(method_family = none)]
        pub unsafe fn gradientDataForParameter_layer(
            &self,
            parameter: &MLCTensor,
            layer: &MLCLayer,
        ) -> Option<Retained<NSData>>;
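
        // Illustrative usage sketch, not part of the generated bindings: reading back
        // the gradient of a layer's weights after a separate gradient pass has run.
        // `weights_tensor` and `conv_layer` are assumed to come from the layer that
        // owns the parameter.
        //
        //     let gradient: Option<Retained<NSData>> = unsafe {
        //         training_graph.gradientDataForParameter_layer(&weights_tensor, &conv_layer)
        //     };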

        #[cfg(feature = "MLCTensor")]
        /// Allocate an entry for a user specified gradient for a tensor
        ///
        /// Parameter `tensor`: A result tensor produced by a layer in the training graph
        /// that is input to some user specified code and will need to
        /// provide a user gradient during the gradient pass.
        ///
        /// Returns: A gradient tensor
        #[deprecated]
        #[unsafe(method(allocateUserGradientForTensor:))]
        #[unsafe(method_family = none)]
        pub unsafe fn allocateUserGradientForTensor(
            &self,
            tensor: &MLCTensor,
        ) -> Option<Retained<MLCTensor>>;

        #[cfg(all(
            feature = "MLCTensor",
            feature = "MLCTensorData",
            feature = "MLCTypes",
            feature = "block2"
        ))]
        /// Execute the training graph (forward, gradient and optimizer update) with given source and label data
        ///
        /// Execute the training graph with given source and label data. If an optimizer is specified, the optimizer update is applied.
        /// If MLCExecutionOptionsSynchronous is specified in 'options', this method returns after the graph has been executed.
        /// Otherwise, this method returns after the graph has been queued for execution. The completion handler is called after the graph
        /// has finished execution.
        ///
        /// Parameter `inputsData`: The data objects to use for inputs
        ///
        /// Parameter `lossLabelsData`: The data objects to use for loss labels
        ///
        /// Parameter `lossLabelWeightsData`: The data objects to use for loss label weights
        ///
        /// Parameter `batchSize`: The batch size to use. For a graph where batch size changes between layers this value must be 0.
        ///
        /// Parameter `options`: The execution options
        ///
        /// Parameter `completionHandler`: The completion handler
        ///
        /// Returns: A boolean indicating success or failure
        #[deprecated]
        #[unsafe(method(executeWithInputsData:lossLabelsData:lossLabelWeightsData:batchSize:options:completionHandler:))]
        #[unsafe(method_family = none)]
        pub unsafe fn executeWithInputsData_lossLabelsData_lossLabelWeightsData_batchSize_options_completionHandler(
            &self,
            inputs_data: &NSDictionary<NSString, MLCTensorData>,
            loss_labels_data: Option<&NSDictionary<NSString, MLCTensorData>>,
            loss_label_weights_data: Option<&NSDictionary<NSString, MLCTensorData>>,
            batch_size: NSUInteger,
            options: MLCExecutionOptions,
            completion_handler: MLCGraphCompletionHandler,
        ) -> bool;
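
        // Illustrative usage sketch, not part of the generated bindings: one training
        // iteration driven by named data objects. `inputs_data` and `labels_data` are
        // assumed to be pre-built `NSDictionary<NSString, MLCTensorData>` values keyed
        // like the tensors registered with the addInputs methods, `exec_options` an
        // `MLCExecutionOptions` value, and `handler` a block built with the block2
        // crate to match `MLCGraphCompletionHandler` (construction not shown).
        //
        //     let queued = unsafe {
        //         training_graph
        //             .executeWithInputsData_lossLabelsData_lossLabelWeightsData_batchSize_options_completionHandler(
        //                 &inputs_data,
        //                 Some(&labels_data),
        //                 None,
        //                 batch_size,
        //                 exec_options,
        //                 handler,
        //             )
        //     };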

        #[cfg(all(
            feature = "MLCTensor",
            feature = "MLCTensorData",
            feature = "MLCTypes",
            feature = "block2"
        ))]
        /// Execute the training graph (forward, gradient and optimizer update) with given source and label data
        ///
        /// Parameter `inputsData`: The data objects to use for inputs
        ///
        /// Parameter `lossLabelsData`: The data objects to use for loss labels
        ///
        /// Parameter `lossLabelWeightsData`: The data objects to use for loss label weights
        ///
        /// Parameter `outputsData`: The data objects to use for outputs
        ///
        /// Parameter `batchSize`: The batch size to use. For a graph where batch size changes between layers this value must be 0.
        ///
        /// Parameter `options`: The execution options
        ///
        /// Parameter `completionHandler`: The completion handler
        ///
        /// Returns: A boolean indicating success or failure
        #[deprecated]
        #[unsafe(method(executeWithInputsData:lossLabelsData:lossLabelWeightsData:outputsData:batchSize:options:completionHandler:))]
        #[unsafe(method_family = none)]
        pub unsafe fn executeWithInputsData_lossLabelsData_lossLabelWeightsData_outputsData_batchSize_options_completionHandler(
            &self,
            inputs_data: &NSDictionary<NSString, MLCTensorData>,
            loss_labels_data: Option<&NSDictionary<NSString, MLCTensorData>>,
            loss_label_weights_data: Option<&NSDictionary<NSString, MLCTensorData>>,
            outputs_data: Option<&NSDictionary<NSString, MLCTensorData>>,
            batch_size: NSUInteger,
            options: MLCExecutionOptions,
            completion_handler: MLCGraphCompletionHandler,
        ) -> bool;

        #[cfg(all(feature = "MLCTensor", feature = "MLCTypes", feature = "block2"))]
        /// Execute the forward pass of the training graph
        ///
        /// Parameter `batchSize`: The batch size to use. For a graph where batch size changes between layers this value must be 0.
        ///
        /// Parameter `options`: The execution options
        ///
        /// Parameter `completionHandler`: The completion handler
        ///
        /// Returns: A boolean indicating success or failure
        #[deprecated]
        #[unsafe(method(executeForwardWithBatchSize:options:completionHandler:))]
        #[unsafe(method_family = none)]
        pub unsafe fn executeForwardWithBatchSize_options_completionHandler(
            &self,
            batch_size: NSUInteger,
            options: MLCExecutionOptions,
            completion_handler: MLCGraphCompletionHandler,
        ) -> bool;

        #[cfg(all(
            feature = "MLCTensor",
            feature = "MLCTensorData",
            feature = "MLCTypes",
            feature = "block2"
        ))]
        /// Execute the forward pass for the training graph
        ///
        /// Parameter `batchSize`: The batch size to use. For a graph where batch size changes between layers this value must be 0.
        ///
        /// Parameter `options`: The execution options
        ///
        /// Parameter `outputsData`: The data objects to use for outputs
        ///
        /// Parameter `completionHandler`: The completion handler
        ///
        /// Returns: A boolean indicating success or failure
        #[deprecated]
        #[unsafe(method(executeForwardWithBatchSize:options:outputsData:completionHandler:))]
        #[unsafe(method_family = none)]
        pub unsafe fn executeForwardWithBatchSize_options_outputsData_completionHandler(
            &self,
            batch_size: NSUInteger,
            options: MLCExecutionOptions,
            outputs_data: Option<&NSDictionary<NSString, MLCTensorData>>,
            completion_handler: MLCGraphCompletionHandler,
        ) -> bool;

        #[cfg(all(feature = "MLCTensor", feature = "MLCTypes", feature = "block2"))]
        /// Execute the gradient pass of the training graph
        ///
        /// Parameter `batchSize`: The batch size to use. For a graph where batch size changes between layers this value must be 0.
        ///
        /// Parameter `options`: The execution options
        ///
        /// Parameter `completionHandler`: The completion handler
        ///
        /// Returns: A boolean indicating success or failure
        #[deprecated]
        #[unsafe(method(executeGradientWithBatchSize:options:completionHandler:))]
        #[unsafe(method_family = none)]
        pub unsafe fn executeGradientWithBatchSize_options_completionHandler(
            &self,
            batch_size: NSUInteger,
            options: MLCExecutionOptions,
            completion_handler: MLCGraphCompletionHandler,
        ) -> bool;

        #[cfg(all(
            feature = "MLCTensor",
            feature = "MLCTensorData",
            feature = "MLCTypes",
            feature = "block2"
        ))]
        /// Execute the gradient pass of the training graph
        ///
        /// Parameter `batchSize`: The batch size to use. For a graph where batch size changes between layers this value must be 0.
        ///
        /// Parameter `options`: The execution options
        ///
        /// Parameter `outputsData`: The data objects to use for outputs
        ///
        /// Parameter `completionHandler`: The completion handler
        ///
        /// Returns: A boolean indicating success or failure
        #[deprecated]
        #[unsafe(method(executeGradientWithBatchSize:options:outputsData:completionHandler:))]
        #[unsafe(method_family = none)]
        pub unsafe fn executeGradientWithBatchSize_options_outputsData_completionHandler(
            &self,
            batch_size: NSUInteger,
            options: MLCExecutionOptions,
            outputs_data: Option<&NSDictionary<NSString, MLCTensorData>>,
            completion_handler: MLCGraphCompletionHandler,
        ) -> bool;

        #[cfg(all(feature = "MLCTensor", feature = "MLCTypes", feature = "block2"))]
        /// Execute the optimizer update pass of the training graph
        ///
        /// Parameter `options`: The execution options
        ///
        /// Parameter `completionHandler`: The completion handler
        ///
        /// Returns: A boolean indicating success or failure
        #[deprecated]
        #[unsafe(method(executeOptimizerUpdateWithOptions:completionHandler:))]
        #[unsafe(method_family = none)]
        pub unsafe fn executeOptimizerUpdateWithOptions_completionHandler(
            &self,
            options: MLCExecutionOptions,
            completion_handler: MLCGraphCompletionHandler,
        ) -> bool;

        /// Synchronize updates (weights/biases from convolution, fully connected and LSTM layers, tensor parameters)
        /// from device memory to host memory.
        #[deprecated]
        #[unsafe(method(synchronizeUpdates))]
        #[unsafe(method_family = none)]
        pub unsafe fn synchronizeUpdates(&self);
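
        // Illustrative usage sketch, not part of the generated bindings: running the
        // forward, gradient and optimizer-update passes separately and then copying the
        // updated parameters back to host memory. `batch_size`, `exec_options` and the
        // per-call completion handlers are assumed as in the execute sketch above.
        //
        //     unsafe {
        //         training_graph.executeForwardWithBatchSize_options_completionHandler(
        //             batch_size, exec_options, fwd_handler,
        //         );
        //         training_graph.executeGradientWithBatchSize_options_completionHandler(
        //             batch_size, exec_options, grad_handler,
        //         );
        //         training_graph.executeOptimizerUpdateWithOptions_completionHandler(
        //             exec_options, opt_handler,
        //         );
        //         training_graph.synchronizeUpdates();
        //     }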

        #[cfg(feature = "MLCTensorParameter")]
        /// Set the input tensor parameters that will also be updated by the optimizer
        ///
        /// These represent the list of input tensors to be updated when the optimizer update is executed.
        /// Weights, bias or beta, gamma tensors are not included in this list. MLCompute automatically
        /// adds them to the parameter list based on whether the layer is marked as updatable or not.
        ///
        /// Parameter `parameters`: The list of input tensors to be updated by the optimizer
        ///
        /// Returns: A boolean indicating success or failure
        #[deprecated]
        #[unsafe(method(setTrainingTensorParameters:))]
        #[unsafe(method_family = none)]
        pub unsafe fn setTrainingTensorParameters(
            &self,
            parameters: &NSArray<MLCTensorParameter>,
        ) -> bool;

        #[cfg(all(
            feature = "MLCTensor",
            feature = "MLCTensorData",
            feature = "MLCTensorOptimizerDeviceData"
        ))]
        /// Associates the given optimizer data and device data buffers with the tensor.
        /// Returns true if the data is successfully associated with the tensor and copied to the device.
        ///
        /// The caller must guarantee the lifetime of the underlying memory of `data` for the entirety
        /// of the tensor's lifetime. The `deviceData` buffers are allocated by MLCompute. This method
        /// must be called before executeOptimizerUpdateWithOptions or executeWithInputsData is called
        /// for the training graph. We recommend using this method instead of [MLCTensor bindOptimizerData],
        /// especially if the optimizer update is being called multiple times for each batch.
        ///
        /// Parameter `data`: The optimizer data to be associated with the tensor
        ///
        /// Parameter `deviceData`: The optimizer device data to be associated with the tensor
        ///
        /// Parameter `tensor`: The tensor
        ///
        /// Returns: A Boolean value indicating whether the data is successfully associated with the tensor.
        #[deprecated]
        #[unsafe(method(bindOptimizerData:deviceData:withTensor:))]
        #[unsafe(method_family = none)]
        pub unsafe fn bindOptimizerData_deviceData_withTensor(
            &self,
            data: &NSArray<MLCTensorData>,
            device_data: Option<&NSArray<MLCTensorOptimizerDeviceData>>,
            tensor: &MLCTensor,
        ) -> bool;
    );
}

/// Methods declared on superclass `MLCGraph`.
#[cfg(feature = "MLCGraph")]
impl MLCTrainingGraph {
    extern_methods!(
        /// Creates a new graph.
        ///
        /// Returns: A new graph.
        #[deprecated]
        #[unsafe(method(graph))]
        #[unsafe(method_family = none)]
        pub unsafe fn graph() -> Retained<Self>;
    );
}

/// Methods declared on superclass `NSObject`.
#[cfg(feature = "MLCGraph")]
impl MLCTrainingGraph {
    extern_methods!(
        #[unsafe(method(init))]
        #[unsafe(method_family = init)]
        pub unsafe fn init(this: Allocated<Self>) -> Retained<Self>;

        #[unsafe(method(new))]
        #[unsafe(method_family = new)]
        pub unsafe fn new() -> Retained<Self>;
    );
}