#![allow(clippy::all)]
use crate::{Device, Kind, Layout, Scalar, TchError, Tensor};
use std::borrow::Borrow;
use std::convert::Into;
use torch_sys_plus::c_generated::*;
use torch_sys_plus::*;

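// Helper functions used by the generated bindings below: they convert a slice of
// (optionally absent) borrowed tensors into a vector of raw `C_tensor` pointers for
// the C API, with `None` mapped to a null pointer. The resulting `Vec` is only used
// as a temporary inside the call expressions further down, so the pointers stay
// valid for the duration of each FFI call.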
fn ptr_list_opt<T: Borrow<Tensor>>(l: &[Option<T>]) -> Vec<*mut C_tensor> {
    l.iter().map(|x| x.as_ref().map_or(std::ptr::null_mut(), |x| x.borrow().c_tensor)).collect()
}

fn ptr_list<T: Borrow<Tensor>>(l: &[T]) -> Vec<*mut C_tensor> {
    l.iter().map(|x| x.borrow().c_tensor).collect()
}

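// Each fallible `f_*` wrapper below follows the same pattern: allocate a fixed-size
// array of null `C_tensor` pointers, call the corresponding generated `atg_*` C
// function inside `unsafe_torch_err!` (which surfaces libtorch failures as
// `TchError`), and wrap the returned raw handles in `Tensor` values. Booleans are
// passed as 0/1 integers, optional tensors as null pointers, and `Kind`/`Device`
// values via their C integer codes.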
impl Tensor {
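    // In-place bitwise and shift helpers corresponding to ATen's double-underscore
    // operators (`__and__`, `__iand__`, `__ilshift__`, ...). The `f_` prefix marks
    // the fallible variants that return `Result<Tensor, TchError>`.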
    pub fn f_internal_and_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___and__(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.into().c_scalar
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_and_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___and__tensor_(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_iand_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___iand__(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.into().c_scalar
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_iand_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___iand__tensor_(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_ilshift_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___ilshift__(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.into().c_scalar
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_ilshift_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___ilshift__tensor_(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_ior_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___ior__(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.into().c_scalar
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_ior_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___ior__tensor_(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_irshift_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___irshift__(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.into().c_scalar
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_irshift_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___irshift__tensor_(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_ixor_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___ixor__(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.into().c_scalar
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_ixor_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___ixor__tensor_(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_lshift_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___lshift__(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.into().c_scalar
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_lshift_scalar_out_<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        other: S,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___lshift__scalar_out_(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            self.c_tensor,
            other.into().c_scalar
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_lshift_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___lshift__tensor_(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_lshift_tensor_out_(
        &self,
        out: &Tensor,
        other: &Tensor,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___lshift__tensor_out_(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            self.c_tensor,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_or_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___or__(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_or_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___or__tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_rshift_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___rshift__(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.into().c_scalar
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_rshift_scalar_out_<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        other: S,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___rshift__scalar_out_(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            self.c_tensor,
            other.into().c_scalar
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_rshift_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___rshift__tensor_(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_rshift_tensor_out_(
        &self,
        out: &Tensor,
        other: &Tensor,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___rshift__tensor_out_(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            self.c_tensor,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_xor_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___xor__(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.into().c_scalar
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_xor_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg___xor__tensor_(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

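    /// Fallible wrapper for the `_adaptive_avg_pool2d` ATen operator.
    ///
    /// A minimal usage sketch (illustrative only; it assumes a `Tensor::rand`
    /// constructor and `Kind`/`Device` values as in `tch`):
    ///
    /// ```ignore
    /// let x = Tensor::rand(&[1, 3, 8, 8], (Kind::Float, Device::Cpu));
    /// let pooled = x.f_internal_adaptive_avg_pool2d(&[4, 4])?;
    /// ```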
    pub fn f_internal_adaptive_avg_pool2d(
        &self,
        output_size: impl IntList,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg__adaptive_avg_pool2d(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            output_size.as_ptr(),
            output_size.len_i32()
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_adaptive_avg_pool2d_backward(
        &self,
        grad_output: &Tensor,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg__adaptive_avg_pool2d_backward(
            c_tensors.as_mut_ptr(),
            grad_output.c_tensor,
            self.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_adaptive_avg_pool2d_backward_out(
        &self,
        out: &Tensor,
        grad_output: &Tensor,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg__adaptive_avg_pool2d_backward_out(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            grad_output.c_tensor,
            self.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_adaptive_avg_pool2d_out(
        &self,
        out: &Tensor,
        output_size: impl IntList,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg__adaptive_avg_pool2d_out(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            self.c_tensor,
            output_size.as_ptr(),
            output_size.len_i32()
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_adaptive_avg_pool3d(
        &self,
        output_size: impl IntList,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg__adaptive_avg_pool3d(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            output_size.as_ptr(),
            output_size.len_i32()
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_adaptive_avg_pool3d_backward(
        &self,
        grad_output: &Tensor,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg__adaptive_avg_pool3d_backward(
            c_tensors.as_mut_ptr(),
            grad_output.c_tensor,
            self.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_adaptive_avg_pool3d_backward_out(
        &self,
        out: &Tensor,
        grad_output: &Tensor,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg__adaptive_avg_pool3d_backward_out(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            grad_output.c_tensor,
            self.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_internal_adaptive_avg_pool3d_out(
        &self,
        out: &Tensor,
        output_size: impl IntList,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg__adaptive_avg_pool3d_out(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            self.c_tensor,
            output_size.as_ptr(),
            output_size.len_i32()
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

386 pub fn f_internal_add_batch_dim(&self, batch_dim: i64, level: i64) -> Result<Tensor, TchError> {
387 let mut c_tensors = [std::ptr::null_mut(); 1];
388 unsafe_torch_err!(atg__add_batch_dim(
389 c_tensors.as_mut_ptr(),
390 self.c_tensor,
391 batch_dim,
392 level
393 ));
394 Ok(Tensor { c_tensor: c_tensors[0] })
395 }
396
397 pub fn f_internal_add_relu(&self, other: &Tensor) -> Result<Tensor, TchError> {
398 let mut c_tensors = [std::ptr::null_mut(); 1];
399 unsafe_torch_err!(atg__add_relu(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
400 Ok(Tensor { c_tensor: c_tensors[0] })
401 }
402
403 pub fn f_internal_add_relu_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
404 let mut c_tensors = [std::ptr::null_mut(); 1];
405 unsafe_torch_err!(atg__add_relu_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
406 Ok(Tensor { c_tensor: c_tensors[0] })
407 }
408
409 pub fn f_internal_add_relu_out(
410 &self,
411 out: &Tensor,
412 other: &Tensor,
413 ) -> Result<Tensor, TchError> {
414 let mut c_tensors = [std::ptr::null_mut(); 1];
415 unsafe_torch_err!(atg__add_relu_out(
416 c_tensors.as_mut_ptr(),
417 out.c_tensor,
418 self.c_tensor,
419 other.c_tensor
420 ));
421 Ok(Tensor { c_tensor: c_tensors[0] })
422 }
423
424 pub fn f_internal_add_relu_scalar<S: Into<Scalar>>(
425 &self,
426 other: S,
427 ) -> Result<Tensor, TchError> {
428 let mut c_tensors = [std::ptr::null_mut(); 1];
429 unsafe_torch_err!(atg__add_relu_scalar(
430 c_tensors.as_mut_ptr(),
431 self.c_tensor,
432 other.into().c_scalar
433 ));
434 Ok(Tensor { c_tensor: c_tensors[0] })
435 }
436
437 pub fn f_internal_add_relu_scalar_<S: Into<Scalar>>(
438 &mut self,
439 other: S,
440 ) -> Result<Tensor, TchError> {
441 let mut c_tensors = [std::ptr::null_mut(); 1];
442 unsafe_torch_err!(atg__add_relu_scalar_(
443 c_tensors.as_mut_ptr(),
444 self.c_tensor,
445 other.into().c_scalar
446 ));
447 Ok(Tensor { c_tensor: c_tensors[0] })
448 }
449
450 pub fn f_internal_add_relu_scalar_out<S: Into<Scalar>>(
451 &self,
452 out: &Tensor,
453 other: S,
454 ) -> Result<Tensor, TchError> {
455 let mut c_tensors = [std::ptr::null_mut(); 1];
456 unsafe_torch_err!(atg__add_relu_scalar_out(
457 c_tensors.as_mut_ptr(),
458 out.c_tensor,
459 self.c_tensor,
460 other.into().c_scalar
461 ));
462 Ok(Tensor { c_tensor: c_tensors[0] })
463 }
464
465 pub fn f_internal_addmm_activation(
466 &self,
467 mat1: &Tensor,
468 mat2: &Tensor,
469 use_gelu: bool,
470 ) -> Result<Tensor, TchError> {
471 let mut c_tensors = [std::ptr::null_mut(); 1];
472 unsafe_torch_err!(atg__addmm_activation(
473 c_tensors.as_mut_ptr(),
474 self.c_tensor,
475 mat1.c_tensor,
476 mat2.c_tensor,
477 if use_gelu { 1 } else { 0 }
478 ));
479 Ok(Tensor { c_tensor: c_tensors[0] })
480 }
481
482 pub fn f_internal_addmm_activation_out(
483 &self,
484 out: &Tensor,
485 mat1: &Tensor,
486 mat2: &Tensor,
487 use_gelu: bool,
488 ) -> Result<Tensor, TchError> {
489 let mut c_tensors = [std::ptr::null_mut(); 1];
490 unsafe_torch_err!(atg__addmm_activation_out(
491 c_tensors.as_mut_ptr(),
492 out.c_tensor,
493 self.c_tensor,
494 mat1.c_tensor,
495 mat2.c_tensor,
496 if use_gelu { 1 } else { 0 }
497 ));
498 Ok(Tensor { c_tensor: c_tensors[0] })
499 }
500
501 pub fn f_internal_aminmax(&self) -> Result<(Tensor, Tensor), TchError> {
502 let mut c_tensors = [std::ptr::null_mut(); 2];
503 unsafe_torch_err!(atg__aminmax(c_tensors.as_mut_ptr(), self.c_tensor));
504 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
505 }
506
507 pub fn f_internal_aminmax_dim(
508 &self,
509 dim: i64,
510 keepdim: bool,
511 ) -> Result<(Tensor, Tensor), TchError> {
512 let mut c_tensors = [std::ptr::null_mut(); 2];
513 unsafe_torch_err!(atg__aminmax_dim(
514 c_tensors.as_mut_ptr(),
515 self.c_tensor,
516 dim,
517 if keepdim { 1 } else { 0 }
518 ));
519 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
520 }
521
522 pub fn f_internal_aminmax_dim_out(
523 &self,
524 out0: &Tensor,
525 out1: &Tensor,
526 dim: i64,
527 keepdim: bool,
528 ) -> Result<(Tensor, Tensor), TchError> {
529 let mut c_tensors = [std::ptr::null_mut(); 2];
530 unsafe_torch_err!(atg__aminmax_dim_out(
531 c_tensors.as_mut_ptr(),
532 out0.c_tensor,
533 out1.c_tensor,
534 self.c_tensor,
535 dim,
536 if keepdim { 1 } else { 0 }
537 ));
538 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
539 }
540
541 pub fn f_internal_aminmax_out(
542 &self,
543 out0: &Tensor,
544 out1: &Tensor,
545 ) -> Result<(Tensor, Tensor), TchError> {
546 let mut c_tensors = [std::ptr::null_mut(); 2];
547 unsafe_torch_err!(atg__aminmax_out(
548 c_tensors.as_mut_ptr(),
549 out0.c_tensor,
550 out1.c_tensor,
551 self.c_tensor
552 ));
553 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
554 }
555
556 pub fn f_internal_amp_update_scale(
557 &self,
558 growth_tracker: &Tensor,
559 found_inf: &Tensor,
560 scale_growth_factor: f64,
561 scale_backoff_factor: f64,
562 growth_interval: i64,
563 ) -> Result<(Tensor, Tensor), TchError> {
564 let mut c_tensors = [std::ptr::null_mut(); 2];
565 unsafe_torch_err!(atg__amp_update_scale(
566 c_tensors.as_mut_ptr(),
567 self.c_tensor,
568 growth_tracker.c_tensor,
569 found_inf.c_tensor,
570 scale_growth_factor,
571 scale_backoff_factor,
572 growth_interval
573 ));
574 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
575 }
576
577 pub fn f_internal_amp_update_scale_(
578 &mut self,
579 growth_tracker: &Tensor,
580 found_inf: &Tensor,
581 scale_growth_factor: f64,
582 scale_backoff_factor: f64,
583 growth_interval: i64,
584 ) -> Result<Tensor, TchError> {
585 let mut c_tensors = [std::ptr::null_mut(); 1];
586 unsafe_torch_err!(atg__amp_update_scale_(
587 c_tensors.as_mut_ptr(),
588 self.c_tensor,
589 growth_tracker.c_tensor,
590 found_inf.c_tensor,
591 scale_growth_factor,
592 scale_backoff_factor,
593 growth_interval
594 ));
595 Ok(Tensor { c_tensor: c_tensors[0] })
596 }
597
598 pub fn f_internal_amp_update_scale_out(
599 &self,
600 out: &Tensor,
601 growth_tracker: &Tensor,
602 found_inf: &Tensor,
603 scale_growth_factor: f64,
604 scale_backoff_factor: f64,
605 growth_interval: i64,
606 ) -> Result<Tensor, TchError> {
607 let mut c_tensors = [std::ptr::null_mut(); 1];
608 unsafe_torch_err!(atg__amp_update_scale_out(
609 c_tensors.as_mut_ptr(),
610 out.c_tensor,
611 self.c_tensor,
612 growth_tracker.c_tensor,
613 found_inf.c_tensor,
614 scale_growth_factor,
615 scale_backoff_factor,
616 growth_interval
617 ));
618 Ok(Tensor { c_tensor: c_tensors[0] })
619 }
620
621 pub fn f_internal_assert_scalar<S: Into<Scalar>>(
622 self_scalar: S,
623 assert_msg: &str,
624 ) -> Result<(), TchError> {
625 unsafe_torch_err!(atg__assert_scalar(
626 self_scalar.into().c_scalar,
627 assert_msg.as_ptr(),
628 assert_msg.len() as i32
629 ));
630 Ok(())
631 }
632
633 pub fn f_internal_assert_tensor_metadata(
634 a: &Tensor,
635 size: impl IntListOption,
636 stride: impl IntListOption,
637 dtype: impl Into<Option<Kind>>,
638 ) -> Result<(), TchError> {
639 unsafe_torch_err!(atg__assert_tensor_metadata(
640 a.c_tensor,
641 size.as_ptr(),
642 size.len_i32(),
643 stride.as_ptr(),
644 stride.len_i32(),
645 dtype.into().map_or(-1, |s| s.c_int())
646 ));
647 Ok(())
648 }
649
650 pub fn f_internal_autocast_to_full_precision(
651 &self,
652 cuda_enabled: bool,
653 cpu_enabled: bool,
654 ) -> Result<Tensor, TchError> {
655 let mut c_tensors = [std::ptr::null_mut(); 1];
656 unsafe_torch_err!(atg__autocast_to_full_precision(
657 c_tensors.as_mut_ptr(),
658 self.c_tensor,
659 if cuda_enabled { 1 } else { 0 },
660 if cpu_enabled { 1 } else { 0 }
661 ));
662 Ok(Tensor { c_tensor: c_tensors[0] })
663 }
664
665 pub fn f_internal_autocast_to_reduced_precision(
666 &self,
667 cuda_enabled: bool,
668 cpu_enabled: bool,
669 cuda_dtype: Kind,
670 cpu_dtype: Kind,
671 ) -> Result<Tensor, TchError> {
672 let mut c_tensors = [std::ptr::null_mut(); 1];
673 unsafe_torch_err!(atg__autocast_to_reduced_precision(
674 c_tensors.as_mut_ptr(),
675 self.c_tensor,
676 if cuda_enabled { 1 } else { 0 },
677 if cpu_enabled { 1 } else { 0 },
678 cuda_dtype.c_int(),
679 cpu_dtype.c_int()
680 ));
681 Ok(Tensor { c_tensor: c_tensors[0] })
682 }
683
684 pub fn f_internal_batch_norm_no_update<T: Borrow<Tensor>>(
685 &self,
686 weight: Option<T>,
687 bias: Option<T>,
688 running_mean: Option<T>,
689 running_var: Option<T>,
690 momentum: f64,
691 eps: f64,
692 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
693 let mut c_tensors = [std::ptr::null_mut(); 4];
694 unsafe_torch_err!(atg__batch_norm_no_update(
695 c_tensors.as_mut_ptr(),
696 self.c_tensor,
697 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
698 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
699 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
700 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
701 momentum,
702 eps
703 ));
704 Ok((
705 Tensor { c_tensor: c_tensors[0] },
706 Tensor { c_tensor: c_tensors[1] },
707 Tensor { c_tensor: c_tensors[2] },
708 Tensor { c_tensor: c_tensors[3] },
709 ))
710 }
711
712 pub fn f_internal_batch_norm_no_update_out<T: Borrow<Tensor>>(
713 &self,
714 out0: &Tensor,
715 out1: &Tensor,
716 out2: &Tensor,
717 out3: &Tensor,
718 weight: Option<T>,
719 bias: Option<T>,
720 running_mean: Option<T>,
721 running_var: Option<T>,
722 momentum: f64,
723 eps: f64,
724 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
725 let mut c_tensors = [std::ptr::null_mut(); 4];
726 unsafe_torch_err!(atg__batch_norm_no_update_out(
727 c_tensors.as_mut_ptr(),
728 out0.c_tensor,
729 out1.c_tensor,
730 out2.c_tensor,
731 out3.c_tensor,
732 self.c_tensor,
733 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
734 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
735 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
736 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
737 momentum,
738 eps
739 ));
740 Ok((
741 Tensor { c_tensor: c_tensors[0] },
742 Tensor { c_tensor: c_tensors[1] },
743 Tensor { c_tensor: c_tensors[2] },
744 Tensor { c_tensor: c_tensors[3] },
745 ))
746 }
747
748 pub fn f_internal_batch_norm_with_update<T: Borrow<Tensor>>(
749 &self,
750 weight: Option<T>,
751 bias: Option<T>,
752 running_mean: &Tensor,
753 running_var: &Tensor,
754 momentum: f64,
755 eps: f64,
756 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
757 let mut c_tensors = [std::ptr::null_mut(); 4];
758 unsafe_torch_err!(atg__batch_norm_with_update(
759 c_tensors.as_mut_ptr(),
760 self.c_tensor,
761 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
762 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
763 running_mean.c_tensor,
764 running_var.c_tensor,
765 momentum,
766 eps
767 ));
768 Ok((
769 Tensor { c_tensor: c_tensors[0] },
770 Tensor { c_tensor: c_tensors[1] },
771 Tensor { c_tensor: c_tensors[2] },
772 Tensor { c_tensor: c_tensors[3] },
773 ))
774 }
775
776 pub fn f_internal_batch_norm_with_update_functional<T: Borrow<Tensor>>(
777 &self,
778 weight: Option<T>,
779 bias: Option<T>,
780 running_mean: &Tensor,
781 running_var: &Tensor,
782 momentum: f64,
783 eps: f64,
784 ) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
785 let mut c_tensors = [std::ptr::null_mut(); 6];
786 unsafe_torch_err!(atg__batch_norm_with_update_functional(
787 c_tensors.as_mut_ptr(),
788 self.c_tensor,
789 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
790 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
791 running_mean.c_tensor,
792 running_var.c_tensor,
793 momentum,
794 eps
795 ));
796 Ok((
797 Tensor { c_tensor: c_tensors[0] },
798 Tensor { c_tensor: c_tensors[1] },
799 Tensor { c_tensor: c_tensors[2] },
800 Tensor { c_tensor: c_tensors[3] },
801 Tensor { c_tensor: c_tensors[4] },
802 Tensor { c_tensor: c_tensors[5] },
803 ))
804 }
805
806 pub fn f_internal_batch_norm_with_update_out<T: Borrow<Tensor>>(
807 &self,
808 out: &Tensor,
809 save_mean: &Tensor,
810 save_invstd: &Tensor,
811 reserve: &Tensor,
812 weight: Option<T>,
813 bias: Option<T>,
814 running_mean: &Tensor,
815 running_var: &Tensor,
816 momentum: f64,
817 eps: f64,
818 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
819 let mut c_tensors = [std::ptr::null_mut(); 4];
820 unsafe_torch_err!(atg__batch_norm_with_update_out(
821 c_tensors.as_mut_ptr(),
822 out.c_tensor,
823 save_mean.c_tensor,
824 save_invstd.c_tensor,
825 reserve.c_tensor,
826 self.c_tensor,
827 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
828 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
829 running_mean.c_tensor,
830 running_var.c_tensor,
831 momentum,
832 eps
833 ));
834 Ok((
835 Tensor { c_tensor: c_tensors[0] },
836 Tensor { c_tensor: c_tensors[1] },
837 Tensor { c_tensor: c_tensors[2] },
838 Tensor { c_tensor: c_tensors[3] },
839 ))
840 }
841
842 pub fn f_internal_cast_byte(&self, non_blocking: bool) -> Result<Tensor, TchError> {
843 let mut c_tensors = [std::ptr::null_mut(); 1];
844 unsafe_torch_err!(atg__cast_byte(
845 c_tensors.as_mut_ptr(),
846 self.c_tensor,
847 if non_blocking { 1 } else { 0 }
848 ));
849 Ok(Tensor { c_tensor: c_tensors[0] })
850 }
851
852 pub fn f_internal_cast_char(&self, non_blocking: bool) -> Result<Tensor, TchError> {
853 let mut c_tensors = [std::ptr::null_mut(); 1];
854 unsafe_torch_err!(atg__cast_char(
855 c_tensors.as_mut_ptr(),
856 self.c_tensor,
857 if non_blocking { 1 } else { 0 }
858 ));
859 Ok(Tensor { c_tensor: c_tensors[0] })
860 }
861
862 pub fn f_internal_cast_double(&self, non_blocking: bool) -> Result<Tensor, TchError> {
863 let mut c_tensors = [std::ptr::null_mut(); 1];
864 unsafe_torch_err!(atg__cast_double(
865 c_tensors.as_mut_ptr(),
866 self.c_tensor,
867 if non_blocking { 1 } else { 0 }
868 ));
869 Ok(Tensor { c_tensor: c_tensors[0] })
870 }
871
872 pub fn f_internal_cast_float(&self, non_blocking: bool) -> Result<Tensor, TchError> {
873 let mut c_tensors = [std::ptr::null_mut(); 1];
874 unsafe_torch_err!(atg__cast_float(
875 c_tensors.as_mut_ptr(),
876 self.c_tensor,
877 if non_blocking { 1 } else { 0 }
878 ));
879 Ok(Tensor { c_tensor: c_tensors[0] })
880 }
881
882 pub fn f_internal_cast_half(&self, non_blocking: bool) -> Result<Tensor, TchError> {
883 let mut c_tensors = [std::ptr::null_mut(); 1];
884 unsafe_torch_err!(atg__cast_half(
885 c_tensors.as_mut_ptr(),
886 self.c_tensor,
887 if non_blocking { 1 } else { 0 }
888 ));
889 Ok(Tensor { c_tensor: c_tensors[0] })
890 }
891
892 pub fn f_internal_cast_int(&self, non_blocking: bool) -> Result<Tensor, TchError> {
893 let mut c_tensors = [std::ptr::null_mut(); 1];
894 unsafe_torch_err!(atg__cast_int(
895 c_tensors.as_mut_ptr(),
896 self.c_tensor,
897 if non_blocking { 1 } else { 0 }
898 ));
899 Ok(Tensor { c_tensor: c_tensors[0] })
900 }
901
902 pub fn f_internal_cast_long(&self, non_blocking: bool) -> Result<Tensor, TchError> {
903 let mut c_tensors = [std::ptr::null_mut(); 1];
904 unsafe_torch_err!(atg__cast_long(
905 c_tensors.as_mut_ptr(),
906 self.c_tensor,
907 if non_blocking { 1 } else { 0 }
908 ));
909 Ok(Tensor { c_tensor: c_tensors[0] })
910 }
911
912 pub fn f_internal_cast_short(&self, non_blocking: bool) -> Result<Tensor, TchError> {
913 let mut c_tensors = [std::ptr::null_mut(); 1];
914 unsafe_torch_err!(atg__cast_short(
915 c_tensors.as_mut_ptr(),
916 self.c_tensor,
917 if non_blocking { 1 } else { 0 }
918 ));
919 Ok(Tensor { c_tensor: c_tensors[0] })
920 }
921
922 pub fn f_internal_cdist_backward(
923 grad: &Tensor,
924 x1: &Tensor,
925 x2: &Tensor,
926 p: f64,
927 cdist: &Tensor,
928 ) -> Result<Tensor, TchError> {
929 let mut c_tensors = [std::ptr::null_mut(); 1];
930 unsafe_torch_err!(atg__cdist_backward(
931 c_tensors.as_mut_ptr(),
932 grad.c_tensor,
933 x1.c_tensor,
934 x2.c_tensor,
935 p,
936 cdist.c_tensor
937 ));
938 Ok(Tensor { c_tensor: c_tensors[0] })
939 }
940
941 pub fn f_internal_cdist_backward_out(
942 out: &Tensor,
943 grad: &Tensor,
944 x1: &Tensor,
945 x2: &Tensor,
946 p: f64,
947 cdist: &Tensor,
948 ) -> Result<Tensor, TchError> {
949 let mut c_tensors = [std::ptr::null_mut(); 1];
950 unsafe_torch_err!(atg__cdist_backward_out(
951 c_tensors.as_mut_ptr(),
952 out.c_tensor,
953 grad.c_tensor,
954 x1.c_tensor,
955 x2.c_tensor,
956 p,
957 cdist.c_tensor
958 ));
959 Ok(Tensor { c_tensor: c_tensors[0] })
960 }
961
962 pub fn f_internal_cholesky_solve_helper(
963 &self,
964 a: &Tensor,
965 upper: bool,
966 ) -> Result<Tensor, TchError> {
967 let mut c_tensors = [std::ptr::null_mut(); 1];
968 unsafe_torch_err!(atg__cholesky_solve_helper(
969 c_tensors.as_mut_ptr(),
970 self.c_tensor,
971 a.c_tensor,
972 if upper { 1 } else { 0 }
973 ));
974 Ok(Tensor { c_tensor: c_tensors[0] })
975 }
976
977 pub fn f_internal_cholesky_solve_helper_out(
978 &self,
979 out: &Tensor,
980 a: &Tensor,
981 upper: bool,
982 ) -> Result<Tensor, TchError> {
983 let mut c_tensors = [std::ptr::null_mut(); 1];
984 unsafe_torch_err!(atg__cholesky_solve_helper_out(
985 c_tensors.as_mut_ptr(),
986 out.c_tensor,
987 self.c_tensor,
988 a.c_tensor,
989 if upper { 1 } else { 0 }
990 ));
991 Ok(Tensor { c_tensor: c_tensors[0] })
992 }
993
994 pub fn f_internal_chunk_cat<T: Borrow<Tensor>>(
995 tensors: &[T],
996 dim: i64,
997 num_chunks: i64,
998 ) -> Result<Tensor, TchError> {
999 let mut c_tensors = [std::ptr::null_mut(); 1];
1000 unsafe_torch_err!(atg__chunk_cat(
1001 c_tensors.as_mut_ptr(),
1002 ptr_list(tensors).as_ptr(),
1003 tensors.len() as i32,
1004 dim,
1005 num_chunks
1006 ));
1007 Ok(Tensor { c_tensor: c_tensors[0] })
1008 }
1009
1010 pub fn f_internal_chunk_cat_out<T: Borrow<Tensor>>(
1011 out: &Tensor,
1012 tensors: &[T],
1013 dim: i64,
1014 num_chunks: i64,
1015 ) -> Result<Tensor, TchError> {
1016 let mut c_tensors = [std::ptr::null_mut(); 1];
1017 unsafe_torch_err!(atg__chunk_cat_out(
1018 c_tensors.as_mut_ptr(),
1019 out.c_tensor,
1020 ptr_list(tensors).as_ptr(),
1021 tensors.len() as i32,
1022 dim,
1023 num_chunks
1024 ));
1025 Ok(Tensor { c_tensor: c_tensors[0] })
1026 }
1027
1028 pub fn f_internal_coalesce(&self) -> Result<Tensor, TchError> {
1029 let mut c_tensors = [std::ptr::null_mut(); 1];
1030 unsafe_torch_err!(atg__coalesce(c_tensors.as_mut_ptr(), self.c_tensor));
1031 Ok(Tensor { c_tensor: c_tensors[0] })
1032 }
1033
1034 pub fn f_internal_coalesce_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
1035 let mut c_tensors = [std::ptr::null_mut(); 1];
1036 unsafe_torch_err!(atg__coalesce_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
1037 Ok(Tensor { c_tensor: c_tensors[0] })
1038 }
1039
1040 pub fn f_internal_coalesced(&self, coalesced: bool) -> Result<Tensor, TchError> {
1041 let mut c_tensors = [std::ptr::null_mut(); 1];
1042 unsafe_torch_err!(atg__coalesced(
1043 c_tensors.as_mut_ptr(),
1044 self.c_tensor,
1045 if coalesced { 1 } else { 0 }
1046 ));
1047 Ok(Tensor { c_tensor: c_tensors[0] })
1048 }
1049
1050 pub fn f_internal_coalesced_(&mut self, coalesced: bool) -> Result<Tensor, TchError> {
1051 let mut c_tensors = [std::ptr::null_mut(); 1];
1052 unsafe_torch_err!(atg__coalesced_(
1053 c_tensors.as_mut_ptr(),
1054 self.c_tensor,
1055 if coalesced { 1 } else { 0 }
1056 ));
1057 Ok(Tensor { c_tensor: c_tensors[0] })
1058 }
1059
1060 pub fn f_internal_coalesced_out(
1061 &self,
1062 out: &Tensor,
1063 coalesced: bool,
1064 ) -> Result<Tensor, TchError> {
1065 let mut c_tensors = [std::ptr::null_mut(); 1];
1066 unsafe_torch_err!(atg__coalesced_out(
1067 c_tensors.as_mut_ptr(),
1068 out.c_tensor,
1069 self.c_tensor,
1070 if coalesced { 1 } else { 0 }
1071 ));
1072 Ok(Tensor { c_tensor: c_tensors[0] })
1073 }
1074
1075 pub fn f_internal_compute_linear_combination(
1076 &self,
1077 coefficients: &Tensor,
1078 ) -> Result<Tensor, TchError> {
1079 let mut c_tensors = [std::ptr::null_mut(); 1];
1080 unsafe_torch_err!(atg__compute_linear_combination(
1081 c_tensors.as_mut_ptr(),
1082 self.c_tensor,
1083 coefficients.c_tensor
1084 ));
1085 Ok(Tensor { c_tensor: c_tensors[0] })
1086 }
1087
1088 pub fn f_internal_compute_linear_combination_out(
1089 &self,
1090 out: &Tensor,
1091 coefficients: &Tensor,
1092 ) -> Result<Tensor, TchError> {
1093 let mut c_tensors = [std::ptr::null_mut(); 1];
1094 unsafe_torch_err!(atg__compute_linear_combination_out(
1095 c_tensors.as_mut_ptr(),
1096 out.c_tensor,
1097 self.c_tensor,
1098 coefficients.c_tensor
1099 ));
1100 Ok(Tensor { c_tensor: c_tensors[0] })
1101 }
1102
1103 pub fn f_internal_conj(&self) -> Result<Tensor, TchError> {
1104 let mut c_tensors = [std::ptr::null_mut(); 1];
1105 unsafe_torch_err!(atg__conj(c_tensors.as_mut_ptr(), self.c_tensor));
1106 Ok(Tensor { c_tensor: c_tensors[0] })
1107 }
1108
1109 pub fn f_internal_conj_copy(&self) -> Result<Tensor, TchError> {
1110 let mut c_tensors = [std::ptr::null_mut(); 1];
1111 unsafe_torch_err!(atg__conj_copy(c_tensors.as_mut_ptr(), self.c_tensor));
1112 Ok(Tensor { c_tensor: c_tensors[0] })
1113 }
1114
1115 pub fn f_internal_conj_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
1116 let mut c_tensors = [std::ptr::null_mut(); 1];
1117 unsafe_torch_err!(atg__conj_copy_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
1118 Ok(Tensor { c_tensor: c_tensors[0] })
1119 }
1120
1121 pub fn f_internal_conj_physical(&self) -> Result<Tensor, TchError> {
1122 let mut c_tensors = [std::ptr::null_mut(); 1];
1123 unsafe_torch_err!(atg__conj_physical(c_tensors.as_mut_ptr(), self.c_tensor));
1124 Ok(Tensor { c_tensor: c_tensors[0] })
1125 }
1126
1127 pub fn f_internal_conj_physical_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
1128 let mut c_tensors = [std::ptr::null_mut(); 1];
1129 unsafe_torch_err!(atg__conj_physical_out(
1130 c_tensors.as_mut_ptr(),
1131 out.c_tensor,
1132 self.c_tensor
1133 ));
1134 Ok(Tensor { c_tensor: c_tensors[0] })
1135 }
1136
1137 pub fn f_internal_conv_depthwise2d<T: Borrow<Tensor>>(
1138 &self,
1139 weight: &Tensor,
1140 kernel_size: impl IntList,
1141 bias: Option<T>,
1142 stride: impl IntList,
1143 padding: impl IntList,
1144 dilation: impl IntList,
1145 ) -> Result<Tensor, TchError> {
1146 let mut c_tensors = [std::ptr::null_mut(); 1];
1147 unsafe_torch_err!(atg__conv_depthwise2d(
1148 c_tensors.as_mut_ptr(),
1149 self.c_tensor,
1150 weight.c_tensor,
1151 kernel_size.as_ptr(),
1152 kernel_size.len_i32(),
1153 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
1154 stride.as_ptr(),
1155 stride.len_i32(),
1156 padding.as_ptr(),
1157 padding.len_i32(),
1158 dilation.as_ptr(),
1159 dilation.len_i32()
1160 ));
1161 Ok(Tensor { c_tensor: c_tensors[0] })
1162 }
1163
1164 pub fn f_internal_conv_depthwise2d_out<T: Borrow<Tensor>>(
1165 &self,
1166 out: &Tensor,
1167 weight: &Tensor,
1168 kernel_size: impl IntList,
1169 bias: Option<T>,
1170 stride: impl IntList,
1171 padding: impl IntList,
1172 dilation: impl IntList,
1173 ) -> Result<Tensor, TchError> {
1174 let mut c_tensors = [std::ptr::null_mut(); 1];
1175 unsafe_torch_err!(atg__conv_depthwise2d_out(
1176 c_tensors.as_mut_ptr(),
1177 out.c_tensor,
1178 self.c_tensor,
1179 weight.c_tensor,
1180 kernel_size.as_ptr(),
1181 kernel_size.len_i32(),
1182 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
1183 stride.as_ptr(),
1184 stride.len_i32(),
1185 padding.as_ptr(),
1186 padding.len_i32(),
1187 dilation.as_ptr(),
1188 dilation.len_i32()
1189 ));
1190 Ok(Tensor { c_tensor: c_tensors[0] })
1191 }
1192
1193 pub fn f_internal_convert_indices_from_coo_to_csr(
1194 &self,
1195 size: i64,
1196 out_int32: bool,
1197 ) -> Result<Tensor, TchError> {
1198 let mut c_tensors = [std::ptr::null_mut(); 1];
1199 unsafe_torch_err!(atg__convert_indices_from_coo_to_csr(
1200 c_tensors.as_mut_ptr(),
1201 self.c_tensor,
1202 size,
1203 if out_int32 { 1 } else { 0 }
1204 ));
1205 Ok(Tensor { c_tensor: c_tensors[0] })
1206 }
1207
1208 pub fn f_internal_convert_indices_from_coo_to_csr_out(
1209 &self,
1210 out: &Tensor,
1211 size: i64,
1212 out_int32: bool,
1213 ) -> Result<Tensor, TchError> {
1214 let mut c_tensors = [std::ptr::null_mut(); 1];
1215 unsafe_torch_err!(atg__convert_indices_from_coo_to_csr_out(
1216 c_tensors.as_mut_ptr(),
1217 out.c_tensor,
1218 self.c_tensor,
1219 size,
1220 if out_int32 { 1 } else { 0 }
1221 ));
1222 Ok(Tensor { c_tensor: c_tensors[0] })
1223 }
1224
1225 pub fn f_internal_convert_indices_from_csr_to_coo(
1226 crow_indices: &Tensor,
1227 col_indices: &Tensor,
1228 out_int32: bool,
1229 transpose: bool,
1230 ) -> Result<Tensor, TchError> {
1231 let mut c_tensors = [std::ptr::null_mut(); 1];
1232 unsafe_torch_err!(atg__convert_indices_from_csr_to_coo(
1233 c_tensors.as_mut_ptr(),
1234 crow_indices.c_tensor,
1235 col_indices.c_tensor,
1236 if out_int32 { 1 } else { 0 },
1237 if transpose { 1 } else { 0 }
1238 ));
1239 Ok(Tensor { c_tensor: c_tensors[0] })
1240 }
1241
1242 pub fn f_internal_convert_indices_from_csr_to_coo_out(
1243 out: &Tensor,
1244 crow_indices: &Tensor,
1245 col_indices: &Tensor,
1246 out_int32: bool,
1247 transpose: bool,
1248 ) -> Result<Tensor, TchError> {
1249 let mut c_tensors = [std::ptr::null_mut(); 1];
1250 unsafe_torch_err!(atg__convert_indices_from_csr_to_coo_out(
1251 c_tensors.as_mut_ptr(),
1252 out.c_tensor,
1253 crow_indices.c_tensor,
1254 col_indices.c_tensor,
1255 if out_int32 { 1 } else { 0 },
1256 if transpose { 1 } else { 0 }
1257 ));
1258 Ok(Tensor { c_tensor: c_tensors[0] })
1259 }
1260
1261 pub fn f_internal_convert_weight_to_int4pack(
1262 &self,
1263 innerktiles: i64,
1264 ) -> Result<Tensor, TchError> {
1265 let mut c_tensors = [std::ptr::null_mut(); 1];
1266 unsafe_torch_err!(atg__convert_weight_to_int4pack(
1267 c_tensors.as_mut_ptr(),
1268 self.c_tensor,
1269 innerktiles
1270 ));
1271 Ok(Tensor { c_tensor: c_tensors[0] })
1272 }
1273
1274 pub fn f_internal_convolution<T: Borrow<Tensor>>(
1275 &self,
1276 weight: &Tensor,
1277 bias: Option<T>,
1278 stride: impl IntList,
1279 padding: impl IntList,
1280 dilation: impl IntList,
1281 transposed: bool,
1282 output_padding: impl IntList,
1283 groups: i64,
1284 benchmark: bool,
1285 deterministic: bool,
1286 cudnn_enabled: bool,
1287 allow_tf32: bool,
1288 ) -> Result<Tensor, TchError> {
1289 let mut c_tensors = [std::ptr::null_mut(); 1];
1290 unsafe_torch_err!(atg__convolution(
1291 c_tensors.as_mut_ptr(),
1292 self.c_tensor,
1293 weight.c_tensor,
1294 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
1295 stride.as_ptr(),
1296 stride.len_i32(),
1297 padding.as_ptr(),
1298 padding.len_i32(),
1299 dilation.as_ptr(),
1300 dilation.len_i32(),
1301 if transposed { 1 } else { 0 },
1302 output_padding.as_ptr(),
1303 output_padding.len_i32(),
1304 groups,
1305 if benchmark { 1 } else { 0 },
1306 if deterministic { 1 } else { 0 },
1307 if cudnn_enabled { 1 } else { 0 },
1308 if allow_tf32 { 1 } else { 0 }
1309 ));
1310 Ok(Tensor { c_tensor: c_tensors[0] })
1311 }
1312
1313 pub fn f_internal_convolution_deprecated<T: Borrow<Tensor>>(
1314 &self,
1315 weight: &Tensor,
1316 bias: Option<T>,
1317 stride: impl IntList,
1318 padding: impl IntList,
1319 dilation: impl IntList,
1320 transposed: bool,
1321 output_padding: impl IntList,
1322 groups: i64,
1323 benchmark: bool,
1324 deterministic: bool,
1325 cudnn_enabled: bool,
1326 ) -> Result<Tensor, TchError> {
1327 let mut c_tensors = [std::ptr::null_mut(); 1];
1328 unsafe_torch_err!(atg__convolution_deprecated(
1329 c_tensors.as_mut_ptr(),
1330 self.c_tensor,
1331 weight.c_tensor,
1332 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
1333 stride.as_ptr(),
1334 stride.len_i32(),
1335 padding.as_ptr(),
1336 padding.len_i32(),
1337 dilation.as_ptr(),
1338 dilation.len_i32(),
1339 if transposed { 1 } else { 0 },
1340 output_padding.as_ptr(),
1341 output_padding.len_i32(),
1342 groups,
1343 if benchmark { 1 } else { 0 },
1344 if deterministic { 1 } else { 0 },
1345 if cudnn_enabled { 1 } else { 0 }
1346 ));
1347 Ok(Tensor { c_tensor: c_tensors[0] })
1348 }
1349
1350 pub fn f_internal_convolution_mode<T: Borrow<Tensor>>(
1351 &self,
1352 weight: &Tensor,
1353 bias: Option<T>,
1354 stride: impl IntList,
1355 padding: &str,
1356 dilation: impl IntList,
1357 groups: i64,
1358 ) -> Result<Tensor, TchError> {
1359 let mut c_tensors = [std::ptr::null_mut(); 1];
1360 unsafe_torch_err!(atg__convolution_mode(
1361 c_tensors.as_mut_ptr(),
1362 self.c_tensor,
1363 weight.c_tensor,
1364 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
1365 stride.as_ptr(),
1366 stride.len_i32(),
1367 padding.as_ptr(),
1368 padding.len() as i32,
1369 dilation.as_ptr(),
1370 dilation.len_i32(),
1371 groups
1372 ));
1373 Ok(Tensor { c_tensor: c_tensors[0] })
1374 }
1375
1376 pub fn f_internal_convolution_out<T: Borrow<Tensor>>(
1377 &self,
1378 out: &Tensor,
1379 weight: &Tensor,
1380 bias: Option<T>,
1381 stride: impl IntList,
1382 padding: impl IntList,
1383 dilation: impl IntList,
1384 transposed: bool,
1385 output_padding: impl IntList,
1386 groups: i64,
1387 benchmark: bool,
1388 deterministic: bool,
1389 cudnn_enabled: bool,
1390 allow_tf32: bool,
1391 ) -> Result<Tensor, TchError> {
1392 let mut c_tensors = [std::ptr::null_mut(); 1];
1393 unsafe_torch_err!(atg__convolution_out(
1394 c_tensors.as_mut_ptr(),
1395 out.c_tensor,
1396 self.c_tensor,
1397 weight.c_tensor,
1398 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
1399 stride.as_ptr(),
1400 stride.len_i32(),
1401 padding.as_ptr(),
1402 padding.len_i32(),
1403 dilation.as_ptr(),
1404 dilation.len_i32(),
1405 if transposed { 1 } else { 0 },
1406 output_padding.as_ptr(),
1407 output_padding.len_i32(),
1408 groups,
1409 if benchmark { 1 } else { 0 },
1410 if deterministic { 1 } else { 0 },
1411 if cudnn_enabled { 1 } else { 0 },
1412 if allow_tf32 { 1 } else { 0 }
1413 ));
1414 Ok(Tensor { c_tensor: c_tensors[0] })
1415 }
1416
1417 pub fn f_internal_copy_from(
1418 &self,
1419 dst: &Tensor,
1420 non_blocking: bool,
1421 ) -> Result<Tensor, TchError> {
1422 let mut c_tensors = [std::ptr::null_mut(); 1];
1423 unsafe_torch_err!(atg__copy_from(
1424 c_tensors.as_mut_ptr(),
1425 self.c_tensor,
1426 dst.c_tensor,
1427 if non_blocking { 1 } else { 0 }
1428 ));
1429 Ok(Tensor { c_tensor: c_tensors[0] })
1430 }
1431
1432 pub fn f_internal_copy_from_and_resize(&self, dst: &Tensor) -> Result<Tensor, TchError> {
1433 let mut c_tensors = [std::ptr::null_mut(); 1];
1434 unsafe_torch_err!(atg__copy_from_and_resize(
1435 c_tensors.as_mut_ptr(),
1436 self.c_tensor,
1437 dst.c_tensor
1438 ));
1439 Ok(Tensor { c_tensor: c_tensors[0] })
1440 }
1441
1442 pub fn f_internal_copy_from_and_resize_out(
1443 &self,
1444 out: &Tensor,
1445 dst: &Tensor,
1446 ) -> Result<Tensor, TchError> {
1447 let mut c_tensors = [std::ptr::null_mut(); 1];
1448 unsafe_torch_err!(atg__copy_from_and_resize_out(
1449 c_tensors.as_mut_ptr(),
1450 out.c_tensor,
1451 self.c_tensor,
1452 dst.c_tensor
1453 ));
1454 Ok(Tensor { c_tensor: c_tensors[0] })
1455 }
1456
1457 pub fn f_internal_copy_from_out(
1458 &self,
1459 out: &Tensor,
1460 dst: &Tensor,
1461 non_blocking: bool,
1462 ) -> Result<Tensor, TchError> {
1463 let mut c_tensors = [std::ptr::null_mut(); 1];
1464 unsafe_torch_err!(atg__copy_from_out(
1465 c_tensors.as_mut_ptr(),
1466 out.c_tensor,
1467 self.c_tensor,
1468 dst.c_tensor,
1469 if non_blocking { 1 } else { 0 }
1470 ));
1471 Ok(Tensor { c_tensor: c_tensors[0] })
1472 }
1473
1474 pub fn f_internal_cslt_compress(&self) -> Result<Tensor, TchError> {
1475 let mut c_tensors = [std::ptr::null_mut(); 1];
1476 unsafe_torch_err!(atg__cslt_compress(c_tensors.as_mut_ptr(), self.c_tensor));
1477 Ok(Tensor { c_tensor: c_tensors[0] })
1478 }
1479
1480 pub fn f_internal_cslt_sparse_mm<T: Borrow<Tensor>>(
1481 compressed_a: &Tensor,
1482 dense_b: &Tensor,
1483 bias: Option<T>,
1484 alpha: Option<T>,
1485 out_dtype: impl Into<Option<Kind>>,
1486 transpose_result: bool,
1487 alg_id: i64,
1488 ) -> Result<Tensor, TchError> {
1489 let mut c_tensors = [std::ptr::null_mut(); 1];
1490 unsafe_torch_err!(atg__cslt_sparse_mm(
1491 c_tensors.as_mut_ptr(),
1492 compressed_a.c_tensor,
1493 dense_b.c_tensor,
1494 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
1495 alpha.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
1496 out_dtype.into().map_or(-1, |s| s.c_int()),
1497 if transpose_result { 1 } else { 0 },
1498 alg_id
1499 ));
1500 Ok(Tensor { c_tensor: c_tensors[0] })
1501 }
1502
1503 pub fn f_internal_cslt_sparse_mm_search<T: Borrow<Tensor>>(
1504 compressed_a: &Tensor,
1505 dense_b: &Tensor,
1506 bias: Option<T>,
1507 alpha: Option<T>,
1508 out_dtype: impl Into<Option<Kind>>,
1509 transpose_result: bool,
1510 ) -> Result<i64, TchError> {
1511 let return_;
1512 unsafe_torch_err!(
1513 return_ = atg__cslt_sparse_mm_search(
1514 compressed_a.c_tensor,
1515 dense_b.c_tensor,
1516 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
1517 alpha.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
1518 out_dtype.into().map_or(-1, |s| s.c_int()),
1519 if transpose_result { 1 } else { 0 }
1520 )
1521 );
1522 Ok(return_)
1523 }
1524
1525 pub fn f_internal_ctc_loss(
1526 log_probs: &Tensor,
1527 targets: &Tensor,
1528 input_lengths: impl IntList,
1529 target_lengths: impl IntList,
1530 blank: i64,
1531 zero_infinity: bool,
1532 ) -> Result<(Tensor, Tensor), TchError> {
1533 let mut c_tensors = [std::ptr::null_mut(); 2];
1534 unsafe_torch_err!(atg__ctc_loss(
1535 c_tensors.as_mut_ptr(),
1536 log_probs.c_tensor,
1537 targets.c_tensor,
1538 input_lengths.as_ptr(),
1539 input_lengths.len_i32(),
1540 target_lengths.as_ptr(),
1541 target_lengths.len_i32(),
1542 blank,
1543 if zero_infinity { 1 } else { 0 }
1544 ));
1545 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
1546 }
1547
1548 pub fn f_internal_ctc_loss_backward(
1549 grad: &Tensor,
1550 log_probs: &Tensor,
1551 targets: &Tensor,
1552 input_lengths: impl IntList,
1553 target_lengths: impl IntList,
1554 neg_log_likelihood: &Tensor,
1555 log_alpha: &Tensor,
1556 blank: i64,
1557 zero_infinity: bool,
1558 ) -> Result<Tensor, TchError> {
1559 let mut c_tensors = [std::ptr::null_mut(); 1];
1560 unsafe_torch_err!(atg__ctc_loss_backward(
1561 c_tensors.as_mut_ptr(),
1562 grad.c_tensor,
1563 log_probs.c_tensor,
1564 targets.c_tensor,
1565 input_lengths.as_ptr(),
1566 input_lengths.len_i32(),
1567 target_lengths.as_ptr(),
1568 target_lengths.len_i32(),
1569 neg_log_likelihood.c_tensor,
1570 log_alpha.c_tensor,
1571 blank,
1572 if zero_infinity { 1 } else { 0 }
1573 ));
1574 Ok(Tensor { c_tensor: c_tensors[0] })
1575 }
1576
1577 pub fn f_internal_ctc_loss_backward_out(
1578 out: &Tensor,
1579 grad: &Tensor,
1580 log_probs: &Tensor,
1581 targets: &Tensor,
1582 input_lengths: impl IntList,
1583 target_lengths: impl IntList,
1584 neg_log_likelihood: &Tensor,
1585 log_alpha: &Tensor,
1586 blank: i64,
1587 zero_infinity: bool,
1588 ) -> Result<Tensor, TchError> {
1589 let mut c_tensors = [std::ptr::null_mut(); 1];
1590 unsafe_torch_err!(atg__ctc_loss_backward_out(
1591 c_tensors.as_mut_ptr(),
1592 out.c_tensor,
1593 grad.c_tensor,
1594 log_probs.c_tensor,
1595 targets.c_tensor,
1596 input_lengths.as_ptr(),
1597 input_lengths.len_i32(),
1598 target_lengths.as_ptr(),
1599 target_lengths.len_i32(),
1600 neg_log_likelihood.c_tensor,
1601 log_alpha.c_tensor,
1602 blank,
1603 if zero_infinity { 1 } else { 0 }
1604 ));
1605 Ok(Tensor { c_tensor: c_tensors[0] })
1606 }
1607
1608 pub fn f_internal_ctc_loss_backward_tensor(
1609 grad: &Tensor,
1610 log_probs: &Tensor,
1611 targets: &Tensor,
1612 input_lengths: &Tensor,
1613 target_lengths: &Tensor,
1614 neg_log_likelihood: &Tensor,
1615 log_alpha: &Tensor,
1616 blank: i64,
1617 zero_infinity: bool,
1618 ) -> Result<Tensor, TchError> {
1619 let mut c_tensors = [std::ptr::null_mut(); 1];
1620 unsafe_torch_err!(atg__ctc_loss_backward_tensor(
1621 c_tensors.as_mut_ptr(),
1622 grad.c_tensor,
1623 log_probs.c_tensor,
1624 targets.c_tensor,
1625 input_lengths.c_tensor,
1626 target_lengths.c_tensor,
1627 neg_log_likelihood.c_tensor,
1628 log_alpha.c_tensor,
1629 blank,
1630 if zero_infinity { 1 } else { 0 }
1631 ));
1632 Ok(Tensor { c_tensor: c_tensors[0] })
1633 }
1634
1635 pub fn f_internal_ctc_loss_out(
1636 out0: &Tensor,
1637 out1: &Tensor,
1638 log_probs: &Tensor,
1639 targets: &Tensor,
1640 input_lengths: impl IntList,
1641 target_lengths: impl IntList,
1642 blank: i64,
1643 zero_infinity: bool,
1644 ) -> Result<(Tensor, Tensor), TchError> {
1645 let mut c_tensors = [std::ptr::null_mut(); 2];
1646 unsafe_torch_err!(atg__ctc_loss_out(
1647 c_tensors.as_mut_ptr(),
1648 out0.c_tensor,
1649 out1.c_tensor,
1650 log_probs.c_tensor,
1651 targets.c_tensor,
1652 input_lengths.as_ptr(),
1653 input_lengths.len_i32(),
1654 target_lengths.as_ptr(),
1655 target_lengths.len_i32(),
1656 blank,
1657 if zero_infinity { 1 } else { 0 }
1658 ));
1659 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
1660 }
1661
1662 pub fn f_internal_ctc_loss_tensor(
1663 log_probs: &Tensor,
1664 targets: &Tensor,
1665 input_lengths: &Tensor,
1666 target_lengths: &Tensor,
1667 blank: i64,
1668 zero_infinity: bool,
1669 ) -> Result<(Tensor, Tensor), TchError> {
1670 let mut c_tensors = [std::ptr::null_mut(); 2];
1671 unsafe_torch_err!(atg__ctc_loss_tensor(
1672 c_tensors.as_mut_ptr(),
1673 log_probs.c_tensor,
1674 targets.c_tensor,
1675 input_lengths.c_tensor,
1676 target_lengths.c_tensor,
1677 blank,
1678 if zero_infinity { 1 } else { 0 }
1679 ));
1680 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
1681 }
1682
1683 pub fn f_internal_ctc_loss_tensor_out(
1684 out0: &Tensor,
1685 out1: &Tensor,
1686 log_probs: &Tensor,
1687 targets: &Tensor,
1688 input_lengths: &Tensor,
1689 target_lengths: &Tensor,
1690 blank: i64,
1691 zero_infinity: bool,
1692 ) -> Result<(Tensor, Tensor), TchError> {
1693 let mut c_tensors = [std::ptr::null_mut(); 2];
1694 unsafe_torch_err!(atg__ctc_loss_tensor_out(
1695 c_tensors.as_mut_ptr(),
1696 out0.c_tensor,
1697 out1.c_tensor,
1698 log_probs.c_tensor,
1699 targets.c_tensor,
1700 input_lengths.c_tensor,
1701 target_lengths.c_tensor,
1702 blank,
1703 if zero_infinity { 1 } else { 0 }
1704 ));
1705 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
1706 }
1707
1708 pub fn f_internal_cudnn_ctc_loss(
1709 log_probs: &Tensor,
1710 targets: &Tensor,
1711 input_lengths: impl IntList,
1712 target_lengths: impl IntList,
1713 blank: i64,
1714 deterministic: bool,
1715 zero_infinity: bool,
1716 ) -> Result<(Tensor, Tensor), TchError> {
1717 let mut c_tensors = [std::ptr::null_mut(); 2];
1718 unsafe_torch_err!(atg__cudnn_ctc_loss(
1719 c_tensors.as_mut_ptr(),
1720 log_probs.c_tensor,
1721 targets.c_tensor,
1722 input_lengths.as_ptr(),
1723 input_lengths.len_i32(),
1724 target_lengths.as_ptr(),
1725 target_lengths.len_i32(),
1726 blank,
1727 if deterministic { 1 } else { 0 },
1728 if zero_infinity { 1 } else { 0 }
1729 ));
1730 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
1731 }
1732
1733 pub fn f_internal_cudnn_ctc_loss_out(
1734 out0: &Tensor,
1735 out1: &Tensor,
1736 log_probs: &Tensor,
1737 targets: &Tensor,
1738 input_lengths: impl IntList,
1739 target_lengths: impl IntList,
1740 blank: i64,
1741 deterministic: bool,
1742 zero_infinity: bool,
1743 ) -> Result<(Tensor, Tensor), TchError> {
1744 let mut c_tensors = [std::ptr::null_mut(); 2];
1745 unsafe_torch_err!(atg__cudnn_ctc_loss_out(
1746 c_tensors.as_mut_ptr(),
1747 out0.c_tensor,
1748 out1.c_tensor,
1749 log_probs.c_tensor,
1750 targets.c_tensor,
1751 input_lengths.as_ptr(),
1752 input_lengths.len_i32(),
1753 target_lengths.as_ptr(),
1754 target_lengths.len_i32(),
1755 blank,
1756 if deterministic { 1 } else { 0 },
1757 if zero_infinity { 1 } else { 0 }
1758 ));
1759 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
1760 }
1761
1762 pub fn f_internal_cudnn_ctc_loss_tensor(
1763 log_probs: &Tensor,
1764 targets: &Tensor,
1765 input_lengths: &Tensor,
1766 target_lengths: &Tensor,
1767 blank: i64,
1768 deterministic: bool,
1769 zero_infinity: bool,
1770 ) -> Result<(Tensor, Tensor), TchError> {
1771 let mut c_tensors = [std::ptr::null_mut(); 2];
1772 unsafe_torch_err!(atg__cudnn_ctc_loss_tensor(
1773 c_tensors.as_mut_ptr(),
1774 log_probs.c_tensor,
1775 targets.c_tensor,
1776 input_lengths.c_tensor,
1777 target_lengths.c_tensor,
1778 blank,
1779 if deterministic { 1 } else { 0 },
1780 if zero_infinity { 1 } else { 0 }
1781 ));
1782 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
1783 }
1784
1785 pub fn f_internal_cudnn_init_dropout_state(
1786 dropout: f64,
1787 train: bool,
1788 dropout_seed: i64,
1789 options: (Kind, Device),
1790 ) -> Result<Tensor, TchError> {
1791 let mut c_tensors = [std::ptr::null_mut(); 1];
1792 unsafe_torch_err!(atg__cudnn_init_dropout_state(
1793 c_tensors.as_mut_ptr(),
1794 dropout,
1795 if train { 1 } else { 0 },
1796 dropout_seed,
1797 options.0.c_int(),
1798 options.1.c_int()
1799 ));
1800 Ok(Tensor { c_tensor: c_tensors[0] })
1801 }
1802
1803 pub fn f_internal_cudnn_init_dropout_state_out(
1804 out: &Tensor,
1805 dropout: f64,
1806 train: bool,
1807 dropout_seed: i64,
1808 ) -> Result<Tensor, TchError> {
1809 let mut c_tensors = [std::ptr::null_mut(); 1];
1810 unsafe_torch_err!(atg__cudnn_init_dropout_state_out(
1811 c_tensors.as_mut_ptr(),
1812 out.c_tensor,
1813 dropout,
1814 if train { 1 } else { 0 },
1815 dropout_seed
1816 ));
1817 Ok(Tensor { c_tensor: c_tensors[0] })
1818 }
1819
1820 pub fn f_internal_cudnn_rnn<T: Borrow<Tensor>>(
1821 &self,
1822 weight: &[T],
1823 weight_stride0: i64,
1824 weight_buf: Option<T>,
1825 hx: &Tensor,
1826 cx: Option<T>,
1827 mode: i64,
1828 hidden_size: i64,
1829 proj_size: i64,
1830 num_layers: i64,
1831 batch_first: bool,
1832 dropout: f64,
1833 train: bool,
1834 bidirectional: bool,
1835 batch_sizes: impl IntList,
1836 dropout_state: Option<T>,
1837 ) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
1838 let mut c_tensors = [std::ptr::null_mut(); 5];
1839 unsafe_torch_err!(atg__cudnn_rnn(
1840 c_tensors.as_mut_ptr(),
1841 self.c_tensor,
1842 ptr_list(weight).as_ptr(),
1843 weight.len() as i32,
1844 weight_stride0,
1845 weight_buf.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
1846 hx.c_tensor,
1847 cx.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
1848 mode,
1849 hidden_size,
1850 proj_size,
1851 num_layers,
1852 if batch_first { 1 } else { 0 },
1853 dropout,
1854 if train { 1 } else { 0 },
1855 if bidirectional { 1 } else { 0 },
1856 batch_sizes.as_ptr(),
1857 batch_sizes.len_i32(),
1858 dropout_state.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
1859 ));
1860 Ok((
1861 Tensor { c_tensor: c_tensors[0] },
1862 Tensor { c_tensor: c_tensors[1] },
1863 Tensor { c_tensor: c_tensors[2] },
1864 Tensor { c_tensor: c_tensors[3] },
1865 Tensor { c_tensor: c_tensors[4] },
1866 ))
1867 }
1868
1869 pub fn f_internal_cudnn_rnn_flatten_weight<T: Borrow<Tensor>>(
1870 weight_arr: &[T],
1871 weight_stride0: i64,
1872 input_size: i64,
1873 mode: i64,
1874 hidden_size: i64,
1875 proj_size: i64,
1876 num_layers: i64,
1877 batch_first: bool,
1878 bidirectional: bool,
1879 ) -> Result<Tensor, TchError> {
1880 let mut c_tensors = [std::ptr::null_mut(); 1];
1881 unsafe_torch_err!(atg__cudnn_rnn_flatten_weight(
1882 c_tensors.as_mut_ptr(),
1883 ptr_list(weight_arr).as_ptr(),
1884 weight_arr.len() as i32,
1885 weight_stride0,
1886 input_size,
1887 mode,
1888 hidden_size,
1889 proj_size,
1890 num_layers,
1891 if batch_first { 1 } else { 0 },
1892 if bidirectional { 1 } else { 0 }
1893 ));
1894 Ok(Tensor { c_tensor: c_tensors[0] })
1895 }
1896
1897 pub fn f_internal_cudnn_rnn_flatten_weight_out<T: Borrow<Tensor>>(
1898 out: &Tensor,
1899 weight_arr: &[T],
1900 weight_stride0: i64,
1901 input_size: i64,
1902 mode: i64,
1903 hidden_size: i64,
1904 proj_size: i64,
1905 num_layers: i64,
1906 batch_first: bool,
1907 bidirectional: bool,
1908 ) -> Result<Tensor, TchError> {
1909 let mut c_tensors = [std::ptr::null_mut(); 1];
1910 unsafe_torch_err!(atg__cudnn_rnn_flatten_weight_out(
1911 c_tensors.as_mut_ptr(),
1912 out.c_tensor,
1913 ptr_list(weight_arr).as_ptr(),
1914 weight_arr.len() as i32,
1915 weight_stride0,
1916 input_size,
1917 mode,
1918 hidden_size,
1919 proj_size,
1920 num_layers,
1921 if batch_first { 1 } else { 0 },
1922 if bidirectional { 1 } else { 0 }
1923 ));
1924 Ok(Tensor { c_tensor: c_tensors[0] })
1925 }
1926
1927 pub fn f_internal_cudnn_rnn_out<T: Borrow<Tensor>>(
1928 &self,
1929 out0: &Tensor,
1930 out1: &Tensor,
1931 out2: &Tensor,
1932 out3: &Tensor,
1933 out4: &Tensor,
1934 weight: &[T],
1935 weight_stride0: i64,
1936 weight_buf: Option<T>,
1937 hx: &Tensor,
1938 cx: Option<T>,
1939 mode: i64,
1940 hidden_size: i64,
1941 proj_size: i64,
1942 num_layers: i64,
1943 batch_first: bool,
1944 dropout: f64,
1945 train: bool,
1946 bidirectional: bool,
1947 batch_sizes: impl IntList,
1948 dropout_state: Option<T>,
1949 ) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
1950 let mut c_tensors = [std::ptr::null_mut(); 5];
1951 unsafe_torch_err!(atg__cudnn_rnn_out(
1952 c_tensors.as_mut_ptr(),
1953 out0.c_tensor,
1954 out1.c_tensor,
1955 out2.c_tensor,
1956 out3.c_tensor,
1957 out4.c_tensor,
1958 self.c_tensor,
1959 ptr_list(weight).as_ptr(),
1960 weight.len() as i32,
1961 weight_stride0,
1962 weight_buf.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
1963 hx.c_tensor,
1964 cx.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
1965 mode,
1966 hidden_size,
1967 proj_size,
1968 num_layers,
1969 if batch_first { 1 } else { 0 },
1970 dropout,
1971 if train { 1 } else { 0 },
1972 if bidirectional { 1 } else { 0 },
1973 batch_sizes.as_ptr(),
1974 batch_sizes.len_i32(),
1975 dropout_state.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
1976 ));
1977 Ok((
1978 Tensor { c_tensor: c_tensors[0] },
1979 Tensor { c_tensor: c_tensors[1] },
1980 Tensor { c_tensor: c_tensors[2] },
1981 Tensor { c_tensor: c_tensors[3] },
1982 Tensor { c_tensor: c_tensors[4] },
1983 ))
1984 }
1985
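    /// Debug helper around ATen's internal-overlap check. The returned integer
    /// is a tri-state code (roughly "no overlap" / "overlap" / "could not
    /// determine") rather than a boolean, so compare against the specific
    /// value you care about instead of treating it as a flag.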
1986 pub fn f_internal_debug_has_internal_overlap(&self) -> Result<i64, TchError> {
1987 let return_;
1988 unsafe_torch_err!(return_ = atg__debug_has_internal_overlap(self.c_tensor));
1989 Ok(return_)
1990 }
1991
1992 pub fn f_internal_dim_arange(like: &Tensor, dim: i64) -> Result<Tensor, TchError> {
1993 let mut c_tensors = [std::ptr::null_mut(); 1];
1994 unsafe_torch_err!(atg__dim_arange(c_tensors.as_mut_ptr(), like.c_tensor, dim));
1995 Ok(Tensor { c_tensor: c_tensors[0] })
1996 }
1997
1998 pub fn f_internal_dimi(&self) -> Result<i64, TchError> {
1999 let return_;
2000 unsafe_torch_err!(return_ = atg__dimi(self.c_tensor));
2001 Ok(return_)
2002 }
2003
2004 pub fn f_internal_dimv(&self) -> Result<i64, TchError> {
2005 let return_;
2006 unsafe_torch_err!(return_ = atg__dimv(self.c_tensor));
2007 Ok(return_)
2008 }
2009
2010 pub fn f_internal_dirichlet_grad(
2011 x: &Tensor,
2012 alpha: &Tensor,
2013 total: &Tensor,
2014 ) -> Result<Tensor, TchError> {
2015 let mut c_tensors = [std::ptr::null_mut(); 1];
2016 unsafe_torch_err!(atg__dirichlet_grad(
2017 c_tensors.as_mut_ptr(),
2018 x.c_tensor,
2019 alpha.c_tensor,
2020 total.c_tensor
2021 ));
2022 Ok(Tensor { c_tensor: c_tensors[0] })
2023 }
2024
2025 pub fn f_internal_dirichlet_grad_out(
2026 out: &Tensor,
2027 x: &Tensor,
2028 alpha: &Tensor,
2029 total: &Tensor,
2030 ) -> Result<Tensor, TchError> {
2031 let mut c_tensors = [std::ptr::null_mut(); 1];
2032 unsafe_torch_err!(atg__dirichlet_grad_out(
2033 c_tensors.as_mut_ptr(),
2034 out.c_tensor,
2035 x.c_tensor,
2036 alpha.c_tensor,
2037 total.c_tensor
2038 ));
2039 Ok(Tensor { c_tensor: c_tensors[0] })
2040 }
2041
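    /// Backward pass of the memory-efficient attention kernel. The four
    /// returned tensors are the gradients with respect to `query`, `key`,
    /// `value` and (when `bias_requires_grad` is set) the attention bias.
    /// The Philox seed/offset tensors must come from the matching forward
    /// call so the dropout mask can be replayed.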
2042 pub fn f_internal_efficient_attention_backward<T: Borrow<Tensor>>(
2043 grad_out_: &Tensor,
2044 query: &Tensor,
2045 key: &Tensor,
2046 value: &Tensor,
2047 bias: Option<T>,
2048 out: &Tensor,
2049 cu_seqlens_q: Option<T>,
2050 cu_seqlens_k: Option<T>,
2051 max_seqlen_q: i64,
2052 max_seqlen_k: i64,
2053 logsumexp: &Tensor,
2054 dropout_p: f64,
2055 philox_seed: &Tensor,
2056 philox_offset: &Tensor,
2057 custom_mask_type: i64,
2058 bias_requires_grad: bool,
2059 scale: impl Into<Option<f64>>,
2060 num_splits_key: impl Into<Option<i64>>,
2061 window_size: impl Into<Option<i64>>,
2062 shared_storage_dqdkdv: bool,
2063 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
2064 let scale = scale.into();
2065 let num_splits_key = num_splits_key.into();
2066 let window_size = window_size.into();
2067 let mut c_tensors = [std::ptr::null_mut(); 4];
2068 unsafe_torch_err!(atg__efficient_attention_backward(
2069 c_tensors.as_mut_ptr(),
2070 grad_out_.c_tensor,
2071 query.c_tensor,
2072 key.c_tensor,
2073 value.c_tensor,
2074 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
2075 out.c_tensor,
2076 cu_seqlens_q.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
2077 cu_seqlens_k.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
2078 max_seqlen_q,
2079 max_seqlen_k,
2080 logsumexp.c_tensor,
2081 dropout_p,
2082 philox_seed.c_tensor,
2083 philox_offset.c_tensor,
2084 custom_mask_type,
2085 if bias_requires_grad { 1 } else { 0 },
2086 scale.unwrap_or(std::f64::NAN),
2087 scale.is_none() as i8,
2088 num_splits_key.unwrap_or(0i64),
2089 num_splits_key.is_none() as i8,
2090 window_size.unwrap_or(0i64),
2091 window_size.is_none() as i8,
2092 if shared_storage_dqdkdv { 1 } else { 0 }
2093 ));
2094 Ok((
2095 Tensor { c_tensor: c_tensors[0] },
2096 Tensor { c_tensor: c_tensors[1] },
2097 Tensor { c_tensor: c_tensors[2] },
2098 Tensor { c_tensor: c_tensors[3] },
2099 ))
2100 }
2101
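    /// Creates a tensor of zeros through the `_efficientzerotensor` primitive,
    /// which PyTorch uses internally to represent all-zero results cheaply;
    /// for ordinary code `Tensor::zeros` is the intended entry point.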
2102 pub fn f_internal_efficientzerotensor(
2103 size: impl IntList,
2104 options: (Kind, Device),
2105 ) -> Result<Tensor, TchError> {
2106 let mut c_tensors = [std::ptr::null_mut(); 1];
2107 unsafe_torch_err!(atg__efficientzerotensor(
2108 c_tensors.as_mut_ptr(),
2109 size.as_ptr(),
2110 size.len_i32(),
2111 options.0.c_int(),
2112 options.1.c_int()
2113 ));
2114 Ok(Tensor { c_tensor: c_tensors[0] })
2115 }
2116
2117 pub fn f_internal_efficientzerotensor_out(
2118 out: &Tensor,
2119 size: impl IntList,
2120 ) -> Result<Tensor, TchError> {
2121 let mut c_tensors = [std::ptr::null_mut(); 1];
2122 unsafe_torch_err!(atg__efficientzerotensor_out(
2123 c_tensors.as_mut_ptr(),
2124 out.c_tensor,
2125 size.as_ptr(),
2126 size.len_i32()
2127 ));
2128 Ok(Tensor { c_tensor: c_tensors[0] })
2129 }
2130
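    /// Low-level embedding-bag forward. Besides the pooled output it returns
    /// the bookkeeping tensors (`offset2bag`, `bag_size`, `max_indices`) that
    /// the backward variants below expect. `mode` selects the reduction using
    /// the ATen encoding (0 = sum, 1 = mean, 2 = max).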
2131 pub fn f_internal_embedding_bag<T: Borrow<Tensor>>(
2132 weight: &Tensor,
2133 indices: &Tensor,
2134 offsets: &Tensor,
2135 scale_grad_by_freq: bool,
2136 mode: i64,
2137 sparse: bool,
2138 per_sample_weights: Option<T>,
2139 include_last_offset: bool,
2140 padding_idx: i64,
2141 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
2142 let mut c_tensors = [std::ptr::null_mut(); 4];
2143 unsafe_torch_err!(atg__embedding_bag(
2144 c_tensors.as_mut_ptr(),
2145 weight.c_tensor,
2146 indices.c_tensor,
2147 offsets.c_tensor,
2148 if scale_grad_by_freq { 1 } else { 0 },
2149 mode,
2150 if sparse { 1 } else { 0 },
2151 per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
2152 if include_last_offset { 1 } else { 0 },
2153 padding_idx
2154 ));
2155 Ok((
2156 Tensor { c_tensor: c_tensors[0] },
2157 Tensor { c_tensor: c_tensors[1] },
2158 Tensor { c_tensor: c_tensors[2] },
2159 Tensor { c_tensor: c_tensors[3] },
2160 ))
2161 }
2162
2163 pub fn f_internal_embedding_bag_backward<T: Borrow<Tensor>>(
2164 grad: &Tensor,
2165 indices: &Tensor,
2166 offsets: &Tensor,
2167 offset2bag: &Tensor,
2168 bag_size: &Tensor,
2169 maximum_indices: &Tensor,
2170 num_weights: i64,
2171 scale_grad_by_freq: bool,
2172 mode: i64,
2173 sparse: bool,
2174 per_sample_weights: Option<T>,
2175 padding_idx: i64,
2176 ) -> Result<Tensor, TchError> {
2177 let mut c_tensors = [std::ptr::null_mut(); 1];
2178 unsafe_torch_err!(atg__embedding_bag_backward(
2179 c_tensors.as_mut_ptr(),
2180 grad.c_tensor,
2181 indices.c_tensor,
2182 offsets.c_tensor,
2183 offset2bag.c_tensor,
2184 bag_size.c_tensor,
2185 maximum_indices.c_tensor,
2186 num_weights,
2187 if scale_grad_by_freq { 1 } else { 0 },
2188 mode,
2189 if sparse { 1 } else { 0 },
2190 per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
2191 padding_idx
2192 ));
2193 Ok(Tensor { c_tensor: c_tensors[0] })
2194 }
2195
2196 pub fn f_internal_embedding_bag_dense_backward<T: Borrow<Tensor>>(
2197 grad: &Tensor,
2198 indices: &Tensor,
2199 offset2bag: &Tensor,
2200 bag_size: &Tensor,
2201 maximum_indices: &Tensor,
2202 num_weights: i64,
2203 scale_grad_by_freq: bool,
2204 mode: i64,
2205 per_sample_weights: Option<T>,
2206 padding_idx: i64,
2207 ) -> Result<Tensor, TchError> {
2208 let mut c_tensors = [std::ptr::null_mut(); 1];
2209 unsafe_torch_err!(atg__embedding_bag_dense_backward(
2210 c_tensors.as_mut_ptr(),
2211 grad.c_tensor,
2212 indices.c_tensor,
2213 offset2bag.c_tensor,
2214 bag_size.c_tensor,
2215 maximum_indices.c_tensor,
2216 num_weights,
2217 if scale_grad_by_freq { 1 } else { 0 },
2218 mode,
2219 per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
2220 padding_idx
2221 ));
2222 Ok(Tensor { c_tensor: c_tensors[0] })
2223 }
2224
2225 pub fn f_internal_embedding_bag_dense_backward_out<T: Borrow<Tensor>>(
2226 out: &Tensor,
2227 grad: &Tensor,
2228 indices: &Tensor,
2229 offset2bag: &Tensor,
2230 bag_size: &Tensor,
2231 maximum_indices: &Tensor,
2232 num_weights: i64,
2233 scale_grad_by_freq: bool,
2234 mode: i64,
2235 per_sample_weights: Option<T>,
2236 padding_idx: i64,
2237 ) -> Result<Tensor, TchError> {
2238 let mut c_tensors = [std::ptr::null_mut(); 1];
2239 unsafe_torch_err!(atg__embedding_bag_dense_backward_out(
2240 c_tensors.as_mut_ptr(),
2241 out.c_tensor,
2242 grad.c_tensor,
2243 indices.c_tensor,
2244 offset2bag.c_tensor,
2245 bag_size.c_tensor,
2246 maximum_indices.c_tensor,
2247 num_weights,
2248 if scale_grad_by_freq { 1 } else { 0 },
2249 mode,
2250 per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
2251 padding_idx
2252 ));
2253 Ok(Tensor { c_tensor: c_tensors[0] })
2254 }
2255
2256 pub fn f_internal_embedding_bag_forward_only<T: Borrow<Tensor>>(
2257 weight: &Tensor,
2258 indices: &Tensor,
2259 offsets: &Tensor,
2260 scale_grad_by_freq: bool,
2261 mode: i64,
2262 sparse: bool,
2263 per_sample_weights: Option<T>,
2264 include_last_offset: bool,
2265 padding_idx: i64,
2266 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
2267 let mut c_tensors = [std::ptr::null_mut(); 4];
2268 unsafe_torch_err!(atg__embedding_bag_forward_only(
2269 c_tensors.as_mut_ptr(),
2270 weight.c_tensor,
2271 indices.c_tensor,
2272 offsets.c_tensor,
2273 if scale_grad_by_freq { 1 } else { 0 },
2274 mode,
2275 if sparse { 1 } else { 0 },
2276 per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
2277 if include_last_offset { 1 } else { 0 },
2278 padding_idx
2279 ));
2280 Ok((
2281 Tensor { c_tensor: c_tensors[0] },
2282 Tensor { c_tensor: c_tensors[1] },
2283 Tensor { c_tensor: c_tensors[2] },
2284 Tensor { c_tensor: c_tensors[3] },
2285 ))
2286 }
2287
2288 pub fn f_internal_embedding_bag_forward_only_out<T: Borrow<Tensor>>(
2289 out0: &Tensor,
2290 out1: &Tensor,
2291 out2: &Tensor,
2292 out3: &Tensor,
2293 weight: &Tensor,
2294 indices: &Tensor,
2295 offsets: &Tensor,
2296 scale_grad_by_freq: bool,
2297 mode: i64,
2298 sparse: bool,
2299 per_sample_weights: Option<T>,
2300 include_last_offset: bool,
2301 padding_idx: i64,
2302 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
2303 let mut c_tensors = [std::ptr::null_mut(); 4];
2304 unsafe_torch_err!(atg__embedding_bag_forward_only_out(
2305 c_tensors.as_mut_ptr(),
2306 out0.c_tensor,
2307 out1.c_tensor,
2308 out2.c_tensor,
2309 out3.c_tensor,
2310 weight.c_tensor,
2311 indices.c_tensor,
2312 offsets.c_tensor,
2313 if scale_grad_by_freq { 1 } else { 0 },
2314 mode,
2315 if sparse { 1 } else { 0 },
2316 per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
2317 if include_last_offset { 1 } else { 0 },
2318 padding_idx
2319 ));
2320 Ok((
2321 Tensor { c_tensor: c_tensors[0] },
2322 Tensor { c_tensor: c_tensors[1] },
2323 Tensor { c_tensor: c_tensors[2] },
2324 Tensor { c_tensor: c_tensors[3] },
2325 ))
2326 }
2327
2328 pub fn f_internal_embedding_bag_out<T: Borrow<Tensor>>(
2329 out0: &Tensor,
2330 out1: &Tensor,
2331 out2: &Tensor,
2332 out3: &Tensor,
2333 weight: &Tensor,
2334 indices: &Tensor,
2335 offsets: &Tensor,
2336 scale_grad_by_freq: bool,
2337 mode: i64,
2338 sparse: bool,
2339 per_sample_weights: Option<T>,
2340 include_last_offset: bool,
2341 padding_idx: i64,
2342 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
2343 let mut c_tensors = [std::ptr::null_mut(); 4];
2344 unsafe_torch_err!(atg__embedding_bag_out(
2345 c_tensors.as_mut_ptr(),
2346 out0.c_tensor,
2347 out1.c_tensor,
2348 out2.c_tensor,
2349 out3.c_tensor,
2350 weight.c_tensor,
2351 indices.c_tensor,
2352 offsets.c_tensor,
2353 if scale_grad_by_freq { 1 } else { 0 },
2354 mode,
2355 if sparse { 1 } else { 0 },
2356 per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
2357 if include_last_offset { 1 } else { 0 },
2358 padding_idx
2359 ));
2360 Ok((
2361 Tensor { c_tensor: c_tensors[0] },
2362 Tensor { c_tensor: c_tensors[1] },
2363 Tensor { c_tensor: c_tensors[2] },
2364 Tensor { c_tensor: c_tensors[3] },
2365 ))
2366 }
2367
2368 pub fn f_internal_embedding_bag_per_sample_weights_backward(
2369 grad: &Tensor,
2370 weight: &Tensor,
2371 indices: &Tensor,
2372 offsets: &Tensor,
2373 offset2bag: &Tensor,
2374 mode: i64,
2375 padding_idx: i64,
2376 ) -> Result<Tensor, TchError> {
2377 let mut c_tensors = [std::ptr::null_mut(); 1];
2378 unsafe_torch_err!(atg__embedding_bag_per_sample_weights_backward(
2379 c_tensors.as_mut_ptr(),
2380 grad.c_tensor,
2381 weight.c_tensor,
2382 indices.c_tensor,
2383 offsets.c_tensor,
2384 offset2bag.c_tensor,
2385 mode,
2386 padding_idx
2387 ));
2388 Ok(Tensor { c_tensor: c_tensors[0] })
2389 }
2390
2391 pub fn f_internal_embedding_bag_per_sample_weights_backward_out(
2392 out: &Tensor,
2393 grad: &Tensor,
2394 weight: &Tensor,
2395 indices: &Tensor,
2396 offsets: &Tensor,
2397 offset2bag: &Tensor,
2398 mode: i64,
2399 padding_idx: i64,
2400 ) -> Result<Tensor, TchError> {
2401 let mut c_tensors = [std::ptr::null_mut(); 1];
2402 unsafe_torch_err!(atg__embedding_bag_per_sample_weights_backward_out(
2403 c_tensors.as_mut_ptr(),
2404 out.c_tensor,
2405 grad.c_tensor,
2406 weight.c_tensor,
2407 indices.c_tensor,
2408 offsets.c_tensor,
2409 offset2bag.c_tensor,
2410 mode,
2411 padding_idx
2412 ));
2413 Ok(Tensor { c_tensor: c_tensors[0] })
2414 }
2415
2416 pub fn f_internal_embedding_bag_sparse_backward<T: Borrow<Tensor>>(
2417 grad: &Tensor,
2418 indices: &Tensor,
2419 offsets: &Tensor,
2420 offset2bag: &Tensor,
2421 bag_size: &Tensor,
2422 num_weights: i64,
2423 scale_grad_by_freq: bool,
2424 mode: i64,
2425 per_sample_weights: Option<T>,
2426 padding_idx: i64,
2427 ) -> Result<Tensor, TchError> {
2428 let mut c_tensors = [std::ptr::null_mut(); 1];
2429 unsafe_torch_err!(atg__embedding_bag_sparse_backward(
2430 c_tensors.as_mut_ptr(),
2431 grad.c_tensor,
2432 indices.c_tensor,
2433 offsets.c_tensor,
2434 offset2bag.c_tensor,
2435 bag_size.c_tensor,
2436 num_weights,
2437 if scale_grad_by_freq { 1 } else { 0 },
2438 mode,
2439 per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
2440 padding_idx
2441 ));
2442 Ok(Tensor { c_tensor: c_tensors[0] })
2443 }
2444
2445 pub fn f_internal_empty_affine_quantized(
2446 size: impl IntList,
2447 options: (Kind, Device),
2448 scale: f64,
2449 zero_point: i64,
2450 ) -> Result<Tensor, TchError> {
2451 let mut c_tensors = [std::ptr::null_mut(); 1];
2452 unsafe_torch_err!(atg__empty_affine_quantized(
2453 c_tensors.as_mut_ptr(),
2454 size.as_ptr(),
2455 size.len_i32(),
2456 options.0.c_int(),
2457 options.1.c_int(),
2458 scale,
2459 zero_point
2460 ));
2461 Ok(Tensor { c_tensor: c_tensors[0] })
2462 }
2463
2464 pub fn f_internal_empty_affine_quantized_out(
2465 out: &Tensor,
2466 size: impl IntList,
2467 scale: f64,
2468 zero_point: i64,
2469 ) -> Result<Tensor, TchError> {
2470 let mut c_tensors = [std::ptr::null_mut(); 1];
2471 unsafe_torch_err!(atg__empty_affine_quantized_out(
2472 c_tensors.as_mut_ptr(),
2473 out.c_tensor,
2474 size.as_ptr(),
2475 size.len_i32(),
2476 scale,
2477 zero_point
2478 ));
2479 Ok(Tensor { c_tensor: c_tensors[0] })
2480 }
2481
2482 pub fn f_internal_empty_per_channel_affine_quantized(
2483 size: impl IntList,
2484 scales: &Tensor,
2485 zero_points: &Tensor,
2486 axis: i64,
2487 options: (Kind, Device),
2488 ) -> Result<Tensor, TchError> {
2489 let mut c_tensors = [std::ptr::null_mut(); 1];
2490 unsafe_torch_err!(atg__empty_per_channel_affine_quantized(
2491 c_tensors.as_mut_ptr(),
2492 size.as_ptr(),
2493 size.len_i32(),
2494 scales.c_tensor,
2495 zero_points.c_tensor,
2496 axis,
2497 options.0.c_int(),
2498 options.1.c_int()
2499 ));
2500 Ok(Tensor { c_tensor: c_tensors[0] })
2501 }
2502
2503 pub fn f_internal_empty_per_channel_affine_quantized_out(
2504 out: &Tensor,
2505 size: impl IntList,
2506 scales: &Tensor,
2507 zero_points: &Tensor,
2508 axis: i64,
2509 ) -> Result<Tensor, TchError> {
2510 let mut c_tensors = [std::ptr::null_mut(); 1];
2511 unsafe_torch_err!(atg__empty_per_channel_affine_quantized_out(
2512 c_tensors.as_mut_ptr(),
2513 out.c_tensor,
2514 size.as_ptr(),
2515 size.len_i32(),
2516 scales.c_tensor,
2517 zero_points.c_tensor,
2518 axis
2519 ));
2520 Ok(Tensor { c_tensor: c_tensors[0] })
2521 }
2522
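    /// Pairwise Euclidean distances between the rows of `x1` and `x2`
    /// (used internally for the `p = 2` path of `cdist`-style computations).
    ///
    /// A minimal sketch, assuming two 2-D float tensors:
    ///
    /// ```ignore
    /// let x1 = Tensor::randn(&[4, 3], (Kind::Float, Device::Cpu));
    /// let x2 = Tensor::randn(&[5, 3], (Kind::Float, Device::Cpu));
    /// // dist has shape [4, 5]; dist[i][j] = ||x1[i] - x2[j]||_2
    /// let dist = Tensor::f_internal_euclidean_dist(&x1, &x2)?;
    /// ```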
2523 pub fn f_internal_euclidean_dist(x1: &Tensor, x2: &Tensor) -> Result<Tensor, TchError> {
2524 let mut c_tensors = [std::ptr::null_mut(); 1];
2525 unsafe_torch_err!(atg__euclidean_dist(c_tensors.as_mut_ptr(), x1.c_tensor, x2.c_tensor));
2526 Ok(Tensor { c_tensor: c_tensors[0] })
2527 }
2528
2529 pub fn f_internal_euclidean_dist_out(
2530 out: &Tensor,
2531 x1: &Tensor,
2532 x2: &Tensor,
2533 ) -> Result<Tensor, TchError> {
2534 let mut c_tensors = [std::ptr::null_mut(); 1];
2535 unsafe_torch_err!(atg__euclidean_dist_out(
2536 c_tensors.as_mut_ptr(),
2537 out.c_tensor,
2538 x1.c_tensor,
2539 x2.c_tensor
2540 ));
2541 Ok(Tensor { c_tensor: c_tensors[0] })
2542 }
2543
2544 pub fn f_internal_fake_quantize_learnable_per_channel_affine(
2545 &self,
2546 scale: &Tensor,
2547 zero_point: &Tensor,
2548 axis: i64,
2549 quant_min: i64,
2550 quant_max: i64,
2551 grad_factor: f64,
2552 ) -> Result<Tensor, TchError> {
2553 let mut c_tensors = [std::ptr::null_mut(); 1];
2554 unsafe_torch_err!(atg__fake_quantize_learnable_per_channel_affine(
2555 c_tensors.as_mut_ptr(),
2556 self.c_tensor,
2557 scale.c_tensor,
2558 zero_point.c_tensor,
2559 axis,
2560 quant_min,
2561 quant_max,
2562 grad_factor
2563 ));
2564 Ok(Tensor { c_tensor: c_tensors[0] })
2565 }
2566
2567 pub fn f_internal_fake_quantize_learnable_per_channel_affine_backward(
2568 &self,
2569 grad: &Tensor,
2570 scale: &Tensor,
2571 zero_point: &Tensor,
2572 axis: i64,
2573 quant_min: i64,
2574 quant_max: i64,
2575 grad_factor: f64,
2576 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
2577 let mut c_tensors = [std::ptr::null_mut(); 3];
2578 unsafe_torch_err!(atg__fake_quantize_learnable_per_channel_affine_backward(
2579 c_tensors.as_mut_ptr(),
2580 grad.c_tensor,
2581 self.c_tensor,
2582 scale.c_tensor,
2583 zero_point.c_tensor,
2584 axis,
2585 quant_min,
2586 quant_max,
2587 grad_factor
2588 ));
2589 Ok((
2590 Tensor { c_tensor: c_tensors[0] },
2591 Tensor { c_tensor: c_tensors[1] },
2592 Tensor { c_tensor: c_tensors[2] },
2593 ))
2594 }
2595
2596 pub fn f_internal_fake_quantize_learnable_per_channel_affine_out(
2597 &self,
2598 out: &Tensor,
2599 scale: &Tensor,
2600 zero_point: &Tensor,
2601 axis: i64,
2602 quant_min: i64,
2603 quant_max: i64,
2604 grad_factor: f64,
2605 ) -> Result<Tensor, TchError> {
2606 let mut c_tensors = [std::ptr::null_mut(); 1];
2607 unsafe_torch_err!(atg__fake_quantize_learnable_per_channel_affine_out(
2608 c_tensors.as_mut_ptr(),
2609 out.c_tensor,
2610 self.c_tensor,
2611 scale.c_tensor,
2612 zero_point.c_tensor,
2613 axis,
2614 quant_min,
2615 quant_max,
2616 grad_factor
2617 ));
2618 Ok(Tensor { c_tensor: c_tensors[0] })
2619 }
2620
2621 pub fn f_internal_fake_quantize_learnable_per_tensor_affine(
2622 &self,
2623 scale: &Tensor,
2624 zero_point: &Tensor,
2625 quant_min: i64,
2626 quant_max: i64,
2627 grad_factor: f64,
2628 ) -> Result<Tensor, TchError> {
2629 let mut c_tensors = [std::ptr::null_mut(); 1];
2630 unsafe_torch_err!(atg__fake_quantize_learnable_per_tensor_affine(
2631 c_tensors.as_mut_ptr(),
2632 self.c_tensor,
2633 scale.c_tensor,
2634 zero_point.c_tensor,
2635 quant_min,
2636 quant_max,
2637 grad_factor
2638 ));
2639 Ok(Tensor { c_tensor: c_tensors[0] })
2640 }
2641
2642 pub fn f_internal_fake_quantize_learnable_per_tensor_affine_backward(
2643 &self,
2644 grad: &Tensor,
2645 scale: &Tensor,
2646 zero_point: &Tensor,
2647 quant_min: i64,
2648 quant_max: i64,
2649 grad_factor: f64,
2650 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
2651 let mut c_tensors = [std::ptr::null_mut(); 3];
2652 unsafe_torch_err!(atg__fake_quantize_learnable_per_tensor_affine_backward(
2653 c_tensors.as_mut_ptr(),
2654 grad.c_tensor,
2655 self.c_tensor,
2656 scale.c_tensor,
2657 zero_point.c_tensor,
2658 quant_min,
2659 quant_max,
2660 grad_factor
2661 ));
2662 Ok((
2663 Tensor { c_tensor: c_tensors[0] },
2664 Tensor { c_tensor: c_tensors[1] },
2665 Tensor { c_tensor: c_tensors[2] },
2666 ))
2667 }
2668
2669 pub fn f_internal_fake_quantize_learnable_per_tensor_affine_out(
2670 &self,
2671 out: &Tensor,
2672 scale: &Tensor,
2673 zero_point: &Tensor,
2674 quant_min: i64,
2675 quant_max: i64,
2676 grad_factor: f64,
2677 ) -> Result<Tensor, TchError> {
2678 let mut c_tensors = [std::ptr::null_mut(); 1];
2679 unsafe_torch_err!(atg__fake_quantize_learnable_per_tensor_affine_out(
2680 c_tensors.as_mut_ptr(),
2681 out.c_tensor,
2682 self.c_tensor,
2683 scale.c_tensor,
2684 zero_point.c_tensor,
2685 quant_min,
2686 quant_max,
2687 grad_factor
2688 ));
2689 Ok(Tensor { c_tensor: c_tensors[0] })
2690 }
2691
2692 pub fn f_internal_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(
2693 &self,
2694 scale: &Tensor,
2695 zero_point: &Tensor,
2696 fake_quant_enabled: &Tensor,
2697 quant_min: i64,
2698 quant_max: i64,
2699 ) -> Result<(Tensor, Tensor), TchError> {
2700 let mut c_tensors = [std::ptr::null_mut(); 2];
2701 unsafe_torch_err!(atg__fake_quantize_per_tensor_affine_cachemask_tensor_qparams(
2702 c_tensors.as_mut_ptr(),
2703 self.c_tensor,
2704 scale.c_tensor,
2705 zero_point.c_tensor,
2706 fake_quant_enabled.c_tensor,
2707 quant_min,
2708 quant_max
2709 ));
2710 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
2711 }
2712
2713 pub fn f_internal_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(
2714 &self,
2715 out0: &Tensor,
2716 out1: &Tensor,
2717 scale: &Tensor,
2718 zero_point: &Tensor,
2719 fake_quant_enabled: &Tensor,
2720 quant_min: i64,
2721 quant_max: i64,
2722 ) -> Result<(Tensor, Tensor), TchError> {
2723 let mut c_tensors = [std::ptr::null_mut(); 2];
2724 unsafe_torch_err!(atg__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(
2725 c_tensors.as_mut_ptr(),
2726 out0.c_tensor,
2727 out1.c_tensor,
2728 self.c_tensor,
2729 scale.c_tensor,
2730 zero_point.c_tensor,
2731 fake_quant_enabled.c_tensor,
2732 quant_min,
2733 quant_max
2734 ));
2735 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
2736 }
2737
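    /// The `_fft_c2c` / `_fft_c2r` / `_fft_r2c` methods below are the raw FFT
    /// primitives behind the public `fft` namespace: complex-to-complex,
    /// complex-to-real and real-to-complex transforms over the listed `dim`s.
    /// `normalization` is ATen's integer scaling mode; `forward`, `onesided`
    /// and `last_dim_size` have the usual FFT meanings.
    ///
    /// A minimal sketch of the real-to-complex case, assuming a 1-D float
    /// signal and taking normalization code 0 to mean "no scaling":
    ///
    /// ```ignore
    /// let signal = Tensor::randn(&[1024], (Kind::Float, Device::Cpu));
    /// // One-sided R2C transform over dimension 0.
    /// let spectrum = signal.f_internal_fft_r2c(vec![0i64], 0, true)?;
    /// ```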
2738 pub fn f_internal_fft_c2c(
2739 &self,
2740 dim: impl IntList,
2741 normalization: i64,
2742 forward: bool,
2743 ) -> Result<Tensor, TchError> {
2744 let mut c_tensors = [std::ptr::null_mut(); 1];
2745 unsafe_torch_err!(atg__fft_c2c(
2746 c_tensors.as_mut_ptr(),
2747 self.c_tensor,
2748 dim.as_ptr(),
2749 dim.len_i32(),
2750 normalization,
2751 if forward { 1 } else { 0 }
2752 ));
2753 Ok(Tensor { c_tensor: c_tensors[0] })
2754 }
2755
2756 pub fn f_internal_fft_c2c_out(
2757 &self,
2758 out: &Tensor,
2759 dim: impl IntList,
2760 normalization: i64,
2761 forward: bool,
2762 ) -> Result<Tensor, TchError> {
2763 let mut c_tensors = [std::ptr::null_mut(); 1];
2764 unsafe_torch_err!(atg__fft_c2c_out(
2765 c_tensors.as_mut_ptr(),
2766 out.c_tensor,
2767 self.c_tensor,
2768 dim.as_ptr(),
2769 dim.len_i32(),
2770 normalization,
2771 if forward { 1 } else { 0 }
2772 ));
2773 Ok(Tensor { c_tensor: c_tensors[0] })
2774 }
2775
2776 pub fn f_internal_fft_c2r(
2777 &self,
2778 dim: impl IntList,
2779 normalization: i64,
2780 last_dim_size: i64,
2781 ) -> Result<Tensor, TchError> {
2782 let mut c_tensors = [std::ptr::null_mut(); 1];
2783 unsafe_torch_err!(atg__fft_c2r(
2784 c_tensors.as_mut_ptr(),
2785 self.c_tensor,
2786 dim.as_ptr(),
2787 dim.len_i32(),
2788 normalization,
2789 last_dim_size
2790 ));
2791 Ok(Tensor { c_tensor: c_tensors[0] })
2792 }
2793
2794 pub fn f_internal_fft_c2r_out(
2795 &self,
2796 out: &Tensor,
2797 dim: impl IntList,
2798 normalization: i64,
2799 last_dim_size: i64,
2800 ) -> Result<Tensor, TchError> {
2801 let mut c_tensors = [std::ptr::null_mut(); 1];
2802 unsafe_torch_err!(atg__fft_c2r_out(
2803 c_tensors.as_mut_ptr(),
2804 out.c_tensor,
2805 self.c_tensor,
2806 dim.as_ptr(),
2807 dim.len_i32(),
2808 normalization,
2809 last_dim_size
2810 ));
2811 Ok(Tensor { c_tensor: c_tensors[0] })
2812 }
2813
2814 pub fn f_internal_fft_r2c(
2815 &self,
2816 dim: impl IntList,
2817 normalization: i64,
2818 onesided: bool,
2819 ) -> Result<Tensor, TchError> {
2820 let mut c_tensors = [std::ptr::null_mut(); 1];
2821 unsafe_torch_err!(atg__fft_r2c(
2822 c_tensors.as_mut_ptr(),
2823 self.c_tensor,
2824 dim.as_ptr(),
2825 dim.len_i32(),
2826 normalization,
2827 if onesided { 1 } else { 0 }
2828 ));
2829 Ok(Tensor { c_tensor: c_tensors[0] })
2830 }
2831
2832 pub fn f_internal_fft_r2c_out(
2833 &self,
2834 out: &Tensor,
2835 dim: impl IntList,
2836 normalization: i64,
2837 onesided: bool,
2838 ) -> Result<Tensor, TchError> {
2839 let mut c_tensors = [std::ptr::null_mut(); 1];
2840 unsafe_torch_err!(atg__fft_r2c_out(
2841 c_tensors.as_mut_ptr(),
2842 out.c_tensor,
2843 self.c_tensor,
2844 dim.as_ptr(),
2845 dim.len_i32(),
2846 normalization,
2847 if onesided { 1 } else { 0 }
2848 ));
2849 Ok(Tensor { c_tensor: c_tensors[0] })
2850 }
2851
2852 pub fn f_internal_fill_mem_eff_dropout_mask_(
2853 &mut self,
2854 dropout_p: f64,
2855 seed: i64,
2856 offset: i64,
2857 ) -> Result<Tensor, TchError> {
2858 let mut c_tensors = [std::ptr::null_mut(); 1];
2859 unsafe_torch_err!(atg__fill_mem_eff_dropout_mask_(
2860 c_tensors.as_mut_ptr(),
2861 self.c_tensor,
2862 dropout_p,
2863 seed,
2864 offset
2865 ));
2866 Ok(Tensor { c_tensor: c_tensors[0] })
2867 }
2868
2869 pub fn f_internal_flash_attention_backward(
2870 grad_out: &Tensor,
2871 query: &Tensor,
2872 key: &Tensor,
2873 value: &Tensor,
2874 out: &Tensor,
2875 logsumexp: &Tensor,
2876 cum_seq_q: &Tensor,
2877 cum_seq_k: &Tensor,
2878 max_q: i64,
2879 max_k: i64,
2880 dropout_p: f64,
2881 is_causal: bool,
2882 philox_seed: &Tensor,
2883 philox_offset: &Tensor,
2884 scale: impl Into<Option<f64>>,
2885 window_size_left: impl Into<Option<i64>>,
2886 window_size_right: impl Into<Option<i64>>,
2887 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
2888 let scale = scale.into();
2889 let window_size_left = window_size_left.into();
2890 let window_size_right = window_size_right.into();
2891 let mut c_tensors = [std::ptr::null_mut(); 3];
2892 unsafe_torch_err!(atg__flash_attention_backward(
2893 c_tensors.as_mut_ptr(),
2894 grad_out.c_tensor,
2895 query.c_tensor,
2896 key.c_tensor,
2897 value.c_tensor,
2898 out.c_tensor,
2899 logsumexp.c_tensor,
2900 cum_seq_q.c_tensor,
2901 cum_seq_k.c_tensor,
2902 max_q,
2903 max_k,
2904 dropout_p,
2905 if is_causal { 1 } else { 0 },
2906 philox_seed.c_tensor,
2907 philox_offset.c_tensor,
2908 scale.unwrap_or(std::f64::NAN),
2909 scale.is_none() as i8,
2910 window_size_left.unwrap_or(0i64),
2911 window_size_left.is_none() as i8,
2912 window_size_right.unwrap_or(0i64),
2913 window_size_right.is_none() as i8
2914 ));
2915 Ok((
2916 Tensor { c_tensor: c_tensors[0] },
2917 Tensor { c_tensor: c_tensors[1] },
2918 Tensor { c_tensor: c_tensors[2] },
2919 ))
2920 }
2921
2922 pub fn f_internal_foobar(
2923 &self,
2924 arg1: bool,
2925 arg2: bool,
2926 arg3: bool,
2927 ) -> Result<Tensor, TchError> {
2928 let mut c_tensors = [std::ptr::null_mut(); 1];
2929 unsafe_torch_err!(atg__foobar(
2930 c_tensors.as_mut_ptr(),
2931 self.c_tensor,
2932 if arg1 { 1 } else { 0 },
2933 if arg2 { 1 } else { 0 },
2934 if arg3 { 1 } else { 0 }
2935 ));
2936 Ok(Tensor { c_tensor: c_tensors[0] })
2937 }
2938
2939 pub fn f_internal_foobar_out(
2940 &self,
2941 out: &Tensor,
2942 arg1: bool,
2943 arg2: bool,
2944 arg3: bool,
2945 ) -> Result<Tensor, TchError> {
2946 let mut c_tensors = [std::ptr::null_mut(); 1];
2947 unsafe_torch_err!(atg__foobar_out(
2948 c_tensors.as_mut_ptr(),
2949 out.c_tensor,
2950 self.c_tensor,
2951 if arg1 { 1 } else { 0 },
2952 if arg2 { 1 } else { 0 },
2953 if arg3 { 1 } else { 0 }
2954 ));
2955 Ok(Tensor { c_tensor: c_tensors[0] })
2956 }
2957
2958 pub fn f_internal_functional_assert_async(
2959 &self,
2960 assert_msg: &str,
2961 dep_token: &Tensor,
2962 ) -> Result<Tensor, TchError> {
2963 let mut c_tensors = [std::ptr::null_mut(); 1];
2964 unsafe_torch_err!(atg__functional_assert_async(
2965 c_tensors.as_mut_ptr(),
2966 self.c_tensor,
2967 assert_msg.as_ptr(),
2968 assert_msg.len() as i32,
2969 dep_token.c_tensor
2970 ));
2971 Ok(Tensor { c_tensor: c_tensors[0] })
2972 }
2973
2974 pub fn f_internal_functional_assert_scalar<S: Into<Scalar>>(
2975 self_scalar: S,
2976 assert_msg: &str,
2977 dep_token: &Tensor,
2978 ) -> Result<Tensor, TchError> {
2979 let mut c_tensors = [std::ptr::null_mut(); 1];
2980 unsafe_torch_err!(atg__functional_assert_scalar(
2981 c_tensors.as_mut_ptr(),
2982 self_scalar.into().c_scalar,
2983 assert_msg.as_ptr(),
2984 assert_msg.len() as i32,
2985 dep_token.c_tensor
2986 ));
2987 Ok(Tensor { c_tensor: c_tensors[0] })
2988 }
2989
2990 pub fn f_internal_functional_sym_constrain_range<S: Into<Scalar>>(
2991 size: S,
2992 min: impl Into<Option<i64>>,
2993 max: impl Into<Option<i64>>,
2994 dep_token: &Tensor,
2995 ) -> Result<Tensor, TchError> {
2996 let min = min.into();
2997 let max = max.into();
2998 let mut c_tensors = [std::ptr::null_mut(); 1];
2999 unsafe_torch_err!(atg__functional_sym_constrain_range(
3000 c_tensors.as_mut_ptr(),
3001 size.into().c_scalar,
3002 min.unwrap_or(0i64),
3003 min.is_none() as i8,
3004 max.unwrap_or(0i64),
3005 max.is_none() as i8,
3006 dep_token.c_tensor
3007 ));
3008 Ok(Tensor { c_tensor: c_tensors[0] })
3009 }
3010
3011 pub fn f_internal_functional_sym_constrain_range_for_size<S: Into<Scalar>>(
3012 size: S,
3013 min: impl Into<Option<i64>>,
3014 max: impl Into<Option<i64>>,
3015 dep_token: &Tensor,
3016 ) -> Result<Tensor, TchError> {
3017 let min = min.into();
3018 let max = max.into();
3019 let mut c_tensors = [std::ptr::null_mut(); 1];
3020 unsafe_torch_err!(atg__functional_sym_constrain_range_for_size(
3021 c_tensors.as_mut_ptr(),
3022 size.into().c_scalar,
3023 min.unwrap_or(0i64),
3024 min.is_none() as i8,
3025 max.unwrap_or(0i64),
3026 max.is_none() as i8,
3027 dep_token.c_tensor
3028 ));
3029 Ok(Tensor { c_tensor: c_tensors[0] })
3030 }
3031
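    /// Fused dropout primitive returning both the resulting tensor and the
    /// sampled mask. Note: PyTorch's CUDA dropout path calls the underlying
    /// `_fused_dropout` kernel with the keep probability (`1 - p_drop`);
    /// verify that convention against the libtorch version in use before
    /// relying on the meaning of `p` here.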
3032 pub fn f_internal_fused_dropout(&self, p: f64) -> Result<(Tensor, Tensor), TchError> {
3033 let mut c_tensors = [std::ptr::null_mut(); 2];
3034 unsafe_torch_err!(atg__fused_dropout(c_tensors.as_mut_ptr(), self.c_tensor, p));
3035 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
3036 }
3037
3038 pub fn f_internal_fused_dropout_out(
3039 &self,
3040 out0: &Tensor,
3041 out1: &Tensor,
3042 p: f64,
3043 ) -> Result<(Tensor, Tensor), TchError> {
3044 let mut c_tensors = [std::ptr::null_mut(); 2];
3045 unsafe_torch_err!(atg__fused_dropout_out(
3046 c_tensors.as_mut_ptr(),
3047 out0.c_tensor,
3048 out1.c_tensor,
3049 self.c_tensor,
3050 p
3051 ));
3052 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
3053 }
3054
3055 pub fn f_internal_fused_moving_avg_obs_fq_helper(
3056 &self,
3057 observer_on: &Tensor,
3058 fake_quant_on: &Tensor,
3059 running_min: &Tensor,
3060 running_max: &Tensor,
3061 scale: &Tensor,
3062 zero_point: &Tensor,
3063 averaging_const: f64,
3064 quant_min: i64,
3065 quant_max: i64,
3066 ch_axis: i64,
3067 per_row_fake_quant: bool,
3068 symmetric_quant: bool,
3069 ) -> Result<(Tensor, Tensor), TchError> {
3070 let mut c_tensors = [std::ptr::null_mut(); 2];
3071 unsafe_torch_err!(atg__fused_moving_avg_obs_fq_helper(
3072 c_tensors.as_mut_ptr(),
3073 self.c_tensor,
3074 observer_on.c_tensor,
3075 fake_quant_on.c_tensor,
3076 running_min.c_tensor,
3077 running_max.c_tensor,
3078 scale.c_tensor,
3079 zero_point.c_tensor,
3080 averaging_const,
3081 quant_min,
3082 quant_max,
3083 ch_axis,
3084 if per_row_fake_quant { 1 } else { 0 },
3085 if symmetric_quant { 1 } else { 0 }
3086 ));
3087 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
3088 }
3089
3090 pub fn f_internal_fused_moving_avg_obs_fq_helper_functional(
3091 &self,
3092 observer_on: &Tensor,
3093 fake_quant_on: &Tensor,
3094 running_min: &Tensor,
3095 running_max: &Tensor,
3096 scale: &Tensor,
3097 zero_point: &Tensor,
3098 averaging_const: f64,
3099 quant_min: i64,
3100 quant_max: i64,
3101 ch_axis: i64,
3102 per_row_fake_quant: bool,
3103 symmetric_quant: bool,
3104 ) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
3105 let mut c_tensors = [std::ptr::null_mut(); 6];
3106 unsafe_torch_err!(atg__fused_moving_avg_obs_fq_helper_functional(
3107 c_tensors.as_mut_ptr(),
3108 self.c_tensor,
3109 observer_on.c_tensor,
3110 fake_quant_on.c_tensor,
3111 running_min.c_tensor,
3112 running_max.c_tensor,
3113 scale.c_tensor,
3114 zero_point.c_tensor,
3115 averaging_const,
3116 quant_min,
3117 quant_max,
3118 ch_axis,
3119 if per_row_fake_quant { 1 } else { 0 },
3120 if symmetric_quant { 1 } else { 0 }
3121 ));
3122 Ok((
3123 Tensor { c_tensor: c_tensors[0] },
3124 Tensor { c_tensor: c_tensors[1] },
3125 Tensor { c_tensor: c_tensors[2] },
3126 Tensor { c_tensor: c_tensors[3] },
3127 Tensor { c_tensor: c_tensors[4] },
3128 Tensor { c_tensor: c_tensors[5] },
3129 ))
3130 }
3131
3132 pub fn f_internal_fused_moving_avg_obs_fq_helper_out(
3133 &self,
3134 out0: &Tensor,
3135 out1: &Tensor,
3136 observer_on: &Tensor,
3137 fake_quant_on: &Tensor,
3138 running_min: &Tensor,
3139 running_max: &Tensor,
3140 scale: &Tensor,
3141 zero_point: &Tensor,
3142 averaging_const: f64,
3143 quant_min: i64,
3144 quant_max: i64,
3145 ch_axis: i64,
3146 per_row_fake_quant: bool,
3147 symmetric_quant: bool,
3148 ) -> Result<(Tensor, Tensor), TchError> {
3149 let mut c_tensors = [std::ptr::null_mut(); 2];
3150 unsafe_torch_err!(atg__fused_moving_avg_obs_fq_helper_out(
3151 c_tensors.as_mut_ptr(),
3152 out0.c_tensor,
3153 out1.c_tensor,
3154 self.c_tensor,
3155 observer_on.c_tensor,
3156 fake_quant_on.c_tensor,
3157 running_min.c_tensor,
3158 running_max.c_tensor,
3159 scale.c_tensor,
3160 zero_point.c_tensor,
3161 averaging_const,
3162 quant_min,
3163 quant_max,
3164 ch_axis,
3165 if per_row_fake_quant { 1 } else { 0 },
3166 if symmetric_quant { 1 } else { 0 }
3167 ));
3168 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
3169 }
3170
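    /// Queries which scaled-dot-product-attention backend ATen would pick for
    /// the given inputs (flash, memory-efficient, math, ...). The returned
    /// `i64` is the backend enum value used inside PyTorch; this binding does
    /// not re-export that enum, so look the mapping up in the PyTorch sources
    /// for the matching libtorch version.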
3171 pub fn f_internal_fused_sdp_choice<T: Borrow<Tensor>>(
3172 query: &Tensor,
3173 key: &Tensor,
3174 value: &Tensor,
3175 attn_mask: Option<T>,
3176 dropout_p: f64,
3177 is_causal: bool,
3178 scale: impl Into<Option<f64>>,
3179 enable_gqa: bool,
3180 ) -> Result<i64, TchError> {
3181 let scale = scale.into();
3182 let return_;
3183 unsafe_torch_err!(
3184 return_ = atg__fused_sdp_choice(
3185 query.c_tensor,
3186 key.c_tensor,
3187 value.c_tensor,
3188 attn_mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
3189 dropout_p,
3190 if is_causal { 1 } else { 0 },
3191 scale.unwrap_or(std::f64::NAN),
3192 scale.is_none() as i8,
3193 if enable_gqa { 1 } else { 0 }
3194 )
3195 );
3196 Ok(return_)
3197 }
3198
3199 pub fn f_internal_fw_primal(&self, level: i64) -> Result<Tensor, TchError> {
3200 let mut c_tensors = [std::ptr::null_mut(); 1];
3201 unsafe_torch_err!(atg__fw_primal(c_tensors.as_mut_ptr(), self.c_tensor, level));
3202 Ok(Tensor { c_tensor: c_tensors[0] })
3203 }
3204
3205 pub fn f_internal_fw_primal_copy(&self, level: i64) -> Result<Tensor, TchError> {
3206 let mut c_tensors = [std::ptr::null_mut(); 1];
3207 unsafe_torch_err!(atg__fw_primal_copy(c_tensors.as_mut_ptr(), self.c_tensor, level));
3208 Ok(Tensor { c_tensor: c_tensors[0] })
3209 }
3210
3211 pub fn f_internal_fw_primal_copy_out(
3212 &self,
3213 out: &Tensor,
3214 level: i64,
3215 ) -> Result<Tensor, TchError> {
3216 let mut c_tensors = [std::ptr::null_mut(); 1];
3217 unsafe_torch_err!(atg__fw_primal_copy_out(
3218 c_tensors.as_mut_ptr(),
3219 out.c_tensor,
3220 self.c_tensor,
3221 level
3222 ));
3223 Ok(Tensor { c_tensor: c_tensors[0] })
3224 }
3225
3226 pub fn f_internal_gather_sparse_backward(
3227 &self,
3228 dim: i64,
3229 index: &Tensor,
3230 grad: &Tensor,
3231 ) -> Result<Tensor, TchError> {
3232 let mut c_tensors = [std::ptr::null_mut(); 1];
3233 unsafe_torch_err!(atg__gather_sparse_backward(
3234 c_tensors.as_mut_ptr(),
3235 self.c_tensor,
3236 dim,
3237 index.c_tensor,
3238 grad.c_tensor
3239 ));
3240 Ok(Tensor { c_tensor: c_tensors[0] })
3241 }
3242
3243 pub fn f_internal_grid_sampler_2d_cpu_fallback(
3244 &self,
3245 grid: &Tensor,
3246 interpolation_mode: i64,
3247 padding_mode: i64,
3248 align_corners: bool,
3249 ) -> Result<Tensor, TchError> {
3250 let mut c_tensors = [std::ptr::null_mut(); 1];
3251 unsafe_torch_err!(atg__grid_sampler_2d_cpu_fallback(
3252 c_tensors.as_mut_ptr(),
3253 self.c_tensor,
3254 grid.c_tensor,
3255 interpolation_mode,
3256 padding_mode,
3257 if align_corners { 1 } else { 0 }
3258 ));
3259 Ok(Tensor { c_tensor: c_tensors[0] })
3260 }
3261
3262 pub fn f_internal_grid_sampler_2d_cpu_fallback_backward(
3263 &self,
3264 grad_output: &Tensor,
3265 grid: &Tensor,
3266 interpolation_mode: i64,
3267 padding_mode: i64,
3268 align_corners: bool,
3269 ) -> Result<(Tensor, Tensor), TchError> {
3270 let mut c_tensors = [std::ptr::null_mut(); 2];
3271 unsafe_torch_err!(atg__grid_sampler_2d_cpu_fallback_backward(
3272 c_tensors.as_mut_ptr(),
3273 grad_output.c_tensor,
3274 self.c_tensor,
3275 grid.c_tensor,
3276 interpolation_mode,
3277 padding_mode,
3278 if align_corners { 1 } else { 0 }
3279 ));
3280 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
3281 }
3282
3283 pub fn f_internal_grid_sampler_2d_cpu_fallback_out(
3284 &self,
3285 out: &Tensor,
3286 grid: &Tensor,
3287 interpolation_mode: i64,
3288 padding_mode: i64,
3289 align_corners: bool,
3290 ) -> Result<Tensor, TchError> {
3291 let mut c_tensors = [std::ptr::null_mut(); 1];
3292 unsafe_torch_err!(atg__grid_sampler_2d_cpu_fallback_out(
3293 c_tensors.as_mut_ptr(),
3294 out.c_tensor,
3295 self.c_tensor,
3296 grid.c_tensor,
3297 interpolation_mode,
3298 padding_mode,
3299 if align_corners { 1 } else { 0 }
3300 ));
3301 Ok(Tensor { c_tensor: c_tensors[0] })
3302 }
3303
3304 pub fn f_internal_has_compatible_shallow_copy_type(
3305 &self,
3306 from: &Tensor,
3307 ) -> Result<bool, TchError> {
3308 let return_;
3309 unsafe_torch_err!(
3310 return_ = atg__has_compatible_shallow_copy_type(self.c_tensor, from.c_tensor)
3311 );
3312 Ok(return_ != 0)
3313 }
3314
3315 pub fn f_internal_has_same_storage_numel(&self, other: &Tensor) -> Result<bool, TchError> {
3316 let return_;
3317 unsafe_torch_err!(return_ = atg__has_same_storage_numel(self.c_tensor, other.c_tensor));
3318 Ok(return_ != 0)
3319 }
3320
3321 pub fn f_internal_histogramdd_bin_edges<T: Borrow<Tensor>>(
3322 &self,
3323 bins: impl IntList,
3324 range: impl DoubleList,
3325 weight: Option<T>,
3326 density: bool,
3327 ) -> Result<Vec<Tensor>, TchError> {
3328 let c_tensors = unsafe_torch_err!(atg__histogramdd_bin_edges(
3329 self.c_tensor,
3330 bins.as_ptr(),
3331 bins.len_i32(),
3332 range.as_ptr(),
3333 range.len_i32(),
3334 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
3335 if density { 1 } else { 0 }
3336 ));
3337 let mut r__ = vec![];
3338 let mut i = 0;
3339 loop {
3340 let c__ = unsafe { *c_tensors.add(i) };
3341 if c__.is_null() {
3342 break;
3343 }
3344 r__.push(Tensor { c_tensor: c__ });
3345 i += 1;
3346 }
3347 unsafe { libc::free(c_tensors as *mut libc::c_void) }
3348 Ok(r__)
3349 }
3350
3351 pub fn f_internal_histogramdd_bin_edges_out<T: Borrow<Tensor>>(
3352 &self,
3353 out: &[T],
3354 bins: impl IntList,
3355 range: impl DoubleList,
3356 weight: Option<T>,
3357 density: bool,
3358 ) -> Result<(), TchError> {
3359 unsafe_torch_err!(atg__histogramdd_bin_edges_out(
3360 ptr_list(out).as_ptr(),
3361 out.len() as i32,
3362 self.c_tensor,
3363 bins.as_ptr(),
3364 bins.len_i32(),
3365 range.as_ptr(),
3366 range.len_i32(),
3367 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
3368 if density { 1 } else { 0 }
3369 ));
3370 Ok(())
3371 }
3372
3373 pub fn f_internal_histogramdd_from_bin_cts<T: Borrow<Tensor>>(
3374 &self,
3375 bins: impl IntList,
3376 range: impl DoubleList,
3377 weight: Option<T>,
3378 density: bool,
3379 ) -> Result<Tensor, TchError> {
3380 let mut c_tensors = [std::ptr::null_mut(); 1];
3381 unsafe_torch_err!(atg__histogramdd_from_bin_cts(
3382 c_tensors.as_mut_ptr(),
3383 self.c_tensor,
3384 bins.as_ptr(),
3385 bins.len_i32(),
3386 range.as_ptr(),
3387 range.len_i32(),
3388 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
3389 if density { 1 } else { 0 }
3390 ));
3391 Ok(Tensor { c_tensor: c_tensors[0] })
3392 }
3393
3394 pub fn f_internal_histogramdd_from_bin_cts_out<T: Borrow<Tensor>>(
3395 &self,
3396 out: &Tensor,
3397 bins: impl IntList,
3398 range: impl DoubleList,
3399 weight: Option<T>,
3400 density: bool,
3401 ) -> Result<Tensor, TchError> {
3402 let mut c_tensors = [std::ptr::null_mut(); 1];
3403 unsafe_torch_err!(atg__histogramdd_from_bin_cts_out(
3404 c_tensors.as_mut_ptr(),
3405 out.c_tensor,
3406 self.c_tensor,
3407 bins.as_ptr(),
3408 bins.len_i32(),
3409 range.as_ptr(),
3410 range.len_i32(),
3411 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
3412 if density { 1 } else { 0 }
3413 ));
3414 Ok(Tensor { c_tensor: c_tensors[0] })
3415 }
3416
3417 pub fn f_internal_histogramdd_from_bin_tensors<T: Borrow<Tensor>>(
3418 &self,
3419 bins: &[T],
3420 weight: Option<T>,
3421 density: bool,
3422 ) -> Result<Tensor, TchError> {
3423 let mut c_tensors = [std::ptr::null_mut(); 1];
3424 unsafe_torch_err!(atg__histogramdd_from_bin_tensors(
3425 c_tensors.as_mut_ptr(),
3426 self.c_tensor,
3427 ptr_list(bins).as_ptr(),
3428 bins.len() as i32,
3429 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
3430 if density { 1 } else { 0 }
3431 ));
3432 Ok(Tensor { c_tensor: c_tensors[0] })
3433 }
3434
3435 pub fn f_internal_histogramdd_from_bin_tensors_out<T: Borrow<Tensor>>(
3436 &self,
3437 out: &Tensor,
3438 bins: &[T],
3439 weight: Option<T>,
3440 density: bool,
3441 ) -> Result<Tensor, TchError> {
3442 let mut c_tensors = [std::ptr::null_mut(); 1];
3443 unsafe_torch_err!(atg__histogramdd_from_bin_tensors_out(
3444 c_tensors.as_mut_ptr(),
3445 out.c_tensor,
3446 self.c_tensor,
3447 ptr_list(bins).as_ptr(),
3448 bins.len() as i32,
3449 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
3450 if density { 1 } else { 0 }
3451 ));
3452 Ok(Tensor { c_tensor: c_tensors[0] })
3453 }
3454
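    /// Internal implementation behind `index_put`: `indices` may contain
    /// `None` entries to skip dimensions, `accumulate` adds into existing
    /// values instead of overwriting, and `unsafe_` skips some of the usual
    /// checking.
    ///
    /// A minimal sketch of the in-place variant, assuming 1-D tensors of
    /// matching kinds:
    ///
    /// ```ignore
    /// let mut t = Tensor::zeros(&[5], (Kind::Float, Device::Cpu));
    /// let idx = Tensor::arange(2, (Kind::Int64, Device::Cpu)); // indices [0, 1]
    /// let vals = Tensor::ones(&[2], (Kind::Float, Device::Cpu));
    /// let _ = t.f_internal_index_put_impl_(&[Some(&idx)], &vals, false, false)?;
    /// ```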
3455 pub fn f_internal_index_put_impl<T: Borrow<Tensor>>(
3456 &self,
3457 indices: &[Option<T>],
3458 values: &Tensor,
3459 accumulate: bool,
3460 unsafe_: bool,
3461 ) -> Result<Tensor, TchError> {
3462 let mut c_tensors = [std::ptr::null_mut(); 1];
3463 unsafe_torch_err!(atg__index_put_impl(
3464 c_tensors.as_mut_ptr(),
3465 self.c_tensor,
3466 ptr_list_opt(indices).as_ptr(),
3467 indices.len() as i32,
3468 values.c_tensor,
3469 if accumulate { 1 } else { 0 },
3470 if unsafe_ { 1 } else { 0 }
3471 ));
3472 Ok(Tensor { c_tensor: c_tensors[0] })
3473 }
3474
3475 pub fn f_internal_index_put_impl_<T: Borrow<Tensor>>(
3476 &mut self,
3477 indices: &[Option<T>],
3478 values: &Tensor,
3479 accumulate: bool,
3480 unsafe_: bool,
3481 ) -> Result<Tensor, TchError> {
3482 let mut c_tensors = [std::ptr::null_mut(); 1];
3483 unsafe_torch_err!(atg__index_put_impl_(
3484 c_tensors.as_mut_ptr(),
3485 self.c_tensor,
3486 ptr_list_opt(indices).as_ptr(),
3487 indices.len() as i32,
3488 values.c_tensor,
3489 if accumulate { 1 } else { 0 },
3490 if unsafe_ { 1 } else { 0 }
3491 ));
3492 Ok(Tensor { c_tensor: c_tensors[0] })
3493 }
3494
3495 pub fn f_internal_index_put_impl_out<T: Borrow<Tensor>>(
3496 &self,
3497 out: &Tensor,
3498 indices: &[Option<T>],
3499 values: &Tensor,
3500 accumulate: bool,
3501 unsafe_: bool,
3502 ) -> Result<Tensor, TchError> {
3503 let mut c_tensors = [std::ptr::null_mut(); 1];
3504 unsafe_torch_err!(atg__index_put_impl_out(
3505 c_tensors.as_mut_ptr(),
3506 out.c_tensor,
3507 self.c_tensor,
3508 ptr_list_opt(indices).as_ptr(),
3509 indices.len() as i32,
3510 values.c_tensor,
3511 if accumulate { 1 } else { 0 },
3512 if unsafe_ { 1 } else { 0 }
3513 ));
3514 Ok(Tensor { c_tensor: c_tensors[0] })
3515 }
3516
3517 pub fn f_internal_indices(&self) -> Result<Tensor, TchError> {
3518 let mut c_tensors = [std::ptr::null_mut(); 1];
3519 unsafe_torch_err!(atg__indices(c_tensors.as_mut_ptr(), self.c_tensor));
3520 Ok(Tensor { c_tensor: c_tensors[0] })
3521 }
3522
3523 pub fn f_internal_indices_copy(&self) -> Result<Tensor, TchError> {
3524 let mut c_tensors = [std::ptr::null_mut(); 1];
3525 unsafe_torch_err!(atg__indices_copy(c_tensors.as_mut_ptr(), self.c_tensor));
3526 Ok(Tensor { c_tensor: c_tensors[0] })
3527 }
3528
3529 pub fn f_internal_indices_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
3530 let mut c_tensors = [std::ptr::null_mut(); 1];
3531 unsafe_torch_err!(atg__indices_copy_out(
3532 c_tensors.as_mut_ptr(),
3533 out.c_tensor,
3534 self.c_tensor
3535 ));
3536 Ok(Tensor { c_tensor: c_tensors[0] })
3537 }
3538
3539 pub fn f_internal_int_mm(&self, mat2: &Tensor) -> Result<Tensor, TchError> {
3540 let mut c_tensors = [std::ptr::null_mut(); 1];
3541 unsafe_torch_err!(atg__int_mm(c_tensors.as_mut_ptr(), self.c_tensor, mat2.c_tensor));
3542 Ok(Tensor { c_tensor: c_tensors[0] })
3543 }
3544
3545 pub fn f_internal_int_mm_out(&self, out: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
3546 let mut c_tensors = [std::ptr::null_mut(); 1];
3547 unsafe_torch_err!(atg__int_mm_out(
3548 c_tensors.as_mut_ptr(),
3549 out.c_tensor,
3550 self.c_tensor,
3551 mat2.c_tensor
3552 ));
3553 Ok(Tensor { c_tensor: c_tensors[0] })
3554 }
3555
3556 pub fn f_internal_is_all_true(&self) -> Result<Tensor, TchError> {
3557 let mut c_tensors = [std::ptr::null_mut(); 1];
3558 unsafe_torch_err!(atg__is_all_true(c_tensors.as_mut_ptr(), self.c_tensor));
3559 Ok(Tensor { c_tensor: c_tensors[0] })
3560 }
3561
3562 pub fn f_internal_is_any_true(&self) -> Result<Tensor, TchError> {
3563 let mut c_tensors = [std::ptr::null_mut(); 1];
3564 unsafe_torch_err!(atg__is_any_true(c_tensors.as_mut_ptr(), self.c_tensor));
3565 Ok(Tensor { c_tensor: c_tensors[0] })
3566 }
3567
3568 pub fn f_internal_is_zerotensor(&self) -> Result<bool, TchError> {
3569 let return_;
3570 unsafe_torch_err!(return_ = atg__is_zerotensor(self.c_tensor));
3571 Ok(return_ != 0)
3572 }
3573
3574 pub fn f_internal_lazy_clone(&self) -> Result<Tensor, TchError> {
3575 let mut c_tensors = [std::ptr::null_mut(); 1];
3576 unsafe_torch_err!(atg__lazy_clone(c_tensors.as_mut_ptr(), self.c_tensor));
3577 Ok(Tensor { c_tensor: c_tensors[0] })
3578 }
3579
3580 pub fn f_internal_linalg_check_errors(
3581 info: &Tensor,
3582 api_name: &str,
3583 is_matrix: bool,
3584 ) -> Result<(), TchError> {
3585 unsafe_torch_err!(atg__linalg_check_errors(
3586 info.c_tensor,
3587 api_name.as_ptr(),
3588 api_name.len() as i32,
3589 if is_matrix { 1 } else { 0 }
3590 ));
3591 Ok(())
3592 }
3593
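    /// Determinant helper that also returns the LU factorization it computed:
    /// the result triple is `(det, LU, pivots)`, which lets callers reuse the
    /// factorization for a subsequent solve or slogdet.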
3594 pub fn f_internal_linalg_det(a: &Tensor) -> Result<(Tensor, Tensor, Tensor), TchError> {
3595 let mut c_tensors = [std::ptr::null_mut(); 3];
3596 unsafe_torch_err!(atg__linalg_det(c_tensors.as_mut_ptr(), a.c_tensor));
3597 Ok((
3598 Tensor { c_tensor: c_tensors[0] },
3599 Tensor { c_tensor: c_tensors[1] },
3600 Tensor { c_tensor: c_tensors[2] },
3601 ))
3602 }
3603
3604 pub fn f_internal_linalg_det_result(
3605 result: &Tensor,
3606 lu: &Tensor,
3607 pivots: &Tensor,
3608 a: &Tensor,
3609 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
3610 let mut c_tensors = [std::ptr::null_mut(); 3];
3611 unsafe_torch_err!(atg__linalg_det_result(
3612 c_tensors.as_mut_ptr(),
3613 result.c_tensor,
3614 lu.c_tensor,
3615 pivots.c_tensor,
3616 a.c_tensor
3617 ));
3618 Ok((
3619 Tensor { c_tensor: c_tensors[0] },
3620 Tensor { c_tensor: c_tensors[1] },
3621 Tensor { c_tensor: c_tensors[2] },
3622 ))
3623 }
3624
3625 pub fn f_internal_linalg_eigh(
3626 a: &Tensor,
3627 uplo: &str,
3628 compute_v: bool,
3629 ) -> Result<(Tensor, Tensor), TchError> {
3630 let mut c_tensors = [std::ptr::null_mut(); 2];
3631 unsafe_torch_err!(atg__linalg_eigh(
3632 c_tensors.as_mut_ptr(),
3633 a.c_tensor,
3634 uplo.as_ptr(),
3635 uplo.len() as i32,
3636 if compute_v { 1 } else { 0 }
3637 ));
3638 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
3639 }
3640
3641 pub fn f_internal_linalg_eigh_eigenvalues(
3642 eigenvalues: &Tensor,
3643 eigenvectors: &Tensor,
3644 a: &Tensor,
3645 uplo: &str,
3646 compute_v: bool,
3647 ) -> Result<(Tensor, Tensor), TchError> {
3648 let mut c_tensors = [std::ptr::null_mut(); 2];
3649 unsafe_torch_err!(atg__linalg_eigh_eigenvalues(
3650 c_tensors.as_mut_ptr(),
3651 eigenvalues.c_tensor,
3652 eigenvectors.c_tensor,
3653 a.c_tensor,
3654 uplo.as_ptr(),
3655 uplo.len() as i32,
3656 if compute_v { 1 } else { 0 }
3657 ));
3658 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
3659 }
3660
3661 pub fn f_internal_linalg_eigvals(&self) -> Result<Tensor, TchError> {
3662 let mut c_tensors = [std::ptr::null_mut(); 1];
3663 unsafe_torch_err!(atg__linalg_eigvals(c_tensors.as_mut_ptr(), self.c_tensor));
3664 Ok(Tensor { c_tensor: c_tensors[0] })
3665 }
3666
3667 pub fn f_internal_linalg_slogdet(
3668 a: &Tensor,
3669 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
3670 let mut c_tensors = [std::ptr::null_mut(); 4];
3671 unsafe_torch_err!(atg__linalg_slogdet(c_tensors.as_mut_ptr(), a.c_tensor));
3672 Ok((
3673 Tensor { c_tensor: c_tensors[0] },
3674 Tensor { c_tensor: c_tensors[1] },
3675 Tensor { c_tensor: c_tensors[2] },
3676 Tensor { c_tensor: c_tensors[3] },
3677 ))
3678 }
3679
3680 pub fn f_internal_linalg_slogdet_sign(
3681 sign: &Tensor,
3682 logabsdet: &Tensor,
3683 lu: &Tensor,
3684 pivots: &Tensor,
3685 a: &Tensor,
3686 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
3687 let mut c_tensors = [std::ptr::null_mut(); 4];
3688 unsafe_torch_err!(atg__linalg_slogdet_sign(
3689 c_tensors.as_mut_ptr(),
3690 sign.c_tensor,
3691 logabsdet.c_tensor,
3692 lu.c_tensor,
3693 pivots.c_tensor,
3694 a.c_tensor
3695 ));
3696 Ok((
3697 Tensor { c_tensor: c_tensors[0] },
3698 Tensor { c_tensor: c_tensors[1] },
3699 Tensor { c_tensor: c_tensors[2] },
3700 Tensor { c_tensor: c_tensors[3] },
3701 ))
3702 }
3703
3704 pub fn f_internal_linalg_solve_ex(
3705 a: &Tensor,
3706 b: &Tensor,
3707 left: bool,
3708 check_errors: bool,
3709 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
3710 let mut c_tensors = [std::ptr::null_mut(); 4];
3711 unsafe_torch_err!(atg__linalg_solve_ex(
3712 c_tensors.as_mut_ptr(),
3713 a.c_tensor,
3714 b.c_tensor,
3715 if left { 1 } else { 0 },
3716 if check_errors { 1 } else { 0 }
3717 ));
3718 Ok((
3719 Tensor { c_tensor: c_tensors[0] },
3720 Tensor { c_tensor: c_tensors[1] },
3721 Tensor { c_tensor: c_tensors[2] },
3722 Tensor { c_tensor: c_tensors[3] },
3723 ))
3724 }
3725
3726 pub fn f_internal_linalg_solve_ex_result(
3727 result: &Tensor,
3728 lu: &Tensor,
3729 pivots: &Tensor,
3730 info: &Tensor,
3731 a: &Tensor,
3732 b: &Tensor,
3733 left: bool,
3734 check_errors: bool,
3735 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
3736 let mut c_tensors = [std::ptr::null_mut(); 4];
3737 unsafe_torch_err!(atg__linalg_solve_ex_result(
3738 c_tensors.as_mut_ptr(),
3739 result.c_tensor,
3740 lu.c_tensor,
3741 pivots.c_tensor,
3742 info.c_tensor,
3743 a.c_tensor,
3744 b.c_tensor,
3745 if left { 1 } else { 0 },
3746 if check_errors { 1 } else { 0 }
3747 ));
3748 Ok((
3749 Tensor { c_tensor: c_tensors[0] },
3750 Tensor { c_tensor: c_tensors[1] },
3751 Tensor { c_tensor: c_tensors[2] },
3752 Tensor { c_tensor: c_tensors[3] },
3753 ))
3754 }
3755
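    /// SVD primitive returning `(U, S, Vh)`. When `compute_uv` is false the
    /// `U`/`Vh` outputs come back empty, and `driver` names the
    /// LAPACK/cuSOLVER driver (the generated binding passes it through as a
    /// plain string).
    ///
    /// A minimal sketch, assuming a square float matrix and an empty driver
    /// string to let the backend pick its default:
    ///
    /// ```ignore
    /// let a = Tensor::randn(&[3, 3], (Kind::Float, Device::Cpu));
    /// let (u, s, vh) = Tensor::f_internal_linalg_svd(&a, false, true, "")?;
    /// ```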
3756 pub fn f_internal_linalg_svd(
3757 a: &Tensor,
3758 full_matrices: bool,
3759 compute_uv: bool,
3760 driver: &str,
3761 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
3762 let mut c_tensors = [std::ptr::null_mut(); 3];
3763 unsafe_torch_err!(atg__linalg_svd(
3764 c_tensors.as_mut_ptr(),
3765 a.c_tensor,
3766 if full_matrices { 1 } else { 0 },
3767 if compute_uv { 1 } else { 0 },
3768 driver.as_ptr(),
3769 driver.len() as i32
3770 ));
3771 Ok((
3772 Tensor { c_tensor: c_tensors[0] },
3773 Tensor { c_tensor: c_tensors[1] },
3774 Tensor { c_tensor: c_tensors[2] },
3775 ))
3776 }
3777
3778 pub fn f_internal_linalg_svd_u(
3779 u: &Tensor,
3780 s: &Tensor,
3781 vh: &Tensor,
3782 a: &Tensor,
3783 full_matrices: bool,
3784 compute_uv: bool,
3785 driver: &str,
3786 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
3787 let mut c_tensors = [std::ptr::null_mut(); 3];
3788 unsafe_torch_err!(atg__linalg_svd_u(
3789 c_tensors.as_mut_ptr(),
3790 u.c_tensor,
3791 s.c_tensor,
3792 vh.c_tensor,
3793 a.c_tensor,
3794 if full_matrices { 1 } else { 0 },
3795 if compute_uv { 1 } else { 0 },
3796 driver.as_ptr(),
3797 driver.len() as i32
3798 ));
3799 Ok((
3800 Tensor { c_tensor: c_tensors[0] },
3801 Tensor { c_tensor: c_tensors[1] },
3802 Tensor { c_tensor: c_tensors[2] },
3803 ))
3804 }
3805
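    /// Raw log-softmax kernel behind `log_softmax`: reduces over `dim` and,
    /// when `half_to_float` is set, performs the computation in float even if
    /// the input is half precision.
    ///
    /// A minimal sketch:
    ///
    /// ```ignore
    /// let logits = Tensor::randn(&[2, 10], (Kind::Float, Device::Cpu));
    /// let log_probs = logits.f_internal_log_softmax(-1, false)?;
    /// ```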
3806 pub fn f_internal_log_softmax(
3807 &self,
3808 dim: i64,
3809 half_to_float: bool,
3810 ) -> Result<Tensor, TchError> {
3811 let mut c_tensors = [std::ptr::null_mut(); 1];
3812 unsafe_torch_err!(atg__log_softmax(
3813 c_tensors.as_mut_ptr(),
3814 self.c_tensor,
3815 dim,
3816 if half_to_float { 1 } else { 0 }
3817 ));
3818 Ok(Tensor { c_tensor: c_tensors[0] })
3819 }
3820
3821 pub fn f_internal_log_softmax_backward_data(
3822 grad_output: &Tensor,
3823 output: &Tensor,
3824 dim: i64,
3825 input_dtype: Kind,
3826 ) -> Result<Tensor, TchError> {
3827 let mut c_tensors = [std::ptr::null_mut(); 1];
3828 unsafe_torch_err!(atg__log_softmax_backward_data(
3829 c_tensors.as_mut_ptr(),
3830 grad_output.c_tensor,
3831 output.c_tensor,
3832 dim,
3833 input_dtype.c_int()
3834 ));
3835 Ok(Tensor { c_tensor: c_tensors[0] })
3836 }
3837
3838 pub fn f_internal_log_softmax_backward_data_out(
3839 out: &Tensor,
3840 grad_output: &Tensor,
3841 output: &Tensor,
3842 dim: i64,
3843 input_dtype: Kind,
3844 ) -> Result<Tensor, TchError> {
3845 let mut c_tensors = [std::ptr::null_mut(); 1];
3846 unsafe_torch_err!(atg__log_softmax_backward_data_out(
3847 c_tensors.as_mut_ptr(),
3848 out.c_tensor,
3849 grad_output.c_tensor,
3850 output.c_tensor,
3851 dim,
3852 input_dtype.c_int()
3853 ));
3854 Ok(Tensor { c_tensor: c_tensors[0] })
3855 }
3856
3857 pub fn f_internal_log_softmax_out(
3858 &self,
3859 out: &Tensor,
3860 dim: i64,
3861 half_to_float: bool,
3862 ) -> Result<Tensor, TchError> {
3863 let mut c_tensors = [std::ptr::null_mut(); 1];
3864 unsafe_torch_err!(atg__log_softmax_out(
3865 c_tensors.as_mut_ptr(),
3866 out.c_tensor,
3867 self.c_tensor,
3868 dim,
3869 if half_to_float { 1 } else { 0 }
3870 ));
3871 Ok(Tensor { c_tensor: c_tensors[0] })
3872 }
3873
3874 pub fn f_internal_logcumsumexp(&self, dim: i64) -> Result<Tensor, TchError> {
3875 let mut c_tensors = [std::ptr::null_mut(); 1];
3876 unsafe_torch_err!(atg__logcumsumexp(c_tensors.as_mut_ptr(), self.c_tensor, dim));
3877 Ok(Tensor { c_tensor: c_tensors[0] })
3878 }
3879
3880 pub fn f_internal_logcumsumexp_out(&self, out: &Tensor, dim: i64) -> Result<Tensor, TchError> {
3881 let mut c_tensors = [std::ptr::null_mut(); 1];
3882 unsafe_torch_err!(atg__logcumsumexp_out(
3883 c_tensors.as_mut_ptr(),
3884 out.c_tensor,
3885 self.c_tensor,
3886 dim
3887 ));
3888 Ok(Tensor { c_tensor: c_tensors[0] })
3889 }
3890
3891 pub fn f_internal_lstm_mps<T: Borrow<Tensor>>(
3892 &self,
3893 hx: &[T],
3894 params: &[T],
3895 has_biases: bool,
3896 num_layers: i64,
3897 dropout: f64,
3898 train: bool,
3899 bidirectional: bool,
3900 batch_first: bool,
3901 ) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
3902 let mut c_tensors = [std::ptr::null_mut(); 6];
3903 unsafe_torch_err!(atg__lstm_mps(
3904 c_tensors.as_mut_ptr(),
3905 self.c_tensor,
3906 ptr_list(hx).as_ptr(),
3907 hx.len() as i32,
3908 ptr_list(params).as_ptr(),
3909 params.len() as i32,
3910 if has_biases { 1 } else { 0 },
3911 num_layers,
3912 dropout,
3913 if train { 1 } else { 0 },
3914 if bidirectional { 1 } else { 0 },
3915 if batch_first { 1 } else { 0 }
3916 ));
3917 Ok((
3918 Tensor { c_tensor: c_tensors[0] },
3919 Tensor { c_tensor: c_tensors[1] },
3920 Tensor { c_tensor: c_tensors[2] },
3921 Tensor { c_tensor: c_tensors[3] },
3922 Tensor { c_tensor: c_tensors[4] },
3923 Tensor { c_tensor: c_tensors[5] },
3924 ))
3925 }
3926
3927 pub fn f_internal_lstm_mps_out<T: Borrow<Tensor>>(
3928 &self,
3929 out0: &Tensor,
3930 out1: &Tensor,
3931 out2: &Tensor,
3932 out3: &Tensor,
3933 out4: &Tensor,
3934 out5: &Tensor,
3935 hx: &[T],
3936 params: &[T],
3937 has_biases: bool,
3938 num_layers: i64,
3939 dropout: f64,
3940 train: bool,
3941 bidirectional: bool,
3942 batch_first: bool,
3943 ) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
3944 let mut c_tensors = [std::ptr::null_mut(); 6];
3945 unsafe_torch_err!(atg__lstm_mps_out(
3946 c_tensors.as_mut_ptr(),
3947 out0.c_tensor,
3948 out1.c_tensor,
3949 out2.c_tensor,
3950 out3.c_tensor,
3951 out4.c_tensor,
3952 out5.c_tensor,
3953 self.c_tensor,
3954 ptr_list(hx).as_ptr(),
3955 hx.len() as i32,
3956 ptr_list(params).as_ptr(),
3957 params.len() as i32,
3958 if has_biases { 1 } else { 0 },
3959 num_layers,
3960 dropout,
3961 if train { 1 } else { 0 },
3962 if bidirectional { 1 } else { 0 },
3963 if batch_first { 1 } else { 0 }
3964 ));
3965 Ok((
3966 Tensor { c_tensor: c_tensors[0] },
3967 Tensor { c_tensor: c_tensors[1] },
3968 Tensor { c_tensor: c_tensors[2] },
3969 Tensor { c_tensor: c_tensors[3] },
3970 Tensor { c_tensor: c_tensors[4] },
3971 Tensor { c_tensor: c_tensors[5] },
3972 ))
3973 }
3974
3975 pub fn f_internal_lu_with_info(
3976 &self,
3977 pivot: bool,
3978 check_errors: bool,
3979 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
3980 let mut c_tensors = [std::ptr::null_mut(); 3];
3981 unsafe_torch_err!(atg__lu_with_info(
3982 c_tensors.as_mut_ptr(),
3983 self.c_tensor,
3984 if pivot { 1 } else { 0 },
3985 if check_errors { 1 } else { 0 }
3986 ));
3987 Ok((
3988 Tensor { c_tensor: c_tensors[0] },
3989 Tensor { c_tensor: c_tensors[1] },
3990 Tensor { c_tensor: c_tensors[2] },
3991 ))
3992 }
3993
3994 pub fn f_internal_make_dep_token(options: (Kind, Device)) -> Result<Tensor, TchError> {
3995 let mut c_tensors = [std::ptr::null_mut(); 1];
3996 unsafe_torch_err!(atg__make_dep_token(
3997 c_tensors.as_mut_ptr(),
3998 options.0.c_int(),
3999 options.1.c_int()
4000 ));
4001 Ok(Tensor { c_tensor: c_tensors[0] })
4002 }
4003
4004 pub fn f_internal_make_dual(
4005 primal: &Tensor,
4006 tangent: &Tensor,
4007 level: i64,
4008 ) -> Result<Tensor, TchError> {
4009 let mut c_tensors = [std::ptr::null_mut(); 1];
4010 unsafe_torch_err!(atg__make_dual(
4011 c_tensors.as_mut_ptr(),
4012 primal.c_tensor,
4013 tangent.c_tensor,
4014 level
4015 ));
4016 Ok(Tensor { c_tensor: c_tensors[0] })
4017 }
4018
4019 pub fn f_internal_make_dual_copy(
4020 primal: &Tensor,
4021 tangent: &Tensor,
4022 level: i64,
4023 ) -> Result<Tensor, TchError> {
4024 let mut c_tensors = [std::ptr::null_mut(); 1];
4025 unsafe_torch_err!(atg__make_dual_copy(
4026 c_tensors.as_mut_ptr(),
4027 primal.c_tensor,
4028 tangent.c_tensor,
4029 level
4030 ));
4031 Ok(Tensor { c_tensor: c_tensors[0] })
4032 }
4033
4034 pub fn f_internal_make_dual_copy_out(
4035 out: &Tensor,
4036 primal: &Tensor,
4037 tangent: &Tensor,
4038 level: i64,
4039 ) -> Result<Tensor, TchError> {
4040 let mut c_tensors = [std::ptr::null_mut(); 1];
4041 unsafe_torch_err!(atg__make_dual_copy_out(
4042 c_tensors.as_mut_ptr(),
4043 out.c_tensor,
4044 primal.c_tensor,
4045 tangent.c_tensor,
4046 level
4047 ));
4048 Ok(Tensor { c_tensor: c_tensors[0] })
4049 }
4050
4051 pub fn f_internal_make_per_channel_quantized_tensor(
4052 &self,
4053 scale: &Tensor,
4054 zero_point: &Tensor,
4055 axis: i64,
4056 ) -> Result<Tensor, TchError> {
4057 let mut c_tensors = [std::ptr::null_mut(); 1];
4058 unsafe_torch_err!(atg__make_per_channel_quantized_tensor(
4059 c_tensors.as_mut_ptr(),
4060 self.c_tensor,
4061 scale.c_tensor,
4062 zero_point.c_tensor,
4063 axis
4064 ));
4065 Ok(Tensor { c_tensor: c_tensors[0] })
4066 }
4067
4068 pub fn f_internal_make_per_channel_quantized_tensor_out(
4069 &self,
4070 out: &Tensor,
4071 scale: &Tensor,
4072 zero_point: &Tensor,
4073 axis: i64,
4074 ) -> Result<Tensor, TchError> {
4075 let mut c_tensors = [std::ptr::null_mut(); 1];
4076 unsafe_torch_err!(atg__make_per_channel_quantized_tensor_out(
4077 c_tensors.as_mut_ptr(),
4078 out.c_tensor,
4079 self.c_tensor,
4080 scale.c_tensor,
4081 zero_point.c_tensor,
4082 axis
4083 ));
4084 Ok(Tensor { c_tensor: c_tensors[0] })
4085 }
4086
4087 pub fn f_internal_make_per_tensor_quantized_tensor(
4088 &self,
4089 scale: f64,
4090 zero_point: i64,
4091 ) -> Result<Tensor, TchError> {
4092 let mut c_tensors = [std::ptr::null_mut(); 1];
4093 unsafe_torch_err!(atg__make_per_tensor_quantized_tensor(
4094 c_tensors.as_mut_ptr(),
4095 self.c_tensor,
4096 scale,
4097 zero_point
4098 ));
4099 Ok(Tensor { c_tensor: c_tensors[0] })
4100 }
4101
4102 pub fn f_internal_make_per_tensor_quantized_tensor_out(
4103 &self,
4104 out: &Tensor,
4105 scale: f64,
4106 zero_point: i64,
4107 ) -> Result<Tensor, TchError> {
4108 let mut c_tensors = [std::ptr::null_mut(); 1];
4109 unsafe_torch_err!(atg__make_per_tensor_quantized_tensor_out(
4110 c_tensors.as_mut_ptr(),
4111 out.c_tensor,
4112 self.c_tensor,
4113 scale,
4114 zero_point
4115 ));
4116 Ok(Tensor { c_tensor: c_tensors[0] })
4117 }
4118
4119 pub fn f_internal_masked_scale(&self, mask: &Tensor, scale: f64) -> Result<Tensor, TchError> {
4120 let mut c_tensors = [std::ptr::null_mut(); 1];
4121 unsafe_torch_err!(atg__masked_scale(
4122 c_tensors.as_mut_ptr(),
4123 self.c_tensor,
4124 mask.c_tensor,
4125 scale
4126 ));
4127 Ok(Tensor { c_tensor: c_tensors[0] })
4128 }
4129
4130 pub fn f_internal_masked_scale_out(
4131 &self,
4132 out: &Tensor,
4133 mask: &Tensor,
4134 scale: f64,
4135 ) -> Result<Tensor, TchError> {
4136 let mut c_tensors = [std::ptr::null_mut(); 1];
4137 unsafe_torch_err!(atg__masked_scale_out(
4138 c_tensors.as_mut_ptr(),
4139 out.c_tensor,
4140 self.c_tensor,
4141 mask.c_tensor,
4142 scale
4143 ));
4144 Ok(Tensor { c_tensor: c_tensors[0] })
4145 }
4146
4147 pub fn f_internal_masked_softmax(
4148 &self,
4149 mask: &Tensor,
4150 dim: impl Into<Option<i64>>,
4151 mask_type: impl Into<Option<i64>>,
4152 ) -> Result<Tensor, TchError> {
4153 let dim = dim.into();
4154 let mask_type = mask_type.into();
4155 let mut c_tensors = [std::ptr::null_mut(); 1];
4156 unsafe_torch_err!(atg__masked_softmax(
4157 c_tensors.as_mut_ptr(),
4158 self.c_tensor,
4159 mask.c_tensor,
4160 dim.unwrap_or(0i64),
4161 dim.is_none() as i8,
4162 mask_type.unwrap_or(0i64),
4163 mask_type.is_none() as i8
4164 ));
4165 Ok(Tensor { c_tensor: c_tensors[0] })
4166 }
4167
4168 pub fn f_internal_masked_softmax_backward(
4169 grad_output: &Tensor,
4170 output: &Tensor,
4171 mask: &Tensor,
4172 dim: impl Into<Option<i64>>,
4173 ) -> Result<Tensor, TchError> {
4174 let dim = dim.into();
4175 let mut c_tensors = [std::ptr::null_mut(); 1];
4176 unsafe_torch_err!(atg__masked_softmax_backward(
4177 c_tensors.as_mut_ptr(),
4178 grad_output.c_tensor,
4179 output.c_tensor,
4180 mask.c_tensor,
4181 dim.unwrap_or(0i64),
4182 dim.is_none() as i8
4183 ));
4184 Ok(Tensor { c_tensor: c_tensors[0] })
4185 }
4186
4187 pub fn f_internal_masked_softmax_backward_out(
4188 out: &Tensor,
4189 grad_output: &Tensor,
4190 output: &Tensor,
4191 mask: &Tensor,
4192 dim: impl Into<Option<i64>>,
4193 ) -> Result<Tensor, TchError> {
4194 let dim = dim.into();
4195 let mut c_tensors = [std::ptr::null_mut(); 1];
4196 unsafe_torch_err!(atg__masked_softmax_backward_out(
4197 c_tensors.as_mut_ptr(),
4198 out.c_tensor,
4199 grad_output.c_tensor,
4200 output.c_tensor,
4201 mask.c_tensor,
4202 dim.unwrap_or(0i64),
4203 dim.is_none() as i8
4204 ));
4205 Ok(Tensor { c_tensor: c_tensors[0] })
4206 }
4207
4208 pub fn f_internal_masked_softmax_out(
4209 &self,
4210 out: &Tensor,
4211 mask: &Tensor,
4212 dim: impl Into<Option<i64>>,
4213 mask_type: impl Into<Option<i64>>,
4214 ) -> Result<Tensor, TchError> {
4215 let dim = dim.into();
4216 let mask_type = mask_type.into();
4217 let mut c_tensors = [std::ptr::null_mut(); 1];
4218 unsafe_torch_err!(atg__masked_softmax_out(
4219 c_tensors.as_mut_ptr(),
4220 out.c_tensor,
4221 self.c_tensor,
4222 mask.c_tensor,
4223 dim.unwrap_or(0i64),
4224 dim.is_none() as i8,
4225 mask_type.unwrap_or(0i64),
4226 mask_type.is_none() as i8
4227 ));
4228 Ok(Tensor { c_tensor: c_tensors[0] })
4229 }
4230
4231 pub fn f_internal_mixed_dtypes_linear<T: Borrow<Tensor>>(
4232 &self,
4233 weight: &Tensor,
4234 scale: &Tensor,
4235 bias: Option<T>,
4236 activation: &str,
4237 ) -> Result<Tensor, TchError> {
4238 let mut c_tensors = [std::ptr::null_mut(); 1];
4239 unsafe_torch_err!(atg__mixed_dtypes_linear(
4240 c_tensors.as_mut_ptr(),
4241 self.c_tensor,
4242 weight.c_tensor,
4243 scale.c_tensor,
4244 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4245 activation.as_ptr(),
4246 activation.len() as i32
4247 ));
4248 Ok(Tensor { c_tensor: c_tensors[0] })
4249 }
4250
4251 pub fn f_internal_mkldnn_reshape(&self, shape: impl IntList) -> Result<Tensor, TchError> {
4252 let mut c_tensors = [std::ptr::null_mut(); 1];
4253 unsafe_torch_err!(atg__mkldnn_reshape(
4254 c_tensors.as_mut_ptr(),
4255 self.c_tensor,
4256 shape.as_ptr(),
4257 shape.len_i32()
4258 ));
4259 Ok(Tensor { c_tensor: c_tensors[0] })
4260 }
4261
4262 pub fn f_internal_mkldnn_reshape_out(
4263 &self,
4264 out: &Tensor,
4265 shape: impl IntList,
4266 ) -> Result<Tensor, TchError> {
4267 let mut c_tensors = [std::ptr::null_mut(); 1];
4268 unsafe_torch_err!(atg__mkldnn_reshape_out(
4269 c_tensors.as_mut_ptr(),
4270 out.c_tensor,
4271 self.c_tensor,
4272 shape.as_ptr(),
4273 shape.len_i32()
4274 ));
4275 Ok(Tensor { c_tensor: c_tensors[0] })
4276 }
4277
4278 pub fn f_internal_mkldnn_transpose(&self, dim0: i64, dim1: i64) -> Result<Tensor, TchError> {
4279 let mut c_tensors = [std::ptr::null_mut(); 1];
4280 unsafe_torch_err!(atg__mkldnn_transpose(c_tensors.as_mut_ptr(), self.c_tensor, dim0, dim1));
4281 Ok(Tensor { c_tensor: c_tensors[0] })
4282 }
4283
4284 pub fn f_internal_mkldnn_transpose_(
4285 &mut self,
4286 dim0: i64,
4287 dim1: i64,
4288 ) -> Result<Tensor, TchError> {
4289 let mut c_tensors = [std::ptr::null_mut(); 1];
4290 unsafe_torch_err!(atg__mkldnn_transpose_(
4291 c_tensors.as_mut_ptr(),
4292 self.c_tensor,
4293 dim0,
4294 dim1
4295 ));
4296 Ok(Tensor { c_tensor: c_tensors[0] })
4297 }
4298
4299 pub fn f_internal_mkldnn_transpose_out(
4300 &self,
4301 out: &Tensor,
4302 dim0: i64,
4303 dim1: i64,
4304 ) -> Result<Tensor, TchError> {
4305 let mut c_tensors = [std::ptr::null_mut(); 1];
4306 unsafe_torch_err!(atg__mkldnn_transpose_out(
4307 c_tensors.as_mut_ptr(),
4308 out.c_tensor,
4309 self.c_tensor,
4310 dim0,
4311 dim1
4312 ));
4313 Ok(Tensor { c_tensor: c_tensors[0] })
4314 }
4315
4316 pub fn f_internal_mps_convolution<T: Borrow<Tensor>>(
4317 &self,
4318 weight: &Tensor,
4319 bias: Option<T>,
4320 padding: impl IntList,
4321 stride: impl IntList,
4322 dilation: impl IntList,
4323 groups: i64,
4324 ) -> Result<Tensor, TchError> {
4325 let mut c_tensors = [std::ptr::null_mut(); 1];
4326 unsafe_torch_err!(atg__mps_convolution(
4327 c_tensors.as_mut_ptr(),
4328 self.c_tensor,
4329 weight.c_tensor,
4330 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4331 padding.as_ptr(),
4332 padding.len_i32(),
4333 stride.as_ptr(),
4334 stride.len_i32(),
4335 dilation.as_ptr(),
4336 dilation.len_i32(),
4337 groups
4338 ));
4339 Ok(Tensor { c_tensor: c_tensors[0] })
4340 }
4341
4342 pub fn f_internal_mps_convolution_out<T: Borrow<Tensor>>(
4343 &self,
4344 out: &Tensor,
4345 weight: &Tensor,
4346 bias: Option<T>,
4347 padding: impl IntList,
4348 stride: impl IntList,
4349 dilation: impl IntList,
4350 groups: i64,
4351 ) -> Result<Tensor, TchError> {
4352 let mut c_tensors = [std::ptr::null_mut(); 1];
4353 unsafe_torch_err!(atg__mps_convolution_out(
4354 c_tensors.as_mut_ptr(),
4355 out.c_tensor,
4356 self.c_tensor,
4357 weight.c_tensor,
4358 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4359 padding.as_ptr(),
4360 padding.len_i32(),
4361 stride.as_ptr(),
4362 stride.len_i32(),
4363 dilation.as_ptr(),
4364 dilation.len_i32(),
4365 groups
4366 ));
4367 Ok(Tensor { c_tensor: c_tensors[0] })
4368 }
4369
4370 pub fn f_internal_mps_convolution_transpose(
4371 &self,
4372 weight: &Tensor,
4373 padding: impl IntList,
4374 output_padding: impl IntList,
4375 stride: impl IntList,
4376 dilation: impl IntList,
4377 groups: i64,
4378 ) -> Result<Tensor, TchError> {
4379 let mut c_tensors = [std::ptr::null_mut(); 1];
4380 unsafe_torch_err!(atg__mps_convolution_transpose(
4381 c_tensors.as_mut_ptr(),
4382 self.c_tensor,
4383 weight.c_tensor,
4384 padding.as_ptr(),
4385 padding.len_i32(),
4386 output_padding.as_ptr(),
4387 output_padding.len_i32(),
4388 stride.as_ptr(),
4389 stride.len_i32(),
4390 dilation.as_ptr(),
4391 dilation.len_i32(),
4392 groups
4393 ));
4394 Ok(Tensor { c_tensor: c_tensors[0] })
4395 }
4396
4397 pub fn f_internal_mps_convolution_transpose_out(
4398 &self,
4399 out: &Tensor,
4400 weight: &Tensor,
4401 padding: impl IntList,
4402 output_padding: impl IntList,
4403 stride: impl IntList,
4404 dilation: impl IntList,
4405 groups: i64,
4406 ) -> Result<Tensor, TchError> {
4407 let mut c_tensors = [std::ptr::null_mut(); 1];
4408 unsafe_torch_err!(atg__mps_convolution_transpose_out(
4409 c_tensors.as_mut_ptr(),
4410 out.c_tensor,
4411 self.c_tensor,
4412 weight.c_tensor,
4413 padding.as_ptr(),
4414 padding.len_i32(),
4415 output_padding.as_ptr(),
4416 output_padding.len_i32(),
4417 stride.as_ptr(),
4418 stride.len_i32(),
4419 dilation.as_ptr(),
4420 dilation.len_i32(),
4421 groups
4422 ));
4423 Ok(Tensor { c_tensor: c_tensors[0] })
4424 }
4425
4426 pub fn f_internal_native_batch_norm_legit<T: Borrow<Tensor>>(
4427 &self,
4428 weight: Option<T>,
4429 bias: Option<T>,
4430 running_mean: &Tensor,
4431 running_var: &Tensor,
4432 training: bool,
4433 momentum: f64,
4434 eps: f64,
4435 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
4436 let mut c_tensors = [std::ptr::null_mut(); 3];
4437 unsafe_torch_err!(atg__native_batch_norm_legit(
4438 c_tensors.as_mut_ptr(),
4439 self.c_tensor,
4440 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4441 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4442 running_mean.c_tensor,
4443 running_var.c_tensor,
4444 if training { 1 } else { 0 },
4445 momentum,
4446 eps
4447 ));
4448 Ok((
4449 Tensor { c_tensor: c_tensors[0] },
4450 Tensor { c_tensor: c_tensors[1] },
4451 Tensor { c_tensor: c_tensors[2] },
4452 ))
4453 }
4454
4455 pub fn f_internal_native_batch_norm_legit_functional<T: Borrow<Tensor>>(
4456 &self,
4457 weight: Option<T>,
4458 bias: Option<T>,
4459 running_mean: &Tensor,
4460 running_var: &Tensor,
4461 training: bool,
4462 momentum: f64,
4463 eps: f64,
4464 ) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
4465 let mut c_tensors = [std::ptr::null_mut(); 5];
4466 unsafe_torch_err!(atg__native_batch_norm_legit_functional(
4467 c_tensors.as_mut_ptr(),
4468 self.c_tensor,
4469 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4470 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4471 running_mean.c_tensor,
4472 running_var.c_tensor,
4473 if training { 1 } else { 0 },
4474 momentum,
4475 eps
4476 ));
4477 Ok((
4478 Tensor { c_tensor: c_tensors[0] },
4479 Tensor { c_tensor: c_tensors[1] },
4480 Tensor { c_tensor: c_tensors[2] },
4481 Tensor { c_tensor: c_tensors[3] },
4482 Tensor { c_tensor: c_tensors[4] },
4483 ))
4484 }
4485
4486 pub fn f_internal_native_batch_norm_legit_no_stats<T: Borrow<Tensor>>(
4487 &self,
4488 weight: Option<T>,
4489 bias: Option<T>,
4490 training: bool,
4491 momentum: f64,
4492 eps: f64,
4493 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
4494 let mut c_tensors = [std::ptr::null_mut(); 3];
4495 unsafe_torch_err!(atg__native_batch_norm_legit_no_stats(
4496 c_tensors.as_mut_ptr(),
4497 self.c_tensor,
4498 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4499 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4500 if training { 1 } else { 0 },
4501 momentum,
4502 eps
4503 ));
4504 Ok((
4505 Tensor { c_tensor: c_tensors[0] },
4506 Tensor { c_tensor: c_tensors[1] },
4507 Tensor { c_tensor: c_tensors[2] },
4508 ))
4509 }
4510
4511 pub fn f_internal_native_batch_norm_legit_no_stats_out<T: Borrow<Tensor>>(
4512 &self,
4513 out: &Tensor,
4514 save_mean: &Tensor,
4515 save_invstd: &Tensor,
4516 weight: Option<T>,
4517 bias: Option<T>,
4518 training: bool,
4519 momentum: f64,
4520 eps: f64,
4521 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
4522 let mut c_tensors = [std::ptr::null_mut(); 3];
4523 unsafe_torch_err!(atg__native_batch_norm_legit_no_stats_out(
4524 c_tensors.as_mut_ptr(),
4525 out.c_tensor,
4526 save_mean.c_tensor,
4527 save_invstd.c_tensor,
4528 self.c_tensor,
4529 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4530 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4531 if training { 1 } else { 0 },
4532 momentum,
4533 eps
4534 ));
4535 Ok((
4536 Tensor { c_tensor: c_tensors[0] },
4537 Tensor { c_tensor: c_tensors[1] },
4538 Tensor { c_tensor: c_tensors[2] },
4539 ))
4540 }
4541
4542 pub fn f_internal_native_batch_norm_legit_no_training<T: Borrow<Tensor>>(
4543 &self,
4544 weight: Option<T>,
4545 bias: Option<T>,
4546 running_mean: &Tensor,
4547 running_var: &Tensor,
4548 momentum: f64,
4549 eps: f64,
4550 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
4551 let mut c_tensors = [std::ptr::null_mut(); 3];
4552 unsafe_torch_err!(atg__native_batch_norm_legit_no_training(
4553 c_tensors.as_mut_ptr(),
4554 self.c_tensor,
4555 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4556 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4557 running_mean.c_tensor,
4558 running_var.c_tensor,
4559 momentum,
4560 eps
4561 ));
4562 Ok((
4563 Tensor { c_tensor: c_tensors[0] },
4564 Tensor { c_tensor: c_tensors[1] },
4565 Tensor { c_tensor: c_tensors[2] },
4566 ))
4567 }
4568
4569 pub fn f_internal_native_batch_norm_legit_no_training_out<T: Borrow<Tensor>>(
4570 &self,
4571 out0: &Tensor,
4572 out1: &Tensor,
4573 out2: &Tensor,
4574 weight: Option<T>,
4575 bias: Option<T>,
4576 running_mean: &Tensor,
4577 running_var: &Tensor,
4578 momentum: f64,
4579 eps: f64,
4580 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
4581 let mut c_tensors = [std::ptr::null_mut(); 3];
4582 unsafe_torch_err!(atg__native_batch_norm_legit_no_training_out(
4583 c_tensors.as_mut_ptr(),
4584 out0.c_tensor,
4585 out1.c_tensor,
4586 out2.c_tensor,
4587 self.c_tensor,
4588 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4589 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4590 running_mean.c_tensor,
4591 running_var.c_tensor,
4592 momentum,
4593 eps
4594 ));
4595 Ok((
4596 Tensor { c_tensor: c_tensors[0] },
4597 Tensor { c_tensor: c_tensors[1] },
4598 Tensor { c_tensor: c_tensors[2] },
4599 ))
4600 }
4601
4602 pub fn f_internal_native_batch_norm_legit_out<T: Borrow<Tensor>>(
4603 &self,
4604 out: &Tensor,
4605 save_mean: &Tensor,
4606 save_invstd: &Tensor,
4607 weight: Option<T>,
4608 bias: Option<T>,
4609 running_mean: &Tensor,
4610 running_var: &Tensor,
4611 training: bool,
4612 momentum: f64,
4613 eps: f64,
4614 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
4615 let mut c_tensors = [std::ptr::null_mut(); 3];
4616 unsafe_torch_err!(atg__native_batch_norm_legit_out(
4617 c_tensors.as_mut_ptr(),
4618 out.c_tensor,
4619 save_mean.c_tensor,
4620 save_invstd.c_tensor,
4621 self.c_tensor,
4622 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4623 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4624 running_mean.c_tensor,
4625 running_var.c_tensor,
4626 if training { 1 } else { 0 },
4627 momentum,
4628 eps
4629 ));
4630 Ok((
4631 Tensor { c_tensor: c_tensors[0] },
4632 Tensor { c_tensor: c_tensors[1] },
4633 Tensor { c_tensor: c_tensors[2] },
4634 ))
4635 }
4636
4637 pub fn f_internal_native_multi_head_attention<T: Borrow<Tensor>>(
4638 query: &Tensor,
4639 key: &Tensor,
4640 value: &Tensor,
4641 embed_dim: i64,
4642 num_head: i64,
4643 qkv_weight: &Tensor,
4644 qkv_bias: &Tensor,
4645 proj_weight: &Tensor,
4646 proj_bias: &Tensor,
4647 mask: Option<T>,
4648 need_weights: bool,
4649 average_attn_weights: bool,
4650 mask_type: impl Into<Option<i64>>,
4651 ) -> Result<(Tensor, Tensor), TchError> {
4652 let mask_type = mask_type.into();
4653 let mut c_tensors = [std::ptr::null_mut(); 2];
4654 unsafe_torch_err!(atg__native_multi_head_attention(
4655 c_tensors.as_mut_ptr(),
4656 query.c_tensor,
4657 key.c_tensor,
4658 value.c_tensor,
4659 embed_dim,
4660 num_head,
4661 qkv_weight.c_tensor,
4662 qkv_bias.c_tensor,
4663 proj_weight.c_tensor,
4664 proj_bias.c_tensor,
4665 mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4666 if need_weights { 1 } else { 0 },
4667 if average_attn_weights { 1 } else { 0 },
4668 mask_type.unwrap_or(0i64),
4669 mask_type.is_none() as i8
4670 ));
4671 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
4672 }
4673
4674 pub fn f_internal_native_multi_head_attention_out<T: Borrow<Tensor>>(
4675 out0: &Tensor,
4676 out1: &Tensor,
4677 query: &Tensor,
4678 key: &Tensor,
4679 value: &Tensor,
4680 embed_dim: i64,
4681 num_head: i64,
4682 qkv_weight: &Tensor,
4683 qkv_bias: &Tensor,
4684 proj_weight: &Tensor,
4685 proj_bias: &Tensor,
4686 mask: Option<T>,
4687 need_weights: bool,
4688 average_attn_weights: bool,
4689 mask_type: impl Into<Option<i64>>,
4690 ) -> Result<(Tensor, Tensor), TchError> {
4691 let mask_type = mask_type.into();
4692 let mut c_tensors = [std::ptr::null_mut(); 2];
4693 unsafe_torch_err!(atg__native_multi_head_attention_out(
4694 c_tensors.as_mut_ptr(),
4695 out0.c_tensor,
4696 out1.c_tensor,
4697 query.c_tensor,
4698 key.c_tensor,
4699 value.c_tensor,
4700 embed_dim,
4701 num_head,
4702 qkv_weight.c_tensor,
4703 qkv_bias.c_tensor,
4704 proj_weight.c_tensor,
4705 proj_bias.c_tensor,
4706 mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4707 if need_weights { 1 } else { 0 },
4708 if average_attn_weights { 1 } else { 0 },
4709 mask_type.unwrap_or(0i64),
4710 mask_type.is_none() as i8
4711 ));
4712 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
4713 }
4714
4715 pub fn f_internal_neg_view(&self) -> Result<Tensor, TchError> {
4716 let mut c_tensors = [std::ptr::null_mut(); 1];
4717 unsafe_torch_err!(atg__neg_view(c_tensors.as_mut_ptr(), self.c_tensor));
4718 Ok(Tensor { c_tensor: c_tensors[0] })
4719 }
4720
4721 pub fn f_internal_neg_view_copy(&self) -> Result<Tensor, TchError> {
4722 let mut c_tensors = [std::ptr::null_mut(); 1];
4723 unsafe_torch_err!(atg__neg_view_copy(c_tensors.as_mut_ptr(), self.c_tensor));
4724 Ok(Tensor { c_tensor: c_tensors[0] })
4725 }
4726
4727 pub fn f_internal_neg_view_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
4728 let mut c_tensors = [std::ptr::null_mut(); 1];
4729 unsafe_torch_err!(atg__neg_view_copy_out(
4730 c_tensors.as_mut_ptr(),
4731 out.c_tensor,
4732 self.c_tensor
4733 ));
4734 Ok(Tensor { c_tensor: c_tensors[0] })
4735 }
4736
4737 pub fn f_internal_nested_compute_contiguous_strides_offsets(
4738 nested_size: &Tensor,
4739 ) -> Result<(Tensor, Tensor), TchError> {
4740 let mut c_tensors = [std::ptr::null_mut(); 2];
4741 unsafe_torch_err!(atg__nested_compute_contiguous_strides_offsets(
4742 c_tensors.as_mut_ptr(),
4743 nested_size.c_tensor
4744 ));
4745 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
4746 }
4747
4748 pub fn f_internal_nested_from_padded(
4749 padded: &Tensor,
4750 cpu_nested_shape_example: &Tensor,
4751 fuse_transform_0213: bool,
4752 ) -> Result<Tensor, TchError> {
4753 let mut c_tensors = [std::ptr::null_mut(); 1];
4754 unsafe_torch_err!(atg__nested_from_padded(
4755 c_tensors.as_mut_ptr(),
4756 padded.c_tensor,
4757 cpu_nested_shape_example.c_tensor,
4758 if fuse_transform_0213 { 1 } else { 0 }
4759 ));
4760 Ok(Tensor { c_tensor: c_tensors[0] })
4761 }
4762
4763 pub fn f_internal_nested_from_padded_and_nested_example(
4764 padded: &Tensor,
4765 nt_example: &Tensor,
4766 ) -> Result<Tensor, TchError> {
4767 let mut c_tensors = [std::ptr::null_mut(); 1];
4768 unsafe_torch_err!(atg__nested_from_padded_and_nested_example(
4769 c_tensors.as_mut_ptr(),
4770 padded.c_tensor,
4771 nt_example.c_tensor
4772 ));
4773 Ok(Tensor { c_tensor: c_tensors[0] })
4774 }
4775
4776 pub fn f_internal_nested_from_padded_and_nested_example_out(
4777 out: &Tensor,
4778 padded: &Tensor,
4779 nt_example: &Tensor,
4780 ) -> Result<Tensor, TchError> {
4781 let mut c_tensors = [std::ptr::null_mut(); 1];
4782 unsafe_torch_err!(atg__nested_from_padded_and_nested_example_out(
4783 c_tensors.as_mut_ptr(),
4784 out.c_tensor,
4785 padded.c_tensor,
4786 nt_example.c_tensor
4787 ));
4788 Ok(Tensor { c_tensor: c_tensors[0] })
4789 }
4790
4791 pub fn f_internal_nested_from_padded_out(
4792 out: &Tensor,
4793 padded: &Tensor,
4794 cpu_nested_shape_example: &Tensor,
4795 fuse_transform_0213: bool,
4796 ) -> Result<Tensor, TchError> {
4797 let mut c_tensors = [std::ptr::null_mut(); 1];
4798 unsafe_torch_err!(atg__nested_from_padded_out(
4799 c_tensors.as_mut_ptr(),
4800 out.c_tensor,
4801 padded.c_tensor,
4802 cpu_nested_shape_example.c_tensor,
4803 if fuse_transform_0213 { 1 } else { 0 }
4804 ));
4805 Ok(Tensor { c_tensor: c_tensors[0] })
4806 }
4807
4808 pub fn f_internal_nested_get_jagged_dummy(any: &Tensor) -> Result<Tensor, TchError> {
4809 let mut c_tensors = [std::ptr::null_mut(); 1];
4810 unsafe_torch_err!(atg__nested_get_jagged_dummy(c_tensors.as_mut_ptr(), any.c_tensor));
4811 Ok(Tensor { c_tensor: c_tensors[0] })
4812 }
4813
4814 pub fn f_internal_nested_get_lengths(&self) -> Result<Tensor, TchError> {
4815 let mut c_tensors = [std::ptr::null_mut(); 1];
4816 unsafe_torch_err!(atg__nested_get_lengths(c_tensors.as_mut_ptr(), self.c_tensor));
4817 Ok(Tensor { c_tensor: c_tensors[0] })
4818 }
4819
4820 pub fn f_internal_nested_get_max_seqlen(&self) -> Result<Tensor, TchError> {
4821 let mut c_tensors = [std::ptr::null_mut(); 1];
4822 unsafe_torch_err!(atg__nested_get_max_seqlen(c_tensors.as_mut_ptr(), self.c_tensor));
4823 Ok(Tensor { c_tensor: c_tensors[0] })
4824 }
4825
4826 pub fn f_internal_nested_get_min_seqlen(&self) -> Result<Tensor, TchError> {
4827 let mut c_tensors = [std::ptr::null_mut(); 1];
4828 unsafe_torch_err!(atg__nested_get_min_seqlen(c_tensors.as_mut_ptr(), self.c_tensor));
4829 Ok(Tensor { c_tensor: c_tensors[0] })
4830 }
4831
4832 pub fn f_internal_nested_get_offsets(&self) -> Result<Tensor, TchError> {
4833 let mut c_tensors = [std::ptr::null_mut(); 1];
4834 unsafe_torch_err!(atg__nested_get_offsets(c_tensors.as_mut_ptr(), self.c_tensor));
4835 Ok(Tensor { c_tensor: c_tensors[0] })
4836 }
4837
4838 pub fn f_internal_nested_get_ragged_idx(&self) -> Result<i64, TchError> {
4839 let return_;
4840 unsafe_torch_err!(return_ = atg__nested_get_ragged_idx(self.c_tensor));
4841 Ok(return_)
4842 }
4843
4844 pub fn f_internal_nested_get_values(&self) -> Result<Tensor, TchError> {
4845 let mut c_tensors = [std::ptr::null_mut(); 1];
4846 unsafe_torch_err!(atg__nested_get_values(c_tensors.as_mut_ptr(), self.c_tensor));
4847 Ok(Tensor { c_tensor: c_tensors[0] })
4848 }
4849
4850 pub fn f_internal_nested_get_values_copy(&self) -> Result<Tensor, TchError> {
4851 let mut c_tensors = [std::ptr::null_mut(); 1];
4852 unsafe_torch_err!(atg__nested_get_values_copy(c_tensors.as_mut_ptr(), self.c_tensor));
4853 Ok(Tensor { c_tensor: c_tensors[0] })
4854 }
4855
4856 pub fn f_internal_nested_get_values_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
4857 let mut c_tensors = [std::ptr::null_mut(); 1];
4858 unsafe_torch_err!(atg__nested_get_values_copy_out(
4859 c_tensors.as_mut_ptr(),
4860 out.c_tensor,
4861 self.c_tensor
4862 ));
4863 Ok(Tensor { c_tensor: c_tensors[0] })
4864 }
4865
4866 pub fn f_internal_nested_select_backward(
4867 &self,
4868 grad_output: &Tensor,
4869 dim: i64,
4870 index: i64,
4871 ) -> Result<Tensor, TchError> {
4872 let mut c_tensors = [std::ptr::null_mut(); 1];
4873 unsafe_torch_err!(atg__nested_select_backward(
4874 c_tensors.as_mut_ptr(),
4875 grad_output.c_tensor,
4876 self.c_tensor,
4877 dim,
4878 index
4879 ));
4880 Ok(Tensor { c_tensor: c_tensors[0] })
4881 }
4882
4883 pub fn f_internal_nested_sum_backward(
4884 &self,
4885 grad: &Tensor,
4886 dim: impl IntListOption,
4887 keepdim: bool,
4888 ) -> Result<Tensor, TchError> {
4889 let mut c_tensors = [std::ptr::null_mut(); 1];
4890 unsafe_torch_err!(atg__nested_sum_backward(
4891 c_tensors.as_mut_ptr(),
4892 grad.c_tensor,
4893 self.c_tensor,
4894 dim.as_ptr(),
4895 dim.len_i32(),
4896 if keepdim { 1 } else { 0 }
4897 ));
4898 Ok(Tensor { c_tensor: c_tensors[0] })
4899 }
4900
4901 pub fn f_internal_nested_view_from_buffer(
4902 &self,
4903 nested_size: &Tensor,
4904 nested_strides: &Tensor,
4905 offsets: &Tensor,
4906 ) -> Result<Tensor, TchError> {
4907 let mut c_tensors = [std::ptr::null_mut(); 1];
4908 unsafe_torch_err!(atg__nested_view_from_buffer(
4909 c_tensors.as_mut_ptr(),
4910 self.c_tensor,
4911 nested_size.c_tensor,
4912 nested_strides.c_tensor,
4913 offsets.c_tensor
4914 ));
4915 Ok(Tensor { c_tensor: c_tensors[0] })
4916 }
4917
4918 pub fn f_internal_nested_view_from_buffer_copy(
4919 &self,
4920 nested_size: &Tensor,
4921 nested_strides: &Tensor,
4922 offsets: &Tensor,
4923 ) -> Result<Tensor, TchError> {
4924 let mut c_tensors = [std::ptr::null_mut(); 1];
4925 unsafe_torch_err!(atg__nested_view_from_buffer_copy(
4926 c_tensors.as_mut_ptr(),
4927 self.c_tensor,
4928 nested_size.c_tensor,
4929 nested_strides.c_tensor,
4930 offsets.c_tensor
4931 ));
4932 Ok(Tensor { c_tensor: c_tensors[0] })
4933 }
4934
4935 pub fn f_internal_nested_view_from_buffer_copy_out(
4936 &self,
4937 out: &Tensor,
4938 nested_size: &Tensor,
4939 nested_strides: &Tensor,
4940 offsets: &Tensor,
4941 ) -> Result<Tensor, TchError> {
4942 let mut c_tensors = [std::ptr::null_mut(); 1];
4943 unsafe_torch_err!(atg__nested_view_from_buffer_copy_out(
4944 c_tensors.as_mut_ptr(),
4945 out.c_tensor,
4946 self.c_tensor,
4947 nested_size.c_tensor,
4948 nested_strides.c_tensor,
4949 offsets.c_tensor
4950 ));
4951 Ok(Tensor { c_tensor: c_tensors[0] })
4952 }
4953
4954 pub fn f_internal_nested_view_from_jagged<T: Borrow<Tensor>>(
4955 &self,
4956 offsets: &Tensor,
4957 dummy: &Tensor,
4958 lengths: Option<T>,
4959 ragged_idx: i64,
4960 min_seqlen: Option<T>,
4961 max_seqlen: Option<T>,
4962 ) -> Result<Tensor, TchError> {
4963 let mut c_tensors = [std::ptr::null_mut(); 1];
4964 unsafe_torch_err!(atg__nested_view_from_jagged(
4965 c_tensors.as_mut_ptr(),
4966 self.c_tensor,
4967 offsets.c_tensor,
4968 dummy.c_tensor,
4969 lengths.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4970 ragged_idx,
4971 min_seqlen.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4972 max_seqlen.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
4973 ));
4974 Ok(Tensor { c_tensor: c_tensors[0] })
4975 }
4976
4977 pub fn f_internal_nested_view_from_jagged_copy<T: Borrow<Tensor>>(
4978 &self,
4979 offsets: &Tensor,
4980 dummy: &Tensor,
4981 lengths: Option<T>,
4982 ragged_idx: i64,
4983 min_seqlen: Option<T>,
4984 max_seqlen: Option<T>,
4985 ) -> Result<Tensor, TchError> {
4986 let mut c_tensors = [std::ptr::null_mut(); 1];
4987 unsafe_torch_err!(atg__nested_view_from_jagged_copy(
4988 c_tensors.as_mut_ptr(),
4989 self.c_tensor,
4990 offsets.c_tensor,
4991 dummy.c_tensor,
4992 lengths.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4993 ragged_idx,
4994 min_seqlen.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
4995 max_seqlen.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
4996 ));
4997 Ok(Tensor { c_tensor: c_tensors[0] })
4998 }
4999
5000 pub fn f_internal_nested_view_from_jagged_copy_out<T: Borrow<Tensor>>(
5001 &self,
5002 out: &Tensor,
5003 offsets: &Tensor,
5004 dummy: &Tensor,
5005 lengths: Option<T>,
5006 ragged_idx: i64,
5007 min_seqlen: Option<T>,
5008 max_seqlen: Option<T>,
5009 ) -> Result<Tensor, TchError> {
5010 let mut c_tensors = [std::ptr::null_mut(); 1];
5011 unsafe_torch_err!(atg__nested_view_from_jagged_copy_out(
5012 c_tensors.as_mut_ptr(),
5013 out.c_tensor,
5014 self.c_tensor,
5015 offsets.c_tensor,
5016 dummy.c_tensor,
5017 lengths.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5018 ragged_idx,
5019 min_seqlen.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5020 max_seqlen.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
5021 ));
5022 Ok(Tensor { c_tensor: c_tensors[0] })
5023 }
5024
5025 pub fn f_internal_new_zeros_with_same_feature_meta(
5026 &self,
5027 other: &Tensor,
5028 self_num_batch_dims: i64,
5029 ) -> Result<Tensor, TchError> {
5030 let mut c_tensors = [std::ptr::null_mut(); 1];
5031 unsafe_torch_err!(atg__new_zeros_with_same_feature_meta(
5032 c_tensors.as_mut_ptr(),
5033 self.c_tensor,
5034 other.c_tensor,
5035 self_num_batch_dims
5036 ));
5037 Ok(Tensor { c_tensor: c_tensors[0] })
5038 }
5039
5040 pub fn f_internal_new_zeros_with_same_feature_meta_out(
5041 &self,
5042 out: &Tensor,
5043 other: &Tensor,
5044 self_num_batch_dims: i64,
5045 ) -> Result<Tensor, TchError> {
5046 let mut c_tensors = [std::ptr::null_mut(); 1];
5047 unsafe_torch_err!(atg__new_zeros_with_same_feature_meta_out(
5048 c_tensors.as_mut_ptr(),
5049 out.c_tensor,
5050 self.c_tensor,
5051 other.c_tensor,
5052 self_num_batch_dims
5053 ));
5054 Ok(Tensor { c_tensor: c_tensors[0] })
5055 }
5056
5057 pub fn f_internal_nnpack_available() -> Result<bool, TchError> {
5058 let return_;
5059 unsafe_torch_err!(return_ = atg__nnpack_available());
5060 Ok(return_ != 0)
5061 }
5062
5063 pub fn f_internal_nnpack_spatial_convolution<T: Borrow<Tensor>>(
5064 &self,
5065 weight: &Tensor,
5066 bias: Option<T>,
5067 padding: impl IntList,
5068 stride: impl IntList,
5069 ) -> Result<Tensor, TchError> {
5070 let mut c_tensors = [std::ptr::null_mut(); 1];
5071 unsafe_torch_err!(atg__nnpack_spatial_convolution(
5072 c_tensors.as_mut_ptr(),
5073 self.c_tensor,
5074 weight.c_tensor,
5075 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5076 padding.as_ptr(),
5077 padding.len_i32(),
5078 stride.as_ptr(),
5079 stride.len_i32()
5080 ));
5081 Ok(Tensor { c_tensor: c_tensors[0] })
5082 }
5083
5084 pub fn f_internal_nnpack_spatial_convolution_out<T: Borrow<Tensor>>(
5085 &self,
5086 out: &Tensor,
5087 weight: &Tensor,
5088 bias: Option<T>,
5089 padding: impl IntList,
5090 stride: impl IntList,
5091 ) -> Result<Tensor, TchError> {
5092 let mut c_tensors = [std::ptr::null_mut(); 1];
5093 unsafe_torch_err!(atg__nnpack_spatial_convolution_out(
5094 c_tensors.as_mut_ptr(),
5095 out.c_tensor,
5096 self.c_tensor,
5097 weight.c_tensor,
5098 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5099 padding.as_ptr(),
5100 padding.len_i32(),
5101 stride.as_ptr(),
5102 stride.len_i32()
5103 ));
5104 Ok(Tensor { c_tensor: c_tensors[0] })
5105 }
5106
5107 pub fn f_internal_nnz(&self) -> Result<i64, TchError> {
5108 let return_;
5109 unsafe_torch_err!(return_ = atg__nnz(self.c_tensor));
5110 Ok(return_)
5111 }
5112
5113 pub fn f_internal_pack_padded_sequence(
5114 &self,
5115 lengths: &Tensor,
5116 batch_first: bool,
5117 ) -> Result<(Tensor, Tensor), TchError> {
5118 let mut c_tensors = [std::ptr::null_mut(); 2];
5119 unsafe_torch_err!(atg__pack_padded_sequence(
5120 c_tensors.as_mut_ptr(),
5121 self.c_tensor,
5122 lengths.c_tensor,
5123 if batch_first { 1 } else { 0 }
5124 ));
5125 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
5126 }
5127
5128 pub fn f_internal_pack_padded_sequence_backward(
5129 grad: &Tensor,
5130 input_size: impl IntList,
5131 batch_sizes: &Tensor,
5132 batch_first: bool,
5133 ) -> Result<Tensor, TchError> {
5134 let mut c_tensors = [std::ptr::null_mut(); 1];
5135 unsafe_torch_err!(atg__pack_padded_sequence_backward(
5136 c_tensors.as_mut_ptr(),
5137 grad.c_tensor,
5138 input_size.as_ptr(),
5139 input_size.len_i32(),
5140 batch_sizes.c_tensor,
5141 if batch_first { 1 } else { 0 }
5142 ));
5143 Ok(Tensor { c_tensor: c_tensors[0] })
5144 }
5145
5146 pub fn f_internal_pack_padded_sequence_out(
5147 &self,
5148 out0: &Tensor,
5149 out1: &Tensor,
5150 lengths: &Tensor,
5151 batch_first: bool,
5152 ) -> Result<(Tensor, Tensor), TchError> {
5153 let mut c_tensors = [std::ptr::null_mut(); 2];
5154 unsafe_torch_err!(atg__pack_padded_sequence_out(
5155 c_tensors.as_mut_ptr(),
5156 out0.c_tensor,
5157 out1.c_tensor,
5158 self.c_tensor,
5159 lengths.c_tensor,
5160 if batch_first { 1 } else { 0 }
5161 ));
5162 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
5163 }
5164
5165 pub fn f_internal_pad_circular(&self, pad: impl IntList) -> Result<Tensor, TchError> {
5166 let mut c_tensors = [std::ptr::null_mut(); 1];
5167 unsafe_torch_err!(atg__pad_circular(
5168 c_tensors.as_mut_ptr(),
5169 self.c_tensor,
5170 pad.as_ptr(),
5171 pad.len_i32()
5172 ));
5173 Ok(Tensor { c_tensor: c_tensors[0] })
5174 }
5175
5176 pub fn f_internal_pad_enum(
5177 &self,
5178 pad: impl IntList,
5179 mode: i64,
5180 value: impl Into<Option<f64>>,
5181 ) -> Result<Tensor, TchError> {
5182 let value = value.into();
5183 let mut c_tensors = [std::ptr::null_mut(); 1];
5184 unsafe_torch_err!(atg__pad_enum(
5185 c_tensors.as_mut_ptr(),
5186 self.c_tensor,
5187 pad.as_ptr(),
5188 pad.len_i32(),
5189 mode,
5190 value.unwrap_or(std::f64::NAN),
5191 value.is_none() as i8
5192 ));
5193 Ok(Tensor { c_tensor: c_tensors[0] })
5194 }
5195
5196 pub fn f_internal_pad_packed_sequence<S: Into<Scalar>>(
5197 data: &Tensor,
5198 batch_sizes: &Tensor,
5199 batch_first: bool,
5200 padding_value: S,
5201 total_length: i64,
5202 ) -> Result<(Tensor, Tensor), TchError> {
5203 let mut c_tensors = [std::ptr::null_mut(); 2];
5204 unsafe_torch_err!(atg__pad_packed_sequence(
5205 c_tensors.as_mut_ptr(),
5206 data.c_tensor,
5207 batch_sizes.c_tensor,
5208 if batch_first { 1 } else { 0 },
5209 padding_value.into().c_scalar,
5210 total_length
5211 ));
5212 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
5213 }
5214
5215 pub fn f_internal_pdist_backward(
5216 &self,
5217 grad: &Tensor,
5218 p: f64,
5219 pdist: &Tensor,
5220 ) -> Result<Tensor, TchError> {
5221 let mut c_tensors = [std::ptr::null_mut(); 1];
5222 unsafe_torch_err!(atg__pdist_backward(
5223 c_tensors.as_mut_ptr(),
5224 grad.c_tensor,
5225 self.c_tensor,
5226 p,
5227 pdist.c_tensor
5228 ));
5229 Ok(Tensor { c_tensor: c_tensors[0] })
5230 }
5231
5232 pub fn f_internal_pdist_backward_out(
5233 &self,
5234 out: &Tensor,
5235 grad: &Tensor,
5236 p: f64,
5237 pdist: &Tensor,
5238 ) -> Result<Tensor, TchError> {
5239 let mut c_tensors = [std::ptr::null_mut(); 1];
5240 unsafe_torch_err!(atg__pdist_backward_out(
5241 c_tensors.as_mut_ptr(),
5242 out.c_tensor,
5243 grad.c_tensor,
5244 self.c_tensor,
5245 p,
5246 pdist.c_tensor
5247 ));
5248 Ok(Tensor { c_tensor: c_tensors[0] })
5249 }
5250
5251 pub fn f_internal_pin_memory(&self, device: Device) -> Result<Tensor, TchError> {
5252 let mut c_tensors = [std::ptr::null_mut(); 1];
5253 unsafe_torch_err!(atg__pin_memory(c_tensors.as_mut_ptr(), self.c_tensor, device.c_int()));
5254 Ok(Tensor { c_tensor: c_tensors[0] })
5255 }
5256
5257 pub fn f_internal_pin_memory_out(
5258 &self,
5259 out: &Tensor,
5260 device: Device,
5261 ) -> Result<Tensor, TchError> {
5262 let mut c_tensors = [std::ptr::null_mut(); 1];
5263 unsafe_torch_err!(atg__pin_memory_out(
5264 c_tensors.as_mut_ptr(),
5265 out.c_tensor,
5266 self.c_tensor,
5267 device.c_int()
5268 ));
5269 Ok(Tensor { c_tensor: c_tensors[0] })
5270 }
5271
5272 pub fn f_internal_prelu_kernel(&self, weight: &Tensor) -> Result<Tensor, TchError> {
5273 let mut c_tensors = [std::ptr::null_mut(); 1];
5274 unsafe_torch_err!(atg__prelu_kernel(
5275 c_tensors.as_mut_ptr(),
5276 self.c_tensor,
5277 weight.c_tensor
5278 ));
5279 Ok(Tensor { c_tensor: c_tensors[0] })
5280 }
5281
5282 pub fn f_internal_prelu_kernel_backward(
5283 &self,
5284 grad_output: &Tensor,
5285 weight: &Tensor,
5286 ) -> Result<(Tensor, Tensor), TchError> {
5287 let mut c_tensors = [std::ptr::null_mut(); 2];
5288 unsafe_torch_err!(atg__prelu_kernel_backward(
5289 c_tensors.as_mut_ptr(),
5290 grad_output.c_tensor,
5291 self.c_tensor,
5292 weight.c_tensor
5293 ));
5294 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
5295 }
5296
5297 pub fn f_internal_print(s: &str) -> Result<(), TchError> {
5298 unsafe_torch_err!(atg__print(s.as_ptr(), s.len() as i32));
5299 Ok(())
5300 }
5301
5302 pub fn f_internal_propagate_xla_data(&self, output: &Tensor) -> Result<(), TchError> {
5303 unsafe_torch_err!(atg__propagate_xla_data(self.c_tensor, output.c_tensor));
5304 Ok(())
5305 }
5306
5307 pub fn f_internal_remove_batch_dim(
5308 &self,
5309 level: i64,
5310 batch_size: i64,
5311 out_dim: i64,
5312 ) -> Result<Tensor, TchError> {
5313 let mut c_tensors = [std::ptr::null_mut(); 1];
5314 unsafe_torch_err!(atg__remove_batch_dim(
5315 c_tensors.as_mut_ptr(),
5316 self.c_tensor,
5317 level,
5318 batch_size,
5319 out_dim
5320 ));
5321 Ok(Tensor { c_tensor: c_tensors[0] })
5322 }
5323
5324 pub fn f_internal_reshape_alias(
5325 &self,
5326 size: impl IntList,
5327 stride: impl IntList,
5328 ) -> Result<Tensor, TchError> {
5329 let mut c_tensors = [std::ptr::null_mut(); 1];
5330 unsafe_torch_err!(atg__reshape_alias(
5331 c_tensors.as_mut_ptr(),
5332 self.c_tensor,
5333 size.as_ptr(),
5334 size.len_i32(),
5335 stride.as_ptr(),
5336 stride.len_i32()
5337 ));
5338 Ok(Tensor { c_tensor: c_tensors[0] })
5339 }
5340
5341 pub fn f_internal_reshape_alias_copy(
5342 &self,
5343 size: impl IntList,
5344 stride: impl IntList,
5345 ) -> Result<Tensor, TchError> {
5346 let mut c_tensors = [std::ptr::null_mut(); 1];
5347 unsafe_torch_err!(atg__reshape_alias_copy(
5348 c_tensors.as_mut_ptr(),
5349 self.c_tensor,
5350 size.as_ptr(),
5351 size.len_i32(),
5352 stride.as_ptr(),
5353 stride.len_i32()
5354 ));
5355 Ok(Tensor { c_tensor: c_tensors[0] })
5356 }
5357
5358 pub fn f_internal_reshape_alias_copy_out(
5359 &self,
5360 out: &Tensor,
5361 size: impl IntList,
5362 stride: impl IntList,
5363 ) -> Result<Tensor, TchError> {
5364 let mut c_tensors = [std::ptr::null_mut(); 1];
5365 unsafe_torch_err!(atg__reshape_alias_copy_out(
5366 c_tensors.as_mut_ptr(),
5367 out.c_tensor,
5368 self.c_tensor,
5369 size.as_ptr(),
5370 size.len_i32(),
5371 stride.as_ptr(),
5372 stride.len_i32()
5373 ));
5374 Ok(Tensor { c_tensor: c_tensors[0] })
5375 }
5376
5377 pub fn f_internal_reshape_copy(&self, size: impl IntList) -> Result<Tensor, TchError> {
5378 let mut c_tensors = [std::ptr::null_mut(); 1];
5379 unsafe_torch_err!(atg__reshape_copy(
5380 c_tensors.as_mut_ptr(),
5381 self.c_tensor,
5382 size.as_ptr(),
5383 size.len_i32()
5384 ));
5385 Ok(Tensor { c_tensor: c_tensors[0] })
5386 }
5387
5388 pub fn f_internal_reshape_from_tensor(&self, shape: &Tensor) -> Result<Tensor, TchError> {
5389 let mut c_tensors = [std::ptr::null_mut(); 1];
5390 unsafe_torch_err!(atg__reshape_from_tensor(
5391 c_tensors.as_mut_ptr(),
5392 self.c_tensor,
5393 shape.c_tensor
5394 ));
5395 Ok(Tensor { c_tensor: c_tensors[0] })
5396 }
5397
5398 pub fn f_internal_resize_output(
5399 &self,
5400 size: impl IntList,
5401 device: Device,
5402 ) -> Result<Tensor, TchError> {
5403 let mut c_tensors = [std::ptr::null_mut(); 1];
5404 unsafe_torch_err!(atg__resize_output(
5405 c_tensors.as_mut_ptr(),
5406 self.c_tensor,
5407 size.as_ptr(),
5408 size.len_i32(),
5409 device.c_int()
5410 ));
5411 Ok(Tensor { c_tensor: c_tensors[0] })
5412 }
5413
5414 pub fn f_internal_resize_output_(
5415 &mut self,
5416 size: impl IntList,
5417 device: Device,
5418 ) -> Result<Tensor, TchError> {
5419 let mut c_tensors = [std::ptr::null_mut(); 1];
5420 unsafe_torch_err!(atg__resize_output_(
5421 c_tensors.as_mut_ptr(),
5422 self.c_tensor,
5423 size.as_ptr(),
5424 size.len_i32(),
5425 device.c_int()
5426 ));
5427 Ok(Tensor { c_tensor: c_tensors[0] })
5428 }
5429
5430 pub fn f_internal_resize_output_out(
5431 &self,
5432 out: &Tensor,
5433 size: impl IntList,
5434 device: Device,
5435 ) -> Result<Tensor, TchError> {
5436 let mut c_tensors = [std::ptr::null_mut(); 1];
5437 unsafe_torch_err!(atg__resize_output_out(
5438 c_tensors.as_mut_ptr(),
5439 out.c_tensor,
5440 self.c_tensor,
5441 size.as_ptr(),
5442 size.len_i32(),
5443 device.c_int()
5444 ));
5445 Ok(Tensor { c_tensor: c_tensors[0] })
5446 }
5447
5448 pub fn f_internal_rowwise_prune(
5449 weight: &Tensor,
5450 mask: &Tensor,
5451 compressed_indices_dtype: Kind,
5452 ) -> Result<(Tensor, Tensor), TchError> {
5453 let mut c_tensors = [std::ptr::null_mut(); 2];
5454 unsafe_torch_err!(atg__rowwise_prune(
5455 c_tensors.as_mut_ptr(),
5456 weight.c_tensor,
5457 mask.c_tensor,
5458 compressed_indices_dtype.c_int()
5459 ));
5460 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
5461 }
5462
5463 pub fn f_internal_safe_softmax(
5464 &self,
5465 dim: i64,
5466 dtype: impl Into<Option<Kind>>,
5467 ) -> Result<Tensor, TchError> {
5468 let mut c_tensors = [std::ptr::null_mut(); 1];
5469 unsafe_torch_err!(atg__safe_softmax(
5470 c_tensors.as_mut_ptr(),
5471 self.c_tensor,
5472 dim,
5473 dtype.into().map_or(-1, |s| s.c_int())
5474 ));
5475 Ok(Tensor { c_tensor: c_tensors[0] })
5476 }
5477
5478 pub fn f_internal_sample_dirichlet(&self) -> Result<Tensor, TchError> {
5479 let mut c_tensors = [std::ptr::null_mut(); 1];
5480 unsafe_torch_err!(atg__sample_dirichlet(c_tensors.as_mut_ptr(), self.c_tensor));
5481 Ok(Tensor { c_tensor: c_tensors[0] })
5482 }
5483
5484 pub fn f_internal_sample_dirichlet_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
5485 let mut c_tensors = [std::ptr::null_mut(); 1];
5486 unsafe_torch_err!(atg__sample_dirichlet_out(
5487 c_tensors.as_mut_ptr(),
5488 out.c_tensor,
5489 self.c_tensor
5490 ));
5491 Ok(Tensor { c_tensor: c_tensors[0] })
5492 }
5493
5494 pub fn f_internal_saturate_weight_to_fp16(weight: &Tensor) -> Result<Tensor, TchError> {
5495 let mut c_tensors = [std::ptr::null_mut(); 1];
5496 unsafe_torch_err!(atg__saturate_weight_to_fp16(c_tensors.as_mut_ptr(), weight.c_tensor));
5497 Ok(Tensor { c_tensor: c_tensors[0] })
5498 }
5499
5500 pub fn f_internal_scaled_dot_product_attention_math<T: Borrow<Tensor>>(
5501 query: &Tensor,
5502 key: &Tensor,
5503 value: &Tensor,
5504 attn_mask: Option<T>,
5505 dropout_p: f64,
5506 is_causal: bool,
5507 dropout_mask: Option<T>,
5508 scale: impl Into<Option<f64>>,
5509 enable_gqa: bool,
5510 ) -> Result<(Tensor, Tensor), TchError> {
5511 let scale = scale.into();
5512 let mut c_tensors = [std::ptr::null_mut(); 2];
5513 unsafe_torch_err!(atg__scaled_dot_product_attention_math(
5514 c_tensors.as_mut_ptr(),
5515 query.c_tensor,
5516 key.c_tensor,
5517 value.c_tensor,
5518 attn_mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5519 dropout_p,
5520 if is_causal { 1 } else { 0 },
5521 dropout_mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5522 scale.unwrap_or(std::f64::NAN),
5523 scale.is_none() as i8,
5524 if enable_gqa { 1 } else { 0 }
5525 ));
5526 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
5527 }
5528
5529 pub fn f_internal_scaled_dot_product_attention_math_for_mps<T: Borrow<Tensor>>(
5530 query: &Tensor,
5531 key: &Tensor,
5532 value: &Tensor,
5533 attn_mask: Option<T>,
5534 dropout_p: f64,
5535 is_causal: bool,
5536 dropout_mask: Option<T>,
5537 scale: impl Into<Option<f64>>,
5538 ) -> Result<(Tensor, Tensor), TchError> {
5539 let scale = scale.into();
5540 let mut c_tensors = [std::ptr::null_mut(); 2];
5541 unsafe_torch_err!(atg__scaled_dot_product_attention_math_for_mps(
5542 c_tensors.as_mut_ptr(),
5543 query.c_tensor,
5544 key.c_tensor,
5545 value.c_tensor,
5546 attn_mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5547 dropout_p,
5548 if is_causal { 1 } else { 0 },
5549 dropout_mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5550 scale.unwrap_or(std::f64::NAN),
5551 scale.is_none() as i8
5552 ));
5553 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
5554 }
5555
5556 pub fn f_internal_scaled_dot_product_cudnn_attention_backward(
5557 grad_out: &Tensor,
5558 query: &Tensor,
5559 key: &Tensor,
5560 value: &Tensor,
5561 out: &Tensor,
5562 logsumexp: &Tensor,
5563 philox_seed: &Tensor,
5564 philox_offset: &Tensor,
5565 attn_bias: &Tensor,
5566 cum_seq_q: &Tensor,
5567 cum_seq_k: &Tensor,
5568 max_q: i64,
5569 max_k: i64,
5570 dropout_p: f64,
5571 is_causal: bool,
5572 scale: impl Into<Option<f64>>,
5573 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
5574 let scale = scale.into();
5575 let mut c_tensors = [std::ptr::null_mut(); 3];
5576 unsafe_torch_err!(atg__scaled_dot_product_cudnn_attention_backward(
5577 c_tensors.as_mut_ptr(),
5578 grad_out.c_tensor,
5579 query.c_tensor,
5580 key.c_tensor,
5581 value.c_tensor,
5582 out.c_tensor,
5583 logsumexp.c_tensor,
5584 philox_seed.c_tensor,
5585 philox_offset.c_tensor,
5586 attn_bias.c_tensor,
5587 cum_seq_q.c_tensor,
5588 cum_seq_k.c_tensor,
5589 max_q,
5590 max_k,
5591 dropout_p,
5592 if is_causal { 1 } else { 0 },
5593 scale.unwrap_or(std::f64::NAN),
5594 scale.is_none() as i8
5595 ));
5596 Ok((
5597 Tensor { c_tensor: c_tensors[0] },
5598 Tensor { c_tensor: c_tensors[1] },
5599 Tensor { c_tensor: c_tensors[2] },
5600 ))
5601 }
5602
5603 pub fn f_internal_scaled_dot_product_efficient_attention<T: Borrow<Tensor>>(
5604 query: &Tensor,
5605 key: &Tensor,
5606 value: &Tensor,
5607 attn_bias: Option<T>,
5608 compute_log_sumexp: bool,
5609 dropout_p: f64,
5610 is_causal: bool,
5611 scale: impl Into<Option<f64>>,
5612 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
5613 let scale = scale.into();
5614 let mut c_tensors = [std::ptr::null_mut(); 4];
5615 unsafe_torch_err!(atg__scaled_dot_product_efficient_attention(
5616 c_tensors.as_mut_ptr(),
5617 query.c_tensor,
5618 key.c_tensor,
5619 value.c_tensor,
5620 attn_bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5621 if compute_log_sumexp { 1 } else { 0 },
5622 dropout_p,
5623 if is_causal { 1 } else { 0 },
5624 scale.unwrap_or(std::f64::NAN),
5625 scale.is_none() as i8
5626 ));
5627 Ok((
5628 Tensor { c_tensor: c_tensors[0] },
5629 Tensor { c_tensor: c_tensors[1] },
5630 Tensor { c_tensor: c_tensors[2] },
5631 Tensor { c_tensor: c_tensors[3] },
5632 ))
5633 }
5634
5635 pub fn f_internal_scaled_dot_product_flash_attention_backward(
5636 grad_out: &Tensor,
5637 query: &Tensor,
5638 key: &Tensor,
5639 value: &Tensor,
5640 out: &Tensor,
5641 logsumexp: &Tensor,
5642 cum_seq_q: &Tensor,
5643 cum_seq_k: &Tensor,
5644 max_q: i64,
5645 max_k: i64,
5646 dropout_p: f64,
5647 is_causal: bool,
5648 philox_seed: &Tensor,
5649 philox_offset: &Tensor,
5650 scale: impl Into<Option<f64>>,
5651 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
5652 let scale = scale.into();
5653 let mut c_tensors = [std::ptr::null_mut(); 3];
5654 unsafe_torch_err!(atg__scaled_dot_product_flash_attention_backward(
5655 c_tensors.as_mut_ptr(),
5656 grad_out.c_tensor,
5657 query.c_tensor,
5658 key.c_tensor,
5659 value.c_tensor,
5660 out.c_tensor,
5661 logsumexp.c_tensor,
5662 cum_seq_q.c_tensor,
5663 cum_seq_k.c_tensor,
5664 max_q,
5665 max_k,
5666 dropout_p,
5667 if is_causal { 1 } else { 0 },
5668 philox_seed.c_tensor,
5669 philox_offset.c_tensor,
5670 scale.unwrap_or(std::f64::NAN),
5671 scale.is_none() as i8
5672 ));
5673 Ok((
5674 Tensor { c_tensor: c_tensors[0] },
5675 Tensor { c_tensor: c_tensors[1] },
5676 Tensor { c_tensor: c_tensors[2] },
5677 ))
5678 }
5679
5680 pub fn f_internal_scaled_dot_product_flash_attention_for_cpu<T: Borrow<Tensor>>(
5681 query: &Tensor,
5682 key: &Tensor,
5683 value: &Tensor,
5684 dropout_p: f64,
5685 is_causal: bool,
5686 attn_mask: Option<T>,
5687 scale: impl Into<Option<f64>>,
5688 ) -> Result<(Tensor, Tensor), TchError> {
5689 let scale = scale.into();
5690 let mut c_tensors = [std::ptr::null_mut(); 2];
5691 unsafe_torch_err!(atg__scaled_dot_product_flash_attention_for_cpu(
5692 c_tensors.as_mut_ptr(),
5693 query.c_tensor,
5694 key.c_tensor,
5695 value.c_tensor,
5696 dropout_p,
5697 if is_causal { 1 } else { 0 },
5698 attn_mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5699 scale.unwrap_or(std::f64::NAN),
5700 scale.is_none() as i8
5701 ));
5702 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
5703 }
5704
5705 pub fn f_internal_scaled_dot_product_flash_attention_for_cpu_backward<T: Borrow<Tensor>>(
5706 grad_out: &Tensor,
5707 query: &Tensor,
5708 key: &Tensor,
5709 value: &Tensor,
5710 out: &Tensor,
5711 logsumexp: &Tensor,
5712 dropout_p: f64,
5713 is_causal: bool,
5714 attn_mask: Option<T>,
5715 scale: impl Into<Option<f64>>,
5716 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
5717 let scale = scale.into();
5718 let mut c_tensors = [std::ptr::null_mut(); 3];
5719 unsafe_torch_err!(atg__scaled_dot_product_flash_attention_for_cpu_backward(
5720 c_tensors.as_mut_ptr(),
5721 grad_out.c_tensor,
5722 query.c_tensor,
5723 key.c_tensor,
5724 value.c_tensor,
5725 out.c_tensor,
5726 logsumexp.c_tensor,
5727 dropout_p,
5728 if is_causal { 1 } else { 0 },
5729 attn_mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5730 scale.unwrap_or(std::f64::NAN),
5731 scale.is_none() as i8
5732 ));
5733 Ok((
5734 Tensor { c_tensor: c_tensors[0] },
5735 Tensor { c_tensor: c_tensors[1] },
5736 Tensor { c_tensor: c_tensors[2] },
5737 ))
5738 }
5739
5740 pub fn f_internal_scaled_mm<T: Borrow<Tensor>>(
5741 &self,
5742 mat2: &Tensor,
5743 scale_a: &Tensor,
5744 scale_b: &Tensor,
5745 bias: Option<T>,
5746 scale_result: Option<T>,
5747 out_dtype: impl Into<Option<Kind>>,
5748 use_fast_accum: bool,
5749 ) -> Result<Tensor, TchError> {
5750 let mut c_tensors = [std::ptr::null_mut(); 1];
5751 unsafe_torch_err!(atg__scaled_mm(
5752 c_tensors.as_mut_ptr(),
5753 self.c_tensor,
5754 mat2.c_tensor,
5755 scale_a.c_tensor,
5756 scale_b.c_tensor,
5757 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5758 scale_result.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5759 out_dtype.into().map_or(-1, |s| s.c_int()),
5760 if use_fast_accum { 1 } else { 0 }
5761 ));
5762 Ok(Tensor { c_tensor: c_tensors[0] })
5763 }
5764
5765 pub fn f_internal_scaled_mm_out<T: Borrow<Tensor>>(
5766 &self,
5767 out: &Tensor,
5768 mat2: &Tensor,
5769 scale_a: &Tensor,
5770 scale_b: &Tensor,
5771 bias: Option<T>,
5772 scale_result: Option<T>,
5773 out_dtype: impl Into<Option<Kind>>,
5774 use_fast_accum: bool,
5775 ) -> Result<Tensor, TchError> {
5776 let mut c_tensors = [std::ptr::null_mut(); 1];
5777 unsafe_torch_err!(atg__scaled_mm_out(
5778 c_tensors.as_mut_ptr(),
5779 out.c_tensor,
5780 self.c_tensor,
5781 mat2.c_tensor,
5782 scale_a.c_tensor,
5783 scale_b.c_tensor,
5784 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5785 scale_result.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5786 out_dtype.into().map_or(-1, |s| s.c_int()),
5787 if use_fast_accum { 1 } else { 0 }
5788 ));
5789 Ok(Tensor { c_tensor: c_tensors[0] })
5790 }
5791
5792 pub fn f_internal_scatter_reduce(
5793 &self,
5794 dim: i64,
5795 index: &Tensor,
5796 src: &Tensor,
5797 reduce: &str,
5798 include_self: bool,
5799 ) -> Result<Tensor, TchError> {
5800 let mut c_tensors = [std::ptr::null_mut(); 1];
5801 unsafe_torch_err!(atg__scatter_reduce(
5802 c_tensors.as_mut_ptr(),
5803 self.c_tensor,
5804 dim,
5805 index.c_tensor,
5806 src.c_tensor,
5807 reduce.as_ptr(),
5808 reduce.len() as i32,
5809 if include_self { 1 } else { 0 }
5810 ));
5811 Ok(Tensor { c_tensor: c_tensors[0] })
5812 }
5813
5814 pub fn f_internal_scatter_reduce_(
5815 &mut self,
5816 dim: i64,
5817 index: &Tensor,
5818 src: &Tensor,
5819 reduce: &str,
5820 include_self: bool,
5821 ) -> Result<Tensor, TchError> {
5822 let mut c_tensors = [std::ptr::null_mut(); 1];
5823 unsafe_torch_err!(atg__scatter_reduce_(
5824 c_tensors.as_mut_ptr(),
5825 self.c_tensor,
5826 dim,
5827 index.c_tensor,
5828 src.c_tensor,
5829 reduce.as_ptr(),
5830 reduce.len() as i32,
5831 if include_self { 1 } else { 0 }
5832 ));
5833 Ok(Tensor { c_tensor: c_tensors[0] })
5834 }
5835
5836 pub fn f_internal_scatter_reduce_two_out(
5837 &self,
5838 out: &Tensor,
5839 dim: i64,
5840 index: &Tensor,
5841 src: &Tensor,
5842 reduce: &str,
5843 include_self: bool,
5844 ) -> Result<Tensor, TchError> {
5845 let mut c_tensors = [std::ptr::null_mut(); 1];
5846 unsafe_torch_err!(atg__scatter_reduce_two_out(
5847 c_tensors.as_mut_ptr(),
5848 out.c_tensor,
5849 self.c_tensor,
5850 dim,
5851 index.c_tensor,
5852 src.c_tensor,
5853 reduce.as_ptr(),
5854 reduce.len() as i32,
5855 if include_self { 1 } else { 0 }
5856 ));
5857 Ok(Tensor { c_tensor: c_tensors[0] })
5858 }
5859
5860 pub fn f_internal_segment_reduce_backward<T: Borrow<Tensor>, S: Into<Scalar>>(
5861 grad: &Tensor,
5862 output: &Tensor,
5863 data: &Tensor,
5864 reduce: &str,
5865 lengths: Option<T>,
5866 offsets: Option<T>,
5867 axis: i64,
5868 initial: S,
5869 ) -> Result<Tensor, TchError> {
5870 let mut c_tensors = [std::ptr::null_mut(); 1];
5871 unsafe_torch_err!(atg__segment_reduce_backward(
5872 c_tensors.as_mut_ptr(),
5873 grad.c_tensor,
5874 output.c_tensor,
5875 data.c_tensor,
5876 reduce.as_ptr(),
5877 reduce.len() as i32,
5878 lengths.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5879 offsets.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5880 axis,
5881 initial.into().c_scalar
5882 ));
5883 Ok(Tensor { c_tensor: c_tensors[0] })
5884 }
5885
5886 pub fn f_internal_segment_reduce_backward_out<T: Borrow<Tensor>, S: Into<Scalar>>(
5887 out: &Tensor,
5888 grad: &Tensor,
5889 output: &Tensor,
5890 data: &Tensor,
5891 reduce: &str,
5892 lengths: Option<T>,
5893 offsets: Option<T>,
5894 axis: i64,
5895 initial: S,
5896 ) -> Result<Tensor, TchError> {
5897 let mut c_tensors = [std::ptr::null_mut(); 1];
5898 unsafe_torch_err!(atg__segment_reduce_backward_out(
5899 c_tensors.as_mut_ptr(),
5900 out.c_tensor,
5901 grad.c_tensor,
5902 output.c_tensor,
5903 data.c_tensor,
5904 reduce.as_ptr(),
5905 reduce.len() as i32,
5906 lengths.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5907 offsets.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
5908 axis,
5909 initial.into().c_scalar
5910 ));
5911 Ok(Tensor { c_tensor: c_tensors[0] })
5912 }
5913
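    /// A hedged sketch for `_shape_as_tensor`: it returns the sizes of `self` as a 1-D
    /// int64 tensor. The constructor below is an assumed tch-style helper.
    ///
    /// ```ignore
    /// let t = Tensor::zeros(&[2, 3, 5], (Kind::Float, Device::Cpu));
    /// let shape = t.f_internal_shape_as_tensor()?; // expected to hold [2, 3, 5]
    /// ```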
5914 pub fn f_internal_shape_as_tensor(&self) -> Result<Tensor, TchError> {
5915 let mut c_tensors = [std::ptr::null_mut(); 1];
5916 unsafe_torch_err!(atg__shape_as_tensor(c_tensors.as_mut_ptr(), self.c_tensor));
5917 Ok(Tensor { c_tensor: c_tensors[0] })
5918 }
5919
5920 pub fn f_internal_slow_conv2d_backward(
5921 &self,
5922 grad_input: &Tensor,
5923 grad_weight: &Tensor,
5924 grad_bias: &Tensor,
5925 grad_output: &Tensor,
5926 weight: &Tensor,
5927 kernel_size: impl IntList,
5928 stride: impl IntList,
5929 padding: impl IntList,
5930 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
5931 let mut c_tensors = [std::ptr::null_mut(); 3];
5932 unsafe_torch_err!(atg__slow_conv2d_backward(
5933 c_tensors.as_mut_ptr(),
5934 grad_input.c_tensor,
5935 grad_weight.c_tensor,
5936 grad_bias.c_tensor,
5937 grad_output.c_tensor,
5938 self.c_tensor,
5939 weight.c_tensor,
5940 kernel_size.as_ptr(),
5941 kernel_size.len_i32(),
5942 stride.as_ptr(),
5943 stride.len_i32(),
5944 padding.as_ptr(),
5945 padding.len_i32()
5946 ));
5947 Ok((
5948 Tensor { c_tensor: c_tensors[0] },
5949 Tensor { c_tensor: c_tensors[1] },
5950 Tensor { c_tensor: c_tensors[2] },
5951 ))
5952 }
5953
5954 pub fn f_internal_sobol_engine_draw(
5955 quasi: &Tensor,
5956 n: i64,
5957 sobolstate: &Tensor,
5958 dimension: i64,
5959 num_generated: i64,
5960 dtype: impl Into<Option<Kind>>,
5961 ) -> Result<(Tensor, Tensor), TchError> {
5962 let mut c_tensors = [std::ptr::null_mut(); 2];
5963 unsafe_torch_err!(atg__sobol_engine_draw(
5964 c_tensors.as_mut_ptr(),
5965 quasi.c_tensor,
5966 n,
5967 sobolstate.c_tensor,
5968 dimension,
5969 num_generated,
5970 dtype.into().map_or(-1, |s| s.c_int())
5971 ));
5972 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
5973 }
5974
5975 pub fn f_internal_sobol_engine_ff_(
5976 &mut self,
5977 n: i64,
5978 sobolstate: &Tensor,
5979 dimension: i64,
5980 num_generated: i64,
5981 ) -> Result<Tensor, TchError> {
5982 let mut c_tensors = [std::ptr::null_mut(); 1];
5983 unsafe_torch_err!(atg__sobol_engine_ff_(
5984 c_tensors.as_mut_ptr(),
5985 self.c_tensor,
5986 n,
5987 sobolstate.c_tensor,
5988 dimension,
5989 num_generated
5990 ));
5991 Ok(Tensor { c_tensor: c_tensors[0] })
5992 }
5993
5994 pub fn f_internal_sobol_engine_initialize_state_(
5995 &mut self,
5996 dimension: i64,
5997 ) -> Result<Tensor, TchError> {
5998 let mut c_tensors = [std::ptr::null_mut(); 1];
5999 unsafe_torch_err!(atg__sobol_engine_initialize_state_(
6000 c_tensors.as_mut_ptr(),
6001 self.c_tensor,
6002 dimension
6003 ));
6004 Ok(Tensor { c_tensor: c_tensors[0] })
6005 }
6006
6007 pub fn f_internal_sobol_engine_scramble_(
6008 &mut self,
6009 ltm: &Tensor,
6010 dimension: i64,
6011 ) -> Result<Tensor, TchError> {
6012 let mut c_tensors = [std::ptr::null_mut(); 1];
6013 unsafe_torch_err!(atg__sobol_engine_scramble_(
6014 c_tensors.as_mut_ptr(),
6015 self.c_tensor,
6016 ltm.c_tensor,
6017 dimension
6018 ));
6019 Ok(Tensor { c_tensor: c_tensors[0] })
6020 }
6021
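    /// A hedged sketch for the `_softmax` binding: a numerically stable softmax along
    /// `dim`; `half_to_float` requests a float32 result from a float16 input. Shapes and
    /// constructors below are illustrative assumptions.
    ///
    /// ```ignore
    /// let logits = Tensor::randn(&[2, 5], (Kind::Float, Device::Cpu));
    /// let probs = logits.f_internal_softmax(-1, false)?;
    /// // each row of `probs` should sum to 1
    /// ```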
6022 pub fn f_internal_softmax(&self, dim: i64, half_to_float: bool) -> Result<Tensor, TchError> {
6023 let mut c_tensors = [std::ptr::null_mut(); 1];
6024 unsafe_torch_err!(atg__softmax(
6025 c_tensors.as_mut_ptr(),
6026 self.c_tensor,
6027 dim,
6028 if half_to_float { 1 } else { 0 }
6029 ));
6030 Ok(Tensor { c_tensor: c_tensors[0] })
6031 }
6032
6033 pub fn f_internal_softmax_backward_data(
6034 grad_output: &Tensor,
6035 output: &Tensor,
6036 dim: i64,
6037 input_dtype: Kind,
6038 ) -> Result<Tensor, TchError> {
6039 let mut c_tensors = [std::ptr::null_mut(); 1];
6040 unsafe_torch_err!(atg__softmax_backward_data(
6041 c_tensors.as_mut_ptr(),
6042 grad_output.c_tensor,
6043 output.c_tensor,
6044 dim,
6045 input_dtype.c_int()
6046 ));
6047 Ok(Tensor { c_tensor: c_tensors[0] })
6048 }
6049
6050 pub fn f_internal_softmax_backward_data_out(
6051 grad_input: &Tensor,
6052 grad_output: &Tensor,
6053 output: &Tensor,
6054 dim: i64,
6055 input_dtype: Kind,
6056 ) -> Result<Tensor, TchError> {
6057 let mut c_tensors = [std::ptr::null_mut(); 1];
6058 unsafe_torch_err!(atg__softmax_backward_data_out(
6059 c_tensors.as_mut_ptr(),
6060 grad_input.c_tensor,
6061 grad_output.c_tensor,
6062 output.c_tensor,
6063 dim,
6064 input_dtype.c_int()
6065 ));
6066 Ok(Tensor { c_tensor: c_tensors[0] })
6067 }
6068
6069 pub fn f_internal_softmax_out(
6070 &self,
6071 out: &Tensor,
6072 dim: i64,
6073 half_to_float: bool,
6074 ) -> Result<Tensor, TchError> {
6075 let mut c_tensors = [std::ptr::null_mut(); 1];
6076 unsafe_torch_err!(atg__softmax_out(
6077 c_tensors.as_mut_ptr(),
6078 out.c_tensor,
6079 self.c_tensor,
6080 dim,
6081 if half_to_float { 1 } else { 0 }
6082 ));
6083 Ok(Tensor { c_tensor: c_tensors[0] })
6084 }
6085
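    /// A hedged sketch for `_sparse_addmm`: to the best of my understanding it computes
    /// `self + mat1 @ mat2` with `mat1` expected to be sparse (the backend behind
    /// `torch.sparse.addmm`); treat the operand roles as an assumption. The helpers used
    /// are other bindings from this file plus assumed tch-style constructors.
    ///
    /// ```ignore
    /// let bias = Tensor::zeros(&[2, 3], (Kind::Float, Device::Cpu));
    /// let m1 = Tensor::randn(&[2, 4], (Kind::Float, Device::Cpu))
    ///     .f_internal_to_sparse_sparse_dim(2)?;
    /// let m2 = Tensor::randn(&[4, 3], (Kind::Float, Device::Cpu));
    /// let y = bias.f_internal_sparse_addmm(&m1, &m2)?; // ~ bias + m1.matmul(m2)
    /// ```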
6086 pub fn f_internal_sparse_addmm(
6087 &self,
6088 mat1: &Tensor,
6089 mat2: &Tensor,
6090 ) -> Result<Tensor, TchError> {
6091 let mut c_tensors = [std::ptr::null_mut(); 1];
6092 unsafe_torch_err!(atg__sparse_addmm(
6093 c_tensors.as_mut_ptr(),
6094 self.c_tensor,
6095 mat1.c_tensor,
6096 mat2.c_tensor
6097 ));
6098 Ok(Tensor { c_tensor: c_tensors[0] })
6099 }
6100
6101 pub fn f_internal_sparse_addmm_out(
6102 &self,
6103 out: &Tensor,
6104 mat1: &Tensor,
6105 mat2: &Tensor,
6106 ) -> Result<Tensor, TchError> {
6107 let mut c_tensors = [std::ptr::null_mut(); 1];
6108 unsafe_torch_err!(atg__sparse_addmm_out(
6109 c_tensors.as_mut_ptr(),
6110 out.c_tensor,
6111 self.c_tensor,
6112 mat1.c_tensor,
6113 mat2.c_tensor
6114 ));
6115 Ok(Tensor { c_tensor: c_tensors[0] })
6116 }
6117
6118 pub fn f_internal_sparse_broadcast_to(&self, size: impl IntList) -> Result<Tensor, TchError> {
6119 let mut c_tensors = [std::ptr::null_mut(); 1];
6120 unsafe_torch_err!(atg__sparse_broadcast_to(
6121 c_tensors.as_mut_ptr(),
6122 self.c_tensor,
6123 size.as_ptr(),
6124 size.len_i32()
6125 ));
6126 Ok(Tensor { c_tensor: c_tensors[0] })
6127 }
6128
6129 pub fn f_internal_sparse_broadcast_to_copy(
6130 &self,
6131 size: impl IntList,
6132 ) -> Result<Tensor, TchError> {
6133 let mut c_tensors = [std::ptr::null_mut(); 1];
6134 unsafe_torch_err!(atg__sparse_broadcast_to_copy(
6135 c_tensors.as_mut_ptr(),
6136 self.c_tensor,
6137 size.as_ptr(),
6138 size.len_i32()
6139 ));
6140 Ok(Tensor { c_tensor: c_tensors[0] })
6141 }
6142
6143 pub fn f_internal_sparse_broadcast_to_copy_out(
6144 &self,
6145 out: &Tensor,
6146 size: impl IntList,
6147 ) -> Result<Tensor, TchError> {
6148 let mut c_tensors = [std::ptr::null_mut(); 1];
6149 unsafe_torch_err!(atg__sparse_broadcast_to_copy_out(
6150 c_tensors.as_mut_ptr(),
6151 out.c_tensor,
6152 self.c_tensor,
6153 size.as_ptr(),
6154 size.len_i32()
6155 ));
6156 Ok(Tensor { c_tensor: c_tensors[0] })
6157 }
6158
6159 pub fn f_internal_sparse_bsc_tensor_unsafe(
6160 ccol_indices: &Tensor,
6161 row_indices: &Tensor,
6162 values: &Tensor,
6163 size: impl IntList,
6164 options: (Kind, Device),
6165 ) -> Result<Tensor, TchError> {
6166 let mut c_tensors = [std::ptr::null_mut(); 1];
6167 unsafe_torch_err!(atg__sparse_bsc_tensor_unsafe(
6168 c_tensors.as_mut_ptr(),
6169 ccol_indices.c_tensor,
6170 row_indices.c_tensor,
6171 values.c_tensor,
6172 size.as_ptr(),
6173 size.len_i32(),
6174 options.0.c_int(),
6175 options.1.c_int()
6176 ));
6177 Ok(Tensor { c_tensor: c_tensors[0] })
6178 }
6179
6180 pub fn f_internal_sparse_bsr_tensor_unsafe(
6181 crow_indices: &Tensor,
6182 col_indices: &Tensor,
6183 values: &Tensor,
6184 size: impl IntList,
6185 options: (Kind, Device),
6186 ) -> Result<Tensor, TchError> {
6187 let mut c_tensors = [std::ptr::null_mut(); 1];
6188 unsafe_torch_err!(atg__sparse_bsr_tensor_unsafe(
6189 c_tensors.as_mut_ptr(),
6190 crow_indices.c_tensor,
6191 col_indices.c_tensor,
6192 values.c_tensor,
6193 size.as_ptr(),
6194 size.len_i32(),
6195 options.0.c_int(),
6196 options.1.c_int()
6197 ));
6198 Ok(Tensor { c_tensor: c_tensors[0] })
6199 }
6200
6201 pub fn f_internal_sparse_compressed_tensor_unsafe(
6202 compressed_indices: &Tensor,
6203 plain_indices: &Tensor,
6204 values: &Tensor,
6205 size: impl IntList,
6206 options: (Kind, Device),
6207 ) -> Result<Tensor, TchError> {
6208 let mut c_tensors = [std::ptr::null_mut(); 1];
6209 unsafe_torch_err!(atg__sparse_compressed_tensor_unsafe(
6210 c_tensors.as_mut_ptr(),
6211 compressed_indices.c_tensor,
6212 plain_indices.c_tensor,
6213 values.c_tensor,
6214 size.as_ptr(),
6215 size.len_i32(),
6216 options.0.c_int(),
6217 options.1.c_int()
6218 ));
6219 Ok(Tensor { c_tensor: c_tensors[0] })
6220 }
6221
6222 pub fn f_internal_sparse_compressed_tensor_with_dims(
6223 nnz: i64,
6224 dense_dim: i64,
6225 size: impl IntList,
6226 blocksize: impl IntList,
6227 index_dtype: Kind,
6228 options: (Kind, Device),
6229 ) -> Result<Tensor, TchError> {
6230 let mut c_tensors = [std::ptr::null_mut(); 1];
6231 unsafe_torch_err!(atg__sparse_compressed_tensor_with_dims(
6232 c_tensors.as_mut_ptr(),
6233 nnz,
6234 dense_dim,
6235 size.as_ptr(),
6236 size.len_i32(),
6237 blocksize.as_ptr(),
6238 blocksize.len_i32(),
6239 index_dtype.c_int(),
6240 options.0.c_int(),
6241 options.1.c_int()
6242 ));
6243 Ok(Tensor { c_tensor: c_tensors[0] })
6244 }
6245
6246 pub fn f_internal_sparse_coo_tensor_unsafe(
6247 indices: &Tensor,
6248 values: &Tensor,
6249 size: impl IntList,
6250 options: (Kind, Device),
6251 is_coalesced: bool,
6252 ) -> Result<Tensor, TchError> {
6253 let mut c_tensors = [std::ptr::null_mut(); 1];
6254 unsafe_torch_err!(atg__sparse_coo_tensor_unsafe(
6255 c_tensors.as_mut_ptr(),
6256 indices.c_tensor,
6257 values.c_tensor,
6258 size.as_ptr(),
6259 size.len_i32(),
6260 options.0.c_int(),
6261 options.1.c_int(),
6262 if is_coalesced { 1 } else { 0 }
6263 ));
6264 Ok(Tensor { c_tensor: c_tensors[0] })
6265 }
6266
6267 pub fn f_internal_sparse_coo_tensor_with_dims(
6268 sparse_dim: i64,
6269 dense_dim: i64,
6270 size: impl IntList,
6271 options: (Kind, Device),
6272 ) -> Result<Tensor, TchError> {
6273 let mut c_tensors = [std::ptr::null_mut(); 1];
6274 unsafe_torch_err!(atg__sparse_coo_tensor_with_dims(
6275 c_tensors.as_mut_ptr(),
6276 sparse_dim,
6277 dense_dim,
6278 size.as_ptr(),
6279 size.len_i32(),
6280 options.0.c_int(),
6281 options.1.c_int()
6282 ));
6283 Ok(Tensor { c_tensor: c_tensors[0] })
6284 }
6285
6286 pub fn f_internal_sparse_coo_tensor_with_dims_and_tensors(
6287 sparse_dim: i64,
6288 dense_dim: i64,
6289 size: impl IntList,
6290 indices: &Tensor,
6291 values: &Tensor,
6292 options: (Kind, Device),
6293 is_coalesced: bool,
6294 ) -> Result<Tensor, TchError> {
6295 let mut c_tensors = [std::ptr::null_mut(); 1];
6296 unsafe_torch_err!(atg__sparse_coo_tensor_with_dims_and_tensors(
6297 c_tensors.as_mut_ptr(),
6298 sparse_dim,
6299 dense_dim,
6300 size.as_ptr(),
6301 size.len_i32(),
6302 indices.c_tensor,
6303 values.c_tensor,
6304 options.0.c_int(),
6305 options.1.c_int(),
6306 if is_coalesced { 1 } else { 0 }
6307 ));
6308 Ok(Tensor { c_tensor: c_tensors[0] })
6309 }
6310
6311 pub fn f_internal_sparse_coo_tensor_with_dims_and_tensors_out(
6312 out: &Tensor,
6313 sparse_dim: i64,
6314 dense_dim: i64,
6315 size: impl IntList,
6316 indices: &Tensor,
6317 values: &Tensor,
6318 is_coalesced: bool,
6319 ) -> Result<Tensor, TchError> {
6320 let mut c_tensors = [std::ptr::null_mut(); 1];
6321 unsafe_torch_err!(atg__sparse_coo_tensor_with_dims_and_tensors_out(
6322 c_tensors.as_mut_ptr(),
6323 out.c_tensor,
6324 sparse_dim,
6325 dense_dim,
6326 size.as_ptr(),
6327 size.len_i32(),
6328 indices.c_tensor,
6329 values.c_tensor,
6330 if is_coalesced { 1 } else { 0 }
6331 ));
6332 Ok(Tensor { c_tensor: c_tensors[0] })
6333 }
6334
6335 pub fn f_internal_sparse_coo_tensor_with_dims_out(
6336 out: &Tensor,
6337 sparse_dim: i64,
6338 dense_dim: i64,
6339 size: impl IntList,
6340 ) -> Result<Tensor, TchError> {
6341 let mut c_tensors = [std::ptr::null_mut(); 1];
6342 unsafe_torch_err!(atg__sparse_coo_tensor_with_dims_out(
6343 c_tensors.as_mut_ptr(),
6344 out.c_tensor,
6345 sparse_dim,
6346 dense_dim,
6347 size.as_ptr(),
6348 size.len_i32()
6349 ));
6350 Ok(Tensor { c_tensor: c_tensors[0] })
6351 }
6352
6353 pub fn f_internal_sparse_csc_tensor_unsafe(
6354 ccol_indices: &Tensor,
6355 row_indices: &Tensor,
6356 values: &Tensor,
6357 size: impl IntList,
6358 options: (Kind, Device),
6359 ) -> Result<Tensor, TchError> {
6360 let mut c_tensors = [std::ptr::null_mut(); 1];
6361 unsafe_torch_err!(atg__sparse_csc_tensor_unsafe(
6362 c_tensors.as_mut_ptr(),
6363 ccol_indices.c_tensor,
6364 row_indices.c_tensor,
6365 values.c_tensor,
6366 size.as_ptr(),
6367 size.len_i32(),
6368 options.0.c_int(),
6369 options.1.c_int()
6370 ));
6371 Ok(Tensor { c_tensor: c_tensors[0] })
6372 }
6373
6374 pub fn f_internal_sparse_csr_prod(
6375 &self,
6376 dim: impl IntList,
6377 keepdim: bool,
6378 dtype: impl Into<Option<Kind>>,
6379 ) -> Result<Tensor, TchError> {
6380 let mut c_tensors = [std::ptr::null_mut(); 1];
6381 unsafe_torch_err!(atg__sparse_csr_prod(
6382 c_tensors.as_mut_ptr(),
6383 self.c_tensor,
6384 dim.as_ptr(),
6385 dim.len_i32(),
6386 if keepdim { 1 } else { 0 },
6387 dtype.into().map_or(-1, |s| s.c_int())
6388 ));
6389 Ok(Tensor { c_tensor: c_tensors[0] })
6390 }
6391
6392 pub fn f_internal_sparse_csr_prod_dim_dtype_out(
6393 &self,
6394 out: &Tensor,
6395 dim: impl IntList,
6396 keepdim: bool,
6397 dtype: impl Into<Option<Kind>>,
6398 ) -> Result<Tensor, TchError> {
6399 let mut c_tensors = [std::ptr::null_mut(); 1];
6400 unsafe_torch_err!(atg__sparse_csr_prod_dim_dtype_out(
6401 c_tensors.as_mut_ptr(),
6402 out.c_tensor,
6403 self.c_tensor,
6404 dim.as_ptr(),
6405 dim.len_i32(),
6406 if keepdim { 1 } else { 0 },
6407 dtype.into().map_or(-1, |s| s.c_int())
6408 ));
6409 Ok(Tensor { c_tensor: c_tensors[0] })
6410 }
6411
6412 pub fn f_internal_sparse_csr_sum(
6413 &self,
6414 dim: impl IntList,
6415 keepdim: bool,
6416 dtype: impl Into<Option<Kind>>,
6417 ) -> Result<Tensor, TchError> {
6418 let mut c_tensors = [std::ptr::null_mut(); 1];
6419 unsafe_torch_err!(atg__sparse_csr_sum(
6420 c_tensors.as_mut_ptr(),
6421 self.c_tensor,
6422 dim.as_ptr(),
6423 dim.len_i32(),
6424 if keepdim { 1 } else { 0 },
6425 dtype.into().map_or(-1, |s| s.c_int())
6426 ));
6427 Ok(Tensor { c_tensor: c_tensors[0] })
6428 }
6429
6430 pub fn f_internal_sparse_csr_sum_dim_dtype_out(
6431 &self,
6432 out: &Tensor,
6433 dim: impl IntList,
6434 keepdim: bool,
6435 dtype: impl Into<Option<Kind>>,
6436 ) -> Result<Tensor, TchError> {
6437 let mut c_tensors = [std::ptr::null_mut(); 1];
6438 unsafe_torch_err!(atg__sparse_csr_sum_dim_dtype_out(
6439 c_tensors.as_mut_ptr(),
6440 out.c_tensor,
6441 self.c_tensor,
6442 dim.as_ptr(),
6443 dim.len_i32(),
6444 if keepdim { 1 } else { 0 },
6445 dtype.into().map_or(-1, |s| s.c_int())
6446 ));
6447 Ok(Tensor { c_tensor: c_tensors[0] })
6448 }
6449
6450 pub fn f_internal_sparse_csr_tensor_unsafe(
6451 crow_indices: &Tensor,
6452 col_indices: &Tensor,
6453 values: &Tensor,
6454 size: impl IntList,
6455 options: (Kind, Device),
6456 ) -> Result<Tensor, TchError> {
6457 let mut c_tensors = [std::ptr::null_mut(); 1];
6458 unsafe_torch_err!(atg__sparse_csr_tensor_unsafe(
6459 c_tensors.as_mut_ptr(),
6460 crow_indices.c_tensor,
6461 col_indices.c_tensor,
6462 values.c_tensor,
6463 size.as_ptr(),
6464 size.len_i32(),
6465 options.0.c_int(),
6466 options.1.c_int()
6467 ));
6468 Ok(Tensor { c_tensor: c_tensors[0] })
6469 }
6470
6471 pub fn f_internal_sparse_log_softmax(
6472 &self,
6473 dim: i64,
6474 half_to_float: bool,
6475 ) -> Result<Tensor, TchError> {
6476 let mut c_tensors = [std::ptr::null_mut(); 1];
6477 unsafe_torch_err!(atg__sparse_log_softmax(
6478 c_tensors.as_mut_ptr(),
6479 self.c_tensor,
6480 dim,
6481 if half_to_float { 1 } else { 0 }
6482 ));
6483 Ok(Tensor { c_tensor: c_tensors[0] })
6484 }
6485
6486 pub fn f_internal_sparse_log_softmax_backward_data(
6487 &self,
6488 grad_output: &Tensor,
6489 output: &Tensor,
6490 dim: i64,
6491 ) -> Result<Tensor, TchError> {
6492 let mut c_tensors = [std::ptr::null_mut(); 1];
6493 unsafe_torch_err!(atg__sparse_log_softmax_backward_data(
6494 c_tensors.as_mut_ptr(),
6495 grad_output.c_tensor,
6496 output.c_tensor,
6497 dim,
6498 self.c_tensor
6499 ));
6500 Ok(Tensor { c_tensor: c_tensors[0] })
6501 }
6502
6503 pub fn f_internal_sparse_log_softmax_backward_data_out(
6504 &self,
6505 out: &Tensor,
6506 grad_output: &Tensor,
6507 output: &Tensor,
6508 dim: i64,
6509 ) -> Result<Tensor, TchError> {
6510 let mut c_tensors = [std::ptr::null_mut(); 1];
6511 unsafe_torch_err!(atg__sparse_log_softmax_backward_data_out(
6512 c_tensors.as_mut_ptr(),
6513 out.c_tensor,
6514 grad_output.c_tensor,
6515 output.c_tensor,
6516 dim,
6517 self.c_tensor
6518 ));
6519 Ok(Tensor { c_tensor: c_tensors[0] })
6520 }
6521
6522 pub fn f_internal_sparse_log_softmax_int(
6523 &self,
6524 dim: i64,
6525 dtype: impl Into<Option<Kind>>,
6526 ) -> Result<Tensor, TchError> {
6527 let mut c_tensors = [std::ptr::null_mut(); 1];
6528 unsafe_torch_err!(atg__sparse_log_softmax_int(
6529 c_tensors.as_mut_ptr(),
6530 self.c_tensor,
6531 dim,
6532 dtype.into().map_or(-1, |s| s.c_int())
6533 ));
6534 Ok(Tensor { c_tensor: c_tensors[0] })
6535 }
6536
6537 pub fn f_internal_sparse_log_softmax_out(
6538 &self,
6539 out: &Tensor,
6540 dim: i64,
6541 half_to_float: bool,
6542 ) -> Result<Tensor, TchError> {
6543 let mut c_tensors = [std::ptr::null_mut(); 1];
6544 unsafe_torch_err!(atg__sparse_log_softmax_out(
6545 c_tensors.as_mut_ptr(),
6546 out.c_tensor,
6547 self.c_tensor,
6548 dim,
6549 if half_to_float { 1 } else { 0 }
6550 ));
6551 Ok(Tensor { c_tensor: c_tensors[0] })
6552 }
6553
6554 pub fn f_internal_sparse_mask_projection(
6555 &self,
6556 mask: &Tensor,
6557 accumulate_matches: bool,
6558 ) -> Result<Tensor, TchError> {
6559 let mut c_tensors = [std::ptr::null_mut(); 1];
6560 unsafe_torch_err!(atg__sparse_mask_projection(
6561 c_tensors.as_mut_ptr(),
6562 self.c_tensor,
6563 mask.c_tensor,
6564 if accumulate_matches { 1 } else { 0 }
6565 ));
6566 Ok(Tensor { c_tensor: c_tensors[0] })
6567 }
6568
6569 pub fn f_internal_sparse_mask_projection_out(
6570 &self,
6571 out: &Tensor,
6572 mask: &Tensor,
6573 accumulate_matches: bool,
6574 ) -> Result<Tensor, TchError> {
6575 let mut c_tensors = [std::ptr::null_mut(); 1];
6576 unsafe_torch_err!(atg__sparse_mask_projection_out(
6577 c_tensors.as_mut_ptr(),
6578 out.c_tensor,
6579 self.c_tensor,
6580 mask.c_tensor,
6581 if accumulate_matches { 1 } else { 0 }
6582 ));
6583 Ok(Tensor { c_tensor: c_tensors[0] })
6584 }
6585
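    /// A hedged sketch for `_sparse_mm`: the matrix product of a sparse COO matrix with
    /// a dense matrix, returning a dense result. Constructors and shapes are
    /// illustrative; the sparse operand is built with another binding from this file.
    ///
    /// ```ignore
    /// let a = Tensor::randn(&[4, 3], (Kind::Float, Device::Cpu))
    ///     .f_internal_to_sparse_sparse_dim(2)?;
    /// let b = Tensor::randn(&[3, 5], (Kind::Float, Device::Cpu));
    /// let prod = Tensor::f_internal_sparse_mm(&a, &b)?; // dense, shape [4, 5]
    /// ```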
6586 pub fn f_internal_sparse_mm(sparse: &Tensor, dense: &Tensor) -> Result<Tensor, TchError> {
6587 let mut c_tensors = [std::ptr::null_mut(); 1];
6588 unsafe_torch_err!(atg__sparse_mm(c_tensors.as_mut_ptr(), sparse.c_tensor, dense.c_tensor));
6589 Ok(Tensor { c_tensor: c_tensors[0] })
6590 }
6591
6592 pub fn f_internal_sparse_mm_reduce(
6593 sparse: &Tensor,
6594 dense: &Tensor,
6595 reduce: &str,
6596 ) -> Result<Tensor, TchError> {
6597 let mut c_tensors = [std::ptr::null_mut(); 1];
6598 unsafe_torch_err!(atg__sparse_mm_reduce(
6599 c_tensors.as_mut_ptr(),
6600 sparse.c_tensor,
6601 dense.c_tensor,
6602 reduce.as_ptr(),
6603 reduce.len() as i32
6604 ));
6605 Ok(Tensor { c_tensor: c_tensors[0] })
6606 }
6607
6608 pub fn f_internal_sparse_mm_reduce_impl(
6609 &self,
6610 other: &Tensor,
6611 reduce: &str,
6612 ) -> Result<(Tensor, Tensor), TchError> {
6613 let mut c_tensors = [std::ptr::null_mut(); 2];
6614 unsafe_torch_err!(atg__sparse_mm_reduce_impl(
6615 c_tensors.as_mut_ptr(),
6616 self.c_tensor,
6617 other.c_tensor,
6618 reduce.as_ptr(),
6619 reduce.len() as i32
6620 ));
6621 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
6622 }
6623
6624 pub fn f_internal_sparse_semi_structured_apply(
6625 &self,
6626 thread_masks: &Tensor,
6627 ) -> Result<(Tensor, Tensor), TchError> {
6628 let mut c_tensors = [std::ptr::null_mut(); 2];
6629 unsafe_torch_err!(atg__sparse_semi_structured_apply(
6630 c_tensors.as_mut_ptr(),
6631 self.c_tensor,
6632 thread_masks.c_tensor
6633 ));
6634 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
6635 }
6636
6637 pub fn f_internal_sparse_semi_structured_apply_dense(
6638 &self,
6639 thread_masks: &Tensor,
6640 ) -> Result<Tensor, TchError> {
6641 let mut c_tensors = [std::ptr::null_mut(); 1];
6642 unsafe_torch_err!(atg__sparse_semi_structured_apply_dense(
6643 c_tensors.as_mut_ptr(),
6644 self.c_tensor,
6645 thread_masks.c_tensor
6646 ));
6647 Ok(Tensor { c_tensor: c_tensors[0] })
6648 }
6649
6650 pub fn f_internal_sparse_semi_structured_linear<T: Borrow<Tensor>>(
6651 &self,
6652 weight: &Tensor,
6653 meta: &Tensor,
6654 bias: Option<T>,
6655 activation: &str,
6656 out_dtype: impl Into<Option<Kind>>,
6657 ) -> Result<Tensor, TchError> {
6658 let mut c_tensors = [std::ptr::null_mut(); 1];
6659 unsafe_torch_err!(atg__sparse_semi_structured_linear(
6660 c_tensors.as_mut_ptr(),
6661 self.c_tensor,
6662 weight.c_tensor,
6663 meta.c_tensor,
6664 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
6665 activation.as_ptr(),
6666 activation.len() as i32,
6667 out_dtype.into().map_or(-1, |s| s.c_int())
6668 ));
6669 Ok(Tensor { c_tensor: c_tensors[0] })
6670 }
6671
6672 pub fn f_internal_sparse_semi_structured_mm(
6673 mat1: &Tensor,
6674 mat1_meta: &Tensor,
6675 mat2: &Tensor,
6676 out_dtype: impl Into<Option<Kind>>,
6677 ) -> Result<Tensor, TchError> {
6678 let mut c_tensors = [std::ptr::null_mut(); 1];
6679 unsafe_torch_err!(atg__sparse_semi_structured_mm(
6680 c_tensors.as_mut_ptr(),
6681 mat1.c_tensor,
6682 mat1_meta.c_tensor,
6683 mat2.c_tensor,
6684 out_dtype.into().map_or(-1, |s| s.c_int())
6685 ));
6686 Ok(Tensor { c_tensor: c_tensors[0] })
6687 }
6688
6689 pub fn f_internal_sparse_semi_structured_tile(
6690 &self,
6691 algorithm: &str,
6692 use_cutlass: bool,
6693 ) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
6694 let mut c_tensors = [std::ptr::null_mut(); 5];
6695 unsafe_torch_err!(atg__sparse_semi_structured_tile(
6696 c_tensors.as_mut_ptr(),
6697 self.c_tensor,
6698 algorithm.as_ptr(),
6699 algorithm.len() as i32,
6700 if use_cutlass { 1 } else { 0 }
6701 ));
6702 Ok((
6703 Tensor { c_tensor: c_tensors[0] },
6704 Tensor { c_tensor: c_tensors[1] },
6705 Tensor { c_tensor: c_tensors[2] },
6706 Tensor { c_tensor: c_tensors[3] },
6707 Tensor { c_tensor: c_tensors[4] },
6708 ))
6709 }
6710
6711 pub fn f_internal_sparse_softmax(
6712 &self,
6713 dim: i64,
6714 half_to_float: bool,
6715 ) -> Result<Tensor, TchError> {
6716 let mut c_tensors = [std::ptr::null_mut(); 1];
6717 unsafe_torch_err!(atg__sparse_softmax(
6718 c_tensors.as_mut_ptr(),
6719 self.c_tensor,
6720 dim,
6721 if half_to_float { 1 } else { 0 }
6722 ));
6723 Ok(Tensor { c_tensor: c_tensors[0] })
6724 }
6725
6726 pub fn f_internal_sparse_softmax_backward_data(
6727 &self,
6728 grad_output: &Tensor,
6729 output: &Tensor,
6730 dim: i64,
6731 ) -> Result<Tensor, TchError> {
6732 let mut c_tensors = [std::ptr::null_mut(); 1];
6733 unsafe_torch_err!(atg__sparse_softmax_backward_data(
6734 c_tensors.as_mut_ptr(),
6735 grad_output.c_tensor,
6736 output.c_tensor,
6737 dim,
6738 self.c_tensor
6739 ));
6740 Ok(Tensor { c_tensor: c_tensors[0] })
6741 }
6742
6743 pub fn f_internal_sparse_softmax_backward_data_out(
6744 &self,
6745 out: &Tensor,
6746 grad_output: &Tensor,
6747 output: &Tensor,
6748 dim: i64,
6749 ) -> Result<Tensor, TchError> {
6750 let mut c_tensors = [std::ptr::null_mut(); 1];
6751 unsafe_torch_err!(atg__sparse_softmax_backward_data_out(
6752 c_tensors.as_mut_ptr(),
6753 out.c_tensor,
6754 grad_output.c_tensor,
6755 output.c_tensor,
6756 dim,
6757 self.c_tensor
6758 ));
6759 Ok(Tensor { c_tensor: c_tensors[0] })
6760 }
6761
6762 pub fn f_internal_sparse_softmax_int(
6763 &self,
6764 dim: i64,
6765 dtype: impl Into<Option<Kind>>,
6766 ) -> Result<Tensor, TchError> {
6767 let mut c_tensors = [std::ptr::null_mut(); 1];
6768 unsafe_torch_err!(atg__sparse_softmax_int(
6769 c_tensors.as_mut_ptr(),
6770 self.c_tensor,
6771 dim,
6772 dtype.into().map_or(-1, |s| s.c_int())
6773 ));
6774 Ok(Tensor { c_tensor: c_tensors[0] })
6775 }
6776
6777 pub fn f_internal_sparse_softmax_out(
6778 &self,
6779 out: &Tensor,
6780 dim: i64,
6781 half_to_float: bool,
6782 ) -> Result<Tensor, TchError> {
6783 let mut c_tensors = [std::ptr::null_mut(); 1];
6784 unsafe_torch_err!(atg__sparse_softmax_out(
6785 c_tensors.as_mut_ptr(),
6786 out.c_tensor,
6787 self.c_tensor,
6788 dim,
6789 if half_to_float { 1 } else { 0 }
6790 ));
6791 Ok(Tensor { c_tensor: c_tensors[0] })
6792 }
6793
6794 pub fn f_internal_sparse_sparse_matmul(&self, other: &Tensor) -> Result<Tensor, TchError> {
6795 let mut c_tensors = [std::ptr::null_mut(); 1];
6796 unsafe_torch_err!(atg__sparse_sparse_matmul(
6797 c_tensors.as_mut_ptr(),
6798 self.c_tensor,
6799 other.c_tensor
6800 ));
6801 Ok(Tensor { c_tensor: c_tensors[0] })
6802 }
6803
6804 pub fn f_internal_sparse_sparse_matmul_out(
6805 &self,
6806 out: &Tensor,
6807 other: &Tensor,
6808 ) -> Result<Tensor, TchError> {
6809 let mut c_tensors = [std::ptr::null_mut(); 1];
6810 unsafe_torch_err!(atg__sparse_sparse_matmul_out(
6811 c_tensors.as_mut_ptr(),
6812 out.c_tensor,
6813 self.c_tensor,
6814 other.c_tensor
6815 ));
6816 Ok(Tensor { c_tensor: c_tensors[0] })
6817 }
6818
6819 pub fn f_internal_sparse_sum(&self) -> Result<Tensor, TchError> {
6820 let mut c_tensors = [std::ptr::null_mut(); 1];
6821 unsafe_torch_err!(atg__sparse_sum(c_tensors.as_mut_ptr(), self.c_tensor));
6822 Ok(Tensor { c_tensor: c_tensors[0] })
6823 }
6824
6825 pub fn f_internal_sparse_sum_backward(
6826 &self,
6827 grad: &Tensor,
6828 dim: impl IntList,
6829 ) -> Result<Tensor, TchError> {
6830 let mut c_tensors = [std::ptr::null_mut(); 1];
6831 unsafe_torch_err!(atg__sparse_sum_backward(
6832 c_tensors.as_mut_ptr(),
6833 grad.c_tensor,
6834 self.c_tensor,
6835 dim.as_ptr(),
6836 dim.len_i32()
6837 ));
6838 Ok(Tensor { c_tensor: c_tensors[0] })
6839 }
6840
6841 pub fn f_internal_sparse_sum_backward_out(
6842 &self,
6843 out: &Tensor,
6844 grad: &Tensor,
6845 dim: impl IntList,
6846 ) -> Result<Tensor, TchError> {
6847 let mut c_tensors = [std::ptr::null_mut(); 1];
6848 unsafe_torch_err!(atg__sparse_sum_backward_out(
6849 c_tensors.as_mut_ptr(),
6850 out.c_tensor,
6851 grad.c_tensor,
6852 self.c_tensor,
6853 dim.as_ptr(),
6854 dim.len_i32()
6855 ));
6856 Ok(Tensor { c_tensor: c_tensors[0] })
6857 }
6858
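    /// A hedged sketch for `_sparse_sum_dim`: sums a sparse tensor over the listed
    /// dimensions (the backend of `torch.sparse.sum`). Whether the result stays sparse
    /// depends on which dimensions are reduced, so the comment below is an assumption.
    ///
    /// ```ignore
    /// let sparse = Tensor::randn(&[4, 3], (Kind::Float, Device::Cpu))
    ///     .f_internal_to_sparse_sparse_dim(2)?;
    /// let row_sums = sparse.f_internal_sparse_sum_dim(&[1])?; // per-row sums, shape [4]
    /// ```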
6859 pub fn f_internal_sparse_sum_dim(&self, dim: impl IntList) -> Result<Tensor, TchError> {
6860 let mut c_tensors = [std::ptr::null_mut(); 1];
6861 unsafe_torch_err!(atg__sparse_sum_dim(
6862 c_tensors.as_mut_ptr(),
6863 self.c_tensor,
6864 dim.as_ptr(),
6865 dim.len_i32()
6866 ));
6867 Ok(Tensor { c_tensor: c_tensors[0] })
6868 }
6869
6870 pub fn f_internal_sparse_sum_dim_dtype(
6871 &self,
6872 dim: impl IntList,
6873 dtype: Kind,
6874 ) -> Result<Tensor, TchError> {
6875 let mut c_tensors = [std::ptr::null_mut(); 1];
6876 unsafe_torch_err!(atg__sparse_sum_dim_dtype(
6877 c_tensors.as_mut_ptr(),
6878 self.c_tensor,
6879 dim.as_ptr(),
6880 dim.len_i32(),
6881 dtype.c_int()
6882 ));
6883 Ok(Tensor { c_tensor: c_tensors[0] })
6884 }
6885
6886 pub fn f_internal_sparse_sum_dim_out(
6887 &self,
6888 out: &Tensor,
6889 dim: impl IntList,
6890 ) -> Result<Tensor, TchError> {
6891 let mut c_tensors = [std::ptr::null_mut(); 1];
6892 unsafe_torch_err!(atg__sparse_sum_dim_out(
6893 c_tensors.as_mut_ptr(),
6894 out.c_tensor,
6895 self.c_tensor,
6896 dim.as_ptr(),
6897 dim.len_i32()
6898 ));
6899 Ok(Tensor { c_tensor: c_tensors[0] })
6900 }
6901
6902 pub fn f_internal_sparse_sum_dtype(&self, dtype: Kind) -> Result<Tensor, TchError> {
6903 let mut c_tensors = [std::ptr::null_mut(); 1];
6904 unsafe_torch_err!(atg__sparse_sum_dtype(
6905 c_tensors.as_mut_ptr(),
6906 self.c_tensor,
6907 dtype.c_int()
6908 ));
6909 Ok(Tensor { c_tensor: c_tensors[0] })
6910 }
6911
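    /// A hedged sketch for `_spdiags`: builds a sparse matrix of the requested `shape`
    /// by placing each row of `diagonals` on the diagonal named by the matching entry of
    /// `offsets` (offset 0 being the main diagonal), in the spirit of
    /// `scipy.sparse.spdiags`. Constructors below are assumed tch-style helpers.
    ///
    /// ```ignore
    /// let diagonals = Tensor::from_slice(&[1.0f32, 2.0, 3.0]).reshape(&[1, 3]);
    /// let offsets = Tensor::from_slice(&[0i64]);
    /// // a 3x3 sparse matrix with [1, 2, 3] on its main diagonal
    /// let m = Tensor::f_internal_spdiags(&diagonals, &offsets, &[3, 3], None)?;
    /// ```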
6912 pub fn f_internal_spdiags(
6913 diagonals: &Tensor,
6914 offsets: &Tensor,
6915 shape: impl IntList,
6916 layout: Option<Layout>,
6917 ) -> Result<Tensor, TchError> {
6918 let mut c_tensors = [std::ptr::null_mut(); 1];
6919 unsafe_torch_err!(atg__spdiags(
6920 c_tensors.as_mut_ptr(),
6921 diagonals.c_tensor,
6922 offsets.c_tensor,
6923 shape.as_ptr(),
6924 shape.len_i32(),
6925 layout.map_or(-1, |s| s.to_i8())
6926 ));
6927 Ok(Tensor { c_tensor: c_tensors[0] })
6928 }
6929
6930 pub fn f_internal_spdiags_out(
6931 out: &Tensor,
6932 diagonals: &Tensor,
6933 offsets: &Tensor,
6934 shape: impl IntList,
6935 layout: Option<Layout>,
6936 ) -> Result<Tensor, TchError> {
6937 let mut c_tensors = [std::ptr::null_mut(); 1];
6938 unsafe_torch_err!(atg__spdiags_out(
6939 c_tensors.as_mut_ptr(),
6940 out.c_tensor,
6941 diagonals.c_tensor,
6942 offsets.c_tensor,
6943 shape.as_ptr(),
6944 shape.len_i32(),
6945 layout.map_or(-1, |s| s.to_i8())
6946 ));
6947 Ok(Tensor { c_tensor: c_tensors[0] })
6948 }
6949
6950 pub fn f_internal_spsolve(a: &Tensor, b: &Tensor, left: bool) -> Result<Tensor, TchError> {
6951 let mut c_tensors = [std::ptr::null_mut(); 1];
6952 unsafe_torch_err!(atg__spsolve(
6953 c_tensors.as_mut_ptr(),
6954 a.c_tensor,
6955 b.c_tensor,
6956 if left { 1 } else { 0 }
6957 ));
6958 Ok(Tensor { c_tensor: c_tensors[0] })
6959 }
6960
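    /// A hedged sketch for `_stack`: stacks same-shaped tensors along a new dimension,
    /// like the public `stack`. The input tensor is illustrative.
    ///
    /// ```ignore
    /// let t = Tensor::randn(&[3, 4], (Kind::Float, Device::Cpu));
    /// let stacked = Tensor::f_internal_stack(&[&t, &t], 0)?; // shape [2, 3, 4]
    /// ```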
6961 pub fn f_internal_stack<T: Borrow<Tensor>>(
6962 tensors: &[T],
6963 dim: i64,
6964 ) -> Result<Tensor, TchError> {
6965 let mut c_tensors = [std::ptr::null_mut(); 1];
6966 unsafe_torch_err!(atg__stack(
6967 c_tensors.as_mut_ptr(),
6968 ptr_list(tensors).as_ptr(),
6969 tensors.len() as i32,
6970 dim
6971 ));
6972 Ok(Tensor { c_tensor: c_tensors[0] })
6973 }
6974
6975 pub fn f_internal_stack_out<T: Borrow<Tensor>>(
6976 out: &Tensor,
6977 tensors: &[T],
6978 dim: i64,
6979 ) -> Result<Tensor, TchError> {
6980 let mut c_tensors = [std::ptr::null_mut(); 1];
6981 unsafe_torch_err!(atg__stack_out(
6982 c_tensors.as_mut_ptr(),
6983 out.c_tensor,
6984 ptr_list(tensors).as_ptr(),
6985 tensors.len() as i32,
6986 dim
6987 ));
6988 Ok(Tensor { c_tensor: c_tensors[0] })
6989 }
6990
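    /// A hedged sketch for `_standard_gamma`: draws one Gamma(concentration, 1) sample
    /// per element of `self`, which is how `torch.distributions.Gamma` samples
    /// internally. The constructor is an assumed tch-style helper; outputs are random.
    ///
    /// ```ignore
    /// let alpha = Tensor::from_slice(&[0.5f32, 1.0, 2.0]);
    /// let samples = alpha.f_internal_standard_gamma()?; // same shape as `alpha`
    /// ```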
6991 pub fn f_internal_standard_gamma(&self) -> Result<Tensor, TchError> {
6992 let mut c_tensors = [std::ptr::null_mut(); 1];
6993 unsafe_torch_err!(atg__standard_gamma(c_tensors.as_mut_ptr(), self.c_tensor));
6994 Ok(Tensor { c_tensor: c_tensors[0] })
6995 }
6996
6997 pub fn f_internal_standard_gamma_grad(&self, output: &Tensor) -> Result<Tensor, TchError> {
6998 let mut c_tensors = [std::ptr::null_mut(); 1];
6999 unsafe_torch_err!(atg__standard_gamma_grad(
7000 c_tensors.as_mut_ptr(),
7001 self.c_tensor,
7002 output.c_tensor
7003 ));
7004 Ok(Tensor { c_tensor: c_tensors[0] })
7005 }
7006
7007 pub fn f_internal_standard_gamma_grad_out(
7008 &self,
7009 out: &Tensor,
7010 output: &Tensor,
7011 ) -> Result<Tensor, TchError> {
7012 let mut c_tensors = [std::ptr::null_mut(); 1];
7013 unsafe_torch_err!(atg__standard_gamma_grad_out(
7014 c_tensors.as_mut_ptr(),
7015 out.c_tensor,
7016 self.c_tensor,
7017 output.c_tensor
7018 ));
7019 Ok(Tensor { c_tensor: c_tensors[0] })
7020 }
7021
7022 pub fn f_internal_standard_gamma_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
7023 let mut c_tensors = [std::ptr::null_mut(); 1];
7024 unsafe_torch_err!(atg__standard_gamma_out(
7025 c_tensors.as_mut_ptr(),
7026 out.c_tensor,
7027 self.c_tensor
7028 ));
7029 Ok(Tensor { c_tensor: c_tensors[0] })
7030 }
7031
7032 pub fn f_internal_test_ambiguous_defaults(
7033 dummy: &Tensor,
7034 a: i64,
7035 b: i64,
7036 ) -> Result<Tensor, TchError> {
7037 let mut c_tensors = [std::ptr::null_mut(); 1];
7038 unsafe_torch_err!(atg__test_ambiguous_defaults(
7039 c_tensors.as_mut_ptr(),
7040 dummy.c_tensor,
7041 a,
7042 b
7043 ));
7044 Ok(Tensor { c_tensor: c_tensors[0] })
7045 }
7046
7047 pub fn f_internal_test_ambiguous_defaults_b(
7048 dummy: &Tensor,
7049 a: i64,
7050 b: &str,
7051 ) -> Result<Tensor, TchError> {
7052 let mut c_tensors = [std::ptr::null_mut(); 1];
7053 unsafe_torch_err!(atg__test_ambiguous_defaults_b(
7054 c_tensors.as_mut_ptr(),
7055 dummy.c_tensor,
7056 a,
7057 b.as_ptr(),
7058 b.len() as i32
7059 ));
7060 Ok(Tensor { c_tensor: c_tensors[0] })
7061 }
7062
7063 pub fn f_internal_test_autograd_multiple_dispatch(&self) -> Result<Tensor, TchError> {
7064 let mut c_tensors = [std::ptr::null_mut(); 1];
7065 unsafe_torch_err!(atg__test_autograd_multiple_dispatch(
7066 c_tensors.as_mut_ptr(),
7067 self.c_tensor
7068 ));
7069 Ok(Tensor { c_tensor: c_tensors[0] })
7070 }
7071
7072 pub fn f_internal_test_autograd_multiple_dispatch_fullcoverage_out(
7073 &self,
7074 out: &Tensor,
7075 ) -> Result<Tensor, TchError> {
7076 let mut c_tensors = [std::ptr::null_mut(); 1];
7077 unsafe_torch_err!(atg__test_autograd_multiple_dispatch_fullcoverage_out(
7078 c_tensors.as_mut_ptr(),
7079 out.c_tensor,
7080 self.c_tensor
7081 ));
7082 Ok(Tensor { c_tensor: c_tensors[0] })
7083 }
7084
7085 pub fn f_internal_test_autograd_multiple_dispatch_ntonly(
7086 &self,
7087 b: bool,
7088 ) -> Result<Tensor, TchError> {
7089 let mut c_tensors = [std::ptr::null_mut(); 1];
7090 unsafe_torch_err!(atg__test_autograd_multiple_dispatch_ntonly(
7091 c_tensors.as_mut_ptr(),
7092 self.c_tensor,
7093 if b { 1 } else { 0 }
7094 ));
7095 Ok(Tensor { c_tensor: c_tensors[0] })
7096 }
7097
7098 pub fn f_internal_test_autograd_multiple_dispatch_view(&self) -> Result<Tensor, TchError> {
7099 let mut c_tensors = [std::ptr::null_mut(); 1];
7100 unsafe_torch_err!(atg__test_autograd_multiple_dispatch_view(
7101 c_tensors.as_mut_ptr(),
7102 self.c_tensor
7103 ));
7104 Ok(Tensor { c_tensor: c_tensors[0] })
7105 }
7106
7107 pub fn f_internal_test_autograd_multiple_dispatch_view_copy(&self) -> Result<Tensor, TchError> {
7108 let mut c_tensors = [std::ptr::null_mut(); 1];
7109 unsafe_torch_err!(atg__test_autograd_multiple_dispatch_view_copy(
7110 c_tensors.as_mut_ptr(),
7111 self.c_tensor
7112 ));
7113 Ok(Tensor { c_tensor: c_tensors[0] })
7114 }
7115
7116 pub fn f_internal_test_autograd_multiple_dispatch_view_copy_out(
7117 &self,
7118 out: &Tensor,
7119 ) -> Result<Tensor, TchError> {
7120 let mut c_tensors = [std::ptr::null_mut(); 1];
7121 unsafe_torch_err!(atg__test_autograd_multiple_dispatch_view_copy_out(
7122 c_tensors.as_mut_ptr(),
7123 out.c_tensor,
7124 self.c_tensor
7125 ));
7126 Ok(Tensor { c_tensor: c_tensors[0] })
7127 }
7128
7129 pub fn f_internal_test_check_tensor(&self) -> Result<Tensor, TchError> {
7130 let mut c_tensors = [std::ptr::null_mut(); 1];
7131 unsafe_torch_err!(atg__test_check_tensor(c_tensors.as_mut_ptr(), self.c_tensor));
7132 Ok(Tensor { c_tensor: c_tensors[0] })
7133 }
7134
7135 pub fn f_internal_test_functorch_fallback(&self, other: &Tensor) -> Result<Tensor, TchError> {
7136 let mut c_tensors = [std::ptr::null_mut(); 1];
7137 unsafe_torch_err!(atg__test_functorch_fallback(
7138 c_tensors.as_mut_ptr(),
7139 self.c_tensor,
7140 other.c_tensor
7141 ));
7142 Ok(Tensor { c_tensor: c_tensors[0] })
7143 }
7144
7145 pub fn f_internal_test_functorch_fallback_out(
7146 &self,
7147 out: &Tensor,
7148 other: &Tensor,
7149 ) -> Result<Tensor, TchError> {
7150 let mut c_tensors = [std::ptr::null_mut(); 1];
7151 unsafe_torch_err!(atg__test_functorch_fallback_out(
7152 c_tensors.as_mut_ptr(),
7153 out.c_tensor,
7154 self.c_tensor,
7155 other.c_tensor
7156 ));
7157 Ok(Tensor { c_tensor: c_tensors[0] })
7158 }
7159
7160 pub fn f_internal_test_optional_filled_intlist(
7161 values: &Tensor,
7162 addends: impl IntListOption,
7163 ) -> Result<Tensor, TchError> {
7164 let mut c_tensors = [std::ptr::null_mut(); 1];
7165 unsafe_torch_err!(atg__test_optional_filled_intlist(
7166 c_tensors.as_mut_ptr(),
7167 values.c_tensor,
7168 addends.as_ptr(),
7169 addends.len_i32()
7170 ));
7171 Ok(Tensor { c_tensor: c_tensors[0] })
7172 }
7173
7174 pub fn f_internal_test_optional_filled_intlist_out(
7175 out: &Tensor,
7176 values: &Tensor,
7177 addends: impl IntListOption,
7178 ) -> Result<Tensor, TchError> {
7179 let mut c_tensors = [std::ptr::null_mut(); 1];
7180 unsafe_torch_err!(atg__test_optional_filled_intlist_out(
7181 c_tensors.as_mut_ptr(),
7182 out.c_tensor,
7183 values.c_tensor,
7184 addends.as_ptr(),
7185 addends.len_i32()
7186 ));
7187 Ok(Tensor { c_tensor: c_tensors[0] })
7188 }
7189
7190 pub fn f_internal_test_optional_floatlist(
7191 values: &Tensor,
7192 addends: impl DoubleList,
7193 ) -> Result<Tensor, TchError> {
7194 let mut c_tensors = [std::ptr::null_mut(); 1];
7195 unsafe_torch_err!(atg__test_optional_floatlist(
7196 c_tensors.as_mut_ptr(),
7197 values.c_tensor,
7198 addends.as_ptr(),
7199 addends.len_i32()
7200 ));
7201 Ok(Tensor { c_tensor: c_tensors[0] })
7202 }
7203
7204 pub fn f_internal_test_optional_floatlist_out(
7205 out: &Tensor,
7206 values: &Tensor,
7207 addends: impl DoubleList,
7208 ) -> Result<Tensor, TchError> {
7209 let mut c_tensors = [std::ptr::null_mut(); 1];
7210 unsafe_torch_err!(atg__test_optional_floatlist_out(
7211 c_tensors.as_mut_ptr(),
7212 out.c_tensor,
7213 values.c_tensor,
7214 addends.as_ptr(),
7215 addends.len_i32()
7216 ));
7217 Ok(Tensor { c_tensor: c_tensors[0] })
7218 }
7219
7220 pub fn f_internal_test_optional_intlist(
7221 values: &Tensor,
7222 addends: impl IntListOption,
7223 ) -> Result<Tensor, TchError> {
7224 let mut c_tensors = [std::ptr::null_mut(); 1];
7225 unsafe_torch_err!(atg__test_optional_intlist(
7226 c_tensors.as_mut_ptr(),
7227 values.c_tensor,
7228 addends.as_ptr(),
7229 addends.len_i32()
7230 ));
7231 Ok(Tensor { c_tensor: c_tensors[0] })
7232 }
7233
7234 pub fn f_internal_test_optional_intlist_out(
7235 out: &Tensor,
7236 values: &Tensor,
7237 addends: impl IntListOption,
7238 ) -> Result<Tensor, TchError> {
7239 let mut c_tensors = [std::ptr::null_mut(); 1];
7240 unsafe_torch_err!(atg__test_optional_intlist_out(
7241 c_tensors.as_mut_ptr(),
7242 out.c_tensor,
7243 values.c_tensor,
7244 addends.as_ptr(),
7245 addends.len_i32()
7246 ));
7247 Ok(Tensor { c_tensor: c_tensors[0] })
7248 }
7249
7250 pub fn f_internal_test_parallel_materialize(
7251 &self,
7252 num_parallel: i64,
7253 skip_first: bool,
7254 ) -> Result<Tensor, TchError> {
7255 let mut c_tensors = [std::ptr::null_mut(); 1];
7256 unsafe_torch_err!(atg__test_parallel_materialize(
7257 c_tensors.as_mut_ptr(),
7258 self.c_tensor,
7259 num_parallel,
7260 if skip_first { 1 } else { 0 }
7261 ));
7262 Ok(Tensor { c_tensor: c_tensors[0] })
7263 }
7264
7265 pub fn f_internal_test_serialization_subcmul(
7266 &self,
7267 other: &Tensor,
7268 ) -> Result<Tensor, TchError> {
7269 let mut c_tensors = [std::ptr::null_mut(); 1];
7270 unsafe_torch_err!(atg__test_serialization_subcmul(
7271 c_tensors.as_mut_ptr(),
7272 self.c_tensor,
7273 other.c_tensor
7274 ));
7275 Ok(Tensor { c_tensor: c_tensors[0] })
7276 }
7277
7278 pub fn f_internal_test_string_default(
7279 dummy: &Tensor,
7280 a: &str,
7281 b: &str,
7282 ) -> Result<Tensor, TchError> {
7283 let mut c_tensors = [std::ptr::null_mut(); 1];
7284 unsafe_torch_err!(atg__test_string_default(
7285 c_tensors.as_mut_ptr(),
7286 dummy.c_tensor,
7287 a.as_ptr(),
7288 a.len() as i32,
7289 b.as_ptr(),
7290 b.len() as i32
7291 ));
7292 Ok(Tensor { c_tensor: c_tensors[0] })
7293 }
7294
7295 pub fn f_internal_test_warn_in_autograd(&self) -> Result<Tensor, TchError> {
7296 let mut c_tensors = [std::ptr::null_mut(); 1];
7297 unsafe_torch_err!(atg__test_warn_in_autograd(c_tensors.as_mut_ptr(), self.c_tensor));
7298 Ok(Tensor { c_tensor: c_tensors[0] })
7299 }
7300
7301 pub fn f_internal_test_warn_in_autograd_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
7302 let mut c_tensors = [std::ptr::null_mut(); 1];
7303 unsafe_torch_err!(atg__test_warn_in_autograd_out(
7304 c_tensors.as_mut_ptr(),
7305 out.c_tensor,
7306 self.c_tensor
7307 ));
7308 Ok(Tensor { c_tensor: c_tensors[0] })
7309 }
7310
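    /// A hedged sketch for `_to_copy`: returns a copy of `self` converted to the
    /// requested kind and device, optionally as a non-blocking transfer. The input
    /// tensor is illustrative.
    ///
    /// ```ignore
    /// let t = Tensor::randn(&[2, 2], (Kind::Float, Device::Cpu));
    /// let t_f64 = t.f_internal_to_copy((Kind::Double, Device::Cpu), false)?;
    /// ```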
7311 pub fn f_internal_to_copy(
7312 &self,
7313 options: (Kind, Device),
7314 non_blocking: bool,
7315 ) -> Result<Tensor, TchError> {
7316 let mut c_tensors = [std::ptr::null_mut(); 1];
7317 unsafe_torch_err!(atg__to_copy(
7318 c_tensors.as_mut_ptr(),
7319 self.c_tensor,
7320 options.0.c_int(),
7321 options.1.c_int(),
7322 if non_blocking { 1 } else { 0 }
7323 ));
7324 Ok(Tensor { c_tensor: c_tensors[0] })
7325 }
7326
7327 pub fn f_internal_to_copy_out(
7328 &self,
7329 out: &Tensor,
7330 non_blocking: bool,
7331 ) -> Result<Tensor, TchError> {
7332 let mut c_tensors = [std::ptr::null_mut(); 1];
7333 unsafe_torch_err!(atg__to_copy_out(
7334 c_tensors.as_mut_ptr(),
7335 out.c_tensor,
7336 self.c_tensor,
7337 if non_blocking { 1 } else { 0 }
7338 ));
7339 Ok(Tensor { c_tensor: c_tensors[0] })
7340 }
7341
7342 pub fn f_internal_to_cpu<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Vec<Tensor>, TchError> {
7343 let c_tensors =
7344 unsafe_torch_err!(atg__to_cpu(ptr_list(tensors).as_ptr(), tensors.len() as i32));
7345 let mut r__ = vec![];
7346 let mut i = 0;
7347 loop {
7348 let c__ = unsafe { *c_tensors.add(i) };
7349 if c__.is_null() {
7350 break;
7351 }
7352 r__.push(Tensor { c_tensor: c__ });
7353 i += 1;
7354 }
7355 unsafe { libc::free(c_tensors as *mut libc::c_void) }
7356 Ok(r__)
7357 }
7358
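    /// A hedged sketch for `_to_dense`: materialises a sparse (or otherwise
    /// non-strided) tensor as a dense strided one, optionally casting to `dtype`. The
    /// round trip below leans on another binding from this file plus assumed
    /// constructors.
    ///
    /// ```ignore
    /// let sparse = Tensor::randn(&[3, 3], (Kind::Float, Device::Cpu))
    ///     .f_internal_to_sparse_sparse_dim(2)?;
    /// let dense = sparse.f_internal_to_dense(None, false)?; // back to a strided [3, 3]
    /// ```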
7359 pub fn f_internal_to_dense(
7360 &self,
7361 dtype: impl Into<Option<Kind>>,
7362 masked_grad: bool,
7363 ) -> Result<Tensor, TchError> {
7364 let mut c_tensors = [std::ptr::null_mut(); 1];
7365 unsafe_torch_err!(atg__to_dense(
7366 c_tensors.as_mut_ptr(),
7367 self.c_tensor,
7368 dtype.into().map_or(-1, |s| s.c_int()),
7369 if masked_grad { 1 } else { 0 }
7370 ));
7371 Ok(Tensor { c_tensor: c_tensors[0] })
7372 }
7373
7374 pub fn f_internal_to_dense_out(
7375 &self,
7376 out: &Tensor,
7377 dtype: impl Into<Option<Kind>>,
7378 masked_grad: bool,
7379 ) -> Result<Tensor, TchError> {
7380 let mut c_tensors = [std::ptr::null_mut(); 1];
7381 unsafe_torch_err!(atg__to_dense_out(
7382 c_tensors.as_mut_ptr(),
7383 out.c_tensor,
7384 self.c_tensor,
7385 dtype.into().map_or(-1, |s| s.c_int()),
7386 if masked_grad { 1 } else { 0 }
7387 ));
7388 Ok(Tensor { c_tensor: c_tensors[0] })
7389 }
7390
7391 pub fn f_internal_to_sparse(
7392 &self,
7393 layout: Option<Layout>,
7394 blocksize: impl IntListOption,
7395 dense_dim: impl Into<Option<i64>>,
7396 ) -> Result<Tensor, TchError> {
7397 let dense_dim = dense_dim.into();
7398 let mut c_tensors = [std::ptr::null_mut(); 1];
7399 unsafe_torch_err!(atg__to_sparse(
7400 c_tensors.as_mut_ptr(),
7401 self.c_tensor,
7402 layout.map_or(-1, |s| s.to_i8()),
7403 blocksize.as_ptr(),
7404 blocksize.len_i32(),
7405 dense_dim.unwrap_or(0i64),
7406 dense_dim.is_none() as i8
7407 ));
7408 Ok(Tensor { c_tensor: c_tensors[0] })
7409 }
7410
7411 pub fn f_internal_to_sparse_bsc(
7412 &self,
7413 blocksize: impl IntList,
7414 dense_dim: impl Into<Option<i64>>,
7415 ) -> Result<Tensor, TchError> {
7416 let dense_dim = dense_dim.into();
7417 let mut c_tensors = [std::ptr::null_mut(); 1];
7418 unsafe_torch_err!(atg__to_sparse_bsc(
7419 c_tensors.as_mut_ptr(),
7420 self.c_tensor,
7421 blocksize.as_ptr(),
7422 blocksize.len_i32(),
7423 dense_dim.unwrap_or(0i64),
7424 dense_dim.is_none() as i8
7425 ));
7426 Ok(Tensor { c_tensor: c_tensors[0] })
7427 }
7428
7429 pub fn f_internal_to_sparse_bsc_out(
7430 &self,
7431 out: &Tensor,
7432 blocksize: impl IntList,
7433 dense_dim: impl Into<Option<i64>>,
7434 ) -> Result<Tensor, TchError> {
7435 let dense_dim = dense_dim.into();
7436 let mut c_tensors = [std::ptr::null_mut(); 1];
7437 unsafe_torch_err!(atg__to_sparse_bsc_out(
7438 c_tensors.as_mut_ptr(),
7439 out.c_tensor,
7440 self.c_tensor,
7441 blocksize.as_ptr(),
7442 blocksize.len_i32(),
7443 dense_dim.unwrap_or(0i64),
7444 dense_dim.is_none() as i8
7445 ));
7446 Ok(Tensor { c_tensor: c_tensors[0] })
7447 }
7448
7449 pub fn f_internal_to_sparse_bsr(
7450 &self,
7451 blocksize: impl IntList,
7452 dense_dim: impl Into<Option<i64>>,
7453 ) -> Result<Tensor, TchError> {
7454 let dense_dim = dense_dim.into();
7455 let mut c_tensors = [std::ptr::null_mut(); 1];
7456 unsafe_torch_err!(atg__to_sparse_bsr(
7457 c_tensors.as_mut_ptr(),
7458 self.c_tensor,
7459 blocksize.as_ptr(),
7460 blocksize.len_i32(),
7461 dense_dim.unwrap_or(0i64),
7462 dense_dim.is_none() as i8
7463 ));
7464 Ok(Tensor { c_tensor: c_tensors[0] })
7465 }
7466
7467 pub fn f_internal_to_sparse_bsr_out(
7468 &self,
7469 out: &Tensor,
7470 blocksize: impl IntList,
7471 dense_dim: impl Into<Option<i64>>,
7472 ) -> Result<Tensor, TchError> {
7473 let dense_dim = dense_dim.into();
7474 let mut c_tensors = [std::ptr::null_mut(); 1];
7475 unsafe_torch_err!(atg__to_sparse_bsr_out(
7476 c_tensors.as_mut_ptr(),
7477 out.c_tensor,
7478 self.c_tensor,
7479 blocksize.as_ptr(),
7480 blocksize.len_i32(),
7481 dense_dim.unwrap_or(0i64),
7482 dense_dim.is_none() as i8
7483 ));
7484 Ok(Tensor { c_tensor: c_tensors[0] })
7485 }
7486
7487 pub fn f_internal_to_sparse_csc(
7488 &self,
7489 dense_dim: impl Into<Option<i64>>,
7490 ) -> Result<Tensor, TchError> {
7491 let dense_dim = dense_dim.into();
7492 let mut c_tensors = [std::ptr::null_mut(); 1];
7493 unsafe_torch_err!(atg__to_sparse_csc(
7494 c_tensors.as_mut_ptr(),
7495 self.c_tensor,
7496 dense_dim.unwrap_or(0i64),
7497 dense_dim.is_none() as i8
7498 ));
7499 Ok(Tensor { c_tensor: c_tensors[0] })
7500 }
7501
7502 pub fn f_internal_to_sparse_csc_out(
7503 &self,
7504 out: &Tensor,
7505 dense_dim: impl Into<Option<i64>>,
7506 ) -> Result<Tensor, TchError> {
7507 let dense_dim = dense_dim.into();
7508 let mut c_tensors = [std::ptr::null_mut(); 1];
7509 unsafe_torch_err!(atg__to_sparse_csc_out(
7510 c_tensors.as_mut_ptr(),
7511 out.c_tensor,
7512 self.c_tensor,
7513 dense_dim.unwrap_or(0i64),
7514 dense_dim.is_none() as i8
7515 ));
7516 Ok(Tensor { c_tensor: c_tensors[0] })
7517 }
7518
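    /// A hedged sketch for `_to_sparse_csr`: converts a 2-D strided tensor to CSR
    /// layout; `dense_dim` can be left unset for the plain matrix case. Constructors and
    /// values are illustrative assumptions.
    ///
    /// ```ignore
    /// let dense = Tensor::from_slice(&[0.0f32, 1.0, 0.0, 2.0]).reshape(&[2, 2]);
    /// let csr = dense.f_internal_to_sparse_csr(None)?; // keeps only the non-zeros
    /// ```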
7519 pub fn f_internal_to_sparse_csr(
7520 &self,
7521 dense_dim: impl Into<Option<i64>>,
7522 ) -> Result<Tensor, TchError> {
7523 let dense_dim = dense_dim.into();
7524 let mut c_tensors = [std::ptr::null_mut(); 1];
7525 unsafe_torch_err!(atg__to_sparse_csr(
7526 c_tensors.as_mut_ptr(),
7527 self.c_tensor,
7528 dense_dim.unwrap_or(0i64),
7529 dense_dim.is_none() as i8
7530 ));
7531 Ok(Tensor { c_tensor: c_tensors[0] })
7532 }
7533
7534 pub fn f_internal_to_sparse_csr_out(
7535 &self,
7536 out: &Tensor,
7537 dense_dim: impl Into<Option<i64>>,
7538 ) -> Result<Tensor, TchError> {
7539 let dense_dim = dense_dim.into();
7540 let mut c_tensors = [std::ptr::null_mut(); 1];
7541 unsafe_torch_err!(atg__to_sparse_csr_out(
7542 c_tensors.as_mut_ptr(),
7543 out.c_tensor,
7544 self.c_tensor,
7545 dense_dim.unwrap_or(0i64),
7546 dense_dim.is_none() as i8
7547 ));
7548 Ok(Tensor { c_tensor: c_tensors[0] })
7549 }
7550
7551 pub fn f_internal_to_sparse_out(
7552 &self,
7553 out: &Tensor,
7554 layout: Option<Layout>,
7555 blocksize: impl IntListOption,
7556 dense_dim: impl Into<Option<i64>>,
7557 ) -> Result<Tensor, TchError> {
7558 let dense_dim = dense_dim.into();
7559 let mut c_tensors = [std::ptr::null_mut(); 1];
7560 unsafe_torch_err!(atg__to_sparse_out(
7561 c_tensors.as_mut_ptr(),
7562 out.c_tensor,
7563 self.c_tensor,
7564 layout.map_or(-1, |s| s.to_i8()),
7565 blocksize.as_ptr(),
7566 blocksize.len_i32(),
7567 dense_dim.unwrap_or(0i64),
7568 dense_dim.is_none() as i8
7569 ));
7570 Ok(Tensor { c_tensor: c_tensors[0] })
7571 }
7572
7573 pub fn f_internal_to_sparse_semi_structured(
7574 dense: &Tensor,
7575 ) -> Result<(Tensor, Tensor), TchError> {
7576 let mut c_tensors = [std::ptr::null_mut(); 2];
7577 unsafe_torch_err!(atg__to_sparse_semi_structured(c_tensors.as_mut_ptr(), dense.c_tensor));
7578 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
7579 }
7580
7581 pub fn f_internal_to_sparse_sparse_dim(&self, sparse_dim: i64) -> Result<Tensor, TchError> {
7582 let mut c_tensors = [std::ptr::null_mut(); 1];
7583 unsafe_torch_err!(atg__to_sparse_sparse_dim(
7584 c_tensors.as_mut_ptr(),
7585 self.c_tensor,
7586 sparse_dim
7587 ));
7588 Ok(Tensor { c_tensor: c_tensors[0] })
7589 }
7590
7591 pub fn f_internal_to_sparse_sparse_dim_out(
7592 &self,
7593 out: &Tensor,
7594 sparse_dim: i64,
7595 ) -> Result<Tensor, TchError> {
7596 let mut c_tensors = [std::ptr::null_mut(); 1];
7597 unsafe_torch_err!(atg__to_sparse_sparse_dim_out(
7598 c_tensors.as_mut_ptr(),
7599 out.c_tensor,
7600 self.c_tensor,
7601 sparse_dim
7602 ));
7603 Ok(Tensor { c_tensor: c_tensors[0] })
7604 }
7605
7606 pub fn f_internal_transform_bias_rescale_qkv(
7607 qkv: &Tensor,
7608 qkv_bias: &Tensor,
7609 num_heads: i64,
7610 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
7611 let mut c_tensors = [std::ptr::null_mut(); 3];
7612 unsafe_torch_err!(atg__transform_bias_rescale_qkv(
7613 c_tensors.as_mut_ptr(),
7614 qkv.c_tensor,
7615 qkv_bias.c_tensor,
7616 num_heads
7617 ));
7618 Ok((
7619 Tensor { c_tensor: c_tensors[0] },
7620 Tensor { c_tensor: c_tensors[1] },
7621 Tensor { c_tensor: c_tensors[2] },
7622 ))
7623 }
7624
7625 pub fn f_internal_transform_bias_rescale_qkv_out(
7626 out0: &Tensor,
7627 out1: &Tensor,
7628 out2: &Tensor,
7629 qkv: &Tensor,
7630 qkv_bias: &Tensor,
7631 num_heads: i64,
7632 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
7633 let mut c_tensors = [std::ptr::null_mut(); 3];
7634 unsafe_torch_err!(atg__transform_bias_rescale_qkv_out(
7635 c_tensors.as_mut_ptr(),
7636 out0.c_tensor,
7637 out1.c_tensor,
7638 out2.c_tensor,
7639 qkv.c_tensor,
7640 qkv_bias.c_tensor,
7641 num_heads
7642 ));
7643 Ok((
7644 Tensor { c_tensor: c_tensors[0] },
7645 Tensor { c_tensor: c_tensors[1] },
7646 Tensor { c_tensor: c_tensors[2] },
7647 ))
7648 }
7649
7650 pub fn f_internal_transformer_encoder_layer_fwd<T: Borrow<Tensor>>(
7651 src: &Tensor,
7652 embed_dim: i64,
7653 num_heads: i64,
7654 qkv_weight: &Tensor,
7655 qkv_bias: &Tensor,
7656 proj_weight: &Tensor,
7657 proj_bias: &Tensor,
7658 use_gelu: bool,
7659 norm_first: bool,
7660 eps: f64,
7661 norm_weight_1: &Tensor,
7662 norm_bias_1: &Tensor,
7663 norm_weight_2: &Tensor,
7664 norm_bias_2: &Tensor,
7665 ffn_weight_1: &Tensor,
7666 ffn_bias_1: &Tensor,
7667 ffn_weight_2: &Tensor,
7668 ffn_bias_2: &Tensor,
7669 mask: Option<T>,
7670 mask_type: impl Into<Option<i64>>,
7671 ) -> Result<Tensor, TchError> {
7672 let mask_type = mask_type.into();
7673 let mut c_tensors = [std::ptr::null_mut(); 1];
7674 unsafe_torch_err!(atg__transformer_encoder_layer_fwd(
7675 c_tensors.as_mut_ptr(),
7676 src.c_tensor,
7677 embed_dim,
7678 num_heads,
7679 qkv_weight.c_tensor,
7680 qkv_bias.c_tensor,
7681 proj_weight.c_tensor,
7682 proj_bias.c_tensor,
7683 if use_gelu { 1 } else { 0 },
7684 if norm_first { 1 } else { 0 },
7685 eps,
7686 norm_weight_1.c_tensor,
7687 norm_bias_1.c_tensor,
7688 norm_weight_2.c_tensor,
7689 norm_bias_2.c_tensor,
7690 ffn_weight_1.c_tensor,
7691 ffn_bias_1.c_tensor,
7692 ffn_weight_2.c_tensor,
7693 ffn_bias_2.c_tensor,
7694 mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
7695 mask_type.unwrap_or(0i64),
7696 mask_type.is_none() as i8
7697 ));
7698 Ok(Tensor { c_tensor: c_tensors[0] })
7699 }
7700
7701 pub fn f_internal_transformer_encoder_layer_fwd_out<T: Borrow<Tensor>>(
7702 out: &Tensor,
7703 src: &Tensor,
7704 embed_dim: i64,
7705 num_heads: i64,
7706 qkv_weight: &Tensor,
7707 qkv_bias: &Tensor,
7708 proj_weight: &Tensor,
7709 proj_bias: &Tensor,
7710 use_gelu: bool,
7711 norm_first: bool,
7712 eps: f64,
7713 norm_weight_1: &Tensor,
7714 norm_bias_1: &Tensor,
7715 norm_weight_2: &Tensor,
7716 norm_bias_2: &Tensor,
7717 ffn_weight_1: &Tensor,
7718 ffn_bias_1: &Tensor,
7719 ffn_weight_2: &Tensor,
7720 ffn_bias_2: &Tensor,
7721 mask: Option<T>,
7722 mask_type: impl Into<Option<i64>>,
7723 ) -> Result<Tensor, TchError> {
7724 let mask_type = mask_type.into();
7725 let mut c_tensors = [std::ptr::null_mut(); 1];
7726 unsafe_torch_err!(atg__transformer_encoder_layer_fwd_out(
7727 c_tensors.as_mut_ptr(),
7728 out.c_tensor,
7729 src.c_tensor,
7730 embed_dim,
7731 num_heads,
7732 qkv_weight.c_tensor,
7733 qkv_bias.c_tensor,
7734 proj_weight.c_tensor,
7735 proj_bias.c_tensor,
7736 if use_gelu { 1 } else { 0 },
7737 if norm_first { 1 } else { 0 },
7738 eps,
7739 norm_weight_1.c_tensor,
7740 norm_bias_1.c_tensor,
7741 norm_weight_2.c_tensor,
7742 norm_bias_2.c_tensor,
7743 ffn_weight_1.c_tensor,
7744 ffn_bias_1.c_tensor,
7745 ffn_weight_2.c_tensor,
7746 ffn_bias_2.c_tensor,
7747 mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
7748 mask_type.unwrap_or(0i64),
7749 mask_type.is_none() as i8
7750 ));
7751 Ok(Tensor { c_tensor: c_tensors[0] })
7752 }
7753
7754 pub fn f_internal_trilinear(
7755 i1: &Tensor,
7756 i2: &Tensor,
7757 i3: &Tensor,
7758 expand1: impl IntList,
7759 expand2: impl IntList,
7760 expand3: impl IntList,
7761 sumdim: impl IntList,
7762 unroll_dim: i64,
7763 ) -> Result<Tensor, TchError> {
7764 let mut c_tensors = [std::ptr::null_mut(); 1];
7765 unsafe_torch_err!(atg__trilinear(
7766 c_tensors.as_mut_ptr(),
7767 i1.c_tensor,
7768 i2.c_tensor,
7769 i3.c_tensor,
7770 expand1.as_ptr(),
7771 expand1.len_i32(),
7772 expand2.as_ptr(),
7773 expand2.len_i32(),
7774 expand3.as_ptr(),
7775 expand3.len_i32(),
7776 sumdim.as_ptr(),
7777 sumdim.len_i32(),
7778 unroll_dim
7779 ));
7780 Ok(Tensor { c_tensor: c_tensors[0] })
7781 }
7782
7783 pub fn f_internal_trilinear_out(
7784 out: &Tensor,
7785 i1: &Tensor,
7786 i2: &Tensor,
7787 i3: &Tensor,
7788 expand1: impl IntList,
7789 expand2: impl IntList,
7790 expand3: impl IntList,
7791 sumdim: impl IntList,
7792 unroll_dim: i64,
7793 ) -> Result<Tensor, TchError> {
7794 let mut c_tensors = [std::ptr::null_mut(); 1];
7795 unsafe_torch_err!(atg__trilinear_out(
7796 c_tensors.as_mut_ptr(),
7797 out.c_tensor,
7798 i1.c_tensor,
7799 i2.c_tensor,
7800 i3.c_tensor,
7801 expand1.as_ptr(),
7802 expand1.len_i32(),
7803 expand2.as_ptr(),
7804 expand2.len_i32(),
7805 expand3.as_ptr(),
7806 expand3.len_i32(),
7807 sumdim.as_ptr(),
7808 sumdim.len_i32(),
7809 unroll_dim
7810 ));
7811 Ok(Tensor { c_tensor: c_tensors[0] })
7812 }
7813
7814 pub fn f_internal_triton_multi_head_attention<T: Borrow<Tensor>>(
7815 query: &Tensor,
7816 key: &Tensor,
7817 value: &Tensor,
7818 embed_dim: i64,
7819 num_head: i64,
7820 qkv_weight: &Tensor,
7821 qkv_bias: &Tensor,
7822 proj_weight: &Tensor,
7823 proj_bias: &Tensor,
7824 mask: Option<T>,
7825 ) -> Result<Tensor, TchError> {
7826 let mut c_tensors = [std::ptr::null_mut(); 1];
7827 unsafe_torch_err!(atg__triton_multi_head_attention(
7828 c_tensors.as_mut_ptr(),
7829 query.c_tensor,
7830 key.c_tensor,
7831 value.c_tensor,
7832 embed_dim,
7833 num_head,
7834 qkv_weight.c_tensor,
7835 qkv_bias.c_tensor,
7836 proj_weight.c_tensor,
7837 proj_bias.c_tensor,
7838 mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
7839 ));
7840 Ok(Tensor { c_tensor: c_tensors[0] })
7841 }
7842
7843 pub fn f_internal_triton_multi_head_attention_out<T: Borrow<Tensor>>(
7844 out: &Tensor,
7845 query: &Tensor,
7846 key: &Tensor,
7847 value: &Tensor,
7848 embed_dim: i64,
7849 num_head: i64,
7850 qkv_weight: &Tensor,
7851 qkv_bias: &Tensor,
7852 proj_weight: &Tensor,
7853 proj_bias: &Tensor,
7854 mask: Option<T>,
7855 ) -> Result<Tensor, TchError> {
7856 let mut c_tensors = [std::ptr::null_mut(); 1];
7857 unsafe_torch_err!(atg__triton_multi_head_attention_out(
7858 c_tensors.as_mut_ptr(),
7859 out.c_tensor,
7860 query.c_tensor,
7861 key.c_tensor,
7862 value.c_tensor,
7863 embed_dim,
7864 num_head,
7865 qkv_weight.c_tensor,
7866 qkv_bias.c_tensor,
7867 proj_weight.c_tensor,
7868 proj_bias.c_tensor,
7869 mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
7870 ));
7871 Ok(Tensor { c_tensor: c_tensors[0] })
7872 }
7873
7874 pub fn f_internal_triton_scaled_dot_attention(
7875 q: &Tensor,
7876 k: &Tensor,
7877 v: &Tensor,
7878 dropout_p: f64,
7879 ) -> Result<Tensor, TchError> {
7880 let mut c_tensors = [std::ptr::null_mut(); 1];
7881 unsafe_torch_err!(atg__triton_scaled_dot_attention(
7882 c_tensors.as_mut_ptr(),
7883 q.c_tensor,
7884 k.c_tensor,
7885 v.c_tensor,
7886 dropout_p
7887 ));
7888 Ok(Tensor { c_tensor: c_tensors[0] })
7889 }
7890
7891 pub fn f_internal_triton_scaled_dot_attention_out(
7892 out: &Tensor,
7893 q: &Tensor,
7894 k: &Tensor,
7895 v: &Tensor,
7896 dropout_p: f64,
7897 ) -> Result<Tensor, TchError> {
7898 let mut c_tensors = [std::ptr::null_mut(); 1];
7899 unsafe_torch_err!(atg__triton_scaled_dot_attention_out(
7900 c_tensors.as_mut_ptr(),
7901 out.c_tensor,
7902 q.c_tensor,
7903 k.c_tensor,
7904 v.c_tensor,
7905 dropout_p
7906 ));
7907 Ok(Tensor { c_tensor: c_tensors[0] })
7908 }
7909
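    /// Fallible binding for the internal `_unique` op, returning the unique
    /// values together with an inverse-index tensor. A minimal sketch, assuming
    /// `t` is an existing integer tensor and the caller returns
    /// `Result<_, TchError>`:
    ///
    /// ```ignore
    /// // `values` holds the distinct elements; `inverse` maps every element of
    /// // `t` to its position in `values`.
    /// let (values, inverse) = t.f_internal_unique(true, true)?;
    /// ```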
7910 pub fn f_internal_unique(
7911 &self,
7912 sorted: bool,
7913 return_inverse: bool,
7914 ) -> Result<(Tensor, Tensor), TchError> {
7915 let mut c_tensors = [std::ptr::null_mut(); 2];
7916 unsafe_torch_err!(atg__unique(
7917 c_tensors.as_mut_ptr(),
7918 self.c_tensor,
7919 if sorted { 1 } else { 0 },
7920 if return_inverse { 1 } else { 0 }
7921 ));
7922 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
7923 }
7924
7925 pub fn f_internal_unique2(
7926 &self,
7927 sorted: bool,
7928 return_inverse: bool,
7929 return_counts: bool,
7930 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
7931 let mut c_tensors = [std::ptr::null_mut(); 3];
7932 unsafe_torch_err!(atg__unique2(
7933 c_tensors.as_mut_ptr(),
7934 self.c_tensor,
7935 if sorted { 1 } else { 0 },
7936 if return_inverse { 1 } else { 0 },
7937 if return_counts { 1 } else { 0 }
7938 ));
7939 Ok((
7940 Tensor { c_tensor: c_tensors[0] },
7941 Tensor { c_tensor: c_tensors[1] },
7942 Tensor { c_tensor: c_tensors[2] },
7943 ))
7944 }
7945
7946 pub fn f_internal_unique2_out(
7947 &self,
7948 out0: &Tensor,
7949 out1: &Tensor,
7950 out2: &Tensor,
7951 sorted: bool,
7952 return_inverse: bool,
7953 return_counts: bool,
7954 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
7955 let mut c_tensors = [std::ptr::null_mut(); 3];
7956 unsafe_torch_err!(atg__unique2_out(
7957 c_tensors.as_mut_ptr(),
7958 out0.c_tensor,
7959 out1.c_tensor,
7960 out2.c_tensor,
7961 self.c_tensor,
7962 if sorted { 1 } else { 0 },
7963 if return_inverse { 1 } else { 0 },
7964 if return_counts { 1 } else { 0 }
7965 ));
7966 Ok((
7967 Tensor { c_tensor: c_tensors[0] },
7968 Tensor { c_tensor: c_tensors[1] },
7969 Tensor { c_tensor: c_tensors[2] },
7970 ))
7971 }
7972
7973 pub fn f_internal_unique_out(
7974 &self,
7975 out0: &Tensor,
7976 out1: &Tensor,
7977 sorted: bool,
7978 return_inverse: bool,
7979 ) -> Result<(Tensor, Tensor), TchError> {
7980 let mut c_tensors = [std::ptr::null_mut(); 2];
7981 unsafe_torch_err!(atg__unique_out(
7982 c_tensors.as_mut_ptr(),
7983 out0.c_tensor,
7984 out1.c_tensor,
7985 self.c_tensor,
7986 if sorted { 1 } else { 0 },
7987 if return_inverse { 1 } else { 0 }
7988 ));
7989 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
7990 }
7991
7992 pub fn f_internal_unpack_dual(dual: &Tensor, level: i64) -> Result<(Tensor, Tensor), TchError> {
7993 let mut c_tensors = [std::ptr::null_mut(); 2];
7994 unsafe_torch_err!(atg__unpack_dual(c_tensors.as_mut_ptr(), dual.c_tensor, level));
7995 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
7996 }
7997
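    // The `indices: &[Option<T>]` parameters below mirror ATen's `Tensor?[]`
    // argument lists: `ptr_list_opt` turns every `None` entry into a null
    // tensor pointer, which the C layer reads as "no index for this dimension".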
7998 pub fn f_internal_unsafe_index<T: Borrow<Tensor>>(
7999 &self,
8000 indices: &[Option<T>],
8001 ) -> Result<Tensor, TchError> {
8002 let mut c_tensors = [std::ptr::null_mut(); 1];
8003 unsafe_torch_err!(atg__unsafe_index(
8004 c_tensors.as_mut_ptr(),
8005 self.c_tensor,
8006 ptr_list_opt(indices).as_ptr(),
8007 indices.len() as i32
8008 ));
8009 Ok(Tensor { c_tensor: c_tensors[0] })
8010 }
8011
8012 pub fn f_internal_unsafe_index_put<T: Borrow<Tensor>>(
8013 &self,
8014 indices: &[Option<T>],
8015 values: &Tensor,
8016 accumulate: bool,
8017 ) -> Result<Tensor, TchError> {
8018 let mut c_tensors = [std::ptr::null_mut(); 1];
8019 unsafe_torch_err!(atg__unsafe_index_put(
8020 c_tensors.as_mut_ptr(),
8021 self.c_tensor,
8022 ptr_list_opt(indices).as_ptr(),
8023 indices.len() as i32,
8024 values.c_tensor,
8025 if accumulate { 1 } else { 0 }
8026 ));
8027 Ok(Tensor { c_tensor: c_tensors[0] })
8028 }
8029
8030 pub fn f_internal_unsafe_masked_index<T: Borrow<Tensor>, S: Into<Scalar>>(
8031 &self,
8032 mask: &Tensor,
8033 indices: &[Option<T>],
8034 fill: S,
8035 ) -> Result<Tensor, TchError> {
8036 let mut c_tensors = [std::ptr::null_mut(); 1];
8037 unsafe_torch_err!(atg__unsafe_masked_index(
8038 c_tensors.as_mut_ptr(),
8039 self.c_tensor,
8040 mask.c_tensor,
8041 ptr_list_opt(indices).as_ptr(),
8042 indices.len() as i32,
8043 fill.into().c_scalar
8044 ));
8045 Ok(Tensor { c_tensor: c_tensors[0] })
8046 }
8047
8048 pub fn f_internal_unsafe_masked_index_put_accumulate<T: Borrow<Tensor>>(
8049 &self,
8050 mask: &Tensor,
8051 indices: &[Option<T>],
8052 values: &Tensor,
8053 ) -> Result<Tensor, TchError> {
8054 let mut c_tensors = [std::ptr::null_mut(); 1];
8055 unsafe_torch_err!(atg__unsafe_masked_index_put_accumulate(
8056 c_tensors.as_mut_ptr(),
8057 self.c_tensor,
8058 mask.c_tensor,
8059 ptr_list_opt(indices).as_ptr(),
8060 indices.len() as i32,
8061 values.c_tensor
8062 ));
8063 Ok(Tensor { c_tensor: c_tensors[0] })
8064 }
8065
8066 pub fn f_internal_unsafe_view(&self, size: impl IntList) -> Result<Tensor, TchError> {
8067 let mut c_tensors = [std::ptr::null_mut(); 1];
8068 unsafe_torch_err!(atg__unsafe_view(
8069 c_tensors.as_mut_ptr(),
8070 self.c_tensor,
8071 size.as_ptr(),
8072 size.len_i32()
8073 ));
8074 Ok(Tensor { c_tensor: c_tensors[0] })
8075 }
8076
8077 pub fn f_internal_unsafe_view_out(
8078 &self,
8079 out: &Tensor,
8080 size: impl IntList,
8081 ) -> Result<Tensor, TchError> {
8082 let mut c_tensors = [std::ptr::null_mut(); 1];
8083 unsafe_torch_err!(atg__unsafe_view_out(
8084 c_tensors.as_mut_ptr(),
8085 out.c_tensor,
8086 self.c_tensor,
8087 size.as_ptr(),
8088 size.len_i32()
8089 ));
8090 Ok(Tensor { c_tensor: c_tensors[0] })
8091 }
8092
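    /// Anti-aliased bicubic upsampling (internal op). Because the C ABI has no
    /// option type, each `Option<f64>` scale is encoded as two arguments: the
    /// value itself (NaN when absent) plus an `is_none` flag; the sibling `_aa`
    /// variants below use the same encoding. A minimal sketch, assuming `t` is
    /// an existing 4-D NCHW float tensor:
    ///
    /// ```ignore
    /// // Explicit output size, scales left unspecified.
    /// let up = t.f_internal_upsample_bicubic2d_aa(&[64i64, 64], false, None, None)?;
    /// ```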
8093 pub fn f_internal_upsample_bicubic2d_aa(
8094 &self,
8095 output_size: impl IntList,
8096 align_corners: bool,
8097 scales_h: impl Into<Option<f64>>,
8098 scales_w: impl Into<Option<f64>>,
8099 ) -> Result<Tensor, TchError> {
8100 let scales_h = scales_h.into();
8101 let scales_w = scales_w.into();
8102 let mut c_tensors = [std::ptr::null_mut(); 1];
8103 unsafe_torch_err!(atg__upsample_bicubic2d_aa(
8104 c_tensors.as_mut_ptr(),
8105 self.c_tensor,
8106 output_size.as_ptr(),
8107 output_size.len_i32(),
8108 if align_corners { 1 } else { 0 },
8109 scales_h.unwrap_or(std::f64::NAN),
8110 scales_h.is_none() as i8,
8111 scales_w.unwrap_or(std::f64::NAN),
8112 scales_w.is_none() as i8
8113 ));
8114 Ok(Tensor { c_tensor: c_tensors[0] })
8115 }
8116
8117 pub fn f_internal_upsample_bicubic2d_aa_backward(
8118 grad_output: &Tensor,
8119 output_size: impl IntList,
8120 input_size: impl IntList,
8121 align_corners: bool,
8122 scales_h: impl Into<Option<f64>>,
8123 scales_w: impl Into<Option<f64>>,
8124 ) -> Result<Tensor, TchError> {
8125 let scales_h = scales_h.into();
8126 let scales_w = scales_w.into();
8127 let mut c_tensors = [std::ptr::null_mut(); 1];
8128 unsafe_torch_err!(atg__upsample_bicubic2d_aa_backward(
8129 c_tensors.as_mut_ptr(),
8130 grad_output.c_tensor,
8131 output_size.as_ptr(),
8132 output_size.len_i32(),
8133 input_size.as_ptr(),
8134 input_size.len_i32(),
8135 if align_corners { 1 } else { 0 },
8136 scales_h.unwrap_or(std::f64::NAN),
8137 scales_h.is_none() as i8,
8138 scales_w.unwrap_or(std::f64::NAN),
8139 scales_w.is_none() as i8
8140 ));
8141 Ok(Tensor { c_tensor: c_tensors[0] })
8142 }
8143
8144 pub fn f_internal_upsample_bicubic2d_aa_backward_grad_input(
8145 grad_input: &Tensor,
8146 grad_output: &Tensor,
8147 output_size: impl IntList,
8148 input_size: impl IntList,
8149 align_corners: bool,
8150 scales_h: impl Into<Option<f64>>,
8151 scales_w: impl Into<Option<f64>>,
8152 ) -> Result<Tensor, TchError> {
8153 let scales_h = scales_h.into();
8154 let scales_w = scales_w.into();
8155 let mut c_tensors = [std::ptr::null_mut(); 1];
8156 unsafe_torch_err!(atg__upsample_bicubic2d_aa_backward_grad_input(
8157 c_tensors.as_mut_ptr(),
8158 grad_input.c_tensor,
8159 grad_output.c_tensor,
8160 output_size.as_ptr(),
8161 output_size.len_i32(),
8162 input_size.as_ptr(),
8163 input_size.len_i32(),
8164 if align_corners { 1 } else { 0 },
8165 scales_h.unwrap_or(std::f64::NAN),
8166 scales_h.is_none() as i8,
8167 scales_w.unwrap_or(std::f64::NAN),
8168 scales_w.is_none() as i8
8169 ));
8170 Ok(Tensor { c_tensor: c_tensors[0] })
8171 }
8172
8173 pub fn f_internal_upsample_bicubic2d_aa_out(
8174 &self,
8175 out: &Tensor,
8176 output_size: impl IntList,
8177 align_corners: bool,
8178 scales_h: impl Into<Option<f64>>,
8179 scales_w: impl Into<Option<f64>>,
8180 ) -> Result<Tensor, TchError> {
8181 let scales_h = scales_h.into();
8182 let scales_w = scales_w.into();
8183 let mut c_tensors = [std::ptr::null_mut(); 1];
8184 unsafe_torch_err!(atg__upsample_bicubic2d_aa_out(
8185 c_tensors.as_mut_ptr(),
8186 out.c_tensor,
8187 self.c_tensor,
8188 output_size.as_ptr(),
8189 output_size.len_i32(),
8190 if align_corners { 1 } else { 0 },
8191 scales_h.unwrap_or(std::f64::NAN),
8192 scales_h.is_none() as i8,
8193 scales_w.unwrap_or(std::f64::NAN),
8194 scales_w.is_none() as i8
8195 ));
8196 Ok(Tensor { c_tensor: c_tensors[0] })
8197 }
8198
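    // The `_vec` variants below take an optional output size (`IntListOption`)
    // together with a `DoubleList` of scale factors; these appear to correspond
    // to the `.vec` overloads of the upsampling ops on the C++ side.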
8199 pub fn f_internal_upsample_bicubic2d_aa_vec(
8200 &self,
8201 output_size: impl IntListOption,
8202 align_corners: bool,
8203 scale_factors: impl DoubleList,
8204 ) -> Result<Tensor, TchError> {
8205 let mut c_tensors = [std::ptr::null_mut(); 1];
8206 unsafe_torch_err!(atg__upsample_bicubic2d_aa_vec(
8207 c_tensors.as_mut_ptr(),
8208 self.c_tensor,
8209 output_size.as_ptr(),
8210 output_size.len_i32(),
8211 if align_corners { 1 } else { 0 },
8212 scale_factors.as_ptr(),
8213 scale_factors.len_i32()
8214 ));
8215 Ok(Tensor { c_tensor: c_tensors[0] })
8216 }
8217
8218 pub fn f_internal_upsample_bilinear2d_aa(
8219 &self,
8220 output_size: impl IntList,
8221 align_corners: bool,
8222 scales_h: impl Into<Option<f64>>,
8223 scales_w: impl Into<Option<f64>>,
8224 ) -> Result<Tensor, TchError> {
8225 let scales_h = scales_h.into();
8226 let scales_w = scales_w.into();
8227 let mut c_tensors = [std::ptr::null_mut(); 1];
8228 unsafe_torch_err!(atg__upsample_bilinear2d_aa(
8229 c_tensors.as_mut_ptr(),
8230 self.c_tensor,
8231 output_size.as_ptr(),
8232 output_size.len_i32(),
8233 if align_corners { 1 } else { 0 },
8234 scales_h.unwrap_or(std::f64::NAN),
8235 scales_h.is_none() as i8,
8236 scales_w.unwrap_or(std::f64::NAN),
8237 scales_w.is_none() as i8
8238 ));
8239 Ok(Tensor { c_tensor: c_tensors[0] })
8240 }
8241
8242 pub fn f_internal_upsample_bilinear2d_aa_backward(
8243 grad_output: &Tensor,
8244 output_size: impl IntList,
8245 input_size: impl IntList,
8246 align_corners: bool,
8247 scales_h: impl Into<Option<f64>>,
8248 scales_w: impl Into<Option<f64>>,
8249 ) -> Result<Tensor, TchError> {
8250 let scales_h = scales_h.into();
8251 let scales_w = scales_w.into();
8252 let mut c_tensors = [std::ptr::null_mut(); 1];
8253 unsafe_torch_err!(atg__upsample_bilinear2d_aa_backward(
8254 c_tensors.as_mut_ptr(),
8255 grad_output.c_tensor,
8256 output_size.as_ptr(),
8257 output_size.len_i32(),
8258 input_size.as_ptr(),
8259 input_size.len_i32(),
8260 if align_corners { 1 } else { 0 },
8261 scales_h.unwrap_or(std::f64::NAN),
8262 scales_h.is_none() as i8,
8263 scales_w.unwrap_or(std::f64::NAN),
8264 scales_w.is_none() as i8
8265 ));
8266 Ok(Tensor { c_tensor: c_tensors[0] })
8267 }
8268
8269 pub fn f_internal_upsample_bilinear2d_aa_backward_grad_input(
8270 grad_input: &Tensor,
8271 grad_output: &Tensor,
8272 output_size: impl IntList,
8273 input_size: impl IntList,
8274 align_corners: bool,
8275 scales_h: impl Into<Option<f64>>,
8276 scales_w: impl Into<Option<f64>>,
8277 ) -> Result<Tensor, TchError> {
8278 let scales_h = scales_h.into();
8279 let scales_w = scales_w.into();
8280 let mut c_tensors = [std::ptr::null_mut(); 1];
8281 unsafe_torch_err!(atg__upsample_bilinear2d_aa_backward_grad_input(
8282 c_tensors.as_mut_ptr(),
8283 grad_input.c_tensor,
8284 grad_output.c_tensor,
8285 output_size.as_ptr(),
8286 output_size.len_i32(),
8287 input_size.as_ptr(),
8288 input_size.len_i32(),
8289 if align_corners { 1 } else { 0 },
8290 scales_h.unwrap_or(std::f64::NAN),
8291 scales_h.is_none() as i8,
8292 scales_w.unwrap_or(std::f64::NAN),
8293 scales_w.is_none() as i8
8294 ));
8295 Ok(Tensor { c_tensor: c_tensors[0] })
8296 }
8297
8298 pub fn f_internal_upsample_bilinear2d_aa_out(
8299 &self,
8300 out: &Tensor,
8301 output_size: impl IntList,
8302 align_corners: bool,
8303 scales_h: impl Into<Option<f64>>,
8304 scales_w: impl Into<Option<f64>>,
8305 ) -> Result<Tensor, TchError> {
8306 let scales_h = scales_h.into();
8307 let scales_w = scales_w.into();
8308 let mut c_tensors = [std::ptr::null_mut(); 1];
8309 unsafe_torch_err!(atg__upsample_bilinear2d_aa_out(
8310 c_tensors.as_mut_ptr(),
8311 out.c_tensor,
8312 self.c_tensor,
8313 output_size.as_ptr(),
8314 output_size.len_i32(),
8315 if align_corners { 1 } else { 0 },
8316 scales_h.unwrap_or(std::f64::NAN),
8317 scales_h.is_none() as i8,
8318 scales_w.unwrap_or(std::f64::NAN),
8319 scales_w.is_none() as i8
8320 ));
8321 Ok(Tensor { c_tensor: c_tensors[0] })
8322 }
8323
8324 pub fn f_internal_upsample_bilinear2d_aa_vec(
8325 &self,
8326 output_size: impl IntListOption,
8327 align_corners: bool,
8328 scale_factors: impl DoubleList,
8329 ) -> Result<Tensor, TchError> {
8330 let mut c_tensors = [std::ptr::null_mut(); 1];
8331 unsafe_torch_err!(atg__upsample_bilinear2d_aa_vec(
8332 c_tensors.as_mut_ptr(),
8333 self.c_tensor,
8334 output_size.as_ptr(),
8335 output_size.len_i32(),
8336 if align_corners { 1 } else { 0 },
8337 scale_factors.as_ptr(),
8338 scale_factors.len_i32()
8339 ));
8340 Ok(Tensor { c_tensor: c_tensors[0] })
8341 }
8342
8343 pub fn f_internal_upsample_nearest_exact1d(
8344 &self,
8345 output_size: impl IntList,
8346 scales: impl Into<Option<f64>>,
8347 ) -> Result<Tensor, TchError> {
8348 let scales = scales.into();
8349 let mut c_tensors = [std::ptr::null_mut(); 1];
8350 unsafe_torch_err!(atg__upsample_nearest_exact1d(
8351 c_tensors.as_mut_ptr(),
8352 self.c_tensor,
8353 output_size.as_ptr(),
8354 output_size.len_i32(),
8355 scales.unwrap_or(std::f64::NAN),
8356 scales.is_none() as i8
8357 ));
8358 Ok(Tensor { c_tensor: c_tensors[0] })
8359 }
8360
8361 pub fn f_internal_upsample_nearest_exact1d_backward(
8362 grad_output: &Tensor,
8363 output_size: impl IntList,
8364 input_size: impl IntList,
8365 scales: impl Into<Option<f64>>,
8366 ) -> Result<Tensor, TchError> {
8367 let scales = scales.into();
8368 let mut c_tensors = [std::ptr::null_mut(); 1];
8369 unsafe_torch_err!(atg__upsample_nearest_exact1d_backward(
8370 c_tensors.as_mut_ptr(),
8371 grad_output.c_tensor,
8372 output_size.as_ptr(),
8373 output_size.len_i32(),
8374 input_size.as_ptr(),
8375 input_size.len_i32(),
8376 scales.unwrap_or(std::f64::NAN),
8377 scales.is_none() as i8
8378 ));
8379 Ok(Tensor { c_tensor: c_tensors[0] })
8380 }
8381
8382 pub fn f_internal_upsample_nearest_exact1d_backward_grad_input(
8383 grad_input: &Tensor,
8384 grad_output: &Tensor,
8385 output_size: impl IntList,
8386 input_size: impl IntList,
8387 scales: impl Into<Option<f64>>,
8388 ) -> Result<Tensor, TchError> {
8389 let scales = scales.into();
8390 let mut c_tensors = [std::ptr::null_mut(); 1];
8391 unsafe_torch_err!(atg__upsample_nearest_exact1d_backward_grad_input(
8392 c_tensors.as_mut_ptr(),
8393 grad_input.c_tensor,
8394 grad_output.c_tensor,
8395 output_size.as_ptr(),
8396 output_size.len_i32(),
8397 input_size.as_ptr(),
8398 input_size.len_i32(),
8399 scales.unwrap_or(std::f64::NAN),
8400 scales.is_none() as i8
8401 ));
8402 Ok(Tensor { c_tensor: c_tensors[0] })
8403 }
8404
8405 pub fn f_internal_upsample_nearest_exact1d_out(
8406 &self,
8407 out: &Tensor,
8408 output_size: impl IntList,
8409 scales: impl Into<Option<f64>>,
8410 ) -> Result<Tensor, TchError> {
8411 let scales = scales.into();
8412 let mut c_tensors = [std::ptr::null_mut(); 1];
8413 unsafe_torch_err!(atg__upsample_nearest_exact1d_out(
8414 c_tensors.as_mut_ptr(),
8415 out.c_tensor,
8416 self.c_tensor,
8417 output_size.as_ptr(),
8418 output_size.len_i32(),
8419 scales.unwrap_or(std::f64::NAN),
8420 scales.is_none() as i8
8421 ));
8422 Ok(Tensor { c_tensor: c_tensors[0] })
8423 }
8424
8425 pub fn f_internal_upsample_nearest_exact1d_vec(
8426 &self,
8427 output_size: impl IntListOption,
8428 scale_factors: impl DoubleList,
8429 ) -> Result<Tensor, TchError> {
8430 let mut c_tensors = [std::ptr::null_mut(); 1];
8431 unsafe_torch_err!(atg__upsample_nearest_exact1d_vec(
8432 c_tensors.as_mut_ptr(),
8433 self.c_tensor,
8434 output_size.as_ptr(),
8435 output_size.len_i32(),
8436 scale_factors.as_ptr(),
8437 scale_factors.len_i32()
8438 ));
8439 Ok(Tensor { c_tensor: c_tensors[0] })
8440 }
8441
8442 pub fn f_internal_upsample_nearest_exact2d(
8443 &self,
8444 output_size: impl IntList,
8445 scales_h: impl Into<Option<f64>>,
8446 scales_w: impl Into<Option<f64>>,
8447 ) -> Result<Tensor, TchError> {
8448 let scales_h = scales_h.into();
8449 let scales_w = scales_w.into();
8450 let mut c_tensors = [std::ptr::null_mut(); 1];
8451 unsafe_torch_err!(atg__upsample_nearest_exact2d(
8452 c_tensors.as_mut_ptr(),
8453 self.c_tensor,
8454 output_size.as_ptr(),
8455 output_size.len_i32(),
8456 scales_h.unwrap_or(std::f64::NAN),
8457 scales_h.is_none() as i8,
8458 scales_w.unwrap_or(std::f64::NAN),
8459 scales_w.is_none() as i8
8460 ));
8461 Ok(Tensor { c_tensor: c_tensors[0] })
8462 }
8463
8464 pub fn f_internal_upsample_nearest_exact2d_backward(
8465 grad_output: &Tensor,
8466 output_size: impl IntList,
8467 input_size: impl IntList,
8468 scales_h: impl Into<Option<f64>>,
8469 scales_w: impl Into<Option<f64>>,
8470 ) -> Result<Tensor, TchError> {
8471 let scales_h = scales_h.into();
8472 let scales_w = scales_w.into();
8473 let mut c_tensors = [std::ptr::null_mut(); 1];
8474 unsafe_torch_err!(atg__upsample_nearest_exact2d_backward(
8475 c_tensors.as_mut_ptr(),
8476 grad_output.c_tensor,
8477 output_size.as_ptr(),
8478 output_size.len_i32(),
8479 input_size.as_ptr(),
8480 input_size.len_i32(),
8481 scales_h.unwrap_or(std::f64::NAN),
8482 scales_h.is_none() as i8,
8483 scales_w.unwrap_or(std::f64::NAN),
8484 scales_w.is_none() as i8
8485 ));
8486 Ok(Tensor { c_tensor: c_tensors[0] })
8487 }
8488
8489 pub fn f_internal_upsample_nearest_exact2d_backward_grad_input(
8490 grad_input: &Tensor,
8491 grad_output: &Tensor,
8492 output_size: impl IntList,
8493 input_size: impl IntList,
8494 scales_h: impl Into<Option<f64>>,
8495 scales_w: impl Into<Option<f64>>,
8496 ) -> Result<Tensor, TchError> {
8497 let scales_h = scales_h.into();
8498 let scales_w = scales_w.into();
8499 let mut c_tensors = [std::ptr::null_mut(); 1];
8500 unsafe_torch_err!(atg__upsample_nearest_exact2d_backward_grad_input(
8501 c_tensors.as_mut_ptr(),
8502 grad_input.c_tensor,
8503 grad_output.c_tensor,
8504 output_size.as_ptr(),
8505 output_size.len_i32(),
8506 input_size.as_ptr(),
8507 input_size.len_i32(),
8508 scales_h.unwrap_or(std::f64::NAN),
8509 scales_h.is_none() as i8,
8510 scales_w.unwrap_or(std::f64::NAN),
8511 scales_w.is_none() as i8
8512 ));
8513 Ok(Tensor { c_tensor: c_tensors[0] })
8514 }
8515
8516 pub fn f_internal_upsample_nearest_exact2d_out(
8517 &self,
8518 out: &Tensor,
8519 output_size: impl IntList,
8520 scales_h: impl Into<Option<f64>>,
8521 scales_w: impl Into<Option<f64>>,
8522 ) -> Result<Tensor, TchError> {
8523 let scales_h = scales_h.into();
8524 let scales_w = scales_w.into();
8525 let mut c_tensors = [std::ptr::null_mut(); 1];
8526 unsafe_torch_err!(atg__upsample_nearest_exact2d_out(
8527 c_tensors.as_mut_ptr(),
8528 out.c_tensor,
8529 self.c_tensor,
8530 output_size.as_ptr(),
8531 output_size.len_i32(),
8532 scales_h.unwrap_or(std::f64::NAN),
8533 scales_h.is_none() as i8,
8534 scales_w.unwrap_or(std::f64::NAN),
8535 scales_w.is_none() as i8
8536 ));
8537 Ok(Tensor { c_tensor: c_tensors[0] })
8538 }
8539
8540 pub fn f_internal_upsample_nearest_exact2d_vec(
8541 &self,
8542 output_size: impl IntListOption,
8543 scale_factors: impl DoubleList,
8544 ) -> Result<Tensor, TchError> {
8545 let mut c_tensors = [std::ptr::null_mut(); 1];
8546 unsafe_torch_err!(atg__upsample_nearest_exact2d_vec(
8547 c_tensors.as_mut_ptr(),
8548 self.c_tensor,
8549 output_size.as_ptr(),
8550 output_size.len_i32(),
8551 scale_factors.as_ptr(),
8552 scale_factors.len_i32()
8553 ));
8554 Ok(Tensor { c_tensor: c_tensors[0] })
8555 }
8556
8557 pub fn f_internal_upsample_nearest_exact3d(
8558 &self,
8559 output_size: impl IntList,
8560 scales_d: impl Into<Option<f64>>,
8561 scales_h: impl Into<Option<f64>>,
8562 scales_w: impl Into<Option<f64>>,
8563 ) -> Result<Tensor, TchError> {
8564 let scales_d = scales_d.into();
8565 let scales_h = scales_h.into();
8566 let scales_w = scales_w.into();
8567 let mut c_tensors = [std::ptr::null_mut(); 1];
8568 unsafe_torch_err!(atg__upsample_nearest_exact3d(
8569 c_tensors.as_mut_ptr(),
8570 self.c_tensor,
8571 output_size.as_ptr(),
8572 output_size.len_i32(),
8573 scales_d.unwrap_or(std::f64::NAN),
8574 scales_d.is_none() as i8,
8575 scales_h.unwrap_or(std::f64::NAN),
8576 scales_h.is_none() as i8,
8577 scales_w.unwrap_or(std::f64::NAN),
8578 scales_w.is_none() as i8
8579 ));
8580 Ok(Tensor { c_tensor: c_tensors[0] })
8581 }
8582
8583 pub fn f_internal_upsample_nearest_exact3d_backward(
8584 grad_output: &Tensor,
8585 output_size: impl IntList,
8586 input_size: impl IntList,
8587 scales_d: impl Into<Option<f64>>,
8588 scales_h: impl Into<Option<f64>>,
8589 scales_w: impl Into<Option<f64>>,
8590 ) -> Result<Tensor, TchError> {
8591 let scales_d = scales_d.into();
8592 let scales_h = scales_h.into();
8593 let scales_w = scales_w.into();
8594 let mut c_tensors = [std::ptr::null_mut(); 1];
8595 unsafe_torch_err!(atg__upsample_nearest_exact3d_backward(
8596 c_tensors.as_mut_ptr(),
8597 grad_output.c_tensor,
8598 output_size.as_ptr(),
8599 output_size.len_i32(),
8600 input_size.as_ptr(),
8601 input_size.len_i32(),
8602 scales_d.unwrap_or(std::f64::NAN),
8603 scales_d.is_none() as i8,
8604 scales_h.unwrap_or(std::f64::NAN),
8605 scales_h.is_none() as i8,
8606 scales_w.unwrap_or(std::f64::NAN),
8607 scales_w.is_none() as i8
8608 ));
8609 Ok(Tensor { c_tensor: c_tensors[0] })
8610 }
8611
8612 pub fn f_internal_upsample_nearest_exact3d_backward_grad_input(
8613 grad_input: &Tensor,
8614 grad_output: &Tensor,
8615 output_size: impl IntList,
8616 input_size: impl IntList,
8617 scales_d: impl Into<Option<f64>>,
8618 scales_h: impl Into<Option<f64>>,
8619 scales_w: impl Into<Option<f64>>,
8620 ) -> Result<Tensor, TchError> {
8621 let scales_d = scales_d.into();
8622 let scales_h = scales_h.into();
8623 let scales_w = scales_w.into();
8624 let mut c_tensors = [std::ptr::null_mut(); 1];
8625 unsafe_torch_err!(atg__upsample_nearest_exact3d_backward_grad_input(
8626 c_tensors.as_mut_ptr(),
8627 grad_input.c_tensor,
8628 grad_output.c_tensor,
8629 output_size.as_ptr(),
8630 output_size.len_i32(),
8631 input_size.as_ptr(),
8632 input_size.len_i32(),
8633 scales_d.unwrap_or(std::f64::NAN),
8634 scales_d.is_none() as i8,
8635 scales_h.unwrap_or(std::f64::NAN),
8636 scales_h.is_none() as i8,
8637 scales_w.unwrap_or(std::f64::NAN),
8638 scales_w.is_none() as i8
8639 ));
8640 Ok(Tensor { c_tensor: c_tensors[0] })
8641 }
8642
8643 pub fn f_internal_upsample_nearest_exact3d_out(
8644 &self,
8645 out: &Tensor,
8646 output_size: impl IntList,
8647 scales_d: impl Into<Option<f64>>,
8648 scales_h: impl Into<Option<f64>>,
8649 scales_w: impl Into<Option<f64>>,
8650 ) -> Result<Tensor, TchError> {
8651 let scales_d = scales_d.into();
8652 let scales_h = scales_h.into();
8653 let scales_w = scales_w.into();
8654 let mut c_tensors = [std::ptr::null_mut(); 1];
8655 unsafe_torch_err!(atg__upsample_nearest_exact3d_out(
8656 c_tensors.as_mut_ptr(),
8657 out.c_tensor,
8658 self.c_tensor,
8659 output_size.as_ptr(),
8660 output_size.len_i32(),
8661 scales_d.unwrap_or(std::f64::NAN),
8662 scales_d.is_none() as i8,
8663 scales_h.unwrap_or(std::f64::NAN),
8664 scales_h.is_none() as i8,
8665 scales_w.unwrap_or(std::f64::NAN),
8666 scales_w.is_none() as i8
8667 ));
8668 Ok(Tensor { c_tensor: c_tensors[0] })
8669 }
8670
8671 pub fn f_internal_upsample_nearest_exact3d_vec(
8672 &self,
8673 output_size: impl IntListOption,
8674 scale_factors: impl DoubleList,
8675 ) -> Result<Tensor, TchError> {
8676 let mut c_tensors = [std::ptr::null_mut(); 1];
8677 unsafe_torch_err!(atg__upsample_nearest_exact3d_vec(
8678 c_tensors.as_mut_ptr(),
8679 self.c_tensor,
8680 output_size.as_ptr(),
8681 output_size.len_i32(),
8682 scale_factors.as_ptr(),
8683 scale_factors.len_i32()
8684 ));
8685 Ok(Tensor { c_tensor: c_tensors[0] })
8686 }
8687
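    // The `_use_cudnn_*` predicates below return a plain `bool`: the C call
    // yields an integer and any non-zero value is mapped to `true`.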
8688 pub fn f_internal_use_cudnn_ctc_loss(
8689 log_probs: &Tensor,
8690 targets: &Tensor,
8691 input_lengths: impl IntList,
8692 target_lengths: impl IntList,
8693 blank: i64,
8694 ) -> Result<bool, TchError> {
8695 let return_;
8696 unsafe_torch_err!(
8697 return_ = atg__use_cudnn_ctc_loss(
8698 log_probs.c_tensor,
8699 targets.c_tensor,
8700 input_lengths.as_ptr(),
8701 input_lengths.len_i32(),
8702 target_lengths.as_ptr(),
8703 target_lengths.len_i32(),
8704 blank
8705 )
8706 );
8707 Ok(return_ != 0)
8708 }
8709
8710 pub fn f_internal_use_cudnn_ctc_loss_tensor(
8711 log_probs: &Tensor,
8712 targets: &Tensor,
8713 input_lengths: &Tensor,
8714 target_lengths: &Tensor,
8715 blank: i64,
8716 ) -> Result<bool, TchError> {
8717 let return_;
8718 unsafe_torch_err!(
8719 return_ = atg__use_cudnn_ctc_loss_tensor(
8720 log_probs.c_tensor,
8721 targets.c_tensor,
8722 input_lengths.c_tensor,
8723 target_lengths.c_tensor,
8724 blank
8725 )
8726 );
8727 Ok(return_ != 0)
8728 }
8729
8730 pub fn f_internal_use_cudnn_rnn_flatten_weight() -> Result<bool, TchError> {
8731 let return_;
8732 unsafe_torch_err!(return_ = atg__use_cudnn_rnn_flatten_weight());
8733 Ok(return_ != 0)
8734 }
8735
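    // The `_validate_*` bindings below return `Ok(())` on success; if the
    // indices/values are inconsistent the ATen check throws, which
    // `unsafe_torch_err!` surfaces as a `TchError` rather than a panic.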
8736 pub fn f_internal_validate_compressed_sparse_indices(
8737 is_crow: bool,
8738 compressed_idx: &Tensor,
8739 plain_idx: &Tensor,
8740 cdim: i64,
8741 dim: i64,
8742 nnz: i64,
8743 ) -> Result<(), TchError> {
8744 unsafe_torch_err!(atg__validate_compressed_sparse_indices(
8745 if is_crow { 1 } else { 0 },
8746 compressed_idx.c_tensor,
8747 plain_idx.c_tensor,
8748 cdim,
8749 dim,
8750 nnz
8751 ));
8752 Ok(())
8753 }
8754
8755 pub fn f_internal_validate_sparse_bsc_tensor_args(
8756 ccol_indices: &Tensor,
8757 row_indices: &Tensor,
8758 values: &Tensor,
8759 size: impl IntList,
8760 ) -> Result<(), TchError> {
8761 unsafe_torch_err!(atg__validate_sparse_bsc_tensor_args(
8762 ccol_indices.c_tensor,
8763 row_indices.c_tensor,
8764 values.c_tensor,
8765 size.as_ptr(),
8766 size.len_i32()
8767 ));
8768 Ok(())
8769 }
8770
8771 pub fn f_internal_validate_sparse_bsr_tensor_args(
8772 crow_indices: &Tensor,
8773 col_indices: &Tensor,
8774 values: &Tensor,
8775 size: impl IntList,
8776 ) -> Result<(), TchError> {
8777 unsafe_torch_err!(atg__validate_sparse_bsr_tensor_args(
8778 crow_indices.c_tensor,
8779 col_indices.c_tensor,
8780 values.c_tensor,
8781 size.as_ptr(),
8782 size.len_i32()
8783 ));
8784 Ok(())
8785 }
8786
8787 pub fn f_internal_validate_sparse_compressed_tensor_args(
8788 compressed_indices: &Tensor,
8789 plain_indices: &Tensor,
8790 values: &Tensor,
8791 size: impl IntList,
8792 layout: Layout,
8793 ) -> Result<(), TchError> {
8794 unsafe_torch_err!(atg__validate_sparse_compressed_tensor_args(
8795 compressed_indices.c_tensor,
8796 plain_indices.c_tensor,
8797 values.c_tensor,
8798 size.as_ptr(),
8799 size.len_i32(),
8800 layout.to_i8()
8801 ));
8802 Ok(())
8803 }
8804
8805 pub fn f_internal_validate_sparse_csc_tensor_args(
8806 ccol_indices: &Tensor,
8807 row_indices: &Tensor,
8808 values: &Tensor,
8809 size: impl IntList,
8810 ) -> Result<(), TchError> {
8811 unsafe_torch_err!(atg__validate_sparse_csc_tensor_args(
8812 ccol_indices.c_tensor,
8813 row_indices.c_tensor,
8814 values.c_tensor,
8815 size.as_ptr(),
8816 size.len_i32()
8817 ));
8818 Ok(())
8819 }
8820
8821 pub fn f_internal_validate_sparse_csr_tensor_args(
8822 crow_indices: &Tensor,
8823 col_indices: &Tensor,
8824 values: &Tensor,
8825 size: impl IntList,
8826 ) -> Result<(), TchError> {
8827 unsafe_torch_err!(atg__validate_sparse_csr_tensor_args(
8828 crow_indices.c_tensor,
8829 col_indices.c_tensor,
8830 values.c_tensor,
8831 size.as_ptr(),
8832 size.len_i32()
8833 ));
8834 Ok(())
8835 }
8836
8837 pub fn f_internal_values(&self) -> Result<Tensor, TchError> {
8838 let mut c_tensors = [std::ptr::null_mut(); 1];
8839 unsafe_torch_err!(atg__values(c_tensors.as_mut_ptr(), self.c_tensor));
8840 Ok(Tensor { c_tensor: c_tensors[0] })
8841 }
8842
8843 pub fn f_internal_values_copy(&self) -> Result<Tensor, TchError> {
8844 let mut c_tensors = [std::ptr::null_mut(); 1];
8845 unsafe_torch_err!(atg__values_copy(c_tensors.as_mut_ptr(), self.c_tensor));
8846 Ok(Tensor { c_tensor: c_tensors[0] })
8847 }
8848
8849 pub fn f_internal_values_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
8850 let mut c_tensors = [std::ptr::null_mut(); 1];
8851 unsafe_torch_err!(atg__values_copy_out(
8852 c_tensors.as_mut_ptr(),
8853 out.c_tensor,
8854 self.c_tensor
8855 ));
8856 Ok(Tensor { c_tensor: c_tensors[0] })
8857 }
8858
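    /// Returns the tensor's internal version counter, which libtorch bumps on
    /// every in-place mutation.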
8859 pub fn f_internal_version(&self) -> Result<i64, TchError> {
8860 let return_;
8861 unsafe_torch_err!(return_ = atg__version(self.c_tensor));
8862 Ok(return_)
8863 }
8864
8865 pub fn f_internal_weight_int4pack_mm(
8866 &self,
8867 mat2: &Tensor,
8868 qgroupsize: i64,
8869 qscaleandzeros: &Tensor,
8870 ) -> Result<Tensor, TchError> {
8871 let mut c_tensors = [std::ptr::null_mut(); 1];
8872 unsafe_torch_err!(atg__weight_int4pack_mm(
8873 c_tensors.as_mut_ptr(),
8874 self.c_tensor,
8875 mat2.c_tensor,
8876 qgroupsize,
8877 qscaleandzeros.c_tensor
8878 ));
8879 Ok(Tensor { c_tensor: c_tensors[0] })
8880 }
8881
8882 pub fn f_internal_weight_int8pack_mm(
8883 &self,
8884 mat2: &Tensor,
8885 scales: &Tensor,
8886 ) -> Result<Tensor, TchError> {
8887 let mut c_tensors = [std::ptr::null_mut(); 1];
8888 unsafe_torch_err!(atg__weight_int8pack_mm(
8889 c_tensors.as_mut_ptr(),
8890 self.c_tensor,
8891 mat2.c_tensor,
8892 scales.c_tensor
8893 ));
8894 Ok(Tensor { c_tensor: c_tensors[0] })
8895 }
8896
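    // Weight-normalization helpers: `v` is the direction tensor, `g` the
    // magnitude and `dim` the dimension along which the norm is taken; the
    // `_interface*` variants below expose the intermediate values autograd
    // needs for the backward pass.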
8897 pub fn f_internal_weight_norm(v: &Tensor, g: &Tensor, dim: i64) -> Result<Tensor, TchError> {
8898 let mut c_tensors = [std::ptr::null_mut(); 1];
8899 unsafe_torch_err!(atg__weight_norm(c_tensors.as_mut_ptr(), v.c_tensor, g.c_tensor, dim));
8900 Ok(Tensor { c_tensor: c_tensors[0] })
8901 }
8902
8903 pub fn f_internal_weight_norm_differentiable_backward(
8904 grad_w: &Tensor,
8905 saved_v: &Tensor,
8906 saved_g: &Tensor,
8907 saved_norms: &Tensor,
8908 dim: i64,
8909 ) -> Result<(Tensor, Tensor), TchError> {
8910 let mut c_tensors = [std::ptr::null_mut(); 2];
8911 unsafe_torch_err!(atg__weight_norm_differentiable_backward(
8912 c_tensors.as_mut_ptr(),
8913 grad_w.c_tensor,
8914 saved_v.c_tensor,
8915 saved_g.c_tensor,
8916 saved_norms.c_tensor,
8917 dim
8918 ));
8919 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
8920 }
8921
8922 pub fn f_internal_weight_norm_interface(
8923 v: &Tensor,
8924 g: &Tensor,
8925 dim: i64,
8926 ) -> Result<(Tensor, Tensor), TchError> {
8927 let mut c_tensors = [std::ptr::null_mut(); 2];
8928 unsafe_torch_err!(atg__weight_norm_interface(
8929 c_tensors.as_mut_ptr(),
8930 v.c_tensor,
8931 g.c_tensor,
8932 dim
8933 ));
8934 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
8935 }
8936
8937 pub fn f_internal_weight_norm_interface_backward(
8938 grad_w: &Tensor,
8939 saved_v: &Tensor,
8940 saved_g: &Tensor,
8941 saved_norms: &Tensor,
8942 dim: i64,
8943 ) -> Result<(Tensor, Tensor), TchError> {
8944 let mut c_tensors = [std::ptr::null_mut(); 2];
8945 unsafe_torch_err!(atg__weight_norm_interface_backward(
8946 c_tensors.as_mut_ptr(),
8947 grad_w.c_tensor,
8948 saved_v.c_tensor,
8949 saved_g.c_tensor,
8950 saved_norms.c_tensor,
8951 dim
8952 ));
8953 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
8954 }
8955
8956 pub fn f_internal_weight_norm_interface_backward_out(
8957 out0: &Tensor,
8958 out1: &Tensor,
8959 grad_w: &Tensor,
8960 saved_v: &Tensor,
8961 saved_g: &Tensor,
8962 saved_norms: &Tensor,
8963 dim: i64,
8964 ) -> Result<(Tensor, Tensor), TchError> {
8965 let mut c_tensors = [std::ptr::null_mut(); 2];
8966 unsafe_torch_err!(atg__weight_norm_interface_backward_out(
8967 c_tensors.as_mut_ptr(),
8968 out0.c_tensor,
8969 out1.c_tensor,
8970 grad_w.c_tensor,
8971 saved_v.c_tensor,
8972 saved_g.c_tensor,
8973 saved_norms.c_tensor,
8974 dim
8975 ));
8976 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
8977 }
8978
8979 pub fn f_internal_weight_norm_interface_out(
8980 out0: &Tensor,
8981 out1: &Tensor,
8982 v: &Tensor,
8983 g: &Tensor,
8984 dim: i64,
8985 ) -> Result<(Tensor, Tensor), TchError> {
8986 let mut c_tensors = [std::ptr::null_mut(); 2];
8987 unsafe_torch_err!(atg__weight_norm_interface_out(
8988 c_tensors.as_mut_ptr(),
8989 out0.c_tensor,
8990 out1.c_tensor,
8991 v.c_tensor,
8992 g.c_tensor,
8993 dim
8994 ));
8995 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
8996 }
8997
8998 pub fn f_internal_wrapped_linear_prepack(
8999 weight: &Tensor,
9000 weight_scale: &Tensor,
9001 weight_zero_point: &Tensor,
9002 bias: &Tensor,
9003 ) -> Result<Tensor, TchError> {
9004 let mut c_tensors = [std::ptr::null_mut(); 1];
9005 unsafe_torch_err!(atg__wrapped_linear_prepack(
9006 c_tensors.as_mut_ptr(),
9007 weight.c_tensor,
9008 weight_scale.c_tensor,
9009 weight_zero_point.c_tensor,
9010 bias.c_tensor
9011 ));
9012 Ok(Tensor { c_tensor: c_tensors[0] })
9013 }
9014
9015 pub fn f_internal_wrapped_quantized_linear_prepacked(
9016 &self,
9017 input_scale: &Tensor,
9018 input_zero_point: &Tensor,
9019 packed_weight: &Tensor,
9020 output_scale: &Tensor,
9021 output_zero_point: &Tensor,
9022 out_channel: i64,
9023 ) -> Result<Tensor, TchError> {
9024 let mut c_tensors = [std::ptr::null_mut(); 1];
9025 unsafe_torch_err!(atg__wrapped_quantized_linear_prepacked(
9026 c_tensors.as_mut_ptr(),
9027 self.c_tensor,
9028 input_scale.c_tensor,
9029 input_zero_point.c_tensor,
9030 packed_weight.c_tensor,
9031 output_scale.c_tensor,
9032 output_zero_point.c_tensor,
9033 out_channel
9034 ));
9035 Ok(Tensor { c_tensor: c_tensors[0] })
9036 }
9037
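    /// Element-wise absolute value. Like most operators in this file it comes
    /// in three fallible forms: an allocating one (`f_abs`), an in-place one
    /// taking `&mut self` (`f_abs_`), and an explicit-output one (`f_abs_out`).
    /// A minimal sketch, assuming `x` is an existing float tensor and the
    /// caller returns `Result<_, TchError>`:
    ///
    /// ```ignore
    /// let y = x.f_abs()?;    // allocates a new tensor holding |x|
    /// x.f_abs_out(&y)?;      // writes |x| into the preallocated `y`
    /// ```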
9038 pub fn f_abs(&self) -> Result<Tensor, TchError> {
9039 let mut c_tensors = [std::ptr::null_mut(); 1];
9040 unsafe_torch_err!(atg_abs(c_tensors.as_mut_ptr(), self.c_tensor));
9041 Ok(Tensor { c_tensor: c_tensors[0] })
9042 }
9043
9044 pub fn f_abs_(&mut self) -> Result<Tensor, TchError> {
9045 let mut c_tensors = [std::ptr::null_mut(); 1];
9046 unsafe_torch_err!(atg_abs_(c_tensors.as_mut_ptr(), self.c_tensor));
9047 Ok(Tensor { c_tensor: c_tensors[0] })
9048 }
9049
9050 pub fn f_abs_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
9051 let mut c_tensors = [std::ptr::null_mut(); 1];
9052 unsafe_torch_err!(atg_abs_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
9053 Ok(Tensor { c_tensor: c_tensors[0] })
9054 }
9055
9056 pub fn f_absolute(&self) -> Result<Tensor, TchError> {
9057 let mut c_tensors = [std::ptr::null_mut(); 1];
9058 unsafe_torch_err!(atg_absolute(c_tensors.as_mut_ptr(), self.c_tensor));
9059 Ok(Tensor { c_tensor: c_tensors[0] })
9060 }
9061
9062 pub fn f_absolute_(&mut self) -> Result<Tensor, TchError> {
9063 let mut c_tensors = [std::ptr::null_mut(); 1];
9064 unsafe_torch_err!(atg_absolute_(c_tensors.as_mut_ptr(), self.c_tensor));
9065 Ok(Tensor { c_tensor: c_tensors[0] })
9066 }
9067
9068 pub fn f_absolute_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
9069 let mut c_tensors = [std::ptr::null_mut(); 1];
9070 unsafe_torch_err!(atg_absolute_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
9071 Ok(Tensor { c_tensor: c_tensors[0] })
9072 }
9073
9074 pub fn f_acos(&self) -> Result<Tensor, TchError> {
9075 let mut c_tensors = [std::ptr::null_mut(); 1];
9076 unsafe_torch_err!(atg_acos(c_tensors.as_mut_ptr(), self.c_tensor));
9077 Ok(Tensor { c_tensor: c_tensors[0] })
9078 }
9079
9080 pub fn f_acos_(&mut self) -> Result<Tensor, TchError> {
9081 let mut c_tensors = [std::ptr::null_mut(); 1];
9082 unsafe_torch_err!(atg_acos_(c_tensors.as_mut_ptr(), self.c_tensor));
9083 Ok(Tensor { c_tensor: c_tensors[0] })
9084 }
9085
9086 pub fn f_acos_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
9087 let mut c_tensors = [std::ptr::null_mut(); 1];
9088 unsafe_torch_err!(atg_acos_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
9089 Ok(Tensor { c_tensor: c_tensors[0] })
9090 }
9091
9092 pub fn f_acosh(&self) -> Result<Tensor, TchError> {
9093 let mut c_tensors = [std::ptr::null_mut(); 1];
9094 unsafe_torch_err!(atg_acosh(c_tensors.as_mut_ptr(), self.c_tensor));
9095 Ok(Tensor { c_tensor: c_tensors[0] })
9096 }
9097
9098 pub fn f_acosh_(&mut self) -> Result<Tensor, TchError> {
9099 let mut c_tensors = [std::ptr::null_mut(); 1];
9100 unsafe_torch_err!(atg_acosh_(c_tensors.as_mut_ptr(), self.c_tensor));
9101 Ok(Tensor { c_tensor: c_tensors[0] })
9102 }
9103
9104 pub fn f_acosh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
9105 let mut c_tensors = [std::ptr::null_mut(); 1];
9106 unsafe_torch_err!(atg_acosh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
9107 Ok(Tensor { c_tensor: c_tensors[0] })
9108 }
9109
9110 pub fn f_adaptive_avg_pool1d(&self, output_size: impl IntList) -> Result<Tensor, TchError> {
9111 let mut c_tensors = [std::ptr::null_mut(); 1];
9112 unsafe_torch_err!(atg_adaptive_avg_pool1d(
9113 c_tensors.as_mut_ptr(),
9114 self.c_tensor,
9115 output_size.as_ptr(),
9116 output_size.len_i32()
9117 ));
9118 Ok(Tensor { c_tensor: c_tensors[0] })
9119 }
9120
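    /// Adaptive average pooling: only the target output size is given; kernel
    /// size and stride are derived internally. A minimal sketch, assuming `x`
    /// is an existing 4-D NCHW tensor:
    ///
    /// ```ignore
    /// let pooled = x.f_adaptive_avg_pool2d(&[1i64, 1])?; // global average pool
    /// ```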
9121 pub fn f_adaptive_avg_pool2d(&self, output_size: impl IntList) -> Result<Tensor, TchError> {
9122 let mut c_tensors = [std::ptr::null_mut(); 1];
9123 unsafe_torch_err!(atg_adaptive_avg_pool2d(
9124 c_tensors.as_mut_ptr(),
9125 self.c_tensor,
9126 output_size.as_ptr(),
9127 output_size.len_i32()
9128 ));
9129 Ok(Tensor { c_tensor: c_tensors[0] })
9130 }
9131
9132 pub fn f_adaptive_avg_pool2d_out(
9133 &self,
9134 out: &Tensor,
9135 output_size: impl IntList,
9136 ) -> Result<Tensor, TchError> {
9137 let mut c_tensors = [std::ptr::null_mut(); 1];
9138 unsafe_torch_err!(atg_adaptive_avg_pool2d_out(
9139 c_tensors.as_mut_ptr(),
9140 out.c_tensor,
9141 self.c_tensor,
9142 output_size.as_ptr(),
9143 output_size.len_i32()
9144 ));
9145 Ok(Tensor { c_tensor: c_tensors[0] })
9146 }
9147
9148 pub fn f_adaptive_avg_pool3d(&self, output_size: impl IntList) -> Result<Tensor, TchError> {
9149 let mut c_tensors = [std::ptr::null_mut(); 1];
9150 unsafe_torch_err!(atg_adaptive_avg_pool3d(
9151 c_tensors.as_mut_ptr(),
9152 self.c_tensor,
9153 output_size.as_ptr(),
9154 output_size.len_i32()
9155 ));
9156 Ok(Tensor { c_tensor: c_tensors[0] })
9157 }
9158
9159 pub fn f_adaptive_avg_pool3d_backward(
9160 &self,
9161 grad_input: &Tensor,
9162 grad_output: &Tensor,
9163 ) -> Result<Tensor, TchError> {
9164 let mut c_tensors = [std::ptr::null_mut(); 1];
9165 unsafe_torch_err!(atg_adaptive_avg_pool3d_backward(
9166 c_tensors.as_mut_ptr(),
9167 grad_input.c_tensor,
9168 grad_output.c_tensor,
9169 self.c_tensor
9170 ));
9171 Ok(Tensor { c_tensor: c_tensors[0] })
9172 }
9173
9174 pub fn f_adaptive_avg_pool3d_out(
9175 &self,
9176 out: &Tensor,
9177 output_size: impl IntList,
9178 ) -> Result<Tensor, TchError> {
9179 let mut c_tensors = [std::ptr::null_mut(); 1];
9180 unsafe_torch_err!(atg_adaptive_avg_pool3d_out(
9181 c_tensors.as_mut_ptr(),
9182 out.c_tensor,
9183 self.c_tensor,
9184 output_size.as_ptr(),
9185 output_size.len_i32()
9186 ));
9187 Ok(Tensor { c_tensor: c_tensors[0] })
9188 }
9189
9190 pub fn f_adaptive_max_pool1d(
9191 &self,
9192 output_size: impl IntList,
9193 ) -> Result<(Tensor, Tensor), TchError> {
9194 let mut c_tensors = [std::ptr::null_mut(); 2];
9195 unsafe_torch_err!(atg_adaptive_max_pool1d(
9196 c_tensors.as_mut_ptr(),
9197 self.c_tensor,
9198 output_size.as_ptr(),
9199 output_size.len_i32()
9200 ));
9201 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
9202 }
9203
9204 pub fn f_adaptive_max_pool2d(
9205 &self,
9206 output_size: impl IntList,
9207 ) -> Result<(Tensor, Tensor), TchError> {
9208 let mut c_tensors = [std::ptr::null_mut(); 2];
9209 unsafe_torch_err!(atg_adaptive_max_pool2d(
9210 c_tensors.as_mut_ptr(),
9211 self.c_tensor,
9212 output_size.as_ptr(),
9213 output_size.len_i32()
9214 ));
9215 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
9216 }
9217
9218 pub fn f_adaptive_max_pool2d_backward(
9219 &self,
9220 grad_output: &Tensor,
9221 indices: &Tensor,
9222 ) -> Result<Tensor, TchError> {
9223 let mut c_tensors = [std::ptr::null_mut(); 1];
9224 unsafe_torch_err!(atg_adaptive_max_pool2d_backward(
9225 c_tensors.as_mut_ptr(),
9226 grad_output.c_tensor,
9227 self.c_tensor,
9228 indices.c_tensor
9229 ));
9230 Ok(Tensor { c_tensor: c_tensors[0] })
9231 }
9232
9233 pub fn f_adaptive_max_pool2d_backward_grad_input(
9234 &self,
9235 grad_input: &Tensor,
9236 grad_output: &Tensor,
9237 indices: &Tensor,
9238 ) -> Result<Tensor, TchError> {
9239 let mut c_tensors = [std::ptr::null_mut(); 1];
9240 unsafe_torch_err!(atg_adaptive_max_pool2d_backward_grad_input(
9241 c_tensors.as_mut_ptr(),
9242 grad_input.c_tensor,
9243 grad_output.c_tensor,
9244 self.c_tensor,
9245 indices.c_tensor
9246 ));
9247 Ok(Tensor { c_tensor: c_tensors[0] })
9248 }
9249
9250 pub fn f_adaptive_max_pool2d_out(
9251 &self,
9252 out: &Tensor,
9253 indices: &Tensor,
9254 output_size: impl IntList,
9255 ) -> Result<(Tensor, Tensor), TchError> {
9256 let mut c_tensors = [std::ptr::null_mut(); 2];
9257 unsafe_torch_err!(atg_adaptive_max_pool2d_out(
9258 c_tensors.as_mut_ptr(),
9259 out.c_tensor,
9260 indices.c_tensor,
9261 self.c_tensor,
9262 output_size.as_ptr(),
9263 output_size.len_i32()
9264 ));
9265 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
9266 }
9267
9268 pub fn f_adaptive_max_pool3d(
9269 &self,
9270 output_size: impl IntList,
9271 ) -> Result<(Tensor, Tensor), TchError> {
9272 let mut c_tensors = [std::ptr::null_mut(); 2];
9273 unsafe_torch_err!(atg_adaptive_max_pool3d(
9274 c_tensors.as_mut_ptr(),
9275 self.c_tensor,
9276 output_size.as_ptr(),
9277 output_size.len_i32()
9278 ));
9279 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
9280 }
9281
9282 pub fn f_adaptive_max_pool3d_backward(
9283 &self,
9284 grad_output: &Tensor,
9285 indices: &Tensor,
9286 ) -> Result<Tensor, TchError> {
9287 let mut c_tensors = [std::ptr::null_mut(); 1];
9288 unsafe_torch_err!(atg_adaptive_max_pool3d_backward(
9289 c_tensors.as_mut_ptr(),
9290 grad_output.c_tensor,
9291 self.c_tensor,
9292 indices.c_tensor
9293 ));
9294 Ok(Tensor { c_tensor: c_tensors[0] })
9295 }
9296
9297 pub fn f_adaptive_max_pool3d_backward_grad_input(
9298 &self,
9299 grad_input: &Tensor,
9300 grad_output: &Tensor,
9301 indices: &Tensor,
9302 ) -> Result<Tensor, TchError> {
9303 let mut c_tensors = [std::ptr::null_mut(); 1];
9304 unsafe_torch_err!(atg_adaptive_max_pool3d_backward_grad_input(
9305 c_tensors.as_mut_ptr(),
9306 grad_input.c_tensor,
9307 grad_output.c_tensor,
9308 self.c_tensor,
9309 indices.c_tensor
9310 ));
9311 Ok(Tensor { c_tensor: c_tensors[0] })
9312 }
9313
9314 pub fn f_adaptive_max_pool3d_out(
9315 &self,
9316 out: &Tensor,
9317 indices: &Tensor,
9318 output_size: impl IntList,
9319 ) -> Result<(Tensor, Tensor), TchError> {
9320 let mut c_tensors = [std::ptr::null_mut(); 2];
9321 unsafe_torch_err!(atg_adaptive_max_pool3d_out(
9322 c_tensors.as_mut_ptr(),
9323 out.c_tensor,
9324 indices.c_tensor,
9325 self.c_tensor,
9326 output_size.as_ptr(),
9327 output_size.len_i32()
9328 ));
9329 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
9330 }
9331
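    /// Element-wise addition with broadcasting. `f_add` allocates, `f_add_`
    /// mutates `self`, `f_add_out` writes into `out`, and the `_scalar`
    /// variants accept anything convertible into `Scalar`. A minimal sketch,
    /// assuming `a` and `b` are existing broadcast-compatible tensors:
    ///
    /// ```ignore
    /// let c = a.f_add(&b)?;        // c = a + b
    /// let d = a.f_add_scalar(1)?;  // d = a + 1 (i64 converts into Scalar)
    /// ```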
9332 pub fn f_add(&self, other: &Tensor) -> Result<Tensor, TchError> {
9333 let mut c_tensors = [std::ptr::null_mut(); 1];
9334 unsafe_torch_err!(atg_add(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
9335 Ok(Tensor { c_tensor: c_tensors[0] })
9336 }
9337
9338 pub fn f_add_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
9339 let mut c_tensors = [std::ptr::null_mut(); 1];
9340 unsafe_torch_err!(atg_add_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
9341 Ok(Tensor { c_tensor: c_tensors[0] })
9342 }
9343
9344 pub fn f_add_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
9345 let mut c_tensors = [std::ptr::null_mut(); 1];
9346 unsafe_torch_err!(atg_add_out(
9347 c_tensors.as_mut_ptr(),
9348 out.c_tensor,
9349 self.c_tensor,
9350 other.c_tensor
9351 ));
9352 Ok(Tensor { c_tensor: c_tensors[0] })
9353 }
9354
9355 pub fn f_add_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
9356 let mut c_tensors = [std::ptr::null_mut(); 1];
9357 unsafe_torch_err!(atg_add_scalar(
9358 c_tensors.as_mut_ptr(),
9359 self.c_tensor,
9360 other.into().c_scalar
9361 ));
9362 Ok(Tensor { c_tensor: c_tensors[0] })
9363 }
9364
9365 pub fn f_add_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
9366 let mut c_tensors = [std::ptr::null_mut(); 1];
9367 unsafe_torch_err!(atg_add_scalar_(
9368 c_tensors.as_mut_ptr(),
9369 self.c_tensor,
9370 other.into().c_scalar
9371 ));
9372 Ok(Tensor { c_tensor: c_tensors[0] })
9373 }
9374
9375 pub fn f_add_scalar_out<S: Into<Scalar>>(
9376 &self,
9377 out: &Tensor,
9378 other: S,
9379 ) -> Result<Tensor, TchError> {
9380 let mut c_tensors = [std::ptr::null_mut(); 1];
9381 unsafe_torch_err!(atg_add_scalar_out(
9382 c_tensors.as_mut_ptr(),
9383 out.c_tensor,
9384 self.c_tensor,
9385 other.into().c_scalar
9386 ));
9387 Ok(Tensor { c_tensor: c_tensors[0] })
9388 }
9389
9390 pub fn f_addbmm(&self, batch1: &Tensor, batch2: &Tensor) -> Result<Tensor, TchError> {
9391 let mut c_tensors = [std::ptr::null_mut(); 1];
9392 unsafe_torch_err!(atg_addbmm(
9393 c_tensors.as_mut_ptr(),
9394 self.c_tensor,
9395 batch1.c_tensor,
9396 batch2.c_tensor
9397 ));
9398 Ok(Tensor { c_tensor: c_tensors[0] })
9399 }
9400
9401 pub fn f_addbmm_(&mut self, batch1: &Tensor, batch2: &Tensor) -> Result<Tensor, TchError> {
9402 let mut c_tensors = [std::ptr::null_mut(); 1];
9403 unsafe_torch_err!(atg_addbmm_(
9404 c_tensors.as_mut_ptr(),
9405 self.c_tensor,
9406 batch1.c_tensor,
9407 batch2.c_tensor
9408 ));
9409 Ok(Tensor { c_tensor: c_tensors[0] })
9410 }
9411
9412 pub fn f_addbmm_out(
9413 &self,
9414 out: &Tensor,
9415 batch1: &Tensor,
9416 batch2: &Tensor,
9417 ) -> Result<Tensor, TchError> {
9418 let mut c_tensors = [std::ptr::null_mut(); 1];
9419 unsafe_torch_err!(atg_addbmm_out(
9420 c_tensors.as_mut_ptr(),
9421 out.c_tensor,
9422 self.c_tensor,
9423 batch1.c_tensor,
9424 batch2.c_tensor
9425 ));
9426 Ok(Tensor { c_tensor: c_tensors[0] })
9427 }
9428
9429 pub fn f_addcdiv(&self, tensor1: &Tensor, tensor2: &Tensor) -> Result<Tensor, TchError> {
9430 let mut c_tensors = [std::ptr::null_mut(); 1];
9431 unsafe_torch_err!(atg_addcdiv(
9432 c_tensors.as_mut_ptr(),
9433 self.c_tensor,
9434 tensor1.c_tensor,
9435 tensor2.c_tensor
9436 ));
9437 Ok(Tensor { c_tensor: c_tensors[0] })
9438 }
9439
9440 pub fn f_addcdiv_(&mut self, tensor1: &Tensor, tensor2: &Tensor) -> Result<Tensor, TchError> {
9441 let mut c_tensors = [std::ptr::null_mut(); 1];
9442 unsafe_torch_err!(atg_addcdiv_(
9443 c_tensors.as_mut_ptr(),
9444 self.c_tensor,
9445 tensor1.c_tensor,
9446 tensor2.c_tensor
9447 ));
9448 Ok(Tensor { c_tensor: c_tensors[0] })
9449 }
9450
9451 pub fn f_addcdiv_out(
9452 &self,
9453 out: &Tensor,
9454 tensor1: &Tensor,
9455 tensor2: &Tensor,
9456 ) -> Result<Tensor, TchError> {
9457 let mut c_tensors = [std::ptr::null_mut(); 1];
9458 unsafe_torch_err!(atg_addcdiv_out(
9459 c_tensors.as_mut_ptr(),
9460 out.c_tensor,
9461 self.c_tensor,
9462 tensor1.c_tensor,
9463 tensor2.c_tensor
9464 ));
9465 Ok(Tensor { c_tensor: c_tensors[0] })
9466 }
9467
9468 pub fn f_addcmul(&self, tensor1: &Tensor, tensor2: &Tensor) -> Result<Tensor, TchError> {
9469 let mut c_tensors = [std::ptr::null_mut(); 1];
9470 unsafe_torch_err!(atg_addcmul(
9471 c_tensors.as_mut_ptr(),
9472 self.c_tensor,
9473 tensor1.c_tensor,
9474 tensor2.c_tensor
9475 ));
9476 Ok(Tensor { c_tensor: c_tensors[0] })
9477 }
9478
9479 pub fn f_addcmul_(&mut self, tensor1: &Tensor, tensor2: &Tensor) -> Result<Tensor, TchError> {
9480 let mut c_tensors = [std::ptr::null_mut(); 1];
9481 unsafe_torch_err!(atg_addcmul_(
9482 c_tensors.as_mut_ptr(),
9483 self.c_tensor,
9484 tensor1.c_tensor,
9485 tensor2.c_tensor
9486 ));
9487 Ok(Tensor { c_tensor: c_tensors[0] })
9488 }
9489
9490 pub fn f_addcmul_out(
9491 &self,
9492 out: &Tensor,
9493 tensor1: &Tensor,
9494 tensor2: &Tensor,
9495 ) -> Result<Tensor, TchError> {
9496 let mut c_tensors = [std::ptr::null_mut(); 1];
9497 unsafe_torch_err!(atg_addcmul_out(
9498 c_tensors.as_mut_ptr(),
9499 out.c_tensor,
9500 self.c_tensor,
9501 tensor1.c_tensor,
9502 tensor2.c_tensor
9503 ));
9504 Ok(Tensor { c_tensor: c_tensors[0] })
9505 }
9506
9507 pub fn f_addmm(&self, mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
9508 let mut c_tensors = [std::ptr::null_mut(); 1];
9509 unsafe_torch_err!(atg_addmm(
9510 c_tensors.as_mut_ptr(),
9511 self.c_tensor,
9512 mat1.c_tensor,
9513 mat2.c_tensor
9514 ));
9515 Ok(Tensor { c_tensor: c_tensors[0] })
9516 }
9517
9518 pub fn f_addmm_(&mut self, mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
9519 let mut c_tensors = [std::ptr::null_mut(); 1];
9520 unsafe_torch_err!(atg_addmm_(
9521 c_tensors.as_mut_ptr(),
9522 self.c_tensor,
9523 mat1.c_tensor,
9524 mat2.c_tensor
9525 ));
9526 Ok(Tensor { c_tensor: c_tensors[0] })
9527 }
9528
9529 pub fn f_addmm_out(
9530 &self,
9531 out: &Tensor,
9532 mat1: &Tensor,
9533 mat2: &Tensor,
9534 ) -> Result<Tensor, TchError> {
9535 let mut c_tensors = [std::ptr::null_mut(); 1];
9536 unsafe_torch_err!(atg_addmm_out(
9537 c_tensors.as_mut_ptr(),
9538 out.c_tensor,
9539 self.c_tensor,
9540 mat1.c_tensor,
9541 mat2.c_tensor
9542 ));
9543 Ok(Tensor { c_tensor: c_tensors[0] })
9544 }
9545
9546 pub fn f_addmv(&self, mat: &Tensor, vec: &Tensor) -> Result<Tensor, TchError> {
9547 let mut c_tensors = [std::ptr::null_mut(); 1];
9548 unsafe_torch_err!(atg_addmv(
9549 c_tensors.as_mut_ptr(),
9550 self.c_tensor,
9551 mat.c_tensor,
9552 vec.c_tensor
9553 ));
9554 Ok(Tensor { c_tensor: c_tensors[0] })
9555 }
9556
9557 pub fn f_addmv_(&mut self, mat: &Tensor, vec: &Tensor) -> Result<Tensor, TchError> {
9558 let mut c_tensors = [std::ptr::null_mut(); 1];
9559 unsafe_torch_err!(atg_addmv_(
9560 c_tensors.as_mut_ptr(),
9561 self.c_tensor,
9562 mat.c_tensor,
9563 vec.c_tensor
9564 ));
9565 Ok(Tensor { c_tensor: c_tensors[0] })
9566 }
9567
9568 pub fn f_addmv_out(
9569 &self,
9570 out: &Tensor,
9571 mat: &Tensor,
9572 vec: &Tensor,
9573 ) -> Result<Tensor, TchError> {
9574 let mut c_tensors = [std::ptr::null_mut(); 1];
9575 unsafe_torch_err!(atg_addmv_out(
9576 c_tensors.as_mut_ptr(),
9577 out.c_tensor,
9578 self.c_tensor,
9579 mat.c_tensor,
9580 vec.c_tensor
9581 ));
9582 Ok(Tensor { c_tensor: c_tensors[0] })
9583 }
9584
9585 pub fn f_addr(&self, vec1: &Tensor, vec2: &Tensor) -> Result<Tensor, TchError> {
9586 let mut c_tensors = [std::ptr::null_mut(); 1];
9587 unsafe_torch_err!(atg_addr(
9588 c_tensors.as_mut_ptr(),
9589 self.c_tensor,
9590 vec1.c_tensor,
9591 vec2.c_tensor
9592 ));
9593 Ok(Tensor { c_tensor: c_tensors[0] })
9594 }
9595
9596 pub fn f_addr_(&mut self, vec1: &Tensor, vec2: &Tensor) -> Result<Tensor, TchError> {
9597 let mut c_tensors = [std::ptr::null_mut(); 1];
9598 unsafe_torch_err!(atg_addr_(
9599 c_tensors.as_mut_ptr(),
9600 self.c_tensor,
9601 vec1.c_tensor,
9602 vec2.c_tensor
9603 ));
9604 Ok(Tensor { c_tensor: c_tensors[0] })
9605 }
9606
9607 pub fn f_addr_out(
9608 &self,
9609 out: &Tensor,
9610 vec1: &Tensor,
9611 vec2: &Tensor,
9612 ) -> Result<Tensor, TchError> {
9613 let mut c_tensors = [std::ptr::null_mut(); 1];
9614 unsafe_torch_err!(atg_addr_out(
9615 c_tensors.as_mut_ptr(),
9616 out.c_tensor,
9617 self.c_tensor,
9618 vec1.c_tensor,
9619 vec2.c_tensor
9620 ));
9621 Ok(Tensor { c_tensor: c_tensors[0] })
9622 }
9623
9624 pub fn f_adjoint(&self) -> Result<Tensor, TchError> {
9625 let mut c_tensors = [std::ptr::null_mut(); 1];
9626 unsafe_torch_err!(atg_adjoint(c_tensors.as_mut_ptr(), self.c_tensor));
9627 Ok(Tensor { c_tensor: c_tensors[0] })
9628 }
9629
9630 pub fn f_affine_grid_generator(
9631 theta: &Tensor,
9632 size: impl IntList,
9633 align_corners: bool,
9634 ) -> Result<Tensor, TchError> {
9635 let mut c_tensors = [std::ptr::null_mut(); 1];
9636 unsafe_torch_err!(atg_affine_grid_generator(
9637 c_tensors.as_mut_ptr(),
9638 theta.c_tensor,
9639 size.as_ptr(),
9640 size.len_i32(),
9641 if align_corners { 1 } else { 0 }
9642 ));
9643 Ok(Tensor { c_tensor: c_tensors[0] })
9644 }
9645
9646 pub fn f_affine_grid_generator_backward(
9647 grad: &Tensor,
9648 size: impl IntList,
9649 align_corners: bool,
9650 ) -> Result<Tensor, TchError> {
9651 let mut c_tensors = [std::ptr::null_mut(); 1];
9652 unsafe_torch_err!(atg_affine_grid_generator_backward(
9653 c_tensors.as_mut_ptr(),
9654 grad.c_tensor,
9655 size.as_ptr(),
9656 size.len_i32(),
9657 if align_corners { 1 } else { 0 }
9658 ));
9659 Ok(Tensor { c_tensor: c_tensors[0] })
9660 }
9661
9662 pub fn f_affine_grid_generator_out(
9663 out: &Tensor,
9664 theta: &Tensor,
9665 size: impl IntList,
9666 align_corners: bool,
9667 ) -> Result<Tensor, TchError> {
9668 let mut c_tensors = [std::ptr::null_mut(); 1];
9669 unsafe_torch_err!(atg_affine_grid_generator_out(
9670 c_tensors.as_mut_ptr(),
9671 out.c_tensor,
9672 theta.c_tensor,
9673 size.as_ptr(),
9674 size.len_i32(),
9675 if align_corners { 1 } else { 0 }
9676 ));
9677 Ok(Tensor { c_tensor: c_tensors[0] })
9678 }
9679
9680 pub fn f_alias(&self) -> Result<Tensor, TchError> {
9681 let mut c_tensors = [std::ptr::null_mut(); 1];
9682 unsafe_torch_err!(atg_alias(c_tensors.as_mut_ptr(), self.c_tensor));
9683 Ok(Tensor { c_tensor: c_tensors[0] })
9684 }
9685
9686 pub fn f_alias_copy(&self) -> Result<Tensor, TchError> {
9687 let mut c_tensors = [std::ptr::null_mut(); 1];
9688 unsafe_torch_err!(atg_alias_copy(c_tensors.as_mut_ptr(), self.c_tensor));
9689 Ok(Tensor { c_tensor: c_tensors[0] })
9690 }
9691
9692 pub fn f_alias_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
9693 let mut c_tensors = [std::ptr::null_mut(); 1];
9694 unsafe_torch_err!(atg_alias_copy_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
9695 Ok(Tensor { c_tensor: c_tensors[0] })
9696 }
9697
9698 pub fn f_align_as(&self, other: &Tensor) -> Result<Tensor, TchError> {
9699 let mut c_tensors = [std::ptr::null_mut(); 1];
9700 unsafe_torch_err!(atg_align_as(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
9701 Ok(Tensor { c_tensor: c_tensors[0] })
9702 }
9703
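    /// Unlike the fixed-arity ops above, `f_align_tensors` returns a variable
    /// number of tensors: the C call hands back a null-terminated array of
    /// tensor pointers, which is walked until the terminator and then released
    /// with `libc::free`.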
9704 pub fn f_align_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Vec<Tensor>, TchError> {
9705 let c_tensors =
9706 unsafe_torch_err!(atg_align_tensors(ptr_list(tensors).as_ptr(), tensors.len() as i32));
9707 let mut r__ = vec![];
9708 let mut i = 0;
9709 loop {
9710 let c__ = unsafe { *c_tensors.add(i) };
9711 if c__.is_null() {
9712 break;
9713 }
9714 r__.push(Tensor { c_tensor: c__ });
9715 i += 1;
9716 }
9717 unsafe { libc::free(c_tensors as *mut libc::c_void) }
9718 Ok(r__)
9719 }
9720
9721 pub fn f_all(&self) -> Result<Tensor, TchError> {
9722 let mut c_tensors = [std::ptr::null_mut(); 1];
9723 unsafe_torch_err!(atg_all(c_tensors.as_mut_ptr(), self.c_tensor));
9724 Ok(Tensor { c_tensor: c_tensors[0] })
9725 }
9726
9727 pub fn f_all_all_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
9728 let mut c_tensors = [std::ptr::null_mut(); 1];
9729 unsafe_torch_err!(atg_all_all_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
9730 Ok(Tensor { c_tensor: c_tensors[0] })
9731 }
9732
9733 pub fn f_all_dim(&self, dim: i64, keepdim: bool) -> Result<Tensor, TchError> {
9734 let mut c_tensors = [std::ptr::null_mut(); 1];
9735 unsafe_torch_err!(atg_all_dim(
9736 c_tensors.as_mut_ptr(),
9737 self.c_tensor,
9738 dim,
9739 if keepdim { 1 } else { 0 }
9740 ));
9741 Ok(Tensor { c_tensor: c_tensors[0] })
9742 }
9743
9744 pub fn f_all_dims(&self, dim: impl IntListOption, keepdim: bool) -> Result<Tensor, TchError> {
9745 let mut c_tensors = [std::ptr::null_mut(); 1];
9746 unsafe_torch_err!(atg_all_dims(
9747 c_tensors.as_mut_ptr(),
9748 self.c_tensor,
9749 dim.as_ptr(),
9750 dim.len_i32(),
9751 if keepdim { 1 } else { 0 }
9752 ));
9753 Ok(Tensor { c_tensor: c_tensors[0] })
9754 }
9755
9756 pub fn f_all_dims_out(
9757 &self,
9758 out: &Tensor,
9759 dim: impl IntListOption,
9760 keepdim: bool,
9761 ) -> Result<Tensor, TchError> {
9762 let mut c_tensors = [std::ptr::null_mut(); 1];
9763 unsafe_torch_err!(atg_all_dims_out(
9764 c_tensors.as_mut_ptr(),
9765 out.c_tensor,
9766 self.c_tensor,
9767 dim.as_ptr(),
9768 dim.len_i32(),
9769 if keepdim { 1 } else { 0 }
9770 ));
9771 Ok(Tensor { c_tensor: c_tensors[0] })
9772 }
9773
9774 pub fn f_all_out(&self, out: &Tensor, dim: i64, keepdim: bool) -> Result<Tensor, TchError> {
9775 let mut c_tensors = [std::ptr::null_mut(); 1];
9776 unsafe_torch_err!(atg_all_out(
9777 c_tensors.as_mut_ptr(),
9778 out.c_tensor,
9779 self.c_tensor,
9780 dim,
9781 if keepdim { 1 } else { 0 }
9782 ));
9783 Ok(Tensor { c_tensor: c_tensors[0] })
9784 }
9785
9786 pub fn f_allclose(
9787 &self,
9788 other: &Tensor,
9789 rtol: f64,
9790 atol: f64,
9791 equal_nan: bool,
9792 ) -> Result<bool, TchError> {
9793 let return_;
9794 unsafe_torch_err!(
9795 return_ = atg_allclose(
9796 self.c_tensor,
9797 other.c_tensor,
9798 rtol,
9799 atol,
9800 if equal_nan { 1 } else { 0 }
9801 )
9802 );
9803 Ok(return_ != 0)
9804 }
9805
9806 pub fn f_alpha_dropout(&self, p: f64, train: bool) -> Result<Tensor, TchError> {
9807 let mut c_tensors = [std::ptr::null_mut(); 1];
9808 unsafe_torch_err!(atg_alpha_dropout(
9809 c_tensors.as_mut_ptr(),
9810 self.c_tensor,
9811 p,
9812 if train { 1 } else { 0 }
9813 ));
9814 Ok(Tensor { c_tensor: c_tensors[0] })
9815 }
9816
9817 pub fn f_alpha_dropout_(&mut self, p: f64, train: bool) -> Result<Tensor, TchError> {
9818 let mut c_tensors = [std::ptr::null_mut(); 1];
9819 unsafe_torch_err!(atg_alpha_dropout_(
9820 c_tensors.as_mut_ptr(),
9821 self.c_tensor,
9822 p,
9823 if train { 1 } else { 0 }
9824 ));
9825 Ok(Tensor { c_tensor: c_tensors[0] })
9826 }
9827
9828 pub fn f_amax(&self, dim: impl IntList, keepdim: bool) -> Result<Tensor, TchError> {
9829 let mut c_tensors = [std::ptr::null_mut(); 1];
9830 unsafe_torch_err!(atg_amax(
9831 c_tensors.as_mut_ptr(),
9832 self.c_tensor,
9833 dim.as_ptr(),
9834 dim.len_i32(),
9835 if keepdim { 1 } else { 0 }
9836 ));
9837 Ok(Tensor { c_tensor: c_tensors[0] })
9838 }
9839
9840 pub fn f_amax_out(
9841 &self,
9842 out: &Tensor,
9843 dim: impl IntList,
9844 keepdim: bool,
9845 ) -> Result<Tensor, TchError> {
9846 let mut c_tensors = [std::ptr::null_mut(); 1];
9847 unsafe_torch_err!(atg_amax_out(
9848 c_tensors.as_mut_ptr(),
9849 out.c_tensor,
9850 self.c_tensor,
9851 dim.as_ptr(),
9852 dim.len_i32(),
9853 if keepdim { 1 } else { 0 }
9854 ));
9855 Ok(Tensor { c_tensor: c_tensors[0] })
9856 }
9857
9858 pub fn f_amin(&self, dim: impl IntList, keepdim: bool) -> Result<Tensor, TchError> {
9859 let mut c_tensors = [std::ptr::null_mut(); 1];
9860 unsafe_torch_err!(atg_amin(
9861 c_tensors.as_mut_ptr(),
9862 self.c_tensor,
9863 dim.as_ptr(),
9864 dim.len_i32(),
9865 if keepdim { 1 } else { 0 }
9866 ));
9867 Ok(Tensor { c_tensor: c_tensors[0] })
9868 }
9869
9870 pub fn f_amin_out(
9871 &self,
9872 out: &Tensor,
9873 dim: impl IntList,
9874 keepdim: bool,
9875 ) -> Result<Tensor, TchError> {
9876 let mut c_tensors = [std::ptr::null_mut(); 1];
9877 unsafe_torch_err!(atg_amin_out(
9878 c_tensors.as_mut_ptr(),
9879 out.c_tensor,
9880 self.c_tensor,
9881 dim.as_ptr(),
9882 dim.len_i32(),
9883 if keepdim { 1 } else { 0 }
9884 ));
9885 Ok(Tensor { c_tensor: c_tensors[0] })
9886 }
9887
9888 pub fn f_aminmax(
9889 &self,
9890 dim: impl Into<Option<i64>>,
9891 keepdim: bool,
9892 ) -> Result<(Tensor, Tensor), TchError> {
9893 let dim = dim.into();
9894 let mut c_tensors = [std::ptr::null_mut(); 2];
9895 unsafe_torch_err!(atg_aminmax(
9896 c_tensors.as_mut_ptr(),
9897 self.c_tensor,
9898 dim.unwrap_or(0i64),
9899 dim.is_none() as i8,
9900 if keepdim { 1 } else { 0 }
9901 ));
9902 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
9903 }
9904
9905 pub fn f_aminmax_out(
9906 &self,
9907 min: &Tensor,
9908 max: &Tensor,
9909 dim: impl Into<Option<i64>>,
9910 keepdim: bool,
9911 ) -> Result<(Tensor, Tensor), TchError> {
9912 let dim = dim.into();
9913 let mut c_tensors = [std::ptr::null_mut(); 2];
9914 unsafe_torch_err!(atg_aminmax_out(
9915 c_tensors.as_mut_ptr(),
9916 min.c_tensor,
9917 max.c_tensor,
9918 self.c_tensor,
9919 dim.unwrap_or(0i64),
9920 dim.is_none() as i8,
9921 if keepdim { 1 } else { 0 }
9922 ));
9923 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
9924 }
9925
9926 pub fn f_angle(&self) -> Result<Tensor, TchError> {
9927 let mut c_tensors = [std::ptr::null_mut(); 1];
9928 unsafe_torch_err!(atg_angle(c_tensors.as_mut_ptr(), self.c_tensor));
9929 Ok(Tensor { c_tensor: c_tensors[0] })
9930 }
9931
9932 pub fn f_angle_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
9933 let mut c_tensors = [std::ptr::null_mut(); 1];
9934 unsafe_torch_err!(atg_angle_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
9935 Ok(Tensor { c_tensor: c_tensors[0] })
9936 }
9937
9938 pub fn f_any(&self) -> Result<Tensor, TchError> {
9939 let mut c_tensors = [std::ptr::null_mut(); 1];
9940 unsafe_torch_err!(atg_any(c_tensors.as_mut_ptr(), self.c_tensor));
9941 Ok(Tensor { c_tensor: c_tensors[0] })
9942 }
9943
9944 pub fn f_any_all_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
9945 let mut c_tensors = [std::ptr::null_mut(); 1];
9946 unsafe_torch_err!(atg_any_all_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
9947 Ok(Tensor { c_tensor: c_tensors[0] })
9948 }
9949
9950 pub fn f_any_dim(&self, dim: i64, keepdim: bool) -> Result<Tensor, TchError> {
9951 let mut c_tensors = [std::ptr::null_mut(); 1];
9952 unsafe_torch_err!(atg_any_dim(
9953 c_tensors.as_mut_ptr(),
9954 self.c_tensor,
9955 dim,
9956 if keepdim { 1 } else { 0 }
9957 ));
9958 Ok(Tensor { c_tensor: c_tensors[0] })
9959 }
9960
9961 pub fn f_any_dims(&self, dim: impl IntListOption, keepdim: bool) -> Result<Tensor, TchError> {
9962 let mut c_tensors = [std::ptr::null_mut(); 1];
9963 unsafe_torch_err!(atg_any_dims(
9964 c_tensors.as_mut_ptr(),
9965 self.c_tensor,
9966 dim.as_ptr(),
9967 dim.len_i32(),
9968 if keepdim { 1 } else { 0 }
9969 ));
9970 Ok(Tensor { c_tensor: c_tensors[0] })
9971 }
9972
9973 pub fn f_any_dims_out(
9974 &self,
9975 out: &Tensor,
9976 dim: impl IntListOption,
9977 keepdim: bool,
9978 ) -> Result<Tensor, TchError> {
9979 let mut c_tensors = [std::ptr::null_mut(); 1];
9980 unsafe_torch_err!(atg_any_dims_out(
9981 c_tensors.as_mut_ptr(),
9982 out.c_tensor,
9983 self.c_tensor,
9984 dim.as_ptr(),
9985 dim.len_i32(),
9986 if keepdim { 1 } else { 0 }
9987 ));
9988 Ok(Tensor { c_tensor: c_tensors[0] })
9989 }
9990
9991 pub fn f_any_out(&self, out: &Tensor, dim: i64, keepdim: bool) -> Result<Tensor, TchError> {
9992 let mut c_tensors = [std::ptr::null_mut(); 1];
9993 unsafe_torch_err!(atg_any_out(
9994 c_tensors.as_mut_ptr(),
9995 out.c_tensor,
9996 self.c_tensor,
9997 dim,
9998 if keepdim { 1 } else { 0 }
9999 ));
10000 Ok(Tensor { c_tensor: c_tensors[0] })
10001 }
10002
10003 pub fn f_arange<S: Into<Scalar>>(end: S, options: (Kind, Device)) -> Result<Tensor, TchError> {
10004 let mut c_tensors = [std::ptr::null_mut(); 1];
10005 unsafe_torch_err!(atg_arange(
10006 c_tensors.as_mut_ptr(),
10007 end.into().c_scalar,
10008 options.0.c_int(),
10009 options.1.c_int()
10010 ));
10011 Ok(Tensor { c_tensor: c_tensors[0] })
10012 }
10013
10014 pub fn f_arange_start<S: Into<Scalar>>(
10015 start: S,
10016 end: S,
10017 options: (Kind, Device),
10018 ) -> Result<Tensor, TchError> {
10019 let mut c_tensors = [std::ptr::null_mut(); 1];
10020 unsafe_torch_err!(atg_arange_start(
10021 c_tensors.as_mut_ptr(),
10022 start.into().c_scalar,
10023 end.into().c_scalar,
10024 options.0.c_int(),
10025 options.1.c_int()
10026 ));
10027 Ok(Tensor { c_tensor: c_tensors[0] })
10028 }
10029
10030 pub fn f_arange_start_step<S: Into<Scalar>>(
10031 start: S,
10032 end: S,
10033 step: S,
10034 options: (Kind, Device),
10035 ) -> Result<Tensor, TchError> {
10036 let mut c_tensors = [std::ptr::null_mut(); 1];
10037 unsafe_torch_err!(atg_arange_start_step(
10038 c_tensors.as_mut_ptr(),
10039 start.into().c_scalar,
10040 end.into().c_scalar,
10041 step.into().c_scalar,
10042 options.0.c_int(),
10043 options.1.c_int()
10044 ));
10045 Ok(Tensor { c_tensor: c_tensors[0] })
10046 }
10047
10048 pub fn f_arccos(&self) -> Result<Tensor, TchError> {
10049 let mut c_tensors = [std::ptr::null_mut(); 1];
10050 unsafe_torch_err!(atg_arccos(c_tensors.as_mut_ptr(), self.c_tensor));
10051 Ok(Tensor { c_tensor: c_tensors[0] })
10052 }
10053
10054 pub fn f_arccos_(&mut self) -> Result<Tensor, TchError> {
10055 let mut c_tensors = [std::ptr::null_mut(); 1];
10056 unsafe_torch_err!(atg_arccos_(c_tensors.as_mut_ptr(), self.c_tensor));
10057 Ok(Tensor { c_tensor: c_tensors[0] })
10058 }
10059
10060 pub fn f_arccos_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
10061 let mut c_tensors = [std::ptr::null_mut(); 1];
10062 unsafe_torch_err!(atg_arccos_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
10063 Ok(Tensor { c_tensor: c_tensors[0] })
10064 }
10065
10066 pub fn f_arccosh(&self) -> Result<Tensor, TchError> {
10067 let mut c_tensors = [std::ptr::null_mut(); 1];
10068 unsafe_torch_err!(atg_arccosh(c_tensors.as_mut_ptr(), self.c_tensor));
10069 Ok(Tensor { c_tensor: c_tensors[0] })
10070 }
10071
10072 pub fn f_arccosh_(&mut self) -> Result<Tensor, TchError> {
10073 let mut c_tensors = [std::ptr::null_mut(); 1];
10074 unsafe_torch_err!(atg_arccosh_(c_tensors.as_mut_ptr(), self.c_tensor));
10075 Ok(Tensor { c_tensor: c_tensors[0] })
10076 }
10077
10078 pub fn f_arccosh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
10079 let mut c_tensors = [std::ptr::null_mut(); 1];
10080 unsafe_torch_err!(atg_arccosh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
10081 Ok(Tensor { c_tensor: c_tensors[0] })
10082 }
10083
10084 pub fn f_arcsin(&self) -> Result<Tensor, TchError> {
10085 let mut c_tensors = [std::ptr::null_mut(); 1];
10086 unsafe_torch_err!(atg_arcsin(c_tensors.as_mut_ptr(), self.c_tensor));
10087 Ok(Tensor { c_tensor: c_tensors[0] })
10088 }
10089
10090 pub fn f_arcsin_(&mut self) -> Result<Tensor, TchError> {
10091 let mut c_tensors = [std::ptr::null_mut(); 1];
10092 unsafe_torch_err!(atg_arcsin_(c_tensors.as_mut_ptr(), self.c_tensor));
10093 Ok(Tensor { c_tensor: c_tensors[0] })
10094 }
10095
10096 pub fn f_arcsin_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
10097 let mut c_tensors = [std::ptr::null_mut(); 1];
10098 unsafe_torch_err!(atg_arcsin_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
10099 Ok(Tensor { c_tensor: c_tensors[0] })
10100 }
10101
10102 pub fn f_arcsinh(&self) -> Result<Tensor, TchError> {
10103 let mut c_tensors = [std::ptr::null_mut(); 1];
10104 unsafe_torch_err!(atg_arcsinh(c_tensors.as_mut_ptr(), self.c_tensor));
10105 Ok(Tensor { c_tensor: c_tensors[0] })
10106 }
10107
10108 pub fn f_arcsinh_(&mut self) -> Result<Tensor, TchError> {
10109 let mut c_tensors = [std::ptr::null_mut(); 1];
10110 unsafe_torch_err!(atg_arcsinh_(c_tensors.as_mut_ptr(), self.c_tensor));
10111 Ok(Tensor { c_tensor: c_tensors[0] })
10112 }
10113
10114 pub fn f_arcsinh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
10115 let mut c_tensors = [std::ptr::null_mut(); 1];
10116 unsafe_torch_err!(atg_arcsinh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
10117 Ok(Tensor { c_tensor: c_tensors[0] })
10118 }
10119
10120 pub fn f_arctan(&self) -> Result<Tensor, TchError> {
10121 let mut c_tensors = [std::ptr::null_mut(); 1];
10122 unsafe_torch_err!(atg_arctan(c_tensors.as_mut_ptr(), self.c_tensor));
10123 Ok(Tensor { c_tensor: c_tensors[0] })
10124 }
10125
10126 pub fn f_arctan2(&self, other: &Tensor) -> Result<Tensor, TchError> {
10127 let mut c_tensors = [std::ptr::null_mut(); 1];
10128 unsafe_torch_err!(atg_arctan2(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
10129 Ok(Tensor { c_tensor: c_tensors[0] })
10130 }
10131
10132 pub fn f_arctan2_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
10133 let mut c_tensors = [std::ptr::null_mut(); 1];
10134 unsafe_torch_err!(atg_arctan2_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
10135 Ok(Tensor { c_tensor: c_tensors[0] })
10136 }
10137
10138 pub fn f_arctan2_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
10139 let mut c_tensors = [std::ptr::null_mut(); 1];
10140 unsafe_torch_err!(atg_arctan2_out(
10141 c_tensors.as_mut_ptr(),
10142 out.c_tensor,
10143 self.c_tensor,
10144 other.c_tensor
10145 ));
10146 Ok(Tensor { c_tensor: c_tensors[0] })
10147 }
10148
10149 pub fn f_arctan_(&mut self) -> Result<Tensor, TchError> {
10150 let mut c_tensors = [std::ptr::null_mut(); 1];
10151 unsafe_torch_err!(atg_arctan_(c_tensors.as_mut_ptr(), self.c_tensor));
10152 Ok(Tensor { c_tensor: c_tensors[0] })
10153 }
10154
10155 pub fn f_arctan_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
10156 let mut c_tensors = [std::ptr::null_mut(); 1];
10157 unsafe_torch_err!(atg_arctan_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
10158 Ok(Tensor { c_tensor: c_tensors[0] })
10159 }
10160
10161 pub fn f_arctanh(&self) -> Result<Tensor, TchError> {
10162 let mut c_tensors = [std::ptr::null_mut(); 1];
10163 unsafe_torch_err!(atg_arctanh(c_tensors.as_mut_ptr(), self.c_tensor));
10164 Ok(Tensor { c_tensor: c_tensors[0] })
10165 }
10166
10167 pub fn f_arctanh_(&mut self) -> Result<Tensor, TchError> {
10168 let mut c_tensors = [std::ptr::null_mut(); 1];
10169 unsafe_torch_err!(atg_arctanh_(c_tensors.as_mut_ptr(), self.c_tensor));
10170 Ok(Tensor { c_tensor: c_tensors[0] })
10171 }
10172
10173 pub fn f_arctanh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
10174 let mut c_tensors = [std::ptr::null_mut(); 1];
10175 unsafe_torch_err!(atg_arctanh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
10176 Ok(Tensor { c_tensor: c_tensors[0] })
10177 }
10178
10179 pub fn f_argmax(&self, dim: impl Into<Option<i64>>, keepdim: bool) -> Result<Tensor, TchError> {
10180 let dim = dim.into();
10181 let mut c_tensors = [std::ptr::null_mut(); 1];
10182 unsafe_torch_err!(atg_argmax(
10183 c_tensors.as_mut_ptr(),
10184 self.c_tensor,
10185 dim.unwrap_or(0i64),
10186 dim.is_none() as i8,
10187 if keepdim { 1 } else { 0 }
10188 ));
10189 Ok(Tensor { c_tensor: c_tensors[0] })
10190 }
10191
10192 pub fn f_argmax_out(
10193 &self,
10194 out: &Tensor,
10195 dim: impl Into<Option<i64>>,
10196 keepdim: bool,
10197 ) -> Result<Tensor, TchError> {
10198 let dim = dim.into();
10199 let mut c_tensors = [std::ptr::null_mut(); 1];
10200 unsafe_torch_err!(atg_argmax_out(
10201 c_tensors.as_mut_ptr(),
10202 out.c_tensor,
10203 self.c_tensor,
10204 dim.unwrap_or(0i64),
10205 dim.is_none() as i8,
10206 if keepdim { 1 } else { 0 }
10207 ));
10208 Ok(Tensor { c_tensor: c_tensors[0] })
10209 }
10210
10211 pub fn f_argmin(&self, dim: impl Into<Option<i64>>, keepdim: bool) -> Result<Tensor, TchError> {
10212 let dim = dim.into();
10213 let mut c_tensors = [std::ptr::null_mut(); 1];
10214 unsafe_torch_err!(atg_argmin(
10215 c_tensors.as_mut_ptr(),
10216 self.c_tensor,
10217 dim.unwrap_or(0i64),
10218 dim.is_none() as i8,
10219 if keepdim { 1 } else { 0 }
10220 ));
10221 Ok(Tensor { c_tensor: c_tensors[0] })
10222 }
10223
10224 pub fn f_argmin_out(
10225 &self,
10226 out: &Tensor,
10227 dim: impl Into<Option<i64>>,
10228 keepdim: bool,
10229 ) -> Result<Tensor, TchError> {
10230 let dim = dim.into();
10231 let mut c_tensors = [std::ptr::null_mut(); 1];
10232 unsafe_torch_err!(atg_argmin_out(
10233 c_tensors.as_mut_ptr(),
10234 out.c_tensor,
10235 self.c_tensor,
10236 dim.unwrap_or(0i64),
10237 dim.is_none() as i8,
10238 if keepdim { 1 } else { 0 }
10239 ));
10240 Ok(Tensor { c_tensor: c_tensors[0] })
10241 }
10242
10243 pub fn f_argsort(&self, dim: i64, descending: bool) -> Result<Tensor, TchError> {
10244 let mut c_tensors = [std::ptr::null_mut(); 1];
10245 unsafe_torch_err!(atg_argsort(
10246 c_tensors.as_mut_ptr(),
10247 self.c_tensor,
10248 dim,
10249 if descending { 1 } else { 0 }
10250 ));
10251 Ok(Tensor { c_tensor: c_tensors[0] })
10252 }
10253
10254 pub fn f_argsort_stable(
10255 &self,
10256 stable: bool,
10257 dim: i64,
10258 descending: bool,
10259 ) -> Result<Tensor, TchError> {
10260 let mut c_tensors = [std::ptr::null_mut(); 1];
10261 unsafe_torch_err!(atg_argsort_stable(
10262 c_tensors.as_mut_ptr(),
10263 self.c_tensor,
10264 if stable { 1 } else { 0 },
10265 dim,
10266 if descending { 1 } else { 0 }
10267 ));
10268 Ok(Tensor { c_tensor: c_tensors[0] })
10269 }
10270
10271 pub fn f_argsort_stable_out(
10272 &self,
10273 out: &Tensor,
10274 stable: bool,
10275 dim: i64,
10276 descending: bool,
10277 ) -> Result<Tensor, TchError> {
10278 let mut c_tensors = [std::ptr::null_mut(); 1];
10279 unsafe_torch_err!(atg_argsort_stable_out(
10280 c_tensors.as_mut_ptr(),
10281 out.c_tensor,
10282 self.c_tensor,
10283 if stable { 1 } else { 0 },
10284 dim,
10285 if descending { 1 } else { 0 }
10286 ));
10287 Ok(Tensor { c_tensor: c_tensors[0] })
10288 }
10289
10290 pub fn f_argwhere(&self) -> Result<Tensor, TchError> {
10291 let mut c_tensors = [std::ptr::null_mut(); 1];
10292 unsafe_torch_err!(atg_argwhere(c_tensors.as_mut_ptr(), self.c_tensor));
10293 Ok(Tensor { c_tensor: c_tensors[0] })
10294 }
10295
10296 pub fn f_as_strided(
10297 &self,
10298 size: impl IntList,
10299 stride: impl IntList,
10300 storage_offset: impl Into<Option<i64>>,
10301 ) -> Result<Tensor, TchError> {
10302 let storage_offset = storage_offset.into();
10303 let mut c_tensors = [std::ptr::null_mut(); 1];
10304 unsafe_torch_err!(atg_as_strided(
10305 c_tensors.as_mut_ptr(),
10306 self.c_tensor,
10307 size.as_ptr(),
10308 size.len_i32(),
10309 stride.as_ptr(),
10310 stride.len_i32(),
10311 storage_offset.unwrap_or(0i64),
10312 storage_offset.is_none() as i8
10313 ));
10314 Ok(Tensor { c_tensor: c_tensors[0] })
10315 }
10316
10317 pub fn f_as_strided_(
10318 &mut self,
10319 size: impl IntList,
10320 stride: impl IntList,
10321 storage_offset: impl Into<Option<i64>>,
10322 ) -> Result<Tensor, TchError> {
10323 let storage_offset = storage_offset.into();
10324 let mut c_tensors = [std::ptr::null_mut(); 1];
10325 unsafe_torch_err!(atg_as_strided_(
10326 c_tensors.as_mut_ptr(),
10327 self.c_tensor,
10328 size.as_ptr(),
10329 size.len_i32(),
10330 stride.as_ptr(),
10331 stride.len_i32(),
10332 storage_offset.unwrap_or(0i64),
10333 storage_offset.is_none() as i8
10334 ));
10335 Ok(Tensor { c_tensor: c_tensors[0] })
10336 }
10337
10338 pub fn f_as_strided_copy(
10339 &self,
10340 size: impl IntList,
10341 stride: impl IntList,
10342 storage_offset: impl Into<Option<i64>>,
10343 ) -> Result<Tensor, TchError> {
10344 let storage_offset = storage_offset.into();
10345 let mut c_tensors = [std::ptr::null_mut(); 1];
10346 unsafe_torch_err!(atg_as_strided_copy(
10347 c_tensors.as_mut_ptr(),
10348 self.c_tensor,
10349 size.as_ptr(),
10350 size.len_i32(),
10351 stride.as_ptr(),
10352 stride.len_i32(),
10353 storage_offset.unwrap_or(0i64),
10354 storage_offset.is_none() as i8
10355 ));
10356 Ok(Tensor { c_tensor: c_tensors[0] })
10357 }
10358
10359 pub fn f_as_strided_copy_out(
10360 &self,
10361 out: &Tensor,
10362 size: impl IntList,
10363 stride: impl IntList,
10364 storage_offset: impl Into<Option<i64>>,
10365 ) -> Result<Tensor, TchError> {
10366 let storage_offset = storage_offset.into();
10367 let mut c_tensors = [std::ptr::null_mut(); 1];
10368 unsafe_torch_err!(atg_as_strided_copy_out(
10369 c_tensors.as_mut_ptr(),
10370 out.c_tensor,
10371 self.c_tensor,
10372 size.as_ptr(),
10373 size.len_i32(),
10374 stride.as_ptr(),
10375 stride.len_i32(),
10376 storage_offset.unwrap_or(0i64),
10377 storage_offset.is_none() as i8
10378 ));
10379 Ok(Tensor { c_tensor: c_tensors[0] })
10380 }
10381
10382 pub fn f_as_strided_scatter(
10383 &self,
10384 src: &Tensor,
10385 size: impl IntList,
10386 stride: impl IntList,
10387 storage_offset: impl Into<Option<i64>>,
10388 ) -> Result<Tensor, TchError> {
10389 let storage_offset = storage_offset.into();
10390 let mut c_tensors = [std::ptr::null_mut(); 1];
10391 unsafe_torch_err!(atg_as_strided_scatter(
10392 c_tensors.as_mut_ptr(),
10393 self.c_tensor,
10394 src.c_tensor,
10395 size.as_ptr(),
10396 size.len_i32(),
10397 stride.as_ptr(),
10398 stride.len_i32(),
10399 storage_offset.unwrap_or(0i64),
10400 storage_offset.is_none() as i8
10401 ));
10402 Ok(Tensor { c_tensor: c_tensors[0] })
10403 }
10404
10405 pub fn f_as_strided_scatter_out(
10406 &self,
10407 out: &Tensor,
10408 src: &Tensor,
10409 size: impl IntList,
10410 stride: impl IntList,
10411 storage_offset: impl Into<Option<i64>>,
10412 ) -> Result<Tensor, TchError> {
10413 let storage_offset = storage_offset.into();
10414 let mut c_tensors = [std::ptr::null_mut(); 1];
10415 unsafe_torch_err!(atg_as_strided_scatter_out(
10416 c_tensors.as_mut_ptr(),
10417 out.c_tensor,
10418 self.c_tensor,
10419 src.c_tensor,
10420 size.as_ptr(),
10421 size.len_i32(),
10422 stride.as_ptr(),
10423 stride.len_i32(),
10424 storage_offset.unwrap_or(0i64),
10425 storage_offset.is_none() as i8
10426 ));
10427 Ok(Tensor { c_tensor: c_tensors[0] })
10428 }
10429
10430 pub fn f_asin(&self) -> Result<Tensor, TchError> {
10431 let mut c_tensors = [std::ptr::null_mut(); 1];
10432 unsafe_torch_err!(atg_asin(c_tensors.as_mut_ptr(), self.c_tensor));
10433 Ok(Tensor { c_tensor: c_tensors[0] })
10434 }
10435
10436 pub fn f_asin_(&mut self) -> Result<Tensor, TchError> {
10437 let mut c_tensors = [std::ptr::null_mut(); 1];
10438 unsafe_torch_err!(atg_asin_(c_tensors.as_mut_ptr(), self.c_tensor));
10439 Ok(Tensor { c_tensor: c_tensors[0] })
10440 }
10441
10442 pub fn f_asin_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
10443 let mut c_tensors = [std::ptr::null_mut(); 1];
10444 unsafe_torch_err!(atg_asin_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
10445 Ok(Tensor { c_tensor: c_tensors[0] })
10446 }
10447
10448 pub fn f_asinh(&self) -> Result<Tensor, TchError> {
10449 let mut c_tensors = [std::ptr::null_mut(); 1];
10450 unsafe_torch_err!(atg_asinh(c_tensors.as_mut_ptr(), self.c_tensor));
10451 Ok(Tensor { c_tensor: c_tensors[0] })
10452 }
10453
10454 pub fn f_asinh_(&mut self) -> Result<Tensor, TchError> {
10455 let mut c_tensors = [std::ptr::null_mut(); 1];
10456 unsafe_torch_err!(atg_asinh_(c_tensors.as_mut_ptr(), self.c_tensor));
10457 Ok(Tensor { c_tensor: c_tensors[0] })
10458 }
10459
10460 pub fn f_asinh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
10461 let mut c_tensors = [std::ptr::null_mut(); 1];
10462 unsafe_torch_err!(atg_asinh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
10463 Ok(Tensor { c_tensor: c_tensors[0] })
10464 }
10465
10466 pub fn f_atan(&self) -> Result<Tensor, TchError> {
10467 let mut c_tensors = [std::ptr::null_mut(); 1];
10468 unsafe_torch_err!(atg_atan(c_tensors.as_mut_ptr(), self.c_tensor));
10469 Ok(Tensor { c_tensor: c_tensors[0] })
10470 }
10471
10472 pub fn f_atan2(&self, other: &Tensor) -> Result<Tensor, TchError> {
10473 let mut c_tensors = [std::ptr::null_mut(); 1];
10474 unsafe_torch_err!(atg_atan2(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
10475 Ok(Tensor { c_tensor: c_tensors[0] })
10476 }
10477
10478 pub fn f_atan2_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
10479 let mut c_tensors = [std::ptr::null_mut(); 1];
10480 unsafe_torch_err!(atg_atan2_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
10481 Ok(Tensor { c_tensor: c_tensors[0] })
10482 }
10483
10484 pub fn f_atan2_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
10485 let mut c_tensors = [std::ptr::null_mut(); 1];
10486 unsafe_torch_err!(atg_atan2_out(
10487 c_tensors.as_mut_ptr(),
10488 out.c_tensor,
10489 self.c_tensor,
10490 other.c_tensor
10491 ));
10492 Ok(Tensor { c_tensor: c_tensors[0] })
10493 }
10494
10495 pub fn f_atan_(&mut self) -> Result<Tensor, TchError> {
10496 let mut c_tensors = [std::ptr::null_mut(); 1];
10497 unsafe_torch_err!(atg_atan_(c_tensors.as_mut_ptr(), self.c_tensor));
10498 Ok(Tensor { c_tensor: c_tensors[0] })
10499 }
10500
10501 pub fn f_atan_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
10502 let mut c_tensors = [std::ptr::null_mut(); 1];
10503 unsafe_torch_err!(atg_atan_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
10504 Ok(Tensor { c_tensor: c_tensors[0] })
10505 }
10506
10507 pub fn f_atanh(&self) -> Result<Tensor, TchError> {
10508 let mut c_tensors = [std::ptr::null_mut(); 1];
10509 unsafe_torch_err!(atg_atanh(c_tensors.as_mut_ptr(), self.c_tensor));
10510 Ok(Tensor { c_tensor: c_tensors[0] })
10511 }
10512
10513 pub fn f_atanh_(&mut self) -> Result<Tensor, TchError> {
10514 let mut c_tensors = [std::ptr::null_mut(); 1];
10515 unsafe_torch_err!(atg_atanh_(c_tensors.as_mut_ptr(), self.c_tensor));
10516 Ok(Tensor { c_tensor: c_tensors[0] })
10517 }
10518
10519 pub fn f_atanh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
10520 let mut c_tensors = [std::ptr::null_mut(); 1];
10521 unsafe_torch_err!(atg_atanh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
10522 Ok(Tensor { c_tensor: c_tensors[0] })
10523 }
10524
10525 pub fn f_atleast_1d(&self) -> Result<Tensor, TchError> {
10526 let mut c_tensors = [std::ptr::null_mut(); 1];
10527 unsafe_torch_err!(atg_atleast_1d(c_tensors.as_mut_ptr(), self.c_tensor));
10528 Ok(Tensor { c_tensor: c_tensors[0] })
10529 }
10530
10531 pub fn f_atleast_1d_sequence<T: Borrow<Tensor>>(
10532 tensors: &[T],
10533 ) -> Result<Vec<Tensor>, TchError> {
10534 let c_tensors = unsafe_torch_err!(atg_atleast_1d_sequence(
10535 ptr_list(tensors).as_ptr(),
10536 tensors.len() as i32
10537 ));
10538 let mut r__ = vec![];
10539 let mut i = 0;
10540 loop {
10541 let c__ = unsafe { *c_tensors.add(i) };
10542 if c__.is_null() {
10543 break;
10544 }
10545 r__.push(Tensor { c_tensor: c__ });
10546 i += 1;
10547 }
10548 unsafe { libc::free(c_tensors as *mut libc::c_void) }
10549 Ok(r__)
10550 }
10551
10552 pub fn f_atleast_2d(&self) -> Result<Tensor, TchError> {
10553 let mut c_tensors = [std::ptr::null_mut(); 1];
10554 unsafe_torch_err!(atg_atleast_2d(c_tensors.as_mut_ptr(), self.c_tensor));
10555 Ok(Tensor { c_tensor: c_tensors[0] })
10556 }
10557
10558 pub fn f_atleast_2d_sequence<T: Borrow<Tensor>>(
10559 tensors: &[T],
10560 ) -> Result<Vec<Tensor>, TchError> {
10561 let c_tensors = unsafe_torch_err!(atg_atleast_2d_sequence(
10562 ptr_list(tensors).as_ptr(),
10563 tensors.len() as i32
10564 ));
10565 let mut r__ = vec![];
10566 let mut i = 0;
10567 loop {
10568 let c__ = unsafe { *c_tensors.add(i) };
10569 if c__.is_null() {
10570 break;
10571 }
10572 r__.push(Tensor { c_tensor: c__ });
10573 i += 1;
10574 }
10575 unsafe { libc::free(c_tensors as *mut libc::c_void) }
10576 Ok(r__)
10577 }
10578
10579 pub fn f_atleast_3d(&self) -> Result<Tensor, TchError> {
10580 let mut c_tensors = [std::ptr::null_mut(); 1];
10581 unsafe_torch_err!(atg_atleast_3d(c_tensors.as_mut_ptr(), self.c_tensor));
10582 Ok(Tensor { c_tensor: c_tensors[0] })
10583 }
10584
10585 pub fn f_atleast_3d_sequence<T: Borrow<Tensor>>(
10586 tensors: &[T],
10587 ) -> Result<Vec<Tensor>, TchError> {
10588 let c_tensors = unsafe_torch_err!(atg_atleast_3d_sequence(
10589 ptr_list(tensors).as_ptr(),
10590 tensors.len() as i32
10591 ));
10592 let mut r__ = vec![];
10593 let mut i = 0;
10594 loop {
10595 let c__ = unsafe { *c_tensors.add(i) };
10596 if c__.is_null() {
10597 break;
10598 }
10599 r__.push(Tensor { c_tensor: c__ });
10600 i += 1;
10601 }
10602 unsafe { libc::free(c_tensors as *mut libc::c_void) }
10603 Ok(r__)
10604 }
10605
10606 pub fn f_avg_pool1d(
10607 &self,
10608 kernel_size: impl IntList,
10609 stride: impl IntList,
10610 padding: impl IntList,
10611 ceil_mode: bool,
10612 count_include_pad: bool,
10613 ) -> Result<Tensor, TchError> {
10614 let mut c_tensors = [std::ptr::null_mut(); 1];
10615 unsafe_torch_err!(atg_avg_pool1d(
10616 c_tensors.as_mut_ptr(),
10617 self.c_tensor,
10618 kernel_size.as_ptr(),
10619 kernel_size.len_i32(),
10620 stride.as_ptr(),
10621 stride.len_i32(),
10622 padding.as_ptr(),
10623 padding.len_i32(),
10624 if ceil_mode { 1 } else { 0 },
10625 if count_include_pad { 1 } else { 0 }
10626 ));
10627 Ok(Tensor { c_tensor: c_tensors[0] })
10628 }
10629
10630 pub fn f_avg_pool2d(
10631 &self,
10632 kernel_size: impl IntList,
10633 stride: impl IntList,
10634 padding: impl IntList,
10635 ceil_mode: bool,
10636 count_include_pad: bool,
10637 divisor_override: impl Into<Option<i64>>,
10638 ) -> Result<Tensor, TchError> {
10639 let divisor_override = divisor_override.into();
10640 let mut c_tensors = [std::ptr::null_mut(); 1];
10641 unsafe_torch_err!(atg_avg_pool2d(
10642 c_tensors.as_mut_ptr(),
10643 self.c_tensor,
10644 kernel_size.as_ptr(),
10645 kernel_size.len_i32(),
10646 stride.as_ptr(),
10647 stride.len_i32(),
10648 padding.as_ptr(),
10649 padding.len_i32(),
10650 if ceil_mode { 1 } else { 0 },
10651 if count_include_pad { 1 } else { 0 },
10652 divisor_override.unwrap_or(0i64),
10653 divisor_override.is_none() as i8
10654 ));
10655 Ok(Tensor { c_tensor: c_tensors[0] })
10656 }
10657
10658 pub fn f_avg_pool2d_backward(
10659 &self,
10660 grad_output: &Tensor,
10661 kernel_size: impl IntList,
10662 stride: impl IntList,
10663 padding: impl IntList,
10664 ceil_mode: bool,
10665 count_include_pad: bool,
10666 divisor_override: impl Into<Option<i64>>,
10667 ) -> Result<Tensor, TchError> {
10668 let divisor_override = divisor_override.into();
10669 let mut c_tensors = [std::ptr::null_mut(); 1];
10670 unsafe_torch_err!(atg_avg_pool2d_backward(
10671 c_tensors.as_mut_ptr(),
10672 grad_output.c_tensor,
10673 self.c_tensor,
10674 kernel_size.as_ptr(),
10675 kernel_size.len_i32(),
10676 stride.as_ptr(),
10677 stride.len_i32(),
10678 padding.as_ptr(),
10679 padding.len_i32(),
10680 if ceil_mode { 1 } else { 0 },
10681 if count_include_pad { 1 } else { 0 },
10682 divisor_override.unwrap_or(0i64),
10683 divisor_override.is_none() as i8
10684 ));
10685 Ok(Tensor { c_tensor: c_tensors[0] })
10686 }
10687
10688 pub fn f_avg_pool2d_backward_grad_input(
10689 &self,
10690 grad_input: &Tensor,
10691 grad_output: &Tensor,
10692 kernel_size: impl IntList,
10693 stride: impl IntList,
10694 padding: impl IntList,
10695 ceil_mode: bool,
10696 count_include_pad: bool,
10697 divisor_override: impl Into<Option<i64>>,
10698 ) -> Result<Tensor, TchError> {
10699 let divisor_override = divisor_override.into();
10700 let mut c_tensors = [std::ptr::null_mut(); 1];
10701 unsafe_torch_err!(atg_avg_pool2d_backward_grad_input(
10702 c_tensors.as_mut_ptr(),
10703 grad_input.c_tensor,
10704 grad_output.c_tensor,
10705 self.c_tensor,
10706 kernel_size.as_ptr(),
10707 kernel_size.len_i32(),
10708 stride.as_ptr(),
10709 stride.len_i32(),
10710 padding.as_ptr(),
10711 padding.len_i32(),
10712 if ceil_mode { 1 } else { 0 },
10713 if count_include_pad { 1 } else { 0 },
10714 divisor_override.unwrap_or(0i64),
10715 divisor_override.is_none() as i8
10716 ));
10717 Ok(Tensor { c_tensor: c_tensors[0] })
10718 }
10719
10720 pub fn f_avg_pool2d_out(
10721 &self,
10722 out: &Tensor,
10723 kernel_size: impl IntList,
10724 stride: impl IntList,
10725 padding: impl IntList,
10726 ceil_mode: bool,
10727 count_include_pad: bool,
10728 divisor_override: impl Into<Option<i64>>,
10729 ) -> Result<Tensor, TchError> {
10730 let divisor_override = divisor_override.into();
10731 let mut c_tensors = [std::ptr::null_mut(); 1];
10732 unsafe_torch_err!(atg_avg_pool2d_out(
10733 c_tensors.as_mut_ptr(),
10734 out.c_tensor,
10735 self.c_tensor,
10736 kernel_size.as_ptr(),
10737 kernel_size.len_i32(),
10738 stride.as_ptr(),
10739 stride.len_i32(),
10740 padding.as_ptr(),
10741 padding.len_i32(),
10742 if ceil_mode { 1 } else { 0 },
10743 if count_include_pad { 1 } else { 0 },
10744 divisor_override.unwrap_or(0i64),
10745 divisor_override.is_none() as i8
10746 ));
10747 Ok(Tensor { c_tensor: c_tensors[0] })
10748 }
10749
10750 pub fn f_avg_pool3d(
10751 &self,
10752 kernel_size: impl IntList,
10753 stride: impl IntList,
10754 padding: impl IntList,
10755 ceil_mode: bool,
10756 count_include_pad: bool,
10757 divisor_override: impl Into<Option<i64>>,
10758 ) -> Result<Tensor, TchError> {
10759 let divisor_override = divisor_override.into();
10760 let mut c_tensors = [std::ptr::null_mut(); 1];
10761 unsafe_torch_err!(atg_avg_pool3d(
10762 c_tensors.as_mut_ptr(),
10763 self.c_tensor,
10764 kernel_size.as_ptr(),
10765 kernel_size.len_i32(),
10766 stride.as_ptr(),
10767 stride.len_i32(),
10768 padding.as_ptr(),
10769 padding.len_i32(),
10770 if ceil_mode { 1 } else { 0 },
10771 if count_include_pad { 1 } else { 0 },
10772 divisor_override.unwrap_or(0i64),
10773 divisor_override.is_none() as i8
10774 ));
10775 Ok(Tensor { c_tensor: c_tensors[0] })
10776 }
10777
10778 pub fn f_avg_pool3d_backward(
10779 &self,
10780 grad_output: &Tensor,
10781 kernel_size: impl IntList,
10782 stride: impl IntList,
10783 padding: impl IntList,
10784 ceil_mode: bool,
10785 count_include_pad: bool,
10786 divisor_override: impl Into<Option<i64>>,
10787 ) -> Result<Tensor, TchError> {
10788 let divisor_override = divisor_override.into();
10789 let mut c_tensors = [std::ptr::null_mut(); 1];
10790 unsafe_torch_err!(atg_avg_pool3d_backward(
10791 c_tensors.as_mut_ptr(),
10792 grad_output.c_tensor,
10793 self.c_tensor,
10794 kernel_size.as_ptr(),
10795 kernel_size.len_i32(),
10796 stride.as_ptr(),
10797 stride.len_i32(),
10798 padding.as_ptr(),
10799 padding.len_i32(),
10800 if ceil_mode { 1 } else { 0 },
10801 if count_include_pad { 1 } else { 0 },
10802 divisor_override.unwrap_or(0i64),
10803 divisor_override.is_none() as i8
10804 ));
10805 Ok(Tensor { c_tensor: c_tensors[0] })
10806 }
10807
10808 pub fn f_avg_pool3d_backward_grad_input(
10809 &self,
10810 grad_input: &Tensor,
10811 grad_output: &Tensor,
10812 kernel_size: impl IntList,
10813 stride: impl IntList,
10814 padding: impl IntList,
10815 ceil_mode: bool,
10816 count_include_pad: bool,
10817 divisor_override: impl Into<Option<i64>>,
10818 ) -> Result<Tensor, TchError> {
10819 let divisor_override = divisor_override.into();
10820 let mut c_tensors = [std::ptr::null_mut(); 1];
10821 unsafe_torch_err!(atg_avg_pool3d_backward_grad_input(
10822 c_tensors.as_mut_ptr(),
10823 grad_input.c_tensor,
10824 grad_output.c_tensor,
10825 self.c_tensor,
10826 kernel_size.as_ptr(),
10827 kernel_size.len_i32(),
10828 stride.as_ptr(),
10829 stride.len_i32(),
10830 padding.as_ptr(),
10831 padding.len_i32(),
10832 if ceil_mode { 1 } else { 0 },
10833 if count_include_pad { 1 } else { 0 },
10834 divisor_override.unwrap_or(0i64),
10835 divisor_override.is_none() as i8
10836 ));
10837 Ok(Tensor { c_tensor: c_tensors[0] })
10838 }
10839
10840 pub fn f_avg_pool3d_out(
10841 &self,
10842 out: &Tensor,
10843 kernel_size: impl IntList,
10844 stride: impl IntList,
10845 padding: impl IntList,
10846 ceil_mode: bool,
10847 count_include_pad: bool,
10848 divisor_override: impl Into<Option<i64>>,
10849 ) -> Result<Tensor, TchError> {
10850 let divisor_override = divisor_override.into();
10851 let mut c_tensors = [std::ptr::null_mut(); 1];
10852 unsafe_torch_err!(atg_avg_pool3d_out(
10853 c_tensors.as_mut_ptr(),
10854 out.c_tensor,
10855 self.c_tensor,
10856 kernel_size.as_ptr(),
10857 kernel_size.len_i32(),
10858 stride.as_ptr(),
10859 stride.len_i32(),
10860 padding.as_ptr(),
10861 padding.len_i32(),
10862 if ceil_mode { 1 } else { 0 },
10863 if count_include_pad { 1 } else { 0 },
10864 divisor_override.unwrap_or(0i64),
10865 divisor_override.is_none() as i8
10866 ));
10867 Ok(Tensor { c_tensor: c_tensors[0] })
10868 }
10869
10870 pub fn f_baddbmm<S: Into<Scalar>>(
10871 &self,
10872 batch1: &Tensor,
10873 batch2: &Tensor,
10874 beta: S,
10875 alpha: S,
10876 ) -> Result<Tensor, TchError> {
10877 let mut c_tensors = [std::ptr::null_mut(); 1];
10878 unsafe_torch_err!(atg_baddbmm(
10879 c_tensors.as_mut_ptr(),
10880 self.c_tensor,
10881 batch1.c_tensor,
10882 batch2.c_tensor,
10883 beta.into().c_scalar,
10884 alpha.into().c_scalar
10885 ));
10886 Ok(Tensor { c_tensor: c_tensors[0] })
10887 }
10888
10889 pub fn f_baddbmm_(&mut self, batch1: &Tensor, batch2: &Tensor) -> Result<Tensor, TchError> {
10890 let mut c_tensors = [std::ptr::null_mut(); 1];
10891 unsafe_torch_err!(atg_baddbmm_(
10892 c_tensors.as_mut_ptr(),
10893 self.c_tensor,
10894 batch1.c_tensor,
10895 batch2.c_tensor
10896 ));
10897 Ok(Tensor { c_tensor: c_tensors[0] })
10898 }
10899
10900 pub fn f_baddbmm_out(
10901 &self,
10902 out: &Tensor,
10903 batch1: &Tensor,
10904 batch2: &Tensor,
10905 ) -> Result<Tensor, TchError> {
10906 let mut c_tensors = [std::ptr::null_mut(); 1];
10907 unsafe_torch_err!(atg_baddbmm_out(
10908 c_tensors.as_mut_ptr(),
10909 out.c_tensor,
10910 self.c_tensor,
10911 batch1.c_tensor,
10912 batch2.c_tensor
10913 ));
10914 Ok(Tensor { c_tensor: c_tensors[0] })
10915 }
10916
10917 pub fn f_bartlett_window(
10918 window_length: i64,
10919 options: (Kind, Device),
10920 ) -> Result<Tensor, TchError> {
10921 let mut c_tensors = [std::ptr::null_mut(); 1];
10922 unsafe_torch_err!(atg_bartlett_window(
10923 c_tensors.as_mut_ptr(),
10924 window_length,
10925 options.0.c_int(),
10926 options.1.c_int()
10927 ));
10928 Ok(Tensor { c_tensor: c_tensors[0] })
10929 }
10930
10931 pub fn f_bartlett_window_out(out: &Tensor, window_length: i64) -> Result<Tensor, TchError> {
10932 let mut c_tensors = [std::ptr::null_mut(); 1];
10933 unsafe_torch_err!(atg_bartlett_window_out(
10934 c_tensors.as_mut_ptr(),
10935 out.c_tensor,
10936 window_length
10937 ));
10938 Ok(Tensor { c_tensor: c_tensors[0] })
10939 }
10940
10941 pub fn f_bartlett_window_periodic(
10942 window_length: i64,
10943 periodic: bool,
10944 options: (Kind, Device),
10945 ) -> Result<Tensor, TchError> {
10946 let mut c_tensors = [std::ptr::null_mut(); 1];
10947 unsafe_torch_err!(atg_bartlett_window_periodic(
10948 c_tensors.as_mut_ptr(),
10949 window_length,
10950 if periodic { 1 } else { 0 },
10951 options.0.c_int(),
10952 options.1.c_int()
10953 ));
10954 Ok(Tensor { c_tensor: c_tensors[0] })
10955 }
10956
10957 pub fn f_bartlett_window_periodic_out(
10958 out: &Tensor,
10959 window_length: i64,
10960 periodic: bool,
10961 ) -> Result<Tensor, TchError> {
10962 let mut c_tensors = [std::ptr::null_mut(); 1];
10963 unsafe_torch_err!(atg_bartlett_window_periodic_out(
10964 c_tensors.as_mut_ptr(),
10965 out.c_tensor,
10966 window_length,
10967 if periodic { 1 } else { 0 }
10968 ));
10969 Ok(Tensor { c_tensor: c_tensors[0] })
10970 }
10971
10972 pub fn f_batch_norm<T: Borrow<Tensor>>(
10973 &self,
10974 weight: Option<T>,
10975 bias: Option<T>,
10976 running_mean: Option<T>,
10977 running_var: Option<T>,
10978 training: bool,
10979 momentum: f64,
10980 eps: f64,
10981 cudnn_enabled: bool,
10982 ) -> Result<Tensor, TchError> {
10983 let mut c_tensors = [std::ptr::null_mut(); 1];
10984 unsafe_torch_err!(atg_batch_norm(
10985 c_tensors.as_mut_ptr(),
10986 self.c_tensor,
10987 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
10988 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
10989 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
10990 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
10991 if training { 1 } else { 0 },
10992 momentum,
10993 eps,
10994 if cudnn_enabled { 1 } else { 0 }
10995 ));
10996 Ok(Tensor { c_tensor: c_tensors[0] })
10997 }
10998
10999 pub fn f_batch_norm_backward_elemt<T: Borrow<Tensor>>(
11000 &self,
11001 grad_out: &Tensor,
11002 mean: &Tensor,
11003 invstd: &Tensor,
11004 weight: Option<T>,
11005 sum_dy: &Tensor,
11006 sum_dy_xmu: &Tensor,
11007 count: &Tensor,
11008 ) -> Result<Tensor, TchError> {
11009 let mut c_tensors = [std::ptr::null_mut(); 1];
11010 unsafe_torch_err!(atg_batch_norm_backward_elemt(
11011 c_tensors.as_mut_ptr(),
11012 grad_out.c_tensor,
11013 self.c_tensor,
11014 mean.c_tensor,
11015 invstd.c_tensor,
11016 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11017 sum_dy.c_tensor,
11018 sum_dy_xmu.c_tensor,
11019 count.c_tensor
11020 ));
11021 Ok(Tensor { c_tensor: c_tensors[0] })
11022 }
11023
11024 pub fn f_batch_norm_backward_elemt_out<T: Borrow<Tensor>>(
11025 &self,
11026 out: &Tensor,
11027 grad_out: &Tensor,
11028 mean: &Tensor,
11029 invstd: &Tensor,
11030 weight: Option<T>,
11031 sum_dy: &Tensor,
11032 sum_dy_xmu: &Tensor,
11033 count: &Tensor,
11034 ) -> Result<Tensor, TchError> {
11035 let mut c_tensors = [std::ptr::null_mut(); 1];
11036 unsafe_torch_err!(atg_batch_norm_backward_elemt_out(
11037 c_tensors.as_mut_ptr(),
11038 out.c_tensor,
11039 grad_out.c_tensor,
11040 self.c_tensor,
11041 mean.c_tensor,
11042 invstd.c_tensor,
11043 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11044 sum_dy.c_tensor,
11045 sum_dy_xmu.c_tensor,
11046 count.c_tensor
11047 ));
11048 Ok(Tensor { c_tensor: c_tensors[0] })
11049 }
11050
11051 pub fn f_batch_norm_backward_reduce<T: Borrow<Tensor>>(
11052 &self,
11053 grad_out: &Tensor,
11054 mean: &Tensor,
11055 invstd: &Tensor,
11056 weight: Option<T>,
11057 input_g: bool,
11058 weight_g: bool,
11059 bias_g: bool,
11060 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
11061 let mut c_tensors = [std::ptr::null_mut(); 4];
11062 unsafe_torch_err!(atg_batch_norm_backward_reduce(
11063 c_tensors.as_mut_ptr(),
11064 grad_out.c_tensor,
11065 self.c_tensor,
11066 mean.c_tensor,
11067 invstd.c_tensor,
11068 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11069 if input_g { 1 } else { 0 },
11070 if weight_g { 1 } else { 0 },
11071 if bias_g { 1 } else { 0 }
11072 ));
11073 Ok((
11074 Tensor { c_tensor: c_tensors[0] },
11075 Tensor { c_tensor: c_tensors[1] },
11076 Tensor { c_tensor: c_tensors[2] },
11077 Tensor { c_tensor: c_tensors[3] },
11078 ))
11079 }
11080
11081 pub fn f_batch_norm_backward_reduce_out<T: Borrow<Tensor>>(
11082 &self,
11083 out0: &Tensor,
11084 out1: &Tensor,
11085 out2: &Tensor,
11086 out3: &Tensor,
11087 grad_out: &Tensor,
11088 mean: &Tensor,
11089 invstd: &Tensor,
11090 weight: Option<T>,
11091 input_g: bool,
11092 weight_g: bool,
11093 bias_g: bool,
11094 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
11095 let mut c_tensors = [std::ptr::null_mut(); 4];
11096 unsafe_torch_err!(atg_batch_norm_backward_reduce_out(
11097 c_tensors.as_mut_ptr(),
11098 out0.c_tensor,
11099 out1.c_tensor,
11100 out2.c_tensor,
11101 out3.c_tensor,
11102 grad_out.c_tensor,
11103 self.c_tensor,
11104 mean.c_tensor,
11105 invstd.c_tensor,
11106 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11107 if input_g { 1 } else { 0 },
11108 if weight_g { 1 } else { 0 },
11109 if bias_g { 1 } else { 0 }
11110 ));
11111 Ok((
11112 Tensor { c_tensor: c_tensors[0] },
11113 Tensor { c_tensor: c_tensors[1] },
11114 Tensor { c_tensor: c_tensors[2] },
11115 Tensor { c_tensor: c_tensors[3] },
11116 ))
11117 }
11118
11119 pub fn f_batch_norm_elemt<T: Borrow<Tensor>>(
11120 &self,
11121 weight: Option<T>,
11122 bias: Option<T>,
11123 mean: &Tensor,
11124 invstd: &Tensor,
11125 eps: f64,
11126 ) -> Result<Tensor, TchError> {
11127 let mut c_tensors = [std::ptr::null_mut(); 1];
11128 unsafe_torch_err!(atg_batch_norm_elemt(
11129 c_tensors.as_mut_ptr(),
11130 self.c_tensor,
11131 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11132 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11133 mean.c_tensor,
11134 invstd.c_tensor,
11135 eps
11136 ));
11137 Ok(Tensor { c_tensor: c_tensors[0] })
11138 }
11139
11140 pub fn f_batch_norm_elemt_out<T: Borrow<Tensor>>(
11141 &self,
11142 out: &Tensor,
11143 weight: Option<T>,
11144 bias: Option<T>,
11145 mean: &Tensor,
11146 invstd: &Tensor,
11147 eps: f64,
11148 ) -> Result<Tensor, TchError> {
11149 let mut c_tensors = [std::ptr::null_mut(); 1];
11150 unsafe_torch_err!(atg_batch_norm_elemt_out(
11151 c_tensors.as_mut_ptr(),
11152 out.c_tensor,
11153 self.c_tensor,
11154 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11155 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11156 mean.c_tensor,
11157 invstd.c_tensor,
11158 eps
11159 ));
11160 Ok(Tensor { c_tensor: c_tensors[0] })
11161 }
11162
11163 pub fn f_batch_norm_gather_stats<T: Borrow<Tensor>>(
11164 &self,
11165 mean: &Tensor,
11166 invstd: &Tensor,
11167 running_mean: Option<T>,
11168 running_var: Option<T>,
11169 momentum: f64,
11170 eps: f64,
11171 count: i64,
11172 ) -> Result<(Tensor, Tensor), TchError> {
11173 let mut c_tensors = [std::ptr::null_mut(); 2];
11174 unsafe_torch_err!(atg_batch_norm_gather_stats(
11175 c_tensors.as_mut_ptr(),
11176 self.c_tensor,
11177 mean.c_tensor,
11178 invstd.c_tensor,
11179 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11180 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11181 momentum,
11182 eps,
11183 count
11184 ));
11185 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
11186 }
11187
11188 pub fn f_batch_norm_gather_stats_out<T: Borrow<Tensor>>(
11189 &self,
11190 out0: &Tensor,
11191 out1: &Tensor,
11192 mean: &Tensor,
11193 invstd: &Tensor,
11194 running_mean: Option<T>,
11195 running_var: Option<T>,
11196 momentum: f64,
11197 eps: f64,
11198 count: i64,
11199 ) -> Result<(Tensor, Tensor), TchError> {
11200 let mut c_tensors = [std::ptr::null_mut(); 2];
11201 unsafe_torch_err!(atg_batch_norm_gather_stats_out(
11202 c_tensors.as_mut_ptr(),
11203 out0.c_tensor,
11204 out1.c_tensor,
11205 self.c_tensor,
11206 mean.c_tensor,
11207 invstd.c_tensor,
11208 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11209 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11210 momentum,
11211 eps,
11212 count
11213 ));
11214 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
11215 }
11216
11217 pub fn f_batch_norm_gather_stats_with_counts<T: Borrow<Tensor>>(
11218 &self,
11219 mean: &Tensor,
11220 invstd: &Tensor,
11221 running_mean: Option<T>,
11222 running_var: Option<T>,
11223 momentum: f64,
11224 eps: f64,
11225 counts: &Tensor,
11226 ) -> Result<(Tensor, Tensor), TchError> {
11227 let mut c_tensors = [std::ptr::null_mut(); 2];
11228 unsafe_torch_err!(atg_batch_norm_gather_stats_with_counts(
11229 c_tensors.as_mut_ptr(),
11230 self.c_tensor,
11231 mean.c_tensor,
11232 invstd.c_tensor,
11233 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11234 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11235 momentum,
11236 eps,
11237 counts.c_tensor
11238 ));
11239 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
11240 }
11241
11242 pub fn f_batch_norm_gather_stats_with_counts_out<T: Borrow<Tensor>>(
11243 &self,
11244 out0: &Tensor,
11245 out1: &Tensor,
11246 mean: &Tensor,
11247 invstd: &Tensor,
11248 running_mean: Option<T>,
11249 running_var: Option<T>,
11250 momentum: f64,
11251 eps: f64,
11252 counts: &Tensor,
11253 ) -> Result<(Tensor, Tensor), TchError> {
11254 let mut c_tensors = [std::ptr::null_mut(); 2];
11255 unsafe_torch_err!(atg_batch_norm_gather_stats_with_counts_out(
11256 c_tensors.as_mut_ptr(),
11257 out0.c_tensor,
11258 out1.c_tensor,
11259 self.c_tensor,
11260 mean.c_tensor,
11261 invstd.c_tensor,
11262 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11263 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11264 momentum,
11265 eps,
11266 counts.c_tensor
11267 ));
11268 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
11269 }
11270
11271 pub fn f_batch_norm_stats(&self, eps: f64) -> Result<(Tensor, Tensor), TchError> {
11272 let mut c_tensors = [std::ptr::null_mut(); 2];
11273 unsafe_torch_err!(atg_batch_norm_stats(c_tensors.as_mut_ptr(), self.c_tensor, eps));
11274 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
11275 }
11276
11277 pub fn f_batch_norm_stats_out(
11278 &self,
11279 out0: &Tensor,
11280 out1: &Tensor,
11281 eps: f64,
11282 ) -> Result<(Tensor, Tensor), TchError> {
11283 let mut c_tensors = [std::ptr::null_mut(); 2];
11284 unsafe_torch_err!(atg_batch_norm_stats_out(
11285 c_tensors.as_mut_ptr(),
11286 out0.c_tensor,
11287 out1.c_tensor,
11288 self.c_tensor,
11289 eps
11290 ));
11291 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
11292 }
11293
11294 pub fn f_batch_norm_update_stats<T: Borrow<Tensor>>(
11295 &self,
11296 running_mean: Option<T>,
11297 running_var: Option<T>,
11298 momentum: f64,
11299 ) -> Result<(Tensor, Tensor), TchError> {
11300 let mut c_tensors = [std::ptr::null_mut(); 2];
11301 unsafe_torch_err!(atg_batch_norm_update_stats(
11302 c_tensors.as_mut_ptr(),
11303 self.c_tensor,
11304 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11305 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11306 momentum
11307 ));
11308 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
11309 }
11310
11311 pub fn f_batch_norm_update_stats_out<T: Borrow<Tensor>>(
11312 &self,
11313 out0: &Tensor,
11314 out1: &Tensor,
11315 running_mean: Option<T>,
11316 running_var: Option<T>,
11317 momentum: f64,
11318 ) -> Result<(Tensor, Tensor), TchError> {
11319 let mut c_tensors = [std::ptr::null_mut(); 2];
11320 unsafe_torch_err!(atg_batch_norm_update_stats_out(
11321 c_tensors.as_mut_ptr(),
11322 out0.c_tensor,
11323 out1.c_tensor,
11324 self.c_tensor,
11325 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11326 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11327 momentum
11328 ));
11329 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
11330 }
11331
11332 pub fn f_bernoulli(&self) -> Result<Tensor, TchError> {
11333 let mut c_tensors = [std::ptr::null_mut(); 1];
11334 unsafe_torch_err!(atg_bernoulli(c_tensors.as_mut_ptr(), self.c_tensor));
11335 Ok(Tensor { c_tensor: c_tensors[0] })
11336 }
11337
11338 pub fn f_bernoulli_(&mut self, p: &Tensor) -> Result<Tensor, TchError> {
11339 let mut c_tensors = [std::ptr::null_mut(); 1];
11340 unsafe_torch_err!(atg_bernoulli_(c_tensors.as_mut_ptr(), self.c_tensor, p.c_tensor));
11341 Ok(Tensor { c_tensor: c_tensors[0] })
11342 }
11343
11344 pub fn f_bernoulli_float_(&mut self, p: f64) -> Result<Tensor, TchError> {
11345 let mut c_tensors = [std::ptr::null_mut(); 1];
11346 unsafe_torch_err!(atg_bernoulli_float_(c_tensors.as_mut_ptr(), self.c_tensor, p));
11347 Ok(Tensor { c_tensor: c_tensors[0] })
11348 }
11349
11350 pub fn f_bernoulli_p(&self, p: f64) -> Result<Tensor, TchError> {
11351 let mut c_tensors = [std::ptr::null_mut(); 1];
11352 unsafe_torch_err!(atg_bernoulli_p(c_tensors.as_mut_ptr(), self.c_tensor, p));
11353 Ok(Tensor { c_tensor: c_tensors[0] })
11354 }
11355
11356 pub fn f_bernoulli_tensor(&self, p: &Tensor) -> Result<Tensor, TchError> {
11357 let mut c_tensors = [std::ptr::null_mut(); 1];
11358 unsafe_torch_err!(atg_bernoulli_tensor(c_tensors.as_mut_ptr(), self.c_tensor, p.c_tensor));
11359 Ok(Tensor { c_tensor: c_tensors[0] })
11360 }
11361
11362 pub fn f_bilinear<T: Borrow<Tensor>>(
11363 input1: &Tensor,
11364 input2: &Tensor,
11365 weight: &Tensor,
11366 bias: Option<T>,
11367 ) -> Result<Tensor, TchError> {
11368 let mut c_tensors = [std::ptr::null_mut(); 1];
11369 unsafe_torch_err!(atg_bilinear(
11370 c_tensors.as_mut_ptr(),
11371 input1.c_tensor,
11372 input2.c_tensor,
11373 weight.c_tensor,
11374 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
11375 ));
11376 Ok(Tensor { c_tensor: c_tensors[0] })
11377 }
11378
11379 pub fn f_binary_cross_entropy<T: Borrow<Tensor>>(
11380 &self,
11381 target: &Tensor,
11382 weight: Option<T>,
11383 reduction: crate::Reduction,
11384 ) -> Result<Tensor, TchError> {
11385 let mut c_tensors = [std::ptr::null_mut(); 1];
11386 unsafe_torch_err!(atg_binary_cross_entropy(
11387 c_tensors.as_mut_ptr(),
11388 self.c_tensor,
11389 target.c_tensor,
11390 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11391 reduction.to_int()
11392 ));
11393 Ok(Tensor { c_tensor: c_tensors[0] })
11394 }
11395
11396 pub fn f_binary_cross_entropy_backward<T: Borrow<Tensor>>(
11397 &self,
11398 grad_output: &Tensor,
11399 target: &Tensor,
11400 weight: Option<T>,
11401 reduction: crate::Reduction,
11402 ) -> Result<Tensor, TchError> {
11403 let mut c_tensors = [std::ptr::null_mut(); 1];
11404 unsafe_torch_err!(atg_binary_cross_entropy_backward(
11405 c_tensors.as_mut_ptr(),
11406 grad_output.c_tensor,
11407 self.c_tensor,
11408 target.c_tensor,
11409 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11410 reduction.to_int()
11411 ));
11412 Ok(Tensor { c_tensor: c_tensors[0] })
11413 }
11414
11415 pub fn f_binary_cross_entropy_backward_grad_input<T: Borrow<Tensor>>(
11416 &self,
11417 grad_input: &Tensor,
11418 grad_output: &Tensor,
11419 target: &Tensor,
11420 weight: Option<T>,
11421 reduction: crate::Reduction,
11422 ) -> Result<Tensor, TchError> {
11423 let mut c_tensors = [std::ptr::null_mut(); 1];
11424 unsafe_torch_err!(atg_binary_cross_entropy_backward_grad_input(
11425 c_tensors.as_mut_ptr(),
11426 grad_input.c_tensor,
11427 grad_output.c_tensor,
11428 self.c_tensor,
11429 target.c_tensor,
11430 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11431 reduction.to_int()
11432 ));
11433 Ok(Tensor { c_tensor: c_tensors[0] })
11434 }
11435
11436 pub fn f_binary_cross_entropy_out<T: Borrow<Tensor>>(
11437 &self,
11438 out: &Tensor,
11439 target: &Tensor,
11440 weight: Option<T>,
11441 reduction: crate::Reduction,
11442 ) -> Result<Tensor, TchError> {
11443 let mut c_tensors = [std::ptr::null_mut(); 1];
11444 unsafe_torch_err!(atg_binary_cross_entropy_out(
11445 c_tensors.as_mut_ptr(),
11446 out.c_tensor,
11447 self.c_tensor,
11448 target.c_tensor,
11449 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11450 reduction.to_int()
11451 ));
11452 Ok(Tensor { c_tensor: c_tensors[0] })
11453 }
11454
11455 pub fn f_binary_cross_entropy_with_logits<T: Borrow<Tensor>>(
11456 &self,
11457 target: &Tensor,
11458 weight: Option<T>,
11459 pos_weight: Option<T>,
11460 reduction: crate::Reduction,
11461 ) -> Result<Tensor, TchError> {
11462 let mut c_tensors = [std::ptr::null_mut(); 1];
11463 unsafe_torch_err!(atg_binary_cross_entropy_with_logits(
11464 c_tensors.as_mut_ptr(),
11465 self.c_tensor,
11466 target.c_tensor,
11467 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11468 pos_weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11469 reduction.to_int()
11470 ));
11471 Ok(Tensor { c_tensor: c_tensors[0] })
11472 }
11473
11474 pub fn f_binary_cross_entropy_with_logits_out<T: Borrow<Tensor>>(
11475 &self,
11476 out: &Tensor,
11477 target: &Tensor,
11478 weight: Option<T>,
11479 pos_weight: Option<T>,
11480 reduction: crate::Reduction,
11481 ) -> Result<Tensor, TchError> {
11482 let mut c_tensors = [std::ptr::null_mut(); 1];
11483 unsafe_torch_err!(atg_binary_cross_entropy_with_logits_out(
11484 c_tensors.as_mut_ptr(),
11485 out.c_tensor,
11486 self.c_tensor,
11487 target.c_tensor,
11488 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11489 pos_weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11490 reduction.to_int()
11491 ));
11492 Ok(Tensor { c_tensor: c_tensors[0] })
11493 }
11494
11495 pub fn f_bincount<T: Borrow<Tensor>>(
11496 &self,
11497 weights: Option<T>,
11498 minlength: i64,
11499 ) -> Result<Tensor, TchError> {
11500 let mut c_tensors = [std::ptr::null_mut(); 1];
11501 unsafe_torch_err!(atg_bincount(
11502 c_tensors.as_mut_ptr(),
11503 self.c_tensor,
11504 weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11505 minlength
11506 ));
11507 Ok(Tensor { c_tensor: c_tensors[0] })
11508 }
11509
11510 pub fn f_bincount_out<T: Borrow<Tensor>>(
11511 &self,
11512 out: &Tensor,
11513 weights: Option<T>,
11514 minlength: i64,
11515 ) -> Result<Tensor, TchError> {
11516 let mut c_tensors = [std::ptr::null_mut(); 1];
11517 unsafe_torch_err!(atg_bincount_out(
11518 c_tensors.as_mut_ptr(),
11519 out.c_tensor,
11520 self.c_tensor,
11521 weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
11522 minlength
11523 ));
11524 Ok(Tensor { c_tensor: c_tensors[0] })
11525 }
11526
11527 pub fn f_binomial(count: &Tensor, prob: &Tensor) -> Result<Tensor, TchError> {
11528 let mut c_tensors = [std::ptr::null_mut(); 1];
11529 unsafe_torch_err!(atg_binomial(c_tensors.as_mut_ptr(), count.c_tensor, prob.c_tensor));
11530 Ok(Tensor { c_tensor: c_tensors[0] })
11531 }
11532
11533 pub fn f_binomial_out(out: &Tensor, count: &Tensor, prob: &Tensor) -> Result<Tensor, TchError> {
11534 let mut c_tensors = [std::ptr::null_mut(); 1];
11535 unsafe_torch_err!(atg_binomial_out(
11536 c_tensors.as_mut_ptr(),
11537 out.c_tensor,
11538 count.c_tensor,
11539 prob.c_tensor
11540 ));
11541 Ok(Tensor { c_tensor: c_tensors[0] })
11542 }
11543
11544 pub fn f_bitwise_and<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
11545 let mut c_tensors = [std::ptr::null_mut(); 1];
11546 unsafe_torch_err!(atg_bitwise_and(
11547 c_tensors.as_mut_ptr(),
11548 self.c_tensor,
11549 other.into().c_scalar
11550 ));
11551 Ok(Tensor { c_tensor: c_tensors[0] })
11552 }
11553
11554 pub fn f_bitwise_and_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
11555 let mut c_tensors = [std::ptr::null_mut(); 1];
11556 unsafe_torch_err!(atg_bitwise_and_(
11557 c_tensors.as_mut_ptr(),
11558 self.c_tensor,
11559 other.into().c_scalar
11560 ));
11561 Ok(Tensor { c_tensor: c_tensors[0] })
11562 }
11563
11564 pub fn f_bitwise_and_scalar_out<S: Into<Scalar>>(
11565 &self,
11566 out: &Tensor,
11567 other: S,
11568 ) -> Result<Tensor, TchError> {
11569 let mut c_tensors = [std::ptr::null_mut(); 1];
11570 unsafe_torch_err!(atg_bitwise_and_scalar_out(
11571 c_tensors.as_mut_ptr(),
11572 out.c_tensor,
11573 self.c_tensor,
11574 other.into().c_scalar
11575 ));
11576 Ok(Tensor { c_tensor: c_tensors[0] })
11577 }
11578
11579 pub fn f_bitwise_and_scalar_tensor<S: Into<Scalar>>(
11580 self_scalar: S,
11581 other: &Tensor,
11582 ) -> Result<Tensor, TchError> {
11583 let mut c_tensors = [std::ptr::null_mut(); 1];
11584 unsafe_torch_err!(atg_bitwise_and_scalar_tensor(
11585 c_tensors.as_mut_ptr(),
11586 self_scalar.into().c_scalar,
11587 other.c_tensor
11588 ));
11589 Ok(Tensor { c_tensor: c_tensors[0] })
11590 }
11591
11592 pub fn f_bitwise_and_scalar_tensor_out<S: Into<Scalar>>(
11593 out: &Tensor,
11594 self_scalar: S,
11595 other: &Tensor,
11596 ) -> Result<Tensor, TchError> {
11597 let mut c_tensors = [std::ptr::null_mut(); 1];
11598 unsafe_torch_err!(atg_bitwise_and_scalar_tensor_out(
11599 c_tensors.as_mut_ptr(),
11600 out.c_tensor,
11601 self_scalar.into().c_scalar,
11602 other.c_tensor
11603 ));
11604 Ok(Tensor { c_tensor: c_tensors[0] })
11605 }
11606
11607 pub fn f_bitwise_and_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
11608 let mut c_tensors = [std::ptr::null_mut(); 1];
11609 unsafe_torch_err!(atg_bitwise_and_tensor(
11610 c_tensors.as_mut_ptr(),
11611 self.c_tensor,
11612 other.c_tensor
11613 ));
11614 Ok(Tensor { c_tensor: c_tensors[0] })
11615 }
11616
11617 pub fn f_bitwise_and_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
11618 let mut c_tensors = [std::ptr::null_mut(); 1];
11619 unsafe_torch_err!(atg_bitwise_and_tensor_(
11620 c_tensors.as_mut_ptr(),
11621 self.c_tensor,
11622 other.c_tensor
11623 ));
11624 Ok(Tensor { c_tensor: c_tensors[0] })
11625 }
11626
11627 pub fn f_bitwise_and_tensor_out(
11628 &self,
11629 out: &Tensor,
11630 other: &Tensor,
11631 ) -> Result<Tensor, TchError> {
11632 let mut c_tensors = [std::ptr::null_mut(); 1];
11633 unsafe_torch_err!(atg_bitwise_and_tensor_out(
11634 c_tensors.as_mut_ptr(),
11635 out.c_tensor,
11636 self.c_tensor,
11637 other.c_tensor
11638 ));
11639 Ok(Tensor { c_tensor: c_tensors[0] })
11640 }
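// Note on the naming scheme of this generated block (a reading aid, not normative): the plain
// name takes a `Scalar` right-hand side, `_tensor` takes a `&Tensor`, a trailing `_` mutates
// `self` in place, and `_out` writes into a caller-provided tensor. With a mutable int64 tensor
// `a` and an int64 tensor `b` (both hypothetical), and assuming the usual `From<i64>` impl for
// `Scalar`:
//
//     let c = a.f_bitwise_and(0b1010i64)?;      // scalar rhs, returns a new tensor
//     let d = a.f_bitwise_and_tensor(&b)?;      // tensor rhs, returns a new tensor
//     let _ = a.f_bitwise_and_tensor_(&b)?;     // in-place update of `a`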
11641
11642 pub fn f_bitwise_left_shift(&self, other: &Tensor) -> Result<Tensor, TchError> {
11643 let mut c_tensors = [std::ptr::null_mut(); 1];
11644 unsafe_torch_err!(atg_bitwise_left_shift(
11645 c_tensors.as_mut_ptr(),
11646 self.c_tensor,
11647 other.c_tensor
11648 ));
11649 Ok(Tensor { c_tensor: c_tensors[0] })
11650 }
11651
11652 pub fn f_bitwise_left_shift_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
11653 let mut c_tensors = [std::ptr::null_mut(); 1];
11654 unsafe_torch_err!(atg_bitwise_left_shift_(
11655 c_tensors.as_mut_ptr(),
11656 self.c_tensor,
11657 other.c_tensor
11658 ));
11659 Ok(Tensor { c_tensor: c_tensors[0] })
11660 }
11661
11662 pub fn f_bitwise_left_shift_scalar_tensor<S: Into<Scalar>>(
11663 self_scalar: S,
11664 other: &Tensor,
11665 ) -> Result<Tensor, TchError> {
11666 let mut c_tensors = [std::ptr::null_mut(); 1];
11667 unsafe_torch_err!(atg_bitwise_left_shift_scalar_tensor(
11668 c_tensors.as_mut_ptr(),
11669 self_scalar.into().c_scalar,
11670 other.c_tensor
11671 ));
11672 Ok(Tensor { c_tensor: c_tensors[0] })
11673 }
11674
11675 pub fn f_bitwise_left_shift_scalar_tensor_out<S: Into<Scalar>>(
11676 out: &Tensor,
11677 self_scalar: S,
11678 other: &Tensor,
11679 ) -> Result<Tensor, TchError> {
11680 let mut c_tensors = [std::ptr::null_mut(); 1];
11681 unsafe_torch_err!(atg_bitwise_left_shift_scalar_tensor_out(
11682 c_tensors.as_mut_ptr(),
11683 out.c_tensor,
11684 self_scalar.into().c_scalar,
11685 other.c_tensor
11686 ));
11687 Ok(Tensor { c_tensor: c_tensors[0] })
11688 }
11689
11690 pub fn f_bitwise_left_shift_tensor_out(
11691 &self,
11692 out: &Tensor,
11693 other: &Tensor,
11694 ) -> Result<Tensor, TchError> {
11695 let mut c_tensors = [std::ptr::null_mut(); 1];
11696 unsafe_torch_err!(atg_bitwise_left_shift_tensor_out(
11697 c_tensors.as_mut_ptr(),
11698 out.c_tensor,
11699 self.c_tensor,
11700 other.c_tensor
11701 ));
11702 Ok(Tensor { c_tensor: c_tensors[0] })
11703 }
11704
11705 pub fn f_bitwise_left_shift_tensor_scalar<S: Into<Scalar>>(
11706 &self,
11707 other: S,
11708 ) -> Result<Tensor, TchError> {
11709 let mut c_tensors = [std::ptr::null_mut(); 1];
11710 unsafe_torch_err!(atg_bitwise_left_shift_tensor_scalar(
11711 c_tensors.as_mut_ptr(),
11712 self.c_tensor,
11713 other.into().c_scalar
11714 ));
11715 Ok(Tensor { c_tensor: c_tensors[0] })
11716 }
11717
11718 pub fn f_bitwise_left_shift_tensor_scalar_<S: Into<Scalar>>(
11719 &mut self,
11720 other: S,
11721 ) -> Result<Tensor, TchError> {
11722 let mut c_tensors = [std::ptr::null_mut(); 1];
11723 unsafe_torch_err!(atg_bitwise_left_shift_tensor_scalar_(
11724 c_tensors.as_mut_ptr(),
11725 self.c_tensor,
11726 other.into().c_scalar
11727 ));
11728 Ok(Tensor { c_tensor: c_tensors[0] })
11729 }
11730
11731 pub fn f_bitwise_left_shift_tensor_scalar_out<S: Into<Scalar>>(
11732 &self,
11733 out: &Tensor,
11734 other: S,
11735 ) -> Result<Tensor, TchError> {
11736 let mut c_tensors = [std::ptr::null_mut(); 1];
11737 unsafe_torch_err!(atg_bitwise_left_shift_tensor_scalar_out(
11738 c_tensors.as_mut_ptr(),
11739 out.c_tensor,
11740 self.c_tensor,
11741 other.into().c_scalar
11742 ));
11743 Ok(Tensor { c_tensor: c_tensors[0] })
11744 }
11745
11746 pub fn f_bitwise_not(&self) -> Result<Tensor, TchError> {
11747 let mut c_tensors = [std::ptr::null_mut(); 1];
11748 unsafe_torch_err!(atg_bitwise_not(c_tensors.as_mut_ptr(), self.c_tensor));
11749 Ok(Tensor { c_tensor: c_tensors[0] })
11750 }
11751
11752 pub fn f_bitwise_not_(&mut self) -> Result<Tensor, TchError> {
11753 let mut c_tensors = [std::ptr::null_mut(); 1];
11754 unsafe_torch_err!(atg_bitwise_not_(c_tensors.as_mut_ptr(), self.c_tensor));
11755 Ok(Tensor { c_tensor: c_tensors[0] })
11756 }
11757
11758 pub fn f_bitwise_not_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
11759 let mut c_tensors = [std::ptr::null_mut(); 1];
11760 unsafe_torch_err!(atg_bitwise_not_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
11761 Ok(Tensor { c_tensor: c_tensors[0] })
11762 }
11763
11764 pub fn f_bitwise_or<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
11765 let mut c_tensors = [std::ptr::null_mut(); 1];
11766 unsafe_torch_err!(atg_bitwise_or(
11767 c_tensors.as_mut_ptr(),
11768 self.c_tensor,
11769 other.into().c_scalar
11770 ));
11771 Ok(Tensor { c_tensor: c_tensors[0] })
11772 }
11773
11774 pub fn f_bitwise_or_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
11775 let mut c_tensors = [std::ptr::null_mut(); 1];
11776 unsafe_torch_err!(atg_bitwise_or_(
11777 c_tensors.as_mut_ptr(),
11778 self.c_tensor,
11779 other.into().c_scalar
11780 ));
11781 Ok(Tensor { c_tensor: c_tensors[0] })
11782 }
11783
11784 pub fn f_bitwise_or_scalar_out<S: Into<Scalar>>(
11785 &self,
11786 out: &Tensor,
11787 other: S,
11788 ) -> Result<Tensor, TchError> {
11789 let mut c_tensors = [std::ptr::null_mut(); 1];
11790 unsafe_torch_err!(atg_bitwise_or_scalar_out(
11791 c_tensors.as_mut_ptr(),
11792 out.c_tensor,
11793 self.c_tensor,
11794 other.into().c_scalar
11795 ));
11796 Ok(Tensor { c_tensor: c_tensors[0] })
11797 }
11798
11799 pub fn f_bitwise_or_scalar_tensor<S: Into<Scalar>>(
11800 self_scalar: S,
11801 other: &Tensor,
11802 ) -> Result<Tensor, TchError> {
11803 let mut c_tensors = [std::ptr::null_mut(); 1];
11804 unsafe_torch_err!(atg_bitwise_or_scalar_tensor(
11805 c_tensors.as_mut_ptr(),
11806 self_scalar.into().c_scalar,
11807 other.c_tensor
11808 ));
11809 Ok(Tensor { c_tensor: c_tensors[0] })
11810 }
11811
11812 pub fn f_bitwise_or_scalar_tensor_out<S: Into<Scalar>>(
11813 out: &Tensor,
11814 self_scalar: S,
11815 other: &Tensor,
11816 ) -> Result<Tensor, TchError> {
11817 let mut c_tensors = [std::ptr::null_mut(); 1];
11818 unsafe_torch_err!(atg_bitwise_or_scalar_tensor_out(
11819 c_tensors.as_mut_ptr(),
11820 out.c_tensor,
11821 self_scalar.into().c_scalar,
11822 other.c_tensor
11823 ));
11824 Ok(Tensor { c_tensor: c_tensors[0] })
11825 }
11826
11827 pub fn f_bitwise_or_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
11828 let mut c_tensors = [std::ptr::null_mut(); 1];
11829 unsafe_torch_err!(atg_bitwise_or_tensor(
11830 c_tensors.as_mut_ptr(),
11831 self.c_tensor,
11832 other.c_tensor
11833 ));
11834 Ok(Tensor { c_tensor: c_tensors[0] })
11835 }
11836
11837 pub fn f_bitwise_or_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
11838 let mut c_tensors = [std::ptr::null_mut(); 1];
11839 unsafe_torch_err!(atg_bitwise_or_tensor_(
11840 c_tensors.as_mut_ptr(),
11841 self.c_tensor,
11842 other.c_tensor
11843 ));
11844 Ok(Tensor { c_tensor: c_tensors[0] })
11845 }
11846
11847 pub fn f_bitwise_or_tensor_out(
11848 &self,
11849 out: &Tensor,
11850 other: &Tensor,
11851 ) -> Result<Tensor, TchError> {
11852 let mut c_tensors = [std::ptr::null_mut(); 1];
11853 unsafe_torch_err!(atg_bitwise_or_tensor_out(
11854 c_tensors.as_mut_ptr(),
11855 out.c_tensor,
11856 self.c_tensor,
11857 other.c_tensor
11858 ));
11859 Ok(Tensor { c_tensor: c_tensors[0] })
11860 }
11861
11862 pub fn f_bitwise_right_shift(&self, other: &Tensor) -> Result<Tensor, TchError> {
11863 let mut c_tensors = [std::ptr::null_mut(); 1];
11864 unsafe_torch_err!(atg_bitwise_right_shift(
11865 c_tensors.as_mut_ptr(),
11866 self.c_tensor,
11867 other.c_tensor
11868 ));
11869 Ok(Tensor { c_tensor: c_tensors[0] })
11870 }
11871
11872 pub fn f_bitwise_right_shift_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
11873 let mut c_tensors = [std::ptr::null_mut(); 1];
11874 unsafe_torch_err!(atg_bitwise_right_shift_(
11875 c_tensors.as_mut_ptr(),
11876 self.c_tensor,
11877 other.c_tensor
11878 ));
11879 Ok(Tensor { c_tensor: c_tensors[0] })
11880 }
11881
11882 pub fn f_bitwise_right_shift_scalar_tensor<S: Into<Scalar>>(
11883 self_scalar: S,
11884 other: &Tensor,
11885 ) -> Result<Tensor, TchError> {
11886 let mut c_tensors = [std::ptr::null_mut(); 1];
11887 unsafe_torch_err!(atg_bitwise_right_shift_scalar_tensor(
11888 c_tensors.as_mut_ptr(),
11889 self_scalar.into().c_scalar,
11890 other.c_tensor
11891 ));
11892 Ok(Tensor { c_tensor: c_tensors[0] })
11893 }
11894
11895 pub fn f_bitwise_right_shift_scalar_tensor_out<S: Into<Scalar>>(
11896 out: &Tensor,
11897 self_scalar: S,
11898 other: &Tensor,
11899 ) -> Result<Tensor, TchError> {
11900 let mut c_tensors = [std::ptr::null_mut(); 1];
11901 unsafe_torch_err!(atg_bitwise_right_shift_scalar_tensor_out(
11902 c_tensors.as_mut_ptr(),
11903 out.c_tensor,
11904 self_scalar.into().c_scalar,
11905 other.c_tensor
11906 ));
11907 Ok(Tensor { c_tensor: c_tensors[0] })
11908 }
11909
11910 pub fn f_bitwise_right_shift_tensor_out(
11911 &self,
11912 out: &Tensor,
11913 other: &Tensor,
11914 ) -> Result<Tensor, TchError> {
11915 let mut c_tensors = [std::ptr::null_mut(); 1];
11916 unsafe_torch_err!(atg_bitwise_right_shift_tensor_out(
11917 c_tensors.as_mut_ptr(),
11918 out.c_tensor,
11919 self.c_tensor,
11920 other.c_tensor
11921 ));
11922 Ok(Tensor { c_tensor: c_tensors[0] })
11923 }
11924
11925 pub fn f_bitwise_right_shift_tensor_scalar<S: Into<Scalar>>(
11926 &self,
11927 other: S,
11928 ) -> Result<Tensor, TchError> {
11929 let mut c_tensors = [std::ptr::null_mut(); 1];
11930 unsafe_torch_err!(atg_bitwise_right_shift_tensor_scalar(
11931 c_tensors.as_mut_ptr(),
11932 self.c_tensor,
11933 other.into().c_scalar
11934 ));
11935 Ok(Tensor { c_tensor: c_tensors[0] })
11936 }
11937
11938 pub fn f_bitwise_right_shift_tensor_scalar_<S: Into<Scalar>>(
11939 &mut self,
11940 other: S,
11941 ) -> Result<Tensor, TchError> {
11942 let mut c_tensors = [std::ptr::null_mut(); 1];
11943 unsafe_torch_err!(atg_bitwise_right_shift_tensor_scalar_(
11944 c_tensors.as_mut_ptr(),
11945 self.c_tensor,
11946 other.into().c_scalar
11947 ));
11948 Ok(Tensor { c_tensor: c_tensors[0] })
11949 }
11950
11951 pub fn f_bitwise_right_shift_tensor_scalar_out<S: Into<Scalar>>(
11952 &self,
11953 out: &Tensor,
11954 other: S,
11955 ) -> Result<Tensor, TchError> {
11956 let mut c_tensors = [std::ptr::null_mut(); 1];
11957 unsafe_torch_err!(atg_bitwise_right_shift_tensor_scalar_out(
11958 c_tensors.as_mut_ptr(),
11959 out.c_tensor,
11960 self.c_tensor,
11961 other.into().c_scalar
11962 ));
11963 Ok(Tensor { c_tensor: c_tensors[0] })
11964 }
11965
11966 pub fn f_bitwise_xor<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
11967 let mut c_tensors = [std::ptr::null_mut(); 1];
11968 unsafe_torch_err!(atg_bitwise_xor(
11969 c_tensors.as_mut_ptr(),
11970 self.c_tensor,
11971 other.into().c_scalar
11972 ));
11973 Ok(Tensor { c_tensor: c_tensors[0] })
11974 }
11975
11976 pub fn f_bitwise_xor_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
11977 let mut c_tensors = [std::ptr::null_mut(); 1];
11978 unsafe_torch_err!(atg_bitwise_xor_(
11979 c_tensors.as_mut_ptr(),
11980 self.c_tensor,
11981 other.into().c_scalar
11982 ));
11983 Ok(Tensor { c_tensor: c_tensors[0] })
11984 }
11985
11986 pub fn f_bitwise_xor_scalar_out<S: Into<Scalar>>(
11987 &self,
11988 out: &Tensor,
11989 other: S,
11990 ) -> Result<Tensor, TchError> {
11991 let mut c_tensors = [std::ptr::null_mut(); 1];
11992 unsafe_torch_err!(atg_bitwise_xor_scalar_out(
11993 c_tensors.as_mut_ptr(),
11994 out.c_tensor,
11995 self.c_tensor,
11996 other.into().c_scalar
11997 ));
11998 Ok(Tensor { c_tensor: c_tensors[0] })
11999 }
12000
12001 pub fn f_bitwise_xor_scalar_tensor<S: Into<Scalar>>(
12002 self_scalar: S,
12003 other: &Tensor,
12004 ) -> Result<Tensor, TchError> {
12005 let mut c_tensors = [std::ptr::null_mut(); 1];
12006 unsafe_torch_err!(atg_bitwise_xor_scalar_tensor(
12007 c_tensors.as_mut_ptr(),
12008 self_scalar.into().c_scalar,
12009 other.c_tensor
12010 ));
12011 Ok(Tensor { c_tensor: c_tensors[0] })
12012 }
12013
12014 pub fn f_bitwise_xor_scalar_tensor_out<S: Into<Scalar>>(
12015 out: &Tensor,
12016 self_scalar: S,
12017 other: &Tensor,
12018 ) -> Result<Tensor, TchError> {
12019 let mut c_tensors = [std::ptr::null_mut(); 1];
12020 unsafe_torch_err!(atg_bitwise_xor_scalar_tensor_out(
12021 c_tensors.as_mut_ptr(),
12022 out.c_tensor,
12023 self_scalar.into().c_scalar,
12024 other.c_tensor
12025 ));
12026 Ok(Tensor { c_tensor: c_tensors[0] })
12027 }
12028
12029 pub fn f_bitwise_xor_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
12030 let mut c_tensors = [std::ptr::null_mut(); 1];
12031 unsafe_torch_err!(atg_bitwise_xor_tensor(
12032 c_tensors.as_mut_ptr(),
12033 self.c_tensor,
12034 other.c_tensor
12035 ));
12036 Ok(Tensor { c_tensor: c_tensors[0] })
12037 }
12038
12039 pub fn f_bitwise_xor_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
12040 let mut c_tensors = [std::ptr::null_mut(); 1];
12041 unsafe_torch_err!(atg_bitwise_xor_tensor_(
12042 c_tensors.as_mut_ptr(),
12043 self.c_tensor,
12044 other.c_tensor
12045 ));
12046 Ok(Tensor { c_tensor: c_tensors[0] })
12047 }
12048
12049 pub fn f_bitwise_xor_tensor_out(
12050 &self,
12051 out: &Tensor,
12052 other: &Tensor,
12053 ) -> Result<Tensor, TchError> {
12054 let mut c_tensors = [std::ptr::null_mut(); 1];
12055 unsafe_torch_err!(atg_bitwise_xor_tensor_out(
12056 c_tensors.as_mut_ptr(),
12057 out.c_tensor,
12058 self.c_tensor,
12059 other.c_tensor
12060 ));
12061 Ok(Tensor { c_tensor: c_tensors[0] })
12062 }
12063
12064 pub fn f_blackman_window(
12065 window_length: i64,
12066 options: (Kind, Device),
12067 ) -> Result<Tensor, TchError> {
12068 let mut c_tensors = [std::ptr::null_mut(); 1];
12069 unsafe_torch_err!(atg_blackman_window(
12070 c_tensors.as_mut_ptr(),
12071 window_length,
12072 options.0.c_int(),
12073 options.1.c_int()
12074 ));
12075 Ok(Tensor { c_tensor: c_tensors[0] })
12076 }
12077
12078 pub fn f_blackman_window_out(out: &Tensor, window_length: i64) -> Result<Tensor, TchError> {
12079 let mut c_tensors = [std::ptr::null_mut(); 1];
12080 unsafe_torch_err!(atg_blackman_window_out(
12081 c_tensors.as_mut_ptr(),
12082 out.c_tensor,
12083 window_length
12084 ));
12085 Ok(Tensor { c_tensor: c_tensors[0] })
12086 }
12087
12088 pub fn f_blackman_window_periodic(
12089 window_length: i64,
12090 periodic: bool,
12091 options: (Kind, Device),
12092 ) -> Result<Tensor, TchError> {
12093 let mut c_tensors = [std::ptr::null_mut(); 1];
12094 unsafe_torch_err!(atg_blackman_window_periodic(
12095 c_tensors.as_mut_ptr(),
12096 window_length,
12097 if periodic { 1 } else { 0 },
12098 options.0.c_int(),
12099 options.1.c_int()
12100 ));
12101 Ok(Tensor { c_tensor: c_tensors[0] })
12102 }
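// Sketch: the window constructors are associated functions whose `options` pair selects dtype
// and device; the `Kind::Float` and `Device::Cpu` variant names are assumed here.
//
//     let w = Tensor::f_blackman_window(400, (Kind::Float, Device::Cpu))?;
//     let wp = Tensor::f_blackman_window_periodic(400, true, (Kind::Float, Device::Cpu))?;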
12103
12104 pub fn f_blackman_window_periodic_out(
12105 out: &Tensor,
12106 window_length: i64,
12107 periodic: bool,
12108 ) -> Result<Tensor, TchError> {
12109 let mut c_tensors = [std::ptr::null_mut(); 1];
12110 unsafe_torch_err!(atg_blackman_window_periodic_out(
12111 c_tensors.as_mut_ptr(),
12112 out.c_tensor,
12113 window_length,
12114 if periodic { 1 } else { 0 }
12115 ));
12116 Ok(Tensor { c_tensor: c_tensors[0] })
12117 }
12118
12119 pub fn f_block_diag<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
12120 let mut c_tensors = [std::ptr::null_mut(); 1];
12121 unsafe_torch_err!(atg_block_diag(
12122 c_tensors.as_mut_ptr(),
12123 ptr_list(tensors).as_ptr(),
12124 tensors.len() as i32
12125 ));
12126 Ok(Tensor { c_tensor: c_tensors[0] })
12127 }
12128
12129 pub fn f_block_diag_out<T: Borrow<Tensor>>(
12130 out: &Tensor,
12131 tensors: &[T],
12132 ) -> Result<Tensor, TchError> {
12133 let mut c_tensors = [std::ptr::null_mut(); 1];
12134 unsafe_torch_err!(atg_block_diag_out(
12135 c_tensors.as_mut_ptr(),
12136 out.c_tensor,
12137 ptr_list(tensors).as_ptr(),
12138 tensors.len() as i32
12139 ));
12140 Ok(Tensor { c_tensor: c_tensors[0] })
12141 }
12142
12143 pub fn f_bmm(&self, mat2: &Tensor) -> Result<Tensor, TchError> {
12144 let mut c_tensors = [std::ptr::null_mut(); 1];
12145 unsafe_torch_err!(atg_bmm(c_tensors.as_mut_ptr(), self.c_tensor, mat2.c_tensor));
12146 Ok(Tensor { c_tensor: c_tensors[0] })
12147 }
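// Sketch: `f_bmm` is a batched matrix multiply; both operands must be 3-D with matching batch
// and inner dimensions, i.e. self (b, n, m) times mat2 (b, m, p) gives (b, n, p). `a` and `b`
// are hypothetical tensors of those shapes.
//
//     // a: (4, 2, 3), b: (4, 3, 5)  ->  c: (4, 2, 5)
//     let c = a.f_bmm(&b)?;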
12148
12149 pub fn f_bmm_out(&self, out: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
12150 let mut c_tensors = [std::ptr::null_mut(); 1];
12151 unsafe_torch_err!(atg_bmm_out(
12152 c_tensors.as_mut_ptr(),
12153 out.c_tensor,
12154 self.c_tensor,
12155 mat2.c_tensor
12156 ));
12157 Ok(Tensor { c_tensor: c_tensors[0] })
12158 }
12159
12160 pub fn f_broadcast_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Vec<Tensor>, TchError> {
12161 let c_tensors = unsafe_torch_err!(atg_broadcast_tensors(
12162 ptr_list(tensors).as_ptr(),
12163 tensors.len() as i32
12164 ));
12165 let mut r__ = vec![];
12166 let mut i = 0;
12167 loop {
12168 let c__ = unsafe { *c_tensors.add(i) };
12169 if c__.is_null() {
12170 break;
12171 }
12172 r__.push(Tensor { c_tensor: c__ });
12173 i += 1;
12174 }
12175 unsafe { libc::free(c_tensors as *mut libc::c_void) }
12176 Ok(r__)
12177 }
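// Note on the multi-output pattern above (also used by `f_chunk` further down): the C layer
// returns a malloc'ed, null-terminated array of tensor handles; each handle is wrapped in a
// `Tensor` (which takes ownership), and only the array itself is then released with
// `libc::free`. A hypothetical call:
//
//     let pair = Tensor::f_broadcast_tensors(&[&a, &b])?; // every result has the broadcast shape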
12178
12179 pub fn f_broadcast_to(&self, size: impl IntList) -> Result<Tensor, TchError> {
12180 let mut c_tensors = [std::ptr::null_mut(); 1];
12181 unsafe_torch_err!(atg_broadcast_to(
12182 c_tensors.as_mut_ptr(),
12183 self.c_tensor,
12184 size.as_ptr(),
12185 size.len_i32()
12186 ));
12187 Ok(Tensor { c_tensor: c_tensors[0] })
12188 }
12189
12190 pub fn f_bucketize(
12191 &self,
12192 boundaries: &Tensor,
12193 out_int32: bool,
12194 right: bool,
12195 ) -> Result<Tensor, TchError> {
12196 let mut c_tensors = [std::ptr::null_mut(); 1];
12197 unsafe_torch_err!(atg_bucketize(
12198 c_tensors.as_mut_ptr(),
12199 self.c_tensor,
12200 boundaries.c_tensor,
12201 if out_int32 { 1 } else { 0 },
12202 if right { 1 } else { 0 }
12203 ));
12204 Ok(Tensor { c_tensor: c_tensors[0] })
12205 }
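// Sketch: `f_bucketize` maps each element of `self` to the index of the bucket it falls into,
// given a sorted 1-D `boundaries` tensor; `out_int32` selects an int32 result and `right`
// moves values equal to a boundary into the bucket to their right. The values below are
// illustrative only.
//
//     // values [5, 10, 25] with boundaries [10, 20, 30]:
//     //   right = false -> [0, 0, 2],  right = true -> [0, 1, 2]
//     let idx = values.f_bucketize(&bounds, false, false)?;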
12206
12207 pub fn f_bucketize_scalar<S: Into<Scalar>>(
12208 self_scalar: S,
12209 boundaries: &Tensor,
12210 out_int32: bool,
12211 right: bool,
12212 ) -> Result<Tensor, TchError> {
12213 let mut c_tensors = [std::ptr::null_mut(); 1];
12214 unsafe_torch_err!(atg_bucketize_scalar(
12215 c_tensors.as_mut_ptr(),
12216 self_scalar.into().c_scalar,
12217 boundaries.c_tensor,
12218 if out_int32 { 1 } else { 0 },
12219 if right { 1 } else { 0 }
12220 ));
12221 Ok(Tensor { c_tensor: c_tensors[0] })
12222 }
12223
12224 pub fn f_bucketize_scalar_out<S: Into<Scalar>>(
12225 out: &Tensor,
12226 self_scalar: S,
12227 boundaries: &Tensor,
12228 out_int32: bool,
12229 right: bool,
12230 ) -> Result<Tensor, TchError> {
12231 let mut c_tensors = [std::ptr::null_mut(); 1];
12232 unsafe_torch_err!(atg_bucketize_scalar_out(
12233 c_tensors.as_mut_ptr(),
12234 out.c_tensor,
12235 self_scalar.into().c_scalar,
12236 boundaries.c_tensor,
12237 if out_int32 { 1 } else { 0 },
12238 if right { 1 } else { 0 }
12239 ));
12240 Ok(Tensor { c_tensor: c_tensors[0] })
12241 }
12242
12243 pub fn f_bucketize_tensor_out(
12244 &self,
12245 out: &Tensor,
12246 boundaries: &Tensor,
12247 out_int32: bool,
12248 right: bool,
12249 ) -> Result<Tensor, TchError> {
12250 let mut c_tensors = [std::ptr::null_mut(); 1];
12251 unsafe_torch_err!(atg_bucketize_tensor_out(
12252 c_tensors.as_mut_ptr(),
12253 out.c_tensor,
12254 self.c_tensor,
12255 boundaries.c_tensor,
12256 if out_int32 { 1 } else { 0 },
12257 if right { 1 } else { 0 }
12258 ));
12259 Ok(Tensor { c_tensor: c_tensors[0] })
12260 }
12261
12262 pub fn f_can_cast(from_: Kind, to: Kind) -> Result<bool, TchError> {
12263 let return_;
12264 unsafe_torch_err!(return_ = atg_can_cast(from_.c_int(), to.c_int()));
12265 Ok(return_ != 0)
12266 }
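// Sketch: `f_can_cast` mirrors `torch.can_cast`, reporting whether an implicit cast between two
// dtypes is permitted by the type-promotion rules (integer to float is, float to integer is not).
//
//     let ok = Tensor::f_can_cast(Kind::Int64, Kind::Float)?;     // expected: true
//     let not_ok = Tensor::f_can_cast(Kind::Float, Kind::Int64)?; // expected: false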
12267
12268 pub fn f_cartesian_prod<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
12269 let mut c_tensors = [std::ptr::null_mut(); 1];
12270 unsafe_torch_err!(atg_cartesian_prod(
12271 c_tensors.as_mut_ptr(),
12272 ptr_list(tensors).as_ptr(),
12273 tensors.len() as i32
12274 ));
12275 Ok(Tensor { c_tensor: c_tensors[0] })
12276 }
12277
12278 pub fn f_cat<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Result<Tensor, TchError> {
12279 let mut c_tensors = [std::ptr::null_mut(); 1];
12280 unsafe_torch_err!(atg_cat(
12281 c_tensors.as_mut_ptr(),
12282 ptr_list(tensors).as_ptr(),
12283 tensors.len() as i32,
12284 dim
12285 ));
12286 Ok(Tensor { c_tensor: c_tensors[0] })
12287 }
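// Sketch: the `tensors: &[T]` parameters accept anything that borrows as `Tensor` (for example
// `&[Tensor]` or `&[&Tensor]`); `ptr_list` converts the slice into the pointer array the C API
// expects. Concatenation along `dim` requires all other dimensions to match.
//
//     let ab = Tensor::f_cat(&[&a, &b], 0)?;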
12288
12289 pub fn f_cat_out<T: Borrow<Tensor>>(
12290 out: &Tensor,
12291 tensors: &[T],
12292 dim: i64,
12293 ) -> Result<Tensor, TchError> {
12294 let mut c_tensors = [std::ptr::null_mut(); 1];
12295 unsafe_torch_err!(atg_cat_out(
12296 c_tensors.as_mut_ptr(),
12297 out.c_tensor,
12298 ptr_list(tensors).as_ptr(),
12299 tensors.len() as i32,
12300 dim
12301 ));
12302 Ok(Tensor { c_tensor: c_tensors[0] })
12303 }
12304
12305 pub fn f_cauchy(&self, median: f64, sigma: f64) -> Result<Tensor, TchError> {
12306 let mut c_tensors = [std::ptr::null_mut(); 1];
12307 unsafe_torch_err!(atg_cauchy(c_tensors.as_mut_ptr(), self.c_tensor, median, sigma));
12308 Ok(Tensor { c_tensor: c_tensors[0] })
12309 }
12310
12311 pub fn f_cauchy_(&mut self, median: f64, sigma: f64) -> Result<Tensor, TchError> {
12312 let mut c_tensors = [std::ptr::null_mut(); 1];
12313 unsafe_torch_err!(atg_cauchy_(c_tensors.as_mut_ptr(), self.c_tensor, median, sigma));
12314 Ok(Tensor { c_tensor: c_tensors[0] })
12315 }
12316
12317 pub fn f_cauchy_out(&self, out: &Tensor, median: f64, sigma: f64) -> Result<Tensor, TchError> {
12318 let mut c_tensors = [std::ptr::null_mut(); 1];
12319 unsafe_torch_err!(atg_cauchy_out(
12320 c_tensors.as_mut_ptr(),
12321 out.c_tensor,
12322 self.c_tensor,
12323 median,
12324 sigma
12325 ));
12326 Ok(Tensor { c_tensor: c_tensors[0] })
12327 }
12328
12329 pub fn f_ccol_indices(&self) -> Result<Tensor, TchError> {
12330 let mut c_tensors = [std::ptr::null_mut(); 1];
12331 unsafe_torch_err!(atg_ccol_indices(c_tensors.as_mut_ptr(), self.c_tensor));
12332 Ok(Tensor { c_tensor: c_tensors[0] })
12333 }
12334
12335 pub fn f_ccol_indices_copy(&self) -> Result<Tensor, TchError> {
12336 let mut c_tensors = [std::ptr::null_mut(); 1];
12337 unsafe_torch_err!(atg_ccol_indices_copy(c_tensors.as_mut_ptr(), self.c_tensor));
12338 Ok(Tensor { c_tensor: c_tensors[0] })
12339 }
12340
12341 pub fn f_ccol_indices_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
12342 let mut c_tensors = [std::ptr::null_mut(); 1];
12343 unsafe_torch_err!(atg_ccol_indices_copy_out(
12344 c_tensors.as_mut_ptr(),
12345 out.c_tensor,
12346 self.c_tensor
12347 ));
12348 Ok(Tensor { c_tensor: c_tensors[0] })
12349 }
12350
12351 pub fn f_cdist(
12352 x1: &Tensor,
12353 x2: &Tensor,
12354 p: f64,
12355 compute_mode: impl Into<Option<i64>>,
12356 ) -> Result<Tensor, TchError> {
12357 let compute_mode = compute_mode.into();
12358 let mut c_tensors = [std::ptr::null_mut(); 1];
12359 unsafe_torch_err!(atg_cdist(
12360 c_tensors.as_mut_ptr(),
12361 x1.c_tensor,
12362 x2.c_tensor,
12363 p,
12364 compute_mode.unwrap_or(0i64),
12365 compute_mode.is_none() as i8
12366 ));
12367 Ok(Tensor { c_tensor: c_tensors[0] })
12368 }
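// Note on the optional-integer convention used above: a Rust `Option<i64>` is lowered to two C
// arguments, the value (defaulted when absent) plus an `is_none` flag, so `None` and `Some(0)`
// stay distinguishable. Because the parameter is `impl Into<Option<i64>>`, a bare `i64` also
// works. `x1` and `x2` are hypothetical 2-D tensors.
//
//     let d = Tensor::f_cdist(&x1, &x2, 2.0, None)?;  // let the backend pick the compute mode
//     let d = Tensor::f_cdist(&x1, &x2, 2.0, 1i64)?;  // explicit compute mode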
12369
12370 pub fn f_ceil(&self) -> Result<Tensor, TchError> {
12371 let mut c_tensors = [std::ptr::null_mut(); 1];
12372 unsafe_torch_err!(atg_ceil(c_tensors.as_mut_ptr(), self.c_tensor));
12373 Ok(Tensor { c_tensor: c_tensors[0] })
12374 }
12375
12376 pub fn f_ceil_(&mut self) -> Result<Tensor, TchError> {
12377 let mut c_tensors = [std::ptr::null_mut(); 1];
12378 unsafe_torch_err!(atg_ceil_(c_tensors.as_mut_ptr(), self.c_tensor));
12379 Ok(Tensor { c_tensor: c_tensors[0] })
12380 }
12381
12382 pub fn f_ceil_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
12383 let mut c_tensors = [std::ptr::null_mut(); 1];
12384 unsafe_torch_err!(atg_ceil_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
12385 Ok(Tensor { c_tensor: c_tensors[0] })
12386 }
12387
12388 pub fn f_celu(&self) -> Result<Tensor, TchError> {
12389 let mut c_tensors = [std::ptr::null_mut(); 1];
12390 unsafe_torch_err!(atg_celu(c_tensors.as_mut_ptr(), self.c_tensor));
12391 Ok(Tensor { c_tensor: c_tensors[0] })
12392 }
12393
12394 pub fn f_celu_(&mut self) -> Result<Tensor, TchError> {
12395 let mut c_tensors = [std::ptr::null_mut(); 1];
12396 unsafe_torch_err!(atg_celu_(c_tensors.as_mut_ptr(), self.c_tensor));
12397 Ok(Tensor { c_tensor: c_tensors[0] })
12398 }
12399
12400 pub fn f_celu_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
12401 let mut c_tensors = [std::ptr::null_mut(); 1];
12402 unsafe_torch_err!(atg_celu_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
12403 Ok(Tensor { c_tensor: c_tensors[0] })
12404 }
12405
12406 pub fn f_chain_matmul<T: Borrow<Tensor>>(matrices: &[T]) -> Result<Tensor, TchError> {
12407 let mut c_tensors = [std::ptr::null_mut(); 1];
12408 unsafe_torch_err!(atg_chain_matmul(
12409 c_tensors.as_mut_ptr(),
12410 ptr_list(matrices).as_ptr(),
12411 matrices.len() as i32
12412 ));
12413 Ok(Tensor { c_tensor: c_tensors[0] })
12414 }
12415
12416 pub fn f_chain_matmul_out<T: Borrow<Tensor>>(
12417 out: &Tensor,
12418 matrices: &[T],
12419 ) -> Result<Tensor, TchError> {
12420 let mut c_tensors = [std::ptr::null_mut(); 1];
12421 unsafe_torch_err!(atg_chain_matmul_out(
12422 c_tensors.as_mut_ptr(),
12423 out.c_tensor,
12424 ptr_list(matrices).as_ptr(),
12425 matrices.len() as i32
12426 ));
12427 Ok(Tensor { c_tensor: c_tensors[0] })
12428 }
12429
12430 pub fn f_chalf(&self) -> Result<Tensor, TchError> {
12431 let mut c_tensors = [std::ptr::null_mut(); 1];
12432 unsafe_torch_err!(atg_chalf(c_tensors.as_mut_ptr(), self.c_tensor));
12433 Ok(Tensor { c_tensor: c_tensors[0] })
12434 }
12435
12436 pub fn f_channel_shuffle(&self, groups: i64) -> Result<Tensor, TchError> {
12437 let mut c_tensors = [std::ptr::null_mut(); 1];
12438 unsafe_torch_err!(atg_channel_shuffle(c_tensors.as_mut_ptr(), self.c_tensor, groups));
12439 Ok(Tensor { c_tensor: c_tensors[0] })
12440 }
12441
12442 pub fn f_channel_shuffle_out(&self, out: &Tensor, groups: i64) -> Result<Tensor, TchError> {
12443 let mut c_tensors = [std::ptr::null_mut(); 1];
12444 unsafe_torch_err!(atg_channel_shuffle_out(
12445 c_tensors.as_mut_ptr(),
12446 out.c_tensor,
12447 self.c_tensor,
12448 groups
12449 ));
12450 Ok(Tensor { c_tensor: c_tensors[0] })
12451 }
12452
12453 pub fn f_cholesky(&self, upper: bool) -> Result<Tensor, TchError> {
12454 let mut c_tensors = [std::ptr::null_mut(); 1];
12455 unsafe_torch_err!(atg_cholesky(
12456 c_tensors.as_mut_ptr(),
12457 self.c_tensor,
12458 if upper { 1 } else { 0 }
12459 ));
12460 Ok(Tensor { c_tensor: c_tensors[0] })
12461 }
12462
12463 pub fn f_cholesky_inverse(&self, upper: bool) -> Result<Tensor, TchError> {
12464 let mut c_tensors = [std::ptr::null_mut(); 1];
12465 unsafe_torch_err!(atg_cholesky_inverse(
12466 c_tensors.as_mut_ptr(),
12467 self.c_tensor,
12468 if upper { 1 } else { 0 }
12469 ));
12470 Ok(Tensor { c_tensor: c_tensors[0] })
12471 }
12472
12473 pub fn f_cholesky_inverse_out(&self, out: &Tensor, upper: bool) -> Result<Tensor, TchError> {
12474 let mut c_tensors = [std::ptr::null_mut(); 1];
12475 unsafe_torch_err!(atg_cholesky_inverse_out(
12476 c_tensors.as_mut_ptr(),
12477 out.c_tensor,
12478 self.c_tensor,
12479 if upper { 1 } else { 0 }
12480 ));
12481 Ok(Tensor { c_tensor: c_tensors[0] })
12482 }
12483
12484 pub fn f_cholesky_out(&self, out: &Tensor, upper: bool) -> Result<Tensor, TchError> {
12485 let mut c_tensors = [std::ptr::null_mut(); 1];
12486 unsafe_torch_err!(atg_cholesky_out(
12487 c_tensors.as_mut_ptr(),
12488 out.c_tensor,
12489 self.c_tensor,
12490 if upper { 1 } else { 0 }
12491 ));
12492 Ok(Tensor { c_tensor: c_tensors[0] })
12493 }
12494
12495 pub fn f_cholesky_solve(&self, input2: &Tensor, upper: bool) -> Result<Tensor, TchError> {
12496 let mut c_tensors = [std::ptr::null_mut(); 1];
12497 unsafe_torch_err!(atg_cholesky_solve(
12498 c_tensors.as_mut_ptr(),
12499 self.c_tensor,
12500 input2.c_tensor,
12501 if upper { 1 } else { 0 }
12502 ));
12503 Ok(Tensor { c_tensor: c_tensors[0] })
12504 }
12505
12506 pub fn f_cholesky_solve_out(
12507 &self,
12508 out: &Tensor,
12509 input2: &Tensor,
12510 upper: bool,
12511 ) -> Result<Tensor, TchError> {
12512 let mut c_tensors = [std::ptr::null_mut(); 1];
12513 unsafe_torch_err!(atg_cholesky_solve_out(
12514 c_tensors.as_mut_ptr(),
12515 out.c_tensor,
12516 self.c_tensor,
12517 input2.c_tensor,
12518 if upper { 1 } else { 0 }
12519 ));
12520 Ok(Tensor { c_tensor: c_tensors[0] })
12521 }
12522
12523 pub fn f_choose_qparams_optimized(
12524 &self,
12525 numel: i64,
12526 n_bins: i64,
12527 ratio: f64,
12528 bit_width: i64,
12529 ) -> Result<(Tensor, Tensor), TchError> {
12530 let mut c_tensors = [std::ptr::null_mut(); 2];
12531 unsafe_torch_err!(atg_choose_qparams_optimized(
12532 c_tensors.as_mut_ptr(),
12533 self.c_tensor,
12534 numel,
12535 n_bins,
12536 ratio,
12537 bit_width
12538 ));
12539 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
12540 }
12541
12542 pub fn f_chunk(&self, chunks: i64, dim: i64) -> Result<Vec<Tensor>, TchError> {
12543 let c_tensors = unsafe_torch_err!(atg_chunk(self.c_tensor, chunks, dim));
12544 let mut r__ = vec![];
12545 let mut i = 0;
12546 loop {
12547 let c__ = unsafe { *c_tensors.add(i) };
12548 if c__.is_null() {
12549 break;
12550 }
12551 r__.push(Tensor { c_tensor: c__ });
12552 i += 1;
12553 }
12554 unsafe { libc::free(c_tensors as *mut libc::c_void) }
12555 Ok(r__)
12556 }
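// Sketch: `f_chunk` splits `self` along `dim` into at most `chunks` pieces, the last of which
// may be smaller; the pieces are collected from the null-terminated C array into an owned
// `Vec<Tensor>`. `xs` is a hypothetical tensor.
//
//     // xs: shape (10, 4) -> pieces of shape (4, 4), (4, 4), (2, 4)
//     let parts = xs.f_chunk(3, 0)?;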
12557
12558 pub fn f_clamp<S: Into<Scalar>>(&self, min: S, max: S) -> Result<Tensor, TchError> {
12559 let mut c_tensors = [std::ptr::null_mut(); 1];
12560 unsafe_torch_err!(atg_clamp(
12561 c_tensors.as_mut_ptr(),
12562 self.c_tensor,
12563 min.into().c_scalar,
12564 max.into().c_scalar
12565 ));
12566 Ok(Tensor { c_tensor: c_tensors[0] })
12567 }
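// Sketch: this scalar form requires both bounds; for a one-sided clamp use `f_clamp_max` /
// `f_clamp_min`, or the `_tensor` variant below with an omitted bound. `x` is hypothetical.
//
//     let y = x.f_clamp(-1.0, 1.0)?;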
12568
12569 pub fn f_clamp_<S: Into<Scalar>>(&mut self, min: S, max: S) -> Result<Tensor, TchError> {
12570 let mut c_tensors = [std::ptr::null_mut(); 1];
12571 unsafe_torch_err!(atg_clamp_(
12572 c_tensors.as_mut_ptr(),
12573 self.c_tensor,
12574 min.into().c_scalar,
12575 max.into().c_scalar
12576 ));
12577 Ok(Tensor { c_tensor: c_tensors[0] })
12578 }
12579
12580 pub fn f_clamp_max<S: Into<Scalar>>(&self, max: S) -> Result<Tensor, TchError> {
12581 let mut c_tensors = [std::ptr::null_mut(); 1];
12582 unsafe_torch_err!(atg_clamp_max(
12583 c_tensors.as_mut_ptr(),
12584 self.c_tensor,
12585 max.into().c_scalar
12586 ));
12587 Ok(Tensor { c_tensor: c_tensors[0] })
12588 }
12589
12590 pub fn f_clamp_max_<S: Into<Scalar>>(&mut self, max: S) -> Result<Tensor, TchError> {
12591 let mut c_tensors = [std::ptr::null_mut(); 1];
12592 unsafe_torch_err!(atg_clamp_max_(
12593 c_tensors.as_mut_ptr(),
12594 self.c_tensor,
12595 max.into().c_scalar
12596 ));
12597 Ok(Tensor { c_tensor: c_tensors[0] })
12598 }
12599
12600 pub fn f_clamp_max_out<S: Into<Scalar>>(
12601 &self,
12602 out: &Tensor,
12603 max: S,
12604 ) -> Result<Tensor, TchError> {
12605 let mut c_tensors = [std::ptr::null_mut(); 1];
12606 unsafe_torch_err!(atg_clamp_max_out(
12607 c_tensors.as_mut_ptr(),
12608 out.c_tensor,
12609 self.c_tensor,
12610 max.into().c_scalar
12611 ));
12612 Ok(Tensor { c_tensor: c_tensors[0] })
12613 }
12614
12615 pub fn f_clamp_max_tensor(&self, max: &Tensor) -> Result<Tensor, TchError> {
12616 let mut c_tensors = [std::ptr::null_mut(); 1];
12617 unsafe_torch_err!(atg_clamp_max_tensor(
12618 c_tensors.as_mut_ptr(),
12619 self.c_tensor,
12620 max.c_tensor
12621 ));
12622 Ok(Tensor { c_tensor: c_tensors[0] })
12623 }
12624
12625 pub fn f_clamp_max_tensor_(&mut self, max: &Tensor) -> Result<Tensor, TchError> {
12626 let mut c_tensors = [std::ptr::null_mut(); 1];
12627 unsafe_torch_err!(atg_clamp_max_tensor_(
12628 c_tensors.as_mut_ptr(),
12629 self.c_tensor,
12630 max.c_tensor
12631 ));
12632 Ok(Tensor { c_tensor: c_tensors[0] })
12633 }
12634
12635 pub fn f_clamp_max_tensor_out(&self, out: &Tensor, max: &Tensor) -> Result<Tensor, TchError> {
12636 let mut c_tensors = [std::ptr::null_mut(); 1];
12637 unsafe_torch_err!(atg_clamp_max_tensor_out(
12638 c_tensors.as_mut_ptr(),
12639 out.c_tensor,
12640 self.c_tensor,
12641 max.c_tensor
12642 ));
12643 Ok(Tensor { c_tensor: c_tensors[0] })
12644 }
12645
12646 pub fn f_clamp_min<S: Into<Scalar>>(&self, min: S) -> Result<Tensor, TchError> {
12647 let mut c_tensors = [std::ptr::null_mut(); 1];
12648 unsafe_torch_err!(atg_clamp_min(
12649 c_tensors.as_mut_ptr(),
12650 self.c_tensor,
12651 min.into().c_scalar
12652 ));
12653 Ok(Tensor { c_tensor: c_tensors[0] })
12654 }
12655
12656 pub fn f_clamp_min_<S: Into<Scalar>>(&mut self, min: S) -> Result<Tensor, TchError> {
12657 let mut c_tensors = [std::ptr::null_mut(); 1];
12658 unsafe_torch_err!(atg_clamp_min_(
12659 c_tensors.as_mut_ptr(),
12660 self.c_tensor,
12661 min.into().c_scalar
12662 ));
12663 Ok(Tensor { c_tensor: c_tensors[0] })
12664 }
12665
12666 pub fn f_clamp_min_out<S: Into<Scalar>>(
12667 &self,
12668 out: &Tensor,
12669 min: S,
12670 ) -> Result<Tensor, TchError> {
12671 let mut c_tensors = [std::ptr::null_mut(); 1];
12672 unsafe_torch_err!(atg_clamp_min_out(
12673 c_tensors.as_mut_ptr(),
12674 out.c_tensor,
12675 self.c_tensor,
12676 min.into().c_scalar
12677 ));
12678 Ok(Tensor { c_tensor: c_tensors[0] })
12679 }
12680
12681 pub fn f_clamp_min_tensor(&self, min: &Tensor) -> Result<Tensor, TchError> {
12682 let mut c_tensors = [std::ptr::null_mut(); 1];
12683 unsafe_torch_err!(atg_clamp_min_tensor(
12684 c_tensors.as_mut_ptr(),
12685 self.c_tensor,
12686 min.c_tensor
12687 ));
12688 Ok(Tensor { c_tensor: c_tensors[0] })
12689 }
12690
12691 pub fn f_clamp_min_tensor_(&mut self, min: &Tensor) -> Result<Tensor, TchError> {
12692 let mut c_tensors = [std::ptr::null_mut(); 1];
12693 unsafe_torch_err!(atg_clamp_min_tensor_(
12694 c_tensors.as_mut_ptr(),
12695 self.c_tensor,
12696 min.c_tensor
12697 ));
12698 Ok(Tensor { c_tensor: c_tensors[0] })
12699 }
12700
12701 pub fn f_clamp_min_tensor_out(&self, out: &Tensor, min: &Tensor) -> Result<Tensor, TchError> {
12702 let mut c_tensors = [std::ptr::null_mut(); 1];
12703 unsafe_torch_err!(atg_clamp_min_tensor_out(
12704 c_tensors.as_mut_ptr(),
12705 out.c_tensor,
12706 self.c_tensor,
12707 min.c_tensor
12708 ));
12709 Ok(Tensor { c_tensor: c_tensors[0] })
12710 }
12711
12712 pub fn f_clamp_out<S: Into<Scalar>>(
12713 &self,
12714 out: &Tensor,
12715 min: S,
12716 max: S,
12717 ) -> Result<Tensor, TchError> {
12718 let mut c_tensors = [std::ptr::null_mut(); 1];
12719 unsafe_torch_err!(atg_clamp_out(
12720 c_tensors.as_mut_ptr(),
12721 out.c_tensor,
12722 self.c_tensor,
12723 min.into().c_scalar,
12724 max.into().c_scalar
12725 ));
12726 Ok(Tensor { c_tensor: c_tensors[0] })
12727 }
12728
12729 pub fn f_clamp_tensor<T: Borrow<Tensor>>(
12730 &self,
12731 min: Option<T>,
12732 max: Option<T>,
12733 ) -> Result<Tensor, TchError> {
12734 let mut c_tensors = [std::ptr::null_mut(); 1];
12735 unsafe_torch_err!(atg_clamp_tensor(
12736 c_tensors.as_mut_ptr(),
12737 self.c_tensor,
12738 min.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
12739 max.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
12740 ));
12741 Ok(Tensor { c_tensor: c_tensors[0] })
12742 }
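// Sketch: the `_tensor` clamp variants take optional tensor bounds, each lowered to a null
// pointer when `None`, so a one-sided clamp simply omits the other bound. `x` and `lo` are
// hypothetical tensors.
//
//     let y = x.f_clamp_tensor(Some(&lo), None)?; // clamp from below only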
12743
12744 pub fn f_clamp_tensor_<T: Borrow<Tensor>>(
12745 &mut self,
12746 min: Option<T>,
12747 max: Option<T>,
12748 ) -> Result<Tensor, TchError> {
12749 let mut c_tensors = [std::ptr::null_mut(); 1];
12750 unsafe_torch_err!(atg_clamp_tensor_(
12751 c_tensors.as_mut_ptr(),
12752 self.c_tensor,
12753 min.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
12754 max.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
12755 ));
12756 Ok(Tensor { c_tensor: c_tensors[0] })
12757 }
12758
12759 pub fn f_clamp_tensor_out<T: Borrow<Tensor>>(
12760 &self,
12761 out: &Tensor,
12762 min: Option<T>,
12763 max: Option<T>,
12764 ) -> Result<Tensor, TchError> {
12765 let mut c_tensors = [std::ptr::null_mut(); 1];
12766 unsafe_torch_err!(atg_clamp_tensor_out(
12767 c_tensors.as_mut_ptr(),
12768 out.c_tensor,
12769 self.c_tensor,
12770 min.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
12771 max.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
12772 ));
12773 Ok(Tensor { c_tensor: c_tensors[0] })
12774 }
12775
12776 pub fn f_clip<S: Into<Scalar>>(&self, min: S, max: S) -> Result<Tensor, TchError> {
12777 let mut c_tensors = [std::ptr::null_mut(); 1];
12778 unsafe_torch_err!(atg_clip(
12779 c_tensors.as_mut_ptr(),
12780 self.c_tensor,
12781 min.into().c_scalar,
12782 max.into().c_scalar
12783 ));
12784 Ok(Tensor { c_tensor: c_tensors[0] })
12785 }
12786
12787 pub fn f_clip_<S: Into<Scalar>>(&mut self, min: S, max: S) -> Result<Tensor, TchError> {
12788 let mut c_tensors = [std::ptr::null_mut(); 1];
12789 unsafe_torch_err!(atg_clip_(
12790 c_tensors.as_mut_ptr(),
12791 self.c_tensor,
12792 min.into().c_scalar,
12793 max.into().c_scalar
12794 ));
12795 Ok(Tensor { c_tensor: c_tensors[0] })
12796 }
12797
12798 pub fn f_clip_out<S: Into<Scalar>>(
12799 &self,
12800 out: &Tensor,
12801 min: S,
12802 max: S,
12803 ) -> Result<Tensor, TchError> {
12804 let mut c_tensors = [std::ptr::null_mut(); 1];
12805 unsafe_torch_err!(atg_clip_out(
12806 c_tensors.as_mut_ptr(),
12807 out.c_tensor,
12808 self.c_tensor,
12809 min.into().c_scalar,
12810 max.into().c_scalar
12811 ));
12812 Ok(Tensor { c_tensor: c_tensors[0] })
12813 }
12814
12815 pub fn f_clip_tensor<T: Borrow<Tensor>>(
12816 &self,
12817 min: Option<T>,
12818 max: Option<T>,
12819 ) -> Result<Tensor, TchError> {
12820 let mut c_tensors = [std::ptr::null_mut(); 1];
12821 unsafe_torch_err!(atg_clip_tensor(
12822 c_tensors.as_mut_ptr(),
12823 self.c_tensor,
12824 min.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
12825 max.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
12826 ));
12827 Ok(Tensor { c_tensor: c_tensors[0] })
12828 }
12829
12830 pub fn f_clip_tensor_<T: Borrow<Tensor>>(
12831 &mut self,
12832 min: Option<T>,
12833 max: Option<T>,
12834 ) -> Result<Tensor, TchError> {
12835 let mut c_tensors = [std::ptr::null_mut(); 1];
12836 unsafe_torch_err!(atg_clip_tensor_(
12837 c_tensors.as_mut_ptr(),
12838 self.c_tensor,
12839 min.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
12840 max.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
12841 ));
12842 Ok(Tensor { c_tensor: c_tensors[0] })
12843 }
12844
12845 pub fn f_clip_tensor_out<T: Borrow<Tensor>>(
12846 &self,
12847 out: &Tensor,
12848 min: Option<T>,
12849 max: Option<T>,
12850 ) -> Result<Tensor, TchError> {
12851 let mut c_tensors = [std::ptr::null_mut(); 1];
12852 unsafe_torch_err!(atg_clip_tensor_out(
12853 c_tensors.as_mut_ptr(),
12854 out.c_tensor,
12855 self.c_tensor,
12856 min.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
12857 max.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
12858 ));
12859 Ok(Tensor { c_tensor: c_tensors[0] })
12860 }
12861
12862 pub fn f_clone(&self, out: &Tensor) -> Result<Tensor, TchError> {
12863 let mut c_tensors = [std::ptr::null_mut(); 1];
12864 unsafe_torch_err!(atg_clone(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
12865 Ok(Tensor { c_tensor: c_tensors[0] })
12866 }
12867
12868 pub fn f_coalesce(&self) -> Result<Tensor, TchError> {
12869 let mut c_tensors = [std::ptr::null_mut(); 1];
12870 unsafe_torch_err!(atg_coalesce(c_tensors.as_mut_ptr(), self.c_tensor));
12871 Ok(Tensor { c_tensor: c_tensors[0] })
12872 }
12873
12874 pub fn f_col2im(
12875 &self,
12876 output_size: impl IntList,
12877 kernel_size: impl IntList,
12878 dilation: impl IntList,
12879 padding: impl IntList,
12880 stride: impl IntList,
12881 ) -> Result<Tensor, TchError> {
12882 let mut c_tensors = [std::ptr::null_mut(); 1];
12883 unsafe_torch_err!(atg_col2im(
12884 c_tensors.as_mut_ptr(),
12885 self.c_tensor,
12886 output_size.as_ptr(),
12887 output_size.len_i32(),
12888 kernel_size.as_ptr(),
12889 kernel_size.len_i32(),
12890 dilation.as_ptr(),
12891 dilation.len_i32(),
12892 padding.as_ptr(),
12893 padding.len_i32(),
12894 stride.as_ptr(),
12895 stride.len_i32()
12896 ));
12897 Ok(Tensor { c_tensor: c_tensors[0] })
12898 }
12899
12900 pub fn f_col2im_out(
12901 &self,
12902 out: &Tensor,
12903 output_size: impl IntList,
12904 kernel_size: impl IntList,
12905 dilation: impl IntList,
12906 padding: impl IntList,
12907 stride: impl IntList,
12908 ) -> Result<Tensor, TchError> {
12909 let mut c_tensors = [std::ptr::null_mut(); 1];
12910 unsafe_torch_err!(atg_col2im_out(
12911 c_tensors.as_mut_ptr(),
12912 out.c_tensor,
12913 self.c_tensor,
12914 output_size.as_ptr(),
12915 output_size.len_i32(),
12916 kernel_size.as_ptr(),
12917 kernel_size.len_i32(),
12918 dilation.as_ptr(),
12919 dilation.len_i32(),
12920 padding.as_ptr(),
12921 padding.len_i32(),
12922 stride.as_ptr(),
12923 stride.len_i32()
12924 ));
12925 Ok(Tensor { c_tensor: c_tensors[0] })
12926 }
12927
12928 pub fn f_col_indices(&self) -> Result<Tensor, TchError> {
12929 let mut c_tensors = [std::ptr::null_mut(); 1];
12930 unsafe_torch_err!(atg_col_indices(c_tensors.as_mut_ptr(), self.c_tensor));
12931 Ok(Tensor { c_tensor: c_tensors[0] })
12932 }
12933
12934 pub fn f_col_indices_copy(&self) -> Result<Tensor, TchError> {
12935 let mut c_tensors = [std::ptr::null_mut(); 1];
12936 unsafe_torch_err!(atg_col_indices_copy(c_tensors.as_mut_ptr(), self.c_tensor));
12937 Ok(Tensor { c_tensor: c_tensors[0] })
12938 }
12939
12940 pub fn f_col_indices_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
12941 let mut c_tensors = [std::ptr::null_mut(); 1];
12942 unsafe_torch_err!(atg_col_indices_copy_out(
12943 c_tensors.as_mut_ptr(),
12944 out.c_tensor,
12945 self.c_tensor
12946 ));
12947 Ok(Tensor { c_tensor: c_tensors[0] })
12948 }
12949
12950 pub fn f_column_stack<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
12951 let mut c_tensors = [std::ptr::null_mut(); 1];
12952 unsafe_torch_err!(atg_column_stack(
12953 c_tensors.as_mut_ptr(),
12954 ptr_list(tensors).as_ptr(),
12955 tensors.len() as i32
12956 ));
12957 Ok(Tensor { c_tensor: c_tensors[0] })
12958 }
12959
12960 pub fn f_column_stack_out<T: Borrow<Tensor>>(
12961 out: &Tensor,
12962 tensors: &[T],
12963 ) -> Result<Tensor, TchError> {
12964 let mut c_tensors = [std::ptr::null_mut(); 1];
12965 unsafe_torch_err!(atg_column_stack_out(
12966 c_tensors.as_mut_ptr(),
12967 out.c_tensor,
12968 ptr_list(tensors).as_ptr(),
12969 tensors.len() as i32
12970 ));
12971 Ok(Tensor { c_tensor: c_tensors[0] })
12972 }
12973
12974 pub fn f_combinations(&self, r: i64, with_replacement: bool) -> Result<Tensor, TchError> {
12975 let mut c_tensors = [std::ptr::null_mut(); 1];
12976 unsafe_torch_err!(atg_combinations(
12977 c_tensors.as_mut_ptr(),
12978 self.c_tensor,
12979 r,
12980 if with_replacement { 1 } else { 0 }
12981 ));
12982 Ok(Tensor { c_tensor: c_tensors[0] })
12983 }
12984
12985 pub fn f_complex(real: &Tensor, imag: &Tensor) -> Result<Tensor, TchError> {
12986 let mut c_tensors = [std::ptr::null_mut(); 1];
12987 unsafe_torch_err!(atg_complex(c_tensors.as_mut_ptr(), real.c_tensor, imag.c_tensor));
12988 Ok(Tensor { c_tensor: c_tensors[0] })
12989 }
12990
12991 pub fn f_complex_out(out: &Tensor, real: &Tensor, imag: &Tensor) -> Result<Tensor, TchError> {
12992 let mut c_tensors = [std::ptr::null_mut(); 1];
12993 unsafe_torch_err!(atg_complex_out(
12994 c_tensors.as_mut_ptr(),
12995 out.c_tensor,
12996 real.c_tensor,
12997 imag.c_tensor
12998 ));
12999 Ok(Tensor { c_tensor: c_tensors[0] })
13000 }
13001
13002 pub fn f_concat<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Result<Tensor, TchError> {
13003 let mut c_tensors = [std::ptr::null_mut(); 1];
13004 unsafe_torch_err!(atg_concat(
13005 c_tensors.as_mut_ptr(),
13006 ptr_list(tensors).as_ptr(),
13007 tensors.len() as i32,
13008 dim
13009 ));
13010 Ok(Tensor { c_tensor: c_tensors[0] })
13011 }
13012
13013 pub fn f_concat_out<T: Borrow<Tensor>>(
13014 out: &Tensor,
13015 tensors: &[T],
13016 dim: i64,
13017 ) -> Result<Tensor, TchError> {
13018 let mut c_tensors = [std::ptr::null_mut(); 1];
13019 unsafe_torch_err!(atg_concat_out(
13020 c_tensors.as_mut_ptr(),
13021 out.c_tensor,
13022 ptr_list(tensors).as_ptr(),
13023 tensors.len() as i32,
13024 dim
13025 ));
13026 Ok(Tensor { c_tensor: c_tensors[0] })
13027 }
13028
13029 pub fn f_concatenate<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Result<Tensor, TchError> {
13030 let mut c_tensors = [std::ptr::null_mut(); 1];
13031 unsafe_torch_err!(atg_concatenate(
13032 c_tensors.as_mut_ptr(),
13033 ptr_list(tensors).as_ptr(),
13034 tensors.len() as i32,
13035 dim
13036 ));
13037 Ok(Tensor { c_tensor: c_tensors[0] })
13038 }
13039
13040 pub fn f_concatenate_out<T: Borrow<Tensor>>(
13041 out: &Tensor,
13042 tensors: &[T],
13043 dim: i64,
13044 ) -> Result<Tensor, TchError> {
13045 let mut c_tensors = [std::ptr::null_mut(); 1];
13046 unsafe_torch_err!(atg_concatenate_out(
13047 c_tensors.as_mut_ptr(),
13048 out.c_tensor,
13049 ptr_list(tensors).as_ptr(),
13050 tensors.len() as i32,
13051 dim
13052 ));
13053 Ok(Tensor { c_tensor: c_tensors[0] })
13054 }
13055
13056 pub fn f_conj(&self) -> Result<Tensor, TchError> {
13057 let mut c_tensors = [std::ptr::null_mut(); 1];
13058 unsafe_torch_err!(atg_conj(c_tensors.as_mut_ptr(), self.c_tensor));
13059 Ok(Tensor { c_tensor: c_tensors[0] })
13060 }
13061
13062 pub fn f_conj_physical(&self) -> Result<Tensor, TchError> {
13063 let mut c_tensors = [std::ptr::null_mut(); 1];
13064 unsafe_torch_err!(atg_conj_physical(c_tensors.as_mut_ptr(), self.c_tensor));
13065 Ok(Tensor { c_tensor: c_tensors[0] })
13066 }
13067
13068 pub fn f_conj_physical_(&mut self) -> Result<Tensor, TchError> {
13069 let mut c_tensors = [std::ptr::null_mut(); 1];
13070 unsafe_torch_err!(atg_conj_physical_(c_tensors.as_mut_ptr(), self.c_tensor));
13071 Ok(Tensor { c_tensor: c_tensors[0] })
13072 }
13073
13074 pub fn f_conj_physical_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
13075 let mut c_tensors = [std::ptr::null_mut(); 1];
13076 unsafe_torch_err!(atg_conj_physical_out(
13077 c_tensors.as_mut_ptr(),
13078 out.c_tensor,
13079 self.c_tensor
13080 ));
13081 Ok(Tensor { c_tensor: c_tensors[0] })
13082 }
13083
13084 pub fn f_constant_pad_nd(&self, pad: impl IntList) -> Result<Tensor, TchError> {
13085 let mut c_tensors = [std::ptr::null_mut(); 1];
13086 unsafe_torch_err!(atg_constant_pad_nd(
13087 c_tensors.as_mut_ptr(),
13088 self.c_tensor,
13089 pad.as_ptr(),
13090 pad.len_i32()
13091 ));
13092 Ok(Tensor { c_tensor: c_tensors[0] })
13093 }
13094
13095 pub fn f_constant_pad_nd_out(
13096 &self,
13097 out: &Tensor,
13098 pad: impl IntList,
13099 ) -> Result<Tensor, TchError> {
13100 let mut c_tensors = [std::ptr::null_mut(); 1];
13101 unsafe_torch_err!(atg_constant_pad_nd_out(
13102 c_tensors.as_mut_ptr(),
13103 out.c_tensor,
13104 self.c_tensor,
13105 pad.as_ptr(),
13106 pad.len_i32()
13107 ));
13108 Ok(Tensor { c_tensor: c_tensors[0] })
13109 }
13110
13111 pub fn f_contiguous(&self) -> Result<Tensor, TchError> {
13112 let mut c_tensors = [std::ptr::null_mut(); 1];
13113 unsafe_torch_err!(atg_contiguous(c_tensors.as_mut_ptr(), self.c_tensor));
13114 Ok(Tensor { c_tensor: c_tensors[0] })
13115 }
13116
13117 pub fn f_conv1d<T: Borrow<Tensor>>(
13118 &self,
13119 weight: &Tensor,
13120 bias: Option<T>,
13121 stride: impl IntList,
13122 padding: impl IntList,
13123 dilation: impl IntList,
13124 groups: i64,
13125 ) -> Result<Tensor, TchError> {
13126 let mut c_tensors = [std::ptr::null_mut(); 1];
13127 unsafe_torch_err!(atg_conv1d(
13128 c_tensors.as_mut_ptr(),
13129 self.c_tensor,
13130 weight.c_tensor,
13131 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
13132 stride.as_ptr(),
13133 stride.len_i32(),
13134 padding.as_ptr(),
13135 padding.len_i32(),
13136 dilation.as_ptr(),
13137 dilation.len_i32(),
13138 groups
13139 ));
13140 Ok(Tensor { c_tensor: c_tensors[0] })
13141 }
13142
13143 pub fn f_conv1d_padding<T: Borrow<Tensor>>(
13144 &self,
13145 weight: &Tensor,
13146 bias: Option<T>,
13147 stride: impl IntList,
13148 padding: &str,
13149 dilation: impl IntList,
13150 groups: i64,
13151 ) -> Result<Tensor, TchError> {
13152 let mut c_tensors = [std::ptr::null_mut(); 1];
13153 unsafe_torch_err!(atg_conv1d_padding(
13154 c_tensors.as_mut_ptr(),
13155 self.c_tensor,
13156 weight.c_tensor,
13157 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
13158 stride.as_ptr(),
13159 stride.len_i32(),
13160 padding.as_ptr(),
13161 padding.len() as i32,
13162 dilation.as_ptr(),
13163 dilation.len_i32(),
13164 groups
13165 ));
13166 Ok(Tensor { c_tensor: c_tensors[0] })
13167 }
13168
13169 pub fn f_conv2d<T: Borrow<Tensor>>(
13170 &self,
13171 weight: &Tensor,
13172 bias: Option<T>,
13173 stride: impl IntList,
13174 padding: impl IntList,
13175 dilation: impl IntList,
13176 groups: i64,
13177 ) -> Result<Tensor, TchError> {
13178 let mut c_tensors = [std::ptr::null_mut(); 1];
13179 unsafe_torch_err!(atg_conv2d(
13180 c_tensors.as_mut_ptr(),
13181 self.c_tensor,
13182 weight.c_tensor,
13183 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
13184 stride.as_ptr(),
13185 stride.len_i32(),
13186 padding.as_ptr(),
13187 padding.len_i32(),
13188 dilation.as_ptr(),
13189 dilation.len_i32(),
13190 groups
13191 ));
13192 Ok(Tensor { c_tensor: c_tensors[0] })
13193 }
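// Sketch of a plain 2-D convolution call. For an NCHW input (n, c_in, h, w) the weight has
// shape (c_out, c_in / groups, kh, kw); `stride`, `padding` and `dilation` are per-spatial-dim
// integer lists (length 2 here), and `bias` may be `None`. This assumes `&[i64]` implements the
// crate's `IntList` trait, as the generated signatures suggest.
//
//     let s: &[i64] = &[1, 1];
//     let ys = xs.f_conv2d(&weight, Some(&bias), s, s, s, 1)?;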
13194
13195 pub fn f_conv2d_padding<T: Borrow<Tensor>>(
13196 &self,
13197 weight: &Tensor,
13198 bias: Option<T>,
13199 stride: impl IntList,
13200 padding: &str,
13201 dilation: impl IntList,
13202 groups: i64,
13203 ) -> Result<Tensor, TchError> {
13204 let mut c_tensors = [std::ptr::null_mut(); 1];
13205 unsafe_torch_err!(atg_conv2d_padding(
13206 c_tensors.as_mut_ptr(),
13207 self.c_tensor,
13208 weight.c_tensor,
13209 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
13210 stride.as_ptr(),
13211 stride.len_i32(),
13212 padding.as_ptr(),
13213 padding.len() as i32,
13214 dilation.as_ptr(),
13215 dilation.len_i32(),
13216 groups
13217 ));
13218 Ok(Tensor { c_tensor: c_tensors[0] })
13219 }
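// Sketch: the `_padding` variants take the padding mode as a string (typically "same" or
// "valid"), forwarded to the C API as a pointer/length pair; the remaining arguments match
// `f_conv2d`. Same `IntList` assumption as in the previous sketch.
//
//     let s: &[i64] = &[1, 1];
//     let ys = xs.f_conv2d_padding(&weight, None::<Tensor>, s, "same", s, 1)?;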
13220
13221 pub fn f_conv3d<T: Borrow<Tensor>>(
13222 &self,
13223 weight: &Tensor,
13224 bias: Option<T>,
13225 stride: impl IntList,
13226 padding: impl IntList,
13227 dilation: impl IntList,
13228 groups: i64,
13229 ) -> Result<Tensor, TchError> {
13230 let mut c_tensors = [std::ptr::null_mut(); 1];
13231 unsafe_torch_err!(atg_conv3d(
13232 c_tensors.as_mut_ptr(),
13233 self.c_tensor,
13234 weight.c_tensor,
13235 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
13236 stride.as_ptr(),
13237 stride.len_i32(),
13238 padding.as_ptr(),
13239 padding.len_i32(),
13240 dilation.as_ptr(),
13241 dilation.len_i32(),
13242 groups
13243 ));
13244 Ok(Tensor { c_tensor: c_tensors[0] })
13245 }
13246
13247 pub fn f_conv3d_padding<T: Borrow<Tensor>>(
13248 &self,
13249 weight: &Tensor,
13250 bias: Option<T>,
13251 stride: impl IntList,
13252 padding: &str,
13253 dilation: impl IntList,
13254 groups: i64,
13255 ) -> Result<Tensor, TchError> {
13256 let mut c_tensors = [std::ptr::null_mut(); 1];
13257 unsafe_torch_err!(atg_conv3d_padding(
13258 c_tensors.as_mut_ptr(),
13259 self.c_tensor,
13260 weight.c_tensor,
13261 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
13262 stride.as_ptr(),
13263 stride.len_i32(),
13264 padding.as_ptr(),
13265 padding.len() as i32,
13266 dilation.as_ptr(),
13267 dilation.len_i32(),
13268 groups
13269 ));
13270 Ok(Tensor { c_tensor: c_tensors[0] })
13271 }
13272
13273 pub fn f_conv_depthwise3d<T: Borrow<Tensor>>(
13274 &self,
13275 weight: &Tensor,
13276 kernel_size: impl IntList,
13277 bias: Option<T>,
13278 stride: impl IntList,
13279 padding: impl IntList,
13280 dilation: impl IntList,
13281 ) -> Result<Tensor, TchError> {
13282 let mut c_tensors = [std::ptr::null_mut(); 1];
13283 unsafe_torch_err!(atg_conv_depthwise3d(
13284 c_tensors.as_mut_ptr(),
13285 self.c_tensor,
13286 weight.c_tensor,
13287 kernel_size.as_ptr(),
13288 kernel_size.len_i32(),
13289 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
13290 stride.as_ptr(),
13291 stride.len_i32(),
13292 padding.as_ptr(),
13293 padding.len_i32(),
13294 dilation.as_ptr(),
13295 dilation.len_i32()
13296 ));
13297 Ok(Tensor { c_tensor: c_tensors[0] })
13298 }
13299
13300 pub fn f_conv_depthwise3d_out<T: Borrow<Tensor>>(
13301 &self,
13302 out: &Tensor,
13303 weight: &Tensor,
13304 kernel_size: impl IntList,
13305 bias: Option<T>,
13306 stride: impl IntList,
13307 padding: impl IntList,
13308 dilation: impl IntList,
13309 ) -> Result<Tensor, TchError> {
13310 let mut c_tensors = [std::ptr::null_mut(); 1];
13311 unsafe_torch_err!(atg_conv_depthwise3d_out(
13312 c_tensors.as_mut_ptr(),
13313 out.c_tensor,
13314 self.c_tensor,
13315 weight.c_tensor,
13316 kernel_size.as_ptr(),
13317 kernel_size.len_i32(),
13318 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
13319 stride.as_ptr(),
13320 stride.len_i32(),
13321 padding.as_ptr(),
13322 padding.len_i32(),
13323 dilation.as_ptr(),
13324 dilation.len_i32()
13325 ));
13326 Ok(Tensor { c_tensor: c_tensors[0] })
13327 }
13328
13329 pub fn f_conv_tbc(&self, weight: &Tensor, bias: &Tensor, pad: i64) -> Result<Tensor, TchError> {
13330 let mut c_tensors = [std::ptr::null_mut(); 1];
13331 unsafe_torch_err!(atg_conv_tbc(
13332 c_tensors.as_mut_ptr(),
13333 self.c_tensor,
13334 weight.c_tensor,
13335 bias.c_tensor,
13336 pad
13337 ));
13338 Ok(Tensor { c_tensor: c_tensors[0] })
13339 }
13340
13341 pub fn f_conv_tbc_backward(
13342 &self,
13343 input: &Tensor,
13344 weight: &Tensor,
13345 bias: &Tensor,
13346 pad: i64,
13347 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
13348 let mut c_tensors = [std::ptr::null_mut(); 3];
13349 unsafe_torch_err!(atg_conv_tbc_backward(
13350 c_tensors.as_mut_ptr(),
13351 self.c_tensor,
13352 input.c_tensor,
13353 weight.c_tensor,
13354 bias.c_tensor,
13355 pad
13356 ));
13357 Ok((
13358 Tensor { c_tensor: c_tensors[0] },
13359 Tensor { c_tensor: c_tensors[1] },
13360 Tensor { c_tensor: c_tensors[2] },
13361 ))
13362 }
13363
13364 pub fn f_conv_tbc_out(
13365 &self,
13366 out: &Tensor,
13367 weight: &Tensor,
13368 bias: &Tensor,
13369 pad: i64,
13370 ) -> Result<Tensor, TchError> {
13371 let mut c_tensors = [std::ptr::null_mut(); 1];
13372 unsafe_torch_err!(atg_conv_tbc_out(
13373 c_tensors.as_mut_ptr(),
13374 out.c_tensor,
13375 self.c_tensor,
13376 weight.c_tensor,
13377 bias.c_tensor,
13378 pad
13379 ));
13380 Ok(Tensor { c_tensor: c_tensors[0] })
13381 }
13382
13383 pub fn f_conv_transpose1d<T: Borrow<Tensor>>(
13384 &self,
13385 weight: &Tensor,
13386 bias: Option<T>,
13387 stride: impl IntList,
13388 padding: impl IntList,
13389 output_padding: impl IntList,
13390 groups: i64,
13391 dilation: impl IntList,
13392 ) -> Result<Tensor, TchError> {
13393 let mut c_tensors = [std::ptr::null_mut(); 1];
13394 unsafe_torch_err!(atg_conv_transpose1d(
13395 c_tensors.as_mut_ptr(),
13396 self.c_tensor,
13397 weight.c_tensor,
13398 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
13399 stride.as_ptr(),
13400 stride.len_i32(),
13401 padding.as_ptr(),
13402 padding.len_i32(),
13403 output_padding.as_ptr(),
13404 output_padding.len_i32(),
13405 groups,
13406 dilation.as_ptr(),
13407 dilation.len_i32()
13408 ));
13409 Ok(Tensor { c_tensor: c_tensors[0] })
13410 }
13411
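    /// Hedged usage sketch for the fallible 2-D transposed-convolution binding. Only
    /// `f_conv_transpose2d` is defined in this file; the crate name `tch` and the
    /// `Tensor::randn` constructor are assumptions borrowed from upstream `tch`.
    ///
    /// ```ignore
    /// use tch::{Device, Kind, Tensor};
    /// // NCHW input; weight is laid out as (in_channels, out_channels / groups, kH, kW).
    /// let x = Tensor::randn(&[1, 8, 16, 16], (Kind::Float, Device::Cpu));
    /// let w = Tensor::randn(&[8, 4, 3, 3], (Kind::Float, Device::Cpu));
    /// // stride 2, padding 1, output_padding 1, groups 1, dilation 1
    /// let y = x.f_conv_transpose2d(&w, None::<Tensor>, &[2, 2], &[1, 1], &[1, 1], 1, &[1, 1])?;
    /// // Expected shape for this configuration: (1, 4, 32, 32).
    /// ```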
13412 pub fn f_conv_transpose2d<T: Borrow<Tensor>>(
13413 &self,
13414 weight: &Tensor,
13415 bias: Option<T>,
13416 stride: impl IntList,
13417 padding: impl IntList,
13418 output_padding: impl IntList,
13419 groups: i64,
13420 dilation: impl IntList,
13421 ) -> Result<Tensor, TchError> {
13422 let mut c_tensors = [std::ptr::null_mut(); 1];
13423 unsafe_torch_err!(atg_conv_transpose2d(
13424 c_tensors.as_mut_ptr(),
13425 self.c_tensor,
13426 weight.c_tensor,
13427 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
13428 stride.as_ptr(),
13429 stride.len_i32(),
13430 padding.as_ptr(),
13431 padding.len_i32(),
13432 output_padding.as_ptr(),
13433 output_padding.len_i32(),
13434 groups,
13435 dilation.as_ptr(),
13436 dilation.len_i32()
13437 ));
13438 Ok(Tensor { c_tensor: c_tensors[0] })
13439 }
13440
13441 pub fn f_conv_transpose3d<T: Borrow<Tensor>>(
13442 &self,
13443 weight: &Tensor,
13444 bias: Option<T>,
13445 stride: impl IntList,
13446 padding: impl IntList,
13447 output_padding: impl IntList,
13448 groups: i64,
13449 dilation: impl IntList,
13450 ) -> Result<Tensor, TchError> {
13451 let mut c_tensors = [std::ptr::null_mut(); 1];
13452 unsafe_torch_err!(atg_conv_transpose3d(
13453 c_tensors.as_mut_ptr(),
13454 self.c_tensor,
13455 weight.c_tensor,
13456 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
13457 stride.as_ptr(),
13458 stride.len_i32(),
13459 padding.as_ptr(),
13460 padding.len_i32(),
13461 output_padding.as_ptr(),
13462 output_padding.len_i32(),
13463 groups,
13464 dilation.as_ptr(),
13465 dilation.len_i32()
13466 ));
13467 Ok(Tensor { c_tensor: c_tensors[0] })
13468 }
13469
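    /// Hedged sketch of the generic convolution entry point: `transposed` selects the
    /// transposed variant, and `output_padding` only matters in that case. The crate
    /// name `tch` and `Tensor::randn` are assumptions from upstream `tch`.
    ///
    /// ```ignore
    /// use tch::{Device, Kind, Tensor};
    /// let x = Tensor::randn(&[1, 3, 32, 32], (Kind::Float, Device::Cpu));
    /// let w = Tensor::randn(&[16, 3, 3, 3], (Kind::Float, Device::Cpu));
    /// // Plain (non-transposed) 3x3 convolution, stride 1, padding 1.
    /// let y = x.f_convolution(&w, None::<Tensor>, &[1, 1], &[1, 1], &[1, 1], false, &[0, 0], 1)?;
    /// // Same-padding setup, so y is expected to be (1, 16, 32, 32).
    /// ```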
13470 pub fn f_convolution<T: Borrow<Tensor>>(
13471 &self,
13472 weight: &Tensor,
13473 bias: Option<T>,
13474 stride: impl IntList,
13475 padding: impl IntList,
13476 dilation: impl IntList,
13477 transposed: bool,
13478 output_padding: impl IntList,
13479 groups: i64,
13480 ) -> Result<Tensor, TchError> {
13481 let mut c_tensors = [std::ptr::null_mut(); 1];
13482 unsafe_torch_err!(atg_convolution(
13483 c_tensors.as_mut_ptr(),
13484 self.c_tensor,
13485 weight.c_tensor,
13486 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
13487 stride.as_ptr(),
13488 stride.len_i32(),
13489 padding.as_ptr(),
13490 padding.len_i32(),
13491 dilation.as_ptr(),
13492 dilation.len_i32(),
13493 if transposed { 1 } else { 0 },
13494 output_padding.as_ptr(),
13495 output_padding.len_i32(),
13496 groups
13497 ));
13498 Ok(Tensor { c_tensor: c_tensors[0] })
13499 }
13500
13501 pub fn f_convolution_out<T: Borrow<Tensor>>(
13502 &self,
13503 out: &Tensor,
13504 weight: &Tensor,
13505 bias: Option<T>,
13506 stride: impl IntList,
13507 padding: impl IntList,
13508 dilation: impl IntList,
13509 transposed: bool,
13510 output_padding: impl IntList,
13511 groups: i64,
13512 ) -> Result<Tensor, TchError> {
13513 let mut c_tensors = [std::ptr::null_mut(); 1];
13514 unsafe_torch_err!(atg_convolution_out(
13515 c_tensors.as_mut_ptr(),
13516 out.c_tensor,
13517 self.c_tensor,
13518 weight.c_tensor,
13519 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
13520 stride.as_ptr(),
13521 stride.len_i32(),
13522 padding.as_ptr(),
13523 padding.len_i32(),
13524 dilation.as_ptr(),
13525 dilation.len_i32(),
13526 if transposed { 1 } else { 0 },
13527 output_padding.as_ptr(),
13528 output_padding.len_i32(),
13529 groups
13530 ));
13531 Ok(Tensor { c_tensor: c_tensors[0] })
13532 }
13533
13534 pub fn f_convolution_overrideable<T: Borrow<Tensor>>(
13535 &self,
13536 weight: &Tensor,
13537 bias: Option<T>,
13538 stride: impl IntList,
13539 padding: impl IntList,
13540 dilation: impl IntList,
13541 transposed: bool,
13542 output_padding: impl IntList,
13543 groups: i64,
13544 ) -> Result<Tensor, TchError> {
13545 let mut c_tensors = [std::ptr::null_mut(); 1];
13546 unsafe_torch_err!(atg_convolution_overrideable(
13547 c_tensors.as_mut_ptr(),
13548 self.c_tensor,
13549 weight.c_tensor,
13550 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
13551 stride.as_ptr(),
13552 stride.len_i32(),
13553 padding.as_ptr(),
13554 padding.len_i32(),
13555 dilation.as_ptr(),
13556 dilation.len_i32(),
13557 if transposed { 1 } else { 0 },
13558 output_padding.as_ptr(),
13559 output_padding.len_i32(),
13560 groups
13561 ));
13562 Ok(Tensor { c_tensor: c_tensors[0] })
13563 }
13564
13565 pub fn f_convolution_overrideable_out<T: Borrow<Tensor>>(
13566 &self,
13567 out: &Tensor,
13568 weight: &Tensor,
13569 bias: Option<T>,
13570 stride: impl IntList,
13571 padding: impl IntList,
13572 dilation: impl IntList,
13573 transposed: bool,
13574 output_padding: impl IntList,
13575 groups: i64,
13576 ) -> Result<Tensor, TchError> {
13577 let mut c_tensors = [std::ptr::null_mut(); 1];
13578 unsafe_torch_err!(atg_convolution_overrideable_out(
13579 c_tensors.as_mut_ptr(),
13580 out.c_tensor,
13581 self.c_tensor,
13582 weight.c_tensor,
13583 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
13584 stride.as_ptr(),
13585 stride.len_i32(),
13586 padding.as_ptr(),
13587 padding.len_i32(),
13588 dilation.as_ptr(),
13589 dilation.len_i32(),
13590 if transposed { 1 } else { 0 },
13591 output_padding.as_ptr(),
13592 output_padding.len_i32(),
13593 groups
13594 ));
13595 Ok(Tensor { c_tensor: c_tensors[0] })
13596 }
13597
13598 pub fn f_copy_sparse_to_sparse(
13599 &self,
13600 src: &Tensor,
13601 non_blocking: bool,
13602 ) -> Result<Tensor, TchError> {
13603 let mut c_tensors = [std::ptr::null_mut(); 1];
13604 unsafe_torch_err!(atg_copy_sparse_to_sparse(
13605 c_tensors.as_mut_ptr(),
13606 self.c_tensor,
13607 src.c_tensor,
13608 if non_blocking { 1 } else { 0 }
13609 ));
13610 Ok(Tensor { c_tensor: c_tensors[0] })
13611 }
13612
13613 pub fn f_copy_sparse_to_sparse_(
13614 &mut self,
13615 src: &Tensor,
13616 non_blocking: bool,
13617 ) -> Result<Tensor, TchError> {
13618 let mut c_tensors = [std::ptr::null_mut(); 1];
13619 unsafe_torch_err!(atg_copy_sparse_to_sparse_(
13620 c_tensors.as_mut_ptr(),
13621 self.c_tensor,
13622 src.c_tensor,
13623 if non_blocking { 1 } else { 0 }
13624 ));
13625 Ok(Tensor { c_tensor: c_tensors[0] })
13626 }
13627
13628 pub fn f_copy_sparse_to_sparse_out(
13629 &self,
13630 out: &Tensor,
13631 src: &Tensor,
13632 non_blocking: bool,
13633 ) -> Result<Tensor, TchError> {
13634 let mut c_tensors = [std::ptr::null_mut(); 1];
13635 unsafe_torch_err!(atg_copy_sparse_to_sparse_out(
13636 c_tensors.as_mut_ptr(),
13637 out.c_tensor,
13638 self.c_tensor,
13639 src.c_tensor,
13640 if non_blocking { 1 } else { 0 }
13641 ));
13642 Ok(Tensor { c_tensor: c_tensors[0] })
13643 }
13644
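    /// Hedged sketch: `copysign` combines the magnitude of `self` with the sign of
    /// `other`, element-wise, following ATen's semantics. `Tensor::from_slice` and the
    /// crate name `tch` are assumptions from upstream `tch`.
    ///
    /// ```ignore
    /// use tch::Tensor;
    /// let a = Tensor::from_slice(&[1.0f32, -2.0, 3.0]);
    /// let b = Tensor::from_slice(&[-1.0f32, 1.0, -1.0]);
    /// let c = a.f_copysign(&b)?; // expected values: [-1.0, 2.0, -3.0]
    /// ```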
13645 pub fn f_copysign(&self, other: &Tensor) -> Result<Tensor, TchError> {
13646 let mut c_tensors = [std::ptr::null_mut(); 1];
13647 unsafe_torch_err!(atg_copysign(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
13648 Ok(Tensor { c_tensor: c_tensors[0] })
13649 }
13650
13651 pub fn f_copysign_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
13652 let mut c_tensors = [std::ptr::null_mut(); 1];
13653 unsafe_torch_err!(atg_copysign_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
13654 Ok(Tensor { c_tensor: c_tensors[0] })
13655 }
13656
13657 pub fn f_copysign_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
13658 let mut c_tensors = [std::ptr::null_mut(); 1];
13659 unsafe_torch_err!(atg_copysign_out(
13660 c_tensors.as_mut_ptr(),
13661 out.c_tensor,
13662 self.c_tensor,
13663 other.c_tensor
13664 ));
13665 Ok(Tensor { c_tensor: c_tensors[0] })
13666 }
13667
13668 pub fn f_copysign_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
13669 let mut c_tensors = [std::ptr::null_mut(); 1];
13670 unsafe_torch_err!(atg_copysign_scalar(
13671 c_tensors.as_mut_ptr(),
13672 self.c_tensor,
13673 other.into().c_scalar
13674 ));
13675 Ok(Tensor { c_tensor: c_tensors[0] })
13676 }
13677
13678 pub fn f_copysign_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
13679 let mut c_tensors = [std::ptr::null_mut(); 1];
13680 unsafe_torch_err!(atg_copysign_scalar_(
13681 c_tensors.as_mut_ptr(),
13682 self.c_tensor,
13683 other.into().c_scalar
13684 ));
13685 Ok(Tensor { c_tensor: c_tensors[0] })
13686 }
13687
13688 pub fn f_copysign_scalar_out<S: Into<Scalar>>(
13689 &self,
13690 out: &Tensor,
13691 other: S,
13692 ) -> Result<Tensor, TchError> {
13693 let mut c_tensors = [std::ptr::null_mut(); 1];
13694 unsafe_torch_err!(atg_copysign_scalar_out(
13695 c_tensors.as_mut_ptr(),
13696 out.c_tensor,
13697 self.c_tensor,
13698 other.into().c_scalar
13699 ));
13700 Ok(Tensor { c_tensor: c_tensors[0] })
13701 }
13702
13703 pub fn f_corrcoef(&self) -> Result<Tensor, TchError> {
13704 let mut c_tensors = [std::ptr::null_mut(); 1];
13705 unsafe_torch_err!(atg_corrcoef(c_tensors.as_mut_ptr(), self.c_tensor));
13706 Ok(Tensor { c_tensor: c_tensors[0] })
13707 }
13708
13709 pub fn f_cos(&self) -> Result<Tensor, TchError> {
13710 let mut c_tensors = [std::ptr::null_mut(); 1];
13711 unsafe_torch_err!(atg_cos(c_tensors.as_mut_ptr(), self.c_tensor));
13712 Ok(Tensor { c_tensor: c_tensors[0] })
13713 }
13714
13715 pub fn f_cos_(&mut self) -> Result<Tensor, TchError> {
13716 let mut c_tensors = [std::ptr::null_mut(); 1];
13717 unsafe_torch_err!(atg_cos_(c_tensors.as_mut_ptr(), self.c_tensor));
13718 Ok(Tensor { c_tensor: c_tensors[0] })
13719 }
13720
13721 pub fn f_cos_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
13722 let mut c_tensors = [std::ptr::null_mut(); 1];
13723 unsafe_torch_err!(atg_cos_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
13724 Ok(Tensor { c_tensor: c_tensors[0] })
13725 }
13726
13727 pub fn f_cosh(&self) -> Result<Tensor, TchError> {
13728 let mut c_tensors = [std::ptr::null_mut(); 1];
13729 unsafe_torch_err!(atg_cosh(c_tensors.as_mut_ptr(), self.c_tensor));
13730 Ok(Tensor { c_tensor: c_tensors[0] })
13731 }
13732
13733 pub fn f_cosh_(&mut self) -> Result<Tensor, TchError> {
13734 let mut c_tensors = [std::ptr::null_mut(); 1];
13735 unsafe_torch_err!(atg_cosh_(c_tensors.as_mut_ptr(), self.c_tensor));
13736 Ok(Tensor { c_tensor: c_tensors[0] })
13737 }
13738
13739 pub fn f_cosh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
13740 let mut c_tensors = [std::ptr::null_mut(); 1];
13741 unsafe_torch_err!(atg_cosh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
13742 Ok(Tensor { c_tensor: c_tensors[0] })
13743 }
13744
13745 pub fn f_cosine_embedding_loss(
13746 input1: &Tensor,
13747 input2: &Tensor,
13748 target: &Tensor,
13749 margin: f64,
13750 reduction: crate::Reduction,
13751 ) -> Result<Tensor, TchError> {
13752 let mut c_tensors = [std::ptr::null_mut(); 1];
13753 unsafe_torch_err!(atg_cosine_embedding_loss(
13754 c_tensors.as_mut_ptr(),
13755 input1.c_tensor,
13756 input2.c_tensor,
13757 target.c_tensor,
13758 margin,
13759 reduction.to_int()
13760 ));
13761 Ok(Tensor { c_tensor: c_tensors[0] })
13762 }
13763
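    /// Hedged sketch: cosine similarity between `x1` and `x2` along `dim`, with `eps`
    /// guarding against division by zero. Note this is an associated function rather
    /// than a method on `self`; `Tensor::randn` is assumed from upstream `tch`.
    ///
    /// ```ignore
    /// use tch::{Device, Kind, Tensor};
    /// let a = Tensor::randn(&[4, 128], (Kind::Float, Device::Cpu));
    /// let b = Tensor::randn(&[4, 128], (Kind::Float, Device::Cpu));
    /// // One similarity value per row, shape (4,).
    /// let sim = Tensor::f_cosine_similarity(&a, &b, 1, 1e-8)?;
    /// ```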
13764 pub fn f_cosine_similarity(
13765 x1: &Tensor,
13766 x2: &Tensor,
13767 dim: i64,
13768 eps: f64,
13769 ) -> Result<Tensor, TchError> {
13770 let mut c_tensors = [std::ptr::null_mut(); 1];
13771 unsafe_torch_err!(atg_cosine_similarity(
13772 c_tensors.as_mut_ptr(),
13773 x1.c_tensor,
13774 x2.c_tensor,
13775 dim,
13776 eps
13777 ));
13778 Ok(Tensor { c_tensor: c_tensors[0] })
13779 }
13780
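    /// Hedged sketch: counts non-zero elements, over the whole tensor when `dim` is
    /// `None` or along one dimension otherwise (the C call receives a sentinel value
    /// plus an "is none" flag, as built below). `Tensor::from_slice` is assumed from
    /// upstream `tch`.
    ///
    /// ```ignore
    /// use tch::Tensor;
    /// let t = Tensor::from_slice(&[0i64, 3, 0, 7]);
    /// let total = t.f_count_nonzero(None::<i64>)?; // scalar tensor, expected value 2
    /// let along0 = t.f_count_nonzero(0)?;          // counting along dim 0 of the 1-D tensor
    /// ```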
13781 pub fn f_count_nonzero(&self, dim: impl Into<Option<i64>>) -> Result<Tensor, TchError> {
13782 let dim = dim.into();
13783 let mut c_tensors = [std::ptr::null_mut(); 1];
13784 unsafe_torch_err!(atg_count_nonzero(
13785 c_tensors.as_mut_ptr(),
13786 self.c_tensor,
13787 dim.unwrap_or(0i64),
13788 dim.is_none() as i8
13789 ));
13790 Ok(Tensor { c_tensor: c_tensors[0] })
13791 }
13792
13793 pub fn f_count_nonzero_dim_intlist(&self, dim: impl IntList) -> Result<Tensor, TchError> {
13794 let mut c_tensors = [std::ptr::null_mut(); 1];
13795 unsafe_torch_err!(atg_count_nonzero_dim_intlist(
13796 c_tensors.as_mut_ptr(),
13797 self.c_tensor,
13798 dim.as_ptr(),
13799 dim.len_i32()
13800 ));
13801 Ok(Tensor { c_tensor: c_tensors[0] })
13802 }
13803
13804 pub fn f_count_nonzero_dim_intlist_out(
13805 &self,
13806 out: &Tensor,
13807 dim: impl IntList,
13808 ) -> Result<Tensor, TchError> {
13809 let mut c_tensors = [std::ptr::null_mut(); 1];
13810 unsafe_torch_err!(atg_count_nonzero_dim_intlist_out(
13811 c_tensors.as_mut_ptr(),
13812 out.c_tensor,
13813 self.c_tensor,
13814 dim.as_ptr(),
13815 dim.len_i32()
13816 ));
13817 Ok(Tensor { c_tensor: c_tensors[0] })
13818 }
13819
13820 pub fn f_count_nonzero_out(
13821 &self,
13822 out: &Tensor,
13823 dim: impl Into<Option<i64>>,
13824 ) -> Result<Tensor, TchError> {
13825 let dim = dim.into();
13826 let mut c_tensors = [std::ptr::null_mut(); 1];
13827 unsafe_torch_err!(atg_count_nonzero_out(
13828 c_tensors.as_mut_ptr(),
13829 out.c_tensor,
13830 self.c_tensor,
13831 dim.unwrap_or(0i64),
13832 dim.is_none() as i8
13833 ));
13834 Ok(Tensor { c_tensor: c_tensors[0] })
13835 }
13836
13837 pub fn f_cov<T: Borrow<Tensor>>(
13838 &self,
13839 correction: i64,
13840 fweights: Option<T>,
13841 aweights: Option<T>,
13842 ) -> Result<Tensor, TchError> {
13843 let mut c_tensors = [std::ptr::null_mut(); 1];
13844 unsafe_torch_err!(atg_cov(
13845 c_tensors.as_mut_ptr(),
13846 self.c_tensor,
13847 correction,
13848 fweights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
13849 aweights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
13850 ));
13851 Ok(Tensor { c_tensor: c_tensors[0] })
13852 }
13853
13854 pub fn f_cross(&self, other: &Tensor, dim: impl Into<Option<i64>>) -> Result<Tensor, TchError> {
13855 let dim = dim.into();
13856 let mut c_tensors = [std::ptr::null_mut(); 1];
13857 unsafe_torch_err!(atg_cross(
13858 c_tensors.as_mut_ptr(),
13859 self.c_tensor,
13860 other.c_tensor,
13861 dim.unwrap_or(0i64),
13862 dim.is_none() as i8
13863 ));
13864 Ok(Tensor { c_tensor: c_tensors[0] })
13865 }
13866
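    /// Hedged sketch: fused log-softmax plus negative-log-likelihood loss over raw
    /// logits with class-index targets; `ignore_index` skips entries and
    /// `label_smoothing` blends in a uniform distribution. Constructors and the
    /// `Reduction::Mean` spelling are assumptions from upstream `tch`.
    ///
    /// ```ignore
    /// use tch::{Device, Kind, Reduction, Tensor};
    /// let logits = Tensor::randn(&[4, 10], (Kind::Float, Device::Cpu));
    /// let target = Tensor::from_slice(&[1i64, 0, 3, 9]);
    /// let loss = logits.f_cross_entropy_loss::<Tensor>(&target, None, Reduction::Mean, -100, 0.0)?;
    /// ```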
13867 pub fn f_cross_entropy_loss<T: Borrow<Tensor>>(
13868 &self,
13869 target: &Tensor,
13870 weight: Option<T>,
13871 reduction: crate::Reduction,
13872 ignore_index: i64,
13873 label_smoothing: f64,
13874 ) -> Result<Tensor, TchError> {
13875 let mut c_tensors = [std::ptr::null_mut(); 1];
13876 unsafe_torch_err!(atg_cross_entropy_loss(
13877 c_tensors.as_mut_ptr(),
13878 self.c_tensor,
13879 target.c_tensor,
13880 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
13881 reduction.to_int(),
13882 ignore_index,
13883 label_smoothing
13884 ));
13885 Ok(Tensor { c_tensor: c_tensors[0] })
13886 }
13887
13888 pub fn f_cross_out(
13889 &self,
13890 out: &Tensor,
13891 other: &Tensor,
13892 dim: impl Into<Option<i64>>,
13893 ) -> Result<Tensor, TchError> {
13894 let dim = dim.into();
13895 let mut c_tensors = [std::ptr::null_mut(); 1];
13896 unsafe_torch_err!(atg_cross_out(
13897 c_tensors.as_mut_ptr(),
13898 out.c_tensor,
13899 self.c_tensor,
13900 other.c_tensor,
13901 dim.unwrap_or(0i64),
13902 dim.is_none() as i8
13903 ));
13904 Ok(Tensor { c_tensor: c_tensors[0] })
13905 }
13906
13907 pub fn f_crow_indices(&self) -> Result<Tensor, TchError> {
13908 let mut c_tensors = [std::ptr::null_mut(); 1];
13909 unsafe_torch_err!(atg_crow_indices(c_tensors.as_mut_ptr(), self.c_tensor));
13910 Ok(Tensor { c_tensor: c_tensors[0] })
13911 }
13912
13913 pub fn f_crow_indices_copy(&self) -> Result<Tensor, TchError> {
13914 let mut c_tensors = [std::ptr::null_mut(); 1];
13915 unsafe_torch_err!(atg_crow_indices_copy(c_tensors.as_mut_ptr(), self.c_tensor));
13916 Ok(Tensor { c_tensor: c_tensors[0] })
13917 }
13918
13919 pub fn f_crow_indices_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
13920 let mut c_tensors = [std::ptr::null_mut(); 1];
13921 unsafe_torch_err!(atg_crow_indices_copy_out(
13922 c_tensors.as_mut_ptr(),
13923 out.c_tensor,
13924 self.c_tensor
13925 ));
13926 Ok(Tensor { c_tensor: c_tensors[0] })
13927 }
13928
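    /// Hedged sketch: CTC loss over `log_probs` shaped (T, N, C) (log-probabilities,
    /// e.g. from a log-softmax), integer `targets`, and per-sample input/target
    /// lengths; `blank` is the blank-label index. Constructors are assumptions from
    /// upstream `tch`; only the `f_*` calls are defined in this file.
    ///
    /// ```ignore
    /// use tch::{Device, Kind, Reduction, Tensor};
    /// let log_probs = Tensor::randn(&[50, 2, 20], (Kind::Float, Device::Cpu))
    ///     .f_log_softmax(2, Kind::Float)?;
    /// let targets = Tensor::from_slice(&[1i64, 2, 3, 4, 5, 6]).f_reshape(&[2, 3])?;
    /// let loss = Tensor::f_ctc_loss(
    ///     &log_probs, &targets, &[50i64, 50], &[3i64, 3], 0, Reduction::Mean, true)?;
    /// ```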
13929 pub fn f_ctc_loss(
13930 log_probs: &Tensor,
13931 targets: &Tensor,
13932 input_lengths: impl IntList,
13933 target_lengths: impl IntList,
13934 blank: i64,
13935 reduction: crate::Reduction,
13936 zero_infinity: bool,
13937 ) -> Result<Tensor, TchError> {
13938 let mut c_tensors = [std::ptr::null_mut(); 1];
13939 unsafe_torch_err!(atg_ctc_loss(
13940 c_tensors.as_mut_ptr(),
13941 log_probs.c_tensor,
13942 targets.c_tensor,
13943 input_lengths.as_ptr(),
13944 input_lengths.len_i32(),
13945 target_lengths.as_ptr(),
13946 target_lengths.len_i32(),
13947 blank,
13948 reduction.to_int(),
13949 if zero_infinity { 1 } else { 0 }
13950 ));
13951 Ok(Tensor { c_tensor: c_tensors[0] })
13952 }
13953
13954 pub fn f_ctc_loss_tensor(
13955 log_probs: &Tensor,
13956 targets: &Tensor,
13957 input_lengths: &Tensor,
13958 target_lengths: &Tensor,
13959 blank: i64,
13960 reduction: crate::Reduction,
13961 zero_infinity: bool,
13962 ) -> Result<Tensor, TchError> {
13963 let mut c_tensors = [std::ptr::null_mut(); 1];
13964 unsafe_torch_err!(atg_ctc_loss_tensor(
13965 c_tensors.as_mut_ptr(),
13966 log_probs.c_tensor,
13967 targets.c_tensor,
13968 input_lengths.c_tensor,
13969 target_lengths.c_tensor,
13970 blank,
13971 reduction.to_int(),
13972 if zero_infinity { 1 } else { 0 }
13973 ));
13974 Ok(Tensor { c_tensor: c_tensors[0] })
13975 }
13976
13977 pub fn f_cudnn_affine_grid_generator(
13978 theta: &Tensor,
13979 n: i64,
13980 c: i64,
13981 h: i64,
13982 w: i64,
13983 ) -> Result<Tensor, TchError> {
13984 let mut c_tensors = [std::ptr::null_mut(); 1];
13985 unsafe_torch_err!(atg_cudnn_affine_grid_generator(
13986 c_tensors.as_mut_ptr(),
13987 theta.c_tensor,
13988 n,
13989 c,
13990 h,
13991 w
13992 ));
13993 Ok(Tensor { c_tensor: c_tensors[0] })
13994 }
13995
13996 pub fn f_cudnn_affine_grid_generator_backward(
13997 grad: &Tensor,
13998 n: i64,
13999 c: i64,
14000 h: i64,
14001 w: i64,
14002 ) -> Result<Tensor, TchError> {
14003 let mut c_tensors = [std::ptr::null_mut(); 1];
14004 unsafe_torch_err!(atg_cudnn_affine_grid_generator_backward(
14005 c_tensors.as_mut_ptr(),
14006 grad.c_tensor,
14007 n,
14008 c,
14009 h,
14010 w
14011 ));
14012 Ok(Tensor { c_tensor: c_tensors[0] })
14013 }
14014
14015 pub fn f_cudnn_affine_grid_generator_backward_out(
14016 out: &Tensor,
14017 grad: &Tensor,
14018 n: i64,
14019 c: i64,
14020 h: i64,
14021 w: i64,
14022 ) -> Result<Tensor, TchError> {
14023 let mut c_tensors = [std::ptr::null_mut(); 1];
14024 unsafe_torch_err!(atg_cudnn_affine_grid_generator_backward_out(
14025 c_tensors.as_mut_ptr(),
14026 out.c_tensor,
14027 grad.c_tensor,
14028 n,
14029 c,
14030 h,
14031 w
14032 ));
14033 Ok(Tensor { c_tensor: c_tensors[0] })
14034 }
14035
14036 pub fn f_cudnn_affine_grid_generator_out(
14037 out: &Tensor,
14038 theta: &Tensor,
14039 n: i64,
14040 c: i64,
14041 h: i64,
14042 w: i64,
14043 ) -> Result<Tensor, TchError> {
14044 let mut c_tensors = [std::ptr::null_mut(); 1];
14045 unsafe_torch_err!(atg_cudnn_affine_grid_generator_out(
14046 c_tensors.as_mut_ptr(),
14047 out.c_tensor,
14048 theta.c_tensor,
14049 n,
14050 c,
14051 h,
14052 w
14053 ));
14054 Ok(Tensor { c_tensor: c_tensors[0] })
14055 }
14056
14057 pub fn f_cudnn_batch_norm<T: Borrow<Tensor>>(
14058 &self,
14059 weight: &Tensor,
14060 bias: Option<T>,
14061 running_mean: Option<T>,
14062 running_var: Option<T>,
14063 training: bool,
14064 exponential_average_factor: f64,
14065 epsilon: f64,
14066 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
14067 let mut c_tensors = [std::ptr::null_mut(); 4];
14068 unsafe_torch_err!(atg_cudnn_batch_norm(
14069 c_tensors.as_mut_ptr(),
14070 self.c_tensor,
14071 weight.c_tensor,
14072 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14073 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14074 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14075 if training { 1 } else { 0 },
14076 exponential_average_factor,
14077 epsilon
14078 ));
14079 Ok((
14080 Tensor { c_tensor: c_tensors[0] },
14081 Tensor { c_tensor: c_tensors[1] },
14082 Tensor { c_tensor: c_tensors[2] },
14083 Tensor { c_tensor: c_tensors[3] },
14084 ))
14085 }
14086
14087 pub fn f_cudnn_batch_norm_backward<T: Borrow<Tensor>>(
14088 &self,
14089 grad_output: &Tensor,
14090 weight: &Tensor,
14091 running_mean: Option<T>,
14092 running_var: Option<T>,
14093 save_mean: Option<T>,
14094 save_var: Option<T>,
14095 epsilon: f64,
14096 reservespace: &Tensor,
14097 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
14098 let mut c_tensors = [std::ptr::null_mut(); 3];
14099 unsafe_torch_err!(atg_cudnn_batch_norm_backward(
14100 c_tensors.as_mut_ptr(),
14101 self.c_tensor,
14102 grad_output.c_tensor,
14103 weight.c_tensor,
14104 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14105 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14106 save_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14107 save_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14108 epsilon,
14109 reservespace.c_tensor
14110 ));
14111 Ok((
14112 Tensor { c_tensor: c_tensors[0] },
14113 Tensor { c_tensor: c_tensors[1] },
14114 Tensor { c_tensor: c_tensors[2] },
14115 ))
14116 }
14117
14118 pub fn f_cudnn_batch_norm_backward_out<T: Borrow<Tensor>>(
14119 &self,
14120 out0: &Tensor,
14121 out1: &Tensor,
14122 out2: &Tensor,
14123 grad_output: &Tensor,
14124 weight: &Tensor,
14125 running_mean: Option<T>,
14126 running_var: Option<T>,
14127 save_mean: Option<T>,
14128 save_var: Option<T>,
14129 epsilon: f64,
14130 reservespace: &Tensor,
14131 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
14132 let mut c_tensors = [std::ptr::null_mut(); 3];
14133 unsafe_torch_err!(atg_cudnn_batch_norm_backward_out(
14134 c_tensors.as_mut_ptr(),
14135 out0.c_tensor,
14136 out1.c_tensor,
14137 out2.c_tensor,
14138 self.c_tensor,
14139 grad_output.c_tensor,
14140 weight.c_tensor,
14141 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14142 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14143 save_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14144 save_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14145 epsilon,
14146 reservespace.c_tensor
14147 ));
14148 Ok((
14149 Tensor { c_tensor: c_tensors[0] },
14150 Tensor { c_tensor: c_tensors[1] },
14151 Tensor { c_tensor: c_tensors[2] },
14152 ))
14153 }
14154
14155 pub fn f_cudnn_batch_norm_out<T: Borrow<Tensor>>(
14156 &self,
14157 out0: &Tensor,
14158 out1: &Tensor,
14159 out2: &Tensor,
14160 out3: &Tensor,
14161 weight: &Tensor,
14162 bias: Option<T>,
14163 running_mean: Option<T>,
14164 running_var: Option<T>,
14165 training: bool,
14166 exponential_average_factor: f64,
14167 epsilon: f64,
14168 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
14169 let mut c_tensors = [std::ptr::null_mut(); 4];
14170 unsafe_torch_err!(atg_cudnn_batch_norm_out(
14171 c_tensors.as_mut_ptr(),
14172 out0.c_tensor,
14173 out1.c_tensor,
14174 out2.c_tensor,
14175 out3.c_tensor,
14176 self.c_tensor,
14177 weight.c_tensor,
14178 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14179 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14180 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14181 if training { 1 } else { 0 },
14182 exponential_average_factor,
14183 epsilon
14184 ));
14185 Ok((
14186 Tensor { c_tensor: c_tensors[0] },
14187 Tensor { c_tensor: c_tensors[1] },
14188 Tensor { c_tensor: c_tensors[2] },
14189 Tensor { c_tensor: c_tensors[3] },
14190 ))
14191 }
14192
14193 pub fn f_cudnn_convolution(
14194 &self,
14195 weight: &Tensor,
14196 padding: impl IntList,
14197 stride: impl IntList,
14198 dilation: impl IntList,
14199 groups: i64,
14200 benchmark: bool,
14201 deterministic: bool,
14202 allow_tf32: bool,
14203 ) -> Result<Tensor, TchError> {
14204 let mut c_tensors = [std::ptr::null_mut(); 1];
14205 unsafe_torch_err!(atg_cudnn_convolution(
14206 c_tensors.as_mut_ptr(),
14207 self.c_tensor,
14208 weight.c_tensor,
14209 padding.as_ptr(),
14210 padding.len_i32(),
14211 stride.as_ptr(),
14212 stride.len_i32(),
14213 dilation.as_ptr(),
14214 dilation.len_i32(),
14215 groups,
14216 if benchmark { 1 } else { 0 },
14217 if deterministic { 1 } else { 0 },
14218 if allow_tf32 { 1 } else { 0 }
14219 ));
14220 Ok(Tensor { c_tensor: c_tensors[0] })
14221 }
14222
14223 pub fn f_cudnn_convolution_add_relu<T: Borrow<Tensor>, S: Into<Scalar>>(
14224 &self,
14225 weight: &Tensor,
14226 z: &Tensor,
14227 alpha: S,
14228 bias: Option<T>,
14229 stride: impl IntList,
14230 padding: impl IntList,
14231 dilation: impl IntList,
14232 groups: i64,
14233 ) -> Result<Tensor, TchError> {
14234 let mut c_tensors = [std::ptr::null_mut(); 1];
14235 unsafe_torch_err!(atg_cudnn_convolution_add_relu(
14236 c_tensors.as_mut_ptr(),
14237 self.c_tensor,
14238 weight.c_tensor,
14239 z.c_tensor,
14240 alpha.into().c_scalar,
14241 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14242 stride.as_ptr(),
14243 stride.len_i32(),
14244 padding.as_ptr(),
14245 padding.len_i32(),
14246 dilation.as_ptr(),
14247 dilation.len_i32(),
14248 groups
14249 ));
14250 Ok(Tensor { c_tensor: c_tensors[0] })
14251 }
14252
14253 pub fn f_cudnn_convolution_add_relu_out<T: Borrow<Tensor>, S: Into<Scalar>>(
14254 &self,
14255 out: &Tensor,
14256 weight: &Tensor,
14257 z: &Tensor,
14258 alpha: S,
14259 bias: Option<T>,
14260 stride: impl IntList,
14261 padding: impl IntList,
14262 dilation: impl IntList,
14263 groups: i64,
14264 ) -> Result<Tensor, TchError> {
14265 let mut c_tensors = [std::ptr::null_mut(); 1];
14266 unsafe_torch_err!(atg_cudnn_convolution_add_relu_out(
14267 c_tensors.as_mut_ptr(),
14268 out.c_tensor,
14269 self.c_tensor,
14270 weight.c_tensor,
14271 z.c_tensor,
14272 alpha.into().c_scalar,
14273 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14274 stride.as_ptr(),
14275 stride.len_i32(),
14276 padding.as_ptr(),
14277 padding.len_i32(),
14278 dilation.as_ptr(),
14279 dilation.len_i32(),
14280 groups
14281 ));
14282 Ok(Tensor { c_tensor: c_tensors[0] })
14283 }
14284
14285 pub fn f_cudnn_convolution_out(
14286 &self,
14287 out: &Tensor,
14288 weight: &Tensor,
14289 padding: impl IntList,
14290 stride: impl IntList,
14291 dilation: impl IntList,
14292 groups: i64,
14293 benchmark: bool,
14294 deterministic: bool,
14295 allow_tf32: bool,
14296 ) -> Result<Tensor, TchError> {
14297 let mut c_tensors = [std::ptr::null_mut(); 1];
14298 unsafe_torch_err!(atg_cudnn_convolution_out(
14299 c_tensors.as_mut_ptr(),
14300 out.c_tensor,
14301 self.c_tensor,
14302 weight.c_tensor,
14303 padding.as_ptr(),
14304 padding.len_i32(),
14305 stride.as_ptr(),
14306 stride.len_i32(),
14307 dilation.as_ptr(),
14308 dilation.len_i32(),
14309 groups,
14310 if benchmark { 1 } else { 0 },
14311 if deterministic { 1 } else { 0 },
14312 if allow_tf32 { 1 } else { 0 }
14313 ));
14314 Ok(Tensor { c_tensor: c_tensors[0] })
14315 }
14316
14317 pub fn f_cudnn_convolution_relu<T: Borrow<Tensor>>(
14318 &self,
14319 weight: &Tensor,
14320 bias: Option<T>,
14321 stride: impl IntList,
14322 padding: impl IntList,
14323 dilation: impl IntList,
14324 groups: i64,
14325 ) -> Result<Tensor, TchError> {
14326 let mut c_tensors = [std::ptr::null_mut(); 1];
14327 unsafe_torch_err!(atg_cudnn_convolution_relu(
14328 c_tensors.as_mut_ptr(),
14329 self.c_tensor,
14330 weight.c_tensor,
14331 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14332 stride.as_ptr(),
14333 stride.len_i32(),
14334 padding.as_ptr(),
14335 padding.len_i32(),
14336 dilation.as_ptr(),
14337 dilation.len_i32(),
14338 groups
14339 ));
14340 Ok(Tensor { c_tensor: c_tensors[0] })
14341 }
14342
14343 pub fn f_cudnn_convolution_relu_out<T: Borrow<Tensor>>(
14344 &self,
14345 out: &Tensor,
14346 weight: &Tensor,
14347 bias: Option<T>,
14348 stride: impl IntList,
14349 padding: impl IntList,
14350 dilation: impl IntList,
14351 groups: i64,
14352 ) -> Result<Tensor, TchError> {
14353 let mut c_tensors = [std::ptr::null_mut(); 1];
14354 unsafe_torch_err!(atg_cudnn_convolution_relu_out(
14355 c_tensors.as_mut_ptr(),
14356 out.c_tensor,
14357 self.c_tensor,
14358 weight.c_tensor,
14359 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14360 stride.as_ptr(),
14361 stride.len_i32(),
14362 padding.as_ptr(),
14363 padding.len_i32(),
14364 dilation.as_ptr(),
14365 dilation.len_i32(),
14366 groups
14367 ));
14368 Ok(Tensor { c_tensor: c_tensors[0] })
14369 }
14370
14371 pub fn f_cudnn_convolution_transpose(
14372 &self,
14373 weight: &Tensor,
14374 padding: impl IntList,
14375 output_padding: impl IntList,
14376 stride: impl IntList,
14377 dilation: impl IntList,
14378 groups: i64,
14379 benchmark: bool,
14380 deterministic: bool,
14381 allow_tf32: bool,
14382 ) -> Result<Tensor, TchError> {
14383 let mut c_tensors = [std::ptr::null_mut(); 1];
14384 unsafe_torch_err!(atg_cudnn_convolution_transpose(
14385 c_tensors.as_mut_ptr(),
14386 self.c_tensor,
14387 weight.c_tensor,
14388 padding.as_ptr(),
14389 padding.len_i32(),
14390 output_padding.as_ptr(),
14391 output_padding.len_i32(),
14392 stride.as_ptr(),
14393 stride.len_i32(),
14394 dilation.as_ptr(),
14395 dilation.len_i32(),
14396 groups,
14397 if benchmark { 1 } else { 0 },
14398 if deterministic { 1 } else { 0 },
14399 if allow_tf32 { 1 } else { 0 }
14400 ));
14401 Ok(Tensor { c_tensor: c_tensors[0] })
14402 }
14403
14404 pub fn f_cudnn_convolution_transpose_out(
14405 &self,
14406 out: &Tensor,
14407 weight: &Tensor,
14408 padding: impl IntList,
14409 output_padding: impl IntList,
14410 stride: impl IntList,
14411 dilation: impl IntList,
14412 groups: i64,
14413 benchmark: bool,
14414 deterministic: bool,
14415 allow_tf32: bool,
14416 ) -> Result<Tensor, TchError> {
14417 let mut c_tensors = [std::ptr::null_mut(); 1];
14418 unsafe_torch_err!(atg_cudnn_convolution_transpose_out(
14419 c_tensors.as_mut_ptr(),
14420 out.c_tensor,
14421 self.c_tensor,
14422 weight.c_tensor,
14423 padding.as_ptr(),
14424 padding.len_i32(),
14425 output_padding.as_ptr(),
14426 output_padding.len_i32(),
14427 stride.as_ptr(),
14428 stride.len_i32(),
14429 dilation.as_ptr(),
14430 dilation.len_i32(),
14431 groups,
14432 if benchmark { 1 } else { 0 },
14433 if deterministic { 1 } else { 0 },
14434 if allow_tf32 { 1 } else { 0 }
14435 ));
14436 Ok(Tensor { c_tensor: c_tensors[0] })
14437 }
14438
14439 pub fn f_cudnn_grid_sampler(&self, grid: &Tensor) -> Result<Tensor, TchError> {
14440 let mut c_tensors = [std::ptr::null_mut(); 1];
14441 unsafe_torch_err!(atg_cudnn_grid_sampler(
14442 c_tensors.as_mut_ptr(),
14443 self.c_tensor,
14444 grid.c_tensor
14445 ));
14446 Ok(Tensor { c_tensor: c_tensors[0] })
14447 }
14448
14449 pub fn f_cudnn_grid_sampler_backward(
14450 &self,
14451 grid: &Tensor,
14452 grad_output: &Tensor,
14453 ) -> Result<(Tensor, Tensor), TchError> {
14454 let mut c_tensors = [std::ptr::null_mut(); 2];
14455 unsafe_torch_err!(atg_cudnn_grid_sampler_backward(
14456 c_tensors.as_mut_ptr(),
14457 self.c_tensor,
14458 grid.c_tensor,
14459 grad_output.c_tensor
14460 ));
14461 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
14462 }
14463
14464 pub fn f_cudnn_grid_sampler_backward_out(
14465 &self,
14466 out0: &Tensor,
14467 out1: &Tensor,
14468 grid: &Tensor,
14469 grad_output: &Tensor,
14470 ) -> Result<(Tensor, Tensor), TchError> {
14471 let mut c_tensors = [std::ptr::null_mut(); 2];
14472 unsafe_torch_err!(atg_cudnn_grid_sampler_backward_out(
14473 c_tensors.as_mut_ptr(),
14474 out0.c_tensor,
14475 out1.c_tensor,
14476 self.c_tensor,
14477 grid.c_tensor,
14478 grad_output.c_tensor
14479 ));
14480 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
14481 }
14482
14483 pub fn f_cudnn_grid_sampler_out(
14484 &self,
14485 out: &Tensor,
14486 grid: &Tensor,
14487 ) -> Result<Tensor, TchError> {
14488 let mut c_tensors = [std::ptr::null_mut(); 1];
14489 unsafe_torch_err!(atg_cudnn_grid_sampler_out(
14490 c_tensors.as_mut_ptr(),
14491 out.c_tensor,
14492 self.c_tensor,
14493 grid.c_tensor
14494 ));
14495 Ok(Tensor { c_tensor: c_tensors[0] })
14496 }
14497
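    /// Hedged sketch: this is expected to report whether cuDNN could handle an
    /// operation on this tensor (roughly: cuDNN available and enabled, tensor on a
    /// CUDA device with a supported dtype); on a CPU-only build it should simply
    /// return `false`. Constructors are assumptions from upstream `tch`.
    ///
    /// ```ignore
    /// use tch::{Device, Kind, Tensor};
    /// let x = Tensor::randn(&[8, 8], (Kind::Float, Device::cuda_if_available()));
    /// let usable = x.f_cudnn_is_acceptable()?;
    /// ```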
14498 pub fn f_cudnn_is_acceptable(&self) -> Result<bool, TchError> {
14499 let return_;
14500 unsafe_torch_err!(return_ = atg_cudnn_is_acceptable(self.c_tensor));
14501 Ok(return_ != 0)
14502 }
14503
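    /// Hedged sketch: running maximum along `dim`, returning the cumulative values and
    /// the index at which each running maximum was reached. `Tensor::from_slice` is
    /// assumed from upstream `tch`.
    ///
    /// ```ignore
    /// use tch::Tensor;
    /// let t = Tensor::from_slice(&[1i64, 3, 2, 5, 4]);
    /// let (values, indices) = t.f_cummax(0)?;
    /// // expected: values = [1, 3, 3, 5, 5], indices = [0, 1, 1, 3, 3]
    /// ```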
14504 pub fn f_cummax(&self, dim: i64) -> Result<(Tensor, Tensor), TchError> {
14505 let mut c_tensors = [std::ptr::null_mut(); 2];
14506 unsafe_torch_err!(atg_cummax(c_tensors.as_mut_ptr(), self.c_tensor, dim));
14507 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
14508 }
14509
14510 pub fn f_cummax_out(
14511 &self,
14512 values: &Tensor,
14513 indices: &Tensor,
14514 dim: i64,
14515 ) -> Result<(Tensor, Tensor), TchError> {
14516 let mut c_tensors = [std::ptr::null_mut(); 2];
14517 unsafe_torch_err!(atg_cummax_out(
14518 c_tensors.as_mut_ptr(),
14519 values.c_tensor,
14520 indices.c_tensor,
14521 self.c_tensor,
14522 dim
14523 ));
14524 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
14525 }
14526
14527 pub fn f_cummaxmin_backward(
14528 &self,
14529 grad: &Tensor,
14530 indices: &Tensor,
14531 dim: i64,
14532 ) -> Result<Tensor, TchError> {
14533 let mut c_tensors = [std::ptr::null_mut(); 1];
14534 unsafe_torch_err!(atg_cummaxmin_backward(
14535 c_tensors.as_mut_ptr(),
14536 grad.c_tensor,
14537 self.c_tensor,
14538 indices.c_tensor,
14539 dim
14540 ));
14541 Ok(Tensor { c_tensor: c_tensors[0] })
14542 }
14543
14544 pub fn f_cummin(&self, dim: i64) -> Result<(Tensor, Tensor), TchError> {
14545 let mut c_tensors = [std::ptr::null_mut(); 2];
14546 unsafe_torch_err!(atg_cummin(c_tensors.as_mut_ptr(), self.c_tensor, dim));
14547 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
14548 }
14549
14550 pub fn f_cummin_out(
14551 &self,
14552 values: &Tensor,
14553 indices: &Tensor,
14554 dim: i64,
14555 ) -> Result<(Tensor, Tensor), TchError> {
14556 let mut c_tensors = [std::ptr::null_mut(); 2];
14557 unsafe_torch_err!(atg_cummin_out(
14558 c_tensors.as_mut_ptr(),
14559 values.c_tensor,
14560 indices.c_tensor,
14561 self.c_tensor,
14562 dim
14563 ));
14564 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
14565 }
14566
14567 pub fn f_cumprod(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Result<Tensor, TchError> {
14568 let mut c_tensors = [std::ptr::null_mut(); 1];
14569 unsafe_torch_err!(atg_cumprod(
14570 c_tensors.as_mut_ptr(),
14571 self.c_tensor,
14572 dim,
14573 dtype.into().map_or(-1, |s| s.c_int())
14574 ));
14575 Ok(Tensor { c_tensor: c_tensors[0] })
14576 }
14577
14578 pub fn f_cumprod_(
14579 &mut self,
14580 dim: i64,
14581 dtype: impl Into<Option<Kind>>,
14582 ) -> Result<Tensor, TchError> {
14583 let mut c_tensors = [std::ptr::null_mut(); 1];
14584 unsafe_torch_err!(atg_cumprod_(
14585 c_tensors.as_mut_ptr(),
14586 self.c_tensor,
14587 dim,
14588 dtype.into().map_or(-1, |s| s.c_int())
14589 ));
14590 Ok(Tensor { c_tensor: c_tensors[0] })
14591 }
14592
14593 pub fn f_cumprod_backward(
14594 &self,
14595 grad: &Tensor,
14596 dim: i64,
14597 output: &Tensor,
14598 ) -> Result<Tensor, TchError> {
14599 let mut c_tensors = [std::ptr::null_mut(); 1];
14600 unsafe_torch_err!(atg_cumprod_backward(
14601 c_tensors.as_mut_ptr(),
14602 grad.c_tensor,
14603 self.c_tensor,
14604 dim,
14605 output.c_tensor
14606 ));
14607 Ok(Tensor { c_tensor: c_tensors[0] })
14608 }
14609
14610 pub fn f_cumprod_out(
14611 &self,
14612 out: &Tensor,
14613 dim: i64,
14614 dtype: impl Into<Option<Kind>>,
14615 ) -> Result<Tensor, TchError> {
14616 let mut c_tensors = [std::ptr::null_mut(); 1];
14617 unsafe_torch_err!(atg_cumprod_out(
14618 c_tensors.as_mut_ptr(),
14619 out.c_tensor,
14620 self.c_tensor,
14621 dim,
14622 dtype.into().map_or(-1, |s| s.c_int())
14623 ));
14624 Ok(Tensor { c_tensor: c_tensors[0] })
14625 }
14626
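    /// Hedged sketch: cumulative sum along `dim`, optionally accumulating in `dtype`
    /// (passing `None` keeps the input dtype; the binding encodes that as `-1` below).
    /// `Tensor::from_slice` is assumed from upstream `tch`.
    ///
    /// ```ignore
    /// use tch::{Kind, Tensor};
    /// let t = Tensor::from_slice(&[1i64, 2, 3, 4]);
    /// let s = t.f_cumsum(0, Kind::Int64)?;   // expected: [1, 3, 6, 10]
    /// let u = t.f_cumsum(0, None::<Kind>)?;  // same values, original dtype
    /// ```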
14627 pub fn f_cumsum(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Result<Tensor, TchError> {
14628 let mut c_tensors = [std::ptr::null_mut(); 1];
14629 unsafe_torch_err!(atg_cumsum(
14630 c_tensors.as_mut_ptr(),
14631 self.c_tensor,
14632 dim,
14633 dtype.into().map_or(-1, |s| s.c_int())
14634 ));
14635 Ok(Tensor { c_tensor: c_tensors[0] })
14636 }
14637
14638 pub fn f_cumsum_(
14639 &mut self,
14640 dim: i64,
14641 dtype: impl Into<Option<Kind>>,
14642 ) -> Result<Tensor, TchError> {
14643 let mut c_tensors = [std::ptr::null_mut(); 1];
14644 unsafe_torch_err!(atg_cumsum_(
14645 c_tensors.as_mut_ptr(),
14646 self.c_tensor,
14647 dim,
14648 dtype.into().map_or(-1, |s| s.c_int())
14649 ));
14650 Ok(Tensor { c_tensor: c_tensors[0] })
14651 }
14652
14653 pub fn f_cumsum_out(
14654 &self,
14655 out: &Tensor,
14656 dim: i64,
14657 dtype: impl Into<Option<Kind>>,
14658 ) -> Result<Tensor, TchError> {
14659 let mut c_tensors = [std::ptr::null_mut(); 1];
14660 unsafe_torch_err!(atg_cumsum_out(
14661 c_tensors.as_mut_ptr(),
14662 out.c_tensor,
14663 self.c_tensor,
14664 dim,
14665 dtype.into().map_or(-1, |s| s.c_int())
14666 ));
14667 Ok(Tensor { c_tensor: c_tensors[0] })
14668 }
14669
14670 pub fn f_cumulative_trapezoid(y: &Tensor, dim: i64) -> Result<Tensor, TchError> {
14671 let mut c_tensors = [std::ptr::null_mut(); 1];
14672 unsafe_torch_err!(atg_cumulative_trapezoid(c_tensors.as_mut_ptr(), y.c_tensor, dim));
14673 Ok(Tensor { c_tensor: c_tensors[0] })
14674 }
14675
14676 pub fn f_cumulative_trapezoid_x(y: &Tensor, x: &Tensor, dim: i64) -> Result<Tensor, TchError> {
14677 let mut c_tensors = [std::ptr::null_mut(); 1];
14678 unsafe_torch_err!(atg_cumulative_trapezoid_x(
14679 c_tensors.as_mut_ptr(),
14680 y.c_tensor,
14681 x.c_tensor,
14682 dim
14683 ));
14684 Ok(Tensor { c_tensor: c_tensors[0] })
14685 }
14686
14687 pub fn f_data(&self) -> Result<Tensor, TchError> {
14688 let mut c_tensors = [std::ptr::null_mut(); 1];
14689 unsafe_torch_err!(atg_data(c_tensors.as_mut_ptr(), self.c_tensor));
14690 Ok(Tensor { c_tensor: c_tensors[0] })
14691 }
14692
14693 pub fn f_deg2rad(&self) -> Result<Tensor, TchError> {
14694 let mut c_tensors = [std::ptr::null_mut(); 1];
14695 unsafe_torch_err!(atg_deg2rad(c_tensors.as_mut_ptr(), self.c_tensor));
14696 Ok(Tensor { c_tensor: c_tensors[0] })
14697 }
14698
14699 pub fn f_deg2rad_(&mut self) -> Result<Tensor, TchError> {
14700 let mut c_tensors = [std::ptr::null_mut(); 1];
14701 unsafe_torch_err!(atg_deg2rad_(c_tensors.as_mut_ptr(), self.c_tensor));
14702 Ok(Tensor { c_tensor: c_tensors[0] })
14703 }
14704
14705 pub fn f_deg2rad_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
14706 let mut c_tensors = [std::ptr::null_mut(); 1];
14707 unsafe_torch_err!(atg_deg2rad_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
14708 Ok(Tensor { c_tensor: c_tensors[0] })
14709 }
14710
14711 pub fn f_dense_dim(&self) -> Result<i64, TchError> {
14712 let return_;
14713 unsafe_torch_err!(return_ = atg_dense_dim(self.c_tensor));
14714 Ok(return_)
14715 }
14716
14717 pub fn f_dequantize(&self) -> Result<Tensor, TchError> {
14718 let mut c_tensors = [std::ptr::null_mut(); 1];
14719 unsafe_torch_err!(atg_dequantize(c_tensors.as_mut_ptr(), self.c_tensor));
14720 Ok(Tensor { c_tensor: c_tensors[0] })
14721 }
14722
14723 pub fn f_dequantize_self_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
14724 let mut c_tensors = [std::ptr::null_mut(); 1];
14725 unsafe_torch_err!(atg_dequantize_self_out(
14726 c_tensors.as_mut_ptr(),
14727 out.c_tensor,
14728 self.c_tensor
14729 ));
14730 Ok(Tensor { c_tensor: c_tensors[0] })
14731 }
14732
14733 pub fn f_dequantize_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Vec<Tensor>, TchError> {
14734 let c_tensors = unsafe_torch_err!(atg_dequantize_tensors(
14735 ptr_list(tensors).as_ptr(),
14736 tensors.len() as i32
14737 ));
14738 let mut r__ = vec![];
14739 let mut i = 0;
14740 loop {
14741 let c__ = unsafe { *c_tensors.add(i) };
14742 if c__.is_null() {
14743 break;
14744 }
14745 r__.push(Tensor { c_tensor: c__ });
14746 i += 1;
14747 }
14748 unsafe { libc::free(c_tensors as *mut libc::c_void) }
14749 Ok(r__)
14750 }
14751
14752 pub fn f_dequantize_tensors_out<T: Borrow<Tensor>>(
14753 out: &[T],
14754 tensors: &[T],
14755 ) -> Result<(), TchError> {
14756 unsafe_torch_err!(atg_dequantize_tensors_out(
14757 ptr_list(out).as_ptr(),
14758 out.len() as i32,
14759 ptr_list(tensors).as_ptr(),
14760 tensors.len() as i32
14761 ));
14762 Ok(())
14763 }
14764
14765 pub fn f_det(&self) -> Result<Tensor, TchError> {
14766 let mut c_tensors = [std::ptr::null_mut(); 1];
14767 unsafe_torch_err!(atg_det(c_tensors.as_mut_ptr(), self.c_tensor));
14768 Ok(Tensor { c_tensor: c_tensors[0] })
14769 }
14770
14771 pub fn f_detach(&self) -> Result<Tensor, TchError> {
14772 let mut c_tensors = [std::ptr::null_mut(); 1];
14773 unsafe_torch_err!(atg_detach(c_tensors.as_mut_ptr(), self.c_tensor));
14774 Ok(Tensor { c_tensor: c_tensors[0] })
14775 }
14776
14777 pub fn f_detach_(&mut self) -> Result<Tensor, TchError> {
14778 let mut c_tensors = [std::ptr::null_mut(); 1];
14779 unsafe_torch_err!(atg_detach_(c_tensors.as_mut_ptr(), self.c_tensor));
14780 Ok(Tensor { c_tensor: c_tensors[0] })
14781 }
14782
14783 pub fn f_detach_copy(&self) -> Result<Tensor, TchError> {
14784 let mut c_tensors = [std::ptr::null_mut(); 1];
14785 unsafe_torch_err!(atg_detach_copy(c_tensors.as_mut_ptr(), self.c_tensor));
14786 Ok(Tensor { c_tensor: c_tensors[0] })
14787 }
14788
14789 pub fn f_detach_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
14790 let mut c_tensors = [std::ptr::null_mut(); 1];
14791 unsafe_torch_err!(atg_detach_copy_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
14792 Ok(Tensor { c_tensor: c_tensors[0] })
14793 }
14794
14795 pub fn f_diag(&self, diagonal: i64) -> Result<Tensor, TchError> {
14796 let mut c_tensors = [std::ptr::null_mut(); 1];
14797 unsafe_torch_err!(atg_diag(c_tensors.as_mut_ptr(), self.c_tensor, diagonal));
14798 Ok(Tensor { c_tensor: c_tensors[0] })
14799 }
14800
14801 pub fn f_diag_embed(&self, offset: i64, dim1: i64, dim2: i64) -> Result<Tensor, TchError> {
14802 let mut c_tensors = [std::ptr::null_mut(); 1];
14803 unsafe_torch_err!(atg_diag_embed(
14804 c_tensors.as_mut_ptr(),
14805 self.c_tensor,
14806 offset,
14807 dim1,
14808 dim2
14809 ));
14810 Ok(Tensor { c_tensor: c_tensors[0] })
14811 }
14812
14813 pub fn f_diag_embed_out(
14814 &self,
14815 out: &Tensor,
14816 offset: i64,
14817 dim1: i64,
14818 dim2: i64,
14819 ) -> Result<Tensor, TchError> {
14820 let mut c_tensors = [std::ptr::null_mut(); 1];
14821 unsafe_torch_err!(atg_diag_embed_out(
14822 c_tensors.as_mut_ptr(),
14823 out.c_tensor,
14824 self.c_tensor,
14825 offset,
14826 dim1,
14827 dim2
14828 ));
14829 Ok(Tensor { c_tensor: c_tensors[0] })
14830 }
14831
14832 pub fn f_diag_out(&self, out: &Tensor, diagonal: i64) -> Result<Tensor, TchError> {
14833 let mut c_tensors = [std::ptr::null_mut(); 1];
14834 unsafe_torch_err!(atg_diag_out(
14835 c_tensors.as_mut_ptr(),
14836 out.c_tensor,
14837 self.c_tensor,
14838 diagonal
14839 ));
14840 Ok(Tensor { c_tensor: c_tensors[0] })
14841 }
14842
14843 pub fn f_diagflat(&self, offset: i64) -> Result<Tensor, TchError> {
14844 let mut c_tensors = [std::ptr::null_mut(); 1];
14845 unsafe_torch_err!(atg_diagflat(c_tensors.as_mut_ptr(), self.c_tensor, offset));
14846 Ok(Tensor { c_tensor: c_tensors[0] })
14847 }
14848
14849 pub fn f_diagonal(&self, offset: i64, dim1: i64, dim2: i64) -> Result<Tensor, TchError> {
14850 let mut c_tensors = [std::ptr::null_mut(); 1];
14851 unsafe_torch_err!(atg_diagonal(c_tensors.as_mut_ptr(), self.c_tensor, offset, dim1, dim2));
14852 Ok(Tensor { c_tensor: c_tensors[0] })
14853 }
14854
14855 pub fn f_diagonal_backward(
14856 grad_output: &Tensor,
14857 input_sizes: impl IntList,
14858 offset: i64,
14859 dim1: i64,
14860 dim2: i64,
14861 ) -> Result<Tensor, TchError> {
14862 let mut c_tensors = [std::ptr::null_mut(); 1];
14863 unsafe_torch_err!(atg_diagonal_backward(
14864 c_tensors.as_mut_ptr(),
14865 grad_output.c_tensor,
14866 input_sizes.as_ptr(),
14867 input_sizes.len_i32(),
14868 offset,
14869 dim1,
14870 dim2
14871 ));
14872 Ok(Tensor { c_tensor: c_tensors[0] })
14873 }
14874
14875 pub fn f_diagonal_backward_out(
14876 out: &Tensor,
14877 grad_output: &Tensor,
14878 input_sizes: impl IntList,
14879 offset: i64,
14880 dim1: i64,
14881 dim2: i64,
14882 ) -> Result<Tensor, TchError> {
14883 let mut c_tensors = [std::ptr::null_mut(); 1];
14884 unsafe_torch_err!(atg_diagonal_backward_out(
14885 c_tensors.as_mut_ptr(),
14886 out.c_tensor,
14887 grad_output.c_tensor,
14888 input_sizes.as_ptr(),
14889 input_sizes.len_i32(),
14890 offset,
14891 dim1,
14892 dim2
14893 ));
14894 Ok(Tensor { c_tensor: c_tensors[0] })
14895 }
14896
14897 pub fn f_diagonal_copy(&self, offset: i64, dim1: i64, dim2: i64) -> Result<Tensor, TchError> {
14898 let mut c_tensors = [std::ptr::null_mut(); 1];
14899 unsafe_torch_err!(atg_diagonal_copy(
14900 c_tensors.as_mut_ptr(),
14901 self.c_tensor,
14902 offset,
14903 dim1,
14904 dim2
14905 ));
14906 Ok(Tensor { c_tensor: c_tensors[0] })
14907 }
14908
14909 pub fn f_diagonal_copy_out(
14910 &self,
14911 out: &Tensor,
14912 offset: i64,
14913 dim1: i64,
14914 dim2: i64,
14915 ) -> Result<Tensor, TchError> {
14916 let mut c_tensors = [std::ptr::null_mut(); 1];
14917 unsafe_torch_err!(atg_diagonal_copy_out(
14918 c_tensors.as_mut_ptr(),
14919 out.c_tensor,
14920 self.c_tensor,
14921 offset,
14922 dim1,
14923 dim2
14924 ));
14925 Ok(Tensor { c_tensor: c_tensors[0] })
14926 }
14927
14928 pub fn f_diagonal_scatter(
14929 &self,
14930 src: &Tensor,
14931 offset: i64,
14932 dim1: i64,
14933 dim2: i64,
14934 ) -> Result<Tensor, TchError> {
14935 let mut c_tensors = [std::ptr::null_mut(); 1];
14936 unsafe_torch_err!(atg_diagonal_scatter(
14937 c_tensors.as_mut_ptr(),
14938 self.c_tensor,
14939 src.c_tensor,
14940 offset,
14941 dim1,
14942 dim2
14943 ));
14944 Ok(Tensor { c_tensor: c_tensors[0] })
14945 }
14946
14947 pub fn f_diagonal_scatter_out(
14948 &self,
14949 out: &Tensor,
14950 src: &Tensor,
14951 offset: i64,
14952 dim1: i64,
14953 dim2: i64,
14954 ) -> Result<Tensor, TchError> {
14955 let mut c_tensors = [std::ptr::null_mut(); 1];
14956 unsafe_torch_err!(atg_diagonal_scatter_out(
14957 c_tensors.as_mut_ptr(),
14958 out.c_tensor,
14959 self.c_tensor,
14960 src.c_tensor,
14961 offset,
14962 dim1,
14963 dim2
14964 ));
14965 Ok(Tensor { c_tensor: c_tensors[0] })
14966 }
14967
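    /// Hedged sketch: n-th order forward difference along `dim`, with optional tensors
    /// prepended/appended before differencing (which can be used to preserve length).
    /// `Tensor::from_slice` is assumed from upstream `tch`.
    ///
    /// ```ignore
    /// use tch::Tensor;
    /// let t = Tensor::from_slice(&[1i64, 4, 9, 16]);
    /// let d1 = t.f_diff::<Tensor>(1, 0, None, None)?; // expected: [3, 5, 7]
    /// let d2 = t.f_diff::<Tensor>(2, 0, None, None)?; // expected: [2, 2]
    /// ```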
14968 pub fn f_diff<T: Borrow<Tensor>>(
14969 &self,
14970 n: i64,
14971 dim: i64,
14972 prepend: Option<T>,
14973 append: Option<T>,
14974 ) -> Result<Tensor, TchError> {
14975 let mut c_tensors = [std::ptr::null_mut(); 1];
14976 unsafe_torch_err!(atg_diff(
14977 c_tensors.as_mut_ptr(),
14978 self.c_tensor,
14979 n,
14980 dim,
14981 prepend.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
14982 append.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
14983 ));
14984 Ok(Tensor { c_tensor: c_tensors[0] })
14985 }
14986
14987 pub fn f_diff_out<T: Borrow<Tensor>>(
14988 &self,
14989 out: &Tensor,
14990 n: i64,
14991 dim: i64,
14992 prepend: Option<T>,
14993 append: Option<T>,
14994 ) -> Result<Tensor, TchError> {
14995 let mut c_tensors = [std::ptr::null_mut(); 1];
14996 unsafe_torch_err!(atg_diff_out(
14997 c_tensors.as_mut_ptr(),
14998 out.c_tensor,
14999 self.c_tensor,
15000 n,
15001 dim,
15002 prepend.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
15003 append.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
15004 ));
15005 Ok(Tensor { c_tensor: c_tensors[0] })
15006 }
15007
15008 pub fn f_digamma(&self) -> Result<Tensor, TchError> {
15009 let mut c_tensors = [std::ptr::null_mut(); 1];
15010 unsafe_torch_err!(atg_digamma(c_tensors.as_mut_ptr(), self.c_tensor));
15011 Ok(Tensor { c_tensor: c_tensors[0] })
15012 }
15013
15014 pub fn f_digamma_(&mut self) -> Result<Tensor, TchError> {
15015 let mut c_tensors = [std::ptr::null_mut(); 1];
15016 unsafe_torch_err!(atg_digamma_(c_tensors.as_mut_ptr(), self.c_tensor));
15017 Ok(Tensor { c_tensor: c_tensors[0] })
15018 }
15019
15020 pub fn f_digamma_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
15021 let mut c_tensors = [std::ptr::null_mut(); 1];
15022 unsafe_torch_err!(atg_digamma_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
15023 Ok(Tensor { c_tensor: c_tensors[0] })
15024 }
15025
15026 pub fn f_dist(&self, other: &Tensor) -> Result<Tensor, TchError> {
15027 let mut c_tensors = [std::ptr::null_mut(); 1];
15028 unsafe_torch_err!(atg_dist(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
15029 Ok(Tensor { c_tensor: c_tensors[0] })
15030 }
15031
15032 pub fn f_dist_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
15033 let mut c_tensors = [std::ptr::null_mut(); 1];
15034 unsafe_torch_err!(atg_dist_out(
15035 c_tensors.as_mut_ptr(),
15036 out.c_tensor,
15037 self.c_tensor,
15038 other.c_tensor
15039 ));
15040 Ok(Tensor { c_tensor: c_tensors[0] })
15041 }
15042
15043 pub fn f_div(&self, other: &Tensor) -> Result<Tensor, TchError> {
15044 let mut c_tensors = [std::ptr::null_mut(); 1];
15045 unsafe_torch_err!(atg_div(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
15046 Ok(Tensor { c_tensor: c_tensors[0] })
15047 }
15048
15049 pub fn f_div_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
15050 let mut c_tensors = [std::ptr::null_mut(); 1];
15051 unsafe_torch_err!(atg_div_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
15052 Ok(Tensor { c_tensor: c_tensors[0] })
15053 }
15054
15055 pub fn f_div_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
15056 let mut c_tensors = [std::ptr::null_mut(); 1];
15057 unsafe_torch_err!(atg_div_out(
15058 c_tensors.as_mut_ptr(),
15059 out.c_tensor,
15060 self.c_tensor,
15061 other.c_tensor
15062 ));
15063 Ok(Tensor { c_tensor: c_tensors[0] })
15064 }
15065
15066 pub fn f_div_out_mode(
15067 &self,
15068 out: &Tensor,
15069 other: &Tensor,
15070 rounding_mode: &str,
15071 ) -> Result<Tensor, TchError> {
15072 let mut c_tensors = [std::ptr::null_mut(); 1];
15073 unsafe_torch_err!(atg_div_out_mode(
15074 c_tensors.as_mut_ptr(),
15075 out.c_tensor,
15076 self.c_tensor,
15077 other.c_tensor,
15078 rounding_mode.as_ptr(),
15079 rounding_mode.len() as i32
15080 ));
15081 Ok(Tensor { c_tensor: c_tensors[0] })
15082 }
15083
15084 pub fn f_div_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
15085 let mut c_tensors = [std::ptr::null_mut(); 1];
15086 unsafe_torch_err!(atg_div_scalar(
15087 c_tensors.as_mut_ptr(),
15088 self.c_tensor,
15089 other.into().c_scalar
15090 ));
15091 Ok(Tensor { c_tensor: c_tensors[0] })
15092 }
15093
15094 pub fn f_div_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
15095 let mut c_tensors = [std::ptr::null_mut(); 1];
15096 unsafe_torch_err!(atg_div_scalar_(
15097 c_tensors.as_mut_ptr(),
15098 self.c_tensor,
15099 other.into().c_scalar
15100 ));
15101 Ok(Tensor { c_tensor: c_tensors[0] })
15102 }
15103
15104 pub fn f_div_scalar_mode<S: Into<Scalar>>(
15105 &self,
15106 other: S,
15107 rounding_mode: &str,
15108 ) -> Result<Tensor, TchError> {
15109 let mut c_tensors = [std::ptr::null_mut(); 1];
15110 unsafe_torch_err!(atg_div_scalar_mode(
15111 c_tensors.as_mut_ptr(),
15112 self.c_tensor,
15113 other.into().c_scalar,
15114 rounding_mode.as_ptr(),
15115 rounding_mode.len() as i32
15116 ));
15117 Ok(Tensor { c_tensor: c_tensors[0] })
15118 }
15119
15120 pub fn f_div_scalar_mode_<S: Into<Scalar>>(
15121 &mut self,
15122 other: S,
15123 rounding_mode: &str,
15124 ) -> Result<Tensor, TchError> {
15125 let mut c_tensors = [std::ptr::null_mut(); 1];
15126 unsafe_torch_err!(atg_div_scalar_mode_(
15127 c_tensors.as_mut_ptr(),
15128 self.c_tensor,
15129 other.into().c_scalar,
15130 rounding_mode.as_ptr(),
15131 rounding_mode.len() as i32
15132 ));
15133 Ok(Tensor { c_tensor: c_tensors[0] })
15134 }
15135
15136 pub fn f_div_scalar_mode_out<S: Into<Scalar>>(
15137 &self,
15138 out: &Tensor,
15139 other: S,
15140 rounding_mode: &str,
15141 ) -> Result<Tensor, TchError> {
15142 let mut c_tensors = [std::ptr::null_mut(); 1];
15143 unsafe_torch_err!(atg_div_scalar_mode_out(
15144 c_tensors.as_mut_ptr(),
15145 out.c_tensor,
15146 self.c_tensor,
15147 other.into().c_scalar,
15148 rounding_mode.as_ptr(),
15149 rounding_mode.len() as i32
15150 ));
15151 Ok(Tensor { c_tensor: c_tensors[0] })
15152 }
15153
15154 pub fn f_div_scalar_out<S: Into<Scalar>>(
15155 &self,
15156 out: &Tensor,
15157 other: S,
15158 ) -> Result<Tensor, TchError> {
15159 let mut c_tensors = [std::ptr::null_mut(); 1];
15160 unsafe_torch_err!(atg_div_scalar_out(
15161 c_tensors.as_mut_ptr(),
15162 out.c_tensor,
15163 self.c_tensor,
15164 other.into().c_scalar
15165 ));
15166 Ok(Tensor { c_tensor: c_tensors[0] })
15167 }
15168
15169 pub fn f_div_tensor_mode(
15170 &self,
15171 other: &Tensor,
15172 rounding_mode: &str,
15173 ) -> Result<Tensor, TchError> {
15174 let mut c_tensors = [std::ptr::null_mut(); 1];
15175 unsafe_torch_err!(atg_div_tensor_mode(
15176 c_tensors.as_mut_ptr(),
15177 self.c_tensor,
15178 other.c_tensor,
15179 rounding_mode.as_ptr(),
15180 rounding_mode.len() as i32
15181 ));
15182 Ok(Tensor { c_tensor: c_tensors[0] })
15183 }
15184
15185 pub fn f_div_tensor_mode_(
15186 &mut self,
15187 other: &Tensor,
15188 rounding_mode: &str,
15189 ) -> Result<Tensor, TchError> {
15190 let mut c_tensors = [std::ptr::null_mut(); 1];
15191 unsafe_torch_err!(atg_div_tensor_mode_(
15192 c_tensors.as_mut_ptr(),
15193 self.c_tensor,
15194 other.c_tensor,
15195 rounding_mode.as_ptr(),
15196 rounding_mode.len() as i32
15197 ));
15198 Ok(Tensor { c_tensor: c_tensors[0] })
15199 }
15200
15201 pub fn f_divide(&self, other: &Tensor) -> Result<Tensor, TchError> {
15202 let mut c_tensors = [std::ptr::null_mut(); 1];
15203 unsafe_torch_err!(atg_divide(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
15204 Ok(Tensor { c_tensor: c_tensors[0] })
15205 }
15206
15207 pub fn f_divide_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
15208 let mut c_tensors = [std::ptr::null_mut(); 1];
15209 unsafe_torch_err!(atg_divide_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
15210 Ok(Tensor { c_tensor: c_tensors[0] })
15211 }
15212
15213 pub fn f_divide_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
15214 let mut c_tensors = [std::ptr::null_mut(); 1];
15215 unsafe_torch_err!(atg_divide_out(
15216 c_tensors.as_mut_ptr(),
15217 out.c_tensor,
15218 self.c_tensor,
15219 other.c_tensor
15220 ));
15221 Ok(Tensor { c_tensor: c_tensors[0] })
15222 }
15223
15224 pub fn f_divide_out_mode(
15225 &self,
15226 out: &Tensor,
15227 other: &Tensor,
15228 rounding_mode: &str,
15229 ) -> Result<Tensor, TchError> {
15230 let mut c_tensors = [std::ptr::null_mut(); 1];
15231 unsafe_torch_err!(atg_divide_out_mode(
15232 c_tensors.as_mut_ptr(),
15233 out.c_tensor,
15234 self.c_tensor,
15235 other.c_tensor,
15236 rounding_mode.as_ptr(),
15237 rounding_mode.len() as i32
15238 ));
15239 Ok(Tensor { c_tensor: c_tensors[0] })
15240 }
15241
15242 pub fn f_divide_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
15243 let mut c_tensors = [std::ptr::null_mut(); 1];
15244 unsafe_torch_err!(atg_divide_scalar(
15245 c_tensors.as_mut_ptr(),
15246 self.c_tensor,
15247 other.into().c_scalar
15248 ));
15249 Ok(Tensor { c_tensor: c_tensors[0] })
15250 }
15251
15252 pub fn f_divide_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
15253 let mut c_tensors = [std::ptr::null_mut(); 1];
15254 unsafe_torch_err!(atg_divide_scalar_(
15255 c_tensors.as_mut_ptr(),
15256 self.c_tensor,
15257 other.into().c_scalar
15258 ));
15259 Ok(Tensor { c_tensor: c_tensors[0] })
15260 }
15261
15262 pub fn f_divide_scalar_mode<S: Into<Scalar>>(
15263 &self,
15264 other: S,
15265 rounding_mode: &str,
15266 ) -> Result<Tensor, TchError> {
15267 let mut c_tensors = [std::ptr::null_mut(); 1];
15268 unsafe_torch_err!(atg_divide_scalar_mode(
15269 c_tensors.as_mut_ptr(),
15270 self.c_tensor,
15271 other.into().c_scalar,
15272 rounding_mode.as_ptr(),
15273 rounding_mode.len() as i32
15274 ));
15275 Ok(Tensor { c_tensor: c_tensors[0] })
15276 }
15277
15278 pub fn f_divide_scalar_mode_<S: Into<Scalar>>(
15279 &mut self,
15280 other: S,
15281 rounding_mode: &str,
15282 ) -> Result<Tensor, TchError> {
15283 let mut c_tensors = [std::ptr::null_mut(); 1];
15284 unsafe_torch_err!(atg_divide_scalar_mode_(
15285 c_tensors.as_mut_ptr(),
15286 self.c_tensor,
15287 other.into().c_scalar,
15288 rounding_mode.as_ptr(),
15289 rounding_mode.len() as i32
15290 ));
15291 Ok(Tensor { c_tensor: c_tensors[0] })
15292 }
15293
15294 pub fn f_divide_tensor_mode(
15295 &self,
15296 other: &Tensor,
15297 rounding_mode: &str,
15298 ) -> Result<Tensor, TchError> {
15299 let mut c_tensors = [std::ptr::null_mut(); 1];
15300 unsafe_torch_err!(atg_divide_tensor_mode(
15301 c_tensors.as_mut_ptr(),
15302 self.c_tensor,
15303 other.c_tensor,
15304 rounding_mode.as_ptr(),
15305 rounding_mode.len() as i32
15306 ));
15307 Ok(Tensor { c_tensor: c_tensors[0] })
15308 }
15309
15310 pub fn f_divide_tensor_mode_(
15311 &mut self,
15312 other: &Tensor,
15313 rounding_mode: &str,
15314 ) -> Result<Tensor, TchError> {
15315 let mut c_tensors = [std::ptr::null_mut(); 1];
15316 unsafe_torch_err!(atg_divide_tensor_mode_(
15317 c_tensors.as_mut_ptr(),
15318 self.c_tensor,
15319 other.c_tensor,
15320 rounding_mode.as_ptr(),
15321 rounding_mode.len() as i32
15322 ));
15323 Ok(Tensor { c_tensor: c_tensors[0] })
15324 }
15325
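    /// Dot product of two 1-D tensors. A minimal sketch, not compiled here, assuming
    /// `a` and `b` are 1-D tensors of equal length in a `Result`-returning context:
    ///
    /// ```ignore
    /// let d = a.f_dot(&b)?; // 0-dimensional tensor holding the inner product
    /// ```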
15326 pub fn f_dot(&self, tensor: &Tensor) -> Result<Tensor, TchError> {
15327 let mut c_tensors = [std::ptr::null_mut(); 1];
15328 unsafe_torch_err!(atg_dot(c_tensors.as_mut_ptr(), self.c_tensor, tensor.c_tensor));
15329 Ok(Tensor { c_tensor: c_tensors[0] })
15330 }
15331
15332 pub fn f_dot_out(&self, out: &Tensor, tensor: &Tensor) -> Result<Tensor, TchError> {
15333 let mut c_tensors = [std::ptr::null_mut(); 1];
15334 unsafe_torch_err!(atg_dot_out(
15335 c_tensors.as_mut_ptr(),
15336 out.c_tensor,
15337 self.c_tensor,
15338 tensor.c_tensor
15339 ));
15340 Ok(Tensor { c_tensor: c_tensors[0] })
15341 }
15342
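    /// Dropout: with `train` set, each element is zeroed with probability `p` and the
    /// survivors are rescaled by 1/(1-p); with `train` unset the input passes through
    /// unchanged. A minimal sketch, not compiled here, building the input with `f_eye`
    /// from this file:
    ///
    /// ```ignore
    /// let x = Tensor::f_eye(4, (Kind::Float, Device::Cpu))?;
    /// let y = x.f_dropout(0.5, true)?;
    /// ```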
15343 pub fn f_dropout(&self, p: f64, train: bool) -> Result<Tensor, TchError> {
15344 let mut c_tensors = [std::ptr::null_mut(); 1];
15345 unsafe_torch_err!(atg_dropout(
15346 c_tensors.as_mut_ptr(),
15347 self.c_tensor,
15348 p,
15349 if train { 1 } else { 0 }
15350 ));
15351 Ok(Tensor { c_tensor: c_tensors[0] })
15352 }
15353
15354 pub fn f_dropout_(&mut self, p: f64, train: bool) -> Result<Tensor, TchError> {
15355 let mut c_tensors = [std::ptr::null_mut(); 1];
15356 unsafe_torch_err!(atg_dropout_(
15357 c_tensors.as_mut_ptr(),
15358 self.c_tensor,
15359 p,
15360 if train { 1 } else { 0 }
15361 ));
15362 Ok(Tensor { c_tensor: c_tensors[0] })
15363 }
15364
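    /// Splits `self` depthwise (along dimension 2) into `sections` equally sized
    /// pieces, mirroring torch.dsplit; the input is expected to have at least three
    /// dimensions. A minimal sketch, not compiled here, assuming `x` has shape
    /// [2, 2, 6]:
    ///
    /// ```ignore
    /// let parts = x.f_dsplit(3)?; // Vec<Tensor>, each of shape [2, 2, 2]
    /// ```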
15365 pub fn f_dsplit(&self, sections: i64) -> Result<Vec<Tensor>, TchError> {
15366 let c_tensors = unsafe_torch_err!(atg_dsplit(self.c_tensor, sections));
15367 let mut r__ = vec![];
15368 let mut i = 0;
15369 loop {
15370 let c__ = unsafe { *c_tensors.add(i) };
15371 if c__.is_null() {
15372 break;
15373 }
15374 r__.push(Tensor { c_tensor: c__ });
15375 i += 1;
15376 }
15377 unsafe { libc::free(c_tensors as *mut libc::c_void) }
15378 Ok(r__)
15379 }
15380
15381 pub fn f_dsplit_array(&self, indices: impl IntList) -> Result<Vec<Tensor>, TchError> {
15382 let c_tensors =
15383 unsafe_torch_err!(atg_dsplit_array(self.c_tensor, indices.as_ptr(), indices.len_i32()));
15384 let mut r__ = vec![];
15385 let mut i = 0;
15386 loop {
15387 let c__ = unsafe { *c_tensors.add(i) };
15388 if c__.is_null() {
15389 break;
15390 }
15391 r__.push(Tensor { c_tensor: c__ });
15392 i += 1;
15393 }
15394 unsafe { libc::free(c_tensors as *mut libc::c_void) }
15395 Ok(r__)
15396 }
15397
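    /// Stacks a slice of tensors along the third axis, promoting 1-D and 2-D inputs to
    /// 3-D first, as torch.dstack does. A minimal sketch, not compiled here, assuming
    /// `a` and `b` are tensors of matching shape:
    ///
    /// ```ignore
    /// let stacked = Tensor::f_dstack(&[&a, &b])?;
    /// ```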
15398 pub fn f_dstack<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
15399 let mut c_tensors = [std::ptr::null_mut(); 1];
15400 unsafe_torch_err!(atg_dstack(
15401 c_tensors.as_mut_ptr(),
15402 ptr_list(tensors).as_ptr(),
15403 tensors.len() as i32
15404 ));
15405 Ok(Tensor { c_tensor: c_tensors[0] })
15406 }
15407
15408 pub fn f_dstack_out<T: Borrow<Tensor>>(
15409 out: &Tensor,
15410 tensors: &[T],
15411 ) -> Result<Tensor, TchError> {
15412 let mut c_tensors = [std::ptr::null_mut(); 1];
15413 unsafe_torch_err!(atg_dstack_out(
15414 c_tensors.as_mut_ptr(),
15415 out.c_tensor,
15416 ptr_list(tensors).as_ptr(),
15417 tensors.len() as i32
15418 ));
15419 Ok(Tensor { c_tensor: c_tensors[0] })
15420 }
15421
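    /// Einstein summation over `tensors`, driven by the `equation` string; `path` can
    /// pin the contraction order and may be left unset. A minimal sketch, not compiled
    /// here, assuming `a` and `b` are 2-D with compatible inner dimensions and that
    /// `None::<&[i64]>` satisfies the `IntListOption` bound:
    ///
    /// ```ignore
    /// let c = Tensor::f_einsum("ij,jk->ik", &[&a, &b], None::<&[i64]>)?; // matrix product
    /// ```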
15422 pub fn f_einsum<T: Borrow<Tensor>>(
15423 equation: &str,
15424 tensors: &[T],
15425 path: impl IntListOption,
15426 ) -> Result<Tensor, TchError> {
15427 let mut c_tensors = [std::ptr::null_mut(); 1];
15428 unsafe_torch_err!(atg_einsum(
15429 c_tensors.as_mut_ptr(),
15430 equation.as_ptr(),
15431 equation.len() as i32,
15432 ptr_list(tensors).as_ptr(),
15433 tensors.len() as i32,
15434 path.as_ptr(),
15435 path.len_i32()
15436 ));
15437 Ok(Tensor { c_tensor: c_tensors[0] })
15438 }
15439
15440 pub fn f_elu(&self) -> Result<Tensor, TchError> {
15441 let mut c_tensors = [std::ptr::null_mut(); 1];
15442 unsafe_torch_err!(atg_elu(c_tensors.as_mut_ptr(), self.c_tensor));
15443 Ok(Tensor { c_tensor: c_tensors[0] })
15444 }
15445
15446 pub fn f_elu_(&mut self) -> Result<Tensor, TchError> {
15447 let mut c_tensors = [std::ptr::null_mut(); 1];
15448 unsafe_torch_err!(atg_elu_(c_tensors.as_mut_ptr(), self.c_tensor));
15449 Ok(Tensor { c_tensor: c_tensors[0] })
15450 }
15451
15452 pub fn f_elu_backward<S: Into<Scalar>>(
15453 grad_output: &Tensor,
15454 alpha: S,
15455 scale: S,
15456 input_scale: S,
15457 is_result: bool,
15458 self_or_result: &Tensor,
15459 ) -> Result<Tensor, TchError> {
15460 let mut c_tensors = [std::ptr::null_mut(); 1];
15461 unsafe_torch_err!(atg_elu_backward(
15462 c_tensors.as_mut_ptr(),
15463 grad_output.c_tensor,
15464 alpha.into().c_scalar,
15465 scale.into().c_scalar,
15466 input_scale.into().c_scalar,
15467 if is_result { 1 } else { 0 },
15468 self_or_result.c_tensor
15469 ));
15470 Ok(Tensor { c_tensor: c_tensors[0] })
15471 }
15472
15473 pub fn f_elu_backward_grad_input<S: Into<Scalar>>(
15474 grad_input: &Tensor,
15475 grad_output: &Tensor,
15476 alpha: S,
15477 scale: S,
15478 input_scale: S,
15479 is_result: bool,
15480 self_or_result: &Tensor,
15481 ) -> Result<Tensor, TchError> {
15482 let mut c_tensors = [std::ptr::null_mut(); 1];
15483 unsafe_torch_err!(atg_elu_backward_grad_input(
15484 c_tensors.as_mut_ptr(),
15485 grad_input.c_tensor,
15486 grad_output.c_tensor,
15487 alpha.into().c_scalar,
15488 scale.into().c_scalar,
15489 input_scale.into().c_scalar,
15490 if is_result { 1 } else { 0 },
15491 self_or_result.c_tensor
15492 ));
15493 Ok(Tensor { c_tensor: c_tensors[0] })
15494 }
15495
15496 pub fn f_elu_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
15497 let mut c_tensors = [std::ptr::null_mut(); 1];
15498 unsafe_torch_err!(atg_elu_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
15499 Ok(Tensor { c_tensor: c_tensors[0] })
15500 }
15501
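    /// Embedding lookup: selects rows of `weight` (num_embeddings x embedding_dim) at
    /// the positions given by the integer tensor `indices`; `padding_idx` marks a row
    /// that receives no gradient, and `sparse` requests a sparse gradient for the
    /// weight. A minimal sketch, not compiled here, assuming `weight` and `indices`
    /// already exist (indices of an integer kind, -1 for no padding row):
    ///
    /// ```ignore
    /// let emb = Tensor::f_embedding(&weight, &indices, -1, false, false)?;
    /// ```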
15502 pub fn f_embedding(
15503 weight: &Tensor,
15504 indices: &Tensor,
15505 padding_idx: i64,
15506 scale_grad_by_freq: bool,
15507 sparse: bool,
15508 ) -> Result<Tensor, TchError> {
15509 let mut c_tensors = [std::ptr::null_mut(); 1];
15510 unsafe_torch_err!(atg_embedding(
15511 c_tensors.as_mut_ptr(),
15512 weight.c_tensor,
15513 indices.c_tensor,
15514 padding_idx,
15515 if scale_grad_by_freq { 1 } else { 0 },
15516 if sparse { 1 } else { 0 }
15517 ));
15518 Ok(Tensor { c_tensor: c_tensors[0] })
15519 }
15520
15521 pub fn f_embedding_backward(
15522 grad: &Tensor,
15523 indices: &Tensor,
15524 num_weights: i64,
15525 padding_idx: i64,
15526 scale_grad_by_freq: bool,
15527 sparse: bool,
15528 ) -> Result<Tensor, TchError> {
15529 let mut c_tensors = [std::ptr::null_mut(); 1];
15530 unsafe_torch_err!(atg_embedding_backward(
15531 c_tensors.as_mut_ptr(),
15532 grad.c_tensor,
15533 indices.c_tensor,
15534 num_weights,
15535 padding_idx,
15536 if scale_grad_by_freq { 1 } else { 0 },
15537 if sparse { 1 } else { 0 }
15538 ));
15539 Ok(Tensor { c_tensor: c_tensors[0] })
15540 }
15541
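    /// Fused embedding lookup plus per-bag reduction: `offsets` marks where each bag
    /// starts inside the flat `indices`, and `mode` selects the reduction (0 = sum,
    /// 1 = mean, 2 = max in upstream PyTorch). Returns the reduced output together
    /// with three bookkeeping tensors (offset2bag, bag_size, max_indices).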
15542 pub fn f_embedding_bag<T: Borrow<Tensor>>(
15543 weight: &Tensor,
15544 indices: &Tensor,
15545 offsets: &Tensor,
15546 scale_grad_by_freq: bool,
15547 mode: i64,
15548 sparse: bool,
15549 per_sample_weights: Option<T>,
15550 include_last_offset: bool,
15551 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
15552 let mut c_tensors = [std::ptr::null_mut(); 4];
15553 unsafe_torch_err!(atg_embedding_bag(
15554 c_tensors.as_mut_ptr(),
15555 weight.c_tensor,
15556 indices.c_tensor,
15557 offsets.c_tensor,
15558 if scale_grad_by_freq { 1 } else { 0 },
15559 mode,
15560 if sparse { 1 } else { 0 },
15561 per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
15562 if include_last_offset { 1 } else { 0 }
15563 ));
15564 Ok((
15565 Tensor { c_tensor: c_tensors[0] },
15566 Tensor { c_tensor: c_tensors[1] },
15567 Tensor { c_tensor: c_tensors[2] },
15568 Tensor { c_tensor: c_tensors[3] },
15569 ))
15570 }
15571
15572 pub fn f_embedding_bag_padding_idx<T: Borrow<Tensor>>(
15573 weight: &Tensor,
15574 indices: &Tensor,
15575 offsets: &Tensor,
15576 scale_grad_by_freq: bool,
15577 mode: i64,
15578 sparse: bool,
15579 per_sample_weights: Option<T>,
15580 include_last_offset: bool,
15581 padding_idx: impl Into<Option<i64>>,
15582 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
15583 let padding_idx = padding_idx.into();
15584 let mut c_tensors = [std::ptr::null_mut(); 4];
15585 unsafe_torch_err!(atg_embedding_bag_padding_idx(
15586 c_tensors.as_mut_ptr(),
15587 weight.c_tensor,
15588 indices.c_tensor,
15589 offsets.c_tensor,
15590 if scale_grad_by_freq { 1 } else { 0 },
15591 mode,
15592 if sparse { 1 } else { 0 },
15593 per_sample_weights.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
15594 if include_last_offset { 1 } else { 0 },
15595 padding_idx.unwrap_or(0i64),
15596 padding_idx.is_none() as i8
15597 ));
15598 Ok((
15599 Tensor { c_tensor: c_tensors[0] },
15600 Tensor { c_tensor: c_tensors[1] },
15601 Tensor { c_tensor: c_tensors[2] },
15602 Tensor { c_tensor: c_tensors[3] },
15603 ))
15604 }
15605
15606 pub fn f_embedding_dense_backward(
15607 grad_output: &Tensor,
15608 indices: &Tensor,
15609 num_weights: i64,
15610 padding_idx: i64,
15611 scale_grad_by_freq: bool,
15612 ) -> Result<Tensor, TchError> {
15613 let mut c_tensors = [std::ptr::null_mut(); 1];
15614 unsafe_torch_err!(atg_embedding_dense_backward(
15615 c_tensors.as_mut_ptr(),
15616 grad_output.c_tensor,
15617 indices.c_tensor,
15618 num_weights,
15619 padding_idx,
15620 if scale_grad_by_freq { 1 } else { 0 }
15621 ));
15622 Ok(Tensor { c_tensor: c_tensors[0] })
15623 }
15624
15625 pub fn f_embedding_dense_backward_out(
15626 out: &Tensor,
15627 grad_output: &Tensor,
15628 indices: &Tensor,
15629 num_weights: i64,
15630 padding_idx: i64,
15631 scale_grad_by_freq: bool,
15632 ) -> Result<Tensor, TchError> {
15633 let mut c_tensors = [std::ptr::null_mut(); 1];
15634 unsafe_torch_err!(atg_embedding_dense_backward_out(
15635 c_tensors.as_mut_ptr(),
15636 out.c_tensor,
15637 grad_output.c_tensor,
15638 indices.c_tensor,
15639 num_weights,
15640 padding_idx,
15641 if scale_grad_by_freq { 1 } else { 0 }
15642 ));
15643 Ok(Tensor { c_tensor: c_tensors[0] })
15644 }
15645
15646 pub fn f_embedding_out(
15647 out: &Tensor,
15648 weight: &Tensor,
15649 indices: &Tensor,
15650 padding_idx: i64,
15651 scale_grad_by_freq: bool,
15652 sparse: bool,
15653 ) -> Result<Tensor, TchError> {
15654 let mut c_tensors = [std::ptr::null_mut(); 1];
15655 unsafe_torch_err!(atg_embedding_out(
15656 c_tensors.as_mut_ptr(),
15657 out.c_tensor,
15658 weight.c_tensor,
15659 indices.c_tensor,
15660 padding_idx,
15661 if scale_grad_by_freq { 1 } else { 0 },
15662 if sparse { 1 } else { 0 }
15663 ));
15664 Ok(Tensor { c_tensor: c_tensors[0] })
15665 }
15666
15667 pub fn f_embedding_renorm(
15668 &self,
15669 indices: &Tensor,
15670 max_norm: f64,
15671 norm_type: f64,
15672 ) -> Result<Tensor, TchError> {
15673 let mut c_tensors = [std::ptr::null_mut(); 1];
15674 unsafe_torch_err!(atg_embedding_renorm(
15675 c_tensors.as_mut_ptr(),
15676 self.c_tensor,
15677 indices.c_tensor,
15678 max_norm,
15679 norm_type
15680 ));
15681 Ok(Tensor { c_tensor: c_tensors[0] })
15682 }
15683
15684 pub fn f_embedding_renorm_(
15685 &mut self,
15686 indices: &Tensor,
15687 max_norm: f64,
15688 norm_type: f64,
15689 ) -> Result<Tensor, TchError> {
15690 let mut c_tensors = [std::ptr::null_mut(); 1];
15691 unsafe_torch_err!(atg_embedding_renorm_(
15692 c_tensors.as_mut_ptr(),
15693 self.c_tensor,
15694 indices.c_tensor,
15695 max_norm,
15696 norm_type
15697 ));
15698 Ok(Tensor { c_tensor: c_tensors[0] })
15699 }
15700
15701 pub fn f_embedding_renorm_out(
15702 &self,
15703 out: &Tensor,
15704 indices: &Tensor,
15705 max_norm: f64,
15706 norm_type: f64,
15707 ) -> Result<Tensor, TchError> {
15708 let mut c_tensors = [std::ptr::null_mut(); 1];
15709 unsafe_torch_err!(atg_embedding_renorm_out(
15710 c_tensors.as_mut_ptr(),
15711 out.c_tensor,
15712 self.c_tensor,
15713 indices.c_tensor,
15714 max_norm,
15715 norm_type
15716 ));
15717 Ok(Tensor { c_tensor: c_tensors[0] })
15718 }
15719
15720 pub fn f_embedding_sparse_backward(
15721 grad: &Tensor,
15722 indices: &Tensor,
15723 num_weights: i64,
15724 padding_idx: i64,
15725 scale_grad_by_freq: bool,
15726 ) -> Result<Tensor, TchError> {
15727 let mut c_tensors = [std::ptr::null_mut(); 1];
15728 unsafe_torch_err!(atg_embedding_sparse_backward(
15729 c_tensors.as_mut_ptr(),
15730 grad.c_tensor,
15731 indices.c_tensor,
15732 num_weights,
15733 padding_idx,
15734 if scale_grad_by_freq { 1 } else { 0 }
15735 ));
15736 Ok(Tensor { c_tensor: c_tensors[0] })
15737 }
15738
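    /// Allocates an uninitialized tensor of the given `size` with the requested kind
    /// and device; the contents are arbitrary, so every element should be written
    /// before it is read. A minimal sketch, not compiled here:
    ///
    /// ```ignore
    /// let buf = Tensor::f_empty(&[2, 3], (Kind::Float, Device::Cpu))?;
    /// ```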
15739 pub fn f_empty(size: impl IntList, options: (Kind, Device)) -> Result<Tensor, TchError> {
15740 let mut c_tensors = [std::ptr::null_mut(); 1];
15741 unsafe_torch_err!(atg_empty(
15742 c_tensors.as_mut_ptr(),
15743 size.as_ptr(),
15744 size.len_i32(),
15745 options.0.c_int(),
15746 options.1.c_int()
15747 ));
15748 Ok(Tensor { c_tensor: c_tensors[0] })
15749 }
15750
15751 pub fn f_empty_like(&self) -> Result<Tensor, TchError> {
15752 let mut c_tensors = [std::ptr::null_mut(); 1];
15753 unsafe_torch_err!(atg_empty_like(c_tensors.as_mut_ptr(), self.c_tensor));
15754 Ok(Tensor { c_tensor: c_tensors[0] })
15755 }
15756
15757 pub fn f_empty_like_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
15758 let mut c_tensors = [std::ptr::null_mut(); 1];
15759 unsafe_torch_err!(atg_empty_like_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
15760 Ok(Tensor { c_tensor: c_tensors[0] })
15761 }
15762
15763 pub fn f_empty_out(out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
15764 let mut c_tensors = [std::ptr::null_mut(); 1];
15765 unsafe_torch_err!(atg_empty_out(
15766 c_tensors.as_mut_ptr(),
15767 out.c_tensor,
15768 size.as_ptr(),
15769 size.len_i32()
15770 ));
15771 Ok(Tensor { c_tensor: c_tensors[0] })
15772 }
15773
15774 pub fn f_empty_permuted(
15775 size: impl IntList,
15776 physical_layout: impl IntList,
15777 options: (Kind, Device),
15778 ) -> Result<Tensor, TchError> {
15779 let mut c_tensors = [std::ptr::null_mut(); 1];
15780 unsafe_torch_err!(atg_empty_permuted(
15781 c_tensors.as_mut_ptr(),
15782 size.as_ptr(),
15783 size.len_i32(),
15784 physical_layout.as_ptr(),
15785 physical_layout.len_i32(),
15786 options.0.c_int(),
15787 options.1.c_int()
15788 ));
15789 Ok(Tensor { c_tensor: c_tensors[0] })
15790 }
15791
15792 pub fn f_empty_permuted_out(
15793 out: &Tensor,
15794 size: impl IntList,
15795 physical_layout: impl IntList,
15796 ) -> Result<Tensor, TchError> {
15797 let mut c_tensors = [std::ptr::null_mut(); 1];
15798 unsafe_torch_err!(atg_empty_permuted_out(
15799 c_tensors.as_mut_ptr(),
15800 out.c_tensor,
15801 size.as_ptr(),
15802 size.len_i32(),
15803 physical_layout.as_ptr(),
15804 physical_layout.len_i32()
15805 ));
15806 Ok(Tensor { c_tensor: c_tensors[0] })
15807 }
15808
15809 pub fn f_empty_quantized(
15810 size: impl IntList,
15811 qtensor: &Tensor,
15812 options: (Kind, Device),
15813 ) -> Result<Tensor, TchError> {
15814 let mut c_tensors = [std::ptr::null_mut(); 1];
15815 unsafe_torch_err!(atg_empty_quantized(
15816 c_tensors.as_mut_ptr(),
15817 size.as_ptr(),
15818 size.len_i32(),
15819 qtensor.c_tensor,
15820 options.0.c_int(),
15821 options.1.c_int()
15822 ));
15823 Ok(Tensor { c_tensor: c_tensors[0] })
15824 }
15825
15826 pub fn f_empty_quantized_out(
15827 out: &Tensor,
15828 size: impl IntList,
15829 qtensor: &Tensor,
15830 ) -> Result<Tensor, TchError> {
15831 let mut c_tensors = [std::ptr::null_mut(); 1];
15832 unsafe_torch_err!(atg_empty_quantized_out(
15833 c_tensors.as_mut_ptr(),
15834 out.c_tensor,
15835 size.as_ptr(),
15836 size.len_i32(),
15837 qtensor.c_tensor
15838 ));
15839 Ok(Tensor { c_tensor: c_tensors[0] })
15840 }
15841
15842 pub fn f_empty_strided(
15843 size: impl IntList,
15844 stride: impl IntList,
15845 options: (Kind, Device),
15846 ) -> Result<Tensor, TchError> {
15847 let mut c_tensors = [std::ptr::null_mut(); 1];
15848 unsafe_torch_err!(atg_empty_strided(
15849 c_tensors.as_mut_ptr(),
15850 size.as_ptr(),
15851 size.len_i32(),
15852 stride.as_ptr(),
15853 stride.len_i32(),
15854 options.0.c_int(),
15855 options.1.c_int()
15856 ));
15857 Ok(Tensor { c_tensor: c_tensors[0] })
15858 }
15859
15860 pub fn f_empty_strided_out(
15861 out: &Tensor,
15862 size: impl IntList,
15863 stride: impl IntList,
15864 ) -> Result<Tensor, TchError> {
15865 let mut c_tensors = [std::ptr::null_mut(); 1];
15866 unsafe_torch_err!(atg_empty_strided_out(
15867 c_tensors.as_mut_ptr(),
15868 out.c_tensor,
15869 size.as_ptr(),
15870 size.len_i32(),
15871 stride.as_ptr(),
15872 stride.len_i32()
15873 ));
15874 Ok(Tensor { c_tensor: c_tensors[0] })
15875 }
15876
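    /// Element-wise comparison of `self` against a scalar, producing a boolean tensor
    /// of the same shape; the tensor-vs-tensor variant is `f_eq_tensor` below. A
    /// minimal sketch, not compiled here, assuming an existing tensor `x`:
    ///
    /// ```ignore
    /// let mask = x.f_eq(0i64)?; // true where x equals zero
    /// ```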
15877 pub fn f_eq<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
15878 let mut c_tensors = [std::ptr::null_mut(); 1];
15879 unsafe_torch_err!(atg_eq(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
15880 Ok(Tensor { c_tensor: c_tensors[0] })
15881 }
15882
15883 pub fn f_eq_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
15884 let mut c_tensors = [std::ptr::null_mut(); 1];
15885 unsafe_torch_err!(atg_eq_(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
15886 Ok(Tensor { c_tensor: c_tensors[0] })
15887 }
15888
15889 pub fn f_eq_scalar_out<S: Into<Scalar>>(
15890 &self,
15891 out: &Tensor,
15892 other: S,
15893 ) -> Result<Tensor, TchError> {
15894 let mut c_tensors = [std::ptr::null_mut(); 1];
15895 unsafe_torch_err!(atg_eq_scalar_out(
15896 c_tensors.as_mut_ptr(),
15897 out.c_tensor,
15898 self.c_tensor,
15899 other.into().c_scalar
15900 ));
15901 Ok(Tensor { c_tensor: c_tensors[0] })
15902 }
15903
15904 pub fn f_eq_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
15905 let mut c_tensors = [std::ptr::null_mut(); 1];
15906 unsafe_torch_err!(atg_eq_tensor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
15907 Ok(Tensor { c_tensor: c_tensors[0] })
15908 }
15909
15910 pub fn f_eq_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
15911 let mut c_tensors = [std::ptr::null_mut(); 1];
15912 unsafe_torch_err!(atg_eq_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
15913 Ok(Tensor { c_tensor: c_tensors[0] })
15914 }
15915
15916 pub fn f_eq_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
15917 let mut c_tensors = [std::ptr::null_mut(); 1];
15918 unsafe_torch_err!(atg_eq_tensor_out(
15919 c_tensors.as_mut_ptr(),
15920 out.c_tensor,
15921 self.c_tensor,
15922 other.c_tensor
15923 ));
15924 Ok(Tensor { c_tensor: c_tensors[0] })
15925 }
15926
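    /// Whole-tensor equality: yields true only when `self` and `other` have the same
    /// shape and dtype and all elements compare equal; the result crosses the C layer
    /// as an integer, hence the `!= 0` check below.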
15927 pub fn f_equal(&self, other: &Tensor) -> Result<bool, TchError> {
15928 let return_;
15929 unsafe_torch_err!(return_ = atg_equal(self.c_tensor, other.c_tensor));
15930 Ok(return_ != 0)
15931 }
15932
15933 pub fn f_erf(&self) -> Result<Tensor, TchError> {
15934 let mut c_tensors = [std::ptr::null_mut(); 1];
15935 unsafe_torch_err!(atg_erf(c_tensors.as_mut_ptr(), self.c_tensor));
15936 Ok(Tensor { c_tensor: c_tensors[0] })
15937 }
15938
15939 pub fn f_erf_(&mut self) -> Result<Tensor, TchError> {
15940 let mut c_tensors = [std::ptr::null_mut(); 1];
15941 unsafe_torch_err!(atg_erf_(c_tensors.as_mut_ptr(), self.c_tensor));
15942 Ok(Tensor { c_tensor: c_tensors[0] })
15943 }
15944
15945 pub fn f_erf_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
15946 let mut c_tensors = [std::ptr::null_mut(); 1];
15947 unsafe_torch_err!(atg_erf_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
15948 Ok(Tensor { c_tensor: c_tensors[0] })
15949 }
15950
15951 pub fn f_erfc(&self) -> Result<Tensor, TchError> {
15952 let mut c_tensors = [std::ptr::null_mut(); 1];
15953 unsafe_torch_err!(atg_erfc(c_tensors.as_mut_ptr(), self.c_tensor));
15954 Ok(Tensor { c_tensor: c_tensors[0] })
15955 }
15956
15957 pub fn f_erfc_(&mut self) -> Result<Tensor, TchError> {
15958 let mut c_tensors = [std::ptr::null_mut(); 1];
15959 unsafe_torch_err!(atg_erfc_(c_tensors.as_mut_ptr(), self.c_tensor));
15960 Ok(Tensor { c_tensor: c_tensors[0] })
15961 }
15962
15963 pub fn f_erfc_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
15964 let mut c_tensors = [std::ptr::null_mut(); 1];
15965 unsafe_torch_err!(atg_erfc_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
15966 Ok(Tensor { c_tensor: c_tensors[0] })
15967 }
15968
15969 pub fn f_erfinv(&self) -> Result<Tensor, TchError> {
15970 let mut c_tensors = [std::ptr::null_mut(); 1];
15971 unsafe_torch_err!(atg_erfinv(c_tensors.as_mut_ptr(), self.c_tensor));
15972 Ok(Tensor { c_tensor: c_tensors[0] })
15973 }
15974
15975 pub fn f_erfinv_(&mut self) -> Result<Tensor, TchError> {
15976 let mut c_tensors = [std::ptr::null_mut(); 1];
15977 unsafe_torch_err!(atg_erfinv_(c_tensors.as_mut_ptr(), self.c_tensor));
15978 Ok(Tensor { c_tensor: c_tensors[0] })
15979 }
15980
15981 pub fn f_erfinv_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
15982 let mut c_tensors = [std::ptr::null_mut(); 1];
15983 unsafe_torch_err!(atg_erfinv_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
15984 Ok(Tensor { c_tensor: c_tensors[0] })
15985 }
15986
15987 pub fn f_exp(&self) -> Result<Tensor, TchError> {
15988 let mut c_tensors = [std::ptr::null_mut(); 1];
15989 unsafe_torch_err!(atg_exp(c_tensors.as_mut_ptr(), self.c_tensor));
15990 Ok(Tensor { c_tensor: c_tensors[0] })
15991 }
15992
15993 pub fn f_exp2(&self) -> Result<Tensor, TchError> {
15994 let mut c_tensors = [std::ptr::null_mut(); 1];
15995 unsafe_torch_err!(atg_exp2(c_tensors.as_mut_ptr(), self.c_tensor));
15996 Ok(Tensor { c_tensor: c_tensors[0] })
15997 }
15998
15999 pub fn f_exp2_(&mut self) -> Result<Tensor, TchError> {
16000 let mut c_tensors = [std::ptr::null_mut(); 1];
16001 unsafe_torch_err!(atg_exp2_(c_tensors.as_mut_ptr(), self.c_tensor));
16002 Ok(Tensor { c_tensor: c_tensors[0] })
16003 }
16004
16005 pub fn f_exp2_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
16006 let mut c_tensors = [std::ptr::null_mut(); 1];
16007 unsafe_torch_err!(atg_exp2_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
16008 Ok(Tensor { c_tensor: c_tensors[0] })
16009 }
16010
16011 pub fn f_exp_(&mut self) -> Result<Tensor, TchError> {
16012 let mut c_tensors = [std::ptr::null_mut(); 1];
16013 unsafe_torch_err!(atg_exp_(c_tensors.as_mut_ptr(), self.c_tensor));
16014 Ok(Tensor { c_tensor: c_tensors[0] })
16015 }
16016
16017 pub fn f_exp_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
16018 let mut c_tensors = [std::ptr::null_mut(); 1];
16019 unsafe_torch_err!(atg_exp_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
16020 Ok(Tensor { c_tensor: c_tensors[0] })
16021 }
16022
16023 pub fn f_expand(&self, size: impl IntList, implicit: bool) -> Result<Tensor, TchError> {
16024 let mut c_tensors = [std::ptr::null_mut(); 1];
16025 unsafe_torch_err!(atg_expand(
16026 c_tensors.as_mut_ptr(),
16027 self.c_tensor,
16028 size.as_ptr(),
16029 size.len_i32(),
16030 if implicit { 1 } else { 0 }
16031 ));
16032 Ok(Tensor { c_tensor: c_tensors[0] })
16033 }
16034
16035 pub fn f_expand_as(&self, other: &Tensor) -> Result<Tensor, TchError> {
16036 let mut c_tensors = [std::ptr::null_mut(); 1];
16037 unsafe_torch_err!(atg_expand_as(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
16038 Ok(Tensor { c_tensor: c_tensors[0] })
16039 }
16040
16041 pub fn f_expand_copy(&self, size: impl IntList, implicit: bool) -> Result<Tensor, TchError> {
16042 let mut c_tensors = [std::ptr::null_mut(); 1];
16043 unsafe_torch_err!(atg_expand_copy(
16044 c_tensors.as_mut_ptr(),
16045 self.c_tensor,
16046 size.as_ptr(),
16047 size.len_i32(),
16048 if implicit { 1 } else { 0 }
16049 ));
16050 Ok(Tensor { c_tensor: c_tensors[0] })
16051 }
16052
16053 pub fn f_expand_copy_out(
16054 &self,
16055 out: &Tensor,
16056 size: impl IntList,
16057 implicit: bool,
16058 ) -> Result<Tensor, TchError> {
16059 let mut c_tensors = [std::ptr::null_mut(); 1];
16060 unsafe_torch_err!(atg_expand_copy_out(
16061 c_tensors.as_mut_ptr(),
16062 out.c_tensor,
16063 self.c_tensor,
16064 size.as_ptr(),
16065 size.len_i32(),
16066 if implicit { 1 } else { 0 }
16067 ));
16068 Ok(Tensor { c_tensor: c_tensors[0] })
16069 }
16070
16071 pub fn f_expm1(&self) -> Result<Tensor, TchError> {
16072 let mut c_tensors = [std::ptr::null_mut(); 1];
16073 unsafe_torch_err!(atg_expm1(c_tensors.as_mut_ptr(), self.c_tensor));
16074 Ok(Tensor { c_tensor: c_tensors[0] })
16075 }
16076
16077 pub fn f_expm1_(&mut self) -> Result<Tensor, TchError> {
16078 let mut c_tensors = [std::ptr::null_mut(); 1];
16079 unsafe_torch_err!(atg_expm1_(c_tensors.as_mut_ptr(), self.c_tensor));
16080 Ok(Tensor { c_tensor: c_tensors[0] })
16081 }
16082
16083 pub fn f_expm1_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
16084 let mut c_tensors = [std::ptr::null_mut(); 1];
16085 unsafe_torch_err!(atg_expm1_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
16086 Ok(Tensor { c_tensor: c_tensors[0] })
16087 }
16088
16089 pub fn f_exponential(&self, lambd: f64) -> Result<Tensor, TchError> {
16090 let mut c_tensors = [std::ptr::null_mut(); 1];
16091 unsafe_torch_err!(atg_exponential(c_tensors.as_mut_ptr(), self.c_tensor, lambd));
16092 Ok(Tensor { c_tensor: c_tensors[0] })
16093 }
16094
16095 pub fn f_exponential_(&mut self, lambd: f64) -> Result<Tensor, TchError> {
16096 let mut c_tensors = [std::ptr::null_mut(); 1];
16097 unsafe_torch_err!(atg_exponential_(c_tensors.as_mut_ptr(), self.c_tensor, lambd));
16098 Ok(Tensor { c_tensor: c_tensors[0] })
16099 }
16100
16101 pub fn f_exponential_out(&self, out: &Tensor, lambd: f64) -> Result<Tensor, TchError> {
16102 let mut c_tensors = [std::ptr::null_mut(); 1];
16103 unsafe_torch_err!(atg_exponential_out(
16104 c_tensors.as_mut_ptr(),
16105 out.c_tensor,
16106 self.c_tensor,
16107 lambd
16108 ));
16109 Ok(Tensor { c_tensor: c_tensors[0] })
16110 }
16111
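    /// Builds an `n` x `n` identity matrix with the requested kind and device; see
    /// `f_eye_m` below for the rectangular variant. A minimal sketch, not compiled
    /// here:
    ///
    /// ```ignore
    /// let id = Tensor::f_eye(3, (Kind::Double, Device::Cpu))?;
    /// ```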
16112 pub fn f_eye(n: i64, options: (Kind, Device)) -> Result<Tensor, TchError> {
16113 let mut c_tensors = [std::ptr::null_mut(); 1];
16114 unsafe_torch_err!(atg_eye(c_tensors.as_mut_ptr(), n, options.0.c_int(), options.1.c_int()));
16115 Ok(Tensor { c_tensor: c_tensors[0] })
16116 }
16117
16118 pub fn f_eye_m(n: i64, m: i64, options: (Kind, Device)) -> Result<Tensor, TchError> {
16119 let mut c_tensors = [std::ptr::null_mut(); 1];
16120 unsafe_torch_err!(atg_eye_m(
16121 c_tensors.as_mut_ptr(),
16122 n,
16123 m,
16124 options.0.c_int(),
16125 options.1.c_int()
16126 ));
16127 Ok(Tensor { c_tensor: c_tensors[0] })
16128 }
16129
16130 pub fn f_eye_m_out(out: &Tensor, n: i64, m: i64) -> Result<Tensor, TchError> {
16131 let mut c_tensors = [std::ptr::null_mut(); 1];
16132 unsafe_torch_err!(atg_eye_m_out(c_tensors.as_mut_ptr(), out.c_tensor, n, m));
16133 Ok(Tensor { c_tensor: c_tensors[0] })
16134 }
16135
16136 pub fn f_eye_out(out: &Tensor, n: i64) -> Result<Tensor, TchError> {
16137 let mut c_tensors = [std::ptr::null_mut(); 1];
16138 unsafe_torch_err!(atg_eye_out(c_tensors.as_mut_ptr(), out.c_tensor, n));
16139 Ok(Tensor { c_tensor: c_tensors[0] })
16140 }
16141
16142 pub fn f_fake_quantize_per_channel_affine(
16143 &self,
16144 scale: &Tensor,
16145 zero_point: &Tensor,
16146 axis: i64,
16147 quant_min: i64,
16148 quant_max: i64,
16149 ) -> Result<Tensor, TchError> {
16150 let mut c_tensors = [std::ptr::null_mut(); 1];
16151 unsafe_torch_err!(atg_fake_quantize_per_channel_affine(
16152 c_tensors.as_mut_ptr(),
16153 self.c_tensor,
16154 scale.c_tensor,
16155 zero_point.c_tensor,
16156 axis,
16157 quant_min,
16158 quant_max
16159 ));
16160 Ok(Tensor { c_tensor: c_tensors[0] })
16161 }
16162
16163 pub fn f_fake_quantize_per_channel_affine_cachemask(
16164 &self,
16165 scale: &Tensor,
16166 zero_point: &Tensor,
16167 axis: i64,
16168 quant_min: i64,
16169 quant_max: i64,
16170 ) -> Result<(Tensor, Tensor), TchError> {
16171 let mut c_tensors = [std::ptr::null_mut(); 2];
16172 unsafe_torch_err!(atg_fake_quantize_per_channel_affine_cachemask(
16173 c_tensors.as_mut_ptr(),
16174 self.c_tensor,
16175 scale.c_tensor,
16176 zero_point.c_tensor,
16177 axis,
16178 quant_min,
16179 quant_max
16180 ));
16181 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
16182 }
16183
16184 pub fn f_fake_quantize_per_channel_affine_cachemask_backward(
16185 grad: &Tensor,
16186 mask: &Tensor,
16187 ) -> Result<Tensor, TchError> {
16188 let mut c_tensors = [std::ptr::null_mut(); 1];
16189 unsafe_torch_err!(atg_fake_quantize_per_channel_affine_cachemask_backward(
16190 c_tensors.as_mut_ptr(),
16191 grad.c_tensor,
16192 mask.c_tensor
16193 ));
16194 Ok(Tensor { c_tensor: c_tensors[0] })
16195 }
16196
16197 pub fn f_fake_quantize_per_channel_affine_cachemask_out(
16198 &self,
16199 out0: &Tensor,
16200 out1: &Tensor,
16201 scale: &Tensor,
16202 zero_point: &Tensor,
16203 axis: i64,
16204 quant_min: i64,
16205 quant_max: i64,
16206 ) -> Result<(Tensor, Tensor), TchError> {
16207 let mut c_tensors = [std::ptr::null_mut(); 2];
16208 unsafe_torch_err!(atg_fake_quantize_per_channel_affine_cachemask_out(
16209 c_tensors.as_mut_ptr(),
16210 out0.c_tensor,
16211 out1.c_tensor,
16212 self.c_tensor,
16213 scale.c_tensor,
16214 zero_point.c_tensor,
16215 axis,
16216 quant_min,
16217 quant_max
16218 ));
16219 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
16220 }
16221
16222 pub fn f_fake_quantize_per_tensor_affine(
16223 &self,
16224 scale: f64,
16225 zero_point: i64,
16226 quant_min: i64,
16227 quant_max: i64,
16228 ) -> Result<Tensor, TchError> {
16229 let mut c_tensors = [std::ptr::null_mut(); 1];
16230 unsafe_torch_err!(atg_fake_quantize_per_tensor_affine(
16231 c_tensors.as_mut_ptr(),
16232 self.c_tensor,
16233 scale,
16234 zero_point,
16235 quant_min,
16236 quant_max
16237 ));
16238 Ok(Tensor { c_tensor: c_tensors[0] })
16239 }
16240
16241 pub fn f_fake_quantize_per_tensor_affine_cachemask(
16242 &self,
16243 scale: f64,
16244 zero_point: i64,
16245 quant_min: i64,
16246 quant_max: i64,
16247 ) -> Result<(Tensor, Tensor), TchError> {
16248 let mut c_tensors = [std::ptr::null_mut(); 2];
16249 unsafe_torch_err!(atg_fake_quantize_per_tensor_affine_cachemask(
16250 c_tensors.as_mut_ptr(),
16251 self.c_tensor,
16252 scale,
16253 zero_point,
16254 quant_min,
16255 quant_max
16256 ));
16257 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
16258 }
16259
16260 pub fn f_fake_quantize_per_tensor_affine_cachemask_backward(
16261 grad: &Tensor,
16262 mask: &Tensor,
16263 ) -> Result<Tensor, TchError> {
16264 let mut c_tensors = [std::ptr::null_mut(); 1];
16265 unsafe_torch_err!(atg_fake_quantize_per_tensor_affine_cachemask_backward(
16266 c_tensors.as_mut_ptr(),
16267 grad.c_tensor,
16268 mask.c_tensor
16269 ));
16270 Ok(Tensor { c_tensor: c_tensors[0] })
16271 }
16272
16273 pub fn f_fake_quantize_per_tensor_affine_cachemask_out(
16274 &self,
16275 out0: &Tensor,
16276 out1: &Tensor,
16277 scale: f64,
16278 zero_point: i64,
16279 quant_min: i64,
16280 quant_max: i64,
16281 ) -> Result<(Tensor, Tensor), TchError> {
16282 let mut c_tensors = [std::ptr::null_mut(); 2];
16283 unsafe_torch_err!(atg_fake_quantize_per_tensor_affine_cachemask_out(
16284 c_tensors.as_mut_ptr(),
16285 out0.c_tensor,
16286 out1.c_tensor,
16287 self.c_tensor,
16288 scale,
16289 zero_point,
16290 quant_min,
16291 quant_max
16292 ));
16293 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
16294 }
16295
16296 pub fn f_fake_quantize_per_tensor_affine_tensor_qparams(
16297 &self,
16298 scale: &Tensor,
16299 zero_point: &Tensor,
16300 quant_min: i64,
16301 quant_max: i64,
16302 ) -> Result<Tensor, TchError> {
16303 let mut c_tensors = [std::ptr::null_mut(); 1];
16304 unsafe_torch_err!(atg_fake_quantize_per_tensor_affine_tensor_qparams(
16305 c_tensors.as_mut_ptr(),
16306 self.c_tensor,
16307 scale.c_tensor,
16308 zero_point.c_tensor,
16309 quant_min,
16310 quant_max
16311 ));
16312 Ok(Tensor { c_tensor: c_tensors[0] })
16313 }
16314
16315 pub fn f_fbgemm_linear_fp16_weight(
16316 &self,
16317 packed_weight: &Tensor,
16318 bias: &Tensor,
16319 ) -> Result<Tensor, TchError> {
16320 let mut c_tensors = [std::ptr::null_mut(); 1];
16321 unsafe_torch_err!(atg_fbgemm_linear_fp16_weight(
16322 c_tensors.as_mut_ptr(),
16323 self.c_tensor,
16324 packed_weight.c_tensor,
16325 bias.c_tensor
16326 ));
16327 Ok(Tensor { c_tensor: c_tensors[0] })
16328 }
16329
16330 pub fn f_fbgemm_linear_fp16_weight_fp32_activation(
16331 &self,
16332 packed_weight: &Tensor,
16333 bias: &Tensor,
16334 ) -> Result<Tensor, TchError> {
16335 let mut c_tensors = [std::ptr::null_mut(); 1];
16336 unsafe_torch_err!(atg_fbgemm_linear_fp16_weight_fp32_activation(
16337 c_tensors.as_mut_ptr(),
16338 self.c_tensor,
16339 packed_weight.c_tensor,
16340 bias.c_tensor
16341 ));
16342 Ok(Tensor { c_tensor: c_tensors[0] })
16343 }
16344
16345 pub fn f_fbgemm_linear_int8_weight<S: Into<Scalar>>(
16346 &self,
16347 weight: &Tensor,
16348 packed: &Tensor,
16349 col_offsets: &Tensor,
16350 weight_scale: S,
16351 weight_zero_point: S,
16352 bias: &Tensor,
16353 ) -> Result<Tensor, TchError> {
16354 let mut c_tensors = [std::ptr::null_mut(); 1];
16355 unsafe_torch_err!(atg_fbgemm_linear_int8_weight(
16356 c_tensors.as_mut_ptr(),
16357 self.c_tensor,
16358 weight.c_tensor,
16359 packed.c_tensor,
16360 col_offsets.c_tensor,
16361 weight_scale.into().c_scalar,
16362 weight_zero_point.into().c_scalar,
16363 bias.c_tensor
16364 ));
16365 Ok(Tensor { c_tensor: c_tensors[0] })
16366 }
16367
16368 pub fn f_fbgemm_linear_int8_weight_fp32_activation<S: Into<Scalar>>(
16369 &self,
16370 weight: &Tensor,
16371 packed: &Tensor,
16372 col_offsets: &Tensor,
16373 weight_scale: S,
16374 weight_zero_point: S,
16375 bias: &Tensor,
16376 ) -> Result<Tensor, TchError> {
16377 let mut c_tensors = [std::ptr::null_mut(); 1];
16378 unsafe_torch_err!(atg_fbgemm_linear_int8_weight_fp32_activation(
16379 c_tensors.as_mut_ptr(),
16380 self.c_tensor,
16381 weight.c_tensor,
16382 packed.c_tensor,
16383 col_offsets.c_tensor,
16384 weight_scale.into().c_scalar,
16385 weight_zero_point.into().c_scalar,
16386 bias.c_tensor
16387 ));
16388 Ok(Tensor { c_tensor: c_tensors[0] })
16389 }
16390
16391 pub fn f_fbgemm_pack_gemm_matrix_fp16(&self) -> Result<Tensor, TchError> {
16392 let mut c_tensors = [std::ptr::null_mut(); 1];
16393 unsafe_torch_err!(atg_fbgemm_pack_gemm_matrix_fp16(c_tensors.as_mut_ptr(), self.c_tensor));
16394 Ok(Tensor { c_tensor: c_tensors[0] })
16395 }
16396
16397 pub fn f_fbgemm_pack_quantized_matrix(&self) -> Result<Tensor, TchError> {
16398 let mut c_tensors = [std::ptr::null_mut(); 1];
16399 unsafe_torch_err!(atg_fbgemm_pack_quantized_matrix(c_tensors.as_mut_ptr(), self.c_tensor));
16400 Ok(Tensor { c_tensor: c_tensors[0] })
16401 }
16402
16403 pub fn f_fbgemm_pack_quantized_matrix_kn(&self, k: i64, n: i64) -> Result<Tensor, TchError> {
16404 let mut c_tensors = [std::ptr::null_mut(); 1];
16405 unsafe_torch_err!(atg_fbgemm_pack_quantized_matrix_kn(
16406 c_tensors.as_mut_ptr(),
16407 self.c_tensor,
16408 k,
16409 n
16410 ));
16411 Ok(Tensor { c_tensor: c_tensors[0] })
16412 }
16413
16414 pub fn f_feature_alpha_dropout(&self, p: f64, train: bool) -> Result<Tensor, TchError> {
16415 let mut c_tensors = [std::ptr::null_mut(); 1];
16416 unsafe_torch_err!(atg_feature_alpha_dropout(
16417 c_tensors.as_mut_ptr(),
16418 self.c_tensor,
16419 p,
16420 if train { 1 } else { 0 }
16421 ));
16422 Ok(Tensor { c_tensor: c_tensors[0] })
16423 }
16424
16425 pub fn f_feature_alpha_dropout_(&mut self, p: f64, train: bool) -> Result<Tensor, TchError> {
16426 let mut c_tensors = [std::ptr::null_mut(); 1];
16427 unsafe_torch_err!(atg_feature_alpha_dropout_(
16428 c_tensors.as_mut_ptr(),
16429 self.c_tensor,
16430 p,
16431 if train { 1 } else { 0 }
16432 ));
16433 Ok(Tensor { c_tensor: c_tensors[0] })
16434 }
16435
16436 pub fn f_feature_dropout(&self, p: f64, train: bool) -> Result<Tensor, TchError> {
16437 let mut c_tensors = [std::ptr::null_mut(); 1];
16438 unsafe_torch_err!(atg_feature_dropout(
16439 c_tensors.as_mut_ptr(),
16440 self.c_tensor,
16441 p,
16442 if train { 1 } else { 0 }
16443 ));
16444 Ok(Tensor { c_tensor: c_tensors[0] })
16445 }
16446
16447 pub fn f_feature_dropout_(&mut self, p: f64, train: bool) -> Result<Tensor, TchError> {
16448 let mut c_tensors = [std::ptr::null_mut(); 1];
16449 unsafe_torch_err!(atg_feature_dropout_(
16450 c_tensors.as_mut_ptr(),
16451 self.c_tensor,
16452 p,
16453 if train { 1 } else { 0 }
16454 ));
16455 Ok(Tensor { c_tensor: c_tensors[0] })
16456 }
16457
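    /// One-dimensional discrete Fourier transform along `dim`. `n` optionally pads or
    /// truncates the signal to that length (pass None to keep the input length), and
    /// `norm` names the normalization ("forward", "backward", or "ortho" in upstream
    /// PyTorch). A minimal sketch, not compiled here, assuming a 1-D tensor `signal`:
    ///
    /// ```ignore
    /// let spectrum = signal.f_fft_fft(None::<i64>, -1, "backward")?;
    /// ```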
16458 pub fn f_fft_fft(
16459 &self,
16460 n: impl Into<Option<i64>>,
16461 dim: i64,
16462 norm: &str,
16463 ) -> Result<Tensor, TchError> {
16464 let n = n.into();
16465 let mut c_tensors = [std::ptr::null_mut(); 1];
16466 unsafe_torch_err!(atg_fft_fft(
16467 c_tensors.as_mut_ptr(),
16468 self.c_tensor,
16469 n.unwrap_or(0i64),
16470 n.is_none() as i8,
16471 dim,
16472 norm.as_ptr(),
16473 norm.len() as i32
16474 ));
16475 Ok(Tensor { c_tensor: c_tensors[0] })
16476 }
16477
16478 pub fn f_fft_fft2(
16479 &self,
16480 s: impl IntListOption,
16481 dim: impl IntList,
16482 norm: &str,
16483 ) -> Result<Tensor, TchError> {
16484 let mut c_tensors = [std::ptr::null_mut(); 1];
16485 unsafe_torch_err!(atg_fft_fft2(
16486 c_tensors.as_mut_ptr(),
16487 self.c_tensor,
16488 s.as_ptr(),
16489 s.len_i32(),
16490 dim.as_ptr(),
16491 dim.len_i32(),
16492 norm.as_ptr(),
16493 norm.len() as i32
16494 ));
16495 Ok(Tensor { c_tensor: c_tensors[0] })
16496 }
16497
16498 pub fn f_fft_fft2_out(
16499 &self,
16500 out: &Tensor,
16501 s: impl IntListOption,
16502 dim: impl IntList,
16503 norm: &str,
16504 ) -> Result<Tensor, TchError> {
16505 let mut c_tensors = [std::ptr::null_mut(); 1];
16506 unsafe_torch_err!(atg_fft_fft2_out(
16507 c_tensors.as_mut_ptr(),
16508 out.c_tensor,
16509 self.c_tensor,
16510 s.as_ptr(),
16511 s.len_i32(),
16512 dim.as_ptr(),
16513 dim.len_i32(),
16514 norm.as_ptr(),
16515 norm.len() as i32
16516 ));
16517 Ok(Tensor { c_tensor: c_tensors[0] })
16518 }
16519
16520 pub fn f_fft_fft_out(
16521 &self,
16522 out: &Tensor,
16523 n: impl Into<Option<i64>>,
16524 dim: i64,
16525 norm: &str,
16526 ) -> Result<Tensor, TchError> {
16527 let n = n.into();
16528 let mut c_tensors = [std::ptr::null_mut(); 1];
16529 unsafe_torch_err!(atg_fft_fft_out(
16530 c_tensors.as_mut_ptr(),
16531 out.c_tensor,
16532 self.c_tensor,
16533 n.unwrap_or(0i64),
16534 n.is_none() as i8,
16535 dim,
16536 norm.as_ptr(),
16537 norm.len() as i32
16538 ));
16539 Ok(Tensor { c_tensor: c_tensors[0] })
16540 }
16541
16542 pub fn f_fft_fftfreq(n: i64, d: f64, options: (Kind, Device)) -> Result<Tensor, TchError> {
16543 let mut c_tensors = [std::ptr::null_mut(); 1];
16544 unsafe_torch_err!(atg_fft_fftfreq(
16545 c_tensors.as_mut_ptr(),
16546 n,
16547 d,
16548 options.0.c_int(),
16549 options.1.c_int()
16550 ));
16551 Ok(Tensor { c_tensor: c_tensors[0] })
16552 }
16553
16554 pub fn f_fft_fftfreq_out(out: &Tensor, n: i64, d: f64) -> Result<Tensor, TchError> {
16555 let mut c_tensors = [std::ptr::null_mut(); 1];
16556 unsafe_torch_err!(atg_fft_fftfreq_out(c_tensors.as_mut_ptr(), out.c_tensor, n, d));
16557 Ok(Tensor { c_tensor: c_tensors[0] })
16558 }
16559
16560 pub fn f_fft_fftn(
16561 &self,
16562 s: impl IntListOption,
16563 dim: impl IntListOption,
16564 norm: &str,
16565 ) -> Result<Tensor, TchError> {
16566 let mut c_tensors = [std::ptr::null_mut(); 1];
16567 unsafe_torch_err!(atg_fft_fftn(
16568 c_tensors.as_mut_ptr(),
16569 self.c_tensor,
16570 s.as_ptr(),
16571 s.len_i32(),
16572 dim.as_ptr(),
16573 dim.len_i32(),
16574 norm.as_ptr(),
16575 norm.len() as i32
16576 ));
16577 Ok(Tensor { c_tensor: c_tensors[0] })
16578 }
16579
16580 pub fn f_fft_fftn_out(
16581 &self,
16582 out: &Tensor,
16583 s: impl IntListOption,
16584 dim: impl IntListOption,
16585 norm: &str,
16586 ) -> Result<Tensor, TchError> {
16587 let mut c_tensors = [std::ptr::null_mut(); 1];
16588 unsafe_torch_err!(atg_fft_fftn_out(
16589 c_tensors.as_mut_ptr(),
16590 out.c_tensor,
16591 self.c_tensor,
16592 s.as_ptr(),
16593 s.len_i32(),
16594 dim.as_ptr(),
16595 dim.len_i32(),
16596 norm.as_ptr(),
16597 norm.len() as i32
16598 ));
16599 Ok(Tensor { c_tensor: c_tensors[0] })
16600 }
16601
16602 pub fn f_fft_fftshift(&self, dim: impl IntListOption) -> Result<Tensor, TchError> {
16603 let mut c_tensors = [std::ptr::null_mut(); 1];
16604 unsafe_torch_err!(atg_fft_fftshift(
16605 c_tensors.as_mut_ptr(),
16606 self.c_tensor,
16607 dim.as_ptr(),
16608 dim.len_i32()
16609 ));
16610 Ok(Tensor { c_tensor: c_tensors[0] })
16611 }
16612
16613 pub fn f_fft_hfft(
16614 &self,
16615 n: impl Into<Option<i64>>,
16616 dim: i64,
16617 norm: &str,
16618 ) -> Result<Tensor, TchError> {
16619 let n = n.into();
16620 let mut c_tensors = [std::ptr::null_mut(); 1];
16621 unsafe_torch_err!(atg_fft_hfft(
16622 c_tensors.as_mut_ptr(),
16623 self.c_tensor,
16624 n.unwrap_or(0i64),
16625 n.is_none() as i8,
16626 dim,
16627 norm.as_ptr(),
16628 norm.len() as i32
16629 ));
16630 Ok(Tensor { c_tensor: c_tensors[0] })
16631 }
16632
16633 pub fn f_fft_hfft2(
16634 &self,
16635 s: impl IntListOption,
16636 dim: impl IntList,
16637 norm: &str,
16638 ) -> Result<Tensor, TchError> {
16639 let mut c_tensors = [std::ptr::null_mut(); 1];
16640 unsafe_torch_err!(atg_fft_hfft2(
16641 c_tensors.as_mut_ptr(),
16642 self.c_tensor,
16643 s.as_ptr(),
16644 s.len_i32(),
16645 dim.as_ptr(),
16646 dim.len_i32(),
16647 norm.as_ptr(),
16648 norm.len() as i32
16649 ));
16650 Ok(Tensor { c_tensor: c_tensors[0] })
16651 }
16652
16653 pub fn f_fft_hfft2_out(
16654 &self,
16655 out: &Tensor,
16656 s: impl IntListOption,
16657 dim: impl IntList,
16658 norm: &str,
16659 ) -> Result<Tensor, TchError> {
16660 let mut c_tensors = [std::ptr::null_mut(); 1];
16661 unsafe_torch_err!(atg_fft_hfft2_out(
16662 c_tensors.as_mut_ptr(),
16663 out.c_tensor,
16664 self.c_tensor,
16665 s.as_ptr(),
16666 s.len_i32(),
16667 dim.as_ptr(),
16668 dim.len_i32(),
16669 norm.as_ptr(),
16670 norm.len() as i32
16671 ));
16672 Ok(Tensor { c_tensor: c_tensors[0] })
16673 }
16674
16675 pub fn f_fft_hfft_out(
16676 &self,
16677 out: &Tensor,
16678 n: impl Into<Option<i64>>,
16679 dim: i64,
16680 norm: &str,
16681 ) -> Result<Tensor, TchError> {
16682 let n = n.into();
16683 let mut c_tensors = [std::ptr::null_mut(); 1];
16684 unsafe_torch_err!(atg_fft_hfft_out(
16685 c_tensors.as_mut_ptr(),
16686 out.c_tensor,
16687 self.c_tensor,
16688 n.unwrap_or(0i64),
16689 n.is_none() as i8,
16690 dim,
16691 norm.as_ptr(),
16692 norm.len() as i32
16693 ));
16694 Ok(Tensor { c_tensor: c_tensors[0] })
16695 }
16696
16697 pub fn f_fft_hfftn(
16698 &self,
16699 s: impl IntListOption,
16700 dim: impl IntListOption,
16701 norm: &str,
16702 ) -> Result<Tensor, TchError> {
16703 let mut c_tensors = [std::ptr::null_mut(); 1];
16704 unsafe_torch_err!(atg_fft_hfftn(
16705 c_tensors.as_mut_ptr(),
16706 self.c_tensor,
16707 s.as_ptr(),
16708 s.len_i32(),
16709 dim.as_ptr(),
16710 dim.len_i32(),
16711 norm.as_ptr(),
16712 norm.len() as i32
16713 ));
16714 Ok(Tensor { c_tensor: c_tensors[0] })
16715 }
16716
16717 pub fn f_fft_hfftn_out(
16718 &self,
16719 out: &Tensor,
16720 s: impl IntListOption,
16721 dim: impl IntListOption,
16722 norm: &str,
16723 ) -> Result<Tensor, TchError> {
16724 let mut c_tensors = [std::ptr::null_mut(); 1];
16725 unsafe_torch_err!(atg_fft_hfftn_out(
16726 c_tensors.as_mut_ptr(),
16727 out.c_tensor,
16728 self.c_tensor,
16729 s.as_ptr(),
16730 s.len_i32(),
16731 dim.as_ptr(),
16732 dim.len_i32(),
16733 norm.as_ptr(),
16734 norm.len() as i32
16735 ));
16736 Ok(Tensor { c_tensor: c_tensors[0] })
16737 }
16738
16739 pub fn f_fft_ifft(
16740 &self,
16741 n: impl Into<Option<i64>>,
16742 dim: i64,
16743 norm: &str,
16744 ) -> Result<Tensor, TchError> {
16745 let n = n.into();
16746 let mut c_tensors = [std::ptr::null_mut(); 1];
16747 unsafe_torch_err!(atg_fft_ifft(
16748 c_tensors.as_mut_ptr(),
16749 self.c_tensor,
16750 n.unwrap_or(0i64),
16751 n.is_none() as i8,
16752 dim,
16753 norm.as_ptr(),
16754 norm.len() as i32
16755 ));
16756 Ok(Tensor { c_tensor: c_tensors[0] })
16757 }
16758
16759 pub fn f_fft_ifft2(
16760 &self,
16761 s: impl IntListOption,
16762 dim: impl IntList,
16763 norm: &str,
16764 ) -> Result<Tensor, TchError> {
16765 let mut c_tensors = [std::ptr::null_mut(); 1];
16766 unsafe_torch_err!(atg_fft_ifft2(
16767 c_tensors.as_mut_ptr(),
16768 self.c_tensor,
16769 s.as_ptr(),
16770 s.len_i32(),
16771 dim.as_ptr(),
16772 dim.len_i32(),
16773 norm.as_ptr(),
16774 norm.len() as i32
16775 ));
16776 Ok(Tensor { c_tensor: c_tensors[0] })
16777 }
16778
16779 pub fn f_fft_ifft2_out(
16780 &self,
16781 out: &Tensor,
16782 s: impl IntListOption,
16783 dim: impl IntList,
16784 norm: &str,
16785 ) -> Result<Tensor, TchError> {
16786 let mut c_tensors = [std::ptr::null_mut(); 1];
16787 unsafe_torch_err!(atg_fft_ifft2_out(
16788 c_tensors.as_mut_ptr(),
16789 out.c_tensor,
16790 self.c_tensor,
16791 s.as_ptr(),
16792 s.len_i32(),
16793 dim.as_ptr(),
16794 dim.len_i32(),
16795 norm.as_ptr(),
16796 norm.len() as i32
16797 ));
16798 Ok(Tensor { c_tensor: c_tensors[0] })
16799 }
16800
16801 pub fn f_fft_ifft_out(
16802 &self,
16803 out: &Tensor,
16804 n: impl Into<Option<i64>>,
16805 dim: i64,
16806 norm: &str,
16807 ) -> Result<Tensor, TchError> {
16808 let n = n.into();
16809 let mut c_tensors = [std::ptr::null_mut(); 1];
16810 unsafe_torch_err!(atg_fft_ifft_out(
16811 c_tensors.as_mut_ptr(),
16812 out.c_tensor,
16813 self.c_tensor,
16814 n.unwrap_or(0i64),
16815 n.is_none() as i8,
16816 dim,
16817 norm.as_ptr(),
16818 norm.len() as i32
16819 ));
16820 Ok(Tensor { c_tensor: c_tensors[0] })
16821 }
16822
16823 pub fn f_fft_ifftn(
16824 &self,
16825 s: impl IntListOption,
16826 dim: impl IntListOption,
16827 norm: &str,
16828 ) -> Result<Tensor, TchError> {
16829 let mut c_tensors = [std::ptr::null_mut(); 1];
16830 unsafe_torch_err!(atg_fft_ifftn(
16831 c_tensors.as_mut_ptr(),
16832 self.c_tensor,
16833 s.as_ptr(),
16834 s.len_i32(),
16835 dim.as_ptr(),
16836 dim.len_i32(),
16837 norm.as_ptr(),
16838 norm.len() as i32
16839 ));
16840 Ok(Tensor { c_tensor: c_tensors[0] })
16841 }
16842
16843 pub fn f_fft_ifftn_out(
16844 &self,
16845 out: &Tensor,
16846 s: impl IntListOption,
16847 dim: impl IntListOption,
16848 norm: &str,
16849 ) -> Result<Tensor, TchError> {
16850 let mut c_tensors = [std::ptr::null_mut(); 1];
16851 unsafe_torch_err!(atg_fft_ifftn_out(
16852 c_tensors.as_mut_ptr(),
16853 out.c_tensor,
16854 self.c_tensor,
16855 s.as_ptr(),
16856 s.len_i32(),
16857 dim.as_ptr(),
16858 dim.len_i32(),
16859 norm.as_ptr(),
16860 norm.len() as i32
16861 ));
16862 Ok(Tensor { c_tensor: c_tensors[0] })
16863 }
16864
16865 pub fn f_fft_ifftshift(&self, dim: impl IntListOption) -> Result<Tensor, TchError> {
16866 let mut c_tensors = [std::ptr::null_mut(); 1];
16867 unsafe_torch_err!(atg_fft_ifftshift(
16868 c_tensors.as_mut_ptr(),
16869 self.c_tensor,
16870 dim.as_ptr(),
16871 dim.len_i32()
16872 ));
16873 Ok(Tensor { c_tensor: c_tensors[0] })
16874 }
16875
16876 pub fn f_fft_ihfft(
16877 &self,
16878 n: impl Into<Option<i64>>,
16879 dim: i64,
16880 norm: &str,
16881 ) -> Result<Tensor, TchError> {
16882 let n = n.into();
16883 let mut c_tensors = [std::ptr::null_mut(); 1];
16884 unsafe_torch_err!(atg_fft_ihfft(
16885 c_tensors.as_mut_ptr(),
16886 self.c_tensor,
16887 n.unwrap_or(0i64),
16888 n.is_none() as i8,
16889 dim,
16890 norm.as_ptr(),
16891 norm.len() as i32
16892 ));
16893 Ok(Tensor { c_tensor: c_tensors[0] })
16894 }
16895
16896 pub fn f_fft_ihfft2(
16897 &self,
16898 s: impl IntListOption,
16899 dim: impl IntList,
16900 norm: &str,
16901 ) -> Result<Tensor, TchError> {
16902 let mut c_tensors = [std::ptr::null_mut(); 1];
16903 unsafe_torch_err!(atg_fft_ihfft2(
16904 c_tensors.as_mut_ptr(),
16905 self.c_tensor,
16906 s.as_ptr(),
16907 s.len_i32(),
16908 dim.as_ptr(),
16909 dim.len_i32(),
16910 norm.as_ptr(),
16911 norm.len() as i32
16912 ));
16913 Ok(Tensor { c_tensor: c_tensors[0] })
16914 }
16915
16916 pub fn f_fft_ihfft2_out(
16917 &self,
16918 out: &Tensor,
16919 s: impl IntListOption,
16920 dim: impl IntList,
16921 norm: &str,
16922 ) -> Result<Tensor, TchError> {
16923 let mut c_tensors = [std::ptr::null_mut(); 1];
16924 unsafe_torch_err!(atg_fft_ihfft2_out(
16925 c_tensors.as_mut_ptr(),
16926 out.c_tensor,
16927 self.c_tensor,
16928 s.as_ptr(),
16929 s.len_i32(),
16930 dim.as_ptr(),
16931 dim.len_i32(),
16932 norm.as_ptr(),
16933 norm.len() as i32
16934 ));
16935 Ok(Tensor { c_tensor: c_tensors[0] })
16936 }
16937
16938 pub fn f_fft_ihfft_out(
16939 &self,
16940 out: &Tensor,
16941 n: impl Into<Option<i64>>,
16942 dim: i64,
16943 norm: &str,
16944 ) -> Result<Tensor, TchError> {
16945 let n = n.into();
16946 let mut c_tensors = [std::ptr::null_mut(); 1];
16947 unsafe_torch_err!(atg_fft_ihfft_out(
16948 c_tensors.as_mut_ptr(),
16949 out.c_tensor,
16950 self.c_tensor,
16951 n.unwrap_or(0i64),
16952 n.is_none() as i8,
16953 dim,
16954 norm.as_ptr(),
16955 norm.len() as i32
16956 ));
16957 Ok(Tensor { c_tensor: c_tensors[0] })
16958 }
16959
16960 pub fn f_fft_ihfftn(
16961 &self,
16962 s: impl IntListOption,
16963 dim: impl IntListOption,
16964 norm: &str,
16965 ) -> Result<Tensor, TchError> {
16966 let mut c_tensors = [std::ptr::null_mut(); 1];
16967 unsafe_torch_err!(atg_fft_ihfftn(
16968 c_tensors.as_mut_ptr(),
16969 self.c_tensor,
16970 s.as_ptr(),
16971 s.len_i32(),
16972 dim.as_ptr(),
16973 dim.len_i32(),
16974 norm.as_ptr(),
16975 norm.len() as i32
16976 ));
16977 Ok(Tensor { c_tensor: c_tensors[0] })
16978 }
16979
16980 pub fn f_fft_ihfftn_out(
16981 &self,
16982 out: &Tensor,
16983 s: impl IntListOption,
16984 dim: impl IntListOption,
16985 norm: &str,
16986 ) -> Result<Tensor, TchError> {
16987 let mut c_tensors = [std::ptr::null_mut(); 1];
16988 unsafe_torch_err!(atg_fft_ihfftn_out(
16989 c_tensors.as_mut_ptr(),
16990 out.c_tensor,
16991 self.c_tensor,
16992 s.as_ptr(),
16993 s.len_i32(),
16994 dim.as_ptr(),
16995 dim.len_i32(),
16996 norm.as_ptr(),
16997 norm.len() as i32
16998 ));
16999 Ok(Tensor { c_tensor: c_tensors[0] })
17000 }
17001
17002 pub fn f_fft_irfft(
17003 &self,
17004 n: impl Into<Option<i64>>,
17005 dim: i64,
17006 norm: &str,
17007 ) -> Result<Tensor, TchError> {
17008 let n = n.into();
17009 let mut c_tensors = [std::ptr::null_mut(); 1];
17010 unsafe_torch_err!(atg_fft_irfft(
17011 c_tensors.as_mut_ptr(),
17012 self.c_tensor,
17013 n.unwrap_or(0i64),
17014 n.is_none() as i8,
17015 dim,
17016 norm.as_ptr(),
17017 norm.len() as i32
17018 ));
17019 Ok(Tensor { c_tensor: c_tensors[0] })
17020 }
17021
17022 pub fn f_fft_irfft2(
17023 &self,
17024 s: impl IntListOption,
17025 dim: impl IntList,
17026 norm: &str,
17027 ) -> Result<Tensor, TchError> {
17028 let mut c_tensors = [std::ptr::null_mut(); 1];
17029 unsafe_torch_err!(atg_fft_irfft2(
17030 c_tensors.as_mut_ptr(),
17031 self.c_tensor,
17032 s.as_ptr(),
17033 s.len_i32(),
17034 dim.as_ptr(),
17035 dim.len_i32(),
17036 norm.as_ptr(),
17037 norm.len() as i32
17038 ));
17039 Ok(Tensor { c_tensor: c_tensors[0] })
17040 }
17041
17042 pub fn f_fft_irfft2_out(
17043 &self,
17044 out: &Tensor,
17045 s: impl IntListOption,
17046 dim: impl IntList,
17047 norm: &str,
17048 ) -> Result<Tensor, TchError> {
17049 let mut c_tensors = [std::ptr::null_mut(); 1];
17050 unsafe_torch_err!(atg_fft_irfft2_out(
17051 c_tensors.as_mut_ptr(),
17052 out.c_tensor,
17053 self.c_tensor,
17054 s.as_ptr(),
17055 s.len_i32(),
17056 dim.as_ptr(),
17057 dim.len_i32(),
17058 norm.as_ptr(),
17059 norm.len() as i32
17060 ));
17061 Ok(Tensor { c_tensor: c_tensors[0] })
17062 }
17063
17064 pub fn f_fft_irfft_out(
17065 &self,
17066 out: &Tensor,
17067 n: impl Into<Option<i64>>,
17068 dim: i64,
17069 norm: &str,
17070 ) -> Result<Tensor, TchError> {
17071 let n = n.into();
17072 let mut c_tensors = [std::ptr::null_mut(); 1];
17073 unsafe_torch_err!(atg_fft_irfft_out(
17074 c_tensors.as_mut_ptr(),
17075 out.c_tensor,
17076 self.c_tensor,
17077 n.unwrap_or(0i64),
17078 n.is_none() as i8,
17079 dim,
17080 norm.as_ptr(),
17081 norm.len() as i32
17082 ));
17083 Ok(Tensor { c_tensor: c_tensors[0] })
17084 }
17085
17086 pub fn f_fft_irfftn(
17087 &self,
17088 s: impl IntListOption,
17089 dim: impl IntListOption,
17090 norm: &str,
17091 ) -> Result<Tensor, TchError> {
17092 let mut c_tensors = [std::ptr::null_mut(); 1];
17093 unsafe_torch_err!(atg_fft_irfftn(
17094 c_tensors.as_mut_ptr(),
17095 self.c_tensor,
17096 s.as_ptr(),
17097 s.len_i32(),
17098 dim.as_ptr(),
17099 dim.len_i32(),
17100 norm.as_ptr(),
17101 norm.len() as i32
17102 ));
17103 Ok(Tensor { c_tensor: c_tensors[0] })
17104 }
17105
17106 pub fn f_fft_irfftn_out(
17107 &self,
17108 out: &Tensor,
17109 s: impl IntListOption,
17110 dim: impl IntListOption,
17111 norm: &str,
17112 ) -> Result<Tensor, TchError> {
17113 let mut c_tensors = [std::ptr::null_mut(); 1];
17114 unsafe_torch_err!(atg_fft_irfftn_out(
17115 c_tensors.as_mut_ptr(),
17116 out.c_tensor,
17117 self.c_tensor,
17118 s.as_ptr(),
17119 s.len_i32(),
17120 dim.as_ptr(),
17121 dim.len_i32(),
17122 norm.as_ptr(),
17123 norm.len() as i32
17124 ));
17125 Ok(Tensor { c_tensor: c_tensors[0] })
17126 }
17127
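    /// FFT of a real-valued input along `dim`, keeping only the non-negative frequency
    /// half of the spectrum (about n/2 + 1 complex bins); the arguments mirror
    /// `f_fft_fft` above.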
17128 pub fn f_fft_rfft(
17129 &self,
17130 n: impl Into<Option<i64>>,
17131 dim: i64,
17132 norm: &str,
17133 ) -> Result<Tensor, TchError> {
17134 let n = n.into();
17135 let mut c_tensors = [std::ptr::null_mut(); 1];
17136 unsafe_torch_err!(atg_fft_rfft(
17137 c_tensors.as_mut_ptr(),
17138 self.c_tensor,
17139 n.unwrap_or(0i64),
17140 n.is_none() as i8,
17141 dim,
17142 norm.as_ptr(),
17143 norm.len() as i32
17144 ));
17145 Ok(Tensor { c_tensor: c_tensors[0] })
17146 }
17147
17148 pub fn f_fft_rfft2(
17149 &self,
17150 s: impl IntListOption,
17151 dim: impl IntList,
17152 norm: &str,
17153 ) -> Result<Tensor, TchError> {
17154 let mut c_tensors = [std::ptr::null_mut(); 1];
17155 unsafe_torch_err!(atg_fft_rfft2(
17156 c_tensors.as_mut_ptr(),
17157 self.c_tensor,
17158 s.as_ptr(),
17159 s.len_i32(),
17160 dim.as_ptr(),
17161 dim.len_i32(),
17162 norm.as_ptr(),
17163 norm.len() as i32
17164 ));
17165 Ok(Tensor { c_tensor: c_tensors[0] })
17166 }
17167
17168 pub fn f_fft_rfft2_out(
17169 &self,
17170 out: &Tensor,
17171 s: impl IntListOption,
17172 dim: impl IntList,
17173 norm: &str,
17174 ) -> Result<Tensor, TchError> {
17175 let mut c_tensors = [std::ptr::null_mut(); 1];
17176 unsafe_torch_err!(atg_fft_rfft2_out(
17177 c_tensors.as_mut_ptr(),
17178 out.c_tensor,
17179 self.c_tensor,
17180 s.as_ptr(),
17181 s.len_i32(),
17182 dim.as_ptr(),
17183 dim.len_i32(),
17184 norm.as_ptr(),
17185 norm.len() as i32
17186 ));
17187 Ok(Tensor { c_tensor: c_tensors[0] })
17188 }
17189
17190 pub fn f_fft_rfft_out(
17191 &self,
17192 out: &Tensor,
17193 n: impl Into<Option<i64>>,
17194 dim: i64,
17195 norm: &str,
17196 ) -> Result<Tensor, TchError> {
17197 let n = n.into();
17198 let mut c_tensors = [std::ptr::null_mut(); 1];
17199 unsafe_torch_err!(atg_fft_rfft_out(
17200 c_tensors.as_mut_ptr(),
17201 out.c_tensor,
17202 self.c_tensor,
17203 n.unwrap_or(0i64),
17204 n.is_none() as i8,
17205 dim,
17206 norm.as_ptr(),
17207 norm.len() as i32
17208 ));
17209 Ok(Tensor { c_tensor: c_tensors[0] })
17210 }
17211
17212 pub fn f_fft_rfftfreq(n: i64, d: f64, options: (Kind, Device)) -> Result<Tensor, TchError> {
17213 let mut c_tensors = [std::ptr::null_mut(); 1];
17214 unsafe_torch_err!(atg_fft_rfftfreq(
17215 c_tensors.as_mut_ptr(),
17216 n,
17217 d,
17218 options.0.c_int(),
17219 options.1.c_int()
17220 ));
17221 Ok(Tensor { c_tensor: c_tensors[0] })
17222 }
17223
17224 pub fn f_fft_rfftfreq_out(out: &Tensor, n: i64, d: f64) -> Result<Tensor, TchError> {
17225 let mut c_tensors = [std::ptr::null_mut(); 1];
17226 unsafe_torch_err!(atg_fft_rfftfreq_out(c_tensors.as_mut_ptr(), out.c_tensor, n, d));
17227 Ok(Tensor { c_tensor: c_tensors[0] })
17228 }
17229
17230 pub fn f_fft_rfftn(
17231 &self,
17232 s: impl IntListOption,
17233 dim: impl IntListOption,
17234 norm: &str,
17235 ) -> Result<Tensor, TchError> {
17236 let mut c_tensors = [std::ptr::null_mut(); 1];
17237 unsafe_torch_err!(atg_fft_rfftn(
17238 c_tensors.as_mut_ptr(),
17239 self.c_tensor,
17240 s.as_ptr(),
17241 s.len_i32(),
17242 dim.as_ptr(),
17243 dim.len_i32(),
17244 norm.as_ptr(),
17245 norm.len() as i32
17246 ));
17247 Ok(Tensor { c_tensor: c_tensors[0] })
17248 }
17249
17250 pub fn f_fft_rfftn_out(
17251 &self,
17252 out: &Tensor,
17253 s: impl IntListOption,
17254 dim: impl IntListOption,
17255 norm: &str,
17256 ) -> Result<Tensor, TchError> {
17257 let mut c_tensors = [std::ptr::null_mut(); 1];
17258 unsafe_torch_err!(atg_fft_rfftn_out(
17259 c_tensors.as_mut_ptr(),
17260 out.c_tensor,
17261 self.c_tensor,
17262 s.as_ptr(),
17263 s.len_i32(),
17264 dim.as_ptr(),
17265 dim.len_i32(),
17266 norm.as_ptr(),
17267 norm.len() as i32
17268 ));
17269 Ok(Tensor { c_tensor: c_tensors[0] })
17270 }
17271
17272 pub fn f_fill<S: Into<Scalar>>(&self, value: S) -> Result<Tensor, TchError> {
17273 let mut c_tensors = [std::ptr::null_mut(); 1];
17274 unsafe_torch_err!(atg_fill(c_tensors.as_mut_ptr(), self.c_tensor, value.into().c_scalar));
17275 Ok(Tensor { c_tensor: c_tensors[0] })
17276 }
17277
17278 pub fn f_fill_<S: Into<Scalar>>(&mut self, value: S) -> Result<Tensor, TchError> {
17279 let mut c_tensors = [std::ptr::null_mut(); 1];
17280 unsafe_torch_err!(atg_fill_(c_tensors.as_mut_ptr(), self.c_tensor, value.into().c_scalar));
17281 Ok(Tensor { c_tensor: c_tensors[0] })
17282 }
17283
17284 pub fn f_fill_diagonal_<S: Into<Scalar>>(
17285 &mut self,
17286 fill_value: S,
17287 wrap: bool,
17288 ) -> Result<Tensor, TchError> {
17289 let mut c_tensors = [std::ptr::null_mut(); 1];
17290 unsafe_torch_err!(atg_fill_diagonal_(
17291 c_tensors.as_mut_ptr(),
17292 self.c_tensor,
17293 fill_value.into().c_scalar,
17294 if wrap { 1 } else { 0 }
17295 ));
17296 Ok(Tensor { c_tensor: c_tensors[0] })
17297 }
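    // Illustrative sketch for `f_fill_diagonal_` above (not generated; assumes the
    // usual `i64: Into<Scalar>` impl from this crate):
    //
    //     let mut m = Tensor::f_full(&[3i64, 3][..], 0, (Kind::Float, Device::Cpu))?;
    //     // Write 1 on the main diagonal in place; `wrap` only matters for tall matrices.
    //     let _ = m.f_fill_diagonal_(1, false)?;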
17298
17299 pub fn f_fill_scalar_out<S: Into<Scalar>>(
17300 &self,
17301 out: &Tensor,
17302 value: S,
17303 ) -> Result<Tensor, TchError> {
17304 let mut c_tensors = [std::ptr::null_mut(); 1];
17305 unsafe_torch_err!(atg_fill_scalar_out(
17306 c_tensors.as_mut_ptr(),
17307 out.c_tensor,
17308 self.c_tensor,
17309 value.into().c_scalar
17310 ));
17311 Ok(Tensor { c_tensor: c_tensors[0] })
17312 }
17313
17314 pub fn f_fill_tensor(&self, value: &Tensor) -> Result<Tensor, TchError> {
17315 let mut c_tensors = [std::ptr::null_mut(); 1];
17316 unsafe_torch_err!(atg_fill_tensor(c_tensors.as_mut_ptr(), self.c_tensor, value.c_tensor));
17317 Ok(Tensor { c_tensor: c_tensors[0] })
17318 }
17319
17320 pub fn f_fill_tensor_(&mut self, value: &Tensor) -> Result<Tensor, TchError> {
17321 let mut c_tensors = [std::ptr::null_mut(); 1];
17322 unsafe_torch_err!(atg_fill_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, value.c_tensor));
17323 Ok(Tensor { c_tensor: c_tensors[0] })
17324 }
17325
17326 pub fn f_fill_tensor_out(&self, out: &Tensor, value: &Tensor) -> Result<Tensor, TchError> {
17327 let mut c_tensors = [std::ptr::null_mut(); 1];
17328 unsafe_torch_err!(atg_fill_tensor_out(
17329 c_tensors.as_mut_ptr(),
17330 out.c_tensor,
17331 self.c_tensor,
17332 value.c_tensor
17333 ));
17334 Ok(Tensor { c_tensor: c_tensors[0] })
17335 }
17336
17337 pub fn f_fix(&self) -> Result<Tensor, TchError> {
17338 let mut c_tensors = [std::ptr::null_mut(); 1];
17339 unsafe_torch_err!(atg_fix(c_tensors.as_mut_ptr(), self.c_tensor));
17340 Ok(Tensor { c_tensor: c_tensors[0] })
17341 }
17342
17343 pub fn f_fix_(&mut self) -> Result<Tensor, TchError> {
17344 let mut c_tensors = [std::ptr::null_mut(); 1];
17345 unsafe_torch_err!(atg_fix_(c_tensors.as_mut_ptr(), self.c_tensor));
17346 Ok(Tensor { c_tensor: c_tensors[0] })
17347 }
17348
17349 pub fn f_fix_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
17350 let mut c_tensors = [std::ptr::null_mut(); 1];
17351 unsafe_torch_err!(atg_fix_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
17352 Ok(Tensor { c_tensor: c_tensors[0] })
17353 }
17354
17355 pub fn f_flatten(&self, start_dim: i64, end_dim: i64) -> Result<Tensor, TchError> {
17356 let mut c_tensors = [std::ptr::null_mut(); 1];
17357 unsafe_torch_err!(atg_flatten(c_tensors.as_mut_ptr(), self.c_tensor, start_dim, end_dim));
17358 Ok(Tensor { c_tensor: c_tensors[0] })
17359 }
17360
17361 pub fn f_flatten_dense_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
17362 let mut c_tensors = [std::ptr::null_mut(); 1];
17363 unsafe_torch_err!(atg_flatten_dense_tensors(
17364 c_tensors.as_mut_ptr(),
17365 ptr_list(tensors).as_ptr(),
17366 tensors.len() as i32
17367 ));
17368 Ok(Tensor { c_tensor: c_tensors[0] })
17369 }
17370
17371 pub fn f_flip(&self, dims: impl IntList) -> Result<Tensor, TchError> {
17372 let mut c_tensors = [std::ptr::null_mut(); 1];
17373 unsafe_torch_err!(atg_flip(
17374 c_tensors.as_mut_ptr(),
17375 self.c_tensor,
17376 dims.as_ptr(),
17377 dims.len_i32()
17378 ));
17379 Ok(Tensor { c_tensor: c_tensors[0] })
17380 }
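    // Illustrative sketch for `f_flip` above (not generated; assumes `&[i64]`
    // implements this crate's `IntList` as in tch):
    //
    //     let x = Tensor::f_full(&[2i64, 3][..], 1.0, (Kind::Float, Device::Cpu))?;
    //     // Reverse the tensor along both dimensions.
    //     let reversed = x.f_flip(&[0i64, 1][..])?;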
17381
17382 pub fn f_flip_out(&self, out: &Tensor, dims: impl IntList) -> Result<Tensor, TchError> {
17383 let mut c_tensors = [std::ptr::null_mut(); 1];
17384 unsafe_torch_err!(atg_flip_out(
17385 c_tensors.as_mut_ptr(),
17386 out.c_tensor,
17387 self.c_tensor,
17388 dims.as_ptr(),
17389 dims.len_i32()
17390 ));
17391 Ok(Tensor { c_tensor: c_tensors[0] })
17392 }
17393
17394 pub fn f_fliplr(&self) -> Result<Tensor, TchError> {
17395 let mut c_tensors = [std::ptr::null_mut(); 1];
17396 unsafe_torch_err!(atg_fliplr(c_tensors.as_mut_ptr(), self.c_tensor));
17397 Ok(Tensor { c_tensor: c_tensors[0] })
17398 }
17399
17400 pub fn f_flipud(&self) -> Result<Tensor, TchError> {
17401 let mut c_tensors = [std::ptr::null_mut(); 1];
17402 unsafe_torch_err!(atg_flipud(c_tensors.as_mut_ptr(), self.c_tensor));
17403 Ok(Tensor { c_tensor: c_tensors[0] })
17404 }
17405
17406 pub fn f_float_power(&self, exponent: &Tensor) -> Result<Tensor, TchError> {
17407 let mut c_tensors = [std::ptr::null_mut(); 1];
17408 unsafe_torch_err!(atg_float_power(
17409 c_tensors.as_mut_ptr(),
17410 self.c_tensor,
17411 exponent.c_tensor
17412 ));
17413 Ok(Tensor { c_tensor: c_tensors[0] })
17414 }
17415
17416 pub fn f_float_power_<S: Into<Scalar>>(&mut self, exponent: S) -> Result<Tensor, TchError> {
17417 let mut c_tensors = [std::ptr::null_mut(); 1];
17418 unsafe_torch_err!(atg_float_power_(
17419 c_tensors.as_mut_ptr(),
17420 self.c_tensor,
17421 exponent.into().c_scalar
17422 ));
17423 Ok(Tensor { c_tensor: c_tensors[0] })
17424 }
17425
17426 pub fn f_float_power_scalar<S: Into<Scalar>>(
17427 self_scalar: S,
17428 exponent: &Tensor,
17429 ) -> Result<Tensor, TchError> {
17430 let mut c_tensors = [std::ptr::null_mut(); 1];
17431 unsafe_torch_err!(atg_float_power_scalar(
17432 c_tensors.as_mut_ptr(),
17433 self_scalar.into().c_scalar,
17434 exponent.c_tensor
17435 ));
17436 Ok(Tensor { c_tensor: c_tensors[0] })
17437 }
17438
17439 pub fn f_float_power_scalar_out<S: Into<Scalar>>(
17440 out: &Tensor,
17441 self_scalar: S,
17442 exponent: &Tensor,
17443 ) -> Result<Tensor, TchError> {
17444 let mut c_tensors = [std::ptr::null_mut(); 1];
17445 unsafe_torch_err!(atg_float_power_scalar_out(
17446 c_tensors.as_mut_ptr(),
17447 out.c_tensor,
17448 self_scalar.into().c_scalar,
17449 exponent.c_tensor
17450 ));
17451 Ok(Tensor { c_tensor: c_tensors[0] })
17452 }
17453
17454 pub fn f_float_power_tensor_(&mut self, exponent: &Tensor) -> Result<Tensor, TchError> {
17455 let mut c_tensors = [std::ptr::null_mut(); 1];
17456 unsafe_torch_err!(atg_float_power_tensor_(
17457 c_tensors.as_mut_ptr(),
17458 self.c_tensor,
17459 exponent.c_tensor
17460 ));
17461 Ok(Tensor { c_tensor: c_tensors[0] })
17462 }
17463
17464 pub fn f_float_power_tensor_scalar<S: Into<Scalar>>(
17465 &self,
17466 exponent: S,
17467 ) -> Result<Tensor, TchError> {
17468 let mut c_tensors = [std::ptr::null_mut(); 1];
17469 unsafe_torch_err!(atg_float_power_tensor_scalar(
17470 c_tensors.as_mut_ptr(),
17471 self.c_tensor,
17472 exponent.into().c_scalar
17473 ));
17474 Ok(Tensor { c_tensor: c_tensors[0] })
17475 }
17476
17477 pub fn f_float_power_tensor_scalar_out<S: Into<Scalar>>(
17478 &self,
17479 out: &Tensor,
17480 exponent: S,
17481 ) -> Result<Tensor, TchError> {
17482 let mut c_tensors = [std::ptr::null_mut(); 1];
17483 unsafe_torch_err!(atg_float_power_tensor_scalar_out(
17484 c_tensors.as_mut_ptr(),
17485 out.c_tensor,
17486 self.c_tensor,
17487 exponent.into().c_scalar
17488 ));
17489 Ok(Tensor { c_tensor: c_tensors[0] })
17490 }
17491
17492 pub fn f_float_power_tensor_tensor_out(
17493 &self,
17494 out: &Tensor,
17495 exponent: &Tensor,
17496 ) -> Result<Tensor, TchError> {
17497 let mut c_tensors = [std::ptr::null_mut(); 1];
17498 unsafe_torch_err!(atg_float_power_tensor_tensor_out(
17499 c_tensors.as_mut_ptr(),
17500 out.c_tensor,
17501 self.c_tensor,
17502 exponent.c_tensor
17503 ));
17504 Ok(Tensor { c_tensor: c_tensors[0] })
17505 }
17506
17507 pub fn f_floor(&self) -> Result<Tensor, TchError> {
17508 let mut c_tensors = [std::ptr::null_mut(); 1];
17509 unsafe_torch_err!(atg_floor(c_tensors.as_mut_ptr(), self.c_tensor));
17510 Ok(Tensor { c_tensor: c_tensors[0] })
17511 }
17512
17513 pub fn f_floor_(&mut self) -> Result<Tensor, TchError> {
17514 let mut c_tensors = [std::ptr::null_mut(); 1];
17515 unsafe_torch_err!(atg_floor_(c_tensors.as_mut_ptr(), self.c_tensor));
17516 Ok(Tensor { c_tensor: c_tensors[0] })
17517 }
17518
17519 pub fn f_floor_divide(&self, other: &Tensor) -> Result<Tensor, TchError> {
17520 let mut c_tensors = [std::ptr::null_mut(); 1];
17521 unsafe_torch_err!(atg_floor_divide(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
17522 Ok(Tensor { c_tensor: c_tensors[0] })
17523 }
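    // Illustrative sketch for `f_floor_divide` above (not generated): element-wise
    // division with the result floored, as in Python's `//`.
    //
    //     let a = Tensor::f_full(&[4i64][..], 7, (Kind::Int64, Device::Cpu))?;
    //     let b = Tensor::f_full(&[4i64][..], 2, (Kind::Int64, Device::Cpu))?;
    //     let q = a.f_floor_divide(&b)?; // every element is 3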
17524
17525 pub fn f_floor_divide_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
17526 let mut c_tensors = [std::ptr::null_mut(); 1];
17527 unsafe_torch_err!(atg_floor_divide_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
17528 Ok(Tensor { c_tensor: c_tensors[0] })
17529 }
17530
17531 pub fn f_floor_divide_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
17532 let mut c_tensors = [std::ptr::null_mut(); 1];
17533 unsafe_torch_err!(atg_floor_divide_out(
17534 c_tensors.as_mut_ptr(),
17535 out.c_tensor,
17536 self.c_tensor,
17537 other.c_tensor
17538 ));
17539 Ok(Tensor { c_tensor: c_tensors[0] })
17540 }
17541
17542 pub fn f_floor_divide_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
17543 let mut c_tensors = [std::ptr::null_mut(); 1];
17544 unsafe_torch_err!(atg_floor_divide_scalar(
17545 c_tensors.as_mut_ptr(),
17546 self.c_tensor,
17547 other.into().c_scalar
17548 ));
17549 Ok(Tensor { c_tensor: c_tensors[0] })
17550 }
17551
17552 pub fn f_floor_divide_scalar_<S: Into<Scalar>>(
17553 &mut self,
17554 other: S,
17555 ) -> Result<Tensor, TchError> {
17556 let mut c_tensors = [std::ptr::null_mut(); 1];
17557 unsafe_torch_err!(atg_floor_divide_scalar_(
17558 c_tensors.as_mut_ptr(),
17559 self.c_tensor,
17560 other.into().c_scalar
17561 ));
17562 Ok(Tensor { c_tensor: c_tensors[0] })
17563 }
17564
17565 pub fn f_floor_divide_scalar_out<S: Into<Scalar>>(
17566 &self,
17567 out: &Tensor,
17568 other: S,
17569 ) -> Result<Tensor, TchError> {
17570 let mut c_tensors = [std::ptr::null_mut(); 1];
17571 unsafe_torch_err!(atg_floor_divide_scalar_out(
17572 c_tensors.as_mut_ptr(),
17573 out.c_tensor,
17574 self.c_tensor,
17575 other.into().c_scalar
17576 ));
17577 Ok(Tensor { c_tensor: c_tensors[0] })
17578 }
17579
17580 pub fn f_floor_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
17581 let mut c_tensors = [std::ptr::null_mut(); 1];
17582 unsafe_torch_err!(atg_floor_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
17583 Ok(Tensor { c_tensor: c_tensors[0] })
17584 }
17585
17586 pub fn f_fmax(&self, other: &Tensor) -> Result<Tensor, TchError> {
17587 let mut c_tensors = [std::ptr::null_mut(); 1];
17588 unsafe_torch_err!(atg_fmax(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
17589 Ok(Tensor { c_tensor: c_tensors[0] })
17590 }
17591
17592 pub fn f_fmax_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
17593 let mut c_tensors = [std::ptr::null_mut(); 1];
17594 unsafe_torch_err!(atg_fmax_out(
17595 c_tensors.as_mut_ptr(),
17596 out.c_tensor,
17597 self.c_tensor,
17598 other.c_tensor
17599 ));
17600 Ok(Tensor { c_tensor: c_tensors[0] })
17601 }
17602
17603 pub fn f_fmin(&self, other: &Tensor) -> Result<Tensor, TchError> {
17604 let mut c_tensors = [std::ptr::null_mut(); 1];
17605 unsafe_torch_err!(atg_fmin(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
17606 Ok(Tensor { c_tensor: c_tensors[0] })
17607 }
17608
17609 pub fn f_fmin_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
17610 let mut c_tensors = [std::ptr::null_mut(); 1];
17611 unsafe_torch_err!(atg_fmin_out(
17612 c_tensors.as_mut_ptr(),
17613 out.c_tensor,
17614 self.c_tensor,
17615 other.c_tensor
17616 ));
17617 Ok(Tensor { c_tensor: c_tensors[0] })
17618 }
17619
17620 pub fn f_fmod<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
17621 let mut c_tensors = [std::ptr::null_mut(); 1];
17622 unsafe_torch_err!(atg_fmod(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
17623 Ok(Tensor { c_tensor: c_tensors[0] })
17624 }
17625
17626 pub fn f_fmod_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
17627 let mut c_tensors = [std::ptr::null_mut(); 1];
17628 unsafe_torch_err!(atg_fmod_(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
17629 Ok(Tensor { c_tensor: c_tensors[0] })
17630 }
17631
17632 pub fn f_fmod_scalar_out<S: Into<Scalar>>(
17633 &self,
17634 out: &Tensor,
17635 other: S,
17636 ) -> Result<Tensor, TchError> {
17637 let mut c_tensors = [std::ptr::null_mut(); 1];
17638 unsafe_torch_err!(atg_fmod_scalar_out(
17639 c_tensors.as_mut_ptr(),
17640 out.c_tensor,
17641 self.c_tensor,
17642 other.into().c_scalar
17643 ));
17644 Ok(Tensor { c_tensor: c_tensors[0] })
17645 }
17646
17647 pub fn f_fmod_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
17648 let mut c_tensors = [std::ptr::null_mut(); 1];
17649 unsafe_torch_err!(atg_fmod_tensor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
17650 Ok(Tensor { c_tensor: c_tensors[0] })
17651 }
17652
17653 pub fn f_fmod_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
17654 let mut c_tensors = [std::ptr::null_mut(); 1];
17655 unsafe_torch_err!(atg_fmod_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
17656 Ok(Tensor { c_tensor: c_tensors[0] })
17657 }
17658
17659 pub fn f_fmod_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
17660 let mut c_tensors = [std::ptr::null_mut(); 1];
17661 unsafe_torch_err!(atg_fmod_tensor_out(
17662 c_tensors.as_mut_ptr(),
17663 out.c_tensor,
17664 self.c_tensor,
17665 other.c_tensor
17666 ));
17667 Ok(Tensor { c_tensor: c_tensors[0] })
17668 }
17669
17670 pub fn f_frac(&self) -> Result<Tensor, TchError> {
17671 let mut c_tensors = [std::ptr::null_mut(); 1];
17672 unsafe_torch_err!(atg_frac(c_tensors.as_mut_ptr(), self.c_tensor));
17673 Ok(Tensor { c_tensor: c_tensors[0] })
17674 }
17675
17676 pub fn f_frac_(&mut self) -> Result<Tensor, TchError> {
17677 let mut c_tensors = [std::ptr::null_mut(); 1];
17678 unsafe_torch_err!(atg_frac_(c_tensors.as_mut_ptr(), self.c_tensor));
17679 Ok(Tensor { c_tensor: c_tensors[0] })
17680 }
17681
17682 pub fn f_frac_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
17683 let mut c_tensors = [std::ptr::null_mut(); 1];
17684 unsafe_torch_err!(atg_frac_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
17685 Ok(Tensor { c_tensor: c_tensors[0] })
17686 }
17687
17688 pub fn f_fractional_max_pool2d(
17689 &self,
17690 kernel_size: impl IntList,
17691 output_size: impl IntList,
17692 random_samples: &Tensor,
17693 ) -> Result<(Tensor, Tensor), TchError> {
17694 let mut c_tensors = [std::ptr::null_mut(); 2];
17695 unsafe_torch_err!(atg_fractional_max_pool2d(
17696 c_tensors.as_mut_ptr(),
17697 self.c_tensor,
17698 kernel_size.as_ptr(),
17699 kernel_size.len_i32(),
17700 output_size.as_ptr(),
17701 output_size.len_i32(),
17702 random_samples.c_tensor
17703 ));
17704 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
17705 }
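    // Illustrative sketch for `f_fractional_max_pool2d` above (not generated).
    // `random_samples` holds per-(batch, channel) offsets in [0, 1) with shape
    // (N, C, 2); a constant 0.5 is used here purely for determinism.
    //
    //     let x = Tensor::f_full(&[1i64, 1, 8, 8][..], 1.0, (Kind::Float, Device::Cpu))?;
    //     let samples = Tensor::f_full(&[1i64, 1, 2][..], 0.5, (Kind::Float, Device::Cpu))?;
    //     let (pooled, indices) = x.f_fractional_max_pool2d(&[2i64, 2][..], &[4i64, 4][..], &samples)?;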
17706
17707 pub fn f_fractional_max_pool2d_backward(
17708 &self,
17709 grad_output: &Tensor,
17710 kernel_size: impl IntList,
17711 output_size: impl IntList,
17712 indices: &Tensor,
17713 ) -> Result<Tensor, TchError> {
17714 let mut c_tensors = [std::ptr::null_mut(); 1];
17715 unsafe_torch_err!(atg_fractional_max_pool2d_backward(
17716 c_tensors.as_mut_ptr(),
17717 grad_output.c_tensor,
17718 self.c_tensor,
17719 kernel_size.as_ptr(),
17720 kernel_size.len_i32(),
17721 output_size.as_ptr(),
17722 output_size.len_i32(),
17723 indices.c_tensor
17724 ));
17725 Ok(Tensor { c_tensor: c_tensors[0] })
17726 }
17727
17728 pub fn f_fractional_max_pool2d_backward_grad_input(
17729 &self,
17730 grad_input: &Tensor,
17731 grad_output: &Tensor,
17732 kernel_size: impl IntList,
17733 output_size: impl IntList,
17734 indices: &Tensor,
17735 ) -> Result<Tensor, TchError> {
17736 let mut c_tensors = [std::ptr::null_mut(); 1];
17737 unsafe_torch_err!(atg_fractional_max_pool2d_backward_grad_input(
17738 c_tensors.as_mut_ptr(),
17739 grad_input.c_tensor,
17740 grad_output.c_tensor,
17741 self.c_tensor,
17742 kernel_size.as_ptr(),
17743 kernel_size.len_i32(),
17744 output_size.as_ptr(),
17745 output_size.len_i32(),
17746 indices.c_tensor
17747 ));
17748 Ok(Tensor { c_tensor: c_tensors[0] })
17749 }
17750
17751 pub fn f_fractional_max_pool2d_output(
17752 &self,
17753 output: &Tensor,
17754 indices: &Tensor,
17755 kernel_size: impl IntList,
17756 output_size: impl IntList,
17757 random_samples: &Tensor,
17758 ) -> Result<(Tensor, Tensor), TchError> {
17759 let mut c_tensors = [std::ptr::null_mut(); 2];
17760 unsafe_torch_err!(atg_fractional_max_pool2d_output(
17761 c_tensors.as_mut_ptr(),
17762 output.c_tensor,
17763 indices.c_tensor,
17764 self.c_tensor,
17765 kernel_size.as_ptr(),
17766 kernel_size.len_i32(),
17767 output_size.as_ptr(),
17768 output_size.len_i32(),
17769 random_samples.c_tensor
17770 ));
17771 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
17772 }
17773
17774 pub fn f_fractional_max_pool3d(
17775 &self,
17776 kernel_size: impl IntList,
17777 output_size: impl IntList,
17778 random_samples: &Tensor,
17779 ) -> Result<(Tensor, Tensor), TchError> {
17780 let mut c_tensors = [std::ptr::null_mut(); 2];
17781 unsafe_torch_err!(atg_fractional_max_pool3d(
17782 c_tensors.as_mut_ptr(),
17783 self.c_tensor,
17784 kernel_size.as_ptr(),
17785 kernel_size.len_i32(),
17786 output_size.as_ptr(),
17787 output_size.len_i32(),
17788 random_samples.c_tensor
17789 ));
17790 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
17791 }
17792
17793 pub fn f_fractional_max_pool3d_backward(
17794 &self,
17795 grad_output: &Tensor,
17796 kernel_size: impl IntList,
17797 output_size: impl IntList,
17798 indices: &Tensor,
17799 ) -> Result<Tensor, TchError> {
17800 let mut c_tensors = [std::ptr::null_mut(); 1];
17801 unsafe_torch_err!(atg_fractional_max_pool3d_backward(
17802 c_tensors.as_mut_ptr(),
17803 grad_output.c_tensor,
17804 self.c_tensor,
17805 kernel_size.as_ptr(),
17806 kernel_size.len_i32(),
17807 output_size.as_ptr(),
17808 output_size.len_i32(),
17809 indices.c_tensor
17810 ));
17811 Ok(Tensor { c_tensor: c_tensors[0] })
17812 }
17813
17814 pub fn f_fractional_max_pool3d_backward_grad_input(
17815 &self,
17816 grad_input: &Tensor,
17817 grad_output: &Tensor,
17818 kernel_size: impl IntList,
17819 output_size: impl IntList,
17820 indices: &Tensor,
17821 ) -> Result<Tensor, TchError> {
17822 let mut c_tensors = [std::ptr::null_mut(); 1];
17823 unsafe_torch_err!(atg_fractional_max_pool3d_backward_grad_input(
17824 c_tensors.as_mut_ptr(),
17825 grad_input.c_tensor,
17826 grad_output.c_tensor,
17827 self.c_tensor,
17828 kernel_size.as_ptr(),
17829 kernel_size.len_i32(),
17830 output_size.as_ptr(),
17831 output_size.len_i32(),
17832 indices.c_tensor
17833 ));
17834 Ok(Tensor { c_tensor: c_tensors[0] })
17835 }
17836
17837 pub fn f_fractional_max_pool3d_output(
17838 &self,
17839 output: &Tensor,
17840 indices: &Tensor,
17841 kernel_size: impl IntList,
17842 output_size: impl IntList,
17843 random_samples: &Tensor,
17844 ) -> Result<(Tensor, Tensor), TchError> {
17845 let mut c_tensors = [std::ptr::null_mut(); 2];
17846 unsafe_torch_err!(atg_fractional_max_pool3d_output(
17847 c_tensors.as_mut_ptr(),
17848 output.c_tensor,
17849 indices.c_tensor,
17850 self.c_tensor,
17851 kernel_size.as_ptr(),
17852 kernel_size.len_i32(),
17853 output_size.as_ptr(),
17854 output_size.len_i32(),
17855 random_samples.c_tensor
17856 ));
17857 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
17858 }
17859
17860 pub fn f_frexp(&self) -> Result<(Tensor, Tensor), TchError> {
17861 let mut c_tensors = [std::ptr::null_mut(); 2];
17862 unsafe_torch_err!(atg_frexp(c_tensors.as_mut_ptr(), self.c_tensor));
17863 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
17864 }
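    // Illustrative sketch for `f_frexp` above (not generated): decomposes each
    // element into a mantissa and an integer exponent so that
    // self = mantissa * 2^exponent.
    //
    //     let x = Tensor::f_full(&[3i64][..], 6.0, (Kind::Float, Device::Cpu))?;
    //     let (mantissa, exponent) = x.f_frexp()?; // 6.0 == 0.75 * 2^3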
17865
17866 pub fn f_frexp_tensor_out(
17867 &self,
17868 mantissa: &Tensor,
17869 exponent: &Tensor,
17870 ) -> Result<(Tensor, Tensor), TchError> {
17871 let mut c_tensors = [std::ptr::null_mut(); 2];
17872 unsafe_torch_err!(atg_frexp_tensor_out(
17873 c_tensors.as_mut_ptr(),
17874 mantissa.c_tensor,
17875 exponent.c_tensor,
17876 self.c_tensor
17877 ));
17878 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
17879 }
17880
17881 pub fn f_frobenius_norm(&self, dim: impl IntList, keepdim: bool) -> Result<Tensor, TchError> {
17882 let mut c_tensors = [std::ptr::null_mut(); 1];
17883 unsafe_torch_err!(atg_frobenius_norm(
17884 c_tensors.as_mut_ptr(),
17885 self.c_tensor,
17886 dim.as_ptr(),
17887 dim.len_i32(),
17888 if keepdim { 1 } else { 0 }
17889 ));
17890 Ok(Tensor { c_tensor: c_tensors[0] })
17891 }
17892
17893 pub fn f_frobenius_norm_out(
17894 &self,
17895 out: &Tensor,
17896 dim: impl IntList,
17897 keepdim: bool,
17898 ) -> Result<Tensor, TchError> {
17899 let mut c_tensors = [std::ptr::null_mut(); 1];
17900 unsafe_torch_err!(atg_frobenius_norm_out(
17901 c_tensors.as_mut_ptr(),
17902 out.c_tensor,
17903 self.c_tensor,
17904 dim.as_ptr(),
17905 dim.len_i32(),
17906 if keepdim { 1 } else { 0 }
17907 ));
17908 Ok(Tensor { c_tensor: c_tensors[0] })
17909 }
17910
17911 pub fn f_from_file(
17912 filename: &str,
17913 shared: bool,
17914 size: impl Into<Option<i64>>,
17915 options: (Kind, Device),
17916 ) -> Result<Tensor, TchError> {
17917 let size = size.into();
17918 let mut c_tensors = [std::ptr::null_mut(); 1];
17919 unsafe_torch_err!(atg_from_file(
17920 c_tensors.as_mut_ptr(),
17921 filename.as_ptr(),
17922 filename.len() as i32,
17923 if shared { 1 } else { 0 },
17924 size.unwrap_or(0i64),
17925 size.is_none() as i8,
17926 options.0.c_int(),
17927 options.1.c_int()
17928 ));
17929 Ok(Tensor { c_tensor: c_tensors[0] })
17930 }
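    // Illustrative sketch for `f_from_file` above (not generated). The path below
    // is purely hypothetical; `size` is the number of elements to map and `shared`
    // controls, roughly, whether the mapping is shared across processes.
    //
    //     let t = Tensor::f_from_file("weights.bin", false, 1024, (Kind::Uint8, Device::Cpu))?;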
17931
17932 pub fn f_from_file_out(
17933 out: &Tensor,
17934 filename: &str,
17935 shared: bool,
17936 size: impl Into<Option<i64>>,
17937 ) -> Result<Tensor, TchError> {
17938 let size = size.into();
17939 let mut c_tensors = [std::ptr::null_mut(); 1];
17940 unsafe_torch_err!(atg_from_file_out(
17941 c_tensors.as_mut_ptr(),
17942 out.c_tensor,
17943 filename.as_ptr(),
17944 filename.len() as i32,
17945 if shared { 1 } else { 0 },
17946 size.unwrap_or(0i64),
17947 size.is_none() as i8
17948 ));
17949 Ok(Tensor { c_tensor: c_tensors[0] })
17950 }
17951
17952 pub fn f_full<S: Into<Scalar>>(
17953 size: impl IntList,
17954 fill_value: S,
17955 options: (Kind, Device),
17956 ) -> Result<Tensor, TchError> {
17957 let mut c_tensors = [std::ptr::null_mut(); 1];
17958 unsafe_torch_err!(atg_full(
17959 c_tensors.as_mut_ptr(),
17960 size.as_ptr(),
17961 size.len_i32(),
17962 fill_value.into().c_scalar,
17963 options.0.c_int(),
17964 options.1.c_int()
17965 ));
17966 Ok(Tensor { c_tensor: c_tensors[0] })
17967 }
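    // Illustrative sketch for `f_full` above (not generated): create a constant
    // tensor with an explicit dtype/device pair.
    //
    //     let ones = Tensor::f_full(&[2i64, 3][..], 1, (Kind::Int64, Device::Cpu))?;
    //     let halves = Tensor::f_full(&[2i64, 3][..], 0.5, (Kind::Float, Device::Cpu))?;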
17968
17969 pub fn f_full_like<S: Into<Scalar>>(&self, fill_value: S) -> Result<Tensor, TchError> {
17970 let mut c_tensors = [std::ptr::null_mut(); 1];
17971 unsafe_torch_err!(atg_full_like(
17972 c_tensors.as_mut_ptr(),
17973 self.c_tensor,
17974 fill_value.into().c_scalar
17975 ));
17976 Ok(Tensor { c_tensor: c_tensors[0] })
17977 }
17978
17979 pub fn f_full_like_out<S: Into<Scalar>>(
17980 &self,
17981 out: &Tensor,
17982 fill_value: S,
17983 ) -> Result<Tensor, TchError> {
17984 let mut c_tensors = [std::ptr::null_mut(); 1];
17985 unsafe_torch_err!(atg_full_like_out(
17986 c_tensors.as_mut_ptr(),
17987 out.c_tensor,
17988 self.c_tensor,
17989 fill_value.into().c_scalar
17990 ));
17991 Ok(Tensor { c_tensor: c_tensors[0] })
17992 }
17993
17994 pub fn f_full_out<S: Into<Scalar>>(
17995 out: &Tensor,
17996 size: impl IntList,
17997 fill_value: S,
17998 ) -> Result<Tensor, TchError> {
17999 let mut c_tensors = [std::ptr::null_mut(); 1];
18000 unsafe_torch_err!(atg_full_out(
18001 c_tensors.as_mut_ptr(),
18002 out.c_tensor,
18003 size.as_ptr(),
18004 size.len_i32(),
18005 fill_value.into().c_scalar
18006 ));
18007 Ok(Tensor { c_tensor: c_tensors[0] })
18008 }
18009
18010 pub fn f_fused_moving_avg_obs_fake_quant(
18011 &self,
18012 observer_on: &Tensor,
18013 fake_quant_on: &Tensor,
18014 running_min: &Tensor,
18015 running_max: &Tensor,
18016 scale: &Tensor,
18017 zero_point: &Tensor,
18018 averaging_const: f64,
18019 quant_min: i64,
18020 quant_max: i64,
18021 ch_axis: i64,
18022 per_row_fake_quant: bool,
18023 symmetric_quant: bool,
18024 ) -> Result<Tensor, TchError> {
18025 let mut c_tensors = [std::ptr::null_mut(); 1];
18026 unsafe_torch_err!(atg_fused_moving_avg_obs_fake_quant(
18027 c_tensors.as_mut_ptr(),
18028 self.c_tensor,
18029 observer_on.c_tensor,
18030 fake_quant_on.c_tensor,
18031 running_min.c_tensor,
18032 running_max.c_tensor,
18033 scale.c_tensor,
18034 zero_point.c_tensor,
18035 averaging_const,
18036 quant_min,
18037 quant_max,
18038 ch_axis,
18039 if per_row_fake_quant { 1 } else { 0 },
18040 if symmetric_quant { 1 } else { 0 }
18041 ));
18042 Ok(Tensor { c_tensor: c_tensors[0] })
18043 }
18044
18045 pub fn f_gather(
18046 &self,
18047 dim: i64,
18048 index: &Tensor,
18049 sparse_grad: bool,
18050 ) -> Result<Tensor, TchError> {
18051 let mut c_tensors = [std::ptr::null_mut(); 1];
18052 unsafe_torch_err!(atg_gather(
18053 c_tensors.as_mut_ptr(),
18054 self.c_tensor,
18055 dim,
18056 index.c_tensor,
18057 if sparse_grad { 1 } else { 0 }
18058 ));
18059 Ok(Tensor { c_tensor: c_tensors[0] })
18060 }
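    // Illustrative sketch for `f_gather` above (not generated): pick elements along
    // `dim` using an integer index tensor of the same rank as `self`.
    //
    //     let src = Tensor::f_full(&[2i64, 3][..], 5.0, (Kind::Float, Device::Cpu))?;
    //     let idx = Tensor::f_full(&[2i64, 1][..], 0, (Kind::Int64, Device::Cpu))?;
    //     // Take column 0 of every row; `sparse_grad` only affects autograd behaviour.
    //     let picked = src.f_gather(1, &idx, false)?;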
18061
18062 pub fn f_gather_backward(
18063 &self,
18064 grad: &Tensor,
18065 dim: i64,
18066 index: &Tensor,
18067 sparse_grad: bool,
18068 ) -> Result<Tensor, TchError> {
18069 let mut c_tensors = [std::ptr::null_mut(); 1];
18070 unsafe_torch_err!(atg_gather_backward(
18071 c_tensors.as_mut_ptr(),
18072 grad.c_tensor,
18073 self.c_tensor,
18074 dim,
18075 index.c_tensor,
18076 if sparse_grad { 1 } else { 0 }
18077 ));
18078 Ok(Tensor { c_tensor: c_tensors[0] })
18079 }
18080
18081 pub fn f_gather_out(
18082 &self,
18083 out: &Tensor,
18084 dim: i64,
18085 index: &Tensor,
18086 sparse_grad: bool,
18087 ) -> Result<Tensor, TchError> {
18088 let mut c_tensors = [std::ptr::null_mut(); 1];
18089 unsafe_torch_err!(atg_gather_out(
18090 c_tensors.as_mut_ptr(),
18091 out.c_tensor,
18092 self.c_tensor,
18093 dim,
18094 index.c_tensor,
18095 if sparse_grad { 1 } else { 0 }
18096 ));
18097 Ok(Tensor { c_tensor: c_tensors[0] })
18098 }
18099
18100 pub fn f_gcd(&self, other: &Tensor) -> Result<Tensor, TchError> {
18101 let mut c_tensors = [std::ptr::null_mut(); 1];
18102 unsafe_torch_err!(atg_gcd(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
18103 Ok(Tensor { c_tensor: c_tensors[0] })
18104 }
18105
18106 pub fn f_gcd_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
18107 let mut c_tensors = [std::ptr::null_mut(); 1];
18108 unsafe_torch_err!(atg_gcd_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
18109 Ok(Tensor { c_tensor: c_tensors[0] })
18110 }
18111
18112 pub fn f_gcd_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
18113 let mut c_tensors = [std::ptr::null_mut(); 1];
18114 unsafe_torch_err!(atg_gcd_out(
18115 c_tensors.as_mut_ptr(),
18116 out.c_tensor,
18117 self.c_tensor,
18118 other.c_tensor
18119 ));
18120 Ok(Tensor { c_tensor: c_tensors[0] })
18121 }
18122
18123 pub fn f_ge<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
18124 let mut c_tensors = [std::ptr::null_mut(); 1];
18125 unsafe_torch_err!(atg_ge(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
18126 Ok(Tensor { c_tensor: c_tensors[0] })
18127 }
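    // Illustrative sketch for `f_ge` above (not generated): element-wise `>=`
    // against a scalar, returning a boolean tensor.
    //
    //     let x = Tensor::f_full(&[4i64][..], 2.0, (Kind::Float, Device::Cpu))?;
    //     let mask = x.f_ge(1.5)?; // all entries are true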
18128
18129 pub fn f_ge_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
18130 let mut c_tensors = [std::ptr::null_mut(); 1];
18131 unsafe_torch_err!(atg_ge_(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
18132 Ok(Tensor { c_tensor: c_tensors[0] })
18133 }
18134
18135 pub fn f_ge_scalar_out<S: Into<Scalar>>(
18136 &self,
18137 out: &Tensor,
18138 other: S,
18139 ) -> Result<Tensor, TchError> {
18140 let mut c_tensors = [std::ptr::null_mut(); 1];
18141 unsafe_torch_err!(atg_ge_scalar_out(
18142 c_tensors.as_mut_ptr(),
18143 out.c_tensor,
18144 self.c_tensor,
18145 other.into().c_scalar
18146 ));
18147 Ok(Tensor { c_tensor: c_tensors[0] })
18148 }
18149
18150 pub fn f_ge_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
18151 let mut c_tensors = [std::ptr::null_mut(); 1];
18152 unsafe_torch_err!(atg_ge_tensor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
18153 Ok(Tensor { c_tensor: c_tensors[0] })
18154 }
18155
18156 pub fn f_ge_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
18157 let mut c_tensors = [std::ptr::null_mut(); 1];
18158 unsafe_torch_err!(atg_ge_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
18159 Ok(Tensor { c_tensor: c_tensors[0] })
18160 }
18161
18162 pub fn f_ge_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
18163 let mut c_tensors = [std::ptr::null_mut(); 1];
18164 unsafe_torch_err!(atg_ge_tensor_out(
18165 c_tensors.as_mut_ptr(),
18166 out.c_tensor,
18167 self.c_tensor,
18168 other.c_tensor
18169 ));
18170 Ok(Tensor { c_tensor: c_tensors[0] })
18171 }
18172
18173 pub fn f_gelu(&self, approximate: &str) -> Result<Tensor, TchError> {
18174 let mut c_tensors = [std::ptr::null_mut(); 1];
18175 unsafe_torch_err!(atg_gelu(
18176 c_tensors.as_mut_ptr(),
18177 self.c_tensor,
18178 approximate.as_ptr(),
18179 approximate.len() as i32
18180 ));
18181 Ok(Tensor { c_tensor: c_tensors[0] })
18182 }
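    // Illustrative sketch for `f_gelu` above (not generated). `approximate` mirrors
    // the PyTorch argument and accepts "none" (exact) or "tanh".
    //
    //     let x = Tensor::f_full(&[4i64][..], 0.5, (Kind::Float, Device::Cpu))?;
    //     let y = x.f_gelu("none")?;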
18183
18184 pub fn f_gelu_(&mut self, approximate: &str) -> Result<Tensor, TchError> {
18185 let mut c_tensors = [std::ptr::null_mut(); 1];
18186 unsafe_torch_err!(atg_gelu_(
18187 c_tensors.as_mut_ptr(),
18188 self.c_tensor,
18189 approximate.as_ptr(),
18190 approximate.len() as i32
18191 ));
18192 Ok(Tensor { c_tensor: c_tensors[0] })
18193 }
18194
18195 pub fn f_gelu_backward(
18196 &self,
18197 grad_output: &Tensor,
18198 approximate: &str,
18199 ) -> Result<Tensor, TchError> {
18200 let mut c_tensors = [std::ptr::null_mut(); 1];
18201 unsafe_torch_err!(atg_gelu_backward(
18202 c_tensors.as_mut_ptr(),
18203 grad_output.c_tensor,
18204 self.c_tensor,
18205 approximate.as_ptr(),
18206 approximate.len() as i32
18207 ));
18208 Ok(Tensor { c_tensor: c_tensors[0] })
18209 }
18210
18211 pub fn f_gelu_backward_grad_input(
18212 &self,
18213 grad_input: &Tensor,
18214 grad_output: &Tensor,
18215 approximate: &str,
18216 ) -> Result<Tensor, TchError> {
18217 let mut c_tensors = [std::ptr::null_mut(); 1];
18218 unsafe_torch_err!(atg_gelu_backward_grad_input(
18219 c_tensors.as_mut_ptr(),
18220 grad_input.c_tensor,
18221 grad_output.c_tensor,
18222 self.c_tensor,
18223 approximate.as_ptr(),
18224 approximate.len() as i32
18225 ));
18226 Ok(Tensor { c_tensor: c_tensors[0] })
18227 }
18228
18229 pub fn f_gelu_out(&self, out: &Tensor, approximate: &str) -> Result<Tensor, TchError> {
18230 let mut c_tensors = [std::ptr::null_mut(); 1];
18231 unsafe_torch_err!(atg_gelu_out(
18232 c_tensors.as_mut_ptr(),
18233 out.c_tensor,
18234 self.c_tensor,
18235 approximate.as_ptr(),
18236 approximate.len() as i32
18237 ));
18238 Ok(Tensor { c_tensor: c_tensors[0] })
18239 }
18240
18241 pub fn f_geometric(&self, p: f64) -> Result<Tensor, TchError> {
18242 let mut c_tensors = [std::ptr::null_mut(); 1];
18243 unsafe_torch_err!(atg_geometric(c_tensors.as_mut_ptr(), self.c_tensor, p));
18244 Ok(Tensor { c_tensor: c_tensors[0] })
18245 }
18246
18247 pub fn f_geometric_(&mut self, p: f64) -> Result<Tensor, TchError> {
18248 let mut c_tensors = [std::ptr::null_mut(); 1];
18249 unsafe_torch_err!(atg_geometric_(c_tensors.as_mut_ptr(), self.c_tensor, p));
18250 Ok(Tensor { c_tensor: c_tensors[0] })
18251 }
18252
18253 pub fn f_geometric_out(&self, out: &Tensor, p: f64) -> Result<Tensor, TchError> {
18254 let mut c_tensors = [std::ptr::null_mut(); 1];
18255 unsafe_torch_err!(atg_geometric_out(
18256 c_tensors.as_mut_ptr(),
18257 out.c_tensor,
18258 self.c_tensor,
18259 p
18260 ));
18261 Ok(Tensor { c_tensor: c_tensors[0] })
18262 }
18263
18264 pub fn f_geqrf(&self) -> Result<(Tensor, Tensor), TchError> {
18265 let mut c_tensors = [std::ptr::null_mut(); 2];
18266 unsafe_torch_err!(atg_geqrf(c_tensors.as_mut_ptr(), self.c_tensor));
18267 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
18268 }
18269
18270 pub fn f_geqrf_a(&self, a: &Tensor, tau: &Tensor) -> Result<(Tensor, Tensor), TchError> {
18271 let mut c_tensors = [std::ptr::null_mut(); 2];
18272 unsafe_torch_err!(atg_geqrf_a(
18273 c_tensors.as_mut_ptr(),
18274 a.c_tensor,
18275 tau.c_tensor,
18276 self.c_tensor
18277 ));
18278 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
18279 }
18280
18281 pub fn f_ger(&self, vec2: &Tensor) -> Result<Tensor, TchError> {
18282 let mut c_tensors = [std::ptr::null_mut(); 1];
18283 unsafe_torch_err!(atg_ger(c_tensors.as_mut_ptr(), self.c_tensor, vec2.c_tensor));
18284 Ok(Tensor { c_tensor: c_tensors[0] })
18285 }
18286
18287 pub fn f_ger_out(&self, out: &Tensor, vec2: &Tensor) -> Result<Tensor, TchError> {
18288 let mut c_tensors = [std::ptr::null_mut(); 1];
18289 unsafe_torch_err!(atg_ger_out(
18290 c_tensors.as_mut_ptr(),
18291 out.c_tensor,
18292 self.c_tensor,
18293 vec2.c_tensor
18294 ));
18295 Ok(Tensor { c_tensor: c_tensors[0] })
18296 }
18297
18298 pub fn f_glu(&self, dim: i64) -> Result<Tensor, TchError> {
18299 let mut c_tensors = [std::ptr::null_mut(); 1];
18300 unsafe_torch_err!(atg_glu(c_tensors.as_mut_ptr(), self.c_tensor, dim));
18301 Ok(Tensor { c_tensor: c_tensors[0] })
18302 }
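    // Illustrative sketch for `f_glu` above (not generated): splits `self` in half
    // along `dim` and computes a * sigmoid(b), so that dimension must have even size.
    //
    //     let x = Tensor::f_full(&[2i64, 6][..], 1.0, (Kind::Float, Device::Cpu))?;
    //     let gated = x.f_glu(-1)?; // shape becomes [2, 3]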
18303
18304 pub fn f_glu_backward(&self, grad_output: &Tensor, dim: i64) -> Result<Tensor, TchError> {
18305 let mut c_tensors = [std::ptr::null_mut(); 1];
18306 unsafe_torch_err!(atg_glu_backward(
18307 c_tensors.as_mut_ptr(),
18308 grad_output.c_tensor,
18309 self.c_tensor,
18310 dim
18311 ));
18312 Ok(Tensor { c_tensor: c_tensors[0] })
18313 }
18314
18315 pub fn f_glu_backward_grad_input(
18316 &self,
18317 grad_input: &Tensor,
18318 grad_output: &Tensor,
18319 dim: i64,
18320 ) -> Result<Tensor, TchError> {
18321 let mut c_tensors = [std::ptr::null_mut(); 1];
18322 unsafe_torch_err!(atg_glu_backward_grad_input(
18323 c_tensors.as_mut_ptr(),
18324 grad_input.c_tensor,
18325 grad_output.c_tensor,
18326 self.c_tensor,
18327 dim
18328 ));
18329 Ok(Tensor { c_tensor: c_tensors[0] })
18330 }
18331
18332 pub fn f_glu_backward_jvp(
18333 grad_x: &Tensor,
18334 grad_glu: &Tensor,
18335 x: &Tensor,
18336 dgrad_glu: &Tensor,
18337 dx: &Tensor,
18338 dim: i64,
18339 ) -> Result<Tensor, TchError> {
18340 let mut c_tensors = [std::ptr::null_mut(); 1];
18341 unsafe_torch_err!(atg_glu_backward_jvp(
18342 c_tensors.as_mut_ptr(),
18343 grad_x.c_tensor,
18344 grad_glu.c_tensor,
18345 x.c_tensor,
18346 dgrad_glu.c_tensor,
18347 dx.c_tensor,
18348 dim
18349 ));
18350 Ok(Tensor { c_tensor: c_tensors[0] })
18351 }
18352
18353 pub fn f_glu_backward_jvp_out(
18354 out: &Tensor,
18355 grad_x: &Tensor,
18356 grad_glu: &Tensor,
18357 x: &Tensor,
18358 dgrad_glu: &Tensor,
18359 dx: &Tensor,
18360 dim: i64,
18361 ) -> Result<Tensor, TchError> {
18362 let mut c_tensors = [std::ptr::null_mut(); 1];
18363 unsafe_torch_err!(atg_glu_backward_jvp_out(
18364 c_tensors.as_mut_ptr(),
18365 out.c_tensor,
18366 grad_x.c_tensor,
18367 grad_glu.c_tensor,
18368 x.c_tensor,
18369 dgrad_glu.c_tensor,
18370 dx.c_tensor,
18371 dim
18372 ));
18373 Ok(Tensor { c_tensor: c_tensors[0] })
18374 }
18375
18376 pub fn f_glu_jvp(glu: &Tensor, x: &Tensor, dx: &Tensor, dim: i64) -> Result<Tensor, TchError> {
18377 let mut c_tensors = [std::ptr::null_mut(); 1];
18378 unsafe_torch_err!(atg_glu_jvp(
18379 c_tensors.as_mut_ptr(),
18380 glu.c_tensor,
18381 x.c_tensor,
18382 dx.c_tensor,
18383 dim
18384 ));
18385 Ok(Tensor { c_tensor: c_tensors[0] })
18386 }
18387
18388 pub fn f_glu_jvp_out(
18389 out: &Tensor,
18390 glu: &Tensor,
18391 x: &Tensor,
18392 dx: &Tensor,
18393 dim: i64,
18394 ) -> Result<Tensor, TchError> {
18395 let mut c_tensors = [std::ptr::null_mut(); 1];
18396 unsafe_torch_err!(atg_glu_jvp_out(
18397 c_tensors.as_mut_ptr(),
18398 out.c_tensor,
18399 glu.c_tensor,
18400 x.c_tensor,
18401 dx.c_tensor,
18402 dim
18403 ));
18404 Ok(Tensor { c_tensor: c_tensors[0] })
18405 }
18406
18407 pub fn f_glu_out(&self, out: &Tensor, dim: i64) -> Result<Tensor, TchError> {
18408 let mut c_tensors = [std::ptr::null_mut(); 1];
18409 unsafe_torch_err!(atg_glu_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor, dim));
18410 Ok(Tensor { c_tensor: c_tensors[0] })
18411 }
18412
18413 pub fn f_grad(&self) -> Result<Tensor, TchError> {
18414 let mut c_tensors = [std::ptr::null_mut(); 1];
18415 unsafe_torch_err!(atg_grad(c_tensors.as_mut_ptr(), self.c_tensor));
18416 Ok(Tensor { c_tensor: c_tensors[0] })
18417 }
18418
18419 pub fn f_greater<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
18420 let mut c_tensors = [std::ptr::null_mut(); 1];
18421 unsafe_torch_err!(atg_greater(
18422 c_tensors.as_mut_ptr(),
18423 self.c_tensor,
18424 other.into().c_scalar
18425 ));
18426 Ok(Tensor { c_tensor: c_tensors[0] })
18427 }
18428
18429 pub fn f_greater_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
18430 let mut c_tensors = [std::ptr::null_mut(); 1];
18431 unsafe_torch_err!(atg_greater_(
18432 c_tensors.as_mut_ptr(),
18433 self.c_tensor,
18434 other.into().c_scalar
18435 ));
18436 Ok(Tensor { c_tensor: c_tensors[0] })
18437 }
18438
18439 pub fn f_greater_equal<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
18440 let mut c_tensors = [std::ptr::null_mut(); 1];
18441 unsafe_torch_err!(atg_greater_equal(
18442 c_tensors.as_mut_ptr(),
18443 self.c_tensor,
18444 other.into().c_scalar
18445 ));
18446 Ok(Tensor { c_tensor: c_tensors[0] })
18447 }
18448
18449 pub fn f_greater_equal_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
18450 let mut c_tensors = [std::ptr::null_mut(); 1];
18451 unsafe_torch_err!(atg_greater_equal_(
18452 c_tensors.as_mut_ptr(),
18453 self.c_tensor,
18454 other.into().c_scalar
18455 ));
18456 Ok(Tensor { c_tensor: c_tensors[0] })
18457 }
18458
18459 pub fn f_greater_equal_scalar_out<S: Into<Scalar>>(
18460 &self,
18461 out: &Tensor,
18462 other: S,
18463 ) -> Result<Tensor, TchError> {
18464 let mut c_tensors = [std::ptr::null_mut(); 1];
18465 unsafe_torch_err!(atg_greater_equal_scalar_out(
18466 c_tensors.as_mut_ptr(),
18467 out.c_tensor,
18468 self.c_tensor,
18469 other.into().c_scalar
18470 ));
18471 Ok(Tensor { c_tensor: c_tensors[0] })
18472 }
18473
18474 pub fn f_greater_equal_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
18475 let mut c_tensors = [std::ptr::null_mut(); 1];
18476 unsafe_torch_err!(atg_greater_equal_tensor(
18477 c_tensors.as_mut_ptr(),
18478 self.c_tensor,
18479 other.c_tensor
18480 ));
18481 Ok(Tensor { c_tensor: c_tensors[0] })
18482 }
18483
18484 pub fn f_greater_equal_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
18485 let mut c_tensors = [std::ptr::null_mut(); 1];
18486 unsafe_torch_err!(atg_greater_equal_tensor_(
18487 c_tensors.as_mut_ptr(),
18488 self.c_tensor,
18489 other.c_tensor
18490 ));
18491 Ok(Tensor { c_tensor: c_tensors[0] })
18492 }
18493
18494 pub fn f_greater_equal_tensor_out(
18495 &self,
18496 out: &Tensor,
18497 other: &Tensor,
18498 ) -> Result<Tensor, TchError> {
18499 let mut c_tensors = [std::ptr::null_mut(); 1];
18500 unsafe_torch_err!(atg_greater_equal_tensor_out(
18501 c_tensors.as_mut_ptr(),
18502 out.c_tensor,
18503 self.c_tensor,
18504 other.c_tensor
18505 ));
18506 Ok(Tensor { c_tensor: c_tensors[0] })
18507 }
18508
18509 pub fn f_greater_scalar_out<S: Into<Scalar>>(
18510 &self,
18511 out: &Tensor,
18512 other: S,
18513 ) -> Result<Tensor, TchError> {
18514 let mut c_tensors = [std::ptr::null_mut(); 1];
18515 unsafe_torch_err!(atg_greater_scalar_out(
18516 c_tensors.as_mut_ptr(),
18517 out.c_tensor,
18518 self.c_tensor,
18519 other.into().c_scalar
18520 ));
18521 Ok(Tensor { c_tensor: c_tensors[0] })
18522 }
18523
18524 pub fn f_greater_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
18525 let mut c_tensors = [std::ptr::null_mut(); 1];
18526 unsafe_torch_err!(atg_greater_tensor(
18527 c_tensors.as_mut_ptr(),
18528 self.c_tensor,
18529 other.c_tensor
18530 ));
18531 Ok(Tensor { c_tensor: c_tensors[0] })
18532 }
18533
18534 pub fn f_greater_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
18535 let mut c_tensors = [std::ptr::null_mut(); 1];
18536 unsafe_torch_err!(atg_greater_tensor_(
18537 c_tensors.as_mut_ptr(),
18538 self.c_tensor,
18539 other.c_tensor
18540 ));
18541 Ok(Tensor { c_tensor: c_tensors[0] })
18542 }
18543
18544 pub fn f_greater_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
18545 let mut c_tensors = [std::ptr::null_mut(); 1];
18546 unsafe_torch_err!(atg_greater_tensor_out(
18547 c_tensors.as_mut_ptr(),
18548 out.c_tensor,
18549 self.c_tensor,
18550 other.c_tensor
18551 ));
18552 Ok(Tensor { c_tensor: c_tensors[0] })
18553 }
18554
18555 pub fn f_grid_sampler(
18556 &self,
18557 grid: &Tensor,
18558 interpolation_mode: i64,
18559 padding_mode: i64,
18560 align_corners: bool,
18561 ) -> Result<Tensor, TchError> {
18562 let mut c_tensors = [std::ptr::null_mut(); 1];
18563 unsafe_torch_err!(atg_grid_sampler(
18564 c_tensors.as_mut_ptr(),
18565 self.c_tensor,
18566 grid.c_tensor,
18567 interpolation_mode,
18568 padding_mode,
18569 if align_corners { 1 } else { 0 }
18570 ));
18571 Ok(Tensor { c_tensor: c_tensors[0] })
18572 }
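    // Illustrative sketch for `f_grid_sampler` above (not generated). The integer
    // codes follow ATen: interpolation 0 = bilinear, 1 = nearest; padding 0 = zeros,
    // 1 = border, 2 = reflection. The grid holds normalized (x, y) coordinates in
    // [-1, 1] with shape (N, H_out, W_out, 2).
    //
    //     let input = Tensor::f_full(&[1i64, 1, 4, 4][..], 1.0, (Kind::Float, Device::Cpu))?;
    //     let grid = Tensor::f_full(&[1i64, 2, 2, 2][..], 0.0, (Kind::Float, Device::Cpu))?;
    //     let sampled = input.f_grid_sampler(&grid, 0, 0, false)?; // samples the image center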
18573
18574 pub fn f_grid_sampler_2d(
18575 &self,
18576 grid: &Tensor,
18577 interpolation_mode: i64,
18578 padding_mode: i64,
18579 align_corners: bool,
18580 ) -> Result<Tensor, TchError> {
18581 let mut c_tensors = [std::ptr::null_mut(); 1];
18582 unsafe_torch_err!(atg_grid_sampler_2d(
18583 c_tensors.as_mut_ptr(),
18584 self.c_tensor,
18585 grid.c_tensor,
18586 interpolation_mode,
18587 padding_mode,
18588 if align_corners { 1 } else { 0 }
18589 ));
18590 Ok(Tensor { c_tensor: c_tensors[0] })
18591 }
18592
18593 pub fn f_grid_sampler_2d_out(
18594 &self,
18595 out: &Tensor,
18596 grid: &Tensor,
18597 interpolation_mode: i64,
18598 padding_mode: i64,
18599 align_corners: bool,
18600 ) -> Result<Tensor, TchError> {
18601 let mut c_tensors = [std::ptr::null_mut(); 1];
18602 unsafe_torch_err!(atg_grid_sampler_2d_out(
18603 c_tensors.as_mut_ptr(),
18604 out.c_tensor,
18605 self.c_tensor,
18606 grid.c_tensor,
18607 interpolation_mode,
18608 padding_mode,
18609 if align_corners { 1 } else { 0 }
18610 ));
18611 Ok(Tensor { c_tensor: c_tensors[0] })
18612 }
18613
18614 pub fn f_grid_sampler_3d(
18615 &self,
18616 grid: &Tensor,
18617 interpolation_mode: i64,
18618 padding_mode: i64,
18619 align_corners: bool,
18620 ) -> Result<Tensor, TchError> {
18621 let mut c_tensors = [std::ptr::null_mut(); 1];
18622 unsafe_torch_err!(atg_grid_sampler_3d(
18623 c_tensors.as_mut_ptr(),
18624 self.c_tensor,
18625 grid.c_tensor,
18626 interpolation_mode,
18627 padding_mode,
18628 if align_corners { 1 } else { 0 }
18629 ));
18630 Ok(Tensor { c_tensor: c_tensors[0] })
18631 }
18632
18633 pub fn f_grid_sampler_3d_out(
18634 &self,
18635 out: &Tensor,
18636 grid: &Tensor,
18637 interpolation_mode: i64,
18638 padding_mode: i64,
18639 align_corners: bool,
18640 ) -> Result<Tensor, TchError> {
18641 let mut c_tensors = [std::ptr::null_mut(); 1];
18642 unsafe_torch_err!(atg_grid_sampler_3d_out(
18643 c_tensors.as_mut_ptr(),
18644 out.c_tensor,
18645 self.c_tensor,
18646 grid.c_tensor,
18647 interpolation_mode,
18648 padding_mode,
18649 if align_corners { 1 } else { 0 }
18650 ));
18651 Ok(Tensor { c_tensor: c_tensors[0] })
18652 }
18653
18654 pub fn f_group_norm<T: Borrow<Tensor>>(
18655 &self,
18656 num_groups: i64,
18657 weight: Option<T>,
18658 bias: Option<T>,
18659 eps: f64,
18660 cudnn_enabled: bool,
18661 ) -> Result<Tensor, TchError> {
18662 let mut c_tensors = [std::ptr::null_mut(); 1];
18663 unsafe_torch_err!(atg_group_norm(
18664 c_tensors.as_mut_ptr(),
18665 self.c_tensor,
18666 num_groups,
18667 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
18668 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
18669 eps,
18670 if cudnn_enabled { 1 } else { 0 }
18671 ));
18672 Ok(Tensor { c_tensor: c_tensors[0] })
18673 }
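    // Illustrative sketch for `f_group_norm` above (not generated): normalizes over
    // groups of channels; `None` skips the optional affine weight and bias.
    //
    //     let x = Tensor::f_full(&[2i64, 4, 8][..], 1.0, (Kind::Float, Device::Cpu))?;
    //     let y = x.f_group_norm(2, None::<Tensor>, None::<Tensor>, 1e-5, false)?;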
18674
18675 pub fn f_gru<T: Borrow<Tensor>>(
18676 &self,
18677 hx: &Tensor,
18678 params: &[T],
18679 has_biases: bool,
18680 num_layers: i64,
18681 dropout: f64,
18682 train: bool,
18683 bidirectional: bool,
18684 batch_first: bool,
18685 ) -> Result<(Tensor, Tensor), TchError> {
18686 let mut c_tensors = [std::ptr::null_mut(); 2];
18687 unsafe_torch_err!(atg_gru(
18688 c_tensors.as_mut_ptr(),
18689 self.c_tensor,
18690 hx.c_tensor,
18691 ptr_list(params).as_ptr(),
18692 params.len() as i32,
18693 if has_biases { 1 } else { 0 },
18694 num_layers,
18695 dropout,
18696 if train { 1 } else { 0 },
18697 if bidirectional { 1 } else { 0 },
18698 if batch_first { 1 } else { 0 }
18699 ));
18700 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
18701 }
18702
18703 pub fn f_gru_cell<T: Borrow<Tensor>>(
18704 &self,
18705 hx: &Tensor,
18706 w_ih: &Tensor,
18707 w_hh: &Tensor,
18708 b_ih: Option<T>,
18709 b_hh: Option<T>,
18710 ) -> Result<Tensor, TchError> {
18711 let mut c_tensors = [std::ptr::null_mut(); 1];
18712 unsafe_torch_err!(atg_gru_cell(
18713 c_tensors.as_mut_ptr(),
18714 self.c_tensor,
18715 hx.c_tensor,
18716 w_ih.c_tensor,
18717 w_hh.c_tensor,
18718 b_ih.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
18719 b_hh.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
18720 ));
18721 Ok(Tensor { c_tensor: c_tensors[0] })
18722 }
18723
18724 pub fn f_gru_data<T: Borrow<Tensor>>(
18725 data: &Tensor,
18726 batch_sizes: &Tensor,
18727 hx: &Tensor,
18728 params: &[T],
18729 has_biases: bool,
18730 num_layers: i64,
18731 dropout: f64,
18732 train: bool,
18733 bidirectional: bool,
18734 ) -> Result<(Tensor, Tensor), TchError> {
18735 let mut c_tensors = [std::ptr::null_mut(); 2];
18736 unsafe_torch_err!(atg_gru_data(
18737 c_tensors.as_mut_ptr(),
18738 data.c_tensor,
18739 batch_sizes.c_tensor,
18740 hx.c_tensor,
18741 ptr_list(params).as_ptr(),
18742 params.len() as i32,
18743 if has_biases { 1 } else { 0 },
18744 num_layers,
18745 dropout,
18746 if train { 1 } else { 0 },
18747 if bidirectional { 1 } else { 0 }
18748 ));
18749 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
18750 }
18751
18752 pub fn f_gt<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
18753 let mut c_tensors = [std::ptr::null_mut(); 1];
18754 unsafe_torch_err!(atg_gt(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
18755 Ok(Tensor { c_tensor: c_tensors[0] })
18756 }
18757
18758 pub fn f_gt_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
18759 let mut c_tensors = [std::ptr::null_mut(); 1];
18760 unsafe_torch_err!(atg_gt_(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
18761 Ok(Tensor { c_tensor: c_tensors[0] })
18762 }
18763
18764 pub fn f_gt_scalar_out<S: Into<Scalar>>(
18765 &self,
18766 out: &Tensor,
18767 other: S,
18768 ) -> Result<Tensor, TchError> {
18769 let mut c_tensors = [std::ptr::null_mut(); 1];
18770 unsafe_torch_err!(atg_gt_scalar_out(
18771 c_tensors.as_mut_ptr(),
18772 out.c_tensor,
18773 self.c_tensor,
18774 other.into().c_scalar
18775 ));
18776 Ok(Tensor { c_tensor: c_tensors[0] })
18777 }
18778
18779 pub fn f_gt_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
18780 let mut c_tensors = [std::ptr::null_mut(); 1];
18781 unsafe_torch_err!(atg_gt_tensor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
18782 Ok(Tensor { c_tensor: c_tensors[0] })
18783 }
18784
18785 pub fn f_gt_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
18786 let mut c_tensors = [std::ptr::null_mut(); 1];
18787 unsafe_torch_err!(atg_gt_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
18788 Ok(Tensor { c_tensor: c_tensors[0] })
18789 }
18790
18791 pub fn f_gt_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
18792 let mut c_tensors = [std::ptr::null_mut(); 1];
18793 unsafe_torch_err!(atg_gt_tensor_out(
18794 c_tensors.as_mut_ptr(),
18795 out.c_tensor,
18796 self.c_tensor,
18797 other.c_tensor
18798 ));
18799 Ok(Tensor { c_tensor: c_tensors[0] })
18800 }
18801
18802 pub fn f_hamming_window(
18803 window_length: i64,
18804 options: (Kind, Device),
18805 ) -> Result<Tensor, TchError> {
18806 let mut c_tensors = [std::ptr::null_mut(); 1];
18807 unsafe_torch_err!(atg_hamming_window(
18808 c_tensors.as_mut_ptr(),
18809 window_length,
18810 options.0.c_int(),
18811 options.1.c_int()
18812 ));
18813 Ok(Tensor { c_tensor: c_tensors[0] })
18814 }
18815
18816 pub fn f_hamming_window_out(out: &Tensor, window_length: i64) -> Result<Tensor, TchError> {
18817 let mut c_tensors = [std::ptr::null_mut(); 1];
18818 unsafe_torch_err!(atg_hamming_window_out(
18819 c_tensors.as_mut_ptr(),
18820 out.c_tensor,
18821 window_length
18822 ));
18823 Ok(Tensor { c_tensor: c_tensors[0] })
18824 }
18825
18826 pub fn f_hamming_window_periodic(
18827 window_length: i64,
18828 periodic: bool,
18829 options: (Kind, Device),
18830 ) -> Result<Tensor, TchError> {
18831 let mut c_tensors = [std::ptr::null_mut(); 1];
18832 unsafe_torch_err!(atg_hamming_window_periodic(
18833 c_tensors.as_mut_ptr(),
18834 window_length,
18835 if periodic { 1 } else { 0 },
18836 options.0.c_int(),
18837 options.1.c_int()
18838 ));
18839 Ok(Tensor { c_tensor: c_tensors[0] })
18840 }
18841
18842 pub fn f_hamming_window_periodic_alpha(
18843 window_length: i64,
18844 periodic: bool,
18845 alpha: f64,
18846 options: (Kind, Device),
18847 ) -> Result<Tensor, TchError> {
18848 let mut c_tensors = [std::ptr::null_mut(); 1];
18849 unsafe_torch_err!(atg_hamming_window_periodic_alpha(
18850 c_tensors.as_mut_ptr(),
18851 window_length,
18852 if periodic { 1 } else { 0 },
18853 alpha,
18854 options.0.c_int(),
18855 options.1.c_int()
18856 ));
18857 Ok(Tensor { c_tensor: c_tensors[0] })
18858 }
18859
18860 pub fn f_hamming_window_periodic_alpha_beta(
18861 window_length: i64,
18862 periodic: bool,
18863 alpha: f64,
18864 beta: f64,
18865 options: (Kind, Device),
18866 ) -> Result<Tensor, TchError> {
18867 let mut c_tensors = [std::ptr::null_mut(); 1];
18868 unsafe_torch_err!(atg_hamming_window_periodic_alpha_beta(
18869 c_tensors.as_mut_ptr(),
18870 window_length,
18871 if periodic { 1 } else { 0 },
18872 alpha,
18873 beta,
18874 options.0.c_int(),
18875 options.1.c_int()
18876 ));
18877 Ok(Tensor { c_tensor: c_tensors[0] })
18878 }
18879
18880 pub fn f_hamming_window_periodic_alpha_beta_out(
18881 out: &Tensor,
18882 window_length: i64,
18883 periodic: bool,
18884 alpha: f64,
18885 beta: f64,
18886 ) -> Result<Tensor, TchError> {
18887 let mut c_tensors = [std::ptr::null_mut(); 1];
18888 unsafe_torch_err!(atg_hamming_window_periodic_alpha_beta_out(
18889 c_tensors.as_mut_ptr(),
18890 out.c_tensor,
18891 window_length,
18892 if periodic { 1 } else { 0 },
18893 alpha,
18894 beta
18895 ));
18896 Ok(Tensor { c_tensor: c_tensors[0] })
18897 }
18898
18899 pub fn f_hamming_window_periodic_alpha_out(
18900 out: &Tensor,
18901 window_length: i64,
18902 periodic: bool,
18903 alpha: f64,
18904 ) -> Result<Tensor, TchError> {
18905 let mut c_tensors = [std::ptr::null_mut(); 1];
18906 unsafe_torch_err!(atg_hamming_window_periodic_alpha_out(
18907 c_tensors.as_mut_ptr(),
18908 out.c_tensor,
18909 window_length,
18910 if periodic { 1 } else { 0 },
18911 alpha
18912 ));
18913 Ok(Tensor { c_tensor: c_tensors[0] })
18914 }
18915
18916 pub fn f_hamming_window_periodic_out(
18917 out: &Tensor,
18918 window_length: i64,
18919 periodic: bool,
18920 ) -> Result<Tensor, TchError> {
18921 let mut c_tensors = [std::ptr::null_mut(); 1];
18922 unsafe_torch_err!(atg_hamming_window_periodic_out(
18923 c_tensors.as_mut_ptr(),
18924 out.c_tensor,
18925 window_length,
18926 if periodic { 1 } else { 0 }
18927 ));
18928 Ok(Tensor { c_tensor: c_tensors[0] })
18929 }
18930
18931 pub fn f_hann_window(window_length: i64, options: (Kind, Device)) -> Result<Tensor, TchError> {
18932 let mut c_tensors = [std::ptr::null_mut(); 1];
18933 unsafe_torch_err!(atg_hann_window(
18934 c_tensors.as_mut_ptr(),
18935 window_length,
18936 options.0.c_int(),
18937 options.1.c_int()
18938 ));
18939 Ok(Tensor { c_tensor: c_tensors[0] })
18940 }
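    // Illustrative sketch for `f_hann_window` above (not generated): a 1-D window
    // of the requested length, typically passed to an STFT.
    //
    //     let window = Tensor::f_hann_window(256, (Kind::Float, Device::Cpu))?;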
18941
18942 pub fn f_hann_window_out(out: &Tensor, window_length: i64) -> Result<Tensor, TchError> {
18943 let mut c_tensors = [std::ptr::null_mut(); 1];
18944 unsafe_torch_err!(atg_hann_window_out(c_tensors.as_mut_ptr(), out.c_tensor, window_length));
18945 Ok(Tensor { c_tensor: c_tensors[0] })
18946 }
18947
18948 pub fn f_hann_window_periodic(
18949 window_length: i64,
18950 periodic: bool,
18951 options: (Kind, Device),
18952 ) -> Result<Tensor, TchError> {
18953 let mut c_tensors = [std::ptr::null_mut(); 1];
18954 unsafe_torch_err!(atg_hann_window_periodic(
18955 c_tensors.as_mut_ptr(),
18956 window_length,
18957 if periodic { 1 } else { 0 },
18958 options.0.c_int(),
18959 options.1.c_int()
18960 ));
18961 Ok(Tensor { c_tensor: c_tensors[0] })
18962 }
18963
18964 pub fn f_hann_window_periodic_out(
18965 out: &Tensor,
18966 window_length: i64,
18967 periodic: bool,
18968 ) -> Result<Tensor, TchError> {
18969 let mut c_tensors = [std::ptr::null_mut(); 1];
18970 unsafe_torch_err!(atg_hann_window_periodic_out(
18971 c_tensors.as_mut_ptr(),
18972 out.c_tensor,
18973 window_length,
18974 if periodic { 1 } else { 0 }
18975 ));
18976 Ok(Tensor { c_tensor: c_tensors[0] })
18977 }
18978
18979 pub fn f_hardshrink(&self) -> Result<Tensor, TchError> {
18980 let mut c_tensors = [std::ptr::null_mut(); 1];
18981 unsafe_torch_err!(atg_hardshrink(c_tensors.as_mut_ptr(), self.c_tensor));
18982 Ok(Tensor { c_tensor: c_tensors[0] })
18983 }
18984
18985 pub fn f_hardshrink_backward<S: Into<Scalar>>(
18986 &self,
18987 grad_out: &Tensor,
18988 lambd: S,
18989 ) -> Result<Tensor, TchError> {
18990 let mut c_tensors = [std::ptr::null_mut(); 1];
18991 unsafe_torch_err!(atg_hardshrink_backward(
18992 c_tensors.as_mut_ptr(),
18993 grad_out.c_tensor,
18994 self.c_tensor,
18995 lambd.into().c_scalar
18996 ));
18997 Ok(Tensor { c_tensor: c_tensors[0] })
18998 }
18999
19000 pub fn f_hardshrink_backward_grad_input<S: Into<Scalar>>(
19001 &self,
19002 grad_input: &Tensor,
19003 grad_out: &Tensor,
19004 lambd: S,
19005 ) -> Result<Tensor, TchError> {
19006 let mut c_tensors = [std::ptr::null_mut(); 1];
19007 unsafe_torch_err!(atg_hardshrink_backward_grad_input(
19008 c_tensors.as_mut_ptr(),
19009 grad_input.c_tensor,
19010 grad_out.c_tensor,
19011 self.c_tensor,
19012 lambd.into().c_scalar
19013 ));
19014 Ok(Tensor { c_tensor: c_tensors[0] })
19015 }
19016
19017 pub fn f_hardshrink_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
19018 let mut c_tensors = [std::ptr::null_mut(); 1];
19019 unsafe_torch_err!(atg_hardshrink_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
19020 Ok(Tensor { c_tensor: c_tensors[0] })
19021 }
19022
19023 pub fn f_hardsigmoid(&self) -> Result<Tensor, TchError> {
19024 let mut c_tensors = [std::ptr::null_mut(); 1];
19025 unsafe_torch_err!(atg_hardsigmoid(c_tensors.as_mut_ptr(), self.c_tensor));
19026 Ok(Tensor { c_tensor: c_tensors[0] })
19027 }
19028
19029 pub fn f_hardsigmoid_(&mut self) -> Result<Tensor, TchError> {
19030 let mut c_tensors = [std::ptr::null_mut(); 1];
19031 unsafe_torch_err!(atg_hardsigmoid_(c_tensors.as_mut_ptr(), self.c_tensor));
19032 Ok(Tensor { c_tensor: c_tensors[0] })
19033 }
19034
19035 pub fn f_hardsigmoid_backward(&self, grad_output: &Tensor) -> Result<Tensor, TchError> {
19036 let mut c_tensors = [std::ptr::null_mut(); 1];
19037 unsafe_torch_err!(atg_hardsigmoid_backward(
19038 c_tensors.as_mut_ptr(),
19039 grad_output.c_tensor,
19040 self.c_tensor
19041 ));
19042 Ok(Tensor { c_tensor: c_tensors[0] })
19043 }
19044
19045 pub fn f_hardsigmoid_backward_grad_input(
19046 &self,
19047 grad_input: &Tensor,
19048 grad_output: &Tensor,
19049 ) -> Result<Tensor, TchError> {
19050 let mut c_tensors = [std::ptr::null_mut(); 1];
19051 unsafe_torch_err!(atg_hardsigmoid_backward_grad_input(
19052 c_tensors.as_mut_ptr(),
19053 grad_input.c_tensor,
19054 grad_output.c_tensor,
19055 self.c_tensor
19056 ));
19057 Ok(Tensor { c_tensor: c_tensors[0] })
19058 }
19059
19060 pub fn f_hardsigmoid_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
19061 let mut c_tensors = [std::ptr::null_mut(); 1];
19062 unsafe_torch_err!(atg_hardsigmoid_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
19063 Ok(Tensor { c_tensor: c_tensors[0] })
19064 }
19065
19066 pub fn f_hardswish(&self) -> Result<Tensor, TchError> {
19067 let mut c_tensors = [std::ptr::null_mut(); 1];
19068 unsafe_torch_err!(atg_hardswish(c_tensors.as_mut_ptr(), self.c_tensor));
19069 Ok(Tensor { c_tensor: c_tensors[0] })
19070 }
19071
19072 pub fn f_hardswish_(&mut self) -> Result<Tensor, TchError> {
19073 let mut c_tensors = [std::ptr::null_mut(); 1];
19074 unsafe_torch_err!(atg_hardswish_(c_tensors.as_mut_ptr(), self.c_tensor));
19075 Ok(Tensor { c_tensor: c_tensors[0] })
19076 }
19077
19078 pub fn f_hardswish_backward(&self, grad_output: &Tensor) -> Result<Tensor, TchError> {
19079 let mut c_tensors = [std::ptr::null_mut(); 1];
19080 unsafe_torch_err!(atg_hardswish_backward(
19081 c_tensors.as_mut_ptr(),
19082 grad_output.c_tensor,
19083 self.c_tensor
19084 ));
19085 Ok(Tensor { c_tensor: c_tensors[0] })
19086 }
19087
19088 pub fn f_hardswish_backward_out(
19089 &self,
19090 out: &Tensor,
19091 grad_output: &Tensor,
19092 ) -> Result<Tensor, TchError> {
19093 let mut c_tensors = [std::ptr::null_mut(); 1];
19094 unsafe_torch_err!(atg_hardswish_backward_out(
19095 c_tensors.as_mut_ptr(),
19096 out.c_tensor,
19097 grad_output.c_tensor,
19098 self.c_tensor
19099 ));
19100 Ok(Tensor { c_tensor: c_tensors[0] })
19101 }
19102
19103 pub fn f_hardswish_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
19104 let mut c_tensors = [std::ptr::null_mut(); 1];
19105 unsafe_torch_err!(atg_hardswish_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
19106 Ok(Tensor { c_tensor: c_tensors[0] })
19107 }
19108
19109 pub fn f_hardtanh(&self) -> Result<Tensor, TchError> {
19110 let mut c_tensors = [std::ptr::null_mut(); 1];
19111 unsafe_torch_err!(atg_hardtanh(c_tensors.as_mut_ptr(), self.c_tensor));
19112 Ok(Tensor { c_tensor: c_tensors[0] })
19113 }
19114
19115 pub fn f_hardtanh_(&mut self) -> Result<Tensor, TchError> {
19116 let mut c_tensors = [std::ptr::null_mut(); 1];
19117 unsafe_torch_err!(atg_hardtanh_(c_tensors.as_mut_ptr(), self.c_tensor));
19118 Ok(Tensor { c_tensor: c_tensors[0] })
19119 }
19120
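// A minimal sketch for `f_hardtanh_backward` below: `min_val` and `max_val` accept
// anything convertible into `Scalar`, so plain `f64` literals work; `grad_output`
// is assumed to have the same shape as the input tensor:
//
//     let grad_input = input.f_hardtanh_backward(&grad_output, -1.0, 1.0)?;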
19121 pub fn f_hardtanh_backward<S: Into<Scalar>>(
19122 &self,
19123 grad_output: &Tensor,
19124 min_val: S,
19125 max_val: S,
19126 ) -> Result<Tensor, TchError> {
19127 let mut c_tensors = [std::ptr::null_mut(); 1];
19128 unsafe_torch_err!(atg_hardtanh_backward(
19129 c_tensors.as_mut_ptr(),
19130 grad_output.c_tensor,
19131 self.c_tensor,
19132 min_val.into().c_scalar,
19133 max_val.into().c_scalar
19134 ));
19135 Ok(Tensor { c_tensor: c_tensors[0] })
19136 }
19137
19138 pub fn f_hardtanh_backward_grad_input<S: Into<Scalar>>(
19139 &self,
19140 grad_input: &Tensor,
19141 grad_output: &Tensor,
19142 min_val: S,
19143 max_val: S,
19144 ) -> Result<Tensor, TchError> {
19145 let mut c_tensors = [std::ptr::null_mut(); 1];
19146 unsafe_torch_err!(atg_hardtanh_backward_grad_input(
19147 c_tensors.as_mut_ptr(),
19148 grad_input.c_tensor,
19149 grad_output.c_tensor,
19150 self.c_tensor,
19151 min_val.into().c_scalar,
19152 max_val.into().c_scalar
19153 ));
19154 Ok(Tensor { c_tensor: c_tensors[0] })
19155 }
19156
19157 pub fn f_hardtanh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
19158 let mut c_tensors = [std::ptr::null_mut(); 1];
19159 unsafe_torch_err!(atg_hardtanh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
19160 Ok(Tensor { c_tensor: c_tensors[0] })
19161 }
19162
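// A minimal sketch for `f_heaviside` below: `values` supplies the result wherever
// `self` is exactly zero, and the two tensors are assumed to be broadcast-compatible:
//
//     let stepped = x.f_heaviside(&values)?;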
19163 pub fn f_heaviside(&self, values: &Tensor) -> Result<Tensor, TchError> {
19164 let mut c_tensors = [std::ptr::null_mut(); 1];
19165 unsafe_torch_err!(atg_heaviside(c_tensors.as_mut_ptr(), self.c_tensor, values.c_tensor));
19166 Ok(Tensor { c_tensor: c_tensors[0] })
19167 }
19168
19169 pub fn f_heaviside_(&mut self, values: &Tensor) -> Result<Tensor, TchError> {
19170 let mut c_tensors = [std::ptr::null_mut(); 1];
19171 unsafe_torch_err!(atg_heaviside_(c_tensors.as_mut_ptr(), self.c_tensor, values.c_tensor));
19172 Ok(Tensor { c_tensor: c_tensors[0] })
19173 }
19174
19175 pub fn f_heaviside_out(&self, out: &Tensor, values: &Tensor) -> Result<Tensor, TchError> {
19176 let mut c_tensors = [std::ptr::null_mut(); 1];
19177 unsafe_torch_err!(atg_heaviside_out(
19178 c_tensors.as_mut_ptr(),
19179 out.c_tensor,
19180 self.c_tensor,
19181 values.c_tensor
19182 ));
19183 Ok(Tensor { c_tensor: c_tensors[0] })
19184 }
19185
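// A minimal sketch for `f_hinge_embedding_loss` below: `target` is expected to carry
// +1/-1 labels, `margin` is a plain `f64`, and `Reduction::Mean` is assumed to be one
// of the variants of `crate::Reduction`:
//
//     let loss = input.f_hinge_embedding_loss(&target, 1.0, crate::Reduction::Mean)?;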
19186 pub fn f_hinge_embedding_loss(
19187 &self,
19188 target: &Tensor,
19189 margin: f64,
19190 reduction: crate::Reduction,
19191 ) -> Result<Tensor, TchError> {
19192 let mut c_tensors = [std::ptr::null_mut(); 1];
19193 unsafe_torch_err!(atg_hinge_embedding_loss(
19194 c_tensors.as_mut_ptr(),
19195 self.c_tensor,
19196 target.c_tensor,
19197 margin,
19198 reduction.to_int()
19199 ));
19200 Ok(Tensor { c_tensor: c_tensors[0] })
19201 }
19202
19203 pub fn f_histc(&self, bins: i64) -> Result<Tensor, TchError> {
19204 let mut c_tensors = [std::ptr::null_mut(); 1];
19205 unsafe_torch_err!(atg_histc(c_tensors.as_mut_ptr(), self.c_tensor, bins));
19206 Ok(Tensor { c_tensor: c_tensors[0] })
19207 }
19208
19209 pub fn f_histc_out(&self, out: &Tensor, bins: i64) -> Result<Tensor, TchError> {
19210 let mut c_tensors = [std::ptr::null_mut(); 1];
19211 unsafe_torch_err!(atg_histc_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor, bins));
19212 Ok(Tensor { c_tensor: c_tensors[0] })
19213 }
19214
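// A minimal sketch for `f_histogram` below: `bins` is a tensor of bin edges, the
// optional `weight` can be skipped with a turbofished `None`, and the call returns
// the `(hist, bin_edges)` pair:
//
//     let (hist, edges) = data.f_histogram(&bin_edges, None::<Tensor>, false)?;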
19215 pub fn f_histogram<T: Borrow<Tensor>>(
19216 &self,
19217 bins: &Tensor,
19218 weight: Option<T>,
19219 density: bool,
19220 ) -> Result<(Tensor, Tensor), TchError> {
19221 let mut c_tensors = [std::ptr::null_mut(); 2];
19222 unsafe_torch_err!(atg_histogram(
19223 c_tensors.as_mut_ptr(),
19224 self.c_tensor,
19225 bins.c_tensor,
19226 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
19227 if density { 1 } else { 0 }
19228 ));
19229 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
19230 }
19231
19232 pub fn f_histogram_bin_ct<T: Borrow<Tensor>>(
19233 &self,
19234 bins: i64,
19235 range: impl DoubleList,
19236 weight: Option<T>,
19237 density: bool,
19238 ) -> Result<(Tensor, Tensor), TchError> {
19239 let mut c_tensors = [std::ptr::null_mut(); 2];
19240 unsafe_torch_err!(atg_histogram_bin_ct(
19241 c_tensors.as_mut_ptr(),
19242 self.c_tensor,
19243 bins,
19244 range.as_ptr(),
19245 range.len_i32(),
19246 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
19247 if density { 1 } else { 0 }
19248 ));
19249 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
19250 }
19251
19252 pub fn f_histogram_bin_ct_out<T: Borrow<Tensor>>(
19253 &self,
19254 hist: &Tensor,
19255 bin_edges: &Tensor,
19256 bins: i64,
19257 range: impl DoubleList,
19258 weight: Option<T>,
19259 density: bool,
19260 ) -> Result<(Tensor, Tensor), TchError> {
19261 let mut c_tensors = [std::ptr::null_mut(); 2];
19262 unsafe_torch_err!(atg_histogram_bin_ct_out(
19263 c_tensors.as_mut_ptr(),
19264 hist.c_tensor,
19265 bin_edges.c_tensor,
19266 self.c_tensor,
19267 bins,
19268 range.as_ptr(),
19269 range.len_i32(),
19270 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
19271 if density { 1 } else { 0 }
19272 ));
19273 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
19274 }
19275
19276 pub fn f_histogram_bins_tensor_out<T: Borrow<Tensor>>(
19277 &self,
19278 hist: &Tensor,
19279 bin_edges: &Tensor,
19280 bins: &Tensor,
19281 weight: Option<T>,
19282 density: bool,
19283 ) -> Result<(Tensor, Tensor), TchError> {
19284 let mut c_tensors = [std::ptr::null_mut(); 2];
19285 unsafe_torch_err!(atg_histogram_bins_tensor_out(
19286 c_tensors.as_mut_ptr(),
19287 hist.c_tensor,
19288 bin_edges.c_tensor,
19289 self.c_tensor,
19290 bins.c_tensor,
19291 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
19292 if density { 1 } else { 0 }
19293 ));
19294 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
19295 }
19296
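// A minimal sketch for `f_hsplit` below: it splits `self` into `sections` chunks
// along the horizontal axis and collects them into a `Vec<Tensor>`; internally the
// binding walks the null-terminated C array it receives and releases the array
// itself with `libc::free` once every tensor handle has been wrapped. `sections`
// is assumed to divide the relevant dimension evenly:
//
//     let parts: Vec<Tensor> = t.f_hsplit(4)?;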
19297 pub fn f_hsplit(&self, sections: i64) -> Result<Vec<Tensor>, TchError> {
19298 let c_tensors = unsafe_torch_err!(atg_hsplit(self.c_tensor, sections));
19299 let mut r__ = vec![];
19300 let mut i = 0;
19301 loop {
19302 let c__ = unsafe { *c_tensors.add(i) };
19303 if c__.is_null() {
19304 break;
19305 }
19306 r__.push(Tensor { c_tensor: c__ });
19307 i += 1;
19308 }
19309 unsafe { libc::free(c_tensors as *mut libc::c_void) }
19310 Ok(r__)
19311 }
19312
19313 pub fn f_hsplit_array(&self, indices: impl IntList) -> Result<Vec<Tensor>, TchError> {
19314 let c_tensors =
19315 unsafe_torch_err!(atg_hsplit_array(self.c_tensor, indices.as_ptr(), indices.len_i32()));
19316 let mut r__ = vec![];
19317 let mut i = 0;
19318 loop {
19319 let c__ = unsafe { *c_tensors.add(i) };
19320 if c__.is_null() {
19321 break;
19322 }
19323 r__.push(Tensor { c_tensor: c__ });
19324 i += 1;
19325 }
19326 unsafe { libc::free(c_tensors as *mut libc::c_void) }
19327 Ok(r__)
19328 }
19329
19330 pub fn f_hspmm(mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
19331 let mut c_tensors = [std::ptr::null_mut(); 1];
19332 unsafe_torch_err!(atg_hspmm(c_tensors.as_mut_ptr(), mat1.c_tensor, mat2.c_tensor));
19333 Ok(Tensor { c_tensor: c_tensors[0] })
19334 }
19335
19336 pub fn f_hspmm_out(out: &Tensor, mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
19337 let mut c_tensors = [std::ptr::null_mut(); 1];
19338 unsafe_torch_err!(atg_hspmm_out(
19339 c_tensors.as_mut_ptr(),
19340 out.c_tensor,
19341 mat1.c_tensor,
19342 mat2.c_tensor
19343 ));
19344 Ok(Tensor { c_tensor: c_tensors[0] })
19345 }
19346
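// A minimal sketch for `f_hstack` below: the associated function takes any slice
// whose elements borrow as `Tensor`, so both `&[Tensor]` and `&[&Tensor]` work:
//
//     let stacked = Tensor::f_hstack(&[a, b, c])?;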
19347 pub fn f_hstack<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
19348 let mut c_tensors = [std::ptr::null_mut(); 1];
19349 unsafe_torch_err!(atg_hstack(
19350 c_tensors.as_mut_ptr(),
19351 ptr_list(tensors).as_ptr(),
19352 tensors.len() as i32
19353 ));
19354 Ok(Tensor { c_tensor: c_tensors[0] })
19355 }
19356
19357 pub fn f_hstack_out<T: Borrow<Tensor>>(
19358 out: &Tensor,
19359 tensors: &[T],
19360 ) -> Result<Tensor, TchError> {
19361 let mut c_tensors = [std::ptr::null_mut(); 1];
19362 unsafe_torch_err!(atg_hstack_out(
19363 c_tensors.as_mut_ptr(),
19364 out.c_tensor,
19365 ptr_list(tensors).as_ptr(),
19366 tensors.len() as i32
19367 ));
19368 Ok(Tensor { c_tensor: c_tensors[0] })
19369 }
19370
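// A minimal sketch for `f_huber_loss` below: `delta` is the threshold at which the
// loss switches from quadratic to linear; `Reduction::Mean` is assumed to be a
// variant of `crate::Reduction`:
//
//     let loss = pred.f_huber_loss(&target, crate::Reduction::Mean, 1.0)?;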
19371 pub fn f_huber_loss(
19372 &self,
19373 target: &Tensor,
19374 reduction: crate::Reduction,
19375 delta: f64,
19376 ) -> Result<Tensor, TchError> {
19377 let mut c_tensors = [std::ptr::null_mut(); 1];
19378 unsafe_torch_err!(atg_huber_loss(
19379 c_tensors.as_mut_ptr(),
19380 self.c_tensor,
19381 target.c_tensor,
19382 reduction.to_int(),
19383 delta
19384 ));
19385 Ok(Tensor { c_tensor: c_tensors[0] })
19386 }
19387
19388 pub fn f_huber_loss_backward(
19389 &self,
19390 grad_output: &Tensor,
19391 target: &Tensor,
19392 reduction: crate::Reduction,
19393 delta: f64,
19394 ) -> Result<Tensor, TchError> {
19395 let mut c_tensors = [std::ptr::null_mut(); 1];
19396 unsafe_torch_err!(atg_huber_loss_backward(
19397 c_tensors.as_mut_ptr(),
19398 grad_output.c_tensor,
19399 self.c_tensor,
19400 target.c_tensor,
19401 reduction.to_int(),
19402 delta
19403 ));
19404 Ok(Tensor { c_tensor: c_tensors[0] })
19405 }
19406
19407 pub fn f_huber_loss_backward_out(
19408 &self,
19409 grad_input: &Tensor,
19410 grad_output: &Tensor,
19411 target: &Tensor,
19412 reduction: crate::Reduction,
19413 delta: f64,
19414 ) -> Result<Tensor, TchError> {
19415 let mut c_tensors = [std::ptr::null_mut(); 1];
19416 unsafe_torch_err!(atg_huber_loss_backward_out(
19417 c_tensors.as_mut_ptr(),
19418 grad_input.c_tensor,
19419 grad_output.c_tensor,
19420 self.c_tensor,
19421 target.c_tensor,
19422 reduction.to_int(),
19423 delta
19424 ));
19425 Ok(Tensor { c_tensor: c_tensors[0] })
19426 }
19427
19428 pub fn f_huber_loss_out(
19429 &self,
19430 out: &Tensor,
19431 target: &Tensor,
19432 reduction: crate::Reduction,
19433 delta: f64,
19434 ) -> Result<Tensor, TchError> {
19435 let mut c_tensors = [std::ptr::null_mut(); 1];
19436 unsafe_torch_err!(atg_huber_loss_out(
19437 c_tensors.as_mut_ptr(),
19438 out.c_tensor,
19439 self.c_tensor,
19440 target.c_tensor,
19441 reduction.to_int(),
19442 delta
19443 ));
19444 Ok(Tensor { c_tensor: c_tensors[0] })
19445 }
19446
19447 pub fn f_hypot(&self, other: &Tensor) -> Result<Tensor, TchError> {
19448 let mut c_tensors = [std::ptr::null_mut(); 1];
19449 unsafe_torch_err!(atg_hypot(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
19450 Ok(Tensor { c_tensor: c_tensors[0] })
19451 }
19452
19453 pub fn f_hypot_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
19454 let mut c_tensors = [std::ptr::null_mut(); 1];
19455 unsafe_torch_err!(atg_hypot_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
19456 Ok(Tensor { c_tensor: c_tensors[0] })
19457 }
19458
19459 pub fn f_hypot_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
19460 let mut c_tensors = [std::ptr::null_mut(); 1];
19461 unsafe_torch_err!(atg_hypot_out(
19462 c_tensors.as_mut_ptr(),
19463 out.c_tensor,
19464 self.c_tensor,
19465 other.c_tensor
19466 ));
19467 Ok(Tensor { c_tensor: c_tensors[0] })
19468 }
19469
19470 pub fn f_i0(&self) -> Result<Tensor, TchError> {
19471 let mut c_tensors = [std::ptr::null_mut(); 1];
19472 unsafe_torch_err!(atg_i0(c_tensors.as_mut_ptr(), self.c_tensor));
19473 Ok(Tensor { c_tensor: c_tensors[0] })
19474 }
19475
19476 pub fn f_i0_(&mut self) -> Result<Tensor, TchError> {
19477 let mut c_tensors = [std::ptr::null_mut(); 1];
19478 unsafe_torch_err!(atg_i0_(c_tensors.as_mut_ptr(), self.c_tensor));
19479 Ok(Tensor { c_tensor: c_tensors[0] })
19480 }
19481
19482 pub fn f_i0_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
19483 let mut c_tensors = [std::ptr::null_mut(); 1];
19484 unsafe_torch_err!(atg_i0_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
19485 Ok(Tensor { c_tensor: c_tensors[0] })
19486 }
19487
19488 pub fn f_igamma(&self, other: &Tensor) -> Result<Tensor, TchError> {
19489 let mut c_tensors = [std::ptr::null_mut(); 1];
19490 unsafe_torch_err!(atg_igamma(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
19491 Ok(Tensor { c_tensor: c_tensors[0] })
19492 }
19493
19494 pub fn f_igamma_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
19495 let mut c_tensors = [std::ptr::null_mut(); 1];
19496 unsafe_torch_err!(atg_igamma_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
19497 Ok(Tensor { c_tensor: c_tensors[0] })
19498 }
19499
19500 pub fn f_igamma_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
19501 let mut c_tensors = [std::ptr::null_mut(); 1];
19502 unsafe_torch_err!(atg_igamma_out(
19503 c_tensors.as_mut_ptr(),
19504 out.c_tensor,
19505 self.c_tensor,
19506 other.c_tensor
19507 ));
19508 Ok(Tensor { c_tensor: c_tensors[0] })
19509 }
19510
19511 pub fn f_igammac(&self, other: &Tensor) -> Result<Tensor, TchError> {
19512 let mut c_tensors = [std::ptr::null_mut(); 1];
19513 unsafe_torch_err!(atg_igammac(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
19514 Ok(Tensor { c_tensor: c_tensors[0] })
19515 }
19516
19517 pub fn f_igammac_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
19518 let mut c_tensors = [std::ptr::null_mut(); 1];
19519 unsafe_torch_err!(atg_igammac_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
19520 Ok(Tensor { c_tensor: c_tensors[0] })
19521 }
19522
19523 pub fn f_igammac_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
19524 let mut c_tensors = [std::ptr::null_mut(); 1];
19525 unsafe_torch_err!(atg_igammac_out(
19526 c_tensors.as_mut_ptr(),
19527 out.c_tensor,
19528 self.c_tensor,
19529 other.c_tensor
19530 ));
19531 Ok(Tensor { c_tensor: c_tensors[0] })
19532 }
19533
19534 pub fn f_im2col(
19535 &self,
19536 kernel_size: impl IntList,
19537 dilation: impl IntList,
19538 padding: impl IntList,
19539 stride: impl IntList,
19540 ) -> Result<Tensor, TchError> {
19541 let mut c_tensors = [std::ptr::null_mut(); 1];
19542 unsafe_torch_err!(atg_im2col(
19543 c_tensors.as_mut_ptr(),
19544 self.c_tensor,
19545 kernel_size.as_ptr(),
19546 kernel_size.len_i32(),
19547 dilation.as_ptr(),
19548 dilation.len_i32(),
19549 padding.as_ptr(),
19550 padding.len_i32(),
19551 stride.as_ptr(),
19552 stride.len_i32()
19553 ));
19554 Ok(Tensor { c_tensor: c_tensors[0] })
19555 }
19556
19557 pub fn f_im2col_out(
19558 &self,
19559 out: &Tensor,
19560 kernel_size: impl IntList,
19561 dilation: impl IntList,
19562 padding: impl IntList,
19563 stride: impl IntList,
19564 ) -> Result<Tensor, TchError> {
19565 let mut c_tensors = [std::ptr::null_mut(); 1];
19566 unsafe_torch_err!(atg_im2col_out(
19567 c_tensors.as_mut_ptr(),
19568 out.c_tensor,
19569 self.c_tensor,
19570 kernel_size.as_ptr(),
19571 kernel_size.len_i32(),
19572 dilation.as_ptr(),
19573 dilation.len_i32(),
19574 padding.as_ptr(),
19575 padding.len_i32(),
19576 stride.as_ptr(),
19577 stride.len_i32()
19578 ));
19579 Ok(Tensor { c_tensor: c_tensors[0] })
19580 }
19581
19582 pub fn f_imag(&self) -> Result<Tensor, TchError> {
19583 let mut c_tensors = [std::ptr::null_mut(); 1];
19584 unsafe_torch_err!(atg_imag(c_tensors.as_mut_ptr(), self.c_tensor));
19585 Ok(Tensor { c_tensor: c_tensors[0] })
19586 }
19587
19588 pub fn f_index<T: Borrow<Tensor>>(&self, indices: &[Option<T>]) -> Result<Tensor, TchError> {
19589 let mut c_tensors = [std::ptr::null_mut(); 1];
19590 unsafe_torch_err!(atg_index(
19591 c_tensors.as_mut_ptr(),
19592 self.c_tensor,
19593 ptr_list_opt(indices).as_ptr(),
19594 indices.len() as i32
19595 ));
19596 Ok(Tensor { c_tensor: c_tensors[0] })
19597 }
19598
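// A minimal sketch for `f_index_add` below: rows of `source` selected by the integer
// `index` tensor are added into a copy of `self` along `dim`; the in-place
// `f_index_add_` variant that follows takes the same arguments in the same order:
//
//     let updated = t.f_index_add(0, &index, &source)?;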
19599 pub fn f_index_add(
19600 &self,
19601 dim: i64,
19602 index: &Tensor,
19603 source: &Tensor,
19604 ) -> Result<Tensor, TchError> {
19605 let mut c_tensors = [std::ptr::null_mut(); 1];
19606 unsafe_torch_err!(atg_index_add(
19607 c_tensors.as_mut_ptr(),
19608 self.c_tensor,
19609 dim,
19610 index.c_tensor,
19611 source.c_tensor
19612 ));
19613 Ok(Tensor { c_tensor: c_tensors[0] })
19614 }
19615
19616 pub fn f_index_add_(
19617 &mut self,
19618 dim: i64,
19619 index: &Tensor,
19620 source: &Tensor,
19621 ) -> Result<Tensor, TchError> {
19622 let mut c_tensors = [std::ptr::null_mut(); 1];
19623 unsafe_torch_err!(atg_index_add_(
19624 c_tensors.as_mut_ptr(),
19625 self.c_tensor,
19626 dim,
19627 index.c_tensor,
19628 source.c_tensor
19629 ));
19630 Ok(Tensor { c_tensor: c_tensors[0] })
19631 }
19632
19633 pub fn f_index_add_out(
19634 &self,
19635 out: &Tensor,
19636 dim: i64,
19637 index: &Tensor,
19638 source: &Tensor,
19639 ) -> Result<Tensor, TchError> {
19640 let mut c_tensors = [std::ptr::null_mut(); 1];
19641 unsafe_torch_err!(atg_index_add_out(
19642 c_tensors.as_mut_ptr(),
19643 out.c_tensor,
19644 self.c_tensor,
19645 dim,
19646 index.c_tensor,
19647 source.c_tensor
19648 ));
19649 Ok(Tensor { c_tensor: c_tensors[0] })
19650 }
19651
19652 pub fn f_index_copy(
19653 &self,
19654 dim: i64,
19655 index: &Tensor,
19656 source: &Tensor,
19657 ) -> Result<Tensor, TchError> {
19658 let mut c_tensors = [std::ptr::null_mut(); 1];
19659 unsafe_torch_err!(atg_index_copy(
19660 c_tensors.as_mut_ptr(),
19661 self.c_tensor,
19662 dim,
19663 index.c_tensor,
19664 source.c_tensor
19665 ));
19666 Ok(Tensor { c_tensor: c_tensors[0] })
19667 }
19668
19669 pub fn f_index_copy_(
19670 &mut self,
19671 dim: i64,
19672 index: &Tensor,
19673 source: &Tensor,
19674 ) -> Result<Tensor, TchError> {
19675 let mut c_tensors = [std::ptr::null_mut(); 1];
19676 unsafe_torch_err!(atg_index_copy_(
19677 c_tensors.as_mut_ptr(),
19678 self.c_tensor,
19679 dim,
19680 index.c_tensor,
19681 source.c_tensor
19682 ));
19683 Ok(Tensor { c_tensor: c_tensors[0] })
19684 }
19685
19686 pub fn f_index_copy_out(
19687 &self,
19688 out: &Tensor,
19689 dim: i64,
19690 index: &Tensor,
19691 source: &Tensor,
19692 ) -> Result<Tensor, TchError> {
19693 let mut c_tensors = [std::ptr::null_mut(); 1];
19694 unsafe_torch_err!(atg_index_copy_out(
19695 c_tensors.as_mut_ptr(),
19696 out.c_tensor,
19697 self.c_tensor,
19698 dim,
19699 index.c_tensor,
19700 source.c_tensor
19701 ));
19702 Ok(Tensor { c_tensor: c_tensors[0] })
19703 }
19704
19705 pub fn f_index_fill<S: Into<Scalar>>(
19706 &self,
19707 dim: i64,
19708 index: &Tensor,
19709 value: S,
19710 ) -> Result<Tensor, TchError> {
19711 let mut c_tensors = [std::ptr::null_mut(); 1];
19712 unsafe_torch_err!(atg_index_fill(
19713 c_tensors.as_mut_ptr(),
19714 self.c_tensor,
19715 dim,
19716 index.c_tensor,
19717 value.into().c_scalar
19718 ));
19719 Ok(Tensor { c_tensor: c_tensors[0] })
19720 }
19721
19722 pub fn f_index_fill_<S: Into<Scalar>>(
19723 &mut self,
19724 dim: i64,
19725 index: &Tensor,
19726 value: S,
19727 ) -> Result<Tensor, TchError> {
19728 let mut c_tensors = [std::ptr::null_mut(); 1];
19729 unsafe_torch_err!(atg_index_fill_(
19730 c_tensors.as_mut_ptr(),
19731 self.c_tensor,
19732 dim,
19733 index.c_tensor,
19734 value.into().c_scalar
19735 ));
19736 Ok(Tensor { c_tensor: c_tensors[0] })
19737 }
19738
19739 pub fn f_index_fill_int_scalar_out<S: Into<Scalar>>(
19740 &self,
19741 out: &Tensor,
19742 dim: i64,
19743 index: &Tensor,
19744 value: S,
19745 ) -> Result<Tensor, TchError> {
19746 let mut c_tensors = [std::ptr::null_mut(); 1];
19747 unsafe_torch_err!(atg_index_fill_int_scalar_out(
19748 c_tensors.as_mut_ptr(),
19749 out.c_tensor,
19750 self.c_tensor,
19751 dim,
19752 index.c_tensor,
19753 value.into().c_scalar
19754 ));
19755 Ok(Tensor { c_tensor: c_tensors[0] })
19756 }
19757
19758 pub fn f_index_fill_int_tensor(
19759 &self,
19760 dim: i64,
19761 index: &Tensor,
19762 value: &Tensor,
19763 ) -> Result<Tensor, TchError> {
19764 let mut c_tensors = [std::ptr::null_mut(); 1];
19765 unsafe_torch_err!(atg_index_fill_int_tensor(
19766 c_tensors.as_mut_ptr(),
19767 self.c_tensor,
19768 dim,
19769 index.c_tensor,
19770 value.c_tensor
19771 ));
19772 Ok(Tensor { c_tensor: c_tensors[0] })
19773 }
19774
19775 pub fn f_index_fill_int_tensor_(
19776 &mut self,
19777 dim: i64,
19778 index: &Tensor,
19779 value: &Tensor,
19780 ) -> Result<Tensor, TchError> {
19781 let mut c_tensors = [std::ptr::null_mut(); 1];
19782 unsafe_torch_err!(atg_index_fill_int_tensor_(
19783 c_tensors.as_mut_ptr(),
19784 self.c_tensor,
19785 dim,
19786 index.c_tensor,
19787 value.c_tensor
19788 ));
19789 Ok(Tensor { c_tensor: c_tensors[0] })
19790 }
19791
19792 pub fn f_index_fill_int_tensor_out(
19793 &self,
19794 out: &Tensor,
19795 dim: i64,
19796 index: &Tensor,
19797 value: &Tensor,
19798 ) -> Result<Tensor, TchError> {
19799 let mut c_tensors = [std::ptr::null_mut(); 1];
19800 unsafe_torch_err!(atg_index_fill_int_tensor_out(
19801 c_tensors.as_mut_ptr(),
19802 out.c_tensor,
19803 self.c_tensor,
19804 dim,
19805 index.c_tensor,
19806 value.c_tensor
19807 ));
19808 Ok(Tensor { c_tensor: c_tensors[0] })
19809 }
19810
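// A minimal sketch for `f_index_put` below: `indices` is a per-dimension list where
// `None` leaves that dimension untouched, and `accumulate = true` adds into the
// existing values instead of overwriting them:
//
//     let out = t.f_index_put(&[Some(&rows), None], &values, true)?;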
19811 pub fn f_index_put<T: Borrow<Tensor>>(
19812 &self,
19813 indices: &[Option<T>],
19814 values: &Tensor,
19815 accumulate: bool,
19816 ) -> Result<Tensor, TchError> {
19817 let mut c_tensors = [std::ptr::null_mut(); 1];
19818 unsafe_torch_err!(atg_index_put(
19819 c_tensors.as_mut_ptr(),
19820 self.c_tensor,
19821 ptr_list_opt(indices).as_ptr(),
19822 indices.len() as i32,
19823 values.c_tensor,
19824 if accumulate { 1 } else { 0 }
19825 ));
19826 Ok(Tensor { c_tensor: c_tensors[0] })
19827 }
19828
19829 pub fn f_index_put_<T: Borrow<Tensor>>(
19830 &mut self,
19831 indices: &[Option<T>],
19832 values: &Tensor,
19833 accumulate: bool,
19834 ) -> Result<Tensor, TchError> {
19835 let mut c_tensors = [std::ptr::null_mut(); 1];
19836 unsafe_torch_err!(atg_index_put_(
19837 c_tensors.as_mut_ptr(),
19838 self.c_tensor,
19839 ptr_list_opt(indices).as_ptr(),
19840 indices.len() as i32,
19841 values.c_tensor,
19842 if accumulate { 1 } else { 0 }
19843 ));
19844 Ok(Tensor { c_tensor: c_tensors[0] })
19845 }
19846
19847 pub fn f_index_put_out<T: Borrow<Tensor>>(
19848 &self,
19849 out: &Tensor,
19850 indices: &[Option<T>],
19851 values: &Tensor,
19852 accumulate: bool,
19853 ) -> Result<Tensor, TchError> {
19854 let mut c_tensors = [std::ptr::null_mut(); 1];
19855 unsafe_torch_err!(atg_index_put_out(
19856 c_tensors.as_mut_ptr(),
19857 out.c_tensor,
19858 self.c_tensor,
19859 ptr_list_opt(indices).as_ptr(),
19860 indices.len() as i32,
19861 values.c_tensor,
19862 if accumulate { 1 } else { 0 }
19863 ));
19864 Ok(Tensor { c_tensor: c_tensors[0] })
19865 }
19866
19867 pub fn f_index_reduce(
19868 &self,
19869 dim: i64,
19870 index: &Tensor,
19871 source: &Tensor,
19872 reduce: &str,
19873 include_self: bool,
19874 ) -> Result<Tensor, TchError> {
19875 let mut c_tensors = [std::ptr::null_mut(); 1];
19876 unsafe_torch_err!(atg_index_reduce(
19877 c_tensors.as_mut_ptr(),
19878 self.c_tensor,
19879 dim,
19880 index.c_tensor,
19881 source.c_tensor,
19882 reduce.as_ptr(),
19883 reduce.len() as i32,
19884 if include_self { 1 } else { 0 }
19885 ));
19886 Ok(Tensor { c_tensor: c_tensors[0] })
19887 }
19888
19889 pub fn f_index_reduce_(
19890 &mut self,
19891 dim: i64,
19892 index: &Tensor,
19893 source: &Tensor,
19894 reduce: &str,
19895 include_self: bool,
19896 ) -> Result<Tensor, TchError> {
19897 let mut c_tensors = [std::ptr::null_mut(); 1];
19898 unsafe_torch_err!(atg_index_reduce_(
19899 c_tensors.as_mut_ptr(),
19900 self.c_tensor,
19901 dim,
19902 index.c_tensor,
19903 source.c_tensor,
19904 reduce.as_ptr(),
19905 reduce.len() as i32,
19906 if include_self { 1 } else { 0 }
19907 ));
19908 Ok(Tensor { c_tensor: c_tensors[0] })
19909 }
19910
19911 pub fn f_index_reduce_out(
19912 &self,
19913 out: &Tensor,
19914 dim: i64,
19915 index: &Tensor,
19916 source: &Tensor,
19917 reduce: &str,
19918 include_self: bool,
19919 ) -> Result<Tensor, TchError> {
19920 let mut c_tensors = [std::ptr::null_mut(); 1];
19921 unsafe_torch_err!(atg_index_reduce_out(
19922 c_tensors.as_mut_ptr(),
19923 out.c_tensor,
19924 self.c_tensor,
19925 dim,
19926 index.c_tensor,
19927 source.c_tensor,
19928 reduce.as_ptr(),
19929 reduce.len() as i32,
19930 if include_self { 1 } else { 0 }
19931 ));
19932 Ok(Tensor { c_tensor: c_tensors[0] })
19933 }
19934
19935 pub fn f_index_select(&self, dim: i64, index: &Tensor) -> Result<Tensor, TchError> {
19936 let mut c_tensors = [std::ptr::null_mut(); 1];
19937 unsafe_torch_err!(atg_index_select(
19938 c_tensors.as_mut_ptr(),
19939 self.c_tensor,
19940 dim,
19941 index.c_tensor
19942 ));
19943 Ok(Tensor { c_tensor: c_tensors[0] })
19944 }
19945
19946 pub fn f_index_select_backward(
19947 grad: &Tensor,
19948 self_sizes: impl IntList,
19949 dim: i64,
19950 index: &Tensor,
19951 ) -> Result<Tensor, TchError> {
19952 let mut c_tensors = [std::ptr::null_mut(); 1];
19953 unsafe_torch_err!(atg_index_select_backward(
19954 c_tensors.as_mut_ptr(),
19955 grad.c_tensor,
19956 self_sizes.as_ptr(),
19957 self_sizes.len_i32(),
19958 dim,
19959 index.c_tensor
19960 ));
19961 Ok(Tensor { c_tensor: c_tensors[0] })
19962 }
19963
19964 pub fn f_index_select_out(
19965 &self,
19966 out: &Tensor,
19967 dim: i64,
19968 index: &Tensor,
19969 ) -> Result<Tensor, TchError> {
19970 let mut c_tensors = [std::ptr::null_mut(); 1];
19971 unsafe_torch_err!(atg_index_select_out(
19972 c_tensors.as_mut_ptr(),
19973 out.c_tensor,
19974 self.c_tensor,
19975 dim,
19976 index.c_tensor
19977 ));
19978 Ok(Tensor { c_tensor: c_tensors[0] })
19979 }
19980
19981 pub fn f_index_tensor_out<T: Borrow<Tensor>>(
19982 &self,
19983 out: &Tensor,
19984 indices: &[Option<T>],
19985 ) -> Result<Tensor, TchError> {
19986 let mut c_tensors = [std::ptr::null_mut(); 1];
19987 unsafe_torch_err!(atg_index_tensor_out(
19988 c_tensors.as_mut_ptr(),
19989 out.c_tensor,
19990 self.c_tensor,
19991 ptr_list_opt(indices).as_ptr(),
19992 indices.len() as i32
19993 ));
19994 Ok(Tensor { c_tensor: c_tensors[0] })
19995 }
19996
19997 pub fn f_indices(&self) -> Result<Tensor, TchError> {
19998 let mut c_tensors = [std::ptr::null_mut(); 1];
19999 unsafe_torch_err!(atg_indices(c_tensors.as_mut_ptr(), self.c_tensor));
20000 Ok(Tensor { c_tensor: c_tensors[0] })
20001 }
20002
20003 pub fn f_indices_copy(&self) -> Result<Tensor, TchError> {
20004 let mut c_tensors = [std::ptr::null_mut(); 1];
20005 unsafe_torch_err!(atg_indices_copy(c_tensors.as_mut_ptr(), self.c_tensor));
20006 Ok(Tensor { c_tensor: c_tensors[0] })
20007 }
20008
20009 pub fn f_indices_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
20010 let mut c_tensors = [std::ptr::null_mut(); 1];
20011 unsafe_torch_err!(atg_indices_copy_out(
20012 c_tensors.as_mut_ptr(),
20013 out.c_tensor,
20014 self.c_tensor
20015 ));
20016 Ok(Tensor { c_tensor: c_tensors[0] })
20017 }
20018
20019 pub fn f_infinitely_differentiable_gelu_backward(
20020 &self,
20021 grad: &Tensor,
20022 ) -> Result<Tensor, TchError> {
20023 let mut c_tensors = [std::ptr::null_mut(); 1];
20024 unsafe_torch_err!(atg_infinitely_differentiable_gelu_backward(
20025 c_tensors.as_mut_ptr(),
20026 grad.c_tensor,
20027 self.c_tensor
20028 ));
20029 Ok(Tensor { c_tensor: c_tensors[0] })
20030 }
20031
20032 pub fn f_inner(&self, other: &Tensor) -> Result<Tensor, TchError> {
20033 let mut c_tensors = [std::ptr::null_mut(); 1];
20034 unsafe_torch_err!(atg_inner(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
20035 Ok(Tensor { c_tensor: c_tensors[0] })
20036 }
20037
20038 pub fn f_inner_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
20039 let mut c_tensors = [std::ptr::null_mut(); 1];
20040 unsafe_torch_err!(atg_inner_out(
20041 c_tensors.as_mut_ptr(),
20042 out.c_tensor,
20043 self.c_tensor,
20044 other.c_tensor
20045 ));
20046 Ok(Tensor { c_tensor: c_tensors[0] })
20047 }
20048
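// A minimal sketch for `f_instance_norm` below: all four affine/running-stat
// arguments are `Option`s, so a bare call can pass `None` for each and let the op
// compute statistics from the input alone (argument order: weight, bias,
// running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled):
//
//     let normed = x.f_instance_norm(
//         None::<Tensor>, None::<Tensor>, None::<Tensor>, None::<Tensor>,
//         true, 0.1, 1e-5, false,
//     )?;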
20049 pub fn f_instance_norm<T: Borrow<Tensor>>(
20050 &self,
20051 weight: Option<T>,
20052 bias: Option<T>,
20053 running_mean: Option<T>,
20054 running_var: Option<T>,
20055 use_input_stats: bool,
20056 momentum: f64,
20057 eps: f64,
20058 cudnn_enabled: bool,
20059 ) -> Result<Tensor, TchError> {
20060 let mut c_tensors = [std::ptr::null_mut(); 1];
20061 unsafe_torch_err!(atg_instance_norm(
20062 c_tensors.as_mut_ptr(),
20063 self.c_tensor,
20064 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
20065 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
20066 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
20067 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
20068 if use_input_stats { 1 } else { 0 },
20069 momentum,
20070 eps,
20071 if cudnn_enabled { 1 } else { 0 }
20072 ));
20073 Ok(Tensor { c_tensor: c_tensors[0] })
20074 }
20075
20076 pub fn f_int_repr(&self) -> Result<Tensor, TchError> {
20077 let mut c_tensors = [std::ptr::null_mut(); 1];
20078 unsafe_torch_err!(atg_int_repr(c_tensors.as_mut_ptr(), self.c_tensor));
20079 Ok(Tensor { c_tensor: c_tensors[0] })
20080 }
20081
20082 pub fn f_int_repr_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
20083 let mut c_tensors = [std::ptr::null_mut(); 1];
20084 unsafe_torch_err!(atg_int_repr_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
20085 Ok(Tensor { c_tensor: c_tensors[0] })
20086 }
20087
20088 pub fn f_inverse(&self) -> Result<Tensor, TchError> {
20089 let mut c_tensors = [std::ptr::null_mut(); 1];
20090 unsafe_torch_err!(atg_inverse(c_tensors.as_mut_ptr(), self.c_tensor));
20091 Ok(Tensor { c_tensor: c_tensors[0] })
20092 }
20093
20094 pub fn f_inverse_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
20095 let mut c_tensors = [std::ptr::null_mut(); 1];
20096 unsafe_torch_err!(atg_inverse_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
20097 Ok(Tensor { c_tensor: c_tensors[0] })
20098 }
20099
20100 pub fn f_is_coalesced(&self) -> Result<bool, TchError> {
20101 let return_;
20102 unsafe_torch_err!(return_ = atg_is_coalesced(self.c_tensor));
20103 Ok(return_ != 0)
20104 }
20105
20106 pub fn f_is_complex(&self) -> Result<bool, TchError> {
20107 let return_;
20108 unsafe_torch_err!(return_ = atg_is_complex(self.c_tensor));
20109 Ok(return_ != 0)
20110 }
20111
20112 pub fn f_is_conj(&self) -> Result<bool, TchError> {
20113 let return_;
20114 unsafe_torch_err!(return_ = atg_is_conj(self.c_tensor));
20115 Ok(return_ != 0)
20116 }
20117
20118 pub fn f_is_distributed(&self) -> Result<bool, TchError> {
20119 let return_;
20120 unsafe_torch_err!(return_ = atg_is_distributed(self.c_tensor));
20121 Ok(return_ != 0)
20122 }
20123
20124 pub fn f_is_floating_point(&self) -> Result<bool, TchError> {
20125 let return_;
20126 unsafe_torch_err!(return_ = atg_is_floating_point(self.c_tensor));
20127 Ok(return_ != 0)
20128 }
20129
20130 pub fn f_is_inference(&self) -> Result<bool, TchError> {
20131 let return_;
20132 unsafe_torch_err!(return_ = atg_is_inference(self.c_tensor));
20133 Ok(return_ != 0)
20134 }
20135
20136 pub fn f_is_leaf(&self) -> Result<bool, TchError> {
20137 let return_;
20138 unsafe_torch_err!(return_ = atg_is_leaf(self.c_tensor));
20139 Ok(return_ != 0)
20140 }
20141
20142 pub fn f_is_neg(&self) -> Result<bool, TchError> {
20143 let return_;
20144 unsafe_torch_err!(return_ = atg_is_neg(self.c_tensor));
20145 Ok(return_ != 0)
20146 }
20147
20148 pub fn f_is_nonzero(&self) -> Result<bool, TchError> {
20149 let return_;
20150 unsafe_torch_err!(return_ = atg_is_nonzero(self.c_tensor));
20151 Ok(return_ != 0)
20152 }
20153
20154 pub fn f_is_pinned(&self, device: Device) -> Result<bool, TchError> {
20155 let return_;
20156 unsafe_torch_err!(return_ = atg_is_pinned(self.c_tensor, device.c_int()));
20157 Ok(return_ != 0)
20158 }
20159
20160 pub fn f_is_same_size(&self, other: &Tensor) -> Result<bool, TchError> {
20161 let return_;
20162 unsafe_torch_err!(return_ = atg_is_same_size(self.c_tensor, other.c_tensor));
20163 Ok(return_ != 0)
20164 }
20165
20166 pub fn f_is_set_to(&self, tensor: &Tensor) -> Result<bool, TchError> {
20167 let return_;
20168 unsafe_torch_err!(return_ = atg_is_set_to(self.c_tensor, tensor.c_tensor));
20169 Ok(return_ != 0)
20170 }
20171
20172 pub fn f_is_signed(&self) -> Result<bool, TchError> {
20173 let return_;
20174 unsafe_torch_err!(return_ = atg_is_signed(self.c_tensor));
20175 Ok(return_ != 0)
20176 }
20177
20178 pub fn f_is_vulkan_available() -> Result<bool, TchError> {
20179 let return_;
20180 unsafe_torch_err!(return_ = atg_is_vulkan_available());
20181 Ok(return_ != 0)
20182 }
20183
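// A minimal sketch for `f_isclose` below: `rtol` and `atol` are the usual relative
// and absolute tolerances, and the result is an elementwise boolean tensor:
//
//     let close = a.f_isclose(&b, 1e-5, 1e-8, false)?;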
20184 pub fn f_isclose(
20185 &self,
20186 other: &Tensor,
20187 rtol: f64,
20188 atol: f64,
20189 equal_nan: bool,
20190 ) -> Result<Tensor, TchError> {
20191 let mut c_tensors = [std::ptr::null_mut(); 1];
20192 unsafe_torch_err!(atg_isclose(
20193 c_tensors.as_mut_ptr(),
20194 self.c_tensor,
20195 other.c_tensor,
20196 rtol,
20197 atol,
20198 if equal_nan { 1 } else { 0 }
20199 ));
20200 Ok(Tensor { c_tensor: c_tensors[0] })
20201 }
20202
20203 pub fn f_isfinite(&self) -> Result<Tensor, TchError> {
20204 let mut c_tensors = [std::ptr::null_mut(); 1];
20205 unsafe_torch_err!(atg_isfinite(c_tensors.as_mut_ptr(), self.c_tensor));
20206 Ok(Tensor { c_tensor: c_tensors[0] })
20207 }
20208
20209 pub fn f_isin(
20210 elements: &Tensor,
20211 test_elements: &Tensor,
20212 assume_unique: bool,
20213 invert: bool,
20214 ) -> Result<Tensor, TchError> {
20215 let mut c_tensors = [std::ptr::null_mut(); 1];
20216 unsafe_torch_err!(atg_isin(
20217 c_tensors.as_mut_ptr(),
20218 elements.c_tensor,
20219 test_elements.c_tensor,
20220 if assume_unique { 1 } else { 0 },
20221 if invert { 1 } else { 0 }
20222 ));
20223 Ok(Tensor { c_tensor: c_tensors[0] })
20224 }
20225
20226 pub fn f_isin_scalar_tensor<S: Into<Scalar>>(
20227 element: S,
20228 test_elements: &Tensor,
20229 assume_unique: bool,
20230 invert: bool,
20231 ) -> Result<Tensor, TchError> {
20232 let mut c_tensors = [std::ptr::null_mut(); 1];
20233 unsafe_torch_err!(atg_isin_scalar_tensor(
20234 c_tensors.as_mut_ptr(),
20235 element.into().c_scalar,
20236 test_elements.c_tensor,
20237 if assume_unique { 1 } else { 0 },
20238 if invert { 1 } else { 0 }
20239 ));
20240 Ok(Tensor { c_tensor: c_tensors[0] })
20241 }
20242
20243 pub fn f_isin_scalar_tensor_out<S: Into<Scalar>>(
20244 out: &Tensor,
20245 element: S,
20246 test_elements: &Tensor,
20247 assume_unique: bool,
20248 invert: bool,
20249 ) -> Result<Tensor, TchError> {
20250 let mut c_tensors = [std::ptr::null_mut(); 1];
20251 unsafe_torch_err!(atg_isin_scalar_tensor_out(
20252 c_tensors.as_mut_ptr(),
20253 out.c_tensor,
20254 element.into().c_scalar,
20255 test_elements.c_tensor,
20256 if assume_unique { 1 } else { 0 },
20257 if invert { 1 } else { 0 }
20258 ));
20259 Ok(Tensor { c_tensor: c_tensors[0] })
20260 }
20261
20262 pub fn f_isin_tensor_scalar<S: Into<Scalar>>(
20263 elements: &Tensor,
20264 test_element: S,
20265 assume_unique: bool,
20266 invert: bool,
20267 ) -> Result<Tensor, TchError> {
20268 let mut c_tensors = [std::ptr::null_mut(); 1];
20269 unsafe_torch_err!(atg_isin_tensor_scalar(
20270 c_tensors.as_mut_ptr(),
20271 elements.c_tensor,
20272 test_element.into().c_scalar,
20273 if assume_unique { 1 } else { 0 },
20274 if invert { 1 } else { 0 }
20275 ));
20276 Ok(Tensor { c_tensor: c_tensors[0] })
20277 }
20278
20279 pub fn f_isin_tensor_scalar_out<S: Into<Scalar>>(
20280 out: &Tensor,
20281 elements: &Tensor,
20282 test_element: S,
20283 assume_unique: bool,
20284 invert: bool,
20285 ) -> Result<Tensor, TchError> {
20286 let mut c_tensors = [std::ptr::null_mut(); 1];
20287 unsafe_torch_err!(atg_isin_tensor_scalar_out(
20288 c_tensors.as_mut_ptr(),
20289 out.c_tensor,
20290 elements.c_tensor,
20291 test_element.into().c_scalar,
20292 if assume_unique { 1 } else { 0 },
20293 if invert { 1 } else { 0 }
20294 ));
20295 Ok(Tensor { c_tensor: c_tensors[0] })
20296 }
20297
20298 pub fn f_isin_tensor_tensor_out(
20299 out: &Tensor,
20300 elements: &Tensor,
20301 test_elements: &Tensor,
20302 assume_unique: bool,
20303 invert: bool,
20304 ) -> Result<Tensor, TchError> {
20305 let mut c_tensors = [std::ptr::null_mut(); 1];
20306 unsafe_torch_err!(atg_isin_tensor_tensor_out(
20307 c_tensors.as_mut_ptr(),
20308 out.c_tensor,
20309 elements.c_tensor,
20310 test_elements.c_tensor,
20311 if assume_unique { 1 } else { 0 },
20312 if invert { 1 } else { 0 }
20313 ));
20314 Ok(Tensor { c_tensor: c_tensors[0] })
20315 }
20316
20317 pub fn f_isinf(&self) -> Result<Tensor, TchError> {
20318 let mut c_tensors = [std::ptr::null_mut(); 1];
20319 unsafe_torch_err!(atg_isinf(c_tensors.as_mut_ptr(), self.c_tensor));
20320 Ok(Tensor { c_tensor: c_tensors[0] })
20321 }
20322
20323 pub fn f_isinf_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
20324 let mut c_tensors = [std::ptr::null_mut(); 1];
20325 unsafe_torch_err!(atg_isinf_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
20326 Ok(Tensor { c_tensor: c_tensors[0] })
20327 }
20328
20329 pub fn f_isnan(&self) -> Result<Tensor, TchError> {
20330 let mut c_tensors = [std::ptr::null_mut(); 1];
20331 unsafe_torch_err!(atg_isnan(c_tensors.as_mut_ptr(), self.c_tensor));
20332 Ok(Tensor { c_tensor: c_tensors[0] })
20333 }
20334
20335 pub fn f_isnan_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
20336 let mut c_tensors = [std::ptr::null_mut(); 1];
20337 unsafe_torch_err!(atg_isnan_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
20338 Ok(Tensor { c_tensor: c_tensors[0] })
20339 }
20340
20341 pub fn f_isneginf(&self) -> Result<Tensor, TchError> {
20342 let mut c_tensors = [std::ptr::null_mut(); 1];
20343 unsafe_torch_err!(atg_isneginf(c_tensors.as_mut_ptr(), self.c_tensor));
20344 Ok(Tensor { c_tensor: c_tensors[0] })
20345 }
20346
20347 pub fn f_isneginf_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
20348 let mut c_tensors = [std::ptr::null_mut(); 1];
20349 unsafe_torch_err!(atg_isneginf_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
20350 Ok(Tensor { c_tensor: c_tensors[0] })
20351 }
20352
20353 pub fn f_isposinf(&self) -> Result<Tensor, TchError> {
20354 let mut c_tensors = [std::ptr::null_mut(); 1];
20355 unsafe_torch_err!(atg_isposinf(c_tensors.as_mut_ptr(), self.c_tensor));
20356 Ok(Tensor { c_tensor: c_tensors[0] })
20357 }
20358
20359 pub fn f_isposinf_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
20360 let mut c_tensors = [std::ptr::null_mut(); 1];
20361 unsafe_torch_err!(atg_isposinf_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
20362 Ok(Tensor { c_tensor: c_tensors[0] })
20363 }
20364
20365 pub fn f_isreal(&self) -> Result<Tensor, TchError> {
20366 let mut c_tensors = [std::ptr::null_mut(); 1];
20367 unsafe_torch_err!(atg_isreal(c_tensors.as_mut_ptr(), self.c_tensor));
20368 Ok(Tensor { c_tensor: c_tensors[0] })
20369 }
20370
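// A minimal sketch for `f_istft` below: the optional integer arguments take
// `impl Into<Option<i64>>`, so either an explicit `i64` or `None` can be passed
// directly, while `window` goes through the usual `Option<T: Borrow<Tensor>>`
// pattern (argument order: n_fft, hop_length, win_length, window, center,
// normalized, onesided, length, return_complex):
//
//     let signal = spec.f_istft(
//         400, 100i64, None, Some(&window), true, true, true, None, false,
//     )?;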
20371 pub fn f_istft<T: Borrow<Tensor>>(
20372 &self,
20373 n_fft: i64,
20374 hop_length: impl Into<Option<i64>>,
20375 win_length: impl Into<Option<i64>>,
20376 window: Option<T>,
20377 center: bool,
20378 normalized: bool,
20379 onesided: bool,
20380 length: impl Into<Option<i64>>,
20381 return_complex: bool,
20382 ) -> Result<Tensor, TchError> {
20383 let hop_length = hop_length.into();
20384 let win_length = win_length.into();
20385 let length = length.into();
20386 let mut c_tensors = [std::ptr::null_mut(); 1];
20387 unsafe_torch_err!(atg_istft(
20388 c_tensors.as_mut_ptr(),
20389 self.c_tensor,
20390 n_fft,
20391 hop_length.unwrap_or(0i64),
20392 hop_length.is_none() as i8,
20393 win_length.unwrap_or(0i64),
20394 win_length.is_none() as i8,
20395 window.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
20396 if center { 1 } else { 0 },
20397 if normalized { 1 } else { 0 },
20398 if onesided { 1 } else { 0 },
20399 length.unwrap_or(0i64),
20400 length.is_none() as i8,
20401 if return_complex { 1 } else { 0 }
20402 ));
20403 Ok(Tensor { c_tensor: c_tensors[0] })
20404 }
20405
20406 pub fn f_kaiser_window(
20407 window_length: i64,
20408 options: (Kind, Device),
20409 ) -> Result<Tensor, TchError> {
20410 let mut c_tensors = [std::ptr::null_mut(); 1];
20411 unsafe_torch_err!(atg_kaiser_window(
20412 c_tensors.as_mut_ptr(),
20413 window_length,
20414 options.0.c_int(),
20415 options.1.c_int()
20416 ));
20417 Ok(Tensor { c_tensor: c_tensors[0] })
20418 }
20419
20420 pub fn f_kaiser_window_beta(
20421 window_length: i64,
20422 periodic: bool,
20423 beta: f64,
20424 options: (Kind, Device),
20425 ) -> Result<Tensor, TchError> {
20426 let mut c_tensors = [std::ptr::null_mut(); 1];
20427 unsafe_torch_err!(atg_kaiser_window_beta(
20428 c_tensors.as_mut_ptr(),
20429 window_length,
20430 if periodic { 1 } else { 0 },
20431 beta,
20432 options.0.c_int(),
20433 options.1.c_int()
20434 ));
20435 Ok(Tensor { c_tensor: c_tensors[0] })
20436 }
20437
20438 pub fn f_kaiser_window_beta_out(
20439 out: &Tensor,
20440 window_length: i64,
20441 periodic: bool,
20442 beta: f64,
20443 ) -> Result<Tensor, TchError> {
20444 let mut c_tensors = [std::ptr::null_mut(); 1];
20445 unsafe_torch_err!(atg_kaiser_window_beta_out(
20446 c_tensors.as_mut_ptr(),
20447 out.c_tensor,
20448 window_length,
20449 if periodic { 1 } else { 0 },
20450 beta
20451 ));
20452 Ok(Tensor { c_tensor: c_tensors[0] })
20453 }
20454
20455 pub fn f_kaiser_window_out(out: &Tensor, window_length: i64) -> Result<Tensor, TchError> {
20456 let mut c_tensors = [std::ptr::null_mut(); 1];
20457 unsafe_torch_err!(atg_kaiser_window_out(
20458 c_tensors.as_mut_ptr(),
20459 out.c_tensor,
20460 window_length
20461 ));
20462 Ok(Tensor { c_tensor: c_tensors[0] })
20463 }
20464
20465 pub fn f_kaiser_window_periodic(
20466 window_length: i64,
20467 periodic: bool,
20468 options: (Kind, Device),
20469 ) -> Result<Tensor, TchError> {
20470 let mut c_tensors = [std::ptr::null_mut(); 1];
20471 unsafe_torch_err!(atg_kaiser_window_periodic(
20472 c_tensors.as_mut_ptr(),
20473 window_length,
20474 if periodic { 1 } else { 0 },
20475 options.0.c_int(),
20476 options.1.c_int()
20477 ));
20478 Ok(Tensor { c_tensor: c_tensors[0] })
20479 }
20480
20481 pub fn f_kaiser_window_periodic_out(
20482 out: &Tensor,
20483 window_length: i64,
20484 periodic: bool,
20485 ) -> Result<Tensor, TchError> {
20486 let mut c_tensors = [std::ptr::null_mut(); 1];
20487 unsafe_torch_err!(atg_kaiser_window_periodic_out(
20488 c_tensors.as_mut_ptr(),
20489 out.c_tensor,
20490 window_length,
20491 if periodic { 1 } else { 0 }
20492 ));
20493 Ok(Tensor { c_tensor: c_tensors[0] })
20494 }
20495
20496 pub fn f_kl_div(
20497 &self,
20498 target: &Tensor,
20499 reduction: crate::Reduction,
20500 log_target: bool,
20501 ) -> Result<Tensor, TchError> {
20502 let mut c_tensors = [std::ptr::null_mut(); 1];
20503 unsafe_torch_err!(atg_kl_div(
20504 c_tensors.as_mut_ptr(),
20505 self.c_tensor,
20506 target.c_tensor,
20507 reduction.to_int(),
20508 if log_target { 1 } else { 0 }
20509 ));
20510 Ok(Tensor { c_tensor: c_tensors[0] })
20511 }
20512
20513 pub fn f_kron(&self, other: &Tensor) -> Result<Tensor, TchError> {
20514 let mut c_tensors = [std::ptr::null_mut(); 1];
20515 unsafe_torch_err!(atg_kron(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
20516 Ok(Tensor { c_tensor: c_tensors[0] })
20517 }
20518
20519 pub fn f_kron_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
20520 let mut c_tensors = [std::ptr::null_mut(); 1];
20521 unsafe_torch_err!(atg_kron_out(
20522 c_tensors.as_mut_ptr(),
20523 out.c_tensor,
20524 self.c_tensor,
20525 other.c_tensor
20526 ));
20527 Ok(Tensor { c_tensor: c_tensors[0] })
20528 }
20529
20530 pub fn f_kthvalue(
20531 &self,
20532 k: i64,
20533 dim: i64,
20534 keepdim: bool,
20535 ) -> Result<(Tensor, Tensor), TchError> {
20536 let mut c_tensors = [std::ptr::null_mut(); 2];
20537 unsafe_torch_err!(atg_kthvalue(
20538 c_tensors.as_mut_ptr(),
20539 self.c_tensor,
20540 k,
20541 dim,
20542 if keepdim { 1 } else { 0 }
20543 ));
20544 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
20545 }
20546
20547 pub fn f_kthvalue_values(
20548 &self,
20549 values: &Tensor,
20550 indices: &Tensor,
20551 k: i64,
20552 dim: i64,
20553 keepdim: bool,
20554 ) -> Result<(Tensor, Tensor), TchError> {
20555 let mut c_tensors = [std::ptr::null_mut(); 2];
20556 unsafe_torch_err!(atg_kthvalue_values(
20557 c_tensors.as_mut_ptr(),
20558 values.c_tensor,
20559 indices.c_tensor,
20560 self.c_tensor,
20561 k,
20562 dim,
20563 if keepdim { 1 } else { 0 }
20564 ));
20565 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
20566 }
20567
20568 pub fn f_l1_loss(
20569 &self,
20570 target: &Tensor,
20571 reduction: crate::Reduction,
20572 ) -> Result<Tensor, TchError> {
20573 let mut c_tensors = [std::ptr::null_mut(); 1];
20574 unsafe_torch_err!(atg_l1_loss(
20575 c_tensors.as_mut_ptr(),
20576 self.c_tensor,
20577 target.c_tensor,
20578 reduction.to_int()
20579 ));
20580 Ok(Tensor { c_tensor: c_tensors[0] })
20581 }
20582
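// A minimal sketch for `f_layer_norm` below: `normalized_shape` is any `IntList`
// (an `&[i64]` slice covering the trailing dimensions is assumed to qualify), while
// `weight` and `bias` stay optional:
//
//     let y = x.f_layer_norm(&[768i64], Some(&gamma), Some(&beta), 1e-5, false)?;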
20583 pub fn f_layer_norm<T: Borrow<Tensor>>(
20584 &self,
20585 normalized_shape: impl IntList,
20586 weight: Option<T>,
20587 bias: Option<T>,
20588 eps: f64,
20589 cudnn_enable: bool,
20590 ) -> Result<Tensor, TchError> {
20591 let mut c_tensors = [std::ptr::null_mut(); 1];
20592 unsafe_torch_err!(atg_layer_norm(
20593 c_tensors.as_mut_ptr(),
20594 self.c_tensor,
20595 normalized_shape.as_ptr(),
20596 normalized_shape.len_i32(),
20597 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
20598 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
20599 eps,
20600 if cudnn_enable { 1 } else { 0 }
20601 ));
20602 Ok(Tensor { c_tensor: c_tensors[0] })
20603 }
20604
20605 pub fn f_lcm(&self, other: &Tensor) -> Result<Tensor, TchError> {
20606 let mut c_tensors = [std::ptr::null_mut(); 1];
20607 unsafe_torch_err!(atg_lcm(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
20608 Ok(Tensor { c_tensor: c_tensors[0] })
20609 }
20610
20611 pub fn f_lcm_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
20612 let mut c_tensors = [std::ptr::null_mut(); 1];
20613 unsafe_torch_err!(atg_lcm_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
20614 Ok(Tensor { c_tensor: c_tensors[0] })
20615 }
20616
20617 pub fn f_lcm_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
20618 let mut c_tensors = [std::ptr::null_mut(); 1];
20619 unsafe_torch_err!(atg_lcm_out(
20620 c_tensors.as_mut_ptr(),
20621 out.c_tensor,
20622 self.c_tensor,
20623 other.c_tensor
20624 ));
20625 Ok(Tensor { c_tensor: c_tensors[0] })
20626 }
20627
20628 pub fn f_ldexp(&self, other: &Tensor) -> Result<Tensor, TchError> {
20629 let mut c_tensors = [std::ptr::null_mut(); 1];
20630 unsafe_torch_err!(atg_ldexp(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
20631 Ok(Tensor { c_tensor: c_tensors[0] })
20632 }
20633
20634 pub fn f_ldexp_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
20635 let mut c_tensors = [std::ptr::null_mut(); 1];
20636 unsafe_torch_err!(atg_ldexp_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
20637 Ok(Tensor { c_tensor: c_tensors[0] })
20638 }
20639
20640 pub fn f_ldexp_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
20641 let mut c_tensors = [std::ptr::null_mut(); 1];
20642 unsafe_torch_err!(atg_ldexp_out(
20643 c_tensors.as_mut_ptr(),
20644 out.c_tensor,
20645 self.c_tensor,
20646 other.c_tensor
20647 ));
20648 Ok(Tensor { c_tensor: c_tensors[0] })
20649 }
20650
20651 pub fn f_le<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
20652 let mut c_tensors = [std::ptr::null_mut(); 1];
20653 unsafe_torch_err!(atg_le(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
20654 Ok(Tensor { c_tensor: c_tensors[0] })
20655 }
20656
20657 pub fn f_le_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
20658 let mut c_tensors = [std::ptr::null_mut(); 1];
20659 unsafe_torch_err!(atg_le_(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
20660 Ok(Tensor { c_tensor: c_tensors[0] })
20661 }
20662
20663 pub fn f_le_scalar_out<S: Into<Scalar>>(
20664 &self,
20665 out: &Tensor,
20666 other: S,
20667 ) -> Result<Tensor, TchError> {
20668 let mut c_tensors = [std::ptr::null_mut(); 1];
20669 unsafe_torch_err!(atg_le_scalar_out(
20670 c_tensors.as_mut_ptr(),
20671 out.c_tensor,
20672 self.c_tensor,
20673 other.into().c_scalar
20674 ));
20675 Ok(Tensor { c_tensor: c_tensors[0] })
20676 }
20677
20678 pub fn f_le_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
20679 let mut c_tensors = [std::ptr::null_mut(); 1];
20680 unsafe_torch_err!(atg_le_tensor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
20681 Ok(Tensor { c_tensor: c_tensors[0] })
20682 }
20683
20684 pub fn f_le_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
20685 let mut c_tensors = [std::ptr::null_mut(); 1];
20686 unsafe_torch_err!(atg_le_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
20687 Ok(Tensor { c_tensor: c_tensors[0] })
20688 }
20689
20690 pub fn f_le_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
20691 let mut c_tensors = [std::ptr::null_mut(); 1];
20692 unsafe_torch_err!(atg_le_tensor_out(
20693 c_tensors.as_mut_ptr(),
20694 out.c_tensor,
20695 self.c_tensor,
20696 other.c_tensor
20697 ));
20698 Ok(Tensor { c_tensor: c_tensors[0] })
20699 }
20700
20701 pub fn f_leaky_relu(&self) -> Result<Tensor, TchError> {
20702 let mut c_tensors = [std::ptr::null_mut(); 1];
20703 unsafe_torch_err!(atg_leaky_relu(c_tensors.as_mut_ptr(), self.c_tensor));
20704 Ok(Tensor { c_tensor: c_tensors[0] })
20705 }
20706
20707 pub fn f_leaky_relu_(&mut self) -> Result<Tensor, TchError> {
20708 let mut c_tensors = [std::ptr::null_mut(); 1];
20709 unsafe_torch_err!(atg_leaky_relu_(c_tensors.as_mut_ptr(), self.c_tensor));
20710 Ok(Tensor { c_tensor: c_tensors[0] })
20711 }
20712
20713 pub fn f_leaky_relu_backward<S: Into<Scalar>>(
20714 &self,
20715 grad_output: &Tensor,
20716 negative_slope: S,
20717 self_is_result: bool,
20718 ) -> Result<Tensor, TchError> {
20719 let mut c_tensors = [std::ptr::null_mut(); 1];
20720 unsafe_torch_err!(atg_leaky_relu_backward(
20721 c_tensors.as_mut_ptr(),
20722 grad_output.c_tensor,
20723 self.c_tensor,
20724 negative_slope.into().c_scalar,
20725 if self_is_result { 1 } else { 0 }
20726 ));
20727 Ok(Tensor { c_tensor: c_tensors[0] })
20728 }
20729
20730 pub fn f_leaky_relu_backward_grad_input<S: Into<Scalar>>(
20731 &self,
20732 grad_input: &Tensor,
20733 grad_output: &Tensor,
20734 negative_slope: S,
20735 self_is_result: bool,
20736 ) -> Result<Tensor, TchError> {
20737 let mut c_tensors = [std::ptr::null_mut(); 1];
20738 unsafe_torch_err!(atg_leaky_relu_backward_grad_input(
20739 c_tensors.as_mut_ptr(),
20740 grad_input.c_tensor,
20741 grad_output.c_tensor,
20742 self.c_tensor,
20743 negative_slope.into().c_scalar,
20744 if self_is_result { 1 } else { 0 }
20745 ));
20746 Ok(Tensor { c_tensor: c_tensors[0] })
20747 }
20748
20749 pub fn f_leaky_relu_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
20750 let mut c_tensors = [std::ptr::null_mut(); 1];
20751 unsafe_torch_err!(atg_leaky_relu_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
20752 Ok(Tensor { c_tensor: c_tensors[0] })
20753 }
20754
20755 pub fn f_lerp<S: Into<Scalar>>(&self, end: &Tensor, weight: S) -> Result<Tensor, TchError> {
20756 let mut c_tensors = [std::ptr::null_mut(); 1];
20757 unsafe_torch_err!(atg_lerp(
20758 c_tensors.as_mut_ptr(),
20759 self.c_tensor,
20760 end.c_tensor,
20761 weight.into().c_scalar
20762 ));
20763 Ok(Tensor { c_tensor: c_tensors[0] })
20764 }
20765
20766 pub fn f_lerp_<S: Into<Scalar>>(
20767 &mut self,
20768 end: &Tensor,
20769 weight: S,
20770 ) -> Result<Tensor, TchError> {
20771 let mut c_tensors = [std::ptr::null_mut(); 1];
20772 unsafe_torch_err!(atg_lerp_(
20773 c_tensors.as_mut_ptr(),
20774 self.c_tensor,
20775 end.c_tensor,
20776 weight.into().c_scalar
20777 ));
20778 Ok(Tensor { c_tensor: c_tensors[0] })
20779 }
20780
20781 pub fn f_lerp_scalar_out<S: Into<Scalar>>(
20782 &self,
20783 out: &Tensor,
20784 end: &Tensor,
20785 weight: S,
20786 ) -> Result<Tensor, TchError> {
20787 let mut c_tensors = [std::ptr::null_mut(); 1];
20788 unsafe_torch_err!(atg_lerp_scalar_out(
20789 c_tensors.as_mut_ptr(),
20790 out.c_tensor,
20791 self.c_tensor,
20792 end.c_tensor,
20793 weight.into().c_scalar
20794 ));
20795 Ok(Tensor { c_tensor: c_tensors[0] })
20796 }
20797
20798 pub fn f_lerp_tensor(&self, end: &Tensor, weight: &Tensor) -> Result<Tensor, TchError> {
20799 let mut c_tensors = [std::ptr::null_mut(); 1];
20800 unsafe_torch_err!(atg_lerp_tensor(
20801 c_tensors.as_mut_ptr(),
20802 self.c_tensor,
20803 end.c_tensor,
20804 weight.c_tensor
20805 ));
20806 Ok(Tensor { c_tensor: c_tensors[0] })
20807 }
20808
20809 pub fn f_lerp_tensor_(&mut self, end: &Tensor, weight: &Tensor) -> Result<Tensor, TchError> {
20810 let mut c_tensors = [std::ptr::null_mut(); 1];
20811 unsafe_torch_err!(atg_lerp_tensor_(
20812 c_tensors.as_mut_ptr(),
20813 self.c_tensor,
20814 end.c_tensor,
20815 weight.c_tensor
20816 ));
20817 Ok(Tensor { c_tensor: c_tensors[0] })
20818 }
20819
20820 pub fn f_lerp_tensor_out(
20821 &self,
20822 out: &Tensor,
20823 end: &Tensor,
20824 weight: &Tensor,
20825 ) -> Result<Tensor, TchError> {
20826 let mut c_tensors = [std::ptr::null_mut(); 1];
20827 unsafe_torch_err!(atg_lerp_tensor_out(
20828 c_tensors.as_mut_ptr(),
20829 out.c_tensor,
20830 self.c_tensor,
20831 end.c_tensor,
20832 weight.c_tensor
20833 ));
20834 Ok(Tensor { c_tensor: c_tensors[0] })
20835 }
20836
20837 pub fn f_less<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
20838 let mut c_tensors = [std::ptr::null_mut(); 1];
20839 unsafe_torch_err!(atg_less(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
20840 Ok(Tensor { c_tensor: c_tensors[0] })
20841 }
20842
20843 pub fn f_less_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
20844 let mut c_tensors = [std::ptr::null_mut(); 1];
20845 unsafe_torch_err!(atg_less_(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
20846 Ok(Tensor { c_tensor: c_tensors[0] })
20847 }
20848
20849 pub fn f_less_equal<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
20850 let mut c_tensors = [std::ptr::null_mut(); 1];
20851 unsafe_torch_err!(atg_less_equal(
20852 c_tensors.as_mut_ptr(),
20853 self.c_tensor,
20854 other.into().c_scalar
20855 ));
20856 Ok(Tensor { c_tensor: c_tensors[0] })
20857 }
20858
20859 pub fn f_less_equal_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
20860 let mut c_tensors = [std::ptr::null_mut(); 1];
20861 unsafe_torch_err!(atg_less_equal_(
20862 c_tensors.as_mut_ptr(),
20863 self.c_tensor,
20864 other.into().c_scalar
20865 ));
20866 Ok(Tensor { c_tensor: c_tensors[0] })
20867 }
20868
20869 pub fn f_less_equal_scalar_out<S: Into<Scalar>>(
20870 &self,
20871 out: &Tensor,
20872 other: S,
20873 ) -> Result<Tensor, TchError> {
20874 let mut c_tensors = [std::ptr::null_mut(); 1];
20875 unsafe_torch_err!(atg_less_equal_scalar_out(
20876 c_tensors.as_mut_ptr(),
20877 out.c_tensor,
20878 self.c_tensor,
20879 other.into().c_scalar
20880 ));
20881 Ok(Tensor { c_tensor: c_tensors[0] })
20882 }
20883
20884 pub fn f_less_equal_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
20885 let mut c_tensors = [std::ptr::null_mut(); 1];
20886 unsafe_torch_err!(atg_less_equal_tensor(
20887 c_tensors.as_mut_ptr(),
20888 self.c_tensor,
20889 other.c_tensor
20890 ));
20891 Ok(Tensor { c_tensor: c_tensors[0] })
20892 }
20893
20894 pub fn f_less_equal_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
20895 let mut c_tensors = [std::ptr::null_mut(); 1];
20896 unsafe_torch_err!(atg_less_equal_tensor_(
20897 c_tensors.as_mut_ptr(),
20898 self.c_tensor,
20899 other.c_tensor
20900 ));
20901 Ok(Tensor { c_tensor: c_tensors[0] })
20902 }
20903
20904 pub fn f_less_equal_tensor_out(
20905 &self,
20906 out: &Tensor,
20907 other: &Tensor,
20908 ) -> Result<Tensor, TchError> {
20909 let mut c_tensors = [std::ptr::null_mut(); 1];
20910 unsafe_torch_err!(atg_less_equal_tensor_out(
20911 c_tensors.as_mut_ptr(),
20912 out.c_tensor,
20913 self.c_tensor,
20914 other.c_tensor
20915 ));
20916 Ok(Tensor { c_tensor: c_tensors[0] })
20917 }
20918
20919 pub fn f_less_scalar_out<S: Into<Scalar>>(
20920 &self,
20921 out: &Tensor,
20922 other: S,
20923 ) -> Result<Tensor, TchError> {
20924 let mut c_tensors = [std::ptr::null_mut(); 1];
20925 unsafe_torch_err!(atg_less_scalar_out(
20926 c_tensors.as_mut_ptr(),
20927 out.c_tensor,
20928 self.c_tensor,
20929 other.into().c_scalar
20930 ));
20931 Ok(Tensor { c_tensor: c_tensors[0] })
20932 }
20933
20934 pub fn f_less_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
20935 let mut c_tensors = [std::ptr::null_mut(); 1];
20936 unsafe_torch_err!(atg_less_tensor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
20937 Ok(Tensor { c_tensor: c_tensors[0] })
20938 }
20939
20940 pub fn f_less_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
20941 let mut c_tensors = [std::ptr::null_mut(); 1];
20942 unsafe_torch_err!(atg_less_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
20943 Ok(Tensor { c_tensor: c_tensors[0] })
20944 }
20945
20946 pub fn f_less_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
20947 let mut c_tensors = [std::ptr::null_mut(); 1];
20948 unsafe_torch_err!(atg_less_tensor_out(
20949 c_tensors.as_mut_ptr(),
20950 out.c_tensor,
20951 self.c_tensor,
20952 other.c_tensor
20953 ));
20954 Ok(Tensor { c_tensor: c_tensors[0] })
20955 }
20956
20957 pub fn f_lgamma(&self) -> Result<Tensor, TchError> {
20958 let mut c_tensors = [std::ptr::null_mut(); 1];
20959 unsafe_torch_err!(atg_lgamma(c_tensors.as_mut_ptr(), self.c_tensor));
20960 Ok(Tensor { c_tensor: c_tensors[0] })
20961 }
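    // lgamma computes ln|Γ(x)| element-wise, i.e. the natural log of the absolute
    // value of the gamma function, matching `torch.lgamma`.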
20962
20963 pub fn f_lgamma_(&mut self) -> Result<Tensor, TchError> {
20964 let mut c_tensors = [std::ptr::null_mut(); 1];
20965 unsafe_torch_err!(atg_lgamma_(c_tensors.as_mut_ptr(), self.c_tensor));
20966 Ok(Tensor { c_tensor: c_tensors[0] })
20967 }
20968
20969 pub fn f_lgamma_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
20970 let mut c_tensors = [std::ptr::null_mut(); 1];
20971 unsafe_torch_err!(atg_lgamma_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
20972 Ok(Tensor { c_tensor: c_tensors[0] })
20973 }
20974
20975 pub fn f_lift(&self) -> Result<Tensor, TchError> {
20976 let mut c_tensors = [std::ptr::null_mut(); 1];
20977 unsafe_torch_err!(atg_lift(c_tensors.as_mut_ptr(), self.c_tensor));
20978 Ok(Tensor { c_tensor: c_tensors[0] })
20979 }
20980
20981 pub fn f_lift_fresh(&self) -> Result<Tensor, TchError> {
20982 let mut c_tensors = [std::ptr::null_mut(); 1];
20983 unsafe_torch_err!(atg_lift_fresh(c_tensors.as_mut_ptr(), self.c_tensor));
20984 Ok(Tensor { c_tensor: c_tensors[0] })
20985 }
20986
20987 pub fn f_lift_fresh_copy(&self) -> Result<Tensor, TchError> {
20988 let mut c_tensors = [std::ptr::null_mut(); 1];
20989 unsafe_torch_err!(atg_lift_fresh_copy(c_tensors.as_mut_ptr(), self.c_tensor));
20990 Ok(Tensor { c_tensor: c_tensors[0] })
20991 }
20992
20993 pub fn f_lift_fresh_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
20994 let mut c_tensors = [std::ptr::null_mut(); 1];
20995 unsafe_torch_err!(atg_lift_fresh_copy_out(
20996 c_tensors.as_mut_ptr(),
20997 out.c_tensor,
20998 self.c_tensor
20999 ));
21000 Ok(Tensor { c_tensor: c_tensors[0] })
21001 }
21002
21003 pub fn f_lift_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
21004 let mut c_tensors = [std::ptr::null_mut(); 1];
21005 unsafe_torch_err!(atg_lift_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
21006 Ok(Tensor { c_tensor: c_tensors[0] })
21007 }
21008
21009 pub fn f_linalg_cholesky(&self, upper: bool) -> Result<Tensor, TchError> {
21010 let mut c_tensors = [std::ptr::null_mut(); 1];
21011 unsafe_torch_err!(atg_linalg_cholesky(
21012 c_tensors.as_mut_ptr(),
21013 self.c_tensor,
21014 if upper { 1 } else { 0 }
21015 ));
21016 Ok(Tensor { c_tensor: c_tensors[0] })
21017 }
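    // Usage sketch (illustrative only): with `upper == false` the result is a
    // lower-triangular `L` with `a = L · Lᴴ`; with `upper == true` an upper-triangular
    // factor is returned instead. The `_ex` variants below additionally return an
    // `info` tensor (non-zero where the input is not positive-definite) rather than
    // raising an error, following the `torch.linalg.cholesky_ex` semantics.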
21018
21019 pub fn f_linalg_cholesky_ex(
21020 &self,
21021 upper: bool,
21022 check_errors: bool,
21023 ) -> Result<(Tensor, Tensor), TchError> {
21024 let mut c_tensors = [std::ptr::null_mut(); 2];
21025 unsafe_torch_err!(atg_linalg_cholesky_ex(
21026 c_tensors.as_mut_ptr(),
21027 self.c_tensor,
21028 if upper { 1 } else { 0 },
21029 if check_errors { 1 } else { 0 }
21030 ));
21031 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
21032 }
21033
21034 pub fn f_linalg_cholesky_ex_l(
21035 &self,
21036 l: &Tensor,
21037 info: &Tensor,
21038 upper: bool,
21039 check_errors: bool,
21040 ) -> Result<(Tensor, Tensor), TchError> {
21041 let mut c_tensors = [std::ptr::null_mut(); 2];
21042 unsafe_torch_err!(atg_linalg_cholesky_ex_l(
21043 c_tensors.as_mut_ptr(),
21044 l.c_tensor,
21045 info.c_tensor,
21046 self.c_tensor,
21047 if upper { 1 } else { 0 },
21048 if check_errors { 1 } else { 0 }
21049 ));
21050 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
21051 }
21052
21053 pub fn f_linalg_cholesky_out(&self, out: &Tensor, upper: bool) -> Result<Tensor, TchError> {
21054 let mut c_tensors = [std::ptr::null_mut(); 1];
21055 unsafe_torch_err!(atg_linalg_cholesky_out(
21056 c_tensors.as_mut_ptr(),
21057 out.c_tensor,
21058 self.c_tensor,
21059 if upper { 1 } else { 0 }
21060 ));
21061 Ok(Tensor { c_tensor: c_tensors[0] })
21062 }
21063
21064 pub fn f_linalg_cond<S: Into<Scalar>>(&self, p: S) -> Result<Tensor, TchError> {
21065 let mut c_tensors = [std::ptr::null_mut(); 1];
21066 unsafe_torch_err!(atg_linalg_cond(
21067 c_tensors.as_mut_ptr(),
21068 self.c_tensor,
21069 p.into().c_scalar
21070 ));
21071 Ok(Tensor { c_tensor: c_tensors[0] })
21072 }
21073
21074 pub fn f_linalg_cond_out<S: Into<Scalar>>(
21075 &self,
21076 out: &Tensor,
21077 p: S,
21078 ) -> Result<Tensor, TchError> {
21079 let mut c_tensors = [std::ptr::null_mut(); 1];
21080 unsafe_torch_err!(atg_linalg_cond_out(
21081 c_tensors.as_mut_ptr(),
21082 out.c_tensor,
21083 self.c_tensor,
21084 p.into().c_scalar
21085 ));
21086 Ok(Tensor { c_tensor: c_tensors[0] })
21087 }
21088
21089 pub fn f_linalg_cond_p_str(&self, p: &str) -> Result<Tensor, TchError> {
21090 let mut c_tensors = [std::ptr::null_mut(); 1];
21091 unsafe_torch_err!(atg_linalg_cond_p_str(
21092 c_tensors.as_mut_ptr(),
21093 self.c_tensor,
21094 p.as_ptr(),
21095 p.len() as i32
21096 ));
21097 Ok(Tensor { c_tensor: c_tensors[0] })
21098 }
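    // Note: the string form of `p` accepts named matrix norms such as "fro" and "nuc"
    // (Frobenius / nuclear), following `torch.linalg.cond`; the string is forwarded to
    // the C API as a raw pointer plus byte length.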
21099
21100 pub fn f_linalg_cond_p_str_out(&self, out: &Tensor, p: &str) -> Result<Tensor, TchError> {
21101 let mut c_tensors = [std::ptr::null_mut(); 1];
21102 unsafe_torch_err!(atg_linalg_cond_p_str_out(
21103 c_tensors.as_mut_ptr(),
21104 out.c_tensor,
21105 self.c_tensor,
21106 p.as_ptr(),
21107 p.len() as i32
21108 ));
21109 Ok(Tensor { c_tensor: c_tensors[0] })
21110 }
21111
21112 pub fn f_linalg_cross(&self, other: &Tensor, dim: i64) -> Result<Tensor, TchError> {
21113 let mut c_tensors = [std::ptr::null_mut(); 1];
21114 unsafe_torch_err!(atg_linalg_cross(
21115 c_tensors.as_mut_ptr(),
21116 self.c_tensor,
21117 other.c_tensor,
21118 dim
21119 ));
21120 Ok(Tensor { c_tensor: c_tensors[0] })
21121 }
21122
21123 pub fn f_linalg_cross_out(
21124 &self,
21125 out: &Tensor,
21126 other: &Tensor,
21127 dim: i64,
21128 ) -> Result<Tensor, TchError> {
21129 let mut c_tensors = [std::ptr::null_mut(); 1];
21130 unsafe_torch_err!(atg_linalg_cross_out(
21131 c_tensors.as_mut_ptr(),
21132 out.c_tensor,
21133 self.c_tensor,
21134 other.c_tensor,
21135 dim
21136 ));
21137 Ok(Tensor { c_tensor: c_tensors[0] })
21138 }
21139
21140 pub fn f_linalg_det(a: &Tensor) -> Result<Tensor, TchError> {
21141 let mut c_tensors = [std::ptr::null_mut(); 1];
21142 unsafe_torch_err!(atg_linalg_det(c_tensors.as_mut_ptr(), a.c_tensor));
21143 Ok(Tensor { c_tensor: c_tensors[0] })
21144 }
21145
21146 pub fn f_linalg_det_out(out: &Tensor, a: &Tensor) -> Result<Tensor, TchError> {
21147 let mut c_tensors = [std::ptr::null_mut(); 1];
21148 unsafe_torch_err!(atg_linalg_det_out(c_tensors.as_mut_ptr(), out.c_tensor, a.c_tensor));
21149 Ok(Tensor { c_tensor: c_tensors[0] })
21150 }
21151
21152 pub fn f_linalg_diagonal(
21153 a: &Tensor,
21154 offset: i64,
21155 dim1: i64,
21156 dim2: i64,
21157 ) -> Result<Tensor, TchError> {
21158 let mut c_tensors = [std::ptr::null_mut(); 1];
21159 unsafe_torch_err!(atg_linalg_diagonal(
21160 c_tensors.as_mut_ptr(),
21161 a.c_tensor,
21162 offset,
21163 dim1,
21164 dim2
21165 ));
21166 Ok(Tensor { c_tensor: c_tensors[0] })
21167 }
21168
21169 pub fn f_linalg_eig(&self) -> Result<(Tensor, Tensor), TchError> {
21170 let mut c_tensors = [std::ptr::null_mut(); 2];
21171 unsafe_torch_err!(atg_linalg_eig(c_tensors.as_mut_ptr(), self.c_tensor));
21172 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
21173 }
21174
21175 pub fn f_linalg_eig_out(
21176 &self,
21177 eigenvalues: &Tensor,
21178 eigenvectors: &Tensor,
21179 ) -> Result<(Tensor, Tensor), TchError> {
21180 let mut c_tensors = [std::ptr::null_mut(); 2];
21181 unsafe_torch_err!(atg_linalg_eig_out(
21182 c_tensors.as_mut_ptr(),
21183 eigenvalues.c_tensor,
21184 eigenvectors.c_tensor,
21185 self.c_tensor
21186 ));
21187 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
21188 }
21189
21190 pub fn f_linalg_eigh(&self, uplo: &str) -> Result<(Tensor, Tensor), TchError> {
21191 let mut c_tensors = [std::ptr::null_mut(); 2];
21192 unsafe_torch_err!(atg_linalg_eigh(
21193 c_tensors.as_mut_ptr(),
21194 self.c_tensor,
21195 uplo.as_ptr(),
21196 uplo.len() as i32
21197 ));
21198 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
21199 }
21200
21201 pub fn f_linalg_eigh_eigvals(
21202 &self,
21203 eigvals: &Tensor,
21204 eigvecs: &Tensor,
21205 uplo: &str,
21206 ) -> Result<(Tensor, Tensor), TchError> {
21207 let mut c_tensors = [std::ptr::null_mut(); 2];
21208 unsafe_torch_err!(atg_linalg_eigh_eigvals(
21209 c_tensors.as_mut_ptr(),
21210 eigvals.c_tensor,
21211 eigvecs.c_tensor,
21212 self.c_tensor,
21213 uplo.as_ptr(),
21214 uplo.len() as i32
21215 ));
21216 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
21217 }
21218
21219 pub fn f_linalg_eigvals(&self) -> Result<Tensor, TchError> {
21220 let mut c_tensors = [std::ptr::null_mut(); 1];
21221 unsafe_torch_err!(atg_linalg_eigvals(c_tensors.as_mut_ptr(), self.c_tensor));
21222 Ok(Tensor { c_tensor: c_tensors[0] })
21223 }
21224
21225 pub fn f_linalg_eigvals_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
21226 let mut c_tensors = [std::ptr::null_mut(); 1];
21227 unsafe_torch_err!(atg_linalg_eigvals_out(
21228 c_tensors.as_mut_ptr(),
21229 out.c_tensor,
21230 self.c_tensor
21231 ));
21232 Ok(Tensor { c_tensor: c_tensors[0] })
21233 }
21234
21235 pub fn f_linalg_eigvalsh(&self, uplo: &str) -> Result<Tensor, TchError> {
21236 let mut c_tensors = [std::ptr::null_mut(); 1];
21237 unsafe_torch_err!(atg_linalg_eigvalsh(
21238 c_tensors.as_mut_ptr(),
21239 self.c_tensor,
21240 uplo.as_ptr(),
21241 uplo.len() as i32
21242 ));
21243 Ok(Tensor { c_tensor: c_tensors[0] })
21244 }
21245
21246 pub fn f_linalg_eigvalsh_out(&self, out: &Tensor, uplo: &str) -> Result<Tensor, TchError> {
21247 let mut c_tensors = [std::ptr::null_mut(); 1];
21248 unsafe_torch_err!(atg_linalg_eigvalsh_out(
21249 c_tensors.as_mut_ptr(),
21250 out.c_tensor,
21251 self.c_tensor,
21252 uplo.as_ptr(),
21253 uplo.len() as i32
21254 ));
21255 Ok(Tensor { c_tensor: c_tensors[0] })
21256 }
21257
21258 pub fn f_linalg_householder_product(&self, tau: &Tensor) -> Result<Tensor, TchError> {
21259 let mut c_tensors = [std::ptr::null_mut(); 1];
21260 unsafe_torch_err!(atg_linalg_householder_product(
21261 c_tensors.as_mut_ptr(),
21262 self.c_tensor,
21263 tau.c_tensor
21264 ));
21265 Ok(Tensor { c_tensor: c_tensors[0] })
21266 }
21267
21268 pub fn f_linalg_householder_product_out(
21269 &self,
21270 out: &Tensor,
21271 tau: &Tensor,
21272 ) -> Result<Tensor, TchError> {
21273 let mut c_tensors = [std::ptr::null_mut(); 1];
21274 unsafe_torch_err!(atg_linalg_householder_product_out(
21275 c_tensors.as_mut_ptr(),
21276 out.c_tensor,
21277 self.c_tensor,
21278 tau.c_tensor
21279 ));
21280 Ok(Tensor { c_tensor: c_tensors[0] })
21281 }
21282
21283 pub fn f_linalg_inv(a: &Tensor) -> Result<Tensor, TchError> {
21284 let mut c_tensors = [std::ptr::null_mut(); 1];
21285 unsafe_torch_err!(atg_linalg_inv(c_tensors.as_mut_ptr(), a.c_tensor));
21286 Ok(Tensor { c_tensor: c_tensors[0] })
21287 }
21288
21289 pub fn f_linalg_inv_ex(a: &Tensor, check_errors: bool) -> Result<(Tensor, Tensor), TchError> {
21290 let mut c_tensors = [std::ptr::null_mut(); 2];
21291 unsafe_torch_err!(atg_linalg_inv_ex(
21292 c_tensors.as_mut_ptr(),
21293 a.c_tensor,
21294 if check_errors { 1 } else { 0 }
21295 ));
21296 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
21297 }
21298
21299 pub fn f_linalg_inv_ex_inverse(
21300 inverse: &Tensor,
21301 info: &Tensor,
21302 a: &Tensor,
21303 check_errors: bool,
21304 ) -> Result<(Tensor, Tensor), TchError> {
21305 let mut c_tensors = [std::ptr::null_mut(); 2];
21306 unsafe_torch_err!(atg_linalg_inv_ex_inverse(
21307 c_tensors.as_mut_ptr(),
21308 inverse.c_tensor,
21309 info.c_tensor,
21310 a.c_tensor,
21311 if check_errors { 1 } else { 0 }
21312 ));
21313 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
21314 }
21315
21316 pub fn f_linalg_inv_out(out: &Tensor, a: &Tensor) -> Result<Tensor, TchError> {
21317 let mut c_tensors = [std::ptr::null_mut(); 1];
21318 unsafe_torch_err!(atg_linalg_inv_out(c_tensors.as_mut_ptr(), out.c_tensor, a.c_tensor));
21319 Ok(Tensor { c_tensor: c_tensors[0] })
21320 }
21321
21322 pub fn f_linalg_ldl_factor(&self, hermitian: bool) -> Result<(Tensor, Tensor), TchError> {
21323 let mut c_tensors = [std::ptr::null_mut(); 2];
21324 unsafe_torch_err!(atg_linalg_ldl_factor(
21325 c_tensors.as_mut_ptr(),
21326 self.c_tensor,
21327 if hermitian { 1 } else { 0 }
21328 ));
21329 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
21330 }
21331
21332 pub fn f_linalg_ldl_factor_ex(
21333 &self,
21334 hermitian: bool,
21335 check_errors: bool,
21336 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
21337 let mut c_tensors = [std::ptr::null_mut(); 3];
21338 unsafe_torch_err!(atg_linalg_ldl_factor_ex(
21339 c_tensors.as_mut_ptr(),
21340 self.c_tensor,
21341 if hermitian { 1 } else { 0 },
21342 if check_errors { 1 } else { 0 }
21343 ));
21344 Ok((
21345 Tensor { c_tensor: c_tensors[0] },
21346 Tensor { c_tensor: c_tensors[1] },
21347 Tensor { c_tensor: c_tensors[2] },
21348 ))
21349 }
21350
21351 pub fn f_linalg_ldl_factor_ex_out(
21352 &self,
21353 ld: &Tensor,
21354 pivots: &Tensor,
21355 info: &Tensor,
21356 hermitian: bool,
21357 check_errors: bool,
21358 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
21359 let mut c_tensors = [std::ptr::null_mut(); 3];
21360 unsafe_torch_err!(atg_linalg_ldl_factor_ex_out(
21361 c_tensors.as_mut_ptr(),
21362 ld.c_tensor,
21363 pivots.c_tensor,
21364 info.c_tensor,
21365 self.c_tensor,
21366 if hermitian { 1 } else { 0 },
21367 if check_errors { 1 } else { 0 }
21368 ));
21369 Ok((
21370 Tensor { c_tensor: c_tensors[0] },
21371 Tensor { c_tensor: c_tensors[1] },
21372 Tensor { c_tensor: c_tensors[2] },
21373 ))
21374 }
21375
21376 pub fn f_linalg_ldl_factor_out(
21377 &self,
21378 ld: &Tensor,
21379 pivots: &Tensor,
21380 hermitian: bool,
21381 ) -> Result<(Tensor, Tensor), TchError> {
21382 let mut c_tensors = [std::ptr::null_mut(); 2];
21383 unsafe_torch_err!(atg_linalg_ldl_factor_out(
21384 c_tensors.as_mut_ptr(),
21385 ld.c_tensor,
21386 pivots.c_tensor,
21387 self.c_tensor,
21388 if hermitian { 1 } else { 0 }
21389 ));
21390 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
21391 }
21392
21393 pub fn f_linalg_ldl_solve(
21394 ld: &Tensor,
21395 pivots: &Tensor,
21396 b: &Tensor,
21397 hermitian: bool,
21398 ) -> Result<Tensor, TchError> {
21399 let mut c_tensors = [std::ptr::null_mut(); 1];
21400 unsafe_torch_err!(atg_linalg_ldl_solve(
21401 c_tensors.as_mut_ptr(),
21402 ld.c_tensor,
21403 pivots.c_tensor,
21404 b.c_tensor,
21405 if hermitian { 1 } else { 0 }
21406 ));
21407 Ok(Tensor { c_tensor: c_tensors[0] })
21408 }
21409
21410 pub fn f_linalg_ldl_solve_out(
21411 out: &Tensor,
21412 ld: &Tensor,
21413 pivots: &Tensor,
21414 b: &Tensor,
21415 hermitian: bool,
21416 ) -> Result<Tensor, TchError> {
21417 let mut c_tensors = [std::ptr::null_mut(); 1];
21418 unsafe_torch_err!(atg_linalg_ldl_solve_out(
21419 c_tensors.as_mut_ptr(),
21420 out.c_tensor,
21421 ld.c_tensor,
21422 pivots.c_tensor,
21423 b.c_tensor,
21424 if hermitian { 1 } else { 0 }
21425 ));
21426 Ok(Tensor { c_tensor: c_tensors[0] })
21427 }
21428
21429 pub fn f_linalg_lstsq(
21430 &self,
21431 b: &Tensor,
21432 rcond: impl Into<Option<f64>>,
21433 driver: &str,
21434 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
21435 let rcond = rcond.into();
21436 let mut c_tensors = [std::ptr::null_mut(); 4];
21437 unsafe_torch_err!(atg_linalg_lstsq(
21438 c_tensors.as_mut_ptr(),
21439 self.c_tensor,
21440 b.c_tensor,
21441 rcond.unwrap_or(std::f64::NAN),
21442 rcond.is_none() as i8,
21443 driver.as_ptr(),
21444 driver.len() as i32
21445 ));
21446 Ok((
21447 Tensor { c_tensor: c_tensors[0] },
21448 Tensor { c_tensor: c_tensors[1] },
21449 Tensor { c_tensor: c_tensors[2] },
21450 Tensor { c_tensor: c_tensors[3] },
21451 ))
21452 }
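    // Note on the optional arguments: a `None` rcond is encoded for the C API as a NaN
    // value plus an explicit "is none" flag, as seen above. The `driver` string selects
    // the LAPACK/cuSOLVER routine; upstream accepts e.g. "gelsy", "gelsd", "gelss" on
    // CPU and "gels" on CUDA, though the exact set is an upstream detail not validated
    // by this binding.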
21453
21454 pub fn f_linalg_lstsq_out(
21455 &self,
21456 solution: &Tensor,
21457 residuals: &Tensor,
21458 rank: &Tensor,
21459 singular_values: &Tensor,
21460 b: &Tensor,
21461 rcond: impl Into<Option<f64>>,
21462 driver: &str,
21463 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
21464 let rcond = rcond.into();
21465 let mut c_tensors = [std::ptr::null_mut(); 4];
21466 unsafe_torch_err!(atg_linalg_lstsq_out(
21467 c_tensors.as_mut_ptr(),
21468 solution.c_tensor,
21469 residuals.c_tensor,
21470 rank.c_tensor,
21471 singular_values.c_tensor,
21472 self.c_tensor,
21473 b.c_tensor,
21474 rcond.unwrap_or(std::f64::NAN),
21475 rcond.is_none() as i8,
21476 driver.as_ptr(),
21477 driver.len() as i32
21478 ));
21479 Ok((
21480 Tensor { c_tensor: c_tensors[0] },
21481 Tensor { c_tensor: c_tensors[1] },
21482 Tensor { c_tensor: c_tensors[2] },
21483 Tensor { c_tensor: c_tensors[3] },
21484 ))
21485 }
21486
21487 pub fn f_linalg_lu(a: &Tensor, pivot: bool) -> Result<(Tensor, Tensor, Tensor), TchError> {
21488 let mut c_tensors = [std::ptr::null_mut(); 3];
21489 unsafe_torch_err!(atg_linalg_lu(
21490 c_tensors.as_mut_ptr(),
21491 a.c_tensor,
21492 if pivot { 1 } else { 0 }
21493 ));
21494 Ok((
21495 Tensor { c_tensor: c_tensors[0] },
21496 Tensor { c_tensor: c_tensors[1] },
21497 Tensor { c_tensor: c_tensors[2] },
21498 ))
21499 }
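    // Usage sketch (illustrative only): `linalg_lu` factors `a` as `P · L · U`
    // (permutation, unit lower-triangular, upper-triangular). With `pivot == false`
    // the permutation is skipped, which upstream supports only on some backends.
    //     let (p, l, u) = Tensor::f_linalg_lu(&a, true)?;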
21500
21501 pub fn f_linalg_lu_factor(a: &Tensor, pivot: bool) -> Result<(Tensor, Tensor), TchError> {
21502 let mut c_tensors = [std::ptr::null_mut(); 2];
21503 unsafe_torch_err!(atg_linalg_lu_factor(
21504 c_tensors.as_mut_ptr(),
21505 a.c_tensor,
21506 if pivot { 1 } else { 0 }
21507 ));
21508 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
21509 }
21510
21511 pub fn f_linalg_lu_factor_ex(
21512 a: &Tensor,
21513 pivot: bool,
21514 check_errors: bool,
21515 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
21516 let mut c_tensors = [std::ptr::null_mut(); 3];
21517 unsafe_torch_err!(atg_linalg_lu_factor_ex(
21518 c_tensors.as_mut_ptr(),
21519 a.c_tensor,
21520 if pivot { 1 } else { 0 },
21521 if check_errors { 1 } else { 0 }
21522 ));
21523 Ok((
21524 Tensor { c_tensor: c_tensors[0] },
21525 Tensor { c_tensor: c_tensors[1] },
21526 Tensor { c_tensor: c_tensors[2] },
21527 ))
21528 }
21529
21530 pub fn f_linalg_lu_factor_ex_out(
21531 lu: &Tensor,
21532 pivots: &Tensor,
21533 info: &Tensor,
21534 a: &Tensor,
21535 pivot: bool,
21536 check_errors: bool,
21537 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
21538 let mut c_tensors = [std::ptr::null_mut(); 3];
21539 unsafe_torch_err!(atg_linalg_lu_factor_ex_out(
21540 c_tensors.as_mut_ptr(),
21541 lu.c_tensor,
21542 pivots.c_tensor,
21543 info.c_tensor,
21544 a.c_tensor,
21545 if pivot { 1 } else { 0 },
21546 if check_errors { 1 } else { 0 }
21547 ));
21548 Ok((
21549 Tensor { c_tensor: c_tensors[0] },
21550 Tensor { c_tensor: c_tensors[1] },
21551 Tensor { c_tensor: c_tensors[2] },
21552 ))
21553 }
21554
21555 pub fn f_linalg_lu_factor_out(
21556 lu: &Tensor,
21557 pivots: &Tensor,
21558 a: &Tensor,
21559 pivot: bool,
21560 ) -> Result<(Tensor, Tensor), TchError> {
21561 let mut c_tensors = [std::ptr::null_mut(); 2];
21562 unsafe_torch_err!(atg_linalg_lu_factor_out(
21563 c_tensors.as_mut_ptr(),
21564 lu.c_tensor,
21565 pivots.c_tensor,
21566 a.c_tensor,
21567 if pivot { 1 } else { 0 }
21568 ));
21569 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
21570 }
21571
21572 pub fn f_linalg_lu_out(
21573 p: &Tensor,
21574 l: &Tensor,
21575 u: &Tensor,
21576 a: &Tensor,
21577 pivot: bool,
21578 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
21579 let mut c_tensors = [std::ptr::null_mut(); 3];
21580 unsafe_torch_err!(atg_linalg_lu_out(
21581 c_tensors.as_mut_ptr(),
21582 p.c_tensor,
21583 l.c_tensor,
21584 u.c_tensor,
21585 a.c_tensor,
21586 if pivot { 1 } else { 0 }
21587 ));
21588 Ok((
21589 Tensor { c_tensor: c_tensors[0] },
21590 Tensor { c_tensor: c_tensors[1] },
21591 Tensor { c_tensor: c_tensors[2] },
21592 ))
21593 }
21594
21595 pub fn f_linalg_lu_solve(
21596 lu: &Tensor,
21597 pivots: &Tensor,
21598 b: &Tensor,
21599 left: bool,
21600 adjoint: bool,
21601 ) -> Result<Tensor, TchError> {
21602 let mut c_tensors = [std::ptr::null_mut(); 1];
21603 unsafe_torch_err!(atg_linalg_lu_solve(
21604 c_tensors.as_mut_ptr(),
21605 lu.c_tensor,
21606 pivots.c_tensor,
21607 b.c_tensor,
21608 if left { 1 } else { 0 },
21609 if adjoint { 1 } else { 0 }
21610 ));
21611 Ok(Tensor { c_tensor: c_tensors[0] })
21612 }
21613
21614 pub fn f_linalg_lu_solve_out(
21615 out: &Tensor,
21616 lu: &Tensor,
21617 pivots: &Tensor,
21618 b: &Tensor,
21619 left: bool,
21620 adjoint: bool,
21621 ) -> Result<Tensor, TchError> {
21622 let mut c_tensors = [std::ptr::null_mut(); 1];
21623 unsafe_torch_err!(atg_linalg_lu_solve_out(
21624 c_tensors.as_mut_ptr(),
21625 out.c_tensor,
21626 lu.c_tensor,
21627 pivots.c_tensor,
21628 b.c_tensor,
21629 if left { 1 } else { 0 },
21630 if adjoint { 1 } else { 0 }
21631 ));
21632 Ok(Tensor { c_tensor: c_tensors[0] })
21633 }
21634
21635 pub fn f_linalg_matmul(&self, other: &Tensor) -> Result<Tensor, TchError> {
21636 let mut c_tensors = [std::ptr::null_mut(); 1];
21637 unsafe_torch_err!(atg_linalg_matmul(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
21638 Ok(Tensor { c_tensor: c_tensors[0] })
21639 }
21640
21641 pub fn f_linalg_matmul_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
21642 let mut c_tensors = [std::ptr::null_mut(); 1];
21643 unsafe_torch_err!(atg_linalg_matmul_out(
21644 c_tensors.as_mut_ptr(),
21645 out.c_tensor,
21646 self.c_tensor,
21647 other.c_tensor
21648 ));
21649 Ok(Tensor { c_tensor: c_tensors[0] })
21650 }
21651
21652 pub fn f_linalg_matrix_exp(&self) -> Result<Tensor, TchError> {
21653 let mut c_tensors = [std::ptr::null_mut(); 1];
21654 unsafe_torch_err!(atg_linalg_matrix_exp(c_tensors.as_mut_ptr(), self.c_tensor));
21655 Ok(Tensor { c_tensor: c_tensors[0] })
21656 }
21657
21658 pub fn f_linalg_matrix_exp_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
21659 let mut c_tensors = [std::ptr::null_mut(); 1];
21660 unsafe_torch_err!(atg_linalg_matrix_exp_out(
21661 c_tensors.as_mut_ptr(),
21662 out.c_tensor,
21663 self.c_tensor
21664 ));
21665 Ok(Tensor { c_tensor: c_tensors[0] })
21666 }
21667
21668 pub fn f_linalg_matrix_power(&self, n: i64) -> Result<Tensor, TchError> {
21669 let mut c_tensors = [std::ptr::null_mut(); 1];
21670 unsafe_torch_err!(atg_linalg_matrix_power(c_tensors.as_mut_ptr(), self.c_tensor, n));
21671 Ok(Tensor { c_tensor: c_tensors[0] })
21672 }
21673
21674 pub fn f_linalg_matrix_power_out(&self, out: &Tensor, n: i64) -> Result<Tensor, TchError> {
21675 let mut c_tensors = [std::ptr::null_mut(); 1];
21676 unsafe_torch_err!(atg_linalg_matrix_power_out(
21677 c_tensors.as_mut_ptr(),
21678 out.c_tensor,
21679 self.c_tensor,
21680 n
21681 ));
21682 Ok(Tensor { c_tensor: c_tensors[0] })
21683 }
21684
21685 pub fn f_linalg_matrix_rank(&self, tol: f64, hermitian: bool) -> Result<Tensor, TchError> {
21686 let mut c_tensors = [std::ptr::null_mut(); 1];
21687 unsafe_torch_err!(atg_linalg_matrix_rank(
21688 c_tensors.as_mut_ptr(),
21689 self.c_tensor,
21690 tol,
21691 if hermitian { 1 } else { 0 }
21692 ));
21693 Ok(Tensor { c_tensor: c_tensors[0] })
21694 }
21695
21696 pub fn f_linalg_matrix_rank_atol_rtol_float(
21697 &self,
21698 atol: impl Into<Option<f64>>,
21699 rtol: impl Into<Option<f64>>,
21700 hermitian: bool,
21701 ) -> Result<Tensor, TchError> {
21702 let atol = atol.into();
21703 let rtol = rtol.into();
21704 let mut c_tensors = [std::ptr::null_mut(); 1];
21705 unsafe_torch_err!(atg_linalg_matrix_rank_atol_rtol_float(
21706 c_tensors.as_mut_ptr(),
21707 self.c_tensor,
21708 atol.unwrap_or(std::f64::NAN),
21709 atol.is_none() as i8,
21710 rtol.unwrap_or(std::f64::NAN),
21711 rtol.is_none() as i8,
21712 if hermitian { 1 } else { 0 }
21713 ));
21714 Ok(Tensor { c_tensor: c_tensors[0] })
21715 }
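    // Note: the `Option<f64>` tolerances use the same encoding as elsewhere in this
    // file: the value (NaN when absent) is passed alongside an `is_none` flag so the
    // C side can distinguish "no tolerance given" from an explicit NaN.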
21716
21717 pub fn f_linalg_matrix_rank_atol_rtol_float_out(
21718 &self,
21719 out: &Tensor,
21720 atol: impl Into<Option<f64>>,
21721 rtol: impl Into<Option<f64>>,
21722 hermitian: bool,
21723 ) -> Result<Tensor, TchError> {
21724 let atol = atol.into();
21725 let rtol = rtol.into();
21726 let mut c_tensors = [std::ptr::null_mut(); 1];
21727 unsafe_torch_err!(atg_linalg_matrix_rank_atol_rtol_float_out(
21728 c_tensors.as_mut_ptr(),
21729 out.c_tensor,
21730 self.c_tensor,
21731 atol.unwrap_or(std::f64::NAN),
21732 atol.is_none() as i8,
21733 rtol.unwrap_or(std::f64::NAN),
21734 rtol.is_none() as i8,
21735 if hermitian { 1 } else { 0 }
21736 ));
21737 Ok(Tensor { c_tensor: c_tensors[0] })
21738 }
21739
21740 pub fn f_linalg_matrix_rank_atol_rtol_tensor<T: Borrow<Tensor>>(
21741 &self,
21742 atol: Option<T>,
21743 rtol: Option<T>,
21744 hermitian: bool,
21745 ) -> Result<Tensor, TchError> {
21746 let mut c_tensors = [std::ptr::null_mut(); 1];
21747 unsafe_torch_err!(atg_linalg_matrix_rank_atol_rtol_tensor(
21748 c_tensors.as_mut_ptr(),
21749 self.c_tensor,
21750 atol.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
21751 rtol.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
21752 if hermitian { 1 } else { 0 }
21753 ));
21754 Ok(Tensor { c_tensor: c_tensors[0] })
21755 }
21756
21757 pub fn f_linalg_matrix_rank_atol_rtol_tensor_out<T: Borrow<Tensor>>(
21758 &self,
21759 out: &Tensor,
21760 atol: Option<T>,
21761 rtol: Option<T>,
21762 hermitian: bool,
21763 ) -> Result<Tensor, TchError> {
21764 let mut c_tensors = [std::ptr::null_mut(); 1];
21765 unsafe_torch_err!(atg_linalg_matrix_rank_atol_rtol_tensor_out(
21766 c_tensors.as_mut_ptr(),
21767 out.c_tensor,
21768 self.c_tensor,
21769 atol.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
21770 rtol.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
21771 if hermitian { 1 } else { 0 }
21772 ));
21773 Ok(Tensor { c_tensor: c_tensors[0] })
21774 }
21775
21776 pub fn f_linalg_matrix_rank_out(
21777 &self,
21778 out: &Tensor,
21779 tol: f64,
21780 hermitian: bool,
21781 ) -> Result<Tensor, TchError> {
21782 let mut c_tensors = [std::ptr::null_mut(); 1];
21783 unsafe_torch_err!(atg_linalg_matrix_rank_out(
21784 c_tensors.as_mut_ptr(),
21785 out.c_tensor,
21786 self.c_tensor,
21787 tol,
21788 if hermitian { 1 } else { 0 }
21789 ));
21790 Ok(Tensor { c_tensor: c_tensors[0] })
21791 }
21792
21793 pub fn f_linalg_matrix_rank_out_tol_tensor(
21794 &self,
21795 out: &Tensor,
21796 tol: &Tensor,
21797 hermitian: bool,
21798 ) -> Result<Tensor, TchError> {
21799 let mut c_tensors = [std::ptr::null_mut(); 1];
21800 unsafe_torch_err!(atg_linalg_matrix_rank_out_tol_tensor(
21801 c_tensors.as_mut_ptr(),
21802 out.c_tensor,
21803 self.c_tensor,
21804 tol.c_tensor,
21805 if hermitian { 1 } else { 0 }
21806 ));
21807 Ok(Tensor { c_tensor: c_tensors[0] })
21808 }
21809
21810 pub fn f_linalg_matrix_rank_tol_tensor(
21811 &self,
21812 tol: &Tensor,
21813 hermitian: bool,
21814 ) -> Result<Tensor, TchError> {
21815 let mut c_tensors = [std::ptr::null_mut(); 1];
21816 unsafe_torch_err!(atg_linalg_matrix_rank_tol_tensor(
21817 c_tensors.as_mut_ptr(),
21818 self.c_tensor,
21819 tol.c_tensor,
21820 if hermitian { 1 } else { 0 }
21821 ));
21822 Ok(Tensor { c_tensor: c_tensors[0] })
21823 }
21824
21825 pub fn f_linalg_multi_dot<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
21826 let mut c_tensors = [std::ptr::null_mut(); 1];
21827 unsafe_torch_err!(atg_linalg_multi_dot(
21828 c_tensors.as_mut_ptr(),
21829 ptr_list(tensors).as_ptr(),
21830 tensors.len() as i32
21831 ));
21832 Ok(Tensor { c_tensor: c_tensors[0] })
21833 }
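    // multi_dot multiplies a chain of 2-D tensors, choosing the association order that
    // minimises the intermediate sizes; `ptr_list` flattens the borrowed slice into the
    // raw tensor pointers expected by the C API.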
21834
21835 pub fn f_linalg_multi_dot_out<T: Borrow<Tensor>>(
21836 out: &Tensor,
21837 tensors: &[T],
21838 ) -> Result<Tensor, TchError> {
21839 let mut c_tensors = [std::ptr::null_mut(); 1];
21840 unsafe_torch_err!(atg_linalg_multi_dot_out(
21841 c_tensors.as_mut_ptr(),
21842 out.c_tensor,
21843 ptr_list(tensors).as_ptr(),
21844 tensors.len() as i32
21845 ));
21846 Ok(Tensor { c_tensor: c_tensors[0] })
21847 }
21848
21849 pub fn f_linalg_norm<S: Into<Scalar>>(
21850 &self,
21851 ord: S,
21852 dim: impl IntListOption,
21853 keepdim: bool,
21854 dtype: impl Into<Option<Kind>>,
21855 ) -> Result<Tensor, TchError> {
21856 let mut c_tensors = [std::ptr::null_mut(); 1];
21857 unsafe_torch_err!(atg_linalg_norm(
21858 c_tensors.as_mut_ptr(),
21859 self.c_tensor,
21860 ord.into().c_scalar,
21861 dim.as_ptr(),
21862 dim.len_i32(),
21863 if keepdim { 1 } else { 0 },
21864 dtype.into().map_or(-1, |s| s.c_int())
21865 ));
21866 Ok(Tensor { c_tensor: c_tensors[0] })
21867 }
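    // Note: `ord` here is a scalar order (e.g. 2 or f64::INFINITY); the `_ord_str`
    // variants below take the named matrix norms ("fro", "nuc") instead. A `dtype` of
    // `None` is encoded as `-1`, which the C side is assumed to treat as "keep the
    // input dtype", per the binding convention used throughout this file.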
21868
21869 pub fn f_linalg_norm_ord_str(
21870 &self,
21871 ord: &str,
21872 dim: impl IntListOption,
21873 keepdim: bool,
21874 dtype: impl Into<Option<Kind>>,
21875 ) -> Result<Tensor, TchError> {
21876 let mut c_tensors = [std::ptr::null_mut(); 1];
21877 unsafe_torch_err!(atg_linalg_norm_ord_str(
21878 c_tensors.as_mut_ptr(),
21879 self.c_tensor,
21880 ord.as_ptr(),
21881 ord.len() as i32,
21882 dim.as_ptr(),
21883 dim.len_i32(),
21884 if keepdim { 1 } else { 0 },
21885 dtype.into().map_or(-1, |s| s.c_int())
21886 ));
21887 Ok(Tensor { c_tensor: c_tensors[0] })
21888 }
21889
21890 pub fn f_linalg_norm_ord_str_out(
21891 &self,
21892 out: &Tensor,
21893 ord: &str,
21894 dim: impl IntListOption,
21895 keepdim: bool,
21896 dtype: impl Into<Option<Kind>>,
21897 ) -> Result<Tensor, TchError> {
21898 let mut c_tensors = [std::ptr::null_mut(); 1];
21899 unsafe_torch_err!(atg_linalg_norm_ord_str_out(
21900 c_tensors.as_mut_ptr(),
21901 out.c_tensor,
21902 self.c_tensor,
21903 ord.as_ptr(),
21904 ord.len() as i32,
21905 dim.as_ptr(),
21906 dim.len_i32(),
21907 if keepdim { 1 } else { 0 },
21908 dtype.into().map_or(-1, |s| s.c_int())
21909 ));
21910 Ok(Tensor { c_tensor: c_tensors[0] })
21911 }
21912
21913 pub fn f_linalg_norm_out<S: Into<Scalar>>(
21914 &self,
21915 out: &Tensor,
21916 ord: S,
21917 dim: impl IntListOption,
21918 keepdim: bool,
21919 dtype: impl Into<Option<Kind>>,
21920 ) -> Result<Tensor, TchError> {
21921 let mut c_tensors = [std::ptr::null_mut(); 1];
21922 unsafe_torch_err!(atg_linalg_norm_out(
21923 c_tensors.as_mut_ptr(),
21924 out.c_tensor,
21925 self.c_tensor,
21926 ord.into().c_scalar,
21927 dim.as_ptr(),
21928 dim.len_i32(),
21929 if keepdim { 1 } else { 0 },
21930 dtype.into().map_or(-1, |s| s.c_int())
21931 ));
21932 Ok(Tensor { c_tensor: c_tensors[0] })
21933 }
21934
21935 pub fn f_linalg_pinv(&self, rcond: f64, hermitian: bool) -> Result<Tensor, TchError> {
21936 let mut c_tensors = [std::ptr::null_mut(); 1];
21937 unsafe_torch_err!(atg_linalg_pinv(
21938 c_tensors.as_mut_ptr(),
21939 self.c_tensor,
21940 rcond,
21941 if hermitian { 1 } else { 0 }
21942 ));
21943 Ok(Tensor { c_tensor: c_tensors[0] })
21944 }
21945
21946 pub fn f_linalg_pinv_atol_rtol_float(
21947 &self,
21948 atol: impl Into<Option<f64>>,
21949 rtol: impl Into<Option<f64>>,
21950 hermitian: bool,
21951 ) -> Result<Tensor, TchError> {
21952 let atol = atol.into();
21953 let rtol = rtol.into();
21954 let mut c_tensors = [std::ptr::null_mut(); 1];
21955 unsafe_torch_err!(atg_linalg_pinv_atol_rtol_float(
21956 c_tensors.as_mut_ptr(),
21957 self.c_tensor,
21958 atol.unwrap_or(std::f64::NAN),
21959 atol.is_none() as i8,
21960 rtol.unwrap_or(std::f64::NAN),
21961 rtol.is_none() as i8,
21962 if hermitian { 1 } else { 0 }
21963 ));
21964 Ok(Tensor { c_tensor: c_tensors[0] })
21965 }
21966
21967 pub fn f_linalg_pinv_atol_rtol_float_out(
21968 &self,
21969 out: &Tensor,
21970 atol: impl Into<Option<f64>>,
21971 rtol: impl Into<Option<f64>>,
21972 hermitian: bool,
21973 ) -> Result<Tensor, TchError> {
21974 let atol = atol.into();
21975 let rtol = rtol.into();
21976 let mut c_tensors = [std::ptr::null_mut(); 1];
21977 unsafe_torch_err!(atg_linalg_pinv_atol_rtol_float_out(
21978 c_tensors.as_mut_ptr(),
21979 out.c_tensor,
21980 self.c_tensor,
21981 atol.unwrap_or(std::f64::NAN),
21982 atol.is_none() as i8,
21983 rtol.unwrap_or(std::f64::NAN),
21984 rtol.is_none() as i8,
21985 if hermitian { 1 } else { 0 }
21986 ));
21987 Ok(Tensor { c_tensor: c_tensors[0] })
21988 }
21989
21990 pub fn f_linalg_pinv_atol_rtol_tensor<T: Borrow<Tensor>>(
21991 &self,
21992 atol: Option<T>,
21993 rtol: Option<T>,
21994 hermitian: bool,
21995 ) -> Result<Tensor, TchError> {
21996 let mut c_tensors = [std::ptr::null_mut(); 1];
21997 unsafe_torch_err!(atg_linalg_pinv_atol_rtol_tensor(
21998 c_tensors.as_mut_ptr(),
21999 self.c_tensor,
22000 atol.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
22001 rtol.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
22002 if hermitian { 1 } else { 0 }
22003 ));
22004 Ok(Tensor { c_tensor: c_tensors[0] })
22005 }
22006
22007 pub fn f_linalg_pinv_atol_rtol_tensor_out<T: Borrow<Tensor>>(
22008 &self,
22009 out: &Tensor,
22010 atol: Option<T>,
22011 rtol: Option<T>,
22012 hermitian: bool,
22013 ) -> Result<Tensor, TchError> {
22014 let mut c_tensors = [std::ptr::null_mut(); 1];
22015 unsafe_torch_err!(atg_linalg_pinv_atol_rtol_tensor_out(
22016 c_tensors.as_mut_ptr(),
22017 out.c_tensor,
22018 self.c_tensor,
22019 atol.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
22020 rtol.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
22021 if hermitian { 1 } else { 0 }
22022 ));
22023 Ok(Tensor { c_tensor: c_tensors[0] })
22024 }
22025
22026 pub fn f_linalg_pinv_out(
22027 &self,
22028 out: &Tensor,
22029 rcond: f64,
22030 hermitian: bool,
22031 ) -> Result<Tensor, TchError> {
22032 let mut c_tensors = [std::ptr::null_mut(); 1];
22033 unsafe_torch_err!(atg_linalg_pinv_out(
22034 c_tensors.as_mut_ptr(),
22035 out.c_tensor,
22036 self.c_tensor,
22037 rcond,
22038 if hermitian { 1 } else { 0 }
22039 ));
22040 Ok(Tensor { c_tensor: c_tensors[0] })
22041 }
22042
22043 pub fn f_linalg_pinv_out_rcond_tensor(
22044 &self,
22045 out: &Tensor,
22046 rcond: &Tensor,
22047 hermitian: bool,
22048 ) -> Result<Tensor, TchError> {
22049 let mut c_tensors = [std::ptr::null_mut(); 1];
22050 unsafe_torch_err!(atg_linalg_pinv_out_rcond_tensor(
22051 c_tensors.as_mut_ptr(),
22052 out.c_tensor,
22053 self.c_tensor,
22054 rcond.c_tensor,
22055 if hermitian { 1 } else { 0 }
22056 ));
22057 Ok(Tensor { c_tensor: c_tensors[0] })
22058 }
22059
22060 pub fn f_linalg_pinv_rcond_tensor(
22061 &self,
22062 rcond: &Tensor,
22063 hermitian: bool,
22064 ) -> Result<Tensor, TchError> {
22065 let mut c_tensors = [std::ptr::null_mut(); 1];
22066 unsafe_torch_err!(atg_linalg_pinv_rcond_tensor(
22067 c_tensors.as_mut_ptr(),
22068 self.c_tensor,
22069 rcond.c_tensor,
22070 if hermitian { 1 } else { 0 }
22071 ));
22072 Ok(Tensor { c_tensor: c_tensors[0] })
22073 }
22074
22075 pub fn f_linalg_qr(a: &Tensor, mode: &str) -> Result<(Tensor, Tensor), TchError> {
22076 let mut c_tensors = [std::ptr::null_mut(); 2];
22077 unsafe_torch_err!(atg_linalg_qr(
22078 c_tensors.as_mut_ptr(),
22079 a.c_tensor,
22080 mode.as_ptr(),
22081 mode.len() as i32
22082 ));
22083 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
22084 }
22085
22086 pub fn f_linalg_qr_out(
22087 q: &Tensor,
22088 r: &Tensor,
22089 a: &Tensor,
22090 mode: &str,
22091 ) -> Result<(Tensor, Tensor), TchError> {
22092 let mut c_tensors = [std::ptr::null_mut(); 2];
22093 unsafe_torch_err!(atg_linalg_qr_out(
22094 c_tensors.as_mut_ptr(),
22095 q.c_tensor,
22096 r.c_tensor,
22097 a.c_tensor,
22098 mode.as_ptr(),
22099 mode.len() as i32
22100 ));
22101 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
22102 }
22103
22104 pub fn f_linalg_slogdet(a: &Tensor) -> Result<(Tensor, Tensor), TchError> {
22105 let mut c_tensors = [std::ptr::null_mut(); 2];
22106 unsafe_torch_err!(atg_linalg_slogdet(c_tensors.as_mut_ptr(), a.c_tensor));
22107 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
22108 }
22109
22110 pub fn f_linalg_slogdet_out(
22111 sign: &Tensor,
22112 logabsdet: &Tensor,
22113 a: &Tensor,
22114 ) -> Result<(Tensor, Tensor), TchError> {
22115 let mut c_tensors = [std::ptr::null_mut(); 2];
22116 unsafe_torch_err!(atg_linalg_slogdet_out(
22117 c_tensors.as_mut_ptr(),
22118 sign.c_tensor,
22119 logabsdet.c_tensor,
22120 a.c_tensor
22121 ));
22122 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
22123 }
22124
22125 pub fn f_linalg_solve(a: &Tensor, b: &Tensor, left: bool) -> Result<Tensor, TchError> {
22126 let mut c_tensors = [std::ptr::null_mut(); 1];
22127 unsafe_torch_err!(atg_linalg_solve(
22128 c_tensors.as_mut_ptr(),
22129 a.c_tensor,
22130 b.c_tensor,
22131 if left { 1 } else { 0 }
22132 ));
22133 Ok(Tensor { c_tensor: c_tensors[0] })
22134 }
22135
22136 pub fn f_linalg_solve_ex(
22137 a: &Tensor,
22138 b: &Tensor,
22139 left: bool,
22140 check_errors: bool,
22141 ) -> Result<(Tensor, Tensor), TchError> {
22142 let mut c_tensors = [std::ptr::null_mut(); 2];
22143 unsafe_torch_err!(atg_linalg_solve_ex(
22144 c_tensors.as_mut_ptr(),
22145 a.c_tensor,
22146 b.c_tensor,
22147 if left { 1 } else { 0 },
22148 if check_errors { 1 } else { 0 }
22149 ));
22150 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
22151 }
22152
22153 pub fn f_linalg_solve_ex_out(
22154 result: &Tensor,
22155 info: &Tensor,
22156 a: &Tensor,
22157 b: &Tensor,
22158 left: bool,
22159 check_errors: bool,
22160 ) -> Result<(Tensor, Tensor), TchError> {
22161 let mut c_tensors = [std::ptr::null_mut(); 2];
22162 unsafe_torch_err!(atg_linalg_solve_ex_out(
22163 c_tensors.as_mut_ptr(),
22164 result.c_tensor,
22165 info.c_tensor,
22166 a.c_tensor,
22167 b.c_tensor,
22168 if left { 1 } else { 0 },
22169 if check_errors { 1 } else { 0 }
22170 ));
22171 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
22172 }
22173
22174 pub fn f_linalg_solve_out(
22175 out: &Tensor,
22176 a: &Tensor,
22177 b: &Tensor,
22178 left: bool,
22179 ) -> Result<Tensor, TchError> {
22180 let mut c_tensors = [std::ptr::null_mut(); 1];
22181 unsafe_torch_err!(atg_linalg_solve_out(
22182 c_tensors.as_mut_ptr(),
22183 out.c_tensor,
22184 a.c_tensor,
22185 b.c_tensor,
22186 if left { 1 } else { 0 }
22187 ));
22188 Ok(Tensor { c_tensor: c_tensors[0] })
22189 }
22190
22191 pub fn f_linalg_solve_triangular(
22192 &self,
22193 b: &Tensor,
22194 upper: bool,
22195 left: bool,
22196 unitriangular: bool,
22197 ) -> Result<Tensor, TchError> {
22198 let mut c_tensors = [std::ptr::null_mut(); 1];
22199 unsafe_torch_err!(atg_linalg_solve_triangular(
22200 c_tensors.as_mut_ptr(),
22201 self.c_tensor,
22202 b.c_tensor,
22203 if upper { 1 } else { 0 },
22204 if left { 1 } else { 0 },
22205 if unitriangular { 1 } else { 0 }
22206 ));
22207 Ok(Tensor { c_tensor: c_tensors[0] })
22208 }
22209
22210 pub fn f_linalg_solve_triangular_out(
22211 &self,
22212 out: &Tensor,
22213 b: &Tensor,
22214 upper: bool,
22215 left: bool,
22216 unitriangular: bool,
22217 ) -> Result<Tensor, TchError> {
22218 let mut c_tensors = [std::ptr::null_mut(); 1];
22219 unsafe_torch_err!(atg_linalg_solve_triangular_out(
22220 c_tensors.as_mut_ptr(),
22221 out.c_tensor,
22222 self.c_tensor,
22223 b.c_tensor,
22224 if upper { 1 } else { 0 },
22225 if left { 1 } else { 0 },
22226 if unitriangular { 1 } else { 0 }
22227 ));
22228 Ok(Tensor { c_tensor: c_tensors[0] })
22229 }
22230
22231 pub fn f_linalg_svd(
22232 a: &Tensor,
22233 full_matrices: bool,
22234 driver: &str,
22235 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
22236 let mut c_tensors = [std::ptr::null_mut(); 3];
22237 unsafe_torch_err!(atg_linalg_svd(
22238 c_tensors.as_mut_ptr(),
22239 a.c_tensor,
22240 if full_matrices { 1 } else { 0 },
22241 driver.as_ptr(),
22242 driver.len() as i32
22243 ));
22244 Ok((
22245 Tensor { c_tensor: c_tensors[0] },
22246 Tensor { c_tensor: c_tensors[1] },
22247 Tensor { c_tensor: c_tensors[2] },
22248 ))
22249 }
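    // Usage sketch (illustrative only): the SVD returns `(u, s, vh)` with
    // `a = u · diag(s) · vh`. `full_matrices` selects full versus reduced factors, and
    // `driver` names the backend routine (an empty string is assumed to mean "let the
    // library choose").
    //     let (u, s, vh) = Tensor::f_linalg_svd(&a, false, "")?;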
22250
22251 pub fn f_linalg_svd_u(
22252 u: &Tensor,
22253 s: &Tensor,
22254 vh: &Tensor,
22255 a: &Tensor,
22256 full_matrices: bool,
22257 driver: &str,
22258 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
22259 let mut c_tensors = [std::ptr::null_mut(); 3];
22260 unsafe_torch_err!(atg_linalg_svd_u(
22261 c_tensors.as_mut_ptr(),
22262 u.c_tensor,
22263 s.c_tensor,
22264 vh.c_tensor,
22265 a.c_tensor,
22266 if full_matrices { 1 } else { 0 },
22267 driver.as_ptr(),
22268 driver.len() as i32
22269 ));
22270 Ok((
22271 Tensor { c_tensor: c_tensors[0] },
22272 Tensor { c_tensor: c_tensors[1] },
22273 Tensor { c_tensor: c_tensors[2] },
22274 ))
22275 }
22276
22277 pub fn f_linalg_svdvals(a: &Tensor, driver: &str) -> Result<Tensor, TchError> {
22278 let mut c_tensors = [std::ptr::null_mut(); 1];
22279 unsafe_torch_err!(atg_linalg_svdvals(
22280 c_tensors.as_mut_ptr(),
22281 a.c_tensor,
22282 driver.as_ptr(),
22283 driver.len() as i32
22284 ));
22285 Ok(Tensor { c_tensor: c_tensors[0] })
22286 }
22287
22288 pub fn f_linalg_svdvals_out(
22289 out: &Tensor,
22290 a: &Tensor,
22291 driver: &str,
22292 ) -> Result<Tensor, TchError> {
22293 let mut c_tensors = [std::ptr::null_mut(); 1];
22294 unsafe_torch_err!(atg_linalg_svdvals_out(
22295 c_tensors.as_mut_ptr(),
22296 out.c_tensor,
22297 a.c_tensor,
22298 driver.as_ptr(),
22299 driver.len() as i32
22300 ));
22301 Ok(Tensor { c_tensor: c_tensors[0] })
22302 }
22303
22304 pub fn f_linalg_tensorinv(&self, ind: i64) -> Result<Tensor, TchError> {
22305 let mut c_tensors = [std::ptr::null_mut(); 1];
22306 unsafe_torch_err!(atg_linalg_tensorinv(c_tensors.as_mut_ptr(), self.c_tensor, ind));
22307 Ok(Tensor { c_tensor: c_tensors[0] })
22308 }
22309
22310 pub fn f_linalg_tensorinv_out(&self, out: &Tensor, ind: i64) -> Result<Tensor, TchError> {
22311 let mut c_tensors = [std::ptr::null_mut(); 1];
22312 unsafe_torch_err!(atg_linalg_tensorinv_out(
22313 c_tensors.as_mut_ptr(),
22314 out.c_tensor,
22315 self.c_tensor,
22316 ind
22317 ));
22318 Ok(Tensor { c_tensor: c_tensors[0] })
22319 }
22320
22321 pub fn f_linalg_tensorsolve(
22322 &self,
22323 other: &Tensor,
22324 dims: impl IntListOption,
22325 ) -> Result<Tensor, TchError> {
22326 let mut c_tensors = [std::ptr::null_mut(); 1];
22327 unsafe_torch_err!(atg_linalg_tensorsolve(
22328 c_tensors.as_mut_ptr(),
22329 self.c_tensor,
22330 other.c_tensor,
22331 dims.as_ptr(),
22332 dims.len_i32()
22333 ));
22334 Ok(Tensor { c_tensor: c_tensors[0] })
22335 }
22336
22337 pub fn f_linalg_tensorsolve_out(
22338 &self,
22339 out: &Tensor,
22340 other: &Tensor,
22341 dims: impl IntListOption,
22342 ) -> Result<Tensor, TchError> {
22343 let mut c_tensors = [std::ptr::null_mut(); 1];
22344 unsafe_torch_err!(atg_linalg_tensorsolve_out(
22345 c_tensors.as_mut_ptr(),
22346 out.c_tensor,
22347 self.c_tensor,
22348 other.c_tensor,
22349 dims.as_ptr(),
22350 dims.len_i32()
22351 ));
22352 Ok(Tensor { c_tensor: c_tensors[0] })
22353 }
22354
22355 pub fn f_linalg_vander(x: &Tensor, n: impl Into<Option<i64>>) -> Result<Tensor, TchError> {
22356 let n = n.into();
22357 let mut c_tensors = [std::ptr::null_mut(); 1];
22358 unsafe_torch_err!(atg_linalg_vander(
22359 c_tensors.as_mut_ptr(),
22360 x.c_tensor,
22361 n.unwrap_or(0i64),
22362 n.is_none() as i8
22363 ));
22364 Ok(Tensor { c_tensor: c_tensors[0] })
22365 }
22366
22367 pub fn f_linalg_vecdot(x: &Tensor, y: &Tensor, dim: i64) -> Result<Tensor, TchError> {
22368 let mut c_tensors = [std::ptr::null_mut(); 1];
22369 unsafe_torch_err!(atg_linalg_vecdot(c_tensors.as_mut_ptr(), x.c_tensor, y.c_tensor, dim));
22370 Ok(Tensor { c_tensor: c_tensors[0] })
22371 }
22372
22373 pub fn f_linalg_vecdot_out(
22374 out: &Tensor,
22375 x: &Tensor,
22376 y: &Tensor,
22377 dim: i64,
22378 ) -> Result<Tensor, TchError> {
22379 let mut c_tensors = [std::ptr::null_mut(); 1];
22380 unsafe_torch_err!(atg_linalg_vecdot_out(
22381 c_tensors.as_mut_ptr(),
22382 out.c_tensor,
22383 x.c_tensor,
22384 y.c_tensor,
22385 dim
22386 ));
22387 Ok(Tensor { c_tensor: c_tensors[0] })
22388 }
22389
22390 pub fn f_linear<T: Borrow<Tensor>>(
22391 &self,
22392 weight: &Tensor,
22393 bias: Option<T>,
22394 ) -> Result<Tensor, TchError> {
22395 let mut c_tensors = [std::ptr::null_mut(); 1];
22396 unsafe_torch_err!(atg_linear(
22397 c_tensors.as_mut_ptr(),
22398 self.c_tensor,
22399 weight.c_tensor,
22400 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
22401 ));
22402 Ok(Tensor { c_tensor: c_tensors[0] })
22403 }
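    // Illustrative usage sketch (not part of the generated bindings): `linear` computes
    // `self · weightᵀ + bias`. Assuming `Tensor::randn` from the crate's public API:
    //     let x = Tensor::randn([8, 16], (Kind::Float, Device::Cpu));
    //     let w = Tensor::randn([4, 16], (Kind::Float, Device::Cpu));
    //     let y = x.f_linear(&w, None::<Tensor>)?; // shape [8, 4]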
22404
22405 pub fn f_linear_out<T: Borrow<Tensor>>(
22406 &self,
22407 out: &Tensor,
22408 weight: &Tensor,
22409 bias: Option<T>,
22410 ) -> Result<Tensor, TchError> {
22411 let mut c_tensors = [std::ptr::null_mut(); 1];
22412 unsafe_torch_err!(atg_linear_out(
22413 c_tensors.as_mut_ptr(),
22414 out.c_tensor,
22415 self.c_tensor,
22416 weight.c_tensor,
22417 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
22418 ));
22419 Ok(Tensor { c_tensor: c_tensors[0] })
22420 }
22421
22422 pub fn f_linspace<S: Into<Scalar>>(
22423 start: S,
22424 end: S,
22425 steps: i64,
22426 options: (Kind, Device),
22427 ) -> Result<Tensor, TchError> {
22428 let mut c_tensors = [std::ptr::null_mut(); 1];
22429 unsafe_torch_err!(atg_linspace(
22430 c_tensors.as_mut_ptr(),
22431 start.into().c_scalar,
22432 end.into().c_scalar,
22433 steps,
22434 options.0.c_int(),
22435 options.1.c_int()
22436 ));
22437 Ok(Tensor { c_tensor: c_tensors[0] })
22438 }
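    // Usage sketch (illustrative only): produces `steps` values evenly spaced from
    // `start` to `end` inclusive, with the requested kind/device:
    //     let t = Tensor::f_linspace(0.0, 1.0, 5, (Kind::Float, Device::Cpu))?;
    //     // [0.0, 0.25, 0.5, 0.75, 1.0]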
22439
22440 pub fn f_linspace_out<S: Into<Scalar>>(
22441 out: &Tensor,
22442 start: S,
22443 end: S,
22444 steps: i64,
22445 ) -> Result<Tensor, TchError> {
22446 let mut c_tensors = [std::ptr::null_mut(); 1];
22447 unsafe_torch_err!(atg_linspace_out(
22448 c_tensors.as_mut_ptr(),
22449 out.c_tensor,
22450 start.into().c_scalar,
22451 end.into().c_scalar,
22452 steps
22453 ));
22454 Ok(Tensor { c_tensor: c_tensors[0] })
22455 }
22456
22457 pub fn f_linspace_scalar_tensor<S: Into<Scalar>>(
22458 start: S,
22459 end: &Tensor,
22460 steps: i64,
22461 options: (Kind, Device),
22462 ) -> Result<Tensor, TchError> {
22463 let mut c_tensors = [std::ptr::null_mut(); 1];
22464 unsafe_torch_err!(atg_linspace_scalar_tensor(
22465 c_tensors.as_mut_ptr(),
22466 start.into().c_scalar,
22467 end.c_tensor,
22468 steps,
22469 options.0.c_int(),
22470 options.1.c_int()
22471 ));
22472 Ok(Tensor { c_tensor: c_tensors[0] })
22473 }
22474
22475 pub fn f_linspace_scalar_tensor_out<S: Into<Scalar>>(
22476 out: &Tensor,
22477 start: S,
22478 end: &Tensor,
22479 steps: i64,
22480 ) -> Result<Tensor, TchError> {
22481 let mut c_tensors = [std::ptr::null_mut(); 1];
22482 unsafe_torch_err!(atg_linspace_scalar_tensor_out(
22483 c_tensors.as_mut_ptr(),
22484 out.c_tensor,
22485 start.into().c_scalar,
22486 end.c_tensor,
22487 steps
22488 ));
22489 Ok(Tensor { c_tensor: c_tensors[0] })
22490 }
22491
22492 pub fn f_linspace_tensor_scalar<S: Into<Scalar>>(
22493 start: &Tensor,
22494 end: S,
22495 steps: i64,
22496 options: (Kind, Device),
22497 ) -> Result<Tensor, TchError> {
22498 let mut c_tensors = [std::ptr::null_mut(); 1];
22499 unsafe_torch_err!(atg_linspace_tensor_scalar(
22500 c_tensors.as_mut_ptr(),
22501 start.c_tensor,
22502 end.into().c_scalar,
22503 steps,
22504 options.0.c_int(),
22505 options.1.c_int()
22506 ));
22507 Ok(Tensor { c_tensor: c_tensors[0] })
22508 }
22509
22510 pub fn f_linspace_tensor_scalar_out<S: Into<Scalar>>(
22511 out: &Tensor,
22512 start: &Tensor,
22513 end: S,
22514 steps: i64,
22515 ) -> Result<Tensor, TchError> {
22516 let mut c_tensors = [std::ptr::null_mut(); 1];
22517 unsafe_torch_err!(atg_linspace_tensor_scalar_out(
22518 c_tensors.as_mut_ptr(),
22519 out.c_tensor,
22520 start.c_tensor,
22521 end.into().c_scalar,
22522 steps
22523 ));
22524 Ok(Tensor { c_tensor: c_tensors[0] })
22525 }
22526
22527 pub fn f_linspace_tensor_tensor(
22528 start: &Tensor,
22529 end: &Tensor,
22530 steps: i64,
22531 options: (Kind, Device),
22532 ) -> Result<Tensor, TchError> {
22533 let mut c_tensors = [std::ptr::null_mut(); 1];
22534 unsafe_torch_err!(atg_linspace_tensor_tensor(
22535 c_tensors.as_mut_ptr(),
22536 start.c_tensor,
22537 end.c_tensor,
22538 steps,
22539 options.0.c_int(),
22540 options.1.c_int()
22541 ));
22542 Ok(Tensor { c_tensor: c_tensors[0] })
22543 }
22544
22545 pub fn f_linspace_tensor_tensor_out(
22546 out: &Tensor,
22547 start: &Tensor,
22548 end: &Tensor,
22549 steps: i64,
22550 ) -> Result<Tensor, TchError> {
22551 let mut c_tensors = [std::ptr::null_mut(); 1];
22552 unsafe_torch_err!(atg_linspace_tensor_tensor_out(
22553 c_tensors.as_mut_ptr(),
22554 out.c_tensor,
22555 start.c_tensor,
22556 end.c_tensor,
22557 steps
22558 ));
22559 Ok(Tensor { c_tensor: c_tensors[0] })
22560 }
22561
22562 pub fn f_log(&self) -> Result<Tensor, TchError> {
22563 let mut c_tensors = [std::ptr::null_mut(); 1];
22564 unsafe_torch_err!(atg_log(c_tensors.as_mut_ptr(), self.c_tensor));
22565 Ok(Tensor { c_tensor: c_tensors[0] })
22566 }
22567
22568 pub fn f_log10(&self) -> Result<Tensor, TchError> {
22569 let mut c_tensors = [std::ptr::null_mut(); 1];
22570 unsafe_torch_err!(atg_log10(c_tensors.as_mut_ptr(), self.c_tensor));
22571 Ok(Tensor { c_tensor: c_tensors[0] })
22572 }
22573
22574 pub fn f_log10_(&mut self) -> Result<Tensor, TchError> {
22575 let mut c_tensors = [std::ptr::null_mut(); 1];
22576 unsafe_torch_err!(atg_log10_(c_tensors.as_mut_ptr(), self.c_tensor));
22577 Ok(Tensor { c_tensor: c_tensors[0] })
22578 }
22579
22580 pub fn f_log10_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
22581 let mut c_tensors = [std::ptr::null_mut(); 1];
22582 unsafe_torch_err!(atg_log10_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
22583 Ok(Tensor { c_tensor: c_tensors[0] })
22584 }
22585
22586 pub fn f_log1p(&self) -> Result<Tensor, TchError> {
22587 let mut c_tensors = [std::ptr::null_mut(); 1];
22588 unsafe_torch_err!(atg_log1p(c_tensors.as_mut_ptr(), self.c_tensor));
22589 Ok(Tensor { c_tensor: c_tensors[0] })
22590 }
22591
22592 pub fn f_log1p_(&mut self) -> Result<Tensor, TchError> {
22593 let mut c_tensors = [std::ptr::null_mut(); 1];
22594 unsafe_torch_err!(atg_log1p_(c_tensors.as_mut_ptr(), self.c_tensor));
22595 Ok(Tensor { c_tensor: c_tensors[0] })
22596 }
22597
22598 pub fn f_log1p_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
22599 let mut c_tensors = [std::ptr::null_mut(); 1];
22600 unsafe_torch_err!(atg_log1p_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
22601 Ok(Tensor { c_tensor: c_tensors[0] })
22602 }
22603
22604 pub fn f_log2(&self) -> Result<Tensor, TchError> {
22605 let mut c_tensors = [std::ptr::null_mut(); 1];
22606 unsafe_torch_err!(atg_log2(c_tensors.as_mut_ptr(), self.c_tensor));
22607 Ok(Tensor { c_tensor: c_tensors[0] })
22608 }
22609
22610 pub fn f_log2_(&mut self) -> Result<Tensor, TchError> {
22611 let mut c_tensors = [std::ptr::null_mut(); 1];
22612 unsafe_torch_err!(atg_log2_(c_tensors.as_mut_ptr(), self.c_tensor));
22613 Ok(Tensor { c_tensor: c_tensors[0] })
22614 }
22615
22616 pub fn f_log2_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
22617 let mut c_tensors = [std::ptr::null_mut(); 1];
22618 unsafe_torch_err!(atg_log2_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
22619 Ok(Tensor { c_tensor: c_tensors[0] })
22620 }
22621
22622 pub fn f_log_(&mut self) -> Result<Tensor, TchError> {
22623 let mut c_tensors = [std::ptr::null_mut(); 1];
22624 unsafe_torch_err!(atg_log_(c_tensors.as_mut_ptr(), self.c_tensor));
22625 Ok(Tensor { c_tensor: c_tensors[0] })
22626 }
22627
22628 pub fn f_log_normal(&self, mean: f64, std: f64) -> Result<Tensor, TchError> {
22629 let mut c_tensors = [std::ptr::null_mut(); 1];
22630 unsafe_torch_err!(atg_log_normal(c_tensors.as_mut_ptr(), self.c_tensor, mean, std));
22631 Ok(Tensor { c_tensor: c_tensors[0] })
22632 }
22633
22634 pub fn f_log_normal_(&mut self, mean: f64, std: f64) -> Result<Tensor, TchError> {
22635 let mut c_tensors = [std::ptr::null_mut(); 1];
22636 unsafe_torch_err!(atg_log_normal_(c_tensors.as_mut_ptr(), self.c_tensor, mean, std));
22637 Ok(Tensor { c_tensor: c_tensors[0] })
22638 }
22639
22640 pub fn f_log_normal_out(&self, out: &Tensor, mean: f64, std: f64) -> Result<Tensor, TchError> {
22641 let mut c_tensors = [std::ptr::null_mut(); 1];
22642 unsafe_torch_err!(atg_log_normal_out(
22643 c_tensors.as_mut_ptr(),
22644 out.c_tensor,
22645 self.c_tensor,
22646 mean,
22647 std
22648 ));
22649 Ok(Tensor { c_tensor: c_tensors[0] })
22650 }
22651
22652 pub fn f_log_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
22653 let mut c_tensors = [std::ptr::null_mut(); 1];
22654 unsafe_torch_err!(atg_log_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
22655 Ok(Tensor { c_tensor: c_tensors[0] })
22656 }
22657
22658 pub fn f_log_sigmoid(&self) -> Result<Tensor, TchError> {
22659 let mut c_tensors = [std::ptr::null_mut(); 1];
22660 unsafe_torch_err!(atg_log_sigmoid(c_tensors.as_mut_ptr(), self.c_tensor));
22661 Ok(Tensor { c_tensor: c_tensors[0] })
22662 }
22663
22664 pub fn f_log_sigmoid_backward(
22665 &self,
22666 grad_output: &Tensor,
22667 buffer: &Tensor,
22668 ) -> Result<Tensor, TchError> {
22669 let mut c_tensors = [std::ptr::null_mut(); 1];
22670 unsafe_torch_err!(atg_log_sigmoid_backward(
22671 c_tensors.as_mut_ptr(),
22672 grad_output.c_tensor,
22673 self.c_tensor,
22674 buffer.c_tensor
22675 ));
22676 Ok(Tensor { c_tensor: c_tensors[0] })
22677 }
22678
22679 pub fn f_log_sigmoid_backward_grad_input(
22680 &self,
22681 grad_input: &Tensor,
22682 grad_output: &Tensor,
22683 buffer: &Tensor,
22684 ) -> Result<Tensor, TchError> {
22685 let mut c_tensors = [std::ptr::null_mut(); 1];
22686 unsafe_torch_err!(atg_log_sigmoid_backward_grad_input(
22687 c_tensors.as_mut_ptr(),
22688 grad_input.c_tensor,
22689 grad_output.c_tensor,
22690 self.c_tensor,
22691 buffer.c_tensor
22692 ));
22693 Ok(Tensor { c_tensor: c_tensors[0] })
22694 }
22695
22696 pub fn f_log_sigmoid_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
22697 let mut c_tensors = [std::ptr::null_mut(); 1];
22698 unsafe_torch_err!(atg_log_sigmoid_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
22699 Ok(Tensor { c_tensor: c_tensors[0] })
22700 }
22701
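    /// Fallible log-softmax along `dim`. A `None` dtype keeps the input's kind
    /// (encoded as the `-1` sentinel the C layer expects); an explicit `Kind`
    /// asks ATen to compute and return in that dtype.
    ///
    /// A minimal usage sketch, kept out of doctests with `ignore`; the `demo`
    /// wrapper itself is hypothetical and only the `Tensor` / `TchError` types
    /// already in scope here are assumed:
    ///
    /// ```ignore
    /// fn demo(xs: &Tensor) -> Result<Tensor, TchError> {
    ///     // Log-softmax over the last dimension, keeping the input dtype.
    ///     xs.f_log_softmax(-1, None)
    /// }
    /// ```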
22702 pub fn f_log_softmax(
22703 &self,
22704 dim: i64,
22705 dtype: impl Into<Option<Kind>>,
22706 ) -> Result<Tensor, TchError> {
22707 let mut c_tensors = [std::ptr::null_mut(); 1];
22708 unsafe_torch_err!(atg_log_softmax(
22709 c_tensors.as_mut_ptr(),
22710 self.c_tensor,
22711 dim,
22712 dtype.into().map_or(-1, |s| s.c_int())
22713 ));
22714 Ok(Tensor { c_tensor: c_tensors[0] })
22715 }
22716
22717 pub fn f_log_softmax_int_out(
22718 &self,
22719 out: &Tensor,
22720 dim: i64,
22721 dtype: impl Into<Option<Kind>>,
22722 ) -> Result<Tensor, TchError> {
22723 let mut c_tensors = [std::ptr::null_mut(); 1];
22724 unsafe_torch_err!(atg_log_softmax_int_out(
22725 c_tensors.as_mut_ptr(),
22726 out.c_tensor,
22727 self.c_tensor,
22728 dim,
22729 dtype.into().map_or(-1, |s| s.c_int())
22730 ));
22731 Ok(Tensor { c_tensor: c_tensors[0] })
22732 }
22733
22734 pub fn f_logaddexp(&self, other: &Tensor) -> Result<Tensor, TchError> {
22735 let mut c_tensors = [std::ptr::null_mut(); 1];
22736 unsafe_torch_err!(atg_logaddexp(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
22737 Ok(Tensor { c_tensor: c_tensors[0] })
22738 }
22739
22740 pub fn f_logaddexp2(&self, other: &Tensor) -> Result<Tensor, TchError> {
22741 let mut c_tensors = [std::ptr::null_mut(); 1];
22742 unsafe_torch_err!(atg_logaddexp2(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
22743 Ok(Tensor { c_tensor: c_tensors[0] })
22744 }
22745
22746 pub fn f_logaddexp2_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
22747 let mut c_tensors = [std::ptr::null_mut(); 1];
22748 unsafe_torch_err!(atg_logaddexp2_out(
22749 c_tensors.as_mut_ptr(),
22750 out.c_tensor,
22751 self.c_tensor,
22752 other.c_tensor
22753 ));
22754 Ok(Tensor { c_tensor: c_tensors[0] })
22755 }
22756
22757 pub fn f_logaddexp_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
22758 let mut c_tensors = [std::ptr::null_mut(); 1];
22759 unsafe_torch_err!(atg_logaddexp_out(
22760 c_tensors.as_mut_ptr(),
22761 out.c_tensor,
22762 self.c_tensor,
22763 other.c_tensor
22764 ));
22765 Ok(Tensor { c_tensor: c_tensors[0] })
22766 }
22767
22768 pub fn f_logcumsumexp(&self, dim: i64) -> Result<Tensor, TchError> {
22769 let mut c_tensors = [std::ptr::null_mut(); 1];
22770 unsafe_torch_err!(atg_logcumsumexp(c_tensors.as_mut_ptr(), self.c_tensor, dim));
22771 Ok(Tensor { c_tensor: c_tensors[0] })
22772 }
22773
22774 pub fn f_logcumsumexp_out(&self, out: &Tensor, dim: i64) -> Result<Tensor, TchError> {
22775 let mut c_tensors = [std::ptr::null_mut(); 1];
22776 unsafe_torch_err!(atg_logcumsumexp_out(
22777 c_tensors.as_mut_ptr(),
22778 out.c_tensor,
22779 self.c_tensor,
22780 dim
22781 ));
22782 Ok(Tensor { c_tensor: c_tensors[0] })
22783 }
22784
22785 pub fn f_logdet(&self) -> Result<Tensor, TchError> {
22786 let mut c_tensors = [std::ptr::null_mut(); 1];
22787 unsafe_torch_err!(atg_logdet(c_tensors.as_mut_ptr(), self.c_tensor));
22788 Ok(Tensor { c_tensor: c_tensors[0] })
22789 }
22790
22791 pub fn f_logical_and(&self, other: &Tensor) -> Result<Tensor, TchError> {
22792 let mut c_tensors = [std::ptr::null_mut(); 1];
22793 unsafe_torch_err!(atg_logical_and(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
22794 Ok(Tensor { c_tensor: c_tensors[0] })
22795 }
22796
22797 pub fn f_logical_and_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
22798 let mut c_tensors = [std::ptr::null_mut(); 1];
22799 unsafe_torch_err!(atg_logical_and_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
22800 Ok(Tensor { c_tensor: c_tensors[0] })
22801 }
22802
22803 pub fn f_logical_and_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
22804 let mut c_tensors = [std::ptr::null_mut(); 1];
22805 unsafe_torch_err!(atg_logical_and_out(
22806 c_tensors.as_mut_ptr(),
22807 out.c_tensor,
22808 self.c_tensor,
22809 other.c_tensor
22810 ));
22811 Ok(Tensor { c_tensor: c_tensors[0] })
22812 }
22813
22814 pub fn f_logical_not(&self) -> Result<Tensor, TchError> {
22815 let mut c_tensors = [std::ptr::null_mut(); 1];
22816 unsafe_torch_err!(atg_logical_not(c_tensors.as_mut_ptr(), self.c_tensor));
22817 Ok(Tensor { c_tensor: c_tensors[0] })
22818 }
22819
22820 pub fn f_logical_not_(&mut self) -> Result<Tensor, TchError> {
22821 let mut c_tensors = [std::ptr::null_mut(); 1];
22822 unsafe_torch_err!(atg_logical_not_(c_tensors.as_mut_ptr(), self.c_tensor));
22823 Ok(Tensor { c_tensor: c_tensors[0] })
22824 }
22825
22826 pub fn f_logical_not_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
22827 let mut c_tensors = [std::ptr::null_mut(); 1];
22828 unsafe_torch_err!(atg_logical_not_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
22829 Ok(Tensor { c_tensor: c_tensors[0] })
22830 }
22831
22832 pub fn f_logical_or(&self, other: &Tensor) -> Result<Tensor, TchError> {
22833 let mut c_tensors = [std::ptr::null_mut(); 1];
22834 unsafe_torch_err!(atg_logical_or(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
22835 Ok(Tensor { c_tensor: c_tensors[0] })
22836 }
22837
22838 pub fn f_logical_or_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
22839 let mut c_tensors = [std::ptr::null_mut(); 1];
22840 unsafe_torch_err!(atg_logical_or_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
22841 Ok(Tensor { c_tensor: c_tensors[0] })
22842 }
22843
22844 pub fn f_logical_or_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
22845 let mut c_tensors = [std::ptr::null_mut(); 1];
22846 unsafe_torch_err!(atg_logical_or_out(
22847 c_tensors.as_mut_ptr(),
22848 out.c_tensor,
22849 self.c_tensor,
22850 other.c_tensor
22851 ));
22852 Ok(Tensor { c_tensor: c_tensors[0] })
22853 }
22854
22855 pub fn f_logical_xor(&self, other: &Tensor) -> Result<Tensor, TchError> {
22856 let mut c_tensors = [std::ptr::null_mut(); 1];
22857 unsafe_torch_err!(atg_logical_xor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
22858 Ok(Tensor { c_tensor: c_tensors[0] })
22859 }
22860
22861 pub fn f_logical_xor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
22862 let mut c_tensors = [std::ptr::null_mut(); 1];
22863 unsafe_torch_err!(atg_logical_xor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
22864 Ok(Tensor { c_tensor: c_tensors[0] })
22865 }
22866
22867 pub fn f_logical_xor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
22868 let mut c_tensors = [std::ptr::null_mut(); 1];
22869 unsafe_torch_err!(atg_logical_xor_out(
22870 c_tensors.as_mut_ptr(),
22871 out.c_tensor,
22872 self.c_tensor,
22873 other.c_tensor
22874 ));
22875 Ok(Tensor { c_tensor: c_tensors[0] })
22876 }
22877
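    /// Fallible logit, i.e. `log(p / (1 - p))`. When `eps` is given, the input is
    /// clamped to `[eps, 1 - eps]` first; `None` skips clamping and is encoded on
    /// the C side as NaN plus an "eps is absent" flag, as the call below shows.
    ///
    /// A minimal sketch (not compiled as a doctest); the `demo` wrapper is
    /// hypothetical and only `Tensor` / `TchError` from this module are assumed:
    ///
    /// ```ignore
    /// fn demo(xs: &Tensor) -> Result<(Tensor, Tensor), TchError> {
    ///     let clamped = xs.f_logit(1e-6)?; // clamp into [1e-6, 1 - 1e-6]
    ///     let raw = xs.f_logit(None)?;     // no clamping
    ///     Ok((clamped, raw))
    /// }
    /// ```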
22878 pub fn f_logit(&self, eps: impl Into<Option<f64>>) -> Result<Tensor, TchError> {
22879 let eps = eps.into();
22880 let mut c_tensors = [std::ptr::null_mut(); 1];
22881 unsafe_torch_err!(atg_logit(
22882 c_tensors.as_mut_ptr(),
22883 self.c_tensor,
22884 eps.unwrap_or(std::f64::NAN),
22885 eps.is_none() as i8
22886 ));
22887 Ok(Tensor { c_tensor: c_tensors[0] })
22888 }
22889
22890 pub fn f_logit_(&mut self, eps: impl Into<Option<f64>>) -> Result<Tensor, TchError> {
22891 let eps = eps.into();
22892 let mut c_tensors = [std::ptr::null_mut(); 1];
22893 unsafe_torch_err!(atg_logit_(
22894 c_tensors.as_mut_ptr(),
22895 self.c_tensor,
22896 eps.unwrap_or(std::f64::NAN),
22897 eps.is_none() as i8
22898 ));
22899 Ok(Tensor { c_tensor: c_tensors[0] })
22900 }
22901
22902 pub fn f_logit_backward(
22903 &self,
22904 grad_output: &Tensor,
22905 eps: impl Into<Option<f64>>,
22906 ) -> Result<Tensor, TchError> {
22907 let eps = eps.into();
22908 let mut c_tensors = [std::ptr::null_mut(); 1];
22909 unsafe_torch_err!(atg_logit_backward(
22910 c_tensors.as_mut_ptr(),
22911 grad_output.c_tensor,
22912 self.c_tensor,
22913 eps.unwrap_or(std::f64::NAN),
22914 eps.is_none() as i8
22915 ));
22916 Ok(Tensor { c_tensor: c_tensors[0] })
22917 }
22918
22919 pub fn f_logit_backward_grad_input(
22920 &self,
22921 grad_input: &Tensor,
22922 grad_output: &Tensor,
22923 eps: impl Into<Option<f64>>,
22924 ) -> Result<Tensor, TchError> {
22925 let eps = eps.into();
22926 let mut c_tensors = [std::ptr::null_mut(); 1];
22927 unsafe_torch_err!(atg_logit_backward_grad_input(
22928 c_tensors.as_mut_ptr(),
22929 grad_input.c_tensor,
22930 grad_output.c_tensor,
22931 self.c_tensor,
22932 eps.unwrap_or(std::f64::NAN),
22933 eps.is_none() as i8
22934 ));
22935 Ok(Tensor { c_tensor: c_tensors[0] })
22936 }
22937
22938 pub fn f_logit_out(
22939 &self,
22940 out: &Tensor,
22941 eps: impl Into<Option<f64>>,
22942 ) -> Result<Tensor, TchError> {
22943 let eps = eps.into();
22944 let mut c_tensors = [std::ptr::null_mut(); 1];
22945 unsafe_torch_err!(atg_logit_out(
22946 c_tensors.as_mut_ptr(),
22947 out.c_tensor,
22948 self.c_tensor,
22949 eps.unwrap_or(std::f64::NAN),
22950 eps.is_none() as i8
22951 ));
22952 Ok(Tensor { c_tensor: c_tensors[0] })
22953 }
22954
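    /// Fallible constructor returning `steps` values spaced evenly on a log scale
    /// from `base^start` to `base^end`, created with the given `(Kind, Device)`
    /// options.
    ///
    /// A minimal sketch (not a doctest). The `demo` wrapper is hypothetical, and
    /// it assumes `f64: Into<Scalar>` plus the `Kind::Float` / `Device::Cpu`
    /// variants of this crate's public enums:
    ///
    /// ```ignore
    /// fn demo() -> Result<Tensor, TchError> {
    ///     // Five points from 10^0 to 10^2 as f32 values on the CPU.
    ///     Tensor::f_logspace(0.0, 2.0, 5, 10.0, (Kind::Float, Device::Cpu))
    /// }
    /// ```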
22955 pub fn f_logspace<S: Into<Scalar>>(
22956 start: S,
22957 end: S,
22958 steps: i64,
22959 base: f64,
22960 options: (Kind, Device),
22961 ) -> Result<Tensor, TchError> {
22962 let mut c_tensors = [std::ptr::null_mut(); 1];
22963 unsafe_torch_err!(atg_logspace(
22964 c_tensors.as_mut_ptr(),
22965 start.into().c_scalar,
22966 end.into().c_scalar,
22967 steps,
22968 base,
22969 options.0.c_int(),
22970 options.1.c_int()
22971 ));
22972 Ok(Tensor { c_tensor: c_tensors[0] })
22973 }
22974
22975 pub fn f_logspace_out<S: Into<Scalar>>(
22976 out: &Tensor,
22977 start: S,
22978 end: S,
22979 steps: i64,
22980 base: f64,
22981 ) -> Result<Tensor, TchError> {
22982 let mut c_tensors = [std::ptr::null_mut(); 1];
22983 unsafe_torch_err!(atg_logspace_out(
22984 c_tensors.as_mut_ptr(),
22985 out.c_tensor,
22986 start.into().c_scalar,
22987 end.into().c_scalar,
22988 steps,
22989 base
22990 ));
22991 Ok(Tensor { c_tensor: c_tensors[0] })
22992 }
22993
22994 pub fn f_logspace_scalar_tensor<S: Into<Scalar>>(
22995 start: S,
22996 end: &Tensor,
22997 steps: i64,
22998 base: f64,
22999 options: (Kind, Device),
23000 ) -> Result<Tensor, TchError> {
23001 let mut c_tensors = [std::ptr::null_mut(); 1];
23002 unsafe_torch_err!(atg_logspace_scalar_tensor(
23003 c_tensors.as_mut_ptr(),
23004 start.into().c_scalar,
23005 end.c_tensor,
23006 steps,
23007 base,
23008 options.0.c_int(),
23009 options.1.c_int()
23010 ));
23011 Ok(Tensor { c_tensor: c_tensors[0] })
23012 }
23013
23014 pub fn f_logspace_scalar_tensor_out<S: Into<Scalar>>(
23015 out: &Tensor,
23016 start: S,
23017 end: &Tensor,
23018 steps: i64,
23019 base: f64,
23020 ) -> Result<Tensor, TchError> {
23021 let mut c_tensors = [std::ptr::null_mut(); 1];
23022 unsafe_torch_err!(atg_logspace_scalar_tensor_out(
23023 c_tensors.as_mut_ptr(),
23024 out.c_tensor,
23025 start.into().c_scalar,
23026 end.c_tensor,
23027 steps,
23028 base
23029 ));
23030 Ok(Tensor { c_tensor: c_tensors[0] })
23031 }
23032
23033 pub fn f_logspace_tensor_scalar<S: Into<Scalar>>(
23034 start: &Tensor,
23035 end: S,
23036 steps: i64,
23037 base: f64,
23038 options: (Kind, Device),
23039 ) -> Result<Tensor, TchError> {
23040 let mut c_tensors = [std::ptr::null_mut(); 1];
23041 unsafe_torch_err!(atg_logspace_tensor_scalar(
23042 c_tensors.as_mut_ptr(),
23043 start.c_tensor,
23044 end.into().c_scalar,
23045 steps,
23046 base,
23047 options.0.c_int(),
23048 options.1.c_int()
23049 ));
23050 Ok(Tensor { c_tensor: c_tensors[0] })
23051 }
23052
23053 pub fn f_logspace_tensor_scalar_out<S: Into<Scalar>>(
23054 out: &Tensor,
23055 start: &Tensor,
23056 end: S,
23057 steps: i64,
23058 base: f64,
23059 ) -> Result<Tensor, TchError> {
23060 let mut c_tensors = [std::ptr::null_mut(); 1];
23061 unsafe_torch_err!(atg_logspace_tensor_scalar_out(
23062 c_tensors.as_mut_ptr(),
23063 out.c_tensor,
23064 start.c_tensor,
23065 end.into().c_scalar,
23066 steps,
23067 base
23068 ));
23069 Ok(Tensor { c_tensor: c_tensors[0] })
23070 }
23071
23072 pub fn f_logspace_tensor_tensor(
23073 start: &Tensor,
23074 end: &Tensor,
23075 steps: i64,
23076 base: f64,
23077 options: (Kind, Device),
23078 ) -> Result<Tensor, TchError> {
23079 let mut c_tensors = [std::ptr::null_mut(); 1];
23080 unsafe_torch_err!(atg_logspace_tensor_tensor(
23081 c_tensors.as_mut_ptr(),
23082 start.c_tensor,
23083 end.c_tensor,
23084 steps,
23085 base,
23086 options.0.c_int(),
23087 options.1.c_int()
23088 ));
23089 Ok(Tensor { c_tensor: c_tensors[0] })
23090 }
23091
23092 pub fn f_logspace_tensor_tensor_out(
23093 out: &Tensor,
23094 start: &Tensor,
23095 end: &Tensor,
23096 steps: i64,
23097 base: f64,
23098 ) -> Result<Tensor, TchError> {
23099 let mut c_tensors = [std::ptr::null_mut(); 1];
23100 unsafe_torch_err!(atg_logspace_tensor_tensor_out(
23101 c_tensors.as_mut_ptr(),
23102 out.c_tensor,
23103 start.c_tensor,
23104 end.c_tensor,
23105 steps,
23106 base
23107 ));
23108 Ok(Tensor { c_tensor: c_tensors[0] })
23109 }
23110
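    /// Fallible, numerically stable `log(sum(exp(x)))` reduced over `dim`.
    ///
    /// A minimal sketch (not a doctest). The `demo` wrapper is hypothetical and
    /// it assumes that `&[i64]` satisfies the `IntList` bound, as in the upstream
    /// tch crate:
    ///
    /// ```ignore
    /// fn demo(xs: &Tensor, dims: &[i64]) -> Result<Tensor, TchError> {
    ///     // Reduce over `dims` while keeping the reduced dimensions.
    ///     xs.f_logsumexp(dims, true)
    /// }
    /// ```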
23111 pub fn f_logsumexp(&self, dim: impl IntList, keepdim: bool) -> Result<Tensor, TchError> {
23112 let mut c_tensors = [std::ptr::null_mut(); 1];
23113 unsafe_torch_err!(atg_logsumexp(
23114 c_tensors.as_mut_ptr(),
23115 self.c_tensor,
23116 dim.as_ptr(),
23117 dim.len_i32(),
23118 if keepdim { 1 } else { 0 }
23119 ));
23120 Ok(Tensor { c_tensor: c_tensors[0] })
23121 }
23122
23123 pub fn f_logsumexp_out(
23124 &self,
23125 out: &Tensor,
23126 dim: impl IntList,
23127 keepdim: bool,
23128 ) -> Result<Tensor, TchError> {
23129 let mut c_tensors = [std::ptr::null_mut(); 1];
23130 unsafe_torch_err!(atg_logsumexp_out(
23131 c_tensors.as_mut_ptr(),
23132 out.c_tensor,
23133 self.c_tensor,
23134 dim.as_ptr(),
23135 dim.len_i32(),
23136 if keepdim { 1 } else { 0 }
23137 ));
23138 Ok(Tensor { c_tensor: c_tensors[0] })
23139 }
23140
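    /// Fallible multi-layer LSTM over a padded sequence. `hx` holds the two
    /// initial states `[h0, c0]`, `params` the flattened weights (typically
    /// `w_ih`, `w_hh`, `b_ih`, `b_hh` per layer and direction, following
    /// PyTorch's flat-weights layout), and the result is `(output, h_n, c_n)`.
    ///
    /// A minimal sketch (not a doctest); the `demo` wrapper and the weight-layout
    /// remark are assumptions, only the signature below comes from the generated
    /// code:
    ///
    /// ```ignore
    /// fn demo(
    ///     input: &Tensor,      // (seq_len, batch, input_size) with batch_first = false
    ///     h0: &Tensor,
    ///     c0: &Tensor,
    ///     weights: &[&Tensor], // flattened per-layer weight/bias tensors
    /// ) -> Result<(Tensor, Tensor, Tensor), TchError> {
    ///     // Single-layer, unidirectional LSTM in inference mode, no dropout.
    ///     input.f_lstm(&[h0, c0], weights, true, 1, 0.0, false, false, false)
    /// }
    /// ```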
23141 pub fn f_lstm<T: Borrow<Tensor>>(
23142 &self,
23143 hx: &[T],
23144 params: &[T],
23145 has_biases: bool,
23146 num_layers: i64,
23147 dropout: f64,
23148 train: bool,
23149 bidirectional: bool,
23150 batch_first: bool,
23151 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
23152 let mut c_tensors = [std::ptr::null_mut(); 3];
23153 unsafe_torch_err!(atg_lstm(
23154 c_tensors.as_mut_ptr(),
23155 self.c_tensor,
23156 ptr_list(hx).as_ptr(),
23157 hx.len() as i32,
23158 ptr_list(params).as_ptr(),
23159 params.len() as i32,
23160 if has_biases { 1 } else { 0 },
23161 num_layers,
23162 dropout,
23163 if train { 1 } else { 0 },
23164 if bidirectional { 1 } else { 0 },
23165 if batch_first { 1 } else { 0 }
23166 ));
23167 Ok((
23168 Tensor { c_tensor: c_tensors[0] },
23169 Tensor { c_tensor: c_tensors[1] },
23170 Tensor { c_tensor: c_tensors[2] },
23171 ))
23172 }
23173
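    /// Fallible single LSTM cell step; `hx` is `[h, c]` and the two bias tensors
    /// are optional. Returns the new `(h, c)` pair.
    ///
    /// A minimal sketch (not a doctest); the `demo` wrapper is hypothetical:
    ///
    /// ```ignore
    /// fn demo(
    ///     input: &Tensor, // (batch, input_size)
    ///     h: &Tensor,
    ///     c: &Tensor,
    ///     w_ih: &Tensor,
    ///     w_hh: &Tensor,
    /// ) -> Result<(Tensor, Tensor), TchError> {
    ///     // Bias-free step: both optional bias tensors are passed as None.
    ///     input.f_lstm_cell(&[h, c], w_ih, w_hh, None, None)
    /// }
    /// ```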
23174 pub fn f_lstm_cell<T: Borrow<Tensor>>(
23175 &self,
23176 hx: &[T],
23177 w_ih: &Tensor,
23178 w_hh: &Tensor,
23179 b_ih: Option<T>,
23180 b_hh: Option<T>,
23181 ) -> Result<(Tensor, Tensor), TchError> {
23182 let mut c_tensors = [std::ptr::null_mut(); 2];
23183 unsafe_torch_err!(atg_lstm_cell(
23184 c_tensors.as_mut_ptr(),
23185 self.c_tensor,
23186 ptr_list(hx).as_ptr(),
23187 hx.len() as i32,
23188 w_ih.c_tensor,
23189 w_hh.c_tensor,
23190 b_ih.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
23191 b_hh.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
23192 ));
23193 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
23194 }
23195
23196 pub fn f_lstm_data<T: Borrow<Tensor>>(
23197 data: &Tensor,
23198 batch_sizes: &Tensor,
23199 hx: &[T],
23200 params: &[T],
23201 has_biases: bool,
23202 num_layers: i64,
23203 dropout: f64,
23204 train: bool,
23205 bidirectional: bool,
23206 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
23207 let mut c_tensors = [std::ptr::null_mut(); 3];
23208 unsafe_torch_err!(atg_lstm_data(
23209 c_tensors.as_mut_ptr(),
23210 data.c_tensor,
23211 batch_sizes.c_tensor,
23212 ptr_list(hx).as_ptr(),
23213 hx.len() as i32,
23214 ptr_list(params).as_ptr(),
23215 params.len() as i32,
23216 if has_biases { 1 } else { 0 },
23217 num_layers,
23218 dropout,
23219 if train { 1 } else { 0 },
23220 if bidirectional { 1 } else { 0 }
23221 ));
23222 Ok((
23223 Tensor { c_tensor: c_tensors[0] },
23224 Tensor { c_tensor: c_tensors[1] },
23225 Tensor { c_tensor: c_tensors[2] },
23226 ))
23227 }
23228
23229 pub fn f_lstm_mps_backward<T: Borrow<Tensor>>(
23230 &self,
23231 out0: &Tensor,
23232 out1: &[T],
23233 out2: &[T],
23234 grad_y: Option<T>,
23235 grad_hy: Option<T>,
23236 grad_cy: Option<T>,
23237 z_state: &Tensor,
23238 cell_state_fwd: &Tensor,
23239 layersoutputs: &Tensor,
23240 hx: &[T],
23241 params: &[T],
23242 has_biases: bool,
23243 num_layers: i64,
23244 dropout: f64,
23245 train: bool,
23246 bidirectional: bool,
23247 batch_first: bool,
23248 ) -> Result<(), TchError> {
23249 unsafe_torch_err!(atg_lstm_mps_backward(
23250 out0.c_tensor,
23251 ptr_list(out1).as_ptr(),
23252 out1.len() as i32,
23253 ptr_list(out2).as_ptr(),
23254 out2.len() as i32,
23255 grad_y.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
23256 grad_hy.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
23257 grad_cy.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
23258 z_state.c_tensor,
23259 cell_state_fwd.c_tensor,
23260 self.c_tensor,
23261 layersoutputs.c_tensor,
23262 ptr_list(hx).as_ptr(),
23263 hx.len() as i32,
23264 ptr_list(params).as_ptr(),
23265 params.len() as i32,
23266 if has_biases { 1 } else { 0 },
23267 num_layers,
23268 dropout,
23269 if train { 1 } else { 0 },
23270 if bidirectional { 1 } else { 0 },
23271 if batch_first { 1 } else { 0 }
23272 ));
23273 Ok(())
23274 }
23275
23276 pub fn f_lt<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
23277 let mut c_tensors = [std::ptr::null_mut(); 1];
23278 unsafe_torch_err!(atg_lt(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
23279 Ok(Tensor { c_tensor: c_tensors[0] })
23280 }
23281
23282 pub fn f_lt_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
23283 let mut c_tensors = [std::ptr::null_mut(); 1];
23284 unsafe_torch_err!(atg_lt_(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
23285 Ok(Tensor { c_tensor: c_tensors[0] })
23286 }
23287
23288 pub fn f_lt_scalar_out<S: Into<Scalar>>(
23289 &self,
23290 out: &Tensor,
23291 other: S,
23292 ) -> Result<Tensor, TchError> {
23293 let mut c_tensors = [std::ptr::null_mut(); 1];
23294 unsafe_torch_err!(atg_lt_scalar_out(
23295 c_tensors.as_mut_ptr(),
23296 out.c_tensor,
23297 self.c_tensor,
23298 other.into().c_scalar
23299 ));
23300 Ok(Tensor { c_tensor: c_tensors[0] })
23301 }
23302
23303 pub fn f_lt_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
23304 let mut c_tensors = [std::ptr::null_mut(); 1];
23305 unsafe_torch_err!(atg_lt_tensor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
23306 Ok(Tensor { c_tensor: c_tensors[0] })
23307 }
23308
23309 pub fn f_lt_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
23310 let mut c_tensors = [std::ptr::null_mut(); 1];
23311 unsafe_torch_err!(atg_lt_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
23312 Ok(Tensor { c_tensor: c_tensors[0] })
23313 }
23314
23315 pub fn f_lt_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
23316 let mut c_tensors = [std::ptr::null_mut(); 1];
23317 unsafe_torch_err!(atg_lt_tensor_out(
23318 c_tensors.as_mut_ptr(),
23319 out.c_tensor,
23320 self.c_tensor,
23321 other.c_tensor
23322 ));
23323 Ok(Tensor { c_tensor: c_tensors[0] })
23324 }
23325
23326 pub fn f_lu_solve(&self, lu_data: &Tensor, lu_pivots: &Tensor) -> Result<Tensor, TchError> {
23327 let mut c_tensors = [std::ptr::null_mut(); 1];
23328 unsafe_torch_err!(atg_lu_solve(
23329 c_tensors.as_mut_ptr(),
23330 self.c_tensor,
23331 lu_data.c_tensor,
23332 lu_pivots.c_tensor
23333 ));
23334 Ok(Tensor { c_tensor: c_tensors[0] })
23335 }
23336
23337 pub fn f_lu_solve_out(
23338 &self,
23339 out: &Tensor,
23340 lu_data: &Tensor,
23341 lu_pivots: &Tensor,
23342 ) -> Result<Tensor, TchError> {
23343 let mut c_tensors = [std::ptr::null_mut(); 1];
23344 unsafe_torch_err!(atg_lu_solve_out(
23345 c_tensors.as_mut_ptr(),
23346 out.c_tensor,
23347 self.c_tensor,
23348 lu_data.c_tensor,
23349 lu_pivots.c_tensor
23350 ));
23351 Ok(Tensor { c_tensor: c_tensors[0] })
23352 }
23353
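    /// Fallible unpacking of a packed LU factorization into its permutation,
    /// lower and upper factors `(P, L, U)`; the two flags control which parts
    /// are materialized.
    ///
    /// A minimal sketch (not a doctest); the `demo` wrapper is hypothetical:
    ///
    /// ```ignore
    /// fn demo(lu_data: &Tensor, lu_pivots: &Tensor) -> Result<(Tensor, Tensor, Tensor), TchError> {
    ///     // Materialize all three factors from the packed result.
    ///     Tensor::f_lu_unpack(lu_data, lu_pivots, true, true)
    /// }
    /// ```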
23354 pub fn f_lu_unpack(
23355 lu_data: &Tensor,
23356 lu_pivots: &Tensor,
23357 unpack_data: bool,
23358 unpack_pivots: bool,
23359 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
23360 let mut c_tensors = [std::ptr::null_mut(); 3];
23361 unsafe_torch_err!(atg_lu_unpack(
23362 c_tensors.as_mut_ptr(),
23363 lu_data.c_tensor,
23364 lu_pivots.c_tensor,
23365 if unpack_data { 1 } else { 0 },
23366 if unpack_pivots { 1 } else { 0 }
23367 ));
23368 Ok((
23369 Tensor { c_tensor: c_tensors[0] },
23370 Tensor { c_tensor: c_tensors[1] },
23371 Tensor { c_tensor: c_tensors[2] },
23372 ))
23373 }
23374
23375 pub fn f_lu_unpack_out(
23376 p: &Tensor,
23377 l: &Tensor,
23378 u: &Tensor,
23379 lu_data: &Tensor,
23380 lu_pivots: &Tensor,
23381 unpack_data: bool,
23382 unpack_pivots: bool,
23383 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
23384 let mut c_tensors = [std::ptr::null_mut(); 3];
23385 unsafe_torch_err!(atg_lu_unpack_out(
23386 c_tensors.as_mut_ptr(),
23387 p.c_tensor,
23388 l.c_tensor,
23389 u.c_tensor,
23390 lu_data.c_tensor,
23391 lu_pivots.c_tensor,
23392 if unpack_data { 1 } else { 0 },
23393 if unpack_pivots { 1 } else { 0 }
23394 ));
23395 Ok((
23396 Tensor { c_tensor: c_tensors[0] },
23397 Tensor { c_tensor: c_tensors[1] },
23398 Tensor { c_tensor: c_tensors[2] },
23399 ))
23400 }
23401
23402 pub fn f_margin_ranking_loss(
23403 input1: &Tensor,
23404 input2: &Tensor,
23405 target: &Tensor,
23406 margin: f64,
23407 reduction: crate::Reduction,
23408 ) -> Result<Tensor, TchError> {
23409 let mut c_tensors = [std::ptr::null_mut(); 1];
23410 unsafe_torch_err!(atg_margin_ranking_loss(
23411 c_tensors.as_mut_ptr(),
23412 input1.c_tensor,
23413 input2.c_tensor,
23414 target.c_tensor,
23415 margin,
23416 reduction.to_int()
23417 ));
23418 Ok(Tensor { c_tensor: c_tensors[0] })
23419 }
23420
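    /// Fallible out-of-place masked fill: elements where the boolean `mask` is
    /// true are replaced by the scalar `value`; `f_masked_fill_` below is the
    /// in-place variant.
    ///
    /// A minimal sketch (not a doctest); the `demo` wrapper is hypothetical and
    /// it assumes `f64: Into<Scalar>`:
    ///
    /// ```ignore
    /// fn demo(xs: &Tensor, mask: &Tensor) -> Result<Tensor, TchError> {
    ///     // Zero out every position selected by `mask`.
    ///     xs.f_masked_fill(mask, 0.0)
    /// }
    /// ```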
23421 pub fn f_masked_fill<S: Into<Scalar>>(
23422 &self,
23423 mask: &Tensor,
23424 value: S,
23425 ) -> Result<Tensor, TchError> {
23426 let mut c_tensors = [std::ptr::null_mut(); 1];
23427 unsafe_torch_err!(atg_masked_fill(
23428 c_tensors.as_mut_ptr(),
23429 self.c_tensor,
23430 mask.c_tensor,
23431 value.into().c_scalar
23432 ));
23433 Ok(Tensor { c_tensor: c_tensors[0] })
23434 }
23435
23436 pub fn f_masked_fill_<S: Into<Scalar>>(
23437 &mut self,
23438 mask: &Tensor,
23439 value: S,
23440 ) -> Result<Tensor, TchError> {
23441 let mut c_tensors = [std::ptr::null_mut(); 1];
23442 unsafe_torch_err!(atg_masked_fill_(
23443 c_tensors.as_mut_ptr(),
23444 self.c_tensor,
23445 mask.c_tensor,
23446 value.into().c_scalar
23447 ));
23448 Ok(Tensor { c_tensor: c_tensors[0] })
23449 }
23450
23451 pub fn f_masked_fill_scalar_out<S: Into<Scalar>>(
23452 &self,
23453 out: &Tensor,
23454 mask: &Tensor,
23455 value: S,
23456 ) -> Result<Tensor, TchError> {
23457 let mut c_tensors = [std::ptr::null_mut(); 1];
23458 unsafe_torch_err!(atg_masked_fill_scalar_out(
23459 c_tensors.as_mut_ptr(),
23460 out.c_tensor,
23461 self.c_tensor,
23462 mask.c_tensor,
23463 value.into().c_scalar
23464 ));
23465 Ok(Tensor { c_tensor: c_tensors[0] })
23466 }
23467
23468 pub fn f_masked_fill_tensor(&self, mask: &Tensor, value: &Tensor) -> Result<Tensor, TchError> {
23469 let mut c_tensors = [std::ptr::null_mut(); 1];
23470 unsafe_torch_err!(atg_masked_fill_tensor(
23471 c_tensors.as_mut_ptr(),
23472 self.c_tensor,
23473 mask.c_tensor,
23474 value.c_tensor
23475 ));
23476 Ok(Tensor { c_tensor: c_tensors[0] })
23477 }
23478
23479 pub fn f_masked_fill_tensor_(
23480 &mut self,
23481 mask: &Tensor,
23482 value: &Tensor,
23483 ) -> Result<Tensor, TchError> {
23484 let mut c_tensors = [std::ptr::null_mut(); 1];
23485 unsafe_torch_err!(atg_masked_fill_tensor_(
23486 c_tensors.as_mut_ptr(),
23487 self.c_tensor,
23488 mask.c_tensor,
23489 value.c_tensor
23490 ));
23491 Ok(Tensor { c_tensor: c_tensors[0] })
23492 }
23493
23494 pub fn f_masked_fill_tensor_out(
23495 &self,
23496 out: &Tensor,
23497 mask: &Tensor,
23498 value: &Tensor,
23499 ) -> Result<Tensor, TchError> {
23500 let mut c_tensors = [std::ptr::null_mut(); 1];
23501 unsafe_torch_err!(atg_masked_fill_tensor_out(
23502 c_tensors.as_mut_ptr(),
23503 out.c_tensor,
23504 self.c_tensor,
23505 mask.c_tensor,
23506 value.c_tensor
23507 ));
23508 Ok(Tensor { c_tensor: c_tensors[0] })
23509 }
23510
23511 pub fn f_masked_scatter(&self, mask: &Tensor, source: &Tensor) -> Result<Tensor, TchError> {
23512 let mut c_tensors = [std::ptr::null_mut(); 1];
23513 unsafe_torch_err!(atg_masked_scatter(
23514 c_tensors.as_mut_ptr(),
23515 self.c_tensor,
23516 mask.c_tensor,
23517 source.c_tensor
23518 ));
23519 Ok(Tensor { c_tensor: c_tensors[0] })
23520 }
23521
23522 pub fn f_masked_scatter_(
23523 &mut self,
23524 mask: &Tensor,
23525 source: &Tensor,
23526 ) -> Result<Tensor, TchError> {
23527 let mut c_tensors = [std::ptr::null_mut(); 1];
23528 unsafe_torch_err!(atg_masked_scatter_(
23529 c_tensors.as_mut_ptr(),
23530 self.c_tensor,
23531 mask.c_tensor,
23532 source.c_tensor
23533 ));
23534 Ok(Tensor { c_tensor: c_tensors[0] })
23535 }
23536
23537 pub fn f_masked_scatter_backward(
23538 grad_output: &Tensor,
23539 mask: &Tensor,
23540 sizes: impl IntList,
23541 ) -> Result<Tensor, TchError> {
23542 let mut c_tensors = [std::ptr::null_mut(); 1];
23543 unsafe_torch_err!(atg_masked_scatter_backward(
23544 c_tensors.as_mut_ptr(),
23545 grad_output.c_tensor,
23546 mask.c_tensor,
23547 sizes.as_ptr(),
23548 sizes.len_i32()
23549 ));
23550 Ok(Tensor { c_tensor: c_tensors[0] })
23551 }
23552
23553 pub fn f_masked_scatter_out(
23554 &self,
23555 out: &Tensor,
23556 mask: &Tensor,
23557 source: &Tensor,
23558 ) -> Result<Tensor, TchError> {
23559 let mut c_tensors = [std::ptr::null_mut(); 1];
23560 unsafe_torch_err!(atg_masked_scatter_out(
23561 c_tensors.as_mut_ptr(),
23562 out.c_tensor,
23563 self.c_tensor,
23564 mask.c_tensor,
23565 source.c_tensor
23566 ));
23567 Ok(Tensor { c_tensor: c_tensors[0] })
23568 }
23569
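    /// Fallible masked selection: returns a new 1-D tensor holding the elements
    /// of `self` at the positions where the boolean `mask` is true.
    ///
    /// A minimal sketch (not a doctest); the `demo` wrapper is hypothetical:
    ///
    /// ```ignore
    /// fn demo(xs: &Tensor, mask: &Tensor) -> Result<Tensor, TchError> {
    ///     // Flattened view of the selected elements.
    ///     xs.f_masked_select(mask)
    /// }
    /// ```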
23570 pub fn f_masked_select(&self, mask: &Tensor) -> Result<Tensor, TchError> {
23571 let mut c_tensors = [std::ptr::null_mut(); 1];
23572 unsafe_torch_err!(atg_masked_select(c_tensors.as_mut_ptr(), self.c_tensor, mask.c_tensor));
23573 Ok(Tensor { c_tensor: c_tensors[0] })
23574 }
23575
23576 pub fn f_masked_select_backward(
23577 &self,
23578 grad: &Tensor,
23579 mask: &Tensor,
23580 ) -> Result<Tensor, TchError> {
23581 let mut c_tensors = [std::ptr::null_mut(); 1];
23582 unsafe_torch_err!(atg_masked_select_backward(
23583 c_tensors.as_mut_ptr(),
23584 grad.c_tensor,
23585 self.c_tensor,
23586 mask.c_tensor
23587 ));
23588 Ok(Tensor { c_tensor: c_tensors[0] })
23589 }
23590
23591 pub fn f_masked_select_out(&self, out: &Tensor, mask: &Tensor) -> Result<Tensor, TchError> {
23592 let mut c_tensors = [std::ptr::null_mut(); 1];
23593 unsafe_torch_err!(atg_masked_select_out(
23594 c_tensors.as_mut_ptr(),
23595 out.c_tensor,
23596 self.c_tensor,
23597 mask.c_tensor
23598 ));
23599 Ok(Tensor { c_tensor: c_tensors[0] })
23600 }
23601
23602 pub fn f_matmul(&self, other: &Tensor) -> Result<Tensor, TchError> {
23603 let mut c_tensors = [std::ptr::null_mut(); 1];
23604 unsafe_torch_err!(atg_matmul(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
23605 Ok(Tensor { c_tensor: c_tensors[0] })
23606 }
23607
23608 pub fn f_matmul_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
23609 let mut c_tensors = [std::ptr::null_mut(); 1];
23610 unsafe_torch_err!(atg_matmul_out(
23611 c_tensors.as_mut_ptr(),
23612 out.c_tensor,
23613 self.c_tensor,
23614 other.c_tensor
23615 ));
23616 Ok(Tensor { c_tensor: c_tensors[0] })
23617 }
23618
23619 pub fn f_matrix_exp(&self) -> Result<Tensor, TchError> {
23620 let mut c_tensors = [std::ptr::null_mut(); 1];
23621 unsafe_torch_err!(atg_matrix_exp(c_tensors.as_mut_ptr(), self.c_tensor));
23622 Ok(Tensor { c_tensor: c_tensors[0] })
23623 }
23624
23625 pub fn f_matrix_exp_backward(&self, grad: &Tensor) -> Result<Tensor, TchError> {
23626 let mut c_tensors = [std::ptr::null_mut(); 1];
23627 unsafe_torch_err!(atg_matrix_exp_backward(
23628 c_tensors.as_mut_ptr(),
23629 self.c_tensor,
23630 grad.c_tensor
23631 ));
23632 Ok(Tensor { c_tensor: c_tensors[0] })
23633 }
23634
23635 pub fn f_matrix_h(&self) -> Result<Tensor, TchError> {
23636 let mut c_tensors = [std::ptr::null_mut(); 1];
23637 unsafe_torch_err!(atg_matrix_h(c_tensors.as_mut_ptr(), self.c_tensor));
23638 Ok(Tensor { c_tensor: c_tensors[0] })
23639 }
23640
23641 pub fn f_matrix_power(&self, n: i64) -> Result<Tensor, TchError> {
23642 let mut c_tensors = [std::ptr::null_mut(); 1];
23643 unsafe_torch_err!(atg_matrix_power(c_tensors.as_mut_ptr(), self.c_tensor, n));
23644 Ok(Tensor { c_tensor: c_tensors[0] })
23645 }
23646
23647 pub fn f_matrix_power_out(&self, out: &Tensor, n: i64) -> Result<Tensor, TchError> {
23648 let mut c_tensors = [std::ptr::null_mut(); 1];
23649 unsafe_torch_err!(atg_matrix_power_out(
23650 c_tensors.as_mut_ptr(),
23651 out.c_tensor,
23652 self.c_tensor,
23653 n
23654 ));
23655 Ok(Tensor { c_tensor: c_tensors[0] })
23656 }
23657
23658 pub fn f_max(&self) -> Result<Tensor, TchError> {
23659 let mut c_tensors = [std::ptr::null_mut(); 1];
23660 unsafe_torch_err!(atg_max(c_tensors.as_mut_ptr(), self.c_tensor));
23661 Ok(Tensor { c_tensor: c_tensors[0] })
23662 }
23663
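    /// Fallible maximum along `dim`, returning both the values and their indices
    /// in that dimension.
    ///
    /// A minimal sketch (not a doctest); the `demo` wrapper is hypothetical:
    ///
    /// ```ignore
    /// fn demo(xs: &Tensor) -> Result<(Tensor, Tensor), TchError> {
    ///     // Row-wise maxima of a 2-D tensor: (values, argmax indices).
    ///     xs.f_max_dim(1, false)
    /// }
    /// ```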
23664 pub fn f_max_dim(&self, dim: i64, keepdim: bool) -> Result<(Tensor, Tensor), TchError> {
23665 let mut c_tensors = [std::ptr::null_mut(); 2];
23666 unsafe_torch_err!(atg_max_dim(
23667 c_tensors.as_mut_ptr(),
23668 self.c_tensor,
23669 dim,
23670 if keepdim { 1 } else { 0 }
23671 ));
23672 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
23673 }
23674
23675 pub fn f_max_dim_max(
23676 &self,
23677 max: &Tensor,
23678 max_values: &Tensor,
23679 dim: i64,
23680 keepdim: bool,
23681 ) -> Result<(Tensor, Tensor), TchError> {
23682 let mut c_tensors = [std::ptr::null_mut(); 2];
23683 unsafe_torch_err!(atg_max_dim_max(
23684 c_tensors.as_mut_ptr(),
23685 max.c_tensor,
23686 max_values.c_tensor,
23687 self.c_tensor,
23688 dim,
23689 if keepdim { 1 } else { 0 }
23690 ));
23691 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
23692 }
23693
23694 pub fn f_max_other(&self, other: &Tensor) -> Result<Tensor, TchError> {
23695 let mut c_tensors = [std::ptr::null_mut(); 1];
23696 unsafe_torch_err!(atg_max_other(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
23697 Ok(Tensor { c_tensor: c_tensors[0] })
23698 }
23699
23700 pub fn f_max_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
23701 let mut c_tensors = [std::ptr::null_mut(); 1];
23702 unsafe_torch_err!(atg_max_out(
23703 c_tensors.as_mut_ptr(),
23704 out.c_tensor,
23705 self.c_tensor,
23706 other.c_tensor
23707 ));
23708 Ok(Tensor { c_tensor: c_tensors[0] })
23709 }
23710
23711 pub fn f_max_pool1d(
23712 &self,
23713 kernel_size: impl IntList,
23714 stride: impl IntList,
23715 padding: impl IntList,
23716 dilation: impl IntList,
23717 ceil_mode: bool,
23718 ) -> Result<Tensor, TchError> {
23719 let mut c_tensors = [std::ptr::null_mut(); 1];
23720 unsafe_torch_err!(atg_max_pool1d(
23721 c_tensors.as_mut_ptr(),
23722 self.c_tensor,
23723 kernel_size.as_ptr(),
23724 kernel_size.len_i32(),
23725 stride.as_ptr(),
23726 stride.len_i32(),
23727 padding.as_ptr(),
23728 padding.len_i32(),
23729 dilation.as_ptr(),
23730 dilation.len_i32(),
23731 if ceil_mode { 1 } else { 0 }
23732 ));
23733 Ok(Tensor { c_tensor: c_tensors[0] })
23734 }
23735
23736 pub fn f_max_pool1d_with_indices(
23737 &self,
23738 kernel_size: impl IntList,
23739 stride: impl IntList,
23740 padding: impl IntList,
23741 dilation: impl IntList,
23742 ceil_mode: bool,
23743 ) -> Result<(Tensor, Tensor), TchError> {
23744 let mut c_tensors = [std::ptr::null_mut(); 2];
23745 unsafe_torch_err!(atg_max_pool1d_with_indices(
23746 c_tensors.as_mut_ptr(),
23747 self.c_tensor,
23748 kernel_size.as_ptr(),
23749 kernel_size.len_i32(),
23750 stride.as_ptr(),
23751 stride.len_i32(),
23752 padding.as_ptr(),
23753 padding.len_i32(),
23754 dilation.as_ptr(),
23755 dilation.len_i32(),
23756 if ceil_mode { 1 } else { 0 }
23757 ));
23758 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
23759 }
23760
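    /// Fallible 2-D max pooling; `kernel_size`, `stride`, `padding` and
    /// `dilation` are passed as integer lists, and (as in PyTorch) an empty
    /// `stride` list falls back to the kernel size.
    ///
    /// A minimal sketch (not a doctest); the `demo` wrapper is hypothetical and
    /// it assumes `&[i64]` satisfies the `IntList` bound:
    ///
    /// ```ignore
    /// fn demo(xs: &Tensor) -> Result<Tensor, TchError> {
    ///     // 2x2 pooling with stride 2, no padding, unit dilation, floor rounding.
    ///     let ksize: &[i64] = &[2, 2];
    ///     let stride: &[i64] = &[2, 2];
    ///     let padding: &[i64] = &[0, 0];
    ///     let dilation: &[i64] = &[1, 1];
    ///     xs.f_max_pool2d(ksize, stride, padding, dilation, false)
    /// }
    /// ```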
23761 pub fn f_max_pool2d(
23762 &self,
23763 kernel_size: impl IntList,
23764 stride: impl IntList,
23765 padding: impl IntList,
23766 dilation: impl IntList,
23767 ceil_mode: bool,
23768 ) -> Result<Tensor, TchError> {
23769 let mut c_tensors = [std::ptr::null_mut(); 1];
23770 unsafe_torch_err!(atg_max_pool2d(
23771 c_tensors.as_mut_ptr(),
23772 self.c_tensor,
23773 kernel_size.as_ptr(),
23774 kernel_size.len_i32(),
23775 stride.as_ptr(),
23776 stride.len_i32(),
23777 padding.as_ptr(),
23778 padding.len_i32(),
23779 dilation.as_ptr(),
23780 dilation.len_i32(),
23781 if ceil_mode { 1 } else { 0 }
23782 ));
23783 Ok(Tensor { c_tensor: c_tensors[0] })
23784 }
23785
23786 pub fn f_max_pool2d_backward(
23787 &self,
23788 grad_output: &Tensor,
23789 kernel_size: impl IntList,
23790 stride: impl IntList,
23791 padding: impl IntList,
23792 dilation: impl IntList,
23793 ceil_mode: bool,
23794 ) -> Result<Tensor, TchError> {
23795 let mut c_tensors = [std::ptr::null_mut(); 1];
23796 unsafe_torch_err!(atg_max_pool2d_backward(
23797 c_tensors.as_mut_ptr(),
23798 grad_output.c_tensor,
23799 self.c_tensor,
23800 kernel_size.as_ptr(),
23801 kernel_size.len_i32(),
23802 stride.as_ptr(),
23803 stride.len_i32(),
23804 padding.as_ptr(),
23805 padding.len_i32(),
23806 dilation.as_ptr(),
23807 dilation.len_i32(),
23808 if ceil_mode { 1 } else { 0 }
23809 ));
23810 Ok(Tensor { c_tensor: c_tensors[0] })
23811 }
23812
23813 pub fn f_max_pool2d_backward_out(
23814 &self,
23815 out: &Tensor,
23816 grad_output: &Tensor,
23817 kernel_size: impl IntList,
23818 stride: impl IntList,
23819 padding: impl IntList,
23820 dilation: impl IntList,
23821 ceil_mode: bool,
23822 ) -> Result<Tensor, TchError> {
23823 let mut c_tensors = [std::ptr::null_mut(); 1];
23824 unsafe_torch_err!(atg_max_pool2d_backward_out(
23825 c_tensors.as_mut_ptr(),
23826 out.c_tensor,
23827 grad_output.c_tensor,
23828 self.c_tensor,
23829 kernel_size.as_ptr(),
23830 kernel_size.len_i32(),
23831 stride.as_ptr(),
23832 stride.len_i32(),
23833 padding.as_ptr(),
23834 padding.len_i32(),
23835 dilation.as_ptr(),
23836 dilation.len_i32(),
23837 if ceil_mode { 1 } else { 0 }
23838 ));
23839 Ok(Tensor { c_tensor: c_tensors[0] })
23840 }
23841
23842 pub fn f_max_pool2d_with_indices(
23843 &self,
23844 kernel_size: impl IntList,
23845 stride: impl IntList,
23846 padding: impl IntList,
23847 dilation: impl IntList,
23848 ceil_mode: bool,
23849 ) -> Result<(Tensor, Tensor), TchError> {
23850 let mut c_tensors = [std::ptr::null_mut(); 2];
23851 unsafe_torch_err!(atg_max_pool2d_with_indices(
23852 c_tensors.as_mut_ptr(),
23853 self.c_tensor,
23854 kernel_size.as_ptr(),
23855 kernel_size.len_i32(),
23856 stride.as_ptr(),
23857 stride.len_i32(),
23858 padding.as_ptr(),
23859 padding.len_i32(),
23860 dilation.as_ptr(),
23861 dilation.len_i32(),
23862 if ceil_mode { 1 } else { 0 }
23863 ));
23864 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
23865 }
23866
23867 pub fn f_max_pool2d_with_indices_backward(
23868 &self,
23869 grad_output: &Tensor,
23870 kernel_size: impl IntList,
23871 stride: impl IntList,
23872 padding: impl IntList,
23873 dilation: impl IntList,
23874 ceil_mode: bool,
23875 indices: &Tensor,
23876 ) -> Result<Tensor, TchError> {
23877 let mut c_tensors = [std::ptr::null_mut(); 1];
23878 unsafe_torch_err!(atg_max_pool2d_with_indices_backward(
23879 c_tensors.as_mut_ptr(),
23880 grad_output.c_tensor,
23881 self.c_tensor,
23882 kernel_size.as_ptr(),
23883 kernel_size.len_i32(),
23884 stride.as_ptr(),
23885 stride.len_i32(),
23886 padding.as_ptr(),
23887 padding.len_i32(),
23888 dilation.as_ptr(),
23889 dilation.len_i32(),
23890 if ceil_mode { 1 } else { 0 },
23891 indices.c_tensor
23892 ));
23893 Ok(Tensor { c_tensor: c_tensors[0] })
23894 }
23895
23896 pub fn f_max_pool2d_with_indices_backward_grad_input(
23897 &self,
23898 grad_input: &Tensor,
23899 grad_output: &Tensor,
23900 kernel_size: impl IntList,
23901 stride: impl IntList,
23902 padding: impl IntList,
23903 dilation: impl IntList,
23904 ceil_mode: bool,
23905 indices: &Tensor,
23906 ) -> Result<Tensor, TchError> {
23907 let mut c_tensors = [std::ptr::null_mut(); 1];
23908 unsafe_torch_err!(atg_max_pool2d_with_indices_backward_grad_input(
23909 c_tensors.as_mut_ptr(),
23910 grad_input.c_tensor,
23911 grad_output.c_tensor,
23912 self.c_tensor,
23913 kernel_size.as_ptr(),
23914 kernel_size.len_i32(),
23915 stride.as_ptr(),
23916 stride.len_i32(),
23917 padding.as_ptr(),
23918 padding.len_i32(),
23919 dilation.as_ptr(),
23920 dilation.len_i32(),
23921 if ceil_mode { 1 } else { 0 },
23922 indices.c_tensor
23923 ));
23924 Ok(Tensor { c_tensor: c_tensors[0] })
23925 }
23926
23927 pub fn f_max_pool2d_with_indices_out(
23928 &self,
23929 out: &Tensor,
23930 indices: &Tensor,
23931 kernel_size: impl IntList,
23932 stride: impl IntList,
23933 padding: impl IntList,
23934 dilation: impl IntList,
23935 ceil_mode: bool,
23936 ) -> Result<(Tensor, Tensor), TchError> {
23937 let mut c_tensors = [std::ptr::null_mut(); 2];
23938 unsafe_torch_err!(atg_max_pool2d_with_indices_out(
23939 c_tensors.as_mut_ptr(),
23940 out.c_tensor,
23941 indices.c_tensor,
23942 self.c_tensor,
23943 kernel_size.as_ptr(),
23944 kernel_size.len_i32(),
23945 stride.as_ptr(),
23946 stride.len_i32(),
23947 padding.as_ptr(),
23948 padding.len_i32(),
23949 dilation.as_ptr(),
23950 dilation.len_i32(),
23951 if ceil_mode { 1 } else { 0 }
23952 ));
23953 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
23954 }
23955
23956 pub fn f_max_pool3d(
23957 &self,
23958 kernel_size: impl IntList,
23959 stride: impl IntList,
23960 padding: impl IntList,
23961 dilation: impl IntList,
23962 ceil_mode: bool,
23963 ) -> Result<Tensor, TchError> {
23964 let mut c_tensors = [std::ptr::null_mut(); 1];
23965 unsafe_torch_err!(atg_max_pool3d(
23966 c_tensors.as_mut_ptr(),
23967 self.c_tensor,
23968 kernel_size.as_ptr(),
23969 kernel_size.len_i32(),
23970 stride.as_ptr(),
23971 stride.len_i32(),
23972 padding.as_ptr(),
23973 padding.len_i32(),
23974 dilation.as_ptr(),
23975 dilation.len_i32(),
23976 if ceil_mode { 1 } else { 0 }
23977 ));
23978 Ok(Tensor { c_tensor: c_tensors[0] })
23979 }
23980
23981 pub fn f_max_pool3d_with_indices(
23982 &self,
23983 kernel_size: impl IntList,
23984 stride: impl IntList,
23985 padding: impl IntList,
23986 dilation: impl IntList,
23987 ceil_mode: bool,
23988 ) -> Result<(Tensor, Tensor), TchError> {
23989 let mut c_tensors = [std::ptr::null_mut(); 2];
23990 unsafe_torch_err!(atg_max_pool3d_with_indices(
23991 c_tensors.as_mut_ptr(),
23992 self.c_tensor,
23993 kernel_size.as_ptr(),
23994 kernel_size.len_i32(),
23995 stride.as_ptr(),
23996 stride.len_i32(),
23997 padding.as_ptr(),
23998 padding.len_i32(),
23999 dilation.as_ptr(),
24000 dilation.len_i32(),
24001 if ceil_mode { 1 } else { 0 }
24002 ));
24003 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
24004 }
24005
24006 pub fn f_max_pool3d_with_indices_backward(
24007 &self,
24008 grad_output: &Tensor,
24009 kernel_size: impl IntList,
24010 stride: impl IntList,
24011 padding: impl IntList,
24012 dilation: impl IntList,
24013 ceil_mode: bool,
24014 indices: &Tensor,
24015 ) -> Result<Tensor, TchError> {
24016 let mut c_tensors = [std::ptr::null_mut(); 1];
24017 unsafe_torch_err!(atg_max_pool3d_with_indices_backward(
24018 c_tensors.as_mut_ptr(),
24019 grad_output.c_tensor,
24020 self.c_tensor,
24021 kernel_size.as_ptr(),
24022 kernel_size.len_i32(),
24023 stride.as_ptr(),
24024 stride.len_i32(),
24025 padding.as_ptr(),
24026 padding.len_i32(),
24027 dilation.as_ptr(),
24028 dilation.len_i32(),
24029 if ceil_mode { 1 } else { 0 },
24030 indices.c_tensor
24031 ));
24032 Ok(Tensor { c_tensor: c_tensors[0] })
24033 }
24034
24035 pub fn f_max_pool3d_with_indices_backward_grad_input(
24036 &self,
24037 grad_input: &Tensor,
24038 grad_output: &Tensor,
24039 kernel_size: impl IntList,
24040 stride: impl IntList,
24041 padding: impl IntList,
24042 dilation: impl IntList,
24043 ceil_mode: bool,
24044 indices: &Tensor,
24045 ) -> Result<Tensor, TchError> {
24046 let mut c_tensors = [std::ptr::null_mut(); 1];
24047 unsafe_torch_err!(atg_max_pool3d_with_indices_backward_grad_input(
24048 c_tensors.as_mut_ptr(),
24049 grad_input.c_tensor,
24050 grad_output.c_tensor,
24051 self.c_tensor,
24052 kernel_size.as_ptr(),
24053 kernel_size.len_i32(),
24054 stride.as_ptr(),
24055 stride.len_i32(),
24056 padding.as_ptr(),
24057 padding.len_i32(),
24058 dilation.as_ptr(),
24059 dilation.len_i32(),
24060 if ceil_mode { 1 } else { 0 },
24061 indices.c_tensor
24062 ));
24063 Ok(Tensor { c_tensor: c_tensors[0] })
24064 }
24065
24066 pub fn f_max_pool3d_with_indices_out(
24067 &self,
24068 out: &Tensor,
24069 indices: &Tensor,
24070 kernel_size: impl IntList,
24071 stride: impl IntList,
24072 padding: impl IntList,
24073 dilation: impl IntList,
24074 ceil_mode: bool,
24075 ) -> Result<(Tensor, Tensor), TchError> {
24076 let mut c_tensors = [std::ptr::null_mut(); 2];
24077 unsafe_torch_err!(atg_max_pool3d_with_indices_out(
24078 c_tensors.as_mut_ptr(),
24079 out.c_tensor,
24080 indices.c_tensor,
24081 self.c_tensor,
24082 kernel_size.as_ptr(),
24083 kernel_size.len_i32(),
24084 stride.as_ptr(),
24085 stride.len_i32(),
24086 padding.as_ptr(),
24087 padding.len_i32(),
24088 dilation.as_ptr(),
24089 dilation.len_i32(),
24090 if ceil_mode { 1 } else { 0 }
24091 ));
24092 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
24093 }
24094
24095 pub fn f_max_unary_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
24096 let mut c_tensors = [std::ptr::null_mut(); 1];
24097 unsafe_torch_err!(atg_max_unary_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
24098 Ok(Tensor { c_tensor: c_tensors[0] })
24099 }
24100
24101 pub fn f_max_unpool2d(
24102 &self,
24103 indices: &Tensor,
24104 output_size: impl IntList,
24105 ) -> Result<Tensor, TchError> {
24106 let mut c_tensors = [std::ptr::null_mut(); 1];
24107 unsafe_torch_err!(atg_max_unpool2d(
24108 c_tensors.as_mut_ptr(),
24109 self.c_tensor,
24110 indices.c_tensor,
24111 output_size.as_ptr(),
24112 output_size.len_i32()
24113 ));
24114 Ok(Tensor { c_tensor: c_tensors[0] })
24115 }
24116
24117 pub fn f_max_unpool2d_out(
24118 &self,
24119 out: &Tensor,
24120 indices: &Tensor,
24121 output_size: impl IntList,
24122 ) -> Result<Tensor, TchError> {
24123 let mut c_tensors = [std::ptr::null_mut(); 1];
24124 unsafe_torch_err!(atg_max_unpool2d_out(
24125 c_tensors.as_mut_ptr(),
24126 out.c_tensor,
24127 self.c_tensor,
24128 indices.c_tensor,
24129 output_size.as_ptr(),
24130 output_size.len_i32()
24131 ));
24132 Ok(Tensor { c_tensor: c_tensors[0] })
24133 }
24134
24135 pub fn f_max_unpool3d(
24136 &self,
24137 indices: &Tensor,
24138 output_size: impl IntList,
24139 stride: impl IntList,
24140 padding: impl IntList,
24141 ) -> Result<Tensor, TchError> {
24142 let mut c_tensors = [std::ptr::null_mut(); 1];
24143 unsafe_torch_err!(atg_max_unpool3d(
24144 c_tensors.as_mut_ptr(),
24145 self.c_tensor,
24146 indices.c_tensor,
24147 output_size.as_ptr(),
24148 output_size.len_i32(),
24149 stride.as_ptr(),
24150 stride.len_i32(),
24151 padding.as_ptr(),
24152 padding.len_i32()
24153 ));
24154 Ok(Tensor { c_tensor: c_tensors[0] })
24155 }
24156
24157 pub fn f_max_unpool3d_out(
24158 &self,
24159 out: &Tensor,
24160 indices: &Tensor,
24161 output_size: impl IntList,
24162 stride: impl IntList,
24163 padding: impl IntList,
24164 ) -> Result<Tensor, TchError> {
24165 let mut c_tensors = [std::ptr::null_mut(); 1];
24166 unsafe_torch_err!(atg_max_unpool3d_out(
24167 c_tensors.as_mut_ptr(),
24168 out.c_tensor,
24169 self.c_tensor,
24170 indices.c_tensor,
24171 output_size.as_ptr(),
24172 output_size.len_i32(),
24173 stride.as_ptr(),
24174 stride.len_i32(),
24175 padding.as_ptr(),
24176 padding.len_i32()
24177 ));
24178 Ok(Tensor { c_tensor: c_tensors[0] })
24179 }
24180
24181 pub fn f_maximum(&self, other: &Tensor) -> Result<Tensor, TchError> {
24182 let mut c_tensors = [std::ptr::null_mut(); 1];
24183 unsafe_torch_err!(atg_maximum(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
24184 Ok(Tensor { c_tensor: c_tensors[0] })
24185 }
24186
24187 pub fn f_maximum_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
24188 let mut c_tensors = [std::ptr::null_mut(); 1];
24189 unsafe_torch_err!(atg_maximum_out(
24190 c_tensors.as_mut_ptr(),
24191 out.c_tensor,
24192 self.c_tensor,
24193 other.c_tensor
24194 ));
24195 Ok(Tensor { c_tensor: c_tensors[0] })
24196 }
24197
24198 pub fn f_mean(&self, dtype: impl Into<Option<Kind>>) -> Result<Tensor, TchError> {
24199 let mut c_tensors = [std::ptr::null_mut(); 1];
24200 unsafe_torch_err!(atg_mean(
24201 c_tensors.as_mut_ptr(),
24202 self.c_tensor,
24203 dtype.into().map_or(-1, |s| s.c_int())
24204 ));
24205 Ok(Tensor { c_tensor: c_tensors[0] })
24206 }
24207
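    /// Fallible mean reduced over `dim`; the dimension list is optional at the C
    /// level (`IntListOption`) and `dtype` may be `None` to keep the input kind.
    ///
    /// A minimal sketch (not a doctest); the `demo` wrapper is hypothetical and
    /// it assumes that `&[i64]` satisfies the `IntListOption` bound, mirroring
    /// the upstream tch crate:
    ///
    /// ```ignore
    /// fn demo(xs: &Tensor) -> Result<Tensor, TchError> {
    ///     // Mean over the last dimension, keeping it, without changing dtype.
    ///     let dims: &[i64] = &[-1];
    ///     xs.f_mean_dim(dims, true, None)
    /// }
    /// ```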
24208 pub fn f_mean_dim(
24209 &self,
24210 dim: impl IntListOption,
24211 keepdim: bool,
24212 dtype: impl Into<Option<Kind>>,
24213 ) -> Result<Tensor, TchError> {
24214 let mut c_tensors = [std::ptr::null_mut(); 1];
24215 unsafe_torch_err!(atg_mean_dim(
24216 c_tensors.as_mut_ptr(),
24217 self.c_tensor,
24218 dim.as_ptr(),
24219 dim.len_i32(),
24220 if keepdim { 1 } else { 0 },
24221 dtype.into().map_or(-1, |s| s.c_int())
24222 ));
24223 Ok(Tensor { c_tensor: c_tensors[0] })
24224 }
24225
24226 pub fn f_mean_dtype_out(
24227 &self,
24228 out: &Tensor,
24229 dtype: impl Into<Option<Kind>>,
24230 ) -> Result<Tensor, TchError> {
24231 let mut c_tensors = [std::ptr::null_mut(); 1];
24232 unsafe_torch_err!(atg_mean_dtype_out(
24233 c_tensors.as_mut_ptr(),
24234 out.c_tensor,
24235 self.c_tensor,
24236 dtype.into().map_or(-1, |s| s.c_int())
24237 ));
24238 Ok(Tensor { c_tensor: c_tensors[0] })
24239 }
24240
24241 pub fn f_mean_out(
24242 &self,
24243 out: &Tensor,
24244 dim: impl IntListOption,
24245 keepdim: bool,
24246 dtype: impl Into<Option<Kind>>,
24247 ) -> Result<Tensor, TchError> {
24248 let mut c_tensors = [std::ptr::null_mut(); 1];
24249 unsafe_torch_err!(atg_mean_out(
24250 c_tensors.as_mut_ptr(),
24251 out.c_tensor,
24252 self.c_tensor,
24253 dim.as_ptr(),
24254 dim.len_i32(),
24255 if keepdim { 1 } else { 0 },
24256 dtype.into().map_or(-1, |s| s.c_int())
24257 ));
24258 Ok(Tensor { c_tensor: c_tensors[0] })
24259 }
24260
24261 pub fn f_median(&self) -> Result<Tensor, TchError> {
24262 let mut c_tensors = [std::ptr::null_mut(); 1];
24263 unsafe_torch_err!(atg_median(c_tensors.as_mut_ptr(), self.c_tensor));
24264 Ok(Tensor { c_tensor: c_tensors[0] })
24265 }
24266
24267 pub fn f_median_dim(&self, dim: i64, keepdim: bool) -> Result<(Tensor, Tensor), TchError> {
24268 let mut c_tensors = [std::ptr::null_mut(); 2];
24269 unsafe_torch_err!(atg_median_dim(
24270 c_tensors.as_mut_ptr(),
24271 self.c_tensor,
24272 dim,
24273 if keepdim { 1 } else { 0 }
24274 ));
24275 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
24276 }
24277
24278 pub fn f_median_dim_values(
24279 &self,
24280 values: &Tensor,
24281 indices: &Tensor,
24282 dim: i64,
24283 keepdim: bool,
24284 ) -> Result<(Tensor, Tensor), TchError> {
24285 let mut c_tensors = [std::ptr::null_mut(); 2];
24286 unsafe_torch_err!(atg_median_dim_values(
24287 c_tensors.as_mut_ptr(),
24288 values.c_tensor,
24289 indices.c_tensor,
24290 self.c_tensor,
24291 dim,
24292 if keepdim { 1 } else { 0 }
24293 ));
24294 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
24295 }
24296
24297 pub fn f_median_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
24298 let mut c_tensors = [std::ptr::null_mut(); 1];
24299 unsafe_torch_err!(atg_median_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
24300 Ok(Tensor { c_tensor: c_tensors[0] })
24301 }
24302
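    /// Fallible meshgrid: builds one coordinate grid per input tensor. The C
    /// side returns a NULL-terminated list that is walked and freed below; see
    /// `f_meshgrid_indexing` to choose between "ij" and "xy" ordering explicitly.
    ///
    /// A minimal sketch (not a doctest); the `demo` wrapper is hypothetical:
    ///
    /// ```ignore
    /// fn demo(xs: &Tensor, ys: &Tensor) -> Result<Vec<Tensor>, TchError> {
    ///     // Two grids, one per input, each broadcast to the joint shape.
    ///     Tensor::f_meshgrid(&[xs, ys])
    /// }
    /// ```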
24303 pub fn f_meshgrid<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Vec<Tensor>, TchError> {
24304 let c_tensors =
24305 unsafe_torch_err!(atg_meshgrid(ptr_list(tensors).as_ptr(), tensors.len() as i32));
24306 let mut r__ = vec![];
24307 let mut i = 0;
24308 loop {
24309 let c__ = unsafe { *c_tensors.add(i) };
24310 if c__.is_null() {
24311 break;
24312 }
24313 r__.push(Tensor { c_tensor: c__ });
24314 i += 1;
24315 }
24316 unsafe { libc::free(c_tensors as *mut libc::c_void) }
24317 Ok(r__)
24318 }
24319
24320 pub fn f_meshgrid_indexing<T: Borrow<Tensor>>(
24321 tensors: &[T],
24322 indexing: &str,
24323 ) -> Result<Vec<Tensor>, TchError> {
24324 let c_tensors = unsafe_torch_err!(atg_meshgrid_indexing(
24325 ptr_list(tensors).as_ptr(),
24326 tensors.len() as i32,
24327 indexing.as_ptr(),
24328 indexing.len() as i32
24329 ));
24330 let mut r__ = vec![];
24331 let mut i = 0;
24332 loop {
24333 let c__ = unsafe { *c_tensors.add(i) };
24334 if c__.is_null() {
24335 break;
24336 }
24337 r__.push(Tensor { c_tensor: c__ });
24338 i += 1;
24339 }
24340 unsafe { libc::free(c_tensors as *mut libc::c_void) }
24341 Ok(r__)
24342 }
24343
24344 pub fn f_mh(&self) -> Result<Tensor, TchError> {
24345 let mut c_tensors = [std::ptr::null_mut(); 1];
24346 unsafe_torch_err!(atg_mh(c_tensors.as_mut_ptr(), self.c_tensor));
24347 Ok(Tensor { c_tensor: c_tensors[0] })
24348 }
24349
24350 pub fn f_min(&self) -> Result<Tensor, TchError> {
24351 let mut c_tensors = [std::ptr::null_mut(); 1];
24352 unsafe_torch_err!(atg_min(c_tensors.as_mut_ptr(), self.c_tensor));
24353 Ok(Tensor { c_tensor: c_tensors[0] })
24354 }
24355
24356 pub fn f_min_dim(&self, dim: i64, keepdim: bool) -> Result<(Tensor, Tensor), TchError> {
24357 let mut c_tensors = [std::ptr::null_mut(); 2];
24358 unsafe_torch_err!(atg_min_dim(
24359 c_tensors.as_mut_ptr(),
24360 self.c_tensor,
24361 dim,
24362 if keepdim { 1 } else { 0 }
24363 ));
24364 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
24365 }
24366
24367 pub fn f_min_dim_min(
24368 &self,
24369 min: &Tensor,
24370 min_indices: &Tensor,
24371 dim: i64,
24372 keepdim: bool,
24373 ) -> Result<(Tensor, Tensor), TchError> {
24374 let mut c_tensors = [std::ptr::null_mut(); 2];
24375 unsafe_torch_err!(atg_min_dim_min(
24376 c_tensors.as_mut_ptr(),
24377 min.c_tensor,
24378 min_indices.c_tensor,
24379 self.c_tensor,
24380 dim,
24381 if keepdim { 1 } else { 0 }
24382 ));
24383 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
24384 }
24385
24386 pub fn f_min_other(&self, other: &Tensor) -> Result<Tensor, TchError> {
24387 let mut c_tensors = [std::ptr::null_mut(); 1];
24388 unsafe_torch_err!(atg_min_other(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
24389 Ok(Tensor { c_tensor: c_tensors[0] })
24390 }
24391
24392 pub fn f_min_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
24393 let mut c_tensors = [std::ptr::null_mut(); 1];
24394 unsafe_torch_err!(atg_min_out(
24395 c_tensors.as_mut_ptr(),
24396 out.c_tensor,
24397 self.c_tensor,
24398 other.c_tensor
24399 ));
24400 Ok(Tensor { c_tensor: c_tensors[0] })
24401 }
24402
24403 pub fn f_min_unary_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
24404 let mut c_tensors = [std::ptr::null_mut(); 1];
24405 unsafe_torch_err!(atg_min_unary_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
24406 Ok(Tensor { c_tensor: c_tensors[0] })
24407 }
24408
24409 pub fn f_minimum(&self, other: &Tensor) -> Result<Tensor, TchError> {
24410 let mut c_tensors = [std::ptr::null_mut(); 1];
24411 unsafe_torch_err!(atg_minimum(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
24412 Ok(Tensor { c_tensor: c_tensors[0] })
24413 }
24414
24415 pub fn f_minimum_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
24416 let mut c_tensors = [std::ptr::null_mut(); 1];
24417 unsafe_torch_err!(atg_minimum_out(
24418 c_tensors.as_mut_ptr(),
24419 out.c_tensor,
24420 self.c_tensor,
24421 other.c_tensor
24422 ));
24423 Ok(Tensor { c_tensor: c_tensors[0] })
24424 }
24425
24426 pub fn f_miopen_batch_norm<T: Borrow<Tensor>>(
24427 &self,
24428 weight: &Tensor,
24429 bias: Option<T>,
24430 running_mean: Option<T>,
24431 running_var: Option<T>,
24432 training: bool,
24433 exponential_average_factor: f64,
24434 epsilon: f64,
24435 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
24436 let mut c_tensors = [std::ptr::null_mut(); 3];
24437 unsafe_torch_err!(atg_miopen_batch_norm(
24438 c_tensors.as_mut_ptr(),
24439 self.c_tensor,
24440 weight.c_tensor,
24441 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24442 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24443 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24444 if training { 1 } else { 0 },
24445 exponential_average_factor,
24446 epsilon
24447 ));
24448 Ok((
24449 Tensor { c_tensor: c_tensors[0] },
24450 Tensor { c_tensor: c_tensors[1] },
24451 Tensor { c_tensor: c_tensors[2] },
24452 ))
24453 }
24454
24455 pub fn f_miopen_batch_norm_backward<T: Borrow<Tensor>>(
24456 &self,
24457 grad_output: &Tensor,
24458 weight: &Tensor,
24459 running_mean: Option<T>,
24460 running_var: Option<T>,
24461 save_mean: Option<T>,
24462 save_var: Option<T>,
24463 epsilon: f64,
24464 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
24465 let mut c_tensors = [std::ptr::null_mut(); 3];
24466 unsafe_torch_err!(atg_miopen_batch_norm_backward(
24467 c_tensors.as_mut_ptr(),
24468 self.c_tensor,
24469 grad_output.c_tensor,
24470 weight.c_tensor,
24471 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24472 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24473 save_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24474 save_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24475 epsilon
24476 ));
24477 Ok((
24478 Tensor { c_tensor: c_tensors[0] },
24479 Tensor { c_tensor: c_tensors[1] },
24480 Tensor { c_tensor: c_tensors[2] },
24481 ))
24482 }
24483
24484 pub fn f_miopen_batch_norm_backward_out<T: Borrow<Tensor>>(
24485 &self,
24486 out0: &Tensor,
24487 out1: &Tensor,
24488 out2: &Tensor,
24489 grad_output: &Tensor,
24490 weight: &Tensor,
24491 running_mean: Option<T>,
24492 running_var: Option<T>,
24493 save_mean: Option<T>,
24494 save_var: Option<T>,
24495 epsilon: f64,
24496 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
24497 let mut c_tensors = [std::ptr::null_mut(); 3];
24498 unsafe_torch_err!(atg_miopen_batch_norm_backward_out(
24499 c_tensors.as_mut_ptr(),
24500 out0.c_tensor,
24501 out1.c_tensor,
24502 out2.c_tensor,
24503 self.c_tensor,
24504 grad_output.c_tensor,
24505 weight.c_tensor,
24506 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24507 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24508 save_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24509 save_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24510 epsilon
24511 ));
24512 Ok((
24513 Tensor { c_tensor: c_tensors[0] },
24514 Tensor { c_tensor: c_tensors[1] },
24515 Tensor { c_tensor: c_tensors[2] },
24516 ))
24517 }
24518
24519 pub fn f_miopen_batch_norm_out<T: Borrow<Tensor>>(
24520 &self,
24521 out0: &Tensor,
24522 out1: &Tensor,
24523 out2: &Tensor,
24524 weight: &Tensor,
24525 bias: Option<T>,
24526 running_mean: Option<T>,
24527 running_var: Option<T>,
24528 training: bool,
24529 exponential_average_factor: f64,
24530 epsilon: f64,
24531 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
24532 let mut c_tensors = [std::ptr::null_mut(); 3];
24533 unsafe_torch_err!(atg_miopen_batch_norm_out(
24534 c_tensors.as_mut_ptr(),
24535 out0.c_tensor,
24536 out1.c_tensor,
24537 out2.c_tensor,
24538 self.c_tensor,
24539 weight.c_tensor,
24540 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24541 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24542 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24543 if training { 1 } else { 0 },
24544 exponential_average_factor,
24545 epsilon
24546 ));
24547 Ok((
24548 Tensor { c_tensor: c_tensors[0] },
24549 Tensor { c_tensor: c_tensors[1] },
24550 Tensor { c_tensor: c_tensors[2] },
24551 ))
24552 }
24553
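    /// Convolution executed through the MIOpen backend (AMD ROCm builds of
    /// libtorch). `benchmark` and `deterministic` control MIOpen's algorithm
    /// selection, mirroring the cuDNN flags of the same names. Most callers
    /// will prefer the generic convolution wrappers and let ATen dispatch to
    /// MIOpen when it is available.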
24554 pub fn f_miopen_convolution<T: Borrow<Tensor>>(
24555 &self,
24556 weight: &Tensor,
24557 bias: Option<T>,
24558 padding: impl IntList,
24559 stride: impl IntList,
24560 dilation: impl IntList,
24561 groups: i64,
24562 benchmark: bool,
24563 deterministic: bool,
24564 ) -> Result<Tensor, TchError> {
24565 let mut c_tensors = [std::ptr::null_mut(); 1];
24566 unsafe_torch_err!(atg_miopen_convolution(
24567 c_tensors.as_mut_ptr(),
24568 self.c_tensor,
24569 weight.c_tensor,
24570 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24571 padding.as_ptr(),
24572 padding.len_i32(),
24573 stride.as_ptr(),
24574 stride.len_i32(),
24575 dilation.as_ptr(),
24576 dilation.len_i32(),
24577 groups,
24578 if benchmark { 1 } else { 0 },
24579 if deterministic { 1 } else { 0 }
24580 ));
24581 Ok(Tensor { c_tensor: c_tensors[0] })
24582 }
24583
24584 pub fn f_miopen_convolution_add_relu<T: Borrow<Tensor>, S: Into<Scalar>>(
24585 &self,
24586 weight: &Tensor,
24587 z: &Tensor,
24588 alpha: S,
24589 bias: Option<T>,
24590 stride: impl IntList,
24591 padding: impl IntList,
24592 dilation: impl IntList,
24593 groups: i64,
24594 ) -> Result<Tensor, TchError> {
24595 let mut c_tensors = [std::ptr::null_mut(); 1];
24596 unsafe_torch_err!(atg_miopen_convolution_add_relu(
24597 c_tensors.as_mut_ptr(),
24598 self.c_tensor,
24599 weight.c_tensor,
24600 z.c_tensor,
24601 alpha.into().c_scalar,
24602 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24603 stride.as_ptr(),
24604 stride.len_i32(),
24605 padding.as_ptr(),
24606 padding.len_i32(),
24607 dilation.as_ptr(),
24608 dilation.len_i32(),
24609 groups
24610 ));
24611 Ok(Tensor { c_tensor: c_tensors[0] })
24612 }
24613
24614 pub fn f_miopen_convolution_out<T: Borrow<Tensor>>(
24615 &self,
24616 out: &Tensor,
24617 weight: &Tensor,
24618 bias: Option<T>,
24619 padding: impl IntList,
24620 stride: impl IntList,
24621 dilation: impl IntList,
24622 groups: i64,
24623 benchmark: bool,
24624 deterministic: bool,
24625 ) -> Result<Tensor, TchError> {
24626 let mut c_tensors = [std::ptr::null_mut(); 1];
24627 unsafe_torch_err!(atg_miopen_convolution_out(
24628 c_tensors.as_mut_ptr(),
24629 out.c_tensor,
24630 self.c_tensor,
24631 weight.c_tensor,
24632 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24633 padding.as_ptr(),
24634 padding.len_i32(),
24635 stride.as_ptr(),
24636 stride.len_i32(),
24637 dilation.as_ptr(),
24638 dilation.len_i32(),
24639 groups,
24640 if benchmark { 1 } else { 0 },
24641 if deterministic { 1 } else { 0 }
24642 ));
24643 Ok(Tensor { c_tensor: c_tensors[0] })
24644 }
24645
24646 pub fn f_miopen_convolution_relu<T: Borrow<Tensor>>(
24647 &self,
24648 weight: &Tensor,
24649 bias: Option<T>,
24650 stride: impl IntList,
24651 padding: impl IntList,
24652 dilation: impl IntList,
24653 groups: i64,
24654 ) -> Result<Tensor, TchError> {
24655 let mut c_tensors = [std::ptr::null_mut(); 1];
24656 unsafe_torch_err!(atg_miopen_convolution_relu(
24657 c_tensors.as_mut_ptr(),
24658 self.c_tensor,
24659 weight.c_tensor,
24660 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24661 stride.as_ptr(),
24662 stride.len_i32(),
24663 padding.as_ptr(),
24664 padding.len_i32(),
24665 dilation.as_ptr(),
24666 dilation.len_i32(),
24667 groups
24668 ));
24669 Ok(Tensor { c_tensor: c_tensors[0] })
24670 }
24671
24672 pub fn f_miopen_convolution_transpose<T: Borrow<Tensor>>(
24673 &self,
24674 weight: &Tensor,
24675 bias: Option<T>,
24676 padding: impl IntList,
24677 output_padding: impl IntList,
24678 stride: impl IntList,
24679 dilation: impl IntList,
24680 groups: i64,
24681 benchmark: bool,
24682 deterministic: bool,
24683 ) -> Result<Tensor, TchError> {
24684 let mut c_tensors = [std::ptr::null_mut(); 1];
24685 unsafe_torch_err!(atg_miopen_convolution_transpose(
24686 c_tensors.as_mut_ptr(),
24687 self.c_tensor,
24688 weight.c_tensor,
24689 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24690 padding.as_ptr(),
24691 padding.len_i32(),
24692 output_padding.as_ptr(),
24693 output_padding.len_i32(),
24694 stride.as_ptr(),
24695 stride.len_i32(),
24696 dilation.as_ptr(),
24697 dilation.len_i32(),
24698 groups,
24699 if benchmark { 1 } else { 0 },
24700 if deterministic { 1 } else { 0 }
24701 ));
24702 Ok(Tensor { c_tensor: c_tensors[0] })
24703 }
24704
24705 pub fn f_miopen_convolution_transpose_out<T: Borrow<Tensor>>(
24706 &self,
24707 out: &Tensor,
24708 weight: &Tensor,
24709 bias: Option<T>,
24710 padding: impl IntList,
24711 output_padding: impl IntList,
24712 stride: impl IntList,
24713 dilation: impl IntList,
24714 groups: i64,
24715 benchmark: bool,
24716 deterministic: bool,
24717 ) -> Result<Tensor, TchError> {
24718 let mut c_tensors = [std::ptr::null_mut(); 1];
24719 unsafe_torch_err!(atg_miopen_convolution_transpose_out(
24720 c_tensors.as_mut_ptr(),
24721 out.c_tensor,
24722 self.c_tensor,
24723 weight.c_tensor,
24724 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24725 padding.as_ptr(),
24726 padding.len_i32(),
24727 output_padding.as_ptr(),
24728 output_padding.len_i32(),
24729 stride.as_ptr(),
24730 stride.len_i32(),
24731 dilation.as_ptr(),
24732 dilation.len_i32(),
24733 groups,
24734 if benchmark { 1 } else { 0 },
24735 if deterministic { 1 } else { 0 }
24736 ));
24737 Ok(Tensor { c_tensor: c_tensors[0] })
24738 }
24739
24740 pub fn f_miopen_depthwise_convolution<T: Borrow<Tensor>>(
24741 &self,
24742 weight: &Tensor,
24743 bias: Option<T>,
24744 padding: impl IntList,
24745 stride: impl IntList,
24746 dilation: impl IntList,
24747 groups: i64,
24748 benchmark: bool,
24749 deterministic: bool,
24750 ) -> Result<Tensor, TchError> {
24751 let mut c_tensors = [std::ptr::null_mut(); 1];
24752 unsafe_torch_err!(atg_miopen_depthwise_convolution(
24753 c_tensors.as_mut_ptr(),
24754 self.c_tensor,
24755 weight.c_tensor,
24756 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24757 padding.as_ptr(),
24758 padding.len_i32(),
24759 stride.as_ptr(),
24760 stride.len_i32(),
24761 dilation.as_ptr(),
24762 dilation.len_i32(),
24763 groups,
24764 if benchmark { 1 } else { 0 },
24765 if deterministic { 1 } else { 0 }
24766 ));
24767 Ok(Tensor { c_tensor: c_tensors[0] })
24768 }
24769
24770 pub fn f_miopen_depthwise_convolution_out<T: Borrow<Tensor>>(
24771 &self,
24772 out: &Tensor,
24773 weight: &Tensor,
24774 bias: Option<T>,
24775 padding: impl IntList,
24776 stride: impl IntList,
24777 dilation: impl IntList,
24778 groups: i64,
24779 benchmark: bool,
24780 deterministic: bool,
24781 ) -> Result<Tensor, TchError> {
24782 let mut c_tensors = [std::ptr::null_mut(); 1];
24783 unsafe_torch_err!(atg_miopen_depthwise_convolution_out(
24784 c_tensors.as_mut_ptr(),
24785 out.c_tensor,
24786 self.c_tensor,
24787 weight.c_tensor,
24788 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24789 padding.as_ptr(),
24790 padding.len_i32(),
24791 stride.as_ptr(),
24792 stride.len_i32(),
24793 dilation.as_ptr(),
24794 dilation.len_i32(),
24795 groups,
24796 if benchmark { 1 } else { 0 },
24797 if deterministic { 1 } else { 0 }
24798 ));
24799 Ok(Tensor { c_tensor: c_tensors[0] })
24800 }
24801
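    /// Fused RNN forward pass through the MIOpen backend (AMD ROCm). `mode`
    /// selects the cell type using the ATen convention (0 = RNN-ReLU,
    /// 1 = RNN-tanh, 2 = LSTM, 3 = GRU). The five results are, roughly, the
    /// sequence output, the final hidden and cell states, and two internal
    /// buffers (reserve space and packed weights) reused by the backward pass.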
24802 pub fn f_miopen_rnn<T: Borrow<Tensor>>(
24803 &self,
24804 weight: &[T],
24805 weight_stride0: i64,
24806 hx: &Tensor,
24807 cx: Option<T>,
24808 mode: i64,
24809 hidden_size: i64,
24810 num_layers: i64,
24811 batch_first: bool,
24812 dropout: f64,
24813 train: bool,
24814 bidirectional: bool,
24815 batch_sizes: impl IntList,
24816 dropout_state: Option<T>,
24817 ) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
24818 let mut c_tensors = [std::ptr::null_mut(); 5];
24819 unsafe_torch_err!(atg_miopen_rnn(
24820 c_tensors.as_mut_ptr(),
24821 self.c_tensor,
24822 ptr_list(weight).as_ptr(),
24823 weight.len() as i32,
24824 weight_stride0,
24825 hx.c_tensor,
24826 cx.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24827 mode,
24828 hidden_size,
24829 num_layers,
24830 if batch_first { 1 } else { 0 },
24831 dropout,
24832 if train { 1 } else { 0 },
24833 if bidirectional { 1 } else { 0 },
24834 batch_sizes.as_ptr(),
24835 batch_sizes.len_i32(),
24836 dropout_state.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
24837 ));
24838 Ok((
24839 Tensor { c_tensor: c_tensors[0] },
24840 Tensor { c_tensor: c_tensors[1] },
24841 Tensor { c_tensor: c_tensors[2] },
24842 Tensor { c_tensor: c_tensors[3] },
24843 Tensor { c_tensor: c_tensors[4] },
24844 ))
24845 }
24846
24847 pub fn f_miopen_rnn_out<T: Borrow<Tensor>>(
24848 &self,
24849 out0: &Tensor,
24850 out1: &Tensor,
24851 out2: &Tensor,
24852 out3: &Tensor,
24853 out4: &Tensor,
24854 weight: &[T],
24855 weight_stride0: i64,
24856 hx: &Tensor,
24857 cx: Option<T>,
24858 mode: i64,
24859 hidden_size: i64,
24860 num_layers: i64,
24861 batch_first: bool,
24862 dropout: f64,
24863 train: bool,
24864 bidirectional: bool,
24865 batch_sizes: impl IntList,
24866 dropout_state: Option<T>,
24867 ) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
24868 let mut c_tensors = [std::ptr::null_mut(); 5];
24869 unsafe_torch_err!(atg_miopen_rnn_out(
24870 c_tensors.as_mut_ptr(),
24871 out0.c_tensor,
24872 out1.c_tensor,
24873 out2.c_tensor,
24874 out3.c_tensor,
24875 out4.c_tensor,
24876 self.c_tensor,
24877 ptr_list(weight).as_ptr(),
24878 weight.len() as i32,
24879 weight_stride0,
24880 hx.c_tensor,
24881 cx.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
24882 mode,
24883 hidden_size,
24884 num_layers,
24885 if batch_first { 1 } else { 0 },
24886 dropout,
24887 if train { 1 } else { 0 },
24888 if bidirectional { 1 } else { 0 },
24889 batch_sizes.as_ptr(),
24890 batch_sizes.len_i32(),
24891 dropout_state.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
24892 ));
24893 Ok((
24894 Tensor { c_tensor: c_tensors[0] },
24895 Tensor { c_tensor: c_tensors[1] },
24896 Tensor { c_tensor: c_tensors[2] },
24897 Tensor { c_tensor: c_tensors[3] },
24898 Tensor { c_tensor: c_tensors[4] },
24899 ))
24900 }
24901
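    /// Mish activation, `x * tanh(softplus(x))`, applied element-wise.
    ///
    /// A minimal sketch; the `tch`-style crate-root re-export of `Tensor`, the
    /// `Tensor::from_slice` constructor and a `Result`-returning context for
    /// `?` are assumptions of this example, not guarantees of this file:
    ///
    /// ```ignore
    /// let x = Tensor::from_slice(&[-1.0f32, 0.0, 2.0]);
    /// let y = x.f_mish()?; // same shape as `x`, Mish applied per element
    /// ```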
24902 pub fn f_mish(&self) -> Result<Tensor, TchError> {
24903 let mut c_tensors = [std::ptr::null_mut(); 1];
24904 unsafe_torch_err!(atg_mish(c_tensors.as_mut_ptr(), self.c_tensor));
24905 Ok(Tensor { c_tensor: c_tensors[0] })
24906 }
24907
24908 pub fn f_mish_(&mut self) -> Result<Tensor, TchError> {
24909 let mut c_tensors = [std::ptr::null_mut(); 1];
24910 unsafe_torch_err!(atg_mish_(c_tensors.as_mut_ptr(), self.c_tensor));
24911 Ok(Tensor { c_tensor: c_tensors[0] })
24912 }
24913
24914 pub fn f_mish_backward(&self, grad_output: &Tensor) -> Result<Tensor, TchError> {
24915 let mut c_tensors = [std::ptr::null_mut(); 1];
24916 unsafe_torch_err!(atg_mish_backward(
24917 c_tensors.as_mut_ptr(),
24918 grad_output.c_tensor,
24919 self.c_tensor
24920 ));
24921 Ok(Tensor { c_tensor: c_tensors[0] })
24922 }
24923
24924 pub fn f_mish_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
24925 let mut c_tensors = [std::ptr::null_mut(); 1];
24926 unsafe_torch_err!(atg_mish_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
24927 Ok(Tensor { c_tensor: c_tensors[0] })
24928 }
24929
24930 pub fn f_mkldnn_adaptive_avg_pool2d(
24931 &self,
24932 output_size: impl IntList,
24933 ) -> Result<Tensor, TchError> {
24934 let mut c_tensors = [std::ptr::null_mut(); 1];
24935 unsafe_torch_err!(atg_mkldnn_adaptive_avg_pool2d(
24936 c_tensors.as_mut_ptr(),
24937 self.c_tensor,
24938 output_size.as_ptr(),
24939 output_size.len_i32()
24940 ));
24941 Ok(Tensor { c_tensor: c_tensors[0] })
24942 }
24943
24944 pub fn f_mkldnn_adaptive_avg_pool2d_backward(
24945 &self,
24946 grad_output: &Tensor,
24947 ) -> Result<Tensor, TchError> {
24948 let mut c_tensors = [std::ptr::null_mut(); 1];
24949 unsafe_torch_err!(atg_mkldnn_adaptive_avg_pool2d_backward(
24950 c_tensors.as_mut_ptr(),
24951 grad_output.c_tensor,
24952 self.c_tensor
24953 ));
24954 Ok(Tensor { c_tensor: c_tensors[0] })
24955 }
24956
24957 pub fn f_mkldnn_adaptive_avg_pool2d_backward_out(
24958 &self,
24959 out: &Tensor,
24960 grad_output: &Tensor,
24961 ) -> Result<Tensor, TchError> {
24962 let mut c_tensors = [std::ptr::null_mut(); 1];
24963 unsafe_torch_err!(atg_mkldnn_adaptive_avg_pool2d_backward_out(
24964 c_tensors.as_mut_ptr(),
24965 out.c_tensor,
24966 grad_output.c_tensor,
24967 self.c_tensor
24968 ));
24969 Ok(Tensor { c_tensor: c_tensors[0] })
24970 }
24971
24972 pub fn f_mkldnn_adaptive_avg_pool2d_out(
24973 &self,
24974 out: &Tensor,
24975 output_size: impl IntList,
24976 ) -> Result<Tensor, TchError> {
24977 let mut c_tensors = [std::ptr::null_mut(); 1];
24978 unsafe_torch_err!(atg_mkldnn_adaptive_avg_pool2d_out(
24979 c_tensors.as_mut_ptr(),
24980 out.c_tensor,
24981 self.c_tensor,
24982 output_size.as_ptr(),
24983 output_size.len_i32()
24984 ));
24985 Ok(Tensor { c_tensor: c_tensors[0] })
24986 }
24987
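    /// Convolution executed by the oneDNN (formerly MKL-DNN) CPU backend,
    /// typically used with tensors in the opaque MKL-DNN layout produced by
    /// `to_mkldnn`. For ordinary strided CPU tensors the regular convolution
    /// wrappers are usually the better entry point.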
24988 pub fn f_mkldnn_convolution<T: Borrow<Tensor>>(
24989 &self,
24990 weight: &Tensor,
24991 bias: Option<T>,
24992 padding: impl IntList,
24993 stride: impl IntList,
24994 dilation: impl IntList,
24995 groups: i64,
24996 ) -> Result<Tensor, TchError> {
24997 let mut c_tensors = [std::ptr::null_mut(); 1];
24998 unsafe_torch_err!(atg_mkldnn_convolution(
24999 c_tensors.as_mut_ptr(),
25000 self.c_tensor,
25001 weight.c_tensor,
25002 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
25003 padding.as_ptr(),
25004 padding.len_i32(),
25005 stride.as_ptr(),
25006 stride.len_i32(),
25007 dilation.as_ptr(),
25008 dilation.len_i32(),
25009 groups
25010 ));
25011 Ok(Tensor { c_tensor: c_tensors[0] })
25012 }
25013
25014 pub fn f_mkldnn_convolution_out<T: Borrow<Tensor>>(
25015 &self,
25016 out: &Tensor,
25017 weight: &Tensor,
25018 bias: Option<T>,
25019 padding: impl IntList,
25020 stride: impl IntList,
25021 dilation: impl IntList,
25022 groups: i64,
25023 ) -> Result<Tensor, TchError> {
25024 let mut c_tensors = [std::ptr::null_mut(); 1];
25025 unsafe_torch_err!(atg_mkldnn_convolution_out(
25026 c_tensors.as_mut_ptr(),
25027 out.c_tensor,
25028 self.c_tensor,
25029 weight.c_tensor,
25030 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
25031 padding.as_ptr(),
25032 padding.len_i32(),
25033 stride.as_ptr(),
25034 stride.len_i32(),
25035 dilation.as_ptr(),
25036 dilation.len_i32(),
25037 groups
25038 ));
25039 Ok(Tensor { c_tensor: c_tensors[0] })
25040 }
25041
25042 pub fn f_mkldnn_linear<T: Borrow<Tensor>>(
25043 &self,
25044 weight: &Tensor,
25045 bias: Option<T>,
25046 ) -> Result<Tensor, TchError> {
25047 let mut c_tensors = [std::ptr::null_mut(); 1];
25048 unsafe_torch_err!(atg_mkldnn_linear(
25049 c_tensors.as_mut_ptr(),
25050 self.c_tensor,
25051 weight.c_tensor,
25052 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
25053 ));
25054 Ok(Tensor { c_tensor: c_tensors[0] })
25055 }
25056
25057 pub fn f_mkldnn_linear_backward_input(
25058 input_size: impl IntList,
25059 grad_output: &Tensor,
25060 weight: &Tensor,
25061 ) -> Result<Tensor, TchError> {
25062 let mut c_tensors = [std::ptr::null_mut(); 1];
25063 unsafe_torch_err!(atg_mkldnn_linear_backward_input(
25064 c_tensors.as_mut_ptr(),
25065 input_size.as_ptr(),
25066 input_size.len_i32(),
25067 grad_output.c_tensor,
25068 weight.c_tensor
25069 ));
25070 Ok(Tensor { c_tensor: c_tensors[0] })
25071 }
25072
25073 pub fn f_mkldnn_linear_backward_input_out(
25074 out: &Tensor,
25075 input_size: impl IntList,
25076 grad_output: &Tensor,
25077 weight: &Tensor,
25078 ) -> Result<Tensor, TchError> {
25079 let mut c_tensors = [std::ptr::null_mut(); 1];
25080 unsafe_torch_err!(atg_mkldnn_linear_backward_input_out(
25081 c_tensors.as_mut_ptr(),
25082 out.c_tensor,
25083 input_size.as_ptr(),
25084 input_size.len_i32(),
25085 grad_output.c_tensor,
25086 weight.c_tensor
25087 ));
25088 Ok(Tensor { c_tensor: c_tensors[0] })
25089 }
25090
25091 pub fn f_mkldnn_linear_backward_weights(
25092 &self,
25093 grad_output: &Tensor,
25094 weight: &Tensor,
25095 bias_defined: bool,
25096 ) -> Result<(Tensor, Tensor), TchError> {
25097 let mut c_tensors = [std::ptr::null_mut(); 2];
25098 unsafe_torch_err!(atg_mkldnn_linear_backward_weights(
25099 c_tensors.as_mut_ptr(),
25100 grad_output.c_tensor,
25101 self.c_tensor,
25102 weight.c_tensor,
25103 if bias_defined { 1 } else { 0 }
25104 ));
25105 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
25106 }
25107
25108 pub fn f_mkldnn_linear_backward_weights_out(
25109 &self,
25110 out0: &Tensor,
25111 out1: &Tensor,
25112 grad_output: &Tensor,
25113 weight: &Tensor,
25114 bias_defined: bool,
25115 ) -> Result<(Tensor, Tensor), TchError> {
25116 let mut c_tensors = [std::ptr::null_mut(); 2];
25117 unsafe_torch_err!(atg_mkldnn_linear_backward_weights_out(
25118 c_tensors.as_mut_ptr(),
25119 out0.c_tensor,
25120 out1.c_tensor,
25121 grad_output.c_tensor,
25122 self.c_tensor,
25123 weight.c_tensor,
25124 if bias_defined { 1 } else { 0 }
25125 ));
25126 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
25127 }
25128
25129 pub fn f_mkldnn_linear_out<T: Borrow<Tensor>>(
25130 &self,
25131 out: &Tensor,
25132 weight: &Tensor,
25133 bias: Option<T>,
25134 ) -> Result<Tensor, TchError> {
25135 let mut c_tensors = [std::ptr::null_mut(); 1];
25136 unsafe_torch_err!(atg_mkldnn_linear_out(
25137 c_tensors.as_mut_ptr(),
25138 out.c_tensor,
25139 self.c_tensor,
25140 weight.c_tensor,
25141 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
25142 ));
25143 Ok(Tensor { c_tensor: c_tensors[0] })
25144 }
25145
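    /// 2-D max pooling for tensors in the oneDNN (MKL-DNN) opaque layout.
    /// `kernel_size`, `stride`, `padding` and `dilation` follow the usual
    /// `max_pool2d` conventions; `ceil_mode` rounds the output size up instead
    /// of down.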
25146 pub fn f_mkldnn_max_pool2d(
25147 &self,
25148 kernel_size: impl IntList,
25149 stride: impl IntList,
25150 padding: impl IntList,
25151 dilation: impl IntList,
25152 ceil_mode: bool,
25153 ) -> Result<Tensor, TchError> {
25154 let mut c_tensors = [std::ptr::null_mut(); 1];
25155 unsafe_torch_err!(atg_mkldnn_max_pool2d(
25156 c_tensors.as_mut_ptr(),
25157 self.c_tensor,
25158 kernel_size.as_ptr(),
25159 kernel_size.len_i32(),
25160 stride.as_ptr(),
25161 stride.len_i32(),
25162 padding.as_ptr(),
25163 padding.len_i32(),
25164 dilation.as_ptr(),
25165 dilation.len_i32(),
25166 if ceil_mode { 1 } else { 0 }
25167 ));
25168 Ok(Tensor { c_tensor: c_tensors[0] })
25169 }
25170
25171 pub fn f_mkldnn_max_pool2d_backward(
25172 &self,
25173 grad_output: &Tensor,
25174 output: &Tensor,
25175 kernel_size: impl IntList,
25176 stride: impl IntList,
25177 padding: impl IntList,
25178 dilation: impl IntList,
25179 ceil_mode: bool,
25180 ) -> Result<Tensor, TchError> {
25181 let mut c_tensors = [std::ptr::null_mut(); 1];
25182 unsafe_torch_err!(atg_mkldnn_max_pool2d_backward(
25183 c_tensors.as_mut_ptr(),
25184 grad_output.c_tensor,
25185 output.c_tensor,
25186 self.c_tensor,
25187 kernel_size.as_ptr(),
25188 kernel_size.len_i32(),
25189 stride.as_ptr(),
25190 stride.len_i32(),
25191 padding.as_ptr(),
25192 padding.len_i32(),
25193 dilation.as_ptr(),
25194 dilation.len_i32(),
25195 if ceil_mode { 1 } else { 0 }
25196 ));
25197 Ok(Tensor { c_tensor: c_tensors[0] })
25198 }
25199
25200 pub fn f_mkldnn_max_pool2d_backward_out(
25201 &self,
25202 out: &Tensor,
25203 grad_output: &Tensor,
25204 output: &Tensor,
25205 kernel_size: impl IntList,
25206 stride: impl IntList,
25207 padding: impl IntList,
25208 dilation: impl IntList,
25209 ceil_mode: bool,
25210 ) -> Result<Tensor, TchError> {
25211 let mut c_tensors = [std::ptr::null_mut(); 1];
25212 unsafe_torch_err!(atg_mkldnn_max_pool2d_backward_out(
25213 c_tensors.as_mut_ptr(),
25214 out.c_tensor,
25215 grad_output.c_tensor,
25216 output.c_tensor,
25217 self.c_tensor,
25218 kernel_size.as_ptr(),
25219 kernel_size.len_i32(),
25220 stride.as_ptr(),
25221 stride.len_i32(),
25222 padding.as_ptr(),
25223 padding.len_i32(),
25224 dilation.as_ptr(),
25225 dilation.len_i32(),
25226 if ceil_mode { 1 } else { 0 }
25227 ));
25228 Ok(Tensor { c_tensor: c_tensors[0] })
25229 }
25230
25231 pub fn f_mkldnn_max_pool2d_out(
25232 &self,
25233 out: &Tensor,
25234 kernel_size: impl IntList,
25235 stride: impl IntList,
25236 padding: impl IntList,
25237 dilation: impl IntList,
25238 ceil_mode: bool,
25239 ) -> Result<Tensor, TchError> {
25240 let mut c_tensors = [std::ptr::null_mut(); 1];
25241 unsafe_torch_err!(atg_mkldnn_max_pool2d_out(
25242 c_tensors.as_mut_ptr(),
25243 out.c_tensor,
25244 self.c_tensor,
25245 kernel_size.as_ptr(),
25246 kernel_size.len_i32(),
25247 stride.as_ptr(),
25248 stride.len_i32(),
25249 padding.as_ptr(),
25250 padding.len_i32(),
25251 dilation.as_ptr(),
25252 dilation.len_i32(),
25253 if ceil_mode { 1 } else { 0 }
25254 ));
25255 Ok(Tensor { c_tensor: c_tensors[0] })
25256 }
25257
25258 pub fn f_mkldnn_max_pool3d(
25259 &self,
25260 kernel_size: impl IntList,
25261 stride: impl IntList,
25262 padding: impl IntList,
25263 dilation: impl IntList,
25264 ceil_mode: bool,
25265 ) -> Result<Tensor, TchError> {
25266 let mut c_tensors = [std::ptr::null_mut(); 1];
25267 unsafe_torch_err!(atg_mkldnn_max_pool3d(
25268 c_tensors.as_mut_ptr(),
25269 self.c_tensor,
25270 kernel_size.as_ptr(),
25271 kernel_size.len_i32(),
25272 stride.as_ptr(),
25273 stride.len_i32(),
25274 padding.as_ptr(),
25275 padding.len_i32(),
25276 dilation.as_ptr(),
25277 dilation.len_i32(),
25278 if ceil_mode { 1 } else { 0 }
25279 ));
25280 Ok(Tensor { c_tensor: c_tensors[0] })
25281 }
25282
25283 pub fn f_mkldnn_max_pool3d_backward(
25284 &self,
25285 grad_output: &Tensor,
25286 output: &Tensor,
25287 kernel_size: impl IntList,
25288 stride: impl IntList,
25289 padding: impl IntList,
25290 dilation: impl IntList,
25291 ceil_mode: bool,
25292 ) -> Result<Tensor, TchError> {
25293 let mut c_tensors = [std::ptr::null_mut(); 1];
25294 unsafe_torch_err!(atg_mkldnn_max_pool3d_backward(
25295 c_tensors.as_mut_ptr(),
25296 grad_output.c_tensor,
25297 output.c_tensor,
25298 self.c_tensor,
25299 kernel_size.as_ptr(),
25300 kernel_size.len_i32(),
25301 stride.as_ptr(),
25302 stride.len_i32(),
25303 padding.as_ptr(),
25304 padding.len_i32(),
25305 dilation.as_ptr(),
25306 dilation.len_i32(),
25307 if ceil_mode { 1 } else { 0 }
25308 ));
25309 Ok(Tensor { c_tensor: c_tensors[0] })
25310 }
25311
25312 pub fn f_mkldnn_max_pool3d_backward_out(
25313 &self,
25314 out: &Tensor,
25315 grad_output: &Tensor,
25316 output: &Tensor,
25317 kernel_size: impl IntList,
25318 stride: impl IntList,
25319 padding: impl IntList,
25320 dilation: impl IntList,
25321 ceil_mode: bool,
25322 ) -> Result<Tensor, TchError> {
25323 let mut c_tensors = [std::ptr::null_mut(); 1];
25324 unsafe_torch_err!(atg_mkldnn_max_pool3d_backward_out(
25325 c_tensors.as_mut_ptr(),
25326 out.c_tensor,
25327 grad_output.c_tensor,
25328 output.c_tensor,
25329 self.c_tensor,
25330 kernel_size.as_ptr(),
25331 kernel_size.len_i32(),
25332 stride.as_ptr(),
25333 stride.len_i32(),
25334 padding.as_ptr(),
25335 padding.len_i32(),
25336 dilation.as_ptr(),
25337 dilation.len_i32(),
25338 if ceil_mode { 1 } else { 0 }
25339 ));
25340 Ok(Tensor { c_tensor: c_tensors[0] })
25341 }
25342
25343 pub fn f_mkldnn_max_pool3d_out(
25344 &self,
25345 out: &Tensor,
25346 kernel_size: impl IntList,
25347 stride: impl IntList,
25348 padding: impl IntList,
25349 dilation: impl IntList,
25350 ceil_mode: bool,
25351 ) -> Result<Tensor, TchError> {
25352 let mut c_tensors = [std::ptr::null_mut(); 1];
25353 unsafe_torch_err!(atg_mkldnn_max_pool3d_out(
25354 c_tensors.as_mut_ptr(),
25355 out.c_tensor,
25356 self.c_tensor,
25357 kernel_size.as_ptr(),
25358 kernel_size.len_i32(),
25359 stride.as_ptr(),
25360 stride.len_i32(),
25361 padding.as_ptr(),
25362 padding.len_i32(),
25363 dilation.as_ptr(),
25364 dilation.len_i32(),
25365 if ceil_mode { 1 } else { 0 }
25366 ));
25367 Ok(Tensor { c_tensor: c_tensors[0] })
25368 }
25369
25370 pub fn f_mkldnn_reorder_conv2d_weight(
25371 &self,
25372 padding: impl IntList,
25373 stride: impl IntList,
25374 dilation: impl IntList,
25375 groups: i64,
25376 input_size: impl IntListOption,
25377 ) -> Result<Tensor, TchError> {
25378 let mut c_tensors = [std::ptr::null_mut(); 1];
25379 unsafe_torch_err!(atg_mkldnn_reorder_conv2d_weight(
25380 c_tensors.as_mut_ptr(),
25381 self.c_tensor,
25382 padding.as_ptr(),
25383 padding.len_i32(),
25384 stride.as_ptr(),
25385 stride.len_i32(),
25386 dilation.as_ptr(),
25387 dilation.len_i32(),
25388 groups,
25389 input_size.as_ptr(),
25390 input_size.len_i32()
25391 ));
25392 Ok(Tensor { c_tensor: c_tensors[0] })
25393 }
25394
25395 pub fn f_mkldnn_reorder_conv2d_weight_out(
25396 &self,
25397 out: &Tensor,
25398 padding: impl IntList,
25399 stride: impl IntList,
25400 dilation: impl IntList,
25401 groups: i64,
25402 input_size: impl IntListOption,
25403 ) -> Result<Tensor, TchError> {
25404 let mut c_tensors = [std::ptr::null_mut(); 1];
25405 unsafe_torch_err!(atg_mkldnn_reorder_conv2d_weight_out(
25406 c_tensors.as_mut_ptr(),
25407 out.c_tensor,
25408 self.c_tensor,
25409 padding.as_ptr(),
25410 padding.len_i32(),
25411 stride.as_ptr(),
25412 stride.len_i32(),
25413 dilation.as_ptr(),
25414 dilation.len_i32(),
25415 groups,
25416 input_size.as_ptr(),
25417 input_size.len_i32()
25418 ));
25419 Ok(Tensor { c_tensor: c_tensors[0] })
25420 }
25421
25422 pub fn f_mkldnn_reorder_conv3d_weight(
25423 &self,
25424 padding: impl IntList,
25425 stride: impl IntList,
25426 dilation: impl IntList,
25427 groups: i64,
25428 input_size: impl IntListOption,
25429 ) -> Result<Tensor, TchError> {
25430 let mut c_tensors = [std::ptr::null_mut(); 1];
25431 unsafe_torch_err!(atg_mkldnn_reorder_conv3d_weight(
25432 c_tensors.as_mut_ptr(),
25433 self.c_tensor,
25434 padding.as_ptr(),
25435 padding.len_i32(),
25436 stride.as_ptr(),
25437 stride.len_i32(),
25438 dilation.as_ptr(),
25439 dilation.len_i32(),
25440 groups,
25441 input_size.as_ptr(),
25442 input_size.len_i32()
25443 ));
25444 Ok(Tensor { c_tensor: c_tensors[0] })
25445 }
25446
25447 pub fn f_mkldnn_reorder_conv3d_weight_out(
25448 &self,
25449 out: &Tensor,
25450 padding: impl IntList,
25451 stride: impl IntList,
25452 dilation: impl IntList,
25453 groups: i64,
25454 input_size: impl IntListOption,
25455 ) -> Result<Tensor, TchError> {
25456 let mut c_tensors = [std::ptr::null_mut(); 1];
25457 unsafe_torch_err!(atg_mkldnn_reorder_conv3d_weight_out(
25458 c_tensors.as_mut_ptr(),
25459 out.c_tensor,
25460 self.c_tensor,
25461 padding.as_ptr(),
25462 padding.len_i32(),
25463 stride.as_ptr(),
25464 stride.len_i32(),
25465 dilation.as_ptr(),
25466 dilation.len_i32(),
25467 groups,
25468 input_size.as_ptr(),
25469 input_size.len_i32()
25470 ));
25471 Ok(Tensor { c_tensor: c_tensors[0] })
25472 }
25473
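    /// Single RNN layer executed through the oneDNN (MKL-DNN) backend, used
    /// internally by the CPU LSTM fast path. `weight0`..`weight3` are the
    /// input-to-hidden / hidden-to-hidden weights and their biases, and the
    /// returned tuple holds the layer output, the final hidden and cell
    /// states, and a workspace tensor consumed by the backward pass.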
25474 pub fn f_mkldnn_rnn_layer(
25475 &self,
25476 weight0: &Tensor,
25477 weight1: &Tensor,
25478 weight2: &Tensor,
25479 weight3: &Tensor,
25480 hx_: &Tensor,
25481 cx_: &Tensor,
25482 reverse: bool,
25483 batch_sizes: impl IntList,
25484 mode: i64,
25485 hidden_size: i64,
25486 num_layers: i64,
25487 has_biases: bool,
25488 bidirectional: bool,
25489 batch_first: bool,
25490 train: bool,
25491 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
25492 let mut c_tensors = [std::ptr::null_mut(); 4];
25493 unsafe_torch_err!(atg_mkldnn_rnn_layer(
25494 c_tensors.as_mut_ptr(),
25495 self.c_tensor,
25496 weight0.c_tensor,
25497 weight1.c_tensor,
25498 weight2.c_tensor,
25499 weight3.c_tensor,
25500 hx_.c_tensor,
25501 cx_.c_tensor,
25502 if reverse { 1 } else { 0 },
25503 batch_sizes.as_ptr(),
25504 batch_sizes.len_i32(),
25505 mode,
25506 hidden_size,
25507 num_layers,
25508 if has_biases { 1 } else { 0 },
25509 if bidirectional { 1 } else { 0 },
25510 if batch_first { 1 } else { 0 },
25511 if train { 1 } else { 0 }
25512 ));
25513 Ok((
25514 Tensor { c_tensor: c_tensors[0] },
25515 Tensor { c_tensor: c_tensors[1] },
25516 Tensor { c_tensor: c_tensors[2] },
25517 Tensor { c_tensor: c_tensors[3] },
25518 ))
25519 }
25520
25521 pub fn f_mkldnn_rnn_layer_backward<T: Borrow<Tensor>>(
25522 &self,
25523 weight1: &Tensor,
25524 weight2: &Tensor,
25525 weight3: &Tensor,
25526 weight4: &Tensor,
25527 hx_: &Tensor,
25528 cx_tmp: &Tensor,
25529 output: &Tensor,
25530 hy_: &Tensor,
25531 cy_: &Tensor,
25532 grad_output: Option<T>,
25533 grad_hy: Option<T>,
25534 grad_cy: Option<T>,
25535 reverse: bool,
25536 mode: i64,
25537 hidden_size: i64,
25538 num_layers: i64,
25539 has_biases: bool,
25540 train: bool,
25541 bidirectional: bool,
25542 batch_sizes: impl IntList,
25543 batch_first: bool,
25544 workspace: &Tensor,
25545 ) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
25546 let mut c_tensors = [std::ptr::null_mut(); 7];
25547 unsafe_torch_err!(atg_mkldnn_rnn_layer_backward(
25548 c_tensors.as_mut_ptr(),
25549 self.c_tensor,
25550 weight1.c_tensor,
25551 weight2.c_tensor,
25552 weight3.c_tensor,
25553 weight4.c_tensor,
25554 hx_.c_tensor,
25555 cx_tmp.c_tensor,
25556 output.c_tensor,
25557 hy_.c_tensor,
25558 cy_.c_tensor,
25559 grad_output.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
25560 grad_hy.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
25561 grad_cy.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
25562 if reverse { 1 } else { 0 },
25563 mode,
25564 hidden_size,
25565 num_layers,
25566 if has_biases { 1 } else { 0 },
25567 if train { 1 } else { 0 },
25568 if bidirectional { 1 } else { 0 },
25569 batch_sizes.as_ptr(),
25570 batch_sizes.len_i32(),
25571 if batch_first { 1 } else { 0 },
25572 workspace.c_tensor
25573 ));
25574 Ok((
25575 Tensor { c_tensor: c_tensors[0] },
25576 Tensor { c_tensor: c_tensors[1] },
25577 Tensor { c_tensor: c_tensors[2] },
25578 Tensor { c_tensor: c_tensors[3] },
25579 Tensor { c_tensor: c_tensors[4] },
25580 Tensor { c_tensor: c_tensors[5] },
25581 Tensor { c_tensor: c_tensors[6] },
25582 ))
25583 }
25584
25585 pub fn f_mkldnn_rnn_layer_backward_out<T: Borrow<Tensor>>(
25586 &self,
25587 out0: &Tensor,
25588 out1: &Tensor,
25589 out2: &Tensor,
25590 out3: &Tensor,
25591 out4: &Tensor,
25592 out5: &Tensor,
25593 out6: &Tensor,
25594 weight1: &Tensor,
25595 weight2: &Tensor,
25596 weight3: &Tensor,
25597 weight4: &Tensor,
25598 hx_: &Tensor,
25599 cx_tmp: &Tensor,
25600 output: &Tensor,
25601 hy_: &Tensor,
25602 cy_: &Tensor,
25603 grad_output: Option<T>,
25604 grad_hy: Option<T>,
25605 grad_cy: Option<T>,
25606 reverse: bool,
25607 mode: i64,
25608 hidden_size: i64,
25609 num_layers: i64,
25610 has_biases: bool,
25611 train: bool,
25612 bidirectional: bool,
25613 batch_sizes: impl IntList,
25614 batch_first: bool,
25615 workspace: &Tensor,
25616 ) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor), TchError> {
25617 let mut c_tensors = [std::ptr::null_mut(); 7];
25618 unsafe_torch_err!(atg_mkldnn_rnn_layer_backward_out(
25619 c_tensors.as_mut_ptr(),
25620 out0.c_tensor,
25621 out1.c_tensor,
25622 out2.c_tensor,
25623 out3.c_tensor,
25624 out4.c_tensor,
25625 out5.c_tensor,
25626 out6.c_tensor,
25627 self.c_tensor,
25628 weight1.c_tensor,
25629 weight2.c_tensor,
25630 weight3.c_tensor,
25631 weight4.c_tensor,
25632 hx_.c_tensor,
25633 cx_tmp.c_tensor,
25634 output.c_tensor,
25635 hy_.c_tensor,
25636 cy_.c_tensor,
25637 grad_output.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
25638 grad_hy.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
25639 grad_cy.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
25640 if reverse { 1 } else { 0 },
25641 mode,
25642 hidden_size,
25643 num_layers,
25644 if has_biases { 1 } else { 0 },
25645 if train { 1 } else { 0 },
25646 if bidirectional { 1 } else { 0 },
25647 batch_sizes.as_ptr(),
25648 batch_sizes.len_i32(),
25649 if batch_first { 1 } else { 0 },
25650 workspace.c_tensor
25651 ));
25652 Ok((
25653 Tensor { c_tensor: c_tensors[0] },
25654 Tensor { c_tensor: c_tensors[1] },
25655 Tensor { c_tensor: c_tensors[2] },
25656 Tensor { c_tensor: c_tensors[3] },
25657 Tensor { c_tensor: c_tensors[4] },
25658 Tensor { c_tensor: c_tensors[5] },
25659 Tensor { c_tensor: c_tensors[6] },
25660 ))
25661 }
25662
25663 pub fn f_mkldnn_rnn_layer_out(
25664 &self,
25665 out0: &Tensor,
25666 out1: &Tensor,
25667 out2: &Tensor,
25668 out3: &Tensor,
25669 weight0: &Tensor,
25670 weight1: &Tensor,
25671 weight2: &Tensor,
25672 weight3: &Tensor,
25673 hx_: &Tensor,
25674 cx_: &Tensor,
25675 reverse: bool,
25676 batch_sizes: impl IntList,
25677 mode: i64,
25678 hidden_size: i64,
25679 num_layers: i64,
25680 has_biases: bool,
25681 bidirectional: bool,
25682 batch_first: bool,
25683 train: bool,
25684 ) -> Result<(Tensor, Tensor, Tensor, Tensor), TchError> {
25685 let mut c_tensors = [std::ptr::null_mut(); 4];
25686 unsafe_torch_err!(atg_mkldnn_rnn_layer_out(
25687 c_tensors.as_mut_ptr(),
25688 out0.c_tensor,
25689 out1.c_tensor,
25690 out2.c_tensor,
25691 out3.c_tensor,
25692 self.c_tensor,
25693 weight0.c_tensor,
25694 weight1.c_tensor,
25695 weight2.c_tensor,
25696 weight3.c_tensor,
25697 hx_.c_tensor,
25698 cx_.c_tensor,
25699 if reverse { 1 } else { 0 },
25700 batch_sizes.as_ptr(),
25701 batch_sizes.len_i32(),
25702 mode,
25703 hidden_size,
25704 num_layers,
25705 if has_biases { 1 } else { 0 },
25706 if bidirectional { 1 } else { 0 },
25707 if batch_first { 1 } else { 0 },
25708 if train { 1 } else { 0 }
25709 ));
25710 Ok((
25711 Tensor { c_tensor: c_tensors[0] },
25712 Tensor { c_tensor: c_tensors[1] },
25713 Tensor { c_tensor: c_tensors[2] },
25714 Tensor { c_tensor: c_tensors[3] },
25715 ))
25716 }
25717
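    /// Matrix product of two 2-D tensors (`self @ mat2`), the fallible
    /// counterpart of `mm`.
    ///
    /// A minimal sketch; the `tch`-style crate-root re-exports (`Tensor`,
    /// `Kind`, `Device`) and a `Result`-returning context for `?` are
    /// assumptions of this example:
    ///
    /// ```ignore
    /// let a = Tensor::eye(3, (Kind::Float, Device::Cpu));
    /// let b = a.f_mm(&a)?; // the 3x3 identity times itself is still the identity
    /// ```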
25718 pub fn f_mm(&self, mat2: &Tensor) -> Result<Tensor, TchError> {
25719 let mut c_tensors = [std::ptr::null_mut(); 1];
25720 unsafe_torch_err!(atg_mm(c_tensors.as_mut_ptr(), self.c_tensor, mat2.c_tensor));
25721 Ok(Tensor { c_tensor: c_tensors[0] })
25722 }
25723
25724 pub fn f_mm_out(&self, out: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
25725 let mut c_tensors = [std::ptr::null_mut(); 1];
25726 unsafe_torch_err!(atg_mm_out(
25727 c_tensors.as_mut_ptr(),
25728 out.c_tensor,
25729 self.c_tensor,
25730 mat2.c_tensor
25731 ));
25732 Ok(Tensor { c_tensor: c_tensors[0] })
25733 }
25734
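    /// Mode along a dimension: returns `(values, indices)` where `values`
    /// holds the most frequent value of each slice along `dim` and `indices`
    /// its position, matching `torch.mode`.
    ///
    /// A sketch assuming a `tch`-style `Tensor::from_slice` constructor:
    ///
    /// ```ignore
    /// let t = Tensor::from_slice(&[1i64, 2, 2, 3, 2]);
    /// let (values, indices) = t.f_mode(0, false)?; // values == 2
    /// ```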
25735 pub fn f_mode(&self, dim: i64, keepdim: bool) -> Result<(Tensor, Tensor), TchError> {
25736 let mut c_tensors = [std::ptr::null_mut(); 2];
25737 unsafe_torch_err!(atg_mode(
25738 c_tensors.as_mut_ptr(),
25739 self.c_tensor,
25740 dim,
25741 if keepdim { 1 } else { 0 }
25742 ));
25743 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
25744 }
25745
25746 pub fn f_mode_values(
25747 &self,
25748 values: &Tensor,
25749 indices: &Tensor,
25750 dim: i64,
25751 keepdim: bool,
25752 ) -> Result<(Tensor, Tensor), TchError> {
25753 let mut c_tensors = [std::ptr::null_mut(); 2];
25754 unsafe_torch_err!(atg_mode_values(
25755 c_tensors.as_mut_ptr(),
25756 values.c_tensor,
25757 indices.c_tensor,
25758 self.c_tensor,
25759 dim,
25760 if keepdim { 1 } else { 0 }
25761 ));
25762 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
25763 }
25764
25765 pub fn f_moveaxis(
25766 &self,
25767 source: impl IntList,
25768 destination: impl IntList,
25769 ) -> Result<Tensor, TchError> {
25770 let mut c_tensors = [std::ptr::null_mut(); 1];
25771 unsafe_torch_err!(atg_moveaxis(
25772 c_tensors.as_mut_ptr(),
25773 self.c_tensor,
25774 source.as_ptr(),
25775 source.len_i32(),
25776 destination.as_ptr(),
25777 destination.len_i32()
25778 ));
25779 Ok(Tensor { c_tensor: c_tensors[0] })
25780 }
25781
25782 pub fn f_moveaxis_int(&self, source: i64, destination: i64) -> Result<Tensor, TchError> {
25783 let mut c_tensors = [std::ptr::null_mut(); 1];
25784 unsafe_torch_err!(atg_moveaxis_int(
25785 c_tensors.as_mut_ptr(),
25786 self.c_tensor,
25787 source,
25788 destination
25789 ));
25790 Ok(Tensor { c_tensor: c_tensors[0] })
25791 }
25792
25793 pub fn f_movedim(
25794 &self,
25795 source: impl IntList,
25796 destination: impl IntList,
25797 ) -> Result<Tensor, TchError> {
25798 let mut c_tensors = [std::ptr::null_mut(); 1];
25799 unsafe_torch_err!(atg_movedim(
25800 c_tensors.as_mut_ptr(),
25801 self.c_tensor,
25802 source.as_ptr(),
25803 source.len_i32(),
25804 destination.as_ptr(),
25805 destination.len_i32()
25806 ));
25807 Ok(Tensor { c_tensor: c_tensors[0] })
25808 }
25809
25810 pub fn f_movedim_int(&self, source: i64, destination: i64) -> Result<Tensor, TchError> {
25811 let mut c_tensors = [std::ptr::null_mut(); 1];
25812 unsafe_torch_err!(atg_movedim_int(
25813 c_tensors.as_mut_ptr(),
25814 self.c_tensor,
25815 source,
25816 destination
25817 ));
25818 Ok(Tensor { c_tensor: c_tensors[0] })
25819 }
25820
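    /// Mean-squared-error loss between `self` and `target`; `reduction`
    /// selects `None`, `Mean` or `Sum` over the element-wise squared errors.
    ///
    /// A sketch assuming `tch`-style re-exports for `Tensor` and `Reduction`:
    ///
    /// ```ignore
    /// let pred = Tensor::from_slice(&[0.5f32, 1.5, 2.0]);
    /// let target = Tensor::from_slice(&[1.0f32, 1.0, 2.0]);
    /// let loss = pred.f_mse_loss(&target, Reduction::Mean)?; // scalar tensor
    /// ```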
25821 pub fn f_mse_loss(
25822 &self,
25823 target: &Tensor,
25824 reduction: crate::Reduction,
25825 ) -> Result<Tensor, TchError> {
25826 let mut c_tensors = [std::ptr::null_mut(); 1];
25827 unsafe_torch_err!(atg_mse_loss(
25828 c_tensors.as_mut_ptr(),
25829 self.c_tensor,
25830 target.c_tensor,
25831 reduction.to_int()
25832 ));
25833 Ok(Tensor { c_tensor: c_tensors[0] })
25834 }
25835
25836 pub fn f_mse_loss_backward(
25837 &self,
25838 grad_output: &Tensor,
25839 target: &Tensor,
25840 reduction: crate::Reduction,
25841 ) -> Result<Tensor, TchError> {
25842 let mut c_tensors = [std::ptr::null_mut(); 1];
25843 unsafe_torch_err!(atg_mse_loss_backward(
25844 c_tensors.as_mut_ptr(),
25845 grad_output.c_tensor,
25846 self.c_tensor,
25847 target.c_tensor,
25848 reduction.to_int()
25849 ));
25850 Ok(Tensor { c_tensor: c_tensors[0] })
25851 }
25852
25853 pub fn f_mse_loss_backward_grad_input(
25854 &self,
25855 grad_input: &Tensor,
25856 grad_output: &Tensor,
25857 target: &Tensor,
25858 reduction: crate::Reduction,
25859 ) -> Result<Tensor, TchError> {
25860 let mut c_tensors = [std::ptr::null_mut(); 1];
25861 unsafe_torch_err!(atg_mse_loss_backward_grad_input(
25862 c_tensors.as_mut_ptr(),
25863 grad_input.c_tensor,
25864 grad_output.c_tensor,
25865 self.c_tensor,
25866 target.c_tensor,
25867 reduction.to_int()
25868 ));
25869 Ok(Tensor { c_tensor: c_tensors[0] })
25870 }
25871
25872 pub fn f_mse_loss_out(
25873 &self,
25874 out: &Tensor,
25875 target: &Tensor,
25876 reduction: crate::Reduction,
25877 ) -> Result<Tensor, TchError> {
25878 let mut c_tensors = [std::ptr::null_mut(); 1];
25879 unsafe_torch_err!(atg_mse_loss_out(
25880 c_tensors.as_mut_ptr(),
25881 out.c_tensor,
25882 self.c_tensor,
25883 target.c_tensor,
25884 reduction.to_int()
25885 ));
25886 Ok(Tensor { c_tensor: c_tensors[0] })
25887 }
25888
25889 pub fn f_msort(&self) -> Result<Tensor, TchError> {
25890 let mut c_tensors = [std::ptr::null_mut(); 1];
25891 unsafe_torch_err!(atg_msort(c_tensors.as_mut_ptr(), self.c_tensor));
25892 Ok(Tensor { c_tensor: c_tensors[0] })
25893 }
25894
25895 pub fn f_msort_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
25896 let mut c_tensors = [std::ptr::null_mut(); 1];
25897 unsafe_torch_err!(atg_msort_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
25898 Ok(Tensor { c_tensor: c_tensors[0] })
25899 }
25900
25901 pub fn f_mt(&self) -> Result<Tensor, TchError> {
25902 let mut c_tensors = [std::ptr::null_mut(); 1];
25903 unsafe_torch_err!(atg_mt(c_tensors.as_mut_ptr(), self.c_tensor));
25904 Ok(Tensor { c_tensor: c_tensors[0] })
25905 }
25906
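    /// Element-wise multiplication with broadcasting; see `f_mul_scalar` below
    /// for the tensor-scalar variant and `f_mul_` for the in-place form.
    ///
    /// A sketch assuming a `tch`-style `Tensor::from_slice` constructor:
    ///
    /// ```ignore
    /// let a = Tensor::from_slice(&[1.0f32, 2.0, 3.0]);
    /// let b = Tensor::from_slice(&[2.0f32, 2.0, 2.0]);
    /// let c = a.f_mul(&b)?;          // [2.0, 4.0, 6.0]
    /// let d = a.f_mul_scalar(10.0)?; // [10.0, 20.0, 30.0]
    /// ```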
25907 pub fn f_mul(&self, other: &Tensor) -> Result<Tensor, TchError> {
25908 let mut c_tensors = [std::ptr::null_mut(); 1];
25909 unsafe_torch_err!(atg_mul(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
25910 Ok(Tensor { c_tensor: c_tensors[0] })
25911 }
25912
25913 pub fn f_mul_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
25914 let mut c_tensors = [std::ptr::null_mut(); 1];
25915 unsafe_torch_err!(atg_mul_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
25916 Ok(Tensor { c_tensor: c_tensors[0] })
25917 }
25918
25919 pub fn f_mul_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
25920 let mut c_tensors = [std::ptr::null_mut(); 1];
25921 unsafe_torch_err!(atg_mul_out(
25922 c_tensors.as_mut_ptr(),
25923 out.c_tensor,
25924 self.c_tensor,
25925 other.c_tensor
25926 ));
25927 Ok(Tensor { c_tensor: c_tensors[0] })
25928 }
25929
25930 pub fn f_mul_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
25931 let mut c_tensors = [std::ptr::null_mut(); 1];
25932 unsafe_torch_err!(atg_mul_scalar(
25933 c_tensors.as_mut_ptr(),
25934 self.c_tensor,
25935 other.into().c_scalar
25936 ));
25937 Ok(Tensor { c_tensor: c_tensors[0] })
25938 }
25939
25940 pub fn f_mul_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
25941 let mut c_tensors = [std::ptr::null_mut(); 1];
25942 unsafe_torch_err!(atg_mul_scalar_(
25943 c_tensors.as_mut_ptr(),
25944 self.c_tensor,
25945 other.into().c_scalar
25946 ));
25947 Ok(Tensor { c_tensor: c_tensors[0] })
25948 }
25949
25950 pub fn f_mul_scalar_out<S: Into<Scalar>>(
25951 &self,
25952 out: &Tensor,
25953 other: S,
25954 ) -> Result<Tensor, TchError> {
25955 let mut c_tensors = [std::ptr::null_mut(); 1];
25956 unsafe_torch_err!(atg_mul_scalar_out(
25957 c_tensors.as_mut_ptr(),
25958 out.c_tensor,
25959 self.c_tensor,
25960 other.into().c_scalar
25961 ));
25962 Ok(Tensor { c_tensor: c_tensors[0] })
25963 }
25964
25965 pub fn f_multi_margin_loss_backward<T: Borrow<Tensor>, S: Into<Scalar>>(
25966 &self,
25967 grad_output: &Tensor,
25968 target: &Tensor,
25969 p: S,
25970 margin: S,
25971 weight: Option<T>,
25972 reduction: crate::Reduction,
25973 ) -> Result<Tensor, TchError> {
25974 let mut c_tensors = [std::ptr::null_mut(); 1];
25975 unsafe_torch_err!(atg_multi_margin_loss_backward(
25976 c_tensors.as_mut_ptr(),
25977 grad_output.c_tensor,
25978 self.c_tensor,
25979 target.c_tensor,
25980 p.into().c_scalar,
25981 margin.into().c_scalar,
25982 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
25983 reduction.to_int()
25984 ));
25985 Ok(Tensor { c_tensor: c_tensors[0] })
25986 }
25987
25988 pub fn f_multi_margin_loss_backward_grad_input<T: Borrow<Tensor>, S: Into<Scalar>>(
25989 &self,
25990 grad_input: &Tensor,
25991 grad_output: &Tensor,
25992 target: &Tensor,
25993 p: S,
25994 margin: S,
25995 weight: Option<T>,
25996 reduction: crate::Reduction,
25997 ) -> Result<Tensor, TchError> {
25998 let mut c_tensors = [std::ptr::null_mut(); 1];
25999 unsafe_torch_err!(atg_multi_margin_loss_backward_grad_input(
26000 c_tensors.as_mut_ptr(),
26001 grad_input.c_tensor,
26002 grad_output.c_tensor,
26003 self.c_tensor,
26004 target.c_tensor,
26005 p.into().c_scalar,
26006 margin.into().c_scalar,
26007 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
26008 reduction.to_int()
26009 ));
26010 Ok(Tensor { c_tensor: c_tensors[0] })
26011 }
26012
26013 pub fn f_multilabel_margin_loss(
26014 &self,
26015 target: &Tensor,
26016 reduction: crate::Reduction,
26017 ) -> Result<Tensor, TchError> {
26018 let mut c_tensors = [std::ptr::null_mut(); 1];
26019 unsafe_torch_err!(atg_multilabel_margin_loss(
26020 c_tensors.as_mut_ptr(),
26021 self.c_tensor,
26022 target.c_tensor,
26023 reduction.to_int()
26024 ));
26025 Ok(Tensor { c_tensor: c_tensors[0] })
26026 }
26027
26028 pub fn f_multilabel_margin_loss_backward(
26029 &self,
26030 grad_output: &Tensor,
26031 target: &Tensor,
26032 reduction: crate::Reduction,
26033 is_target: &Tensor,
26034 ) -> Result<Tensor, TchError> {
26035 let mut c_tensors = [std::ptr::null_mut(); 1];
26036 unsafe_torch_err!(atg_multilabel_margin_loss_backward(
26037 c_tensors.as_mut_ptr(),
26038 grad_output.c_tensor,
26039 self.c_tensor,
26040 target.c_tensor,
26041 reduction.to_int(),
26042 is_target.c_tensor
26043 ));
26044 Ok(Tensor { c_tensor: c_tensors[0] })
26045 }
26046
26047 pub fn f_multilabel_margin_loss_backward_grad_input(
26048 &self,
26049 grad_input: &Tensor,
26050 grad_output: &Tensor,
26051 target: &Tensor,
26052 reduction: crate::Reduction,
26053 is_target: &Tensor,
26054 ) -> Result<Tensor, TchError> {
26055 let mut c_tensors = [std::ptr::null_mut(); 1];
26056 unsafe_torch_err!(atg_multilabel_margin_loss_backward_grad_input(
26057 c_tensors.as_mut_ptr(),
26058 grad_input.c_tensor,
26059 grad_output.c_tensor,
26060 self.c_tensor,
26061 target.c_tensor,
26062 reduction.to_int(),
26063 is_target.c_tensor
26064 ));
26065 Ok(Tensor { c_tensor: c_tensors[0] })
26066 }
26067
26068 pub fn f_multilabel_margin_loss_out(
26069 &self,
26070 out: &Tensor,
26071 target: &Tensor,
26072 reduction: crate::Reduction,
26073 ) -> Result<Tensor, TchError> {
26074 let mut c_tensors = [std::ptr::null_mut(); 1];
26075 unsafe_torch_err!(atg_multilabel_margin_loss_out(
26076 c_tensors.as_mut_ptr(),
26077 out.c_tensor,
26078 self.c_tensor,
26079 target.c_tensor,
26080 reduction.to_int()
26081 ));
26082 Ok(Tensor { c_tensor: c_tensors[0] })
26083 }
26084
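    /// Draws `num_samples` indices from the categorical distribution described
    /// by `self` (non-negative, not necessarily normalised weights along the
    /// last dimension); with `replacement == false` the weights must provide
    /// at least `num_samples` non-zero entries.
    ///
    /// A sketch assuming a `tch`-style `Tensor::from_slice` constructor:
    ///
    /// ```ignore
    /// let weights = Tensor::from_slice(&[0.1f32, 0.2, 0.7]);
    /// let idx = weights.f_multinomial(5, true)?; // 5 indices in 0..3
    /// ```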
26085 pub fn f_multinomial(&self, num_samples: i64, replacement: bool) -> Result<Tensor, TchError> {
26086 let mut c_tensors = [std::ptr::null_mut(); 1];
26087 unsafe_torch_err!(atg_multinomial(
26088 c_tensors.as_mut_ptr(),
26089 self.c_tensor,
26090 num_samples,
26091 if replacement { 1 } else { 0 }
26092 ));
26093 Ok(Tensor { c_tensor: c_tensors[0] })
26094 }
26095
26096 pub fn f_multinomial_out(
26097 &self,
26098 out: &Tensor,
26099 num_samples: i64,
26100 replacement: bool,
26101 ) -> Result<Tensor, TchError> {
26102 let mut c_tensors = [std::ptr::null_mut(); 1];
26103 unsafe_torch_err!(atg_multinomial_out(
26104 c_tensors.as_mut_ptr(),
26105 out.c_tensor,
26106 self.c_tensor,
26107 num_samples,
26108 if replacement { 1 } else { 0 }
26109 ));
26110 Ok(Tensor { c_tensor: c_tensors[0] })
26111 }
26112
26113 pub fn f_multiply(&self, other: &Tensor) -> Result<Tensor, TchError> {
26114 let mut c_tensors = [std::ptr::null_mut(); 1];
26115 unsafe_torch_err!(atg_multiply(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
26116 Ok(Tensor { c_tensor: c_tensors[0] })
26117 }
26118
26119 pub fn f_multiply_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
26120 let mut c_tensors = [std::ptr::null_mut(); 1];
26121 unsafe_torch_err!(atg_multiply_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
26122 Ok(Tensor { c_tensor: c_tensors[0] })
26123 }
26124
26125 pub fn f_multiply_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
26126 let mut c_tensors = [std::ptr::null_mut(); 1];
26127 unsafe_torch_err!(atg_multiply_out(
26128 c_tensors.as_mut_ptr(),
26129 out.c_tensor,
26130 self.c_tensor,
26131 other.c_tensor
26132 ));
26133 Ok(Tensor { c_tensor: c_tensors[0] })
26134 }
26135
26136 pub fn f_multiply_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
26137 let mut c_tensors = [std::ptr::null_mut(); 1];
26138 unsafe_torch_err!(atg_multiply_scalar(
26139 c_tensors.as_mut_ptr(),
26140 self.c_tensor,
26141 other.into().c_scalar
26142 ));
26143 Ok(Tensor { c_tensor: c_tensors[0] })
26144 }
26145
26146 pub fn f_multiply_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
26147 let mut c_tensors = [std::ptr::null_mut(); 1];
26148 unsafe_torch_err!(atg_multiply_scalar_(
26149 c_tensors.as_mut_ptr(),
26150 self.c_tensor,
26151 other.into().c_scalar
26152 ));
26153 Ok(Tensor { c_tensor: c_tensors[0] })
26154 }
26155
26156 pub fn f_mv(&self, vec: &Tensor) -> Result<Tensor, TchError> {
26157 let mut c_tensors = [std::ptr::null_mut(); 1];
26158 unsafe_torch_err!(atg_mv(c_tensors.as_mut_ptr(), self.c_tensor, vec.c_tensor));
26159 Ok(Tensor { c_tensor: c_tensors[0] })
26160 }
26161
26162 pub fn f_mv_out(&self, out: &Tensor, vec: &Tensor) -> Result<Tensor, TchError> {
26163 let mut c_tensors = [std::ptr::null_mut(); 1];
26164 unsafe_torch_err!(atg_mv_out(
26165 c_tensors.as_mut_ptr(),
26166 out.c_tensor,
26167 self.c_tensor,
26168 vec.c_tensor
26169 ));
26170 Ok(Tensor { c_tensor: c_tensors[0] })
26171 }
26172
26173 pub fn f_mvlgamma(&self, p: i64) -> Result<Tensor, TchError> {
26174 let mut c_tensors = [std::ptr::null_mut(); 1];
26175 unsafe_torch_err!(atg_mvlgamma(c_tensors.as_mut_ptr(), self.c_tensor, p));
26176 Ok(Tensor { c_tensor: c_tensors[0] })
26177 }
26178
26179 pub fn f_mvlgamma_(&mut self, p: i64) -> Result<Tensor, TchError> {
26180 let mut c_tensors = [std::ptr::null_mut(); 1];
26181 unsafe_torch_err!(atg_mvlgamma_(c_tensors.as_mut_ptr(), self.c_tensor, p));
26182 Ok(Tensor { c_tensor: c_tensors[0] })
26183 }
26184
26185 pub fn f_mvlgamma_out(&self, out: &Tensor, p: i64) -> Result<Tensor, TchError> {
26186 let mut c_tensors = [std::ptr::null_mut(); 1];
26187 unsafe_torch_err!(atg_mvlgamma_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor, p));
26188 Ok(Tensor { c_tensor: c_tensors[0] })
26189 }
26190
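    /// Replaces `NaN`, `+inf` and `-inf` entries. Passing `None` for an
    /// argument keeps PyTorch's defaults (zero for `nan`, the dtype's largest
    /// and smallest finite values for the infinities).
    ///
    /// A sketch assuming a `tch`-style `Tensor::from_slice` constructor:
    ///
    /// ```ignore
    /// let t = Tensor::from_slice(&[f32::NAN, 1.0, f32::INFINITY]);
    /// let clean = t.f_nan_to_num(0.0, 1e6, -1e6)?; // [0.0, 1.0, 1e6]
    /// ```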
26191 pub fn f_nan_to_num(
26192 &self,
26193 nan: impl Into<Option<f64>>,
26194 posinf: impl Into<Option<f64>>,
26195 neginf: impl Into<Option<f64>>,
26196 ) -> Result<Tensor, TchError> {
26197 let nan = nan.into();
26198 let posinf = posinf.into();
26199 let neginf = neginf.into();
26200 let mut c_tensors = [std::ptr::null_mut(); 1];
26201 unsafe_torch_err!(atg_nan_to_num(
26202 c_tensors.as_mut_ptr(),
26203 self.c_tensor,
26204 nan.unwrap_or(std::f64::NAN),
26205 nan.is_none() as i8,
26206 posinf.unwrap_or(std::f64::NAN),
26207 posinf.is_none() as i8,
26208 neginf.unwrap_or(std::f64::NAN),
26209 neginf.is_none() as i8
26210 ));
26211 Ok(Tensor { c_tensor: c_tensors[0] })
26212 }
26213
26214 pub fn f_nan_to_num_(
26215 &mut self,
26216 nan: impl Into<Option<f64>>,
26217 posinf: impl Into<Option<f64>>,
26218 neginf: impl Into<Option<f64>>,
26219 ) -> Result<Tensor, TchError> {
26220 let nan = nan.into();
26221 let posinf = posinf.into();
26222 let neginf = neginf.into();
26223 let mut c_tensors = [std::ptr::null_mut(); 1];
26224 unsafe_torch_err!(atg_nan_to_num_(
26225 c_tensors.as_mut_ptr(),
26226 self.c_tensor,
26227 nan.unwrap_or(std::f64::NAN),
26228 nan.is_none() as i8,
26229 posinf.unwrap_or(std::f64::NAN),
26230 posinf.is_none() as i8,
26231 neginf.unwrap_or(std::f64::NAN),
26232 neginf.is_none() as i8
26233 ));
26234 Ok(Tensor { c_tensor: c_tensors[0] })
26235 }
26236
26237 pub fn f_nan_to_num_out(
26238 &self,
26239 out: &Tensor,
26240 nan: impl Into<Option<f64>>,
26241 posinf: impl Into<Option<f64>>,
26242 neginf: impl Into<Option<f64>>,
26243 ) -> Result<Tensor, TchError> {
26244 let nan = nan.into();
26245 let posinf = posinf.into();
26246 let neginf = neginf.into();
26247 let mut c_tensors = [std::ptr::null_mut(); 1];
26248 unsafe_torch_err!(atg_nan_to_num_out(
26249 c_tensors.as_mut_ptr(),
26250 out.c_tensor,
26251 self.c_tensor,
26252 nan.unwrap_or(std::f64::NAN),
26253 nan.is_none() as i8,
26254 posinf.unwrap_or(std::f64::NAN),
26255 posinf.is_none() as i8,
26256 neginf.unwrap_or(std::f64::NAN),
26257 neginf.is_none() as i8
26258 ));
26259 Ok(Tensor { c_tensor: c_tensors[0] })
26260 }
26261
26262 pub fn f_nanmean(
26263 &self,
26264 dim: impl IntListOption,
26265 keepdim: bool,
26266 dtype: impl Into<Option<Kind>>,
26267 ) -> Result<Tensor, TchError> {
26268 let mut c_tensors = [std::ptr::null_mut(); 1];
26269 unsafe_torch_err!(atg_nanmean(
26270 c_tensors.as_mut_ptr(),
26271 self.c_tensor,
26272 dim.as_ptr(),
26273 dim.len_i32(),
26274 if keepdim { 1 } else { 0 },
26275 dtype.into().map_or(-1, |s| s.c_int())
26276 ));
26277 Ok(Tensor { c_tensor: c_tensors[0] })
26278 }
26279
26280 pub fn f_nanmean_out(
26281 &self,
26282 out: &Tensor,
26283 dim: impl IntListOption,
26284 keepdim: bool,
26285 dtype: impl Into<Option<Kind>>,
26286 ) -> Result<Tensor, TchError> {
26287 let mut c_tensors = [std::ptr::null_mut(); 1];
26288 unsafe_torch_err!(atg_nanmean_out(
26289 c_tensors.as_mut_ptr(),
26290 out.c_tensor,
26291 self.c_tensor,
26292 dim.as_ptr(),
26293 dim.len_i32(),
26294 if keepdim { 1 } else { 0 },
26295 dtype.into().map_or(-1, |s| s.c_int())
26296 ));
26297 Ok(Tensor { c_tensor: c_tensors[0] })
26298 }
26299
26300 pub fn f_nanmedian(&self) -> Result<Tensor, TchError> {
26301 let mut c_tensors = [std::ptr::null_mut(); 1];
26302 unsafe_torch_err!(atg_nanmedian(c_tensors.as_mut_ptr(), self.c_tensor));
26303 Ok(Tensor { c_tensor: c_tensors[0] })
26304 }
26305
26306 pub fn f_nanmedian_dim(&self, dim: i64, keepdim: bool) -> Result<(Tensor, Tensor), TchError> {
26307 let mut c_tensors = [std::ptr::null_mut(); 2];
26308 unsafe_torch_err!(atg_nanmedian_dim(
26309 c_tensors.as_mut_ptr(),
26310 self.c_tensor,
26311 dim,
26312 if keepdim { 1 } else { 0 }
26313 ));
26314 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
26315 }
26316
26317 pub fn f_nanmedian_dim_values(
26318 &self,
26319 values: &Tensor,
26320 indices: &Tensor,
26321 dim: i64,
26322 keepdim: bool,
26323 ) -> Result<(Tensor, Tensor), TchError> {
26324 let mut c_tensors = [std::ptr::null_mut(); 2];
26325 unsafe_torch_err!(atg_nanmedian_dim_values(
26326 c_tensors.as_mut_ptr(),
26327 values.c_tensor,
26328 indices.c_tensor,
26329 self.c_tensor,
26330 dim,
26331 if keepdim { 1 } else { 0 }
26332 ));
26333 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
26334 }
26335
26336 pub fn f_nanmedian_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
26337 let mut c_tensors = [std::ptr::null_mut(); 1];
26338 unsafe_torch_err!(atg_nanmedian_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
26339 Ok(Tensor { c_tensor: c_tensors[0] })
26340 }
26341
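    /// Quantiles along `dim`, ignoring `NaN` values; `q` holds the requested
    /// quantiles in `[0, 1]` and must have the same floating-point dtype as
    /// `self`. `interpolation` is one of `"linear"`, `"lower"`, `"higher"`,
    /// `"nearest"` or `"midpoint"`.
    ///
    /// A sketch assuming a `tch`-style `Tensor::from_slice` constructor (both
    /// tensors are `f64` so the dtypes match):
    ///
    /// ```ignore
    /// let t = Tensor::from_slice(&[1.0f64, f64::NAN, 3.0, 4.0]);
    /// let q = Tensor::from_slice(&[0.5f64]);
    /// let median = t.f_nanquantile(&q, 0, false, "linear")?; // NaN is ignored
    /// ```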
26342 pub fn f_nanquantile(
26343 &self,
26344 q: &Tensor,
26345 dim: impl Into<Option<i64>>,
26346 keepdim: bool,
26347 interpolation: &str,
26348 ) -> Result<Tensor, TchError> {
26349 let dim = dim.into();
26350 let mut c_tensors = [std::ptr::null_mut(); 1];
26351 unsafe_torch_err!(atg_nanquantile(
26352 c_tensors.as_mut_ptr(),
26353 self.c_tensor,
26354 q.c_tensor,
26355 dim.unwrap_or(0i64),
26356 dim.is_none() as i8,
26357 if keepdim { 1 } else { 0 },
26358 interpolation.as_ptr(),
26359 interpolation.len() as i32
26360 ));
26361 Ok(Tensor { c_tensor: c_tensors[0] })
26362 }
26363
26364 pub fn f_nanquantile_out(
26365 &self,
26366 out: &Tensor,
26367 q: &Tensor,
26368 dim: impl Into<Option<i64>>,
26369 keepdim: bool,
26370 interpolation: &str,
26371 ) -> Result<Tensor, TchError> {
26372 let dim = dim.into();
26373 let mut c_tensors = [std::ptr::null_mut(); 1];
26374 unsafe_torch_err!(atg_nanquantile_out(
26375 c_tensors.as_mut_ptr(),
26376 out.c_tensor,
26377 self.c_tensor,
26378 q.c_tensor,
26379 dim.unwrap_or(0i64),
26380 dim.is_none() as i8,
26381 if keepdim { 1 } else { 0 },
26382 interpolation.as_ptr(),
26383 interpolation.len() as i32
26384 ));
26385 Ok(Tensor { c_tensor: c_tensors[0] })
26386 }
26387
26388 pub fn f_nanquantile_scalar(
26389 &self,
26390 q: f64,
26391 dim: impl Into<Option<i64>>,
26392 keepdim: bool,
26393 interpolation: &str,
26394 ) -> Result<Tensor, TchError> {
26395 let dim = dim.into();
26396 let mut c_tensors = [std::ptr::null_mut(); 1];
26397 unsafe_torch_err!(atg_nanquantile_scalar(
26398 c_tensors.as_mut_ptr(),
26399 self.c_tensor,
26400 q,
26401 dim.unwrap_or(0i64),
26402 dim.is_none() as i8,
26403 if keepdim { 1 } else { 0 },
26404 interpolation.as_ptr(),
26405 interpolation.len() as i32
26406 ));
26407 Ok(Tensor { c_tensor: c_tensors[0] })
26408 }
26409
26410 pub fn f_nanquantile_scalar_out(
26411 &self,
26412 out: &Tensor,
26413 q: f64,
26414 dim: impl Into<Option<i64>>,
26415 keepdim: bool,
26416 interpolation: &str,
26417 ) -> Result<Tensor, TchError> {
26418 let dim = dim.into();
26419 let mut c_tensors = [std::ptr::null_mut(); 1];
26420 unsafe_torch_err!(atg_nanquantile_scalar_out(
26421 c_tensors.as_mut_ptr(),
26422 out.c_tensor,
26423 self.c_tensor,
26424 q,
26425 dim.unwrap_or(0i64),
26426 dim.is_none() as i8,
26427 if keepdim { 1 } else { 0 },
26428 interpolation.as_ptr(),
26429 interpolation.len() as i32
26430 ));
26431 Ok(Tensor { c_tensor: c_tensors[0] })
26432 }
26433
26434 pub fn f_nansum(
26435 &self,
26436 dim: impl IntListOption,
26437 keepdim: bool,
26438 dtype: impl Into<Option<Kind>>,
26439 ) -> Result<Tensor, TchError> {
26440 let mut c_tensors = [std::ptr::null_mut(); 1];
26441 unsafe_torch_err!(atg_nansum(
26442 c_tensors.as_mut_ptr(),
26443 self.c_tensor,
26444 dim.as_ptr(),
26445 dim.len_i32(),
26446 if keepdim { 1 } else { 0 },
26447 dtype.into().map_or(-1, |s| s.c_int())
26448 ));
26449 Ok(Tensor { c_tensor: c_tensors[0] })
26450 }
26451
26452 pub fn f_nansum_out(
26453 &self,
26454 out: &Tensor,
26455 dim: impl IntListOption,
26456 keepdim: bool,
26457 dtype: impl Into<Option<Kind>>,
26458 ) -> Result<Tensor, TchError> {
26459 let mut c_tensors = [std::ptr::null_mut(); 1];
26460 unsafe_torch_err!(atg_nansum_out(
26461 c_tensors.as_mut_ptr(),
26462 out.c_tensor,
26463 self.c_tensor,
26464 dim.as_ptr(),
26465 dim.len_i32(),
26466 if keepdim { 1 } else { 0 },
26467 dtype.into().map_or(-1, |s| s.c_int())
26468 ));
26469 Ok(Tensor { c_tensor: c_tensors[0] })
26470 }
26471
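    /// Narrows dimension `dim` to the range `[start, start + length)`,
    /// returning a view that shares storage with `self` (use `f_narrow_copy`
    /// for an owning copy).
    ///
    /// A sketch assuming a `tch`-style `Tensor::from_slice` constructor:
    ///
    /// ```ignore
    /// let t = Tensor::from_slice(&[10i64, 11, 12, 13, 14]);
    /// let s = t.f_narrow(0, 1, 3)?; // view of [11, 12, 13]
    /// ```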
26472 pub fn f_narrow(&self, dim: i64, start: i64, length: i64) -> Result<Tensor, TchError> {
26473 let mut c_tensors = [std::ptr::null_mut(); 1];
26474 unsafe_torch_err!(atg_narrow(c_tensors.as_mut_ptr(), self.c_tensor, dim, start, length));
26475 Ok(Tensor { c_tensor: c_tensors[0] })
26476 }
26477
26478 pub fn f_narrow_copy(&self, dim: i64, start: i64, length: i64) -> Result<Tensor, TchError> {
26479 let mut c_tensors = [std::ptr::null_mut(); 1];
26480 unsafe_torch_err!(atg_narrow_copy(
26481 c_tensors.as_mut_ptr(),
26482 self.c_tensor,
26483 dim,
26484 start,
26485 length
26486 ));
26487 Ok(Tensor { c_tensor: c_tensors[0] })
26488 }
26489
26490 pub fn f_narrow_copy_out(
26491 &self,
26492 out: &Tensor,
26493 dim: i64,
26494 start: i64,
26495 length: i64,
26496 ) -> Result<Tensor, TchError> {
26497 let mut c_tensors = [std::ptr::null_mut(); 1];
26498 unsafe_torch_err!(atg_narrow_copy_out(
26499 c_tensors.as_mut_ptr(),
26500 out.c_tensor,
26501 self.c_tensor,
26502 dim,
26503 start,
26504 length
26505 ));
26506 Ok(Tensor { c_tensor: c_tensors[0] })
26507 }
26508
26509 pub fn f_narrow_tensor(
26510 &self,
26511 dim: i64,
26512 start: &Tensor,
26513 length: i64,
26514 ) -> Result<Tensor, TchError> {
26515 let mut c_tensors = [std::ptr::null_mut(); 1];
26516 unsafe_torch_err!(atg_narrow_tensor(
26517 c_tensors.as_mut_ptr(),
26518 self.c_tensor,
26519 dim,
26520 start.c_tensor,
26521 length
26522 ));
26523 Ok(Tensor { c_tensor: c_tensors[0] })
26524 }
26525
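    /// Low-level batch-normalization primitive. Normalizes `self` over the
    /// batch dimension(s) and returns `(output, save_mean, save_invstd)`, the
    /// latter two being the statistics saved for the backward pass. In
    /// training mode the optional running statistics are updated in place.
    ///
    /// A minimal usage sketch, fenced as `ignore`; `xs`, `weight`, `bias`,
    /// `running_mean` and `running_var` are assumed tensors of compatible
    /// shapes, not values provided by this crate:
    ///
    /// ```ignore
    /// let (out, save_mean, save_invstd) = xs.f_native_batch_norm(
    ///     Some(&weight),
    ///     Some(&bias),
    ///     Some(&running_mean),
    ///     Some(&running_var),
    ///     true, // training
    ///     0.1,  // momentum
    ///     1e-5, // eps
    /// )?;
    /// ```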
26526 pub fn f_native_batch_norm<T: Borrow<Tensor>>(
26527 &self,
26528 weight: Option<T>,
26529 bias: Option<T>,
26530 running_mean: Option<T>,
26531 running_var: Option<T>,
26532 training: bool,
26533 momentum: f64,
26534 eps: f64,
26535 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
26536 let mut c_tensors = [std::ptr::null_mut(); 3];
26537 unsafe_torch_err!(atg_native_batch_norm(
26538 c_tensors.as_mut_ptr(),
26539 self.c_tensor,
26540 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
26541 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
26542 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
26543 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
26544 if training { 1 } else { 0 },
26545 momentum,
26546 eps
26547 ));
26548 Ok((
26549 Tensor { c_tensor: c_tensors[0] },
26550 Tensor { c_tensor: c_tensors[1] },
26551 Tensor { c_tensor: c_tensors[2] },
26552 ))
26553 }
26554
26555 pub fn f_native_batch_norm_out<T: Borrow<Tensor>>(
26556 &self,
26557 out: &Tensor,
26558 save_mean: &Tensor,
26559 save_invstd: &Tensor,
26560 weight: Option<T>,
26561 bias: Option<T>,
26562 running_mean: Option<T>,
26563 running_var: Option<T>,
26564 training: bool,
26565 momentum: f64,
26566 eps: f64,
26567 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
26568 let mut c_tensors = [std::ptr::null_mut(); 3];
26569 unsafe_torch_err!(atg_native_batch_norm_out(
26570 c_tensors.as_mut_ptr(),
26571 out.c_tensor,
26572 save_mean.c_tensor,
26573 save_invstd.c_tensor,
26574 self.c_tensor,
26575 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
26576 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
26577 running_mean.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
26578 running_var.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
26579 if training { 1 } else { 0 },
26580 momentum,
26581 eps
26582 ));
26583 Ok((
26584 Tensor { c_tensor: c_tensors[0] },
26585 Tensor { c_tensor: c_tensors[1] },
26586 Tensor { c_tensor: c_tensors[2] },
26587 ))
26588 }
26589
26590 pub fn f_native_channel_shuffle(&self, groups: i64) -> Result<Tensor, TchError> {
26591 let mut c_tensors = [std::ptr::null_mut(); 1];
26592 unsafe_torch_err!(atg_native_channel_shuffle(
26593 c_tensors.as_mut_ptr(),
26594 self.c_tensor,
26595 groups
26596 ));
26597 Ok(Tensor { c_tensor: c_tensors[0] })
26598 }
26599
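    /// Low-level dropout primitive: zeroes elements of `self` with
    /// probability `p` when `train` is true and returns both the result and
    /// the boolean keep-mask.
    ///
    /// A minimal usage sketch, fenced as `ignore`; `xs` is an assumed float
    /// tensor:
    ///
    /// ```ignore
    /// let (dropped, mask) = xs.f_native_dropout(0.5, true)?;
    /// ```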
26600 pub fn f_native_dropout(&self, p: f64, train: bool) -> Result<(Tensor, Tensor), TchError> {
26601 let mut c_tensors = [std::ptr::null_mut(); 2];
26602 unsafe_torch_err!(atg_native_dropout(
26603 c_tensors.as_mut_ptr(),
26604 self.c_tensor,
26605 p,
26606 if train { 1 } else { 0 }
26607 ));
26608 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
26609 }
26610
26611 pub fn f_native_dropout_backward(
26612 grad_output: &Tensor,
26613 mask: &Tensor,
26614 scale: f64,
26615 ) -> Result<Tensor, TchError> {
26616 let mut c_tensors = [std::ptr::null_mut(); 1];
26617 unsafe_torch_err!(atg_native_dropout_backward(
26618 c_tensors.as_mut_ptr(),
26619 grad_output.c_tensor,
26620 mask.c_tensor,
26621 scale
26622 ));
26623 Ok(Tensor { c_tensor: c_tensors[0] })
26624 }
26625
26626 pub fn f_native_dropout_backward_out(
26627 out: &Tensor,
26628 grad_output: &Tensor,
26629 mask: &Tensor,
26630 scale: f64,
26631 ) -> Result<Tensor, TchError> {
26632 let mut c_tensors = [std::ptr::null_mut(); 1];
26633 unsafe_torch_err!(atg_native_dropout_backward_out(
26634 c_tensors.as_mut_ptr(),
26635 out.c_tensor,
26636 grad_output.c_tensor,
26637 mask.c_tensor,
26638 scale
26639 ));
26640 Ok(Tensor { c_tensor: c_tensors[0] })
26641 }
26642
26643 pub fn f_native_dropout_out(
26644 &self,
26645 out0: &Tensor,
26646 out1: &Tensor,
26647 p: f64,
26648 train: bool,
26649 ) -> Result<(Tensor, Tensor), TchError> {
26650 let mut c_tensors = [std::ptr::null_mut(); 2];
26651 unsafe_torch_err!(atg_native_dropout_out(
26652 c_tensors.as_mut_ptr(),
26653 out0.c_tensor,
26654 out1.c_tensor,
26655 self.c_tensor,
26656 p,
26657 if train { 1 } else { 0 }
26658 ));
26659 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
26660 }
26661
26662 pub fn f_native_group_norm<T: Borrow<Tensor>>(
26663 &self,
26664 weight: Option<T>,
26665 bias: Option<T>,
26666 n: i64,
26667 c: i64,
26668 hxw: i64,
26669 group: i64,
26670 eps: f64,
26671 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
26672 let mut c_tensors = [std::ptr::null_mut(); 3];
26673 unsafe_torch_err!(atg_native_group_norm(
26674 c_tensors.as_mut_ptr(),
26675 self.c_tensor,
26676 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
26677 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
26678 n,
26679 c,
26680 hxw,
26681 group,
26682 eps
26683 ));
26684 Ok((
26685 Tensor { c_tensor: c_tensors[0] },
26686 Tensor { c_tensor: c_tensors[1] },
26687 Tensor { c_tensor: c_tensors[2] },
26688 ))
26689 }
26690
26691 pub fn f_native_group_norm_out<T: Borrow<Tensor>>(
26692 &self,
26693 out0: &Tensor,
26694 out1: &Tensor,
26695 out2: &Tensor,
26696 weight: Option<T>,
26697 bias: Option<T>,
26698 n: i64,
26699 c: i64,
26700 hxw: i64,
26701 group: i64,
26702 eps: f64,
26703 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
26704 let mut c_tensors = [std::ptr::null_mut(); 3];
26705 unsafe_torch_err!(atg_native_group_norm_out(
26706 c_tensors.as_mut_ptr(),
26707 out0.c_tensor,
26708 out1.c_tensor,
26709 out2.c_tensor,
26710 self.c_tensor,
26711 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
26712 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
26713 n,
26714 c,
26715 hxw,
26716 group,
26717 eps
26718 ));
26719 Ok((
26720 Tensor { c_tensor: c_tensors[0] },
26721 Tensor { c_tensor: c_tensors[1] },
26722 Tensor { c_tensor: c_tensors[2] },
26723 ))
26724 }
26725
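    /// Low-level layer-normalization primitive. Normalizes over the trailing
    /// dimensions given by `normalized_shape` and returns
    /// `(output, mean, rstd)`, where `mean` and `rstd` are the per-sample
    /// statistics used for the backward pass.
    ///
    /// A minimal usage sketch, fenced as `ignore`; `xs`, `weight` and `bias`
    /// are assumed tensors (the last two of shape `[128]`), and the `&[i64]`
    /// argument form is an illustrative assumption:
    ///
    /// ```ignore
    /// let (out, mean, rstd) =
    ///     xs.f_native_layer_norm(&[128i64][..], Some(&weight), Some(&bias), 1e-5)?;
    /// ```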
26726 pub fn f_native_layer_norm<T: Borrow<Tensor>>(
26727 &self,
26728 normalized_shape: impl IntList,
26729 weight: Option<T>,
26730 bias: Option<T>,
26731 eps: f64,
26732 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
26733 let mut c_tensors = [std::ptr::null_mut(); 3];
26734 unsafe_torch_err!(atg_native_layer_norm(
26735 c_tensors.as_mut_ptr(),
26736 self.c_tensor,
26737 normalized_shape.as_ptr(),
26738 normalized_shape.len_i32(),
26739 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
26740 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
26741 eps
26742 ));
26743 Ok((
26744 Tensor { c_tensor: c_tensors[0] },
26745 Tensor { c_tensor: c_tensors[1] },
26746 Tensor { c_tensor: c_tensors[2] },
26747 ))
26748 }
26749
26750 pub fn f_native_layer_norm_out<T: Borrow<Tensor>>(
26751 &self,
26752 out0: &Tensor,
26753 out1: &Tensor,
26754 out2: &Tensor,
26755 normalized_shape: impl IntList,
26756 weight: Option<T>,
26757 bias: Option<T>,
26758 eps: f64,
26759 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
26760 let mut c_tensors = [std::ptr::null_mut(); 3];
26761 unsafe_torch_err!(atg_native_layer_norm_out(
26762 c_tensors.as_mut_ptr(),
26763 out0.c_tensor,
26764 out1.c_tensor,
26765 out2.c_tensor,
26766 self.c_tensor,
26767 normalized_shape.as_ptr(),
26768 normalized_shape.len_i32(),
26769 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
26770 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
26771 eps
26772 ));
26773 Ok((
26774 Tensor { c_tensor: c_tensors[0] },
26775 Tensor { c_tensor: c_tensors[1] },
26776 Tensor { c_tensor: c_tensors[2] },
26777 ))
26778 }
26779
26780 pub fn f_native_norm(&self) -> Result<Tensor, TchError> {
26781 let mut c_tensors = [std::ptr::null_mut(); 1];
26782 unsafe_torch_err!(atg_native_norm(c_tensors.as_mut_ptr(), self.c_tensor));
26783 Ok(Tensor { c_tensor: c_tensors[0] })
26784 }
26785
26786 pub fn f_native_norm_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
26787 let mut c_tensors = [std::ptr::null_mut(); 1];
26788 unsafe_torch_err!(atg_native_norm_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
26789 Ok(Tensor { c_tensor: c_tensors[0] })
26790 }
26791
26792 pub fn f_native_norm_scalaropt_dim_dtype<S: Into<Scalar>>(
26793 &self,
26794 p: S,
26795 dim: impl IntList,
26796 keepdim: bool,
26797 dtype: impl Into<Option<Kind>>,
26798 ) -> Result<Tensor, TchError> {
26799 let mut c_tensors = [std::ptr::null_mut(); 1];
26800 unsafe_torch_err!(atg_native_norm_scalaropt_dim_dtype(
26801 c_tensors.as_mut_ptr(),
26802 self.c_tensor,
26803 p.into().c_scalar,
26804 dim.as_ptr(),
26805 dim.len_i32(),
26806 if keepdim { 1 } else { 0 },
26807 dtype.into().map_or(-1, |s| s.c_int())
26808 ));
26809 Ok(Tensor { c_tensor: c_tensors[0] })
26810 }
26811
26812 pub fn f_native_norm_scalaropt_dim_dtype_out<S: Into<Scalar>>(
26813 &self,
26814 out: &Tensor,
26815 p: S,
26816 dim: impl IntList,
26817 keepdim: bool,
26818 dtype: impl Into<Option<Kind>>,
26819 ) -> Result<Tensor, TchError> {
26820 let mut c_tensors = [std::ptr::null_mut(); 1];
26821 unsafe_torch_err!(atg_native_norm_scalaropt_dim_dtype_out(
26822 c_tensors.as_mut_ptr(),
26823 out.c_tensor,
26824 self.c_tensor,
26825 p.into().c_scalar,
26826 dim.as_ptr(),
26827 dim.len_i32(),
26828 if keepdim { 1 } else { 0 },
26829 dtype.into().map_or(-1, |s| s.c_int())
26830 ));
26831 Ok(Tensor { c_tensor: c_tensors[0] })
26832 }
26833
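    /// Element-wise "not equal" comparison against a scalar, returning a
    /// boolean tensor.
    ///
    /// A minimal usage sketch, fenced as `ignore`; `Tensor::from_slice` is an
    /// illustrative assumption:
    ///
    /// ```ignore
    /// let xs = Tensor::from_slice(&[1i64, 2, 3]);
    /// let mask = xs.f_ne(2)?; // [true, false, true]
    /// ```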
26834 pub fn f_ne<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
26835 let mut c_tensors = [std::ptr::null_mut(); 1];
26836 unsafe_torch_err!(atg_ne(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
26837 Ok(Tensor { c_tensor: c_tensors[0] })
26838 }
26839
26840 pub fn f_ne_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
26841 let mut c_tensors = [std::ptr::null_mut(); 1];
26842 unsafe_torch_err!(atg_ne_(c_tensors.as_mut_ptr(), self.c_tensor, other.into().c_scalar));
26843 Ok(Tensor { c_tensor: c_tensors[0] })
26844 }
26845
26846 pub fn f_ne_scalar_out<S: Into<Scalar>>(
26847 &self,
26848 out: &Tensor,
26849 other: S,
26850 ) -> Result<Tensor, TchError> {
26851 let mut c_tensors = [std::ptr::null_mut(); 1];
26852 unsafe_torch_err!(atg_ne_scalar_out(
26853 c_tensors.as_mut_ptr(),
26854 out.c_tensor,
26855 self.c_tensor,
26856 other.into().c_scalar
26857 ));
26858 Ok(Tensor { c_tensor: c_tensors[0] })
26859 }
26860
26861 pub fn f_ne_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
26862 let mut c_tensors = [std::ptr::null_mut(); 1];
26863 unsafe_torch_err!(atg_ne_tensor(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
26864 Ok(Tensor { c_tensor: c_tensors[0] })
26865 }
26866
26867 pub fn f_ne_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
26868 let mut c_tensors = [std::ptr::null_mut(); 1];
26869 unsafe_torch_err!(atg_ne_tensor_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
26870 Ok(Tensor { c_tensor: c_tensors[0] })
26871 }
26872
26873 pub fn f_ne_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
26874 let mut c_tensors = [std::ptr::null_mut(); 1];
26875 unsafe_torch_err!(atg_ne_tensor_out(
26876 c_tensors.as_mut_ptr(),
26877 out.c_tensor,
26878 self.c_tensor,
26879 other.c_tensor
26880 ));
26881 Ok(Tensor { c_tensor: c_tensors[0] })
26882 }
26883
26884 pub fn f_neg(&self) -> Result<Tensor, TchError> {
26885 let mut c_tensors = [std::ptr::null_mut(); 1];
26886 unsafe_torch_err!(atg_neg(c_tensors.as_mut_ptr(), self.c_tensor));
26887 Ok(Tensor { c_tensor: c_tensors[0] })
26888 }
26889
26890 pub fn f_neg_(&mut self) -> Result<Tensor, TchError> {
26891 let mut c_tensors = [std::ptr::null_mut(); 1];
26892 unsafe_torch_err!(atg_neg_(c_tensors.as_mut_ptr(), self.c_tensor));
26893 Ok(Tensor { c_tensor: c_tensors[0] })
26894 }
26895
26896 pub fn f_neg_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
26897 let mut c_tensors = [std::ptr::null_mut(); 1];
26898 unsafe_torch_err!(atg_neg_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
26899 Ok(Tensor { c_tensor: c_tensors[0] })
26900 }
26901
26902 pub fn f_negative(&self) -> Result<Tensor, TchError> {
26903 let mut c_tensors = [std::ptr::null_mut(); 1];
26904 unsafe_torch_err!(atg_negative(c_tensors.as_mut_ptr(), self.c_tensor));
26905 Ok(Tensor { c_tensor: c_tensors[0] })
26906 }
26907
26908 pub fn f_negative_(&mut self) -> Result<Tensor, TchError> {
26909 let mut c_tensors = [std::ptr::null_mut(); 1];
26910 unsafe_torch_err!(atg_negative_(c_tensors.as_mut_ptr(), self.c_tensor));
26911 Ok(Tensor { c_tensor: c_tensors[0] })
26912 }
26913
26914 pub fn f_negative_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
26915 let mut c_tensors = [std::ptr::null_mut(); 1];
26916 unsafe_torch_err!(atg_negative_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
26917 Ok(Tensor { c_tensor: c_tensors[0] })
26918 }
26919
26920 pub fn f_nested_to_padded_tensor(
26921 &self,
26922 padding: f64,
26923 output_size: impl IntListOption,
26924 ) -> Result<Tensor, TchError> {
26925 let mut c_tensors = [std::ptr::null_mut(); 1];
26926 unsafe_torch_err!(atg_nested_to_padded_tensor(
26927 c_tensors.as_mut_ptr(),
26928 self.c_tensor,
26929 padding,
26930 output_size.as_ptr(),
26931 output_size.len_i32()
26932 ));
26933 Ok(Tensor { c_tensor: c_tensors[0] })
26934 }
26935
26936 pub fn f_new_empty(
26937 &self,
26938 size: impl IntList,
26939 options: (Kind, Device),
26940 ) -> Result<Tensor, TchError> {
26941 let mut c_tensors = [std::ptr::null_mut(); 1];
26942 unsafe_torch_err!(atg_new_empty(
26943 c_tensors.as_mut_ptr(),
26944 self.c_tensor,
26945 size.as_ptr(),
26946 size.len_i32(),
26947 options.0.c_int(),
26948 options.1.c_int()
26949 ));
26950 Ok(Tensor { c_tensor: c_tensors[0] })
26951 }
26952
26953 pub fn f_new_empty_out(&self, out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
26954 let mut c_tensors = [std::ptr::null_mut(); 1];
26955 unsafe_torch_err!(atg_new_empty_out(
26956 c_tensors.as_mut_ptr(),
26957 out.c_tensor,
26958 self.c_tensor,
26959 size.as_ptr(),
26960 size.len_i32()
26961 ));
26962 Ok(Tensor { c_tensor: c_tensors[0] })
26963 }
26964
26965 pub fn f_new_empty_strided(
26966 &self,
26967 size: impl IntList,
26968 stride: impl IntList,
26969 options: (Kind, Device),
26970 ) -> Result<Tensor, TchError> {
26971 let mut c_tensors = [std::ptr::null_mut(); 1];
26972 unsafe_torch_err!(atg_new_empty_strided(
26973 c_tensors.as_mut_ptr(),
26974 self.c_tensor,
26975 size.as_ptr(),
26976 size.len_i32(),
26977 stride.as_ptr(),
26978 stride.len_i32(),
26979 options.0.c_int(),
26980 options.1.c_int()
26981 ));
26982 Ok(Tensor { c_tensor: c_tensors[0] })
26983 }
26984
26985 pub fn f_new_empty_strided_out(
26986 &self,
26987 out: &Tensor,
26988 size: impl IntList,
26989 stride: impl IntList,
26990 ) -> Result<Tensor, TchError> {
26991 let mut c_tensors = [std::ptr::null_mut(); 1];
26992 unsafe_torch_err!(atg_new_empty_strided_out(
26993 c_tensors.as_mut_ptr(),
26994 out.c_tensor,
26995 self.c_tensor,
26996 size.as_ptr(),
26997 size.len_i32(),
26998 stride.as_ptr(),
26999 stride.len_i32()
27000 ));
27001 Ok(Tensor { c_tensor: c_tensors[0] })
27002 }
27003
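    /// Creates a new tensor of the given `size`, filled with `fill_value` and
    /// using the supplied kind and device.
    ///
    /// A minimal usage sketch, fenced as `ignore`; `xs` and the `&[i64]`
    /// argument form are illustrative assumptions:
    ///
    /// ```ignore
    /// let filled = xs.f_new_full(&[2i64, 3][..], 7, (Kind::Int64, Device::Cpu))?;
    /// ```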
27004 pub fn f_new_full<S: Into<Scalar>>(
27005 &self,
27006 size: impl IntList,
27007 fill_value: S,
27008 options: (Kind, Device),
27009 ) -> Result<Tensor, TchError> {
27010 let mut c_tensors = [std::ptr::null_mut(); 1];
27011 unsafe_torch_err!(atg_new_full(
27012 c_tensors.as_mut_ptr(),
27013 self.c_tensor,
27014 size.as_ptr(),
27015 size.len_i32(),
27016 fill_value.into().c_scalar,
27017 options.0.c_int(),
27018 options.1.c_int()
27019 ));
27020 Ok(Tensor { c_tensor: c_tensors[0] })
27021 }
27022
27023 pub fn f_new_full_out<S: Into<Scalar>>(
27024 &self,
27025 out: &Tensor,
27026 size: impl IntList,
27027 fill_value: S,
27028 ) -> Result<Tensor, TchError> {
27029 let mut c_tensors = [std::ptr::null_mut(); 1];
27030 unsafe_torch_err!(atg_new_full_out(
27031 c_tensors.as_mut_ptr(),
27032 out.c_tensor,
27033 self.c_tensor,
27034 size.as_ptr(),
27035 size.len_i32(),
27036 fill_value.into().c_scalar
27037 ));
27038 Ok(Tensor { c_tensor: c_tensors[0] })
27039 }
27040
27041 pub fn f_new_ones(
27042 &self,
27043 size: impl IntList,
27044 options: (Kind, Device),
27045 ) -> Result<Tensor, TchError> {
27046 let mut c_tensors = [std::ptr::null_mut(); 1];
27047 unsafe_torch_err!(atg_new_ones(
27048 c_tensors.as_mut_ptr(),
27049 self.c_tensor,
27050 size.as_ptr(),
27051 size.len_i32(),
27052 options.0.c_int(),
27053 options.1.c_int()
27054 ));
27055 Ok(Tensor { c_tensor: c_tensors[0] })
27056 }
27057
27058 pub fn f_new_ones_out(&self, out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
27059 let mut c_tensors = [std::ptr::null_mut(); 1];
27060 unsafe_torch_err!(atg_new_ones_out(
27061 c_tensors.as_mut_ptr(),
27062 out.c_tensor,
27063 self.c_tensor,
27064 size.as_ptr(),
27065 size.len_i32()
27066 ));
27067 Ok(Tensor { c_tensor: c_tensors[0] })
27068 }
27069
27070 pub fn f_new_zeros(
27071 &self,
27072 size: impl IntList,
27073 options: (Kind, Device),
27074 ) -> Result<Tensor, TchError> {
27075 let mut c_tensors = [std::ptr::null_mut(); 1];
27076 unsafe_torch_err!(atg_new_zeros(
27077 c_tensors.as_mut_ptr(),
27078 self.c_tensor,
27079 size.as_ptr(),
27080 size.len_i32(),
27081 options.0.c_int(),
27082 options.1.c_int()
27083 ));
27084 Ok(Tensor { c_tensor: c_tensors[0] })
27085 }
27086
27087 pub fn f_new_zeros_out(&self, out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
27088 let mut c_tensors = [std::ptr::null_mut(); 1];
27089 unsafe_torch_err!(atg_new_zeros_out(
27090 c_tensors.as_mut_ptr(),
27091 out.c_tensor,
27092 self.c_tensor,
27093 size.as_ptr(),
27094 size.len_i32()
27095 ));
27096 Ok(Tensor { c_tensor: c_tensors[0] })
27097 }
27098
27099 pub fn f_nextafter(&self, other: &Tensor) -> Result<Tensor, TchError> {
27100 let mut c_tensors = [std::ptr::null_mut(); 1];
27101 unsafe_torch_err!(atg_nextafter(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
27102 Ok(Tensor { c_tensor: c_tensors[0] })
27103 }
27104
27105 pub fn f_nextafter_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
27106 let mut c_tensors = [std::ptr::null_mut(); 1];
27107 unsafe_torch_err!(atg_nextafter_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
27108 Ok(Tensor { c_tensor: c_tensors[0] })
27109 }
27110
27111 pub fn f_nextafter_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
27112 let mut c_tensors = [std::ptr::null_mut(); 1];
27113 unsafe_torch_err!(atg_nextafter_out(
27114 c_tensors.as_mut_ptr(),
27115 out.c_tensor,
27116 self.c_tensor,
27117 other.c_tensor
27118 ));
27119 Ok(Tensor { c_tensor: c_tensors[0] })
27120 }
27121
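    /// Negative log-likelihood loss. `self` is expected to contain
    /// log-probabilities of shape `(N, C)` and `target` class indices of
    /// shape `(N,)`; `weight` optionally rescales each class and targets
    /// equal to `ignore_index` are skipped.
    ///
    /// A minimal usage sketch, fenced as `ignore`; `log_probs` and `targets`
    /// are assumed tensors of the shapes above:
    ///
    /// ```ignore
    /// let loss = log_probs.f_nll_loss(
    ///     &targets,
    ///     None::<Tensor>,         // no per-class weights
    ///     crate::Reduction::Mean,
    ///     -100,                   // PyTorch's default ignore_index
    /// )?;
    /// ```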
27122 pub fn f_nll_loss<T: Borrow<Tensor>>(
27123 &self,
27124 target: &Tensor,
27125 weight: Option<T>,
27126 reduction: crate::Reduction,
27127 ignore_index: i64,
27128 ) -> Result<Tensor, TchError> {
27129 let mut c_tensors = [std::ptr::null_mut(); 1];
27130 unsafe_torch_err!(atg_nll_loss(
27131 c_tensors.as_mut_ptr(),
27132 self.c_tensor,
27133 target.c_tensor,
27134 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
27135 reduction.to_int(),
27136 ignore_index
27137 ));
27138 Ok(Tensor { c_tensor: c_tensors[0] })
27139 }
27140
27141 pub fn f_nll_loss2d<T: Borrow<Tensor>>(
27142 &self,
27143 target: &Tensor,
27144 weight: Option<T>,
27145 reduction: crate::Reduction,
27146 ignore_index: i64,
27147 ) -> Result<Tensor, TchError> {
27148 let mut c_tensors = [std::ptr::null_mut(); 1];
27149 unsafe_torch_err!(atg_nll_loss2d(
27150 c_tensors.as_mut_ptr(),
27151 self.c_tensor,
27152 target.c_tensor,
27153 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
27154 reduction.to_int(),
27155 ignore_index
27156 ));
27157 Ok(Tensor { c_tensor: c_tensors[0] })
27158 }
27159
27160 pub fn f_nll_loss2d_backward<T: Borrow<Tensor>>(
27161 &self,
27162 grad_output: &Tensor,
27163 target: &Tensor,
27164 weight: Option<T>,
27165 reduction: crate::Reduction,
27166 ignore_index: i64,
27167 total_weight: &Tensor,
27168 ) -> Result<Tensor, TchError> {
27169 let mut c_tensors = [std::ptr::null_mut(); 1];
27170 unsafe_torch_err!(atg_nll_loss2d_backward(
27171 c_tensors.as_mut_ptr(),
27172 grad_output.c_tensor,
27173 self.c_tensor,
27174 target.c_tensor,
27175 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
27176 reduction.to_int(),
27177 ignore_index,
27178 total_weight.c_tensor
27179 ));
27180 Ok(Tensor { c_tensor: c_tensors[0] })
27181 }
27182
27183 pub fn f_nll_loss2d_backward_grad_input<T: Borrow<Tensor>>(
27184 &self,
27185 grad_input: &Tensor,
27186 grad_output: &Tensor,
27187 target: &Tensor,
27188 weight: Option<T>,
27189 reduction: crate::Reduction,
27190 ignore_index: i64,
27191 total_weight: &Tensor,
27192 ) -> Result<Tensor, TchError> {
27193 let mut c_tensors = [std::ptr::null_mut(); 1];
27194 unsafe_torch_err!(atg_nll_loss2d_backward_grad_input(
27195 c_tensors.as_mut_ptr(),
27196 grad_input.c_tensor,
27197 grad_output.c_tensor,
27198 self.c_tensor,
27199 target.c_tensor,
27200 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
27201 reduction.to_int(),
27202 ignore_index,
27203 total_weight.c_tensor
27204 ));
27205 Ok(Tensor { c_tensor: c_tensors[0] })
27206 }
27207
27208 pub fn f_nll_loss2d_out<T: Borrow<Tensor>>(
27209 &self,
27210 out: &Tensor,
27211 target: &Tensor,
27212 weight: Option<T>,
27213 reduction: crate::Reduction,
27214 ignore_index: i64,
27215 ) -> Result<Tensor, TchError> {
27216 let mut c_tensors = [std::ptr::null_mut(); 1];
27217 unsafe_torch_err!(atg_nll_loss2d_out(
27218 c_tensors.as_mut_ptr(),
27219 out.c_tensor,
27220 self.c_tensor,
27221 target.c_tensor,
27222 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
27223 reduction.to_int(),
27224 ignore_index
27225 ));
27226 Ok(Tensor { c_tensor: c_tensors[0] })
27227 }
27228
27229 pub fn f_nll_loss_backward<T: Borrow<Tensor>>(
27230 &self,
27231 grad_output: &Tensor,
27232 target: &Tensor,
27233 weight: Option<T>,
27234 reduction: crate::Reduction,
27235 ignore_index: i64,
27236 total_weight: &Tensor,
27237 ) -> Result<Tensor, TchError> {
27238 let mut c_tensors = [std::ptr::null_mut(); 1];
27239 unsafe_torch_err!(atg_nll_loss_backward(
27240 c_tensors.as_mut_ptr(),
27241 grad_output.c_tensor,
27242 self.c_tensor,
27243 target.c_tensor,
27244 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
27245 reduction.to_int(),
27246 ignore_index,
27247 total_weight.c_tensor
27248 ));
27249 Ok(Tensor { c_tensor: c_tensors[0] })
27250 }
27251
27252 pub fn f_nll_loss_backward_grad_input<T: Borrow<Tensor>>(
27253 &self,
27254 grad_input: &Tensor,
27255 grad_output: &Tensor,
27256 target: &Tensor,
27257 weight: Option<T>,
27258 reduction: crate::Reduction,
27259 ignore_index: i64,
27260 total_weight: &Tensor,
27261 ) -> Result<Tensor, TchError> {
27262 let mut c_tensors = [std::ptr::null_mut(); 1];
27263 unsafe_torch_err!(atg_nll_loss_backward_grad_input(
27264 c_tensors.as_mut_ptr(),
27265 grad_input.c_tensor,
27266 grad_output.c_tensor,
27267 self.c_tensor,
27268 target.c_tensor,
27269 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
27270 reduction.to_int(),
27271 ignore_index,
27272 total_weight.c_tensor
27273 ));
27274 Ok(Tensor { c_tensor: c_tensors[0] })
27275 }
27276
27277 pub fn f_nll_loss_nd<T: Borrow<Tensor>>(
27278 &self,
27279 target: &Tensor,
27280 weight: Option<T>,
27281 reduction: crate::Reduction,
27282 ignore_index: i64,
27283 ) -> Result<Tensor, TchError> {
27284 let mut c_tensors = [std::ptr::null_mut(); 1];
27285 unsafe_torch_err!(atg_nll_loss_nd(
27286 c_tensors.as_mut_ptr(),
27287 self.c_tensor,
27288 target.c_tensor,
27289 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
27290 reduction.to_int(),
27291 ignore_index
27292 ));
27293 Ok(Tensor { c_tensor: c_tensors[0] })
27294 }
27295
27296 pub fn f_nll_loss_out<T: Borrow<Tensor>>(
27297 &self,
27298 out: &Tensor,
27299 target: &Tensor,
27300 weight: Option<T>,
27301 reduction: crate::Reduction,
27302 ignore_index: i64,
27303 ) -> Result<Tensor, TchError> {
27304 let mut c_tensors = [std::ptr::null_mut(); 1];
27305 unsafe_torch_err!(atg_nll_loss_out(
27306 c_tensors.as_mut_ptr(),
27307 out.c_tensor,
27308 self.c_tensor,
27309 target.c_tensor,
27310 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
27311 reduction.to_int(),
27312 ignore_index
27313 ));
27314 Ok(Tensor { c_tensor: c_tensors[0] })
27315 }
27316
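    /// Returns the indices of the non-zero elements as a 2-D tensor of shape
    /// `(number_of_nonzeros, ndim)`.
    ///
    /// A minimal usage sketch, fenced as `ignore`; `Tensor::from_slice` is an
    /// illustrative assumption:
    ///
    /// ```ignore
    /// let xs = Tensor::from_slice(&[0i64, 3, 0, 7]);
    /// let idx = xs.f_nonzero()?; // shape [2, 1], containing 1 and 3
    /// ```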
27317 pub fn f_nonzero(&self) -> Result<Tensor, TchError> {
27318 let mut c_tensors = [std::ptr::null_mut(); 1];
27319 unsafe_torch_err!(atg_nonzero(c_tensors.as_mut_ptr(), self.c_tensor));
27320 Ok(Tensor { c_tensor: c_tensors[0] })
27321 }
27322
27323 pub fn f_nonzero_numpy(&self) -> Result<Vec<Tensor>, TchError> {
27324 let c_tensors = unsafe_torch_err!(atg_nonzero_numpy(self.c_tensor));
27325 let mut r__ = vec![];
27326 let mut i = 0;
27327 loop {
27328 let c__ = unsafe { *c_tensors.add(i) };
27329 if c__.is_null() {
27330 break;
27331 }
27332 r__.push(Tensor { c_tensor: c__ });
27333 i += 1;
27334 }
27335 unsafe { libc::free(c_tensors as *mut libc::c_void) }
27336 Ok(r__)
27337 }
27338
27339 pub fn f_nonzero_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
27340 let mut c_tensors = [std::ptr::null_mut(); 1];
27341 unsafe_torch_err!(atg_nonzero_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
27342 Ok(Tensor { c_tensor: c_tensors[0] })
27343 }
27344
27345 pub fn f_nonzero_static(&self, size: i64, fill_value: i64) -> Result<Tensor, TchError> {
27346 let mut c_tensors = [std::ptr::null_mut(); 1];
27347 unsafe_torch_err!(atg_nonzero_static(
27348 c_tensors.as_mut_ptr(),
27349 self.c_tensor,
27350 size,
27351 fill_value
27352 ));
27353 Ok(Tensor { c_tensor: c_tensors[0] })
27354 }
27355
27356 pub fn f_nonzero_static_out(
27357 &self,
27358 out: &Tensor,
27359 size: i64,
27360 fill_value: i64,
27361 ) -> Result<Tensor, TchError> {
27362 let mut c_tensors = [std::ptr::null_mut(); 1];
27363 unsafe_torch_err!(atg_nonzero_static_out(
27364 c_tensors.as_mut_ptr(),
27365 out.c_tensor,
27366 self.c_tensor,
27367 size,
27368 fill_value
27369 ));
27370 Ok(Tensor { c_tensor: c_tensors[0] })
27371 }
27372
27373 pub fn f_norm(&self) -> Result<Tensor, TchError> {
27374 let mut c_tensors = [std::ptr::null_mut(); 1];
27375 unsafe_torch_err!(atg_norm(c_tensors.as_mut_ptr(), self.c_tensor));
27376 Ok(Tensor { c_tensor: c_tensors[0] })
27377 }
27378
27379 pub fn f_norm_dtype_out<S: Into<Scalar>>(
27380 &self,
27381 out: &Tensor,
27382 p: S,
27383 dim: impl IntList,
27384 keepdim: bool,
27385 dtype: Kind,
27386 ) -> Result<Tensor, TchError> {
27387 let mut c_tensors = [std::ptr::null_mut(); 1];
27388 unsafe_torch_err!(atg_norm_dtype_out(
27389 c_tensors.as_mut_ptr(),
27390 out.c_tensor,
27391 self.c_tensor,
27392 p.into().c_scalar,
27393 dim.as_ptr(),
27394 dim.len_i32(),
27395 if keepdim { 1 } else { 0 },
27396 dtype.c_int()
27397 ));
27398 Ok(Tensor { c_tensor: c_tensors[0] })
27399 }
27400
27401 pub fn f_norm_except_dim(v: &Tensor, pow: i64, dim: i64) -> Result<Tensor, TchError> {
27402 let mut c_tensors = [std::ptr::null_mut(); 1];
27403 unsafe_torch_err!(atg_norm_except_dim(c_tensors.as_mut_ptr(), v.c_tensor, pow, dim));
27404 Ok(Tensor { c_tensor: c_tensors[0] })
27405 }
27406
27407 pub fn f_norm_out<S: Into<Scalar>>(
27408 &self,
27409 out: &Tensor,
27410 p: S,
27411 dim: impl IntList,
27412 keepdim: bool,
27413 ) -> Result<Tensor, TchError> {
27414 let mut c_tensors = [std::ptr::null_mut(); 1];
27415 unsafe_torch_err!(atg_norm_out(
27416 c_tensors.as_mut_ptr(),
27417 out.c_tensor,
27418 self.c_tensor,
27419 p.into().c_scalar,
27420 dim.as_ptr(),
27421 dim.len_i32(),
27422 if keepdim { 1 } else { 0 }
27423 ));
27424 Ok(Tensor { c_tensor: c_tensors[0] })
27425 }
27426
27427 pub fn f_norm_scalar_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
27428 let mut c_tensors = [std::ptr::null_mut(); 1];
27429 unsafe_torch_err!(atg_norm_scalar_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
27430 Ok(Tensor { c_tensor: c_tensors[0] })
27431 }
27432
27433 pub fn f_norm_scalaropt_dim<S: Into<Scalar>>(
27434 &self,
27435 p: S,
27436 dim: impl IntList,
27437 keepdim: bool,
27438 ) -> Result<Tensor, TchError> {
27439 let mut c_tensors = [std::ptr::null_mut(); 1];
27440 unsafe_torch_err!(atg_norm_scalaropt_dim(
27441 c_tensors.as_mut_ptr(),
27442 self.c_tensor,
27443 p.into().c_scalar,
27444 dim.as_ptr(),
27445 dim.len_i32(),
27446 if keepdim { 1 } else { 0 }
27447 ));
27448 Ok(Tensor { c_tensor: c_tensors[0] })
27449 }
27450
27451 pub fn f_norm_scalaropt_dim_dtype<S: Into<Scalar>>(
27452 &self,
27453 p: S,
27454 dim: impl IntList,
27455 keepdim: bool,
27456 dtype: Kind,
27457 ) -> Result<Tensor, TchError> {
27458 let mut c_tensors = [std::ptr::null_mut(); 1];
27459 unsafe_torch_err!(atg_norm_scalaropt_dim_dtype(
27460 c_tensors.as_mut_ptr(),
27461 self.c_tensor,
27462 p.into().c_scalar,
27463 dim.as_ptr(),
27464 dim.len_i32(),
27465 if keepdim { 1 } else { 0 },
27466 dtype.c_int()
27467 ));
27468 Ok(Tensor { c_tensor: c_tensors[0] })
27469 }
27470
27471 pub fn f_norm_scalaropt_dtype<S: Into<Scalar>>(
27472 &self,
27473 p: S,
27474 dtype: Kind,
27475 ) -> Result<Tensor, TchError> {
27476 let mut c_tensors = [std::ptr::null_mut(); 1];
27477 unsafe_torch_err!(atg_norm_scalaropt_dtype(
27478 c_tensors.as_mut_ptr(),
27479 self.c_tensor,
27480 p.into().c_scalar,
27481 dtype.c_int()
27482 ));
27483 Ok(Tensor { c_tensor: c_tensors[0] })
27484 }
27485
27486 pub fn f_norm_scalaropt_dtype_out<S: Into<Scalar>>(
27487 &self,
27488 out: &Tensor,
27489 p: S,
27490 dtype: Kind,
27491 ) -> Result<Tensor, TchError> {
27492 let mut c_tensors = [std::ptr::null_mut(); 1];
27493 unsafe_torch_err!(atg_norm_scalaropt_dtype_out(
27494 c_tensors.as_mut_ptr(),
27495 out.c_tensor,
27496 self.c_tensor,
27497 p.into().c_scalar,
27498 dtype.c_int()
27499 ));
27500 Ok(Tensor { c_tensor: c_tensors[0] })
27501 }
27502
27503 pub fn f_normal_(&mut self, mean: f64, std: f64) -> Result<Tensor, TchError> {
27504 let mut c_tensors = [std::ptr::null_mut(); 1];
27505 unsafe_torch_err!(atg_normal_(c_tensors.as_mut_ptr(), self.c_tensor, mean, std));
27506 Ok(Tensor { c_tensor: c_tensors[0] })
27507 }
27508
27509 pub fn f_normal_functional(&self, mean: f64, std: f64) -> Result<Tensor, TchError> {
27510 let mut c_tensors = [std::ptr::null_mut(); 1];
27511 unsafe_torch_err!(atg_normal_functional(c_tensors.as_mut_ptr(), self.c_tensor, mean, std));
27512 Ok(Tensor { c_tensor: c_tensors[0] })
27513 }
27514
27515 pub fn f_not_equal<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
27516 let mut c_tensors = [std::ptr::null_mut(); 1];
27517 unsafe_torch_err!(atg_not_equal(
27518 c_tensors.as_mut_ptr(),
27519 self.c_tensor,
27520 other.into().c_scalar
27521 ));
27522 Ok(Tensor { c_tensor: c_tensors[0] })
27523 }
27524
27525 pub fn f_not_equal_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
27526 let mut c_tensors = [std::ptr::null_mut(); 1];
27527 unsafe_torch_err!(atg_not_equal_(
27528 c_tensors.as_mut_ptr(),
27529 self.c_tensor,
27530 other.into().c_scalar
27531 ));
27532 Ok(Tensor { c_tensor: c_tensors[0] })
27533 }
27534
27535 pub fn f_not_equal_scalar_out<S: Into<Scalar>>(
27536 &self,
27537 out: &Tensor,
27538 other: S,
27539 ) -> Result<Tensor, TchError> {
27540 let mut c_tensors = [std::ptr::null_mut(); 1];
27541 unsafe_torch_err!(atg_not_equal_scalar_out(
27542 c_tensors.as_mut_ptr(),
27543 out.c_tensor,
27544 self.c_tensor,
27545 other.into().c_scalar
27546 ));
27547 Ok(Tensor { c_tensor: c_tensors[0] })
27548 }
27549
27550 pub fn f_not_equal_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
27551 let mut c_tensors = [std::ptr::null_mut(); 1];
27552 unsafe_torch_err!(atg_not_equal_tensor(
27553 c_tensors.as_mut_ptr(),
27554 self.c_tensor,
27555 other.c_tensor
27556 ));
27557 Ok(Tensor { c_tensor: c_tensors[0] })
27558 }
27559
27560 pub fn f_not_equal_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
27561 let mut c_tensors = [std::ptr::null_mut(); 1];
27562 unsafe_torch_err!(atg_not_equal_tensor_(
27563 c_tensors.as_mut_ptr(),
27564 self.c_tensor,
27565 other.c_tensor
27566 ));
27567 Ok(Tensor { c_tensor: c_tensors[0] })
27568 }
27569
27570 pub fn f_not_equal_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
27571 let mut c_tensors = [std::ptr::null_mut(); 1];
27572 unsafe_torch_err!(atg_not_equal_tensor_out(
27573 c_tensors.as_mut_ptr(),
27574 out.c_tensor,
27575 self.c_tensor,
27576 other.c_tensor
27577 ));
27578 Ok(Tensor { c_tensor: c_tensors[0] })
27579 }
27580
27581 pub fn f_nuclear_norm(&self, keepdim: bool) -> Result<Tensor, TchError> {
27582 let mut c_tensors = [std::ptr::null_mut(); 1];
27583 unsafe_torch_err!(atg_nuclear_norm(
27584 c_tensors.as_mut_ptr(),
27585 self.c_tensor,
27586 if keepdim { 1 } else { 0 }
27587 ));
27588 Ok(Tensor { c_tensor: c_tensors[0] })
27589 }
27590
27591 pub fn f_nuclear_norm_dim(&self, dim: impl IntList, keepdim: bool) -> Result<Tensor, TchError> {
27592 let mut c_tensors = [std::ptr::null_mut(); 1];
27593 unsafe_torch_err!(atg_nuclear_norm_dim(
27594 c_tensors.as_mut_ptr(),
27595 self.c_tensor,
27596 dim.as_ptr(),
27597 dim.len_i32(),
27598 if keepdim { 1 } else { 0 }
27599 ));
27600 Ok(Tensor { c_tensor: c_tensors[0] })
27601 }
27602
27603 pub fn f_nuclear_norm_dim_out(
27604 &self,
27605 out: &Tensor,
27606 dim: impl IntList,
27607 keepdim: bool,
27608 ) -> Result<Tensor, TchError> {
27609 let mut c_tensors = [std::ptr::null_mut(); 1];
27610 unsafe_torch_err!(atg_nuclear_norm_dim_out(
27611 c_tensors.as_mut_ptr(),
27612 out.c_tensor,
27613 self.c_tensor,
27614 dim.as_ptr(),
27615 dim.len_i32(),
27616 if keepdim { 1 } else { 0 }
27617 ));
27618 Ok(Tensor { c_tensor: c_tensors[0] })
27619 }
27620
27621 pub fn f_nuclear_norm_out(&self, out: &Tensor, keepdim: bool) -> Result<Tensor, TchError> {
27622 let mut c_tensors = [std::ptr::null_mut(); 1];
27623 unsafe_torch_err!(atg_nuclear_norm_out(
27624 c_tensors.as_mut_ptr(),
27625 out.c_tensor,
27626 self.c_tensor,
27627 if keepdim { 1 } else { 0 }
27628 ));
27629 Ok(Tensor { c_tensor: c_tensors[0] })
27630 }
27631
27632 pub fn f_numpy_t(&self) -> Result<Tensor, TchError> {
27633 let mut c_tensors = [std::ptr::null_mut(); 1];
27634 unsafe_torch_err!(atg_numpy_t(c_tensors.as_mut_ptr(), self.c_tensor));
27635 Ok(Tensor { c_tensor: c_tensors[0] })
27636 }
27637
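    /// One-hot encodes an integer tensor of class indices, appending a final
    /// dimension of size `num_classes` (use `-1` to infer it from the data).
    ///
    /// A minimal usage sketch, fenced as `ignore`; `Tensor::from_slice` is an
    /// illustrative assumption:
    ///
    /// ```ignore
    /// let labels = Tensor::from_slice(&[0i64, 2, 1]);
    /// let encoded = labels.f_one_hot(3)?; // shape [3, 3]
    /// ```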
27638 pub fn f_one_hot(&self, num_classes: i64) -> Result<Tensor, TchError> {
27639 let mut c_tensors = [std::ptr::null_mut(); 1];
27640 unsafe_torch_err!(atg_one_hot(c_tensors.as_mut_ptr(), self.c_tensor, num_classes));
27641 Ok(Tensor { c_tensor: c_tensors[0] })
27642 }
27643
27644 pub fn f_ones(size: impl IntList, options: (Kind, Device)) -> Result<Tensor, TchError> {
27645 let mut c_tensors = [std::ptr::null_mut(); 1];
27646 unsafe_torch_err!(atg_ones(
27647 c_tensors.as_mut_ptr(),
27648 size.as_ptr(),
27649 size.len_i32(),
27650 options.0.c_int(),
27651 options.1.c_int()
27652 ));
27653 Ok(Tensor { c_tensor: c_tensors[0] })
27654 }
27655
27656 pub fn f_ones_like(&self) -> Result<Tensor, TchError> {
27657 let mut c_tensors = [std::ptr::null_mut(); 1];
27658 unsafe_torch_err!(atg_ones_like(c_tensors.as_mut_ptr(), self.c_tensor));
27659 Ok(Tensor { c_tensor: c_tensors[0] })
27660 }
27661
27662 pub fn f_ones_like_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
27663 let mut c_tensors = [std::ptr::null_mut(); 1];
27664 unsafe_torch_err!(atg_ones_like_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
27665 Ok(Tensor { c_tensor: c_tensors[0] })
27666 }
27667
27668 pub fn f_ones_out(out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
27669 let mut c_tensors = [std::ptr::null_mut(); 1];
27670 unsafe_torch_err!(atg_ones_out(
27671 c_tensors.as_mut_ptr(),
27672 out.c_tensor,
27673 size.as_ptr(),
27674 size.len_i32()
27675 ));
27676 Ok(Tensor { c_tensor: c_tensors[0] })
27677 }
27678
27679 pub fn f_orgqr(&self, input2: &Tensor) -> Result<Tensor, TchError> {
27680 let mut c_tensors = [std::ptr::null_mut(); 1];
27681 unsafe_torch_err!(atg_orgqr(c_tensors.as_mut_ptr(), self.c_tensor, input2.c_tensor));
27682 Ok(Tensor { c_tensor: c_tensors[0] })
27683 }
27684
27685 pub fn f_orgqr_out(&self, out: &Tensor, input2: &Tensor) -> Result<Tensor, TchError> {
27686 let mut c_tensors = [std::ptr::null_mut(); 1];
27687 unsafe_torch_err!(atg_orgqr_out(
27688 c_tensors.as_mut_ptr(),
27689 out.c_tensor,
27690 self.c_tensor,
27691 input2.c_tensor
27692 ));
27693 Ok(Tensor { c_tensor: c_tensors[0] })
27694 }
27695
27696 pub fn f_ormqr(
27697 &self,
27698 input2: &Tensor,
27699 input3: &Tensor,
27700 left: bool,
27701 transpose: bool,
27702 ) -> Result<Tensor, TchError> {
27703 let mut c_tensors = [std::ptr::null_mut(); 1];
27704 unsafe_torch_err!(atg_ormqr(
27705 c_tensors.as_mut_ptr(),
27706 self.c_tensor,
27707 input2.c_tensor,
27708 input3.c_tensor,
27709 if left { 1 } else { 0 },
27710 if transpose { 1 } else { 0 }
27711 ));
27712 Ok(Tensor { c_tensor: c_tensors[0] })
27713 }
27714
27715 pub fn f_ormqr_out(
27716 &self,
27717 out: &Tensor,
27718 input2: &Tensor,
27719 input3: &Tensor,
27720 left: bool,
27721 transpose: bool,
27722 ) -> Result<Tensor, TchError> {
27723 let mut c_tensors = [std::ptr::null_mut(); 1];
27724 unsafe_torch_err!(atg_ormqr_out(
27725 c_tensors.as_mut_ptr(),
27726 out.c_tensor,
27727 self.c_tensor,
27728 input2.c_tensor,
27729 input3.c_tensor,
27730 if left { 1 } else { 0 },
27731 if transpose { 1 } else { 0 }
27732 ));
27733 Ok(Tensor { c_tensor: c_tensors[0] })
27734 }
27735
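    /// Outer product of two 1-D tensors: the result has shape
    /// `(self.len, vec2.len)`.
    ///
    /// A minimal usage sketch, fenced as `ignore`; `Tensor::from_slice` is an
    /// illustrative assumption:
    ///
    /// ```ignore
    /// let u = Tensor::from_slice(&[1.0f32, 2.0]);
    /// let v = Tensor::from_slice(&[3.0f32, 4.0, 5.0]);
    /// let m = u.f_outer(&v)?; // shape [2, 3]
    /// ```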
27736 pub fn f_outer(&self, vec2: &Tensor) -> Result<Tensor, TchError> {
27737 let mut c_tensors = [std::ptr::null_mut(); 1];
27738 unsafe_torch_err!(atg_outer(c_tensors.as_mut_ptr(), self.c_tensor, vec2.c_tensor));
27739 Ok(Tensor { c_tensor: c_tensors[0] })
27740 }
27741
27742 pub fn f_outer_out(&self, out: &Tensor, vec2: &Tensor) -> Result<Tensor, TchError> {
27743 let mut c_tensors = [std::ptr::null_mut(); 1];
27744 unsafe_torch_err!(atg_outer_out(
27745 c_tensors.as_mut_ptr(),
27746 out.c_tensor,
27747 self.c_tensor,
27748 vec2.c_tensor
27749 ));
27750 Ok(Tensor { c_tensor: c_tensors[0] })
27751 }
27752
27753 pub fn f_output_nr(&self) -> Result<i64, TchError> {
27754 let return_;
27755 unsafe_torch_err!(return_ = atg_output_nr(self.c_tensor));
27756 Ok(return_)
27757 }
27758
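    /// Pads `self`. `pad` lists `(left, right)` amounts per dimension,
    /// starting from the last dimension; `mode` is one of `"constant"`,
    /// `"reflect"`, `"replicate"` or `"circular"`, and `value` is the fill
    /// value for constant padding (pass `None` for the default).
    ///
    /// A minimal usage sketch, fenced as `ignore`; `xs` and the `&[i64]`
    /// argument form are illustrative assumptions:
    ///
    /// ```ignore
    /// // Pad the last dimension with one element on each side, filled with 0.0.
    /// let padded = xs.f_pad(&[1i64, 1][..], "constant", 0.0)?;
    /// ```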
27759 pub fn f_pad(
27760 &self,
27761 pad: impl IntList,
27762 mode: &str,
27763 value: impl Into<Option<f64>>,
27764 ) -> Result<Tensor, TchError> {
27765 let value = value.into();
27766 let mut c_tensors = [std::ptr::null_mut(); 1];
27767 unsafe_torch_err!(atg_pad(
27768 c_tensors.as_mut_ptr(),
27769 self.c_tensor,
27770 pad.as_ptr(),
27771 pad.len_i32(),
27772 mode.as_ptr(),
27773 mode.len() as i32,
27774 value.unwrap_or(std::f64::NAN),
27775 value.is_none() as i8
27776 ));
27777 Ok(Tensor { c_tensor: c_tensors[0] })
27778 }
27779
27780 pub fn f_pad_sequence<T: Borrow<Tensor>>(
27781 sequences: &[T],
27782 batch_first: bool,
27783 padding_value: f64,
27784 padding_side: &str,
27785 ) -> Result<Tensor, TchError> {
27786 let mut c_tensors = [std::ptr::null_mut(); 1];
27787 unsafe_torch_err!(atg_pad_sequence(
27788 c_tensors.as_mut_ptr(),
27789 ptr_list(sequences).as_ptr(),
27790 sequences.len() as i32,
27791 if batch_first { 1 } else { 0 },
27792 padding_value,
27793 padding_side.as_ptr(),
27794 padding_side.len() as i32
27795 ));
27796 Ok(Tensor { c_tensor: c_tensors[0] })
27797 }
27798
27799 pub fn f_pairwise_distance(
27800 x1: &Tensor,
27801 x2: &Tensor,
27802 p: f64,
27803 eps: f64,
27804 keepdim: bool,
27805 ) -> Result<Tensor, TchError> {
27806 let mut c_tensors = [std::ptr::null_mut(); 1];
27807 unsafe_torch_err!(atg_pairwise_distance(
27808 c_tensors.as_mut_ptr(),
27809 x1.c_tensor,
27810 x2.c_tensor,
27811 p,
27812 eps,
27813 if keepdim { 1 } else { 0 }
27814 ));
27815 Ok(Tensor { c_tensor: c_tensors[0] })
27816 }
27817
27818 pub fn f_pdist(&self, p: f64) -> Result<Tensor, TchError> {
27819 let mut c_tensors = [std::ptr::null_mut(); 1];
27820 unsafe_torch_err!(atg_pdist(c_tensors.as_mut_ptr(), self.c_tensor, p));
27821 Ok(Tensor { c_tensor: c_tensors[0] })
27822 }
27823
27824 pub fn f_permute(&self, dims: impl IntList) -> Result<Tensor, TchError> {
27825 let mut c_tensors = [std::ptr::null_mut(); 1];
27826 unsafe_torch_err!(atg_permute(
27827 c_tensors.as_mut_ptr(),
27828 self.c_tensor,
27829 dims.as_ptr(),
27830 dims.len_i32()
27831 ));
27832 Ok(Tensor { c_tensor: c_tensors[0] })
27833 }
27834
27835 pub fn f_permute_copy(&self, dims: impl IntList) -> Result<Tensor, TchError> {
27836 let mut c_tensors = [std::ptr::null_mut(); 1];
27837 unsafe_torch_err!(atg_permute_copy(
27838 c_tensors.as_mut_ptr(),
27839 self.c_tensor,
27840 dims.as_ptr(),
27841 dims.len_i32()
27842 ));
27843 Ok(Tensor { c_tensor: c_tensors[0] })
27844 }
27845
27846 pub fn f_permute_copy_out(&self, out: &Tensor, dims: impl IntList) -> Result<Tensor, TchError> {
27847 let mut c_tensors = [std::ptr::null_mut(); 1];
27848 unsafe_torch_err!(atg_permute_copy_out(
27849 c_tensors.as_mut_ptr(),
27850 out.c_tensor,
27851 self.c_tensor,
27852 dims.as_ptr(),
27853 dims.len_i32()
27854 ));
27855 Ok(Tensor { c_tensor: c_tensors[0] })
27856 }
27857
27858 pub fn f_pin_memory(&self, device: Device) -> Result<Tensor, TchError> {
27859 let mut c_tensors = [std::ptr::null_mut(); 1];
27860 unsafe_torch_err!(atg_pin_memory(c_tensors.as_mut_ptr(), self.c_tensor, device.c_int()));
27861 Ok(Tensor { c_tensor: c_tensors[0] })
27862 }
27863
27864 pub fn f_pinverse(&self, rcond: f64) -> Result<Tensor, TchError> {
27865 let mut c_tensors = [std::ptr::null_mut(); 1];
27866 unsafe_torch_err!(atg_pinverse(c_tensors.as_mut_ptr(), self.c_tensor, rcond));
27867 Ok(Tensor { c_tensor: c_tensors[0] })
27868 }
27869
27870 pub fn f_pixel_shuffle(&self, upscale_factor: i64) -> Result<Tensor, TchError> {
27871 let mut c_tensors = [std::ptr::null_mut(); 1];
27872 unsafe_torch_err!(atg_pixel_shuffle(c_tensors.as_mut_ptr(), self.c_tensor, upscale_factor));
27873 Ok(Tensor { c_tensor: c_tensors[0] })
27874 }
27875
27876 pub fn f_pixel_shuffle_out(
27877 &self,
27878 out: &Tensor,
27879 upscale_factor: i64,
27880 ) -> Result<Tensor, TchError> {
27881 let mut c_tensors = [std::ptr::null_mut(); 1];
27882 unsafe_torch_err!(atg_pixel_shuffle_out(
27883 c_tensors.as_mut_ptr(),
27884 out.c_tensor,
27885 self.c_tensor,
27886 upscale_factor
27887 ));
27888 Ok(Tensor { c_tensor: c_tensors[0] })
27889 }
27890
27891 pub fn f_pixel_unshuffle(&self, downscale_factor: i64) -> Result<Tensor, TchError> {
27892 let mut c_tensors = [std::ptr::null_mut(); 1];
27893 unsafe_torch_err!(atg_pixel_unshuffle(
27894 c_tensors.as_mut_ptr(),
27895 self.c_tensor,
27896 downscale_factor
27897 ));
27898 Ok(Tensor { c_tensor: c_tensors[0] })
27899 }
27900
27901 pub fn f_pixel_unshuffle_out(
27902 &self,
27903 out: &Tensor,
27904 downscale_factor: i64,
27905 ) -> Result<Tensor, TchError> {
27906 let mut c_tensors = [std::ptr::null_mut(); 1];
27907 unsafe_torch_err!(atg_pixel_unshuffle_out(
27908 c_tensors.as_mut_ptr(),
27909 out.c_tensor,
27910 self.c_tensor,
27911 downscale_factor
27912 ));
27913 Ok(Tensor { c_tensor: c_tensors[0] })
27914 }
27915
27916 pub fn f_poisson(&self) -> Result<Tensor, TchError> {
27917 let mut c_tensors = [std::ptr::null_mut(); 1];
27918 unsafe_torch_err!(atg_poisson(c_tensors.as_mut_ptr(), self.c_tensor));
27919 Ok(Tensor { c_tensor: c_tensors[0] })
27920 }
27921
27922 pub fn f_poisson_nll_loss(
27923 &self,
27924 target: &Tensor,
27925 log_input: bool,
27926 full: bool,
27927 eps: f64,
27928 reduction: crate::Reduction,
27929 ) -> Result<Tensor, TchError> {
27930 let mut c_tensors = [std::ptr::null_mut(); 1];
27931 unsafe_torch_err!(atg_poisson_nll_loss(
27932 c_tensors.as_mut_ptr(),
27933 self.c_tensor,
27934 target.c_tensor,
27935 if log_input { 1 } else { 0 },
27936 if full { 1 } else { 0 },
27937 eps,
27938 reduction.to_int()
27939 ));
27940 Ok(Tensor { c_tensor: c_tensors[0] })
27941 }
27942
27943 pub fn f_poisson_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
27944 let mut c_tensors = [std::ptr::null_mut(); 1];
27945 unsafe_torch_err!(atg_poisson_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
27946 Ok(Tensor { c_tensor: c_tensors[0] })
27947 }
27948
27949 pub fn f_polar(abs: &Tensor, angle: &Tensor) -> Result<Tensor, TchError> {
27950 let mut c_tensors = [std::ptr::null_mut(); 1];
27951 unsafe_torch_err!(atg_polar(c_tensors.as_mut_ptr(), abs.c_tensor, angle.c_tensor));
27952 Ok(Tensor { c_tensor: c_tensors[0] })
27953 }
27954
27955 pub fn f_polar_out(out: &Tensor, abs: &Tensor, angle: &Tensor) -> Result<Tensor, TchError> {
27956 let mut c_tensors = [std::ptr::null_mut(); 1];
27957 unsafe_torch_err!(atg_polar_out(
27958 c_tensors.as_mut_ptr(),
27959 out.c_tensor,
27960 abs.c_tensor,
27961 angle.c_tensor
27962 ));
27963 Ok(Tensor { c_tensor: c_tensors[0] })
27964 }
27965
27966 pub fn f_polygamma(&self, n: i64) -> Result<Tensor, TchError> {
27967 let mut c_tensors = [std::ptr::null_mut(); 1];
27968 unsafe_torch_err!(atg_polygamma(c_tensors.as_mut_ptr(), n, self.c_tensor));
27969 Ok(Tensor { c_tensor: c_tensors[0] })
27970 }
27971
27972 pub fn f_polygamma_(&mut self, n: i64) -> Result<Tensor, TchError> {
27973 let mut c_tensors = [std::ptr::null_mut(); 1];
27974 unsafe_torch_err!(atg_polygamma_(c_tensors.as_mut_ptr(), self.c_tensor, n));
27975 Ok(Tensor { c_tensor: c_tensors[0] })
27976 }
27977
27978 pub fn f_polygamma_out(&self, out: &Tensor, n: i64) -> Result<Tensor, TchError> {
27979 let mut c_tensors = [std::ptr::null_mut(); 1];
27980 unsafe_torch_err!(atg_polygamma_out(
27981 c_tensors.as_mut_ptr(),
27982 out.c_tensor,
27983 n,
27984 self.c_tensor
27985 ));
27986 Ok(Tensor { c_tensor: c_tensors[0] })
27987 }
27988
27989 pub fn f_positive(&self) -> Result<Tensor, TchError> {
27990 let mut c_tensors = [std::ptr::null_mut(); 1];
27991 unsafe_torch_err!(atg_positive(c_tensors.as_mut_ptr(), self.c_tensor));
27992 Ok(Tensor { c_tensor: c_tensors[0] })
27993 }
27994
27995 pub fn f_pow(&self, exponent: &Tensor) -> Result<Tensor, TchError> {
27996 let mut c_tensors = [std::ptr::null_mut(); 1];
27997 unsafe_torch_err!(atg_pow(c_tensors.as_mut_ptr(), self.c_tensor, exponent.c_tensor));
27998 Ok(Tensor { c_tensor: c_tensors[0] })
27999 }
28000
28001 pub fn f_pow_<S: Into<Scalar>>(&mut self, exponent: S) -> Result<Tensor, TchError> {
28002 let mut c_tensors = [std::ptr::null_mut(); 1];
28003 unsafe_torch_err!(atg_pow_(
28004 c_tensors.as_mut_ptr(),
28005 self.c_tensor,
28006 exponent.into().c_scalar
28007 ));
28008 Ok(Tensor { c_tensor: c_tensors[0] })
28009 }
28010
28011 pub fn f_pow_scalar<S: Into<Scalar>>(
28012 self_scalar: S,
28013 exponent: &Tensor,
28014 ) -> Result<Tensor, TchError> {
28015 let mut c_tensors = [std::ptr::null_mut(); 1];
28016 unsafe_torch_err!(atg_pow_scalar(
28017 c_tensors.as_mut_ptr(),
28018 self_scalar.into().c_scalar,
28019 exponent.c_tensor
28020 ));
28021 Ok(Tensor { c_tensor: c_tensors[0] })
28022 }
28023
28024 pub fn f_pow_scalar_out<S: Into<Scalar>>(
28025 out: &Tensor,
28026 self_scalar: S,
28027 exponent: &Tensor,
28028 ) -> Result<Tensor, TchError> {
28029 let mut c_tensors = [std::ptr::null_mut(); 1];
28030 unsafe_torch_err!(atg_pow_scalar_out(
28031 c_tensors.as_mut_ptr(),
28032 out.c_tensor,
28033 self_scalar.into().c_scalar,
28034 exponent.c_tensor
28035 ));
28036 Ok(Tensor { c_tensor: c_tensors[0] })
28037 }
28038
28039 pub fn f_pow_tensor_(&mut self, exponent: &Tensor) -> Result<Tensor, TchError> {
28040 let mut c_tensors = [std::ptr::null_mut(); 1];
28041 unsafe_torch_err!(atg_pow_tensor_(
28042 c_tensors.as_mut_ptr(),
28043 self.c_tensor,
28044 exponent.c_tensor
28045 ));
28046 Ok(Tensor { c_tensor: c_tensors[0] })
28047 }
28048
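    /// Raises each element of `self` to the given scalar power.
    ///
    /// A minimal usage sketch, fenced as `ignore`; `Tensor::from_slice` is an
    /// illustrative assumption:
    ///
    /// ```ignore
    /// let xs = Tensor::from_slice(&[1.0f64, 2.0, 3.0]);
    /// let squared = xs.f_pow_tensor_scalar(2)?; // [1.0, 4.0, 9.0]
    /// ```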
28049 pub fn f_pow_tensor_scalar<S: Into<Scalar>>(&self, exponent: S) -> Result<Tensor, TchError> {
28050 let mut c_tensors = [std::ptr::null_mut(); 1];
28051 unsafe_torch_err!(atg_pow_tensor_scalar(
28052 c_tensors.as_mut_ptr(),
28053 self.c_tensor,
28054 exponent.into().c_scalar
28055 ));
28056 Ok(Tensor { c_tensor: c_tensors[0] })
28057 }
28058
28059 pub fn f_pow_tensor_scalar_out<S: Into<Scalar>>(
28060 &self,
28061 out: &Tensor,
28062 exponent: S,
28063 ) -> Result<Tensor, TchError> {
28064 let mut c_tensors = [std::ptr::null_mut(); 1];
28065 unsafe_torch_err!(atg_pow_tensor_scalar_out(
28066 c_tensors.as_mut_ptr(),
28067 out.c_tensor,
28068 self.c_tensor,
28069 exponent.into().c_scalar
28070 ));
28071 Ok(Tensor { c_tensor: c_tensors[0] })
28072 }
28073
28074 pub fn f_pow_tensor_tensor_out(
28075 &self,
28076 out: &Tensor,
28077 exponent: &Tensor,
28078 ) -> Result<Tensor, TchError> {
28079 let mut c_tensors = [std::ptr::null_mut(); 1];
28080 unsafe_torch_err!(atg_pow_tensor_tensor_out(
28081 c_tensors.as_mut_ptr(),
28082 out.c_tensor,
28083 self.c_tensor,
28084 exponent.c_tensor
28085 ));
28086 Ok(Tensor { c_tensor: c_tensors[0] })
28087 }
28088
28089 pub fn f_prelu(&self, weight: &Tensor) -> Result<Tensor, TchError> {
28090 let mut c_tensors = [std::ptr::null_mut(); 1];
28091 unsafe_torch_err!(atg_prelu(c_tensors.as_mut_ptr(), self.c_tensor, weight.c_tensor));
28092 Ok(Tensor { c_tensor: c_tensors[0] })
28093 }
28094
28095 pub fn f_prod(&self, dtype: impl Into<Option<Kind>>) -> Result<Tensor, TchError> {
28096 let mut c_tensors = [std::ptr::null_mut(); 1];
28097 unsafe_torch_err!(atg_prod(
28098 c_tensors.as_mut_ptr(),
28099 self.c_tensor,
28100 dtype.into().map_or(-1, |s| s.c_int())
28101 ));
28102 Ok(Tensor { c_tensor: c_tensors[0] })
28103 }
28104
28105 pub fn f_prod_dim_int(
28106 &self,
28107 dim: i64,
28108 keepdim: bool,
28109 dtype: impl Into<Option<Kind>>,
28110 ) -> Result<Tensor, TchError> {
28111 let mut c_tensors = [std::ptr::null_mut(); 1];
28112 unsafe_torch_err!(atg_prod_dim_int(
28113 c_tensors.as_mut_ptr(),
28114 self.c_tensor,
28115 dim,
28116 if keepdim { 1 } else { 0 },
28117 dtype.into().map_or(-1, |s| s.c_int())
28118 ));
28119 Ok(Tensor { c_tensor: c_tensors[0] })
28120 }
28121
28122 pub fn f_prod_int_out(
28123 &self,
28124 out: &Tensor,
28125 dim: i64,
28126 keepdim: bool,
28127 dtype: impl Into<Option<Kind>>,
28128 ) -> Result<Tensor, TchError> {
28129 let mut c_tensors = [std::ptr::null_mut(); 1];
28130 unsafe_torch_err!(atg_prod_int_out(
28131 c_tensors.as_mut_ptr(),
28132 out.c_tensor,
28133 self.c_tensor,
28134 dim,
28135 if keepdim { 1 } else { 0 },
28136 dtype.into().map_or(-1, |s| s.c_int())
28137 ));
28138 Ok(Tensor { c_tensor: c_tensors[0] })
28139 }
28140
28141 pub fn f_prod_out(
28142 &self,
28143 out: &Tensor,
28144 dtype: impl Into<Option<Kind>>,
28145 ) -> Result<Tensor, TchError> {
28146 let mut c_tensors = [std::ptr::null_mut(); 1];
28147 unsafe_torch_err!(atg_prod_out(
28148 c_tensors.as_mut_ptr(),
28149 out.c_tensor,
28150 self.c_tensor,
28151 dtype.into().map_or(-1, |s| s.c_int())
28152 ));
28153 Ok(Tensor { c_tensor: c_tensors[0] })
28154 }
28155
28156 pub fn f_put(
28157 &self,
28158 index: &Tensor,
28159 source: &Tensor,
28160 accumulate: bool,
28161 ) -> Result<Tensor, TchError> {
28162 let mut c_tensors = [std::ptr::null_mut(); 1];
28163 unsafe_torch_err!(atg_put(
28164 c_tensors.as_mut_ptr(),
28165 self.c_tensor,
28166 index.c_tensor,
28167 source.c_tensor,
28168 if accumulate { 1 } else { 0 }
28169 ));
28170 Ok(Tensor { c_tensor: c_tensors[0] })
28171 }
28172
28173 pub fn f_put_(
28174 &mut self,
28175 index: &Tensor,
28176 source: &Tensor,
28177 accumulate: bool,
28178 ) -> Result<Tensor, TchError> {
28179 let mut c_tensors = [std::ptr::null_mut(); 1];
28180 unsafe_torch_err!(atg_put_(
28181 c_tensors.as_mut_ptr(),
28182 self.c_tensor,
28183 index.c_tensor,
28184 source.c_tensor,
28185 if accumulate { 1 } else { 0 }
28186 ));
28187 Ok(Tensor { c_tensor: c_tensors[0] })
28188 }
28189
28190 pub fn f_put_out(
28191 &self,
28192 out: &Tensor,
28193 index: &Tensor,
28194 source: &Tensor,
28195 accumulate: bool,
28196 ) -> Result<Tensor, TchError> {
28197 let mut c_tensors = [std::ptr::null_mut(); 1];
28198 unsafe_torch_err!(atg_put_out(
28199 c_tensors.as_mut_ptr(),
28200 out.c_tensor,
28201 self.c_tensor,
28202 index.c_tensor,
28203 source.c_tensor,
28204 if accumulate { 1 } else { 0 }
28205 ));
28206 Ok(Tensor { c_tensor: c_tensors[0] })
28207 }
28208
28209 pub fn f_q_per_channel_axis(&self) -> Result<i64, TchError> {
28210 let return_;
28211 unsafe_torch_err!(return_ = atg_q_per_channel_axis(self.c_tensor));
28212 Ok(return_)
28213 }
28214
28215 pub fn f_q_per_channel_scales(&self) -> Result<Tensor, TchError> {
28216 let mut c_tensors = [std::ptr::null_mut(); 1];
28217 unsafe_torch_err!(atg_q_per_channel_scales(c_tensors.as_mut_ptr(), self.c_tensor));
28218 Ok(Tensor { c_tensor: c_tensors[0] })
28219 }
28220
28221 pub fn f_q_per_channel_scales_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
28222 let mut c_tensors = [std::ptr::null_mut(); 1];
28223 unsafe_torch_err!(atg_q_per_channel_scales_out(
28224 c_tensors.as_mut_ptr(),
28225 out.c_tensor,
28226 self.c_tensor
28227 ));
28228 Ok(Tensor { c_tensor: c_tensors[0] })
28229 }
28230
28231 pub fn f_q_per_channel_zero_points(&self) -> Result<Tensor, TchError> {
28232 let mut c_tensors = [std::ptr::null_mut(); 1];
28233 unsafe_torch_err!(atg_q_per_channel_zero_points(c_tensors.as_mut_ptr(), self.c_tensor));
28234 Ok(Tensor { c_tensor: c_tensors[0] })
28235 }
28236
28237 pub fn f_q_per_channel_zero_points_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
28238 let mut c_tensors = [std::ptr::null_mut(); 1];
28239 unsafe_torch_err!(atg_q_per_channel_zero_points_out(
28240 c_tensors.as_mut_ptr(),
28241 out.c_tensor,
28242 self.c_tensor
28243 ));
28244 Ok(Tensor { c_tensor: c_tensors[0] })
28245 }
28246
28247 pub fn f_q_scale(&self) -> Result<f64, TchError> {
28248 let return_;
28249 unsafe_torch_err!(return_ = atg_q_scale(self.c_tensor));
28250 Ok(return_)
28251 }
28252
28253 pub fn f_q_zero_point(&self) -> Result<i64, TchError> {
28254 let return_;
28255 unsafe_torch_err!(return_ = atg_q_zero_point(self.c_tensor));
28256 Ok(return_)
28257 }
28258
28259 pub fn f_qr(&self, some: bool) -> Result<(Tensor, Tensor), TchError> {
28260 let mut c_tensors = [std::ptr::null_mut(); 2];
28261 unsafe_torch_err!(atg_qr(c_tensors.as_mut_ptr(), self.c_tensor, if some { 1 } else { 0 }));
28262 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
28263 }
28264
28265 pub fn f_qr_q(&self, q: &Tensor, r: &Tensor, some: bool) -> Result<(Tensor, Tensor), TchError> {
28266 let mut c_tensors = [std::ptr::null_mut(); 2];
28267 unsafe_torch_err!(atg_qr_q(
28268 c_tensors.as_mut_ptr(),
28269 q.c_tensor,
28270 r.c_tensor,
28271 self.c_tensor,
28272 if some { 1 } else { 0 }
28273 ));
28274 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
28275 }
28276
28277 pub fn f_quantile(
28278 &self,
28279 q: &Tensor,
28280 dim: impl Into<Option<i64>>,
28281 keepdim: bool,
28282 interpolation: &str,
28283 ) -> Result<Tensor, TchError> {
28284 let dim = dim.into();
28285 let mut c_tensors = [std::ptr::null_mut(); 1];
28286 unsafe_torch_err!(atg_quantile(
28287 c_tensors.as_mut_ptr(),
28288 self.c_tensor,
28289 q.c_tensor,
28290 dim.unwrap_or(0i64),
28291 dim.is_none() as i8,
28292 if keepdim { 1 } else { 0 },
28293 interpolation.as_ptr(),
28294 interpolation.len() as i32
28295 ));
28296 Ok(Tensor { c_tensor: c_tensors[0] })
28297 }
28298
28299 pub fn f_quantile_out(
28300 &self,
28301 out: &Tensor,
28302 q: &Tensor,
28303 dim: impl Into<Option<i64>>,
28304 keepdim: bool,
28305 interpolation: &str,
28306 ) -> Result<Tensor, TchError> {
28307 let dim = dim.into();
28308 let mut c_tensors = [std::ptr::null_mut(); 1];
28309 unsafe_torch_err!(atg_quantile_out(
28310 c_tensors.as_mut_ptr(),
28311 out.c_tensor,
28312 self.c_tensor,
28313 q.c_tensor,
28314 dim.unwrap_or(0i64),
28315 dim.is_none() as i8,
28316 if keepdim { 1 } else { 0 },
28317 interpolation.as_ptr(),
28318 interpolation.len() as i32
28319 ));
28320 Ok(Tensor { c_tensor: c_tensors[0] })
28321 }
28322
28323 pub fn f_quantile_scalar(
28324 &self,
28325 q: f64,
28326 dim: impl Into<Option<i64>>,
28327 keepdim: bool,
28328 interpolation: &str,
28329 ) -> Result<Tensor, TchError> {
28330 let dim = dim.into();
28331 let mut c_tensors = [std::ptr::null_mut(); 1];
28332 unsafe_torch_err!(atg_quantile_scalar(
28333 c_tensors.as_mut_ptr(),
28334 self.c_tensor,
28335 q,
28336 dim.unwrap_or(0i64),
28337 dim.is_none() as i8,
28338 if keepdim { 1 } else { 0 },
28339 interpolation.as_ptr(),
28340 interpolation.len() as i32
28341 ));
28342 Ok(Tensor { c_tensor: c_tensors[0] })
28343 }
28344
28345 pub fn f_quantile_scalar_out(
28346 &self,
28347 out: &Tensor,
28348 q: f64,
28349 dim: impl Into<Option<i64>>,
28350 keepdim: bool,
28351 interpolation: &str,
28352 ) -> Result<Tensor, TchError> {
28353 let dim = dim.into();
28354 let mut c_tensors = [std::ptr::null_mut(); 1];
28355 unsafe_torch_err!(atg_quantile_scalar_out(
28356 c_tensors.as_mut_ptr(),
28357 out.c_tensor,
28358 self.c_tensor,
28359 q,
28360 dim.unwrap_or(0i64),
28361 dim.is_none() as i8,
28362 if keepdim { 1 } else { 0 },
28363 interpolation.as_ptr(),
28364 interpolation.len() as i32
28365 ));
28366 Ok(Tensor { c_tensor: c_tensors[0] })
28367 }
28368
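    /// Converts a float tensor to a per-channel affine quantized tensor: channel `i` along
    /// `axis` is quantized with `scales[i]` (f64) and `zero_points[i]` (i64) into `dtype`.
    ///
    /// Sketch of typical usage, illustrative only; the `Tensor::from_slice` constructor and the
    /// `Kind::QInt8` variant are assumed to be the usual ones provided elsewhere in this crate:
    ///
    /// ```ignore
    /// use crate::{Device, Kind, TchError, Tensor};
    ///
    /// fn example() -> Result<(), TchError> {
    ///     let x = Tensor::f_randn(&[2, 3], (Kind::Float, Device::Cpu))?;
    ///     let scales = Tensor::from_slice(&[0.1f64, 0.2, 0.3]); // one f64 scale per channel
    ///     let zero_points = Tensor::from_slice(&[0i64, 0, 0]); // one i64 zero point per channel
    ///     let qx = x.f_quantize_per_channel(&scales, &zero_points, 1, Kind::QInt8)?;
    ///     Ok(())
    /// }
    /// ```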
28369 pub fn f_quantize_per_channel(
28370 &self,
28371 scales: &Tensor,
28372 zero_points: &Tensor,
28373 axis: i64,
28374 dtype: Kind,
28375 ) -> Result<Tensor, TchError> {
28376 let mut c_tensors = [std::ptr::null_mut(); 1];
28377 unsafe_torch_err!(atg_quantize_per_channel(
28378 c_tensors.as_mut_ptr(),
28379 self.c_tensor,
28380 scales.c_tensor,
28381 zero_points.c_tensor,
28382 axis,
28383 dtype.c_int()
28384 ));
28385 Ok(Tensor { c_tensor: c_tensors[0] })
28386 }
28387
28388 pub fn f_quantize_per_channel_out(
28389 &self,
28390 out: &Tensor,
28391 scales: &Tensor,
28392 zero_points: &Tensor,
28393 axis: i64,
28394 dtype: Kind,
28395 ) -> Result<Tensor, TchError> {
28396 let mut c_tensors = [std::ptr::null_mut(); 1];
28397 unsafe_torch_err!(atg_quantize_per_channel_out(
28398 c_tensors.as_mut_ptr(),
28399 out.c_tensor,
28400 self.c_tensor,
28401 scales.c_tensor,
28402 zero_points.c_tensor,
28403 axis,
28404 dtype.c_int()
28405 ));
28406 Ok(Tensor { c_tensor: c_tensors[0] })
28407 }
28408
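    /// Converts a float tensor to a per-tensor affine quantized tensor using a single
    /// `scale` / `zero_point` pair and the quantized `dtype`; the parameters can be read back
    /// with `f_q_scale` / `f_q_zero_point` defined earlier in this impl.
    ///
    /// A minimal sketch, illustrative only; `Kind::QUInt8` is assumed to be the usual quantized
    /// kind provided by this crate:
    ///
    /// ```ignore
    /// use crate::{Device, Kind, TchError, Tensor};
    ///
    /// fn example() -> Result<(), TchError> {
    ///     let x = Tensor::f_randn(&[2, 3], (Kind::Float, Device::Cpu))?;
    ///     let qx = x.f_quantize_per_tensor(0.1, 10, Kind::QUInt8)?;
    ///     let scale = qx.f_q_scale()?; // 0.1
    ///     let zero_point = qx.f_q_zero_point()?; // 10
    ///     Ok(())
    /// }
    /// ```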
28409 pub fn f_quantize_per_tensor(
28410 &self,
28411 scale: f64,
28412 zero_point: i64,
28413 dtype: Kind,
28414 ) -> Result<Tensor, TchError> {
28415 let mut c_tensors = [std::ptr::null_mut(); 1];
28416 unsafe_torch_err!(atg_quantize_per_tensor(
28417 c_tensors.as_mut_ptr(),
28418 self.c_tensor,
28419 scale,
28420 zero_point,
28421 dtype.c_int()
28422 ));
28423 Ok(Tensor { c_tensor: c_tensors[0] })
28424 }
28425
28426 pub fn f_quantize_per_tensor_dynamic(
28427 &self,
28428 dtype: Kind,
28429 reduce_range: bool,
28430 ) -> Result<Tensor, TchError> {
28431 let mut c_tensors = [std::ptr::null_mut(); 1];
28432 unsafe_torch_err!(atg_quantize_per_tensor_dynamic(
28433 c_tensors.as_mut_ptr(),
28434 self.c_tensor,
28435 dtype.c_int(),
28436 if reduce_range { 1 } else { 0 }
28437 ));
28438 Ok(Tensor { c_tensor: c_tensors[0] })
28439 }
28440
28441 pub fn f_quantize_per_tensor_dynamic_out(
28442 &self,
28443 out: &Tensor,
28444 dtype: Kind,
28445 reduce_range: bool,
28446 ) -> Result<Tensor, TchError> {
28447 let mut c_tensors = [std::ptr::null_mut(); 1];
28448 unsafe_torch_err!(atg_quantize_per_tensor_dynamic_out(
28449 c_tensors.as_mut_ptr(),
28450 out.c_tensor,
28451 self.c_tensor,
28452 dtype.c_int(),
28453 if reduce_range { 1 } else { 0 }
28454 ));
28455 Ok(Tensor { c_tensor: c_tensors[0] })
28456 }
28457
28458 pub fn f_quantize_per_tensor_out(
28459 &self,
28460 out: &Tensor,
28461 scale: f64,
28462 zero_point: i64,
28463 dtype: Kind,
28464 ) -> Result<Tensor, TchError> {
28465 let mut c_tensors = [std::ptr::null_mut(); 1];
28466 unsafe_torch_err!(atg_quantize_per_tensor_out(
28467 c_tensors.as_mut_ptr(),
28468 out.c_tensor,
28469 self.c_tensor,
28470 scale,
28471 zero_point,
28472 dtype.c_int()
28473 ));
28474 Ok(Tensor { c_tensor: c_tensors[0] })
28475 }
28476
28477 pub fn f_quantize_per_tensor_tensor_qparams(
28478 &self,
28479 scale: &Tensor,
28480 zero_point: &Tensor,
28481 dtype: Kind,
28482 ) -> Result<Tensor, TchError> {
28483 let mut c_tensors = [std::ptr::null_mut(); 1];
28484 unsafe_torch_err!(atg_quantize_per_tensor_tensor_qparams(
28485 c_tensors.as_mut_ptr(),
28486 self.c_tensor,
28487 scale.c_tensor,
28488 zero_point.c_tensor,
28489 dtype.c_int()
28490 ));
28491 Ok(Tensor { c_tensor: c_tensors[0] })
28492 }
28493
28494 pub fn f_quantize_per_tensor_tensor_qparams_out(
28495 &self,
28496 out: &Tensor,
28497 scale: &Tensor,
28498 zero_point: &Tensor,
28499 dtype: Kind,
28500 ) -> Result<Tensor, TchError> {
28501 let mut c_tensors = [std::ptr::null_mut(); 1];
28502 unsafe_torch_err!(atg_quantize_per_tensor_tensor_qparams_out(
28503 c_tensors.as_mut_ptr(),
28504 out.c_tensor,
28505 self.c_tensor,
28506 scale.c_tensor,
28507 zero_point.c_tensor,
28508 dtype.c_int()
28509 ));
28510 Ok(Tensor { c_tensor: c_tensors[0] })
28511 }
28512
28513 pub fn f_quantize_per_tensor_tensors<T: Borrow<Tensor>>(
28514 tensors: &[T],
28515 scales: &Tensor,
28516 zero_points: &Tensor,
28517 dtype: Kind,
28518 ) -> Result<Vec<Tensor>, TchError> {
28519 let c_tensors = unsafe_torch_err!(atg_quantize_per_tensor_tensors(
28520 ptr_list(tensors).as_ptr(),
28521 tensors.len() as i32,
28522 scales.c_tensor,
28523 zero_points.c_tensor,
28524 dtype.c_int()
28525 ));
28526 let mut r__ = vec![];
28527 let mut i = 0;
28528 loop {
28529 let c__ = unsafe { *c_tensors.add(i) };
28530 if c__.is_null() {
28531 break;
28532 }
28533 r__.push(Tensor { c_tensor: c__ });
28534 i += 1;
28535 }
28536 unsafe { libc::free(c_tensors as *mut libc::c_void) }
28537 Ok(r__)
28538 }
28539
28540 pub fn f_quantize_per_tensor_tensors_out<T: Borrow<Tensor>>(
28541 out: &[T],
28542 tensors: &[T],
28543 scales: &Tensor,
28544 zero_points: &Tensor,
28545 dtype: Kind,
28546 ) -> Result<(), TchError> {
28547 unsafe_torch_err!(atg_quantize_per_tensor_tensors_out(
28548 ptr_list(out).as_ptr(),
28549 out.len() as i32,
28550 ptr_list(tensors).as_ptr(),
28551 tensors.len() as i32,
28552 scales.c_tensor,
28553 zero_points.c_tensor,
28554 dtype.c_int()
28555 ));
28556 Ok(())
28557 }
28558
28559 pub fn f_quantized_batch_norm<T: Borrow<Tensor>>(
28560 &self,
28561 weight: Option<T>,
28562 bias: Option<T>,
28563 mean: &Tensor,
28564 var: &Tensor,
28565 eps: f64,
28566 output_scale: f64,
28567 output_zero_point: i64,
28568 ) -> Result<Tensor, TchError> {
28569 let mut c_tensors = [std::ptr::null_mut(); 1];
28570 unsafe_torch_err!(atg_quantized_batch_norm(
28571 c_tensors.as_mut_ptr(),
28572 self.c_tensor,
28573 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
28574 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
28575 mean.c_tensor,
28576 var.c_tensor,
28577 eps,
28578 output_scale,
28579 output_zero_point
28580 ));
28581 Ok(Tensor { c_tensor: c_tensors[0] })
28582 }
28583
28584 pub fn f_quantized_batch_norm_out<T: Borrow<Tensor>>(
28585 &self,
28586 out: &Tensor,
28587 weight: Option<T>,
28588 bias: Option<T>,
28589 mean: &Tensor,
28590 var: &Tensor,
28591 eps: f64,
28592 output_scale: f64,
28593 output_zero_point: i64,
28594 ) -> Result<Tensor, TchError> {
28595 let mut c_tensors = [std::ptr::null_mut(); 1];
28596 unsafe_torch_err!(atg_quantized_batch_norm_out(
28597 c_tensors.as_mut_ptr(),
28598 out.c_tensor,
28599 self.c_tensor,
28600 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
28601 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
28602 mean.c_tensor,
28603 var.c_tensor,
28604 eps,
28605 output_scale,
28606 output_zero_point
28607 ));
28608 Ok(Tensor { c_tensor: c_tensors[0] })
28609 }
28610
28611 pub fn f_quantized_gru_cell<S: Into<Scalar>>(
28612 &self,
28613 hx: &Tensor,
28614 w_ih: &Tensor,
28615 w_hh: &Tensor,
28616 b_ih: &Tensor,
28617 b_hh: &Tensor,
28618 packed_ih: &Tensor,
28619 packed_hh: &Tensor,
28620 col_offsets_ih: &Tensor,
28621 col_offsets_hh: &Tensor,
28622 scale_ih: S,
28623 scale_hh: S,
28624 zero_point_ih: S,
28625 zero_point_hh: S,
28626 ) -> Result<Tensor, TchError> {
28627 let mut c_tensors = [std::ptr::null_mut(); 1];
28628 unsafe_torch_err!(atg_quantized_gru_cell(
28629 c_tensors.as_mut_ptr(),
28630 self.c_tensor,
28631 hx.c_tensor,
28632 w_ih.c_tensor,
28633 w_hh.c_tensor,
28634 b_ih.c_tensor,
28635 b_hh.c_tensor,
28636 packed_ih.c_tensor,
28637 packed_hh.c_tensor,
28638 col_offsets_ih.c_tensor,
28639 col_offsets_hh.c_tensor,
28640 scale_ih.into().c_scalar,
28641 scale_hh.into().c_scalar,
28642 zero_point_ih.into().c_scalar,
28643 zero_point_hh.into().c_scalar
28644 ));
28645 Ok(Tensor { c_tensor: c_tensors[0] })
28646 }
28647
28648 pub fn f_quantized_lstm_cell<T: Borrow<Tensor>, S: Into<Scalar>>(
28649 &self,
28650 hx: &[T],
28651 w_ih: &Tensor,
28652 w_hh: &Tensor,
28653 b_ih: &Tensor,
28654 b_hh: &Tensor,
28655 packed_ih: &Tensor,
28656 packed_hh: &Tensor,
28657 col_offsets_ih: &Tensor,
28658 col_offsets_hh: &Tensor,
28659 scale_ih: S,
28660 scale_hh: S,
28661 zero_point_ih: S,
28662 zero_point_hh: S,
28663 ) -> Result<(Tensor, Tensor), TchError> {
28664 let mut c_tensors = [std::ptr::null_mut(); 2];
28665 unsafe_torch_err!(atg_quantized_lstm_cell(
28666 c_tensors.as_mut_ptr(),
28667 self.c_tensor,
28668 ptr_list(hx).as_ptr(),
28669 hx.len() as i32,
28670 w_ih.c_tensor,
28671 w_hh.c_tensor,
28672 b_ih.c_tensor,
28673 b_hh.c_tensor,
28674 packed_ih.c_tensor,
28675 packed_hh.c_tensor,
28676 col_offsets_ih.c_tensor,
28677 col_offsets_hh.c_tensor,
28678 scale_ih.into().c_scalar,
28679 scale_hh.into().c_scalar,
28680 zero_point_ih.into().c_scalar,
28681 zero_point_hh.into().c_scalar
28682 ));
28683 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
28684 }
28685
28686 pub fn f_quantized_max_pool1d(
28687 &self,
28688 kernel_size: impl IntList,
28689 stride: impl IntList,
28690 padding: impl IntList,
28691 dilation: impl IntList,
28692 ceil_mode: bool,
28693 ) -> Result<Tensor, TchError> {
28694 let mut c_tensors = [std::ptr::null_mut(); 1];
28695 unsafe_torch_err!(atg_quantized_max_pool1d(
28696 c_tensors.as_mut_ptr(),
28697 self.c_tensor,
28698 kernel_size.as_ptr(),
28699 kernel_size.len_i32(),
28700 stride.as_ptr(),
28701 stride.len_i32(),
28702 padding.as_ptr(),
28703 padding.len_i32(),
28704 dilation.as_ptr(),
28705 dilation.len_i32(),
28706 if ceil_mode { 1 } else { 0 }
28707 ));
28708 Ok(Tensor { c_tensor: c_tensors[0] })
28709 }
28710
28711 pub fn f_quantized_max_pool1d_out(
28712 &self,
28713 out: &Tensor,
28714 kernel_size: impl IntList,
28715 stride: impl IntList,
28716 padding: impl IntList,
28717 dilation: impl IntList,
28718 ceil_mode: bool,
28719 ) -> Result<Tensor, TchError> {
28720 let mut c_tensors = [std::ptr::null_mut(); 1];
28721 unsafe_torch_err!(atg_quantized_max_pool1d_out(
28722 c_tensors.as_mut_ptr(),
28723 out.c_tensor,
28724 self.c_tensor,
28725 kernel_size.as_ptr(),
28726 kernel_size.len_i32(),
28727 stride.as_ptr(),
28728 stride.len_i32(),
28729 padding.as_ptr(),
28730 padding.len_i32(),
28731 dilation.as_ptr(),
28732 dilation.len_i32(),
28733 if ceil_mode { 1 } else { 0 }
28734 ));
28735 Ok(Tensor { c_tensor: c_tensors[0] })
28736 }
28737
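    /// Max pooling over a quantized NCHW tensor, mirroring the signature of the regular
    /// `max_pool2d` (kernel size, stride, padding, dilation, ceil mode).
    ///
    /// A rough sketch, illustrative only; it quantizes a random float tensor first with
    /// `f_quantize_per_tensor` from this impl and assumes the usual `Kind::QUInt8` variant:
    ///
    /// ```ignore
    /// use crate::{Device, Kind, TchError, Tensor};
    ///
    /// fn example() -> Result<(), TchError> {
    ///     let x = Tensor::f_randn(&[1, 1, 8, 8], (Kind::Float, Device::Cpu))?
    ///         .f_quantize_per_tensor(0.1, 0, Kind::QUInt8)?;
    ///     let y = x.f_quantized_max_pool2d(&[2, 2], &[2, 2], &[0, 0], &[1, 1], false)?;
    ///     // y has shape [1, 1, 4, 4] and stays quantized.
    ///     Ok(())
    /// }
    /// ```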
28738 pub fn f_quantized_max_pool2d(
28739 &self,
28740 kernel_size: impl IntList,
28741 stride: impl IntList,
28742 padding: impl IntList,
28743 dilation: impl IntList,
28744 ceil_mode: bool,
28745 ) -> Result<Tensor, TchError> {
28746 let mut c_tensors = [std::ptr::null_mut(); 1];
28747 unsafe_torch_err!(atg_quantized_max_pool2d(
28748 c_tensors.as_mut_ptr(),
28749 self.c_tensor,
28750 kernel_size.as_ptr(),
28751 kernel_size.len_i32(),
28752 stride.as_ptr(),
28753 stride.len_i32(),
28754 padding.as_ptr(),
28755 padding.len_i32(),
28756 dilation.as_ptr(),
28757 dilation.len_i32(),
28758 if ceil_mode { 1 } else { 0 }
28759 ));
28760 Ok(Tensor { c_tensor: c_tensors[0] })
28761 }
28762
28763 pub fn f_quantized_max_pool2d_out(
28764 &self,
28765 out: &Tensor,
28766 kernel_size: impl IntList,
28767 stride: impl IntList,
28768 padding: impl IntList,
28769 dilation: impl IntList,
28770 ceil_mode: bool,
28771 ) -> Result<Tensor, TchError> {
28772 let mut c_tensors = [std::ptr::null_mut(); 1];
28773 unsafe_torch_err!(atg_quantized_max_pool2d_out(
28774 c_tensors.as_mut_ptr(),
28775 out.c_tensor,
28776 self.c_tensor,
28777 kernel_size.as_ptr(),
28778 kernel_size.len_i32(),
28779 stride.as_ptr(),
28780 stride.len_i32(),
28781 padding.as_ptr(),
28782 padding.len_i32(),
28783 dilation.as_ptr(),
28784 dilation.len_i32(),
28785 if ceil_mode { 1 } else { 0 }
28786 ));
28787 Ok(Tensor { c_tensor: c_tensors[0] })
28788 }
28789
28790 pub fn f_quantized_max_pool3d(
28791 &self,
28792 kernel_size: impl IntList,
28793 stride: impl IntList,
28794 padding: impl IntList,
28795 dilation: impl IntList,
28796 ceil_mode: bool,
28797 ) -> Result<Tensor, TchError> {
28798 let mut c_tensors = [std::ptr::null_mut(); 1];
28799 unsafe_torch_err!(atg_quantized_max_pool3d(
28800 c_tensors.as_mut_ptr(),
28801 self.c_tensor,
28802 kernel_size.as_ptr(),
28803 kernel_size.len_i32(),
28804 stride.as_ptr(),
28805 stride.len_i32(),
28806 padding.as_ptr(),
28807 padding.len_i32(),
28808 dilation.as_ptr(),
28809 dilation.len_i32(),
28810 if ceil_mode { 1 } else { 0 }
28811 ));
28812 Ok(Tensor { c_tensor: c_tensors[0] })
28813 }
28814
28815 pub fn f_quantized_max_pool3d_out(
28816 &self,
28817 out: &Tensor,
28818 kernel_size: impl IntList,
28819 stride: impl IntList,
28820 padding: impl IntList,
28821 dilation: impl IntList,
28822 ceil_mode: bool,
28823 ) -> Result<Tensor, TchError> {
28824 let mut c_tensors = [std::ptr::null_mut(); 1];
28825 unsafe_torch_err!(atg_quantized_max_pool3d_out(
28826 c_tensors.as_mut_ptr(),
28827 out.c_tensor,
28828 self.c_tensor,
28829 kernel_size.as_ptr(),
28830 kernel_size.len_i32(),
28831 stride.as_ptr(),
28832 stride.len_i32(),
28833 padding.as_ptr(),
28834 padding.len_i32(),
28835 dilation.as_ptr(),
28836 dilation.len_i32(),
28837 if ceil_mode { 1 } else { 0 }
28838 ));
28839 Ok(Tensor { c_tensor: c_tensors[0] })
28840 }
28841
28842 pub fn f_quantized_rnn_relu_cell<S: Into<Scalar>>(
28843 &self,
28844 hx: &Tensor,
28845 w_ih: &Tensor,
28846 w_hh: &Tensor,
28847 b_ih: &Tensor,
28848 b_hh: &Tensor,
28849 packed_ih: &Tensor,
28850 packed_hh: &Tensor,
28851 col_offsets_ih: &Tensor,
28852 col_offsets_hh: &Tensor,
28853 scale_ih: S,
28854 scale_hh: S,
28855 zero_point_ih: S,
28856 zero_point_hh: S,
28857 ) -> Result<Tensor, TchError> {
28858 let mut c_tensors = [std::ptr::null_mut(); 1];
28859 unsafe_torch_err!(atg_quantized_rnn_relu_cell(
28860 c_tensors.as_mut_ptr(),
28861 self.c_tensor,
28862 hx.c_tensor,
28863 w_ih.c_tensor,
28864 w_hh.c_tensor,
28865 b_ih.c_tensor,
28866 b_hh.c_tensor,
28867 packed_ih.c_tensor,
28868 packed_hh.c_tensor,
28869 col_offsets_ih.c_tensor,
28870 col_offsets_hh.c_tensor,
28871 scale_ih.into().c_scalar,
28872 scale_hh.into().c_scalar,
28873 zero_point_ih.into().c_scalar,
28874 zero_point_hh.into().c_scalar
28875 ));
28876 Ok(Tensor { c_tensor: c_tensors[0] })
28877 }
28878
28879 pub fn f_quantized_rnn_tanh_cell<S: Into<Scalar>>(
28880 &self,
28881 hx: &Tensor,
28882 w_ih: &Tensor,
28883 w_hh: &Tensor,
28884 b_ih: &Tensor,
28885 b_hh: &Tensor,
28886 packed_ih: &Tensor,
28887 packed_hh: &Tensor,
28888 col_offsets_ih: &Tensor,
28889 col_offsets_hh: &Tensor,
28890 scale_ih: S,
28891 scale_hh: S,
28892 zero_point_ih: S,
28893 zero_point_hh: S,
28894 ) -> Result<Tensor, TchError> {
28895 let mut c_tensors = [std::ptr::null_mut(); 1];
28896 unsafe_torch_err!(atg_quantized_rnn_tanh_cell(
28897 c_tensors.as_mut_ptr(),
28898 self.c_tensor,
28899 hx.c_tensor,
28900 w_ih.c_tensor,
28901 w_hh.c_tensor,
28902 b_ih.c_tensor,
28903 b_hh.c_tensor,
28904 packed_ih.c_tensor,
28905 packed_hh.c_tensor,
28906 col_offsets_ih.c_tensor,
28907 col_offsets_hh.c_tensor,
28908 scale_ih.into().c_scalar,
28909 scale_hh.into().c_scalar,
28910 zero_point_ih.into().c_scalar,
28911 zero_point_hh.into().c_scalar
28912 ));
28913 Ok(Tensor { c_tensor: c_tensors[0] })
28914 }
28915
28916 pub fn f_rad2deg(&self) -> Result<Tensor, TchError> {
28917 let mut c_tensors = [std::ptr::null_mut(); 1];
28918 unsafe_torch_err!(atg_rad2deg(c_tensors.as_mut_ptr(), self.c_tensor));
28919 Ok(Tensor { c_tensor: c_tensors[0] })
28920 }
28921
28922 pub fn f_rad2deg_(&mut self) -> Result<Tensor, TchError> {
28923 let mut c_tensors = [std::ptr::null_mut(); 1];
28924 unsafe_torch_err!(atg_rad2deg_(c_tensors.as_mut_ptr(), self.c_tensor));
28925 Ok(Tensor { c_tensor: c_tensors[0] })
28926 }
28927
28928 pub fn f_rad2deg_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
28929 let mut c_tensors = [std::ptr::null_mut(); 1];
28930 unsafe_torch_err!(atg_rad2deg_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
28931 Ok(Tensor { c_tensor: c_tensors[0] })
28932 }
28933
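    /// Random factory functions: `f_rand` samples uniformly on `[0, 1)`, `f_randn` from a
    /// standard normal, `f_randint` uniform integers in `[0, high)`, and `f_randperm` a random
    /// permutation; each takes a `(Kind, Device)` pair.
    ///
    /// A small sketch, illustrative only; it assumes the usual `Kind` / `Device` variants:
    ///
    /// ```ignore
    /// use crate::{Device, Kind, TchError, Tensor};
    ///
    /// fn example() -> Result<(), TchError> {
    ///     let u = Tensor::f_rand(&[2, 3], (Kind::Float, Device::Cpu))?; // uniform [0, 1)
    ///     let n = Tensor::f_randn(&[2, 3], (Kind::Float, Device::Cpu))?; // standard normal
    ///     let i = Tensor::f_randint(10, &[2, 3], (Kind::Int64, Device::Cpu))?; // ints in [0, 10)
    ///     let p = Tensor::f_randperm(10, (Kind::Int64, Device::Cpu))?; // permutation of 0..10
    ///     Ok(())
    /// }
    /// ```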
28934 pub fn f_rand(size: impl IntList, options: (Kind, Device)) -> Result<Tensor, TchError> {
28935 let mut c_tensors = [std::ptr::null_mut(); 1];
28936 unsafe_torch_err!(atg_rand(
28937 c_tensors.as_mut_ptr(),
28938 size.as_ptr(),
28939 size.len_i32(),
28940 options.0.c_int(),
28941 options.1.c_int()
28942 ));
28943 Ok(Tensor { c_tensor: c_tensors[0] })
28944 }
28945
28946 pub fn f_rand_like(&self) -> Result<Tensor, TchError> {
28947 let mut c_tensors = [std::ptr::null_mut(); 1];
28948 unsafe_torch_err!(atg_rand_like(c_tensors.as_mut_ptr(), self.c_tensor));
28949 Ok(Tensor { c_tensor: c_tensors[0] })
28950 }
28951
28952 pub fn f_rand_like_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
28953 let mut c_tensors = [std::ptr::null_mut(); 1];
28954 unsafe_torch_err!(atg_rand_like_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
28955 Ok(Tensor { c_tensor: c_tensors[0] })
28956 }
28957
28958 pub fn f_rand_out(out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
28959 let mut c_tensors = [std::ptr::null_mut(); 1];
28960 unsafe_torch_err!(atg_rand_out(
28961 c_tensors.as_mut_ptr(),
28962 out.c_tensor,
28963 size.as_ptr(),
28964 size.len_i32()
28965 ));
28966 Ok(Tensor { c_tensor: c_tensors[0] })
28967 }
28968
28969 pub fn f_randint(
28970 high: i64,
28971 size: impl IntList,
28972 options: (Kind, Device),
28973 ) -> Result<Tensor, TchError> {
28974 let mut c_tensors = [std::ptr::null_mut(); 1];
28975 unsafe_torch_err!(atg_randint(
28976 c_tensors.as_mut_ptr(),
28977 high,
28978 size.as_ptr(),
28979 size.len_i32(),
28980 options.0.c_int(),
28981 options.1.c_int()
28982 ));
28983 Ok(Tensor { c_tensor: c_tensors[0] })
28984 }
28985
28986 pub fn f_randint_like(&self, high: i64) -> Result<Tensor, TchError> {
28987 let mut c_tensors = [std::ptr::null_mut(); 1];
28988 unsafe_torch_err!(atg_randint_like(c_tensors.as_mut_ptr(), self.c_tensor, high));
28989 Ok(Tensor { c_tensor: c_tensors[0] })
28990 }
28991
28992 pub fn f_randint_like_low_dtype(&self, low: i64, high: i64) -> Result<Tensor, TchError> {
28993 let mut c_tensors = [std::ptr::null_mut(); 1];
28994 unsafe_torch_err!(atg_randint_like_low_dtype(
28995 c_tensors.as_mut_ptr(),
28996 self.c_tensor,
28997 low,
28998 high
28999 ));
29000 Ok(Tensor { c_tensor: c_tensors[0] })
29001 }
29002
29003 pub fn f_randint_like_low_dtype_out(
29004 &self,
29005 out: &Tensor,
29006 low: i64,
29007 high: i64,
29008 ) -> Result<Tensor, TchError> {
29009 let mut c_tensors = [std::ptr::null_mut(); 1];
29010 unsafe_torch_err!(atg_randint_like_low_dtype_out(
29011 c_tensors.as_mut_ptr(),
29012 out.c_tensor,
29013 self.c_tensor,
29014 low,
29015 high
29016 ));
29017 Ok(Tensor { c_tensor: c_tensors[0] })
29018 }
29019
29020 pub fn f_randint_like_out(&self, out: &Tensor, high: i64) -> Result<Tensor, TchError> {
29021 let mut c_tensors = [std::ptr::null_mut(); 1];
29022 unsafe_torch_err!(atg_randint_like_out(
29023 c_tensors.as_mut_ptr(),
29024 out.c_tensor,
29025 self.c_tensor,
29026 high
29027 ));
29028 Ok(Tensor { c_tensor: c_tensors[0] })
29029 }
29030
29031 pub fn f_randint_low(
29032 low: i64,
29033 high: i64,
29034 size: impl IntList,
29035 options: (Kind, Device),
29036 ) -> Result<Tensor, TchError> {
29037 let mut c_tensors = [std::ptr::null_mut(); 1];
29038 unsafe_torch_err!(atg_randint_low(
29039 c_tensors.as_mut_ptr(),
29040 low,
29041 high,
29042 size.as_ptr(),
29043 size.len_i32(),
29044 options.0.c_int(),
29045 options.1.c_int()
29046 ));
29047 Ok(Tensor { c_tensor: c_tensors[0] })
29048 }
29049
29050 pub fn f_randint_low_out(
29051 out: &Tensor,
29052 low: i64,
29053 high: i64,
29054 size: impl IntList,
29055 ) -> Result<Tensor, TchError> {
29056 let mut c_tensors = [std::ptr::null_mut(); 1];
29057 unsafe_torch_err!(atg_randint_low_out(
29058 c_tensors.as_mut_ptr(),
29059 out.c_tensor,
29060 low,
29061 high,
29062 size.as_ptr(),
29063 size.len_i32()
29064 ));
29065 Ok(Tensor { c_tensor: c_tensors[0] })
29066 }
29067
29068 pub fn f_randint_out(out: &Tensor, high: i64, size: impl IntList) -> Result<Tensor, TchError> {
29069 let mut c_tensors = [std::ptr::null_mut(); 1];
29070 unsafe_torch_err!(atg_randint_out(
29071 c_tensors.as_mut_ptr(),
29072 out.c_tensor,
29073 high,
29074 size.as_ptr(),
29075 size.len_i32()
29076 ));
29077 Ok(Tensor { c_tensor: c_tensors[0] })
29078 }
29079
29080 pub fn f_randn(size: impl IntList, options: (Kind, Device)) -> Result<Tensor, TchError> {
29081 let mut c_tensors = [std::ptr::null_mut(); 1];
29082 unsafe_torch_err!(atg_randn(
29083 c_tensors.as_mut_ptr(),
29084 size.as_ptr(),
29085 size.len_i32(),
29086 options.0.c_int(),
29087 options.1.c_int()
29088 ));
29089 Ok(Tensor { c_tensor: c_tensors[0] })
29090 }
29091
29092 pub fn f_randn_like(&self) -> Result<Tensor, TchError> {
29093 let mut c_tensors = [std::ptr::null_mut(); 1];
29094 unsafe_torch_err!(atg_randn_like(c_tensors.as_mut_ptr(), self.c_tensor));
29095 Ok(Tensor { c_tensor: c_tensors[0] })
29096 }
29097
29098 pub fn f_randn_like_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
29099 let mut c_tensors = [std::ptr::null_mut(); 1];
29100 unsafe_torch_err!(atg_randn_like_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
29101 Ok(Tensor { c_tensor: c_tensors[0] })
29102 }
29103
29104 pub fn f_randn_out(out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
29105 let mut c_tensors = [std::ptr::null_mut(); 1];
29106 unsafe_torch_err!(atg_randn_out(
29107 c_tensors.as_mut_ptr(),
29108 out.c_tensor,
29109 size.as_ptr(),
29110 size.len_i32()
29111 ));
29112 Ok(Tensor { c_tensor: c_tensors[0] })
29113 }
29114
29115 pub fn f_random(&self) -> Result<Tensor, TchError> {
29116 let mut c_tensors = [std::ptr::null_mut(); 1];
29117 unsafe_torch_err!(atg_random(c_tensors.as_mut_ptr(), self.c_tensor));
29118 Ok(Tensor { c_tensor: c_tensors[0] })
29119 }
29120
29121 pub fn f_random_(&mut self) -> Result<Tensor, TchError> {
29122 let mut c_tensors = [std::ptr::null_mut(); 1];
29123 unsafe_torch_err!(atg_random_(c_tensors.as_mut_ptr(), self.c_tensor));
29124 Ok(Tensor { c_tensor: c_tensors[0] })
29125 }
29126
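    /// Discrete uniform fill: values are drawn from `[from, to)`, with `to = None` meaning the
    /// upper bound implied by the dtype; the optional bound is passed to the C API as a
    /// `(value, is_none)` pair. The `_`-suffixed variant below operates in place.
    ///
    /// Illustrative sketch only; it fills a float CPU tensor in place:
    ///
    /// ```ignore
    /// use crate::{Device, Kind, TchError, Tensor};
    ///
    /// fn example() -> Result<(), TchError> {
    ///     let mut t = Tensor::f_randn(&[4], (Kind::Float, Device::Cpu))?;
    ///     let _ = t.f_random_from_(5, 10)?; // integers drawn uniformly from [5, 10)
    ///     Ok(())
    /// }
    /// ```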
29127 pub fn f_random_from(&self, from: i64, to: impl Into<Option<i64>>) -> Result<Tensor, TchError> {
29128 let to = to.into();
29129 let mut c_tensors = [std::ptr::null_mut(); 1];
29130 unsafe_torch_err!(atg_random_from(
29131 c_tensors.as_mut_ptr(),
29132 self.c_tensor,
29133 from,
29134 to.unwrap_or(0i64),
29135 to.is_none() as i8
29136 ));
29137 Ok(Tensor { c_tensor: c_tensors[0] })
29138 }
29139
29140 pub fn f_random_from_(
29141 &mut self,
29142 from: i64,
29143 to: impl Into<Option<i64>>,
29144 ) -> Result<Tensor, TchError> {
29145 let to = to.into();
29146 let mut c_tensors = [std::ptr::null_mut(); 1];
29147 unsafe_torch_err!(atg_random_from_(
29148 c_tensors.as_mut_ptr(),
29149 self.c_tensor,
29150 from,
29151 to.unwrap_or(0i64),
29152 to.is_none() as i8
29153 ));
29154 Ok(Tensor { c_tensor: c_tensors[0] })
29155 }
29156
29157 pub fn f_random_from_out(
29158 &self,
29159 out: &Tensor,
29160 from: i64,
29161 to: impl Into<Option<i64>>,
29162 ) -> Result<Tensor, TchError> {
29163 let to = to.into();
29164 let mut c_tensors = [std::ptr::null_mut(); 1];
29165 unsafe_torch_err!(atg_random_from_out(
29166 c_tensors.as_mut_ptr(),
29167 out.c_tensor,
29168 self.c_tensor,
29169 from,
29170 to.unwrap_or(0i64),
29171 to.is_none() as i8
29172 ));
29173 Ok(Tensor { c_tensor: c_tensors[0] })
29174 }
29175
29176 pub fn f_random_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
29177 let mut c_tensors = [std::ptr::null_mut(); 1];
29178 unsafe_torch_err!(atg_random_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
29179 Ok(Tensor { c_tensor: c_tensors[0] })
29180 }
29181
29182 pub fn f_random_to(&self, to: i64) -> Result<Tensor, TchError> {
29183 let mut c_tensors = [std::ptr::null_mut(); 1];
29184 unsafe_torch_err!(atg_random_to(c_tensors.as_mut_ptr(), self.c_tensor, to));
29185 Ok(Tensor { c_tensor: c_tensors[0] })
29186 }
29187
29188 pub fn f_random_to_(&mut self, to: i64) -> Result<Tensor, TchError> {
29189 let mut c_tensors = [std::ptr::null_mut(); 1];
29190 unsafe_torch_err!(atg_random_to_(c_tensors.as_mut_ptr(), self.c_tensor, to));
29191 Ok(Tensor { c_tensor: c_tensors[0] })
29192 }
29193
29194 pub fn f_random_to_out(&self, out: &Tensor, to: i64) -> Result<Tensor, TchError> {
29195 let mut c_tensors = [std::ptr::null_mut(); 1];
29196 unsafe_torch_err!(atg_random_to_out(
29197 c_tensors.as_mut_ptr(),
29198 out.c_tensor,
29199 self.c_tensor,
29200 to
29201 ));
29202 Ok(Tensor { c_tensor: c_tensors[0] })
29203 }
29204
29205 pub fn f_randperm(n: i64, options: (Kind, Device)) -> Result<Tensor, TchError> {
29206 let mut c_tensors = [std::ptr::null_mut(); 1];
29207 unsafe_torch_err!(atg_randperm(
29208 c_tensors.as_mut_ptr(),
29209 n,
29210 options.0.c_int(),
29211 options.1.c_int()
29212 ));
29213 Ok(Tensor { c_tensor: c_tensors[0] })
29214 }
29215
29216 pub fn f_randperm_out(out: &Tensor, n: i64) -> Result<Tensor, TchError> {
29217 let mut c_tensors = [std::ptr::null_mut(); 1];
29218 unsafe_torch_err!(atg_randperm_out(c_tensors.as_mut_ptr(), out.c_tensor, n));
29219 Ok(Tensor { c_tensor: c_tensors[0] })
29220 }
29221
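    /// Mirrors `torch.range` (deprecated upstream in favor of `arange`): a 1-D tensor running
    /// from `start` to `end` with step 1, and unlike `arange` the end point is included.
    ///
    /// A minimal sketch, illustrative only:
    ///
    /// ```ignore
    /// use crate::{Device, Kind, TchError, Tensor};
    ///
    /// fn example() -> Result<(), TchError> {
    ///     let r = Tensor::f_range(0i64, 4i64, (Kind::Float, Device::Cpu))?;
    ///     // r is [0., 1., 2., 3., 4.] -- five elements, end inclusive.
    ///     Ok(())
    /// }
    /// ```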
29222 pub fn f_range<S: Into<Scalar>>(
29223 start: S,
29224 end: S,
29225 options: (Kind, Device),
29226 ) -> Result<Tensor, TchError> {
29227 let mut c_tensors = [std::ptr::null_mut(); 1];
29228 unsafe_torch_err!(atg_range(
29229 c_tensors.as_mut_ptr(),
29230 start.into().c_scalar,
29231 end.into().c_scalar,
29232 options.0.c_int(),
29233 options.1.c_int()
29234 ));
29235 Ok(Tensor { c_tensor: c_tensors[0] })
29236 }
29237
29238 pub fn f_range_out<S: Into<Scalar>>(
29239 out: &Tensor,
29240 start: S,
29241 end: S,
29242 ) -> Result<Tensor, TchError> {
29243 let mut c_tensors = [std::ptr::null_mut(); 1];
29244 unsafe_torch_err!(atg_range_out(
29245 c_tensors.as_mut_ptr(),
29246 out.c_tensor,
29247 start.into().c_scalar,
29248 end.into().c_scalar
29249 ));
29250 Ok(Tensor { c_tensor: c_tensors[0] })
29251 }
29252
29253 pub fn f_range_out_<S: Into<Scalar>>(
29254 out: &Tensor,
29255 start: S,
29256 end: S,
29257 ) -> Result<Tensor, TchError> {
29258 let mut c_tensors = [std::ptr::null_mut(); 1];
29259 unsafe_torch_err!(atg_range_out_(
29260 c_tensors.as_mut_ptr(),
29261 out.c_tensor,
29262 start.into().c_scalar,
29263 end.into().c_scalar
29264 ));
29265 Ok(Tensor { c_tensor: c_tensors[0] })
29266 }
29267
29268 pub fn f_range_step<S: Into<Scalar>>(
29269 start: S,
29270 end: S,
29271 options: (Kind, Device),
29272 ) -> Result<Tensor, TchError> {
29273 let mut c_tensors = [std::ptr::null_mut(); 1];
29274 unsafe_torch_err!(atg_range_step(
29275 c_tensors.as_mut_ptr(),
29276 start.into().c_scalar,
29277 end.into().c_scalar,
29278 options.0.c_int(),
29279 options.1.c_int()
29280 ));
29281 Ok(Tensor { c_tensor: c_tensors[0] })
29282 }
29283
29284 pub fn f_ravel(&self) -> Result<Tensor, TchError> {
29285 let mut c_tensors = [std::ptr::null_mut(); 1];
29286 unsafe_torch_err!(atg_ravel(c_tensors.as_mut_ptr(), self.c_tensor));
29287 Ok(Tensor { c_tensor: c_tensors[0] })
29288 }
29289
29290 pub fn f_real(&self) -> Result<Tensor, TchError> {
29291 let mut c_tensors = [std::ptr::null_mut(); 1];
29292 unsafe_torch_err!(atg_real(c_tensors.as_mut_ptr(), self.c_tensor));
29293 Ok(Tensor { c_tensor: c_tensors[0] })
29294 }
29295
29296 pub fn f_reciprocal(&self) -> Result<Tensor, TchError> {
29297 let mut c_tensors = [std::ptr::null_mut(); 1];
29298 unsafe_torch_err!(atg_reciprocal(c_tensors.as_mut_ptr(), self.c_tensor));
29299 Ok(Tensor { c_tensor: c_tensors[0] })
29300 }
29301
29302 pub fn f_reciprocal_(&mut self) -> Result<Tensor, TchError> {
29303 let mut c_tensors = [std::ptr::null_mut(); 1];
29304 unsafe_torch_err!(atg_reciprocal_(c_tensors.as_mut_ptr(), self.c_tensor));
29305 Ok(Tensor { c_tensor: c_tensors[0] })
29306 }
29307
29308 pub fn f_reciprocal_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
29309 let mut c_tensors = [std::ptr::null_mut(); 1];
29310 unsafe_torch_err!(atg_reciprocal_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
29311 Ok(Tensor { c_tensor: c_tensors[0] })
29312 }
29313
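    /// Pads the last dimension by reflecting the tensor about its edges; `padding` is
    /// `[pad_left, pad_right]` and each entry must be smaller than the padded dimension's size.
    ///
    /// Sketch of typical usage, illustrative only; it assumes an `(N, C, W)` float CPU tensor:
    ///
    /// ```ignore
    /// use crate::{Device, Kind, TchError, Tensor};
    ///
    /// fn example() -> Result<(), TchError> {
    ///     let x = Tensor::f_randn(&[1, 2, 5], (Kind::Float, Device::Cpu))?;
    ///     let y = x.f_reflection_pad1d(&[2, 2])?; // width grows from 5 to 9
    ///     Ok(())
    /// }
    /// ```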
29314 pub fn f_reflection_pad1d(&self, padding: impl IntList) -> Result<Tensor, TchError> {
29315 let mut c_tensors = [std::ptr::null_mut(); 1];
29316 unsafe_torch_err!(atg_reflection_pad1d(
29317 c_tensors.as_mut_ptr(),
29318 self.c_tensor,
29319 padding.as_ptr(),
29320 padding.len_i32()
29321 ));
29322 Ok(Tensor { c_tensor: c_tensors[0] })
29323 }
29324
29325 pub fn f_reflection_pad1d_backward(
29326 &self,
29327 grad_output: &Tensor,
29328 padding: impl IntList,
29329 ) -> Result<Tensor, TchError> {
29330 let mut c_tensors = [std::ptr::null_mut(); 1];
29331 unsafe_torch_err!(atg_reflection_pad1d_backward(
29332 c_tensors.as_mut_ptr(),
29333 grad_output.c_tensor,
29334 self.c_tensor,
29335 padding.as_ptr(),
29336 padding.len_i32()
29337 ));
29338 Ok(Tensor { c_tensor: c_tensors[0] })
29339 }
29340
29341 pub fn f_reflection_pad1d_backward_grad_input(
29342 &self,
29343 grad_input: &Tensor,
29344 grad_output: &Tensor,
29345 padding: impl IntList,
29346 ) -> Result<Tensor, TchError> {
29347 let mut c_tensors = [std::ptr::null_mut(); 1];
29348 unsafe_torch_err!(atg_reflection_pad1d_backward_grad_input(
29349 c_tensors.as_mut_ptr(),
29350 grad_input.c_tensor,
29351 grad_output.c_tensor,
29352 self.c_tensor,
29353 padding.as_ptr(),
29354 padding.len_i32()
29355 ));
29356 Ok(Tensor { c_tensor: c_tensors[0] })
29357 }
29358
29359 pub fn f_reflection_pad1d_out(
29360 &self,
29361 out: &Tensor,
29362 padding: impl IntList,
29363 ) -> Result<Tensor, TchError> {
29364 let mut c_tensors = [std::ptr::null_mut(); 1];
29365 unsafe_torch_err!(atg_reflection_pad1d_out(
29366 c_tensors.as_mut_ptr(),
29367 out.c_tensor,
29368 self.c_tensor,
29369 padding.as_ptr(),
29370 padding.len_i32()
29371 ));
29372 Ok(Tensor { c_tensor: c_tensors[0] })
29373 }
29374
29375 pub fn f_reflection_pad2d(&self, padding: impl IntList) -> Result<Tensor, TchError> {
29376 let mut c_tensors = [std::ptr::null_mut(); 1];
29377 unsafe_torch_err!(atg_reflection_pad2d(
29378 c_tensors.as_mut_ptr(),
29379 self.c_tensor,
29380 padding.as_ptr(),
29381 padding.len_i32()
29382 ));
29383 Ok(Tensor { c_tensor: c_tensors[0] })
29384 }
29385
29386 pub fn f_reflection_pad2d_backward(
29387 &self,
29388 grad_output: &Tensor,
29389 padding: impl IntList,
29390 ) -> Result<Tensor, TchError> {
29391 let mut c_tensors = [std::ptr::null_mut(); 1];
29392 unsafe_torch_err!(atg_reflection_pad2d_backward(
29393 c_tensors.as_mut_ptr(),
29394 grad_output.c_tensor,
29395 self.c_tensor,
29396 padding.as_ptr(),
29397 padding.len_i32()
29398 ));
29399 Ok(Tensor { c_tensor: c_tensors[0] })
29400 }
29401
29402 pub fn f_reflection_pad2d_backward_grad_input(
29403 &self,
29404 grad_input: &Tensor,
29405 grad_output: &Tensor,
29406 padding: impl IntList,
29407 ) -> Result<Tensor, TchError> {
29408 let mut c_tensors = [std::ptr::null_mut(); 1];
29409 unsafe_torch_err!(atg_reflection_pad2d_backward_grad_input(
29410 c_tensors.as_mut_ptr(),
29411 grad_input.c_tensor,
29412 grad_output.c_tensor,
29413 self.c_tensor,
29414 padding.as_ptr(),
29415 padding.len_i32()
29416 ));
29417 Ok(Tensor { c_tensor: c_tensors[0] })
29418 }
29419
29420 pub fn f_reflection_pad2d_out(
29421 &self,
29422 out: &Tensor,
29423 padding: impl IntList,
29424 ) -> Result<Tensor, TchError> {
29425 let mut c_tensors = [std::ptr::null_mut(); 1];
29426 unsafe_torch_err!(atg_reflection_pad2d_out(
29427 c_tensors.as_mut_ptr(),
29428 out.c_tensor,
29429 self.c_tensor,
29430 padding.as_ptr(),
29431 padding.len_i32()
29432 ));
29433 Ok(Tensor { c_tensor: c_tensors[0] })
29434 }
29435
29436 pub fn f_reflection_pad3d(&self, padding: impl IntList) -> Result<Tensor, TchError> {
29437 let mut c_tensors = [std::ptr::null_mut(); 1];
29438 unsafe_torch_err!(atg_reflection_pad3d(
29439 c_tensors.as_mut_ptr(),
29440 self.c_tensor,
29441 padding.as_ptr(),
29442 padding.len_i32()
29443 ));
29444 Ok(Tensor { c_tensor: c_tensors[0] })
29445 }
29446
29447 pub fn f_reflection_pad3d_backward(
29448 &self,
29449 grad_output: &Tensor,
29450 padding: impl IntList,
29451 ) -> Result<Tensor, TchError> {
29452 let mut c_tensors = [std::ptr::null_mut(); 1];
29453 unsafe_torch_err!(atg_reflection_pad3d_backward(
29454 c_tensors.as_mut_ptr(),
29455 grad_output.c_tensor,
29456 self.c_tensor,
29457 padding.as_ptr(),
29458 padding.len_i32()
29459 ));
29460 Ok(Tensor { c_tensor: c_tensors[0] })
29461 }
29462
29463 pub fn f_reflection_pad3d_backward_grad_input(
29464 &self,
29465 grad_input: &Tensor,
29466 grad_output: &Tensor,
29467 padding: impl IntList,
29468 ) -> Result<Tensor, TchError> {
29469 let mut c_tensors = [std::ptr::null_mut(); 1];
29470 unsafe_torch_err!(atg_reflection_pad3d_backward_grad_input(
29471 c_tensors.as_mut_ptr(),
29472 grad_input.c_tensor,
29473 grad_output.c_tensor,
29474 self.c_tensor,
29475 padding.as_ptr(),
29476 padding.len_i32()
29477 ));
29478 Ok(Tensor { c_tensor: c_tensors[0] })
29479 }
29480
29481 pub fn f_reflection_pad3d_out(
29482 &self,
29483 out: &Tensor,
29484 padding: impl IntList,
29485 ) -> Result<Tensor, TchError> {
29486 let mut c_tensors = [std::ptr::null_mut(); 1];
29487 unsafe_torch_err!(atg_reflection_pad3d_out(
29488 c_tensors.as_mut_ptr(),
29489 out.c_tensor,
29490 self.c_tensor,
29491 padding.as_ptr(),
29492 padding.len_i32()
29493 ));
29494 Ok(Tensor { c_tensor: c_tensors[0] })
29495 }
29496
29497 pub fn f_relu(&self) -> Result<Tensor, TchError> {
29498 let mut c_tensors = [std::ptr::null_mut(); 1];
29499 unsafe_torch_err!(atg_relu(c_tensors.as_mut_ptr(), self.c_tensor));
29500 Ok(Tensor { c_tensor: c_tensors[0] })
29501 }
29502
29503 pub fn f_relu6(&self) -> Result<Tensor, TchError> {
29504 let mut c_tensors = [std::ptr::null_mut(); 1];
29505 unsafe_torch_err!(atg_relu6(c_tensors.as_mut_ptr(), self.c_tensor));
29506 Ok(Tensor { c_tensor: c_tensors[0] })
29507 }
29508
29509 pub fn f_relu6_(&mut self) -> Result<Tensor, TchError> {
29510 let mut c_tensors = [std::ptr::null_mut(); 1];
29511 unsafe_torch_err!(atg_relu6_(c_tensors.as_mut_ptr(), self.c_tensor));
29512 Ok(Tensor { c_tensor: c_tensors[0] })
29513 }
29514
29515 pub fn f_relu_(&mut self) -> Result<Tensor, TchError> {
29516 let mut c_tensors = [std::ptr::null_mut(); 1];
29517 unsafe_torch_err!(atg_relu_(c_tensors.as_mut_ptr(), self.c_tensor));
29518 Ok(Tensor { c_tensor: c_tensors[0] })
29519 }
29520
29521 pub fn f_relu_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
29522 let mut c_tensors = [std::ptr::null_mut(); 1];
29523 unsafe_torch_err!(atg_relu_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
29524 Ok(Tensor { c_tensor: c_tensors[0] })
29525 }
29526
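    /// Elementwise remainder with a scalar divisor (Python-style `%`: the result takes the sign
    /// of the divisor); the `_tensor` and `_scalar_tensor` variants below cover tensor divisors
    /// and a scalar dividend respectively.
    ///
    /// A small sketch, illustrative only:
    ///
    /// ```ignore
    /// use crate::{Device, Kind, TchError, Tensor};
    ///
    /// fn example() -> Result<(), TchError> {
    ///     let x = Tensor::f_randn(&[4], (Kind::Float, Device::Cpu))?;
    ///     let r1 = x.f_remainder(2.0)?; // x % 2.0, values in [0, 2)
    ///     let r2 = x.f_remainder_tensor(&x)?; // elementwise x % x
    ///     Ok(())
    /// }
    /// ```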
29527 pub fn f_remainder<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
29528 let mut c_tensors = [std::ptr::null_mut(); 1];
29529 unsafe_torch_err!(atg_remainder(
29530 c_tensors.as_mut_ptr(),
29531 self.c_tensor,
29532 other.into().c_scalar
29533 ));
29534 Ok(Tensor { c_tensor: c_tensors[0] })
29535 }
29536
29537 pub fn f_remainder_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
29538 let mut c_tensors = [std::ptr::null_mut(); 1];
29539 unsafe_torch_err!(atg_remainder_(
29540 c_tensors.as_mut_ptr(),
29541 self.c_tensor,
29542 other.into().c_scalar
29543 ));
29544 Ok(Tensor { c_tensor: c_tensors[0] })
29545 }
29546
29547 pub fn f_remainder_scalar_out<S: Into<Scalar>>(
29548 &self,
29549 out: &Tensor,
29550 other: S,
29551 ) -> Result<Tensor, TchError> {
29552 let mut c_tensors = [std::ptr::null_mut(); 1];
29553 unsafe_torch_err!(atg_remainder_scalar_out(
29554 c_tensors.as_mut_ptr(),
29555 out.c_tensor,
29556 self.c_tensor,
29557 other.into().c_scalar
29558 ));
29559 Ok(Tensor { c_tensor: c_tensors[0] })
29560 }
29561
29562 pub fn f_remainder_scalar_tensor<S: Into<Scalar>>(
29563 self_scalar: S,
29564 other: &Tensor,
29565 ) -> Result<Tensor, TchError> {
29566 let mut c_tensors = [std::ptr::null_mut(); 1];
29567 unsafe_torch_err!(atg_remainder_scalar_tensor(
29568 c_tensors.as_mut_ptr(),
29569 self_scalar.into().c_scalar,
29570 other.c_tensor
29571 ));
29572 Ok(Tensor { c_tensor: c_tensors[0] })
29573 }
29574
29575 pub fn f_remainder_scalar_tensor_out<S: Into<Scalar>>(
29576 out: &Tensor,
29577 self_scalar: S,
29578 other: &Tensor,
29579 ) -> Result<Tensor, TchError> {
29580 let mut c_tensors = [std::ptr::null_mut(); 1];
29581 unsafe_torch_err!(atg_remainder_scalar_tensor_out(
29582 c_tensors.as_mut_ptr(),
29583 out.c_tensor,
29584 self_scalar.into().c_scalar,
29585 other.c_tensor
29586 ));
29587 Ok(Tensor { c_tensor: c_tensors[0] })
29588 }
29589
29590 pub fn f_remainder_tensor(&self, other: &Tensor) -> Result<Tensor, TchError> {
29591 let mut c_tensors = [std::ptr::null_mut(); 1];
29592 unsafe_torch_err!(atg_remainder_tensor(
29593 c_tensors.as_mut_ptr(),
29594 self.c_tensor,
29595 other.c_tensor
29596 ));
29597 Ok(Tensor { c_tensor: c_tensors[0] })
29598 }
29599
29600 pub fn f_remainder_tensor_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
29601 let mut c_tensors = [std::ptr::null_mut(); 1];
29602 unsafe_torch_err!(atg_remainder_tensor_(
29603 c_tensors.as_mut_ptr(),
29604 self.c_tensor,
29605 other.c_tensor
29606 ));
29607 Ok(Tensor { c_tensor: c_tensors[0] })
29608 }
29609
29610 pub fn f_remainder_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
29611 let mut c_tensors = [std::ptr::null_mut(); 1];
29612 unsafe_torch_err!(atg_remainder_tensor_out(
29613 c_tensors.as_mut_ptr(),
29614 out.c_tensor,
29615 self.c_tensor,
29616 other.c_tensor
29617 ));
29618 Ok(Tensor { c_tensor: c_tensors[0] })
29619 }
29620
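    /// Renormalizes sub-tensors along `dim`: every slice whose `p`-norm exceeds `maxnorm` is
    /// rescaled so that its norm equals `maxnorm`; slices already within the bound are untouched.
    ///
    /// A minimal sketch, illustrative only:
    ///
    /// ```ignore
    /// use crate::{Device, Kind, TchError, Tensor};
    ///
    /// fn example() -> Result<(), TchError> {
    ///     let x = Tensor::f_randn(&[3, 4], (Kind::Float, Device::Cpu))?;
    ///     let y = x.f_renorm(2.0, 0, 1.0)?; // every row ends up with L2 norm <= 1.0
    ///     Ok(())
    /// }
    /// ```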
29621 pub fn f_renorm<S: Into<Scalar>>(
29622 &self,
29623 p: S,
29624 dim: i64,
29625 maxnorm: S,
29626 ) -> Result<Tensor, TchError> {
29627 let mut c_tensors = [std::ptr::null_mut(); 1];
29628 unsafe_torch_err!(atg_renorm(
29629 c_tensors.as_mut_ptr(),
29630 self.c_tensor,
29631 p.into().c_scalar,
29632 dim,
29633 maxnorm.into().c_scalar
29634 ));
29635 Ok(Tensor { c_tensor: c_tensors[0] })
29636 }
29637
29638 pub fn f_renorm_<S: Into<Scalar>>(
29639 &mut self,
29640 p: S,
29641 dim: i64,
29642 maxnorm: S,
29643 ) -> Result<Tensor, TchError> {
29644 let mut c_tensors = [std::ptr::null_mut(); 1];
29645 unsafe_torch_err!(atg_renorm_(
29646 c_tensors.as_mut_ptr(),
29647 self.c_tensor,
29648 p.into().c_scalar,
29649 dim,
29650 maxnorm.into().c_scalar
29651 ));
29652 Ok(Tensor { c_tensor: c_tensors[0] })
29653 }
29654
29655 pub fn f_renorm_out<S: Into<Scalar>>(
29656 &self,
29657 out: &Tensor,
29658 p: S,
29659 dim: i64,
29660 maxnorm: S,
29661 ) -> Result<Tensor, TchError> {
29662 let mut c_tensors = [std::ptr::null_mut(); 1];
29663 unsafe_torch_err!(atg_renorm_out(
29664 c_tensors.as_mut_ptr(),
29665 out.c_tensor,
29666 self.c_tensor,
29667 p.into().c_scalar,
29668 dim,
29669 maxnorm.into().c_scalar
29670 ));
29671 Ok(Tensor { c_tensor: c_tensors[0] })
29672 }
29673
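    /// Tiles the whole tensor along each dimension according to `repeats` (the counterpart of
    /// `Tensor.repeat` / NumPy `tile`); contrast with `repeat_interleave` below, which repeats
    /// individual elements or slices.
    ///
    /// Illustrative sketch only:
    ///
    /// ```ignore
    /// use crate::{Device, Kind, TchError, Tensor};
    ///
    /// fn example() -> Result<(), TchError> {
    ///     let x = Tensor::f_randn(&[2, 3], (Kind::Float, Device::Cpu))?;
    ///     let tiled = x.f_repeat(&[2, 1])?; // [4, 3]: the block is stacked twice along dim 0
    ///     let interleaved = x.f_repeat_interleave_self_int(2, 0, None)?; // each row twice -> [4, 3]
    ///     Ok(())
    /// }
    /// ```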
29674 pub fn f_repeat(&self, repeats: impl IntList) -> Result<Tensor, TchError> {
29675 let mut c_tensors = [std::ptr::null_mut(); 1];
29676 unsafe_torch_err!(atg_repeat(
29677 c_tensors.as_mut_ptr(),
29678 self.c_tensor,
29679 repeats.as_ptr(),
29680 repeats.len_i32()
29681 ));
29682 Ok(Tensor { c_tensor: c_tensors[0] })
29683 }
29684
29685 pub fn f_repeat_interleave(
29686 repeats: &Tensor,
29687 output_size: impl Into<Option<i64>>,
29688 ) -> Result<Tensor, TchError> {
29689 let output_size = output_size.into();
29690 let mut c_tensors = [std::ptr::null_mut(); 1];
29691 unsafe_torch_err!(atg_repeat_interleave(
29692 c_tensors.as_mut_ptr(),
29693 repeats.c_tensor,
29694 output_size.unwrap_or(0i64),
29695 output_size.is_none() as i8
29696 ));
29697 Ok(Tensor { c_tensor: c_tensors[0] })
29698 }
29699
29700 pub fn f_repeat_interleave_self_int(
29701 &self,
29702 repeats: i64,
29703 dim: impl Into<Option<i64>>,
29704 output_size: impl Into<Option<i64>>,
29705 ) -> Result<Tensor, TchError> {
29706 let dim = dim.into();
29707 let output_size = output_size.into();
29708 let mut c_tensors = [std::ptr::null_mut(); 1];
29709 unsafe_torch_err!(atg_repeat_interleave_self_int(
29710 c_tensors.as_mut_ptr(),
29711 self.c_tensor,
29712 repeats,
29713 dim.unwrap_or(0i64),
29714 dim.is_none() as i8,
29715 output_size.unwrap_or(0i64),
29716 output_size.is_none() as i8
29717 ));
29718 Ok(Tensor { c_tensor: c_tensors[0] })
29719 }
29720
29721 pub fn f_repeat_interleave_self_tensor(
29722 &self,
29723 repeats: &Tensor,
29724 dim: impl Into<Option<i64>>,
29725 output_size: impl Into<Option<i64>>,
29726 ) -> Result<Tensor, TchError> {
29727 let dim = dim.into();
29728 let output_size = output_size.into();
29729 let mut c_tensors = [std::ptr::null_mut(); 1];
29730 unsafe_torch_err!(atg_repeat_interleave_self_tensor(
29731 c_tensors.as_mut_ptr(),
29732 self.c_tensor,
29733 repeats.c_tensor,
29734 dim.unwrap_or(0i64),
29735 dim.is_none() as i8,
29736 output_size.unwrap_or(0i64),
29737 output_size.is_none() as i8
29738 ));
29739 Ok(Tensor { c_tensor: c_tensors[0] })
29740 }
29741
29742 pub fn f_repeat_interleave_tensor_out(
29743 out: &Tensor,
29744 repeats: &Tensor,
29745 output_size: impl Into<Option<i64>>,
29746 ) -> Result<Tensor, TchError> {
29747 let output_size = output_size.into();
29748 let mut c_tensors = [std::ptr::null_mut(); 1];
29749 unsafe_torch_err!(atg_repeat_interleave_tensor_out(
29750 c_tensors.as_mut_ptr(),
29751 out.c_tensor,
29752 repeats.c_tensor,
29753 output_size.unwrap_or(0i64),
29754 output_size.is_none() as i8
29755 ));
29756 Ok(Tensor { c_tensor: c_tensors[0] })
29757 }
29758
29759 pub fn f_repeat_out(&self, out: &Tensor, repeats: impl IntList) -> Result<Tensor, TchError> {
29760 let mut c_tensors = [std::ptr::null_mut(); 1];
29761 unsafe_torch_err!(atg_repeat_out(
29762 c_tensors.as_mut_ptr(),
29763 out.c_tensor,
29764 self.c_tensor,
29765 repeats.as_ptr(),
29766 repeats.len_i32()
29767 ));
29768 Ok(Tensor { c_tensor: c_tensors[0] })
29769 }
29770
29771 pub fn f_replication_pad1d(&self, padding: impl IntList) -> Result<Tensor, TchError> {
29772 let mut c_tensors = [std::ptr::null_mut(); 1];
29773 unsafe_torch_err!(atg_replication_pad1d(
29774 c_tensors.as_mut_ptr(),
29775 self.c_tensor,
29776 padding.as_ptr(),
29777 padding.len_i32()
29778 ));
29779 Ok(Tensor { c_tensor: c_tensors[0] })
29780 }
29781
29782 pub fn f_replication_pad1d_backward(
29783 &self,
29784 grad_output: &Tensor,
29785 padding: impl IntList,
29786 ) -> Result<Tensor, TchError> {
29787 let mut c_tensors = [std::ptr::null_mut(); 1];
29788 unsafe_torch_err!(atg_replication_pad1d_backward(
29789 c_tensors.as_mut_ptr(),
29790 grad_output.c_tensor,
29791 self.c_tensor,
29792 padding.as_ptr(),
29793 padding.len_i32()
29794 ));
29795 Ok(Tensor { c_tensor: c_tensors[0] })
29796 }
29797
29798 pub fn f_replication_pad1d_backward_grad_input(
29799 &self,
29800 grad_input: &Tensor,
29801 grad_output: &Tensor,
29802 padding: impl IntList,
29803 ) -> Result<Tensor, TchError> {
29804 let mut c_tensors = [std::ptr::null_mut(); 1];
29805 unsafe_torch_err!(atg_replication_pad1d_backward_grad_input(
29806 c_tensors.as_mut_ptr(),
29807 grad_input.c_tensor,
29808 grad_output.c_tensor,
29809 self.c_tensor,
29810 padding.as_ptr(),
29811 padding.len_i32()
29812 ));
29813 Ok(Tensor { c_tensor: c_tensors[0] })
29814 }
29815
29816 pub fn f_replication_pad1d_out(
29817 &self,
29818 out: &Tensor,
29819 padding: impl IntList,
29820 ) -> Result<Tensor, TchError> {
29821 let mut c_tensors = [std::ptr::null_mut(); 1];
29822 unsafe_torch_err!(atg_replication_pad1d_out(
29823 c_tensors.as_mut_ptr(),
29824 out.c_tensor,
29825 self.c_tensor,
29826 padding.as_ptr(),
29827 padding.len_i32()
29828 ));
29829 Ok(Tensor { c_tensor: c_tensors[0] })
29830 }
29831
29832 pub fn f_replication_pad2d(&self, padding: impl IntList) -> Result<Tensor, TchError> {
29833 let mut c_tensors = [std::ptr::null_mut(); 1];
29834 unsafe_torch_err!(atg_replication_pad2d(
29835 c_tensors.as_mut_ptr(),
29836 self.c_tensor,
29837 padding.as_ptr(),
29838 padding.len_i32()
29839 ));
29840 Ok(Tensor { c_tensor: c_tensors[0] })
29841 }
29842
29843 pub fn f_replication_pad2d_backward(
29844 &self,
29845 grad_output: &Tensor,
29846 padding: impl IntList,
29847 ) -> Result<Tensor, TchError> {
29848 let mut c_tensors = [std::ptr::null_mut(); 1];
29849 unsafe_torch_err!(atg_replication_pad2d_backward(
29850 c_tensors.as_mut_ptr(),
29851 grad_output.c_tensor,
29852 self.c_tensor,
29853 padding.as_ptr(),
29854 padding.len_i32()
29855 ));
29856 Ok(Tensor { c_tensor: c_tensors[0] })
29857 }
29858
29859 pub fn f_replication_pad2d_backward_grad_input(
29860 &self,
29861 grad_input: &Tensor,
29862 grad_output: &Tensor,
29863 padding: impl IntList,
29864 ) -> Result<Tensor, TchError> {
29865 let mut c_tensors = [std::ptr::null_mut(); 1];
29866 unsafe_torch_err!(atg_replication_pad2d_backward_grad_input(
29867 c_tensors.as_mut_ptr(),
29868 grad_input.c_tensor,
29869 grad_output.c_tensor,
29870 self.c_tensor,
29871 padding.as_ptr(),
29872 padding.len_i32()
29873 ));
29874 Ok(Tensor { c_tensor: c_tensors[0] })
29875 }
29876
29877 pub fn f_replication_pad2d_out(
29878 &self,
29879 out: &Tensor,
29880 padding: impl IntList,
29881 ) -> Result<Tensor, TchError> {
29882 let mut c_tensors = [std::ptr::null_mut(); 1];
29883 unsafe_torch_err!(atg_replication_pad2d_out(
29884 c_tensors.as_mut_ptr(),
29885 out.c_tensor,
29886 self.c_tensor,
29887 padding.as_ptr(),
29888 padding.len_i32()
29889 ));
29890 Ok(Tensor { c_tensor: c_tensors[0] })
29891 }
29892
29893 pub fn f_replication_pad3d(&self, padding: impl IntList) -> Result<Tensor, TchError> {
29894 let mut c_tensors = [std::ptr::null_mut(); 1];
29895 unsafe_torch_err!(atg_replication_pad3d(
29896 c_tensors.as_mut_ptr(),
29897 self.c_tensor,
29898 padding.as_ptr(),
29899 padding.len_i32()
29900 ));
29901 Ok(Tensor { c_tensor: c_tensors[0] })
29902 }
29903
29904 pub fn f_replication_pad3d_backward(
29905 &self,
29906 grad_output: &Tensor,
29907 padding: impl IntList,
29908 ) -> Result<Tensor, TchError> {
29909 let mut c_tensors = [std::ptr::null_mut(); 1];
29910 unsafe_torch_err!(atg_replication_pad3d_backward(
29911 c_tensors.as_mut_ptr(),
29912 grad_output.c_tensor,
29913 self.c_tensor,
29914 padding.as_ptr(),
29915 padding.len_i32()
29916 ));
29917 Ok(Tensor { c_tensor: c_tensors[0] })
29918 }
29919
29920 pub fn f_replication_pad3d_backward_grad_input(
29921 &self,
29922 grad_input: &Tensor,
29923 grad_output: &Tensor,
29924 padding: impl IntList,
29925 ) -> Result<Tensor, TchError> {
29926 let mut c_tensors = [std::ptr::null_mut(); 1];
29927 unsafe_torch_err!(atg_replication_pad3d_backward_grad_input(
29928 c_tensors.as_mut_ptr(),
29929 grad_input.c_tensor,
29930 grad_output.c_tensor,
29931 self.c_tensor,
29932 padding.as_ptr(),
29933 padding.len_i32()
29934 ));
29935 Ok(Tensor { c_tensor: c_tensors[0] })
29936 }
29937
29938 pub fn f_replication_pad3d_out(
29939 &self,
29940 out: &Tensor,
29941 padding: impl IntList,
29942 ) -> Result<Tensor, TchError> {
29943 let mut c_tensors = [std::ptr::null_mut(); 1];
29944 unsafe_torch_err!(atg_replication_pad3d_out(
29945 c_tensors.as_mut_ptr(),
29946 out.c_tensor,
29947 self.c_tensor,
29948 padding.as_ptr(),
29949 padding.len_i32()
29950 ));
29951 Ok(Tensor { c_tensor: c_tensors[0] })
29952 }
29953
29954 pub fn f_requires_grad_(&mut self, requires_grad: bool) -> Result<Tensor, TchError> {
29955 let mut c_tensors = [std::ptr::null_mut(); 1];
29956 unsafe_torch_err!(atg_requires_grad_(
29957 c_tensors.as_mut_ptr(),
29958 self.c_tensor,
29959 if requires_grad { 1 } else { 0 }
29960 ));
29961 Ok(Tensor { c_tensor: c_tensors[0] })
29962 }
29963
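    /// Returns a tensor with the same data viewed under `shape`; a single `-1` entry is inferred
    /// from the element count, and a copy is made only when the data cannot be viewed directly.
    ///
    /// A small sketch, illustrative only:
    ///
    /// ```ignore
    /// use crate::{Device, Kind, TchError, Tensor};
    ///
    /// fn example() -> Result<(), TchError> {
    ///     let x = Tensor::f_randn(&[2, 6], (Kind::Float, Device::Cpu))?;
    ///     let y = x.f_reshape(&[3, 4])?; // same 12 elements, new shape
    ///     let z = x.f_reshape(&[-1, 2])?; // -1 is inferred as 6
    ///     Ok(())
    /// }
    /// ```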
29964 pub fn f_reshape(&self, shape: impl IntList) -> Result<Tensor, TchError> {
29965 let mut c_tensors = [std::ptr::null_mut(); 1];
29966 unsafe_torch_err!(atg_reshape(
29967 c_tensors.as_mut_ptr(),
29968 self.c_tensor,
29969 shape.as_ptr(),
29970 shape.len_i32()
29971 ));
29972 Ok(Tensor { c_tensor: c_tensors[0] })
29973 }
29974
29975 pub fn f_reshape_as(&self, other: &Tensor) -> Result<Tensor, TchError> {
29976 let mut c_tensors = [std::ptr::null_mut(); 1];
29977 unsafe_torch_err!(atg_reshape_as(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
29978 Ok(Tensor { c_tensor: c_tensors[0] })
29979 }
29980
29981 pub fn f_resize(&self, size: impl IntList) -> Result<Tensor, TchError> {
29982 let mut c_tensors = [std::ptr::null_mut(); 1];
29983 unsafe_torch_err!(atg_resize(
29984 c_tensors.as_mut_ptr(),
29985 self.c_tensor,
29986 size.as_ptr(),
29987 size.len_i32()
29988 ));
29989 Ok(Tensor { c_tensor: c_tensors[0] })
29990 }
29991
29992 pub fn f_resize_(&mut self, size: impl IntList) -> Result<Tensor, TchError> {
29993 let mut c_tensors = [std::ptr::null_mut(); 1];
29994 unsafe_torch_err!(atg_resize_(
29995 c_tensors.as_mut_ptr(),
29996 self.c_tensor,
29997 size.as_ptr(),
29998 size.len_i32()
29999 ));
30000 Ok(Tensor { c_tensor: c_tensors[0] })
30001 }
30002
30003 pub fn f_resize_as(&self, the_template: &Tensor) -> Result<Tensor, TchError> {
30004 let mut c_tensors = [std::ptr::null_mut(); 1];
30005 unsafe_torch_err!(atg_resize_as(
30006 c_tensors.as_mut_ptr(),
30007 self.c_tensor,
30008 the_template.c_tensor
30009 ));
30010 Ok(Tensor { c_tensor: c_tensors[0] })
30011 }
30012
30013 pub fn f_resize_as_(&mut self, the_template: &Tensor) -> Result<Tensor, TchError> {
30014 let mut c_tensors = [std::ptr::null_mut(); 1];
30015 unsafe_torch_err!(atg_resize_as_(
30016 c_tensors.as_mut_ptr(),
30017 self.c_tensor,
30018 the_template.c_tensor
30019 ));
30020 Ok(Tensor { c_tensor: c_tensors[0] })
30021 }
30022
30023 pub fn f_resize_as_out(&self, out: &Tensor, the_template: &Tensor) -> Result<Tensor, TchError> {
30024 let mut c_tensors = [std::ptr::null_mut(); 1];
30025 unsafe_torch_err!(atg_resize_as_out(
30026 c_tensors.as_mut_ptr(),
30027 out.c_tensor,
30028 self.c_tensor,
30029 the_template.c_tensor
30030 ));
30031 Ok(Tensor { c_tensor: c_tensors[0] })
30032 }
30033
30034 pub fn f_resize_as_sparse(&self, the_template: &Tensor) -> Result<Tensor, TchError> {
30035 let mut c_tensors = [std::ptr::null_mut(); 1];
30036 unsafe_torch_err!(atg_resize_as_sparse(
30037 c_tensors.as_mut_ptr(),
30038 self.c_tensor,
30039 the_template.c_tensor
30040 ));
30041 Ok(Tensor { c_tensor: c_tensors[0] })
30042 }
30043
30044 pub fn f_resize_as_sparse_(&mut self, the_template: &Tensor) -> Result<Tensor, TchError> {
30045 let mut c_tensors = [std::ptr::null_mut(); 1];
30046 unsafe_torch_err!(atg_resize_as_sparse_(
30047 c_tensors.as_mut_ptr(),
30048 self.c_tensor,
30049 the_template.c_tensor
30050 ));
30051 Ok(Tensor { c_tensor: c_tensors[0] })
30052 }
30053
30054 pub fn f_resize_as_sparse_out(
30055 &self,
30056 out: &Tensor,
30057 the_template: &Tensor,
30058 ) -> Result<Tensor, TchError> {
30059 let mut c_tensors = [std::ptr::null_mut(); 1];
30060 unsafe_torch_err!(atg_resize_as_sparse_out(
30061 c_tensors.as_mut_ptr(),
30062 out.c_tensor,
30063 self.c_tensor,
30064 the_template.c_tensor
30065 ));
30066 Ok(Tensor { c_tensor: c_tensors[0] })
30067 }
30068
30069 pub fn f_resize_out(&self, out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
30070 let mut c_tensors = [std::ptr::null_mut(); 1];
30071 unsafe_torch_err!(atg_resize_out(
30072 c_tensors.as_mut_ptr(),
30073 out.c_tensor,
30074 self.c_tensor,
30075 size.as_ptr(),
30076 size.len_i32()
30077 ));
30078 Ok(Tensor { c_tensor: c_tensors[0] })
30079 }
30080
30081 pub fn f_resolve_conj(&self) -> Result<Tensor, TchError> {
30082 let mut c_tensors = [std::ptr::null_mut(); 1];
30083 unsafe_torch_err!(atg_resolve_conj(c_tensors.as_mut_ptr(), self.c_tensor));
30084 Ok(Tensor { c_tensor: c_tensors[0] })
30085 }
30086
30087 pub fn f_resolve_neg(&self) -> Result<Tensor, TchError> {
30088 let mut c_tensors = [std::ptr::null_mut(); 1];
30089 unsafe_torch_err!(atg_resolve_neg(c_tensors.as_mut_ptr(), self.c_tensor));
30090 Ok(Tensor { c_tensor: c_tensors[0] })
30091 }
30092
30093 pub fn f_retains_grad(&self) -> Result<bool, TchError> {
30094 let return_;
30095 unsafe_torch_err!(return_ = atg_retains_grad(self.c_tensor));
30096 Ok(return_ != 0)
30097 }
30098
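    /// Root-mean-square layer normalization over the trailing `normalized_shape` dimensions, with
    /// an optional elementwise `weight` and an optional `eps` (encoded for the C API as a NaN
    /// value plus an `is_none` flag).
    ///
    /// A rough sketch, illustrative only; no learned weight and the default eps:
    ///
    /// ```ignore
    /// use crate::{Device, Kind, TchError, Tensor};
    ///
    /// fn example() -> Result<(), TchError> {
    ///     let x = Tensor::f_randn(&[2, 8], (Kind::Float, Device::Cpu))?;
    ///     let y = x.f_rms_norm(&[8], None::<Tensor>, None)?;
    ///     Ok(())
    /// }
    /// ```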
30099 pub fn f_rms_norm<T: Borrow<Tensor>>(
30100 &self,
30101 normalized_shape: impl IntList,
30102 weight: Option<T>,
30103 eps: impl Into<Option<f64>>,
30104 ) -> Result<Tensor, TchError> {
30105 let eps = eps.into();
30106 let mut c_tensors = [std::ptr::null_mut(); 1];
30107 unsafe_torch_err!(atg_rms_norm(
30108 c_tensors.as_mut_ptr(),
30109 self.c_tensor,
30110 normalized_shape.as_ptr(),
30111 normalized_shape.len_i32(),
30112 weight.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
30113 eps.unwrap_or(std::f64::NAN),
30114 eps.is_none() as i8
30115 ));
30116 Ok(Tensor { c_tensor: c_tensors[0] })
30117 }
30118
30119 pub fn f_rnn_relu<T: Borrow<Tensor>>(
30120 &self,
30121 hx: &Tensor,
30122 params: &[T],
30123 has_biases: bool,
30124 num_layers: i64,
30125 dropout: f64,
30126 train: bool,
30127 bidirectional: bool,
30128 batch_first: bool,
30129 ) -> Result<(Tensor, Tensor), TchError> {
30130 let mut c_tensors = [std::ptr::null_mut(); 2];
30131 unsafe_torch_err!(atg_rnn_relu(
30132 c_tensors.as_mut_ptr(),
30133 self.c_tensor,
30134 hx.c_tensor,
30135 ptr_list(params).as_ptr(),
30136 params.len() as i32,
30137 if has_biases { 1 } else { 0 },
30138 num_layers,
30139 dropout,
30140 if train { 1 } else { 0 },
30141 if bidirectional { 1 } else { 0 },
30142 if batch_first { 1 } else { 0 }
30143 ));
30144 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
30145 }
30146
30147 pub fn f_rnn_relu_cell<T: Borrow<Tensor>>(
30148 &self,
30149 hx: &Tensor,
30150 w_ih: &Tensor,
30151 w_hh: &Tensor,
30152 b_ih: Option<T>,
30153 b_hh: Option<T>,
30154 ) -> Result<Tensor, TchError> {
30155 let mut c_tensors = [std::ptr::null_mut(); 1];
30156 unsafe_torch_err!(atg_rnn_relu_cell(
30157 c_tensors.as_mut_ptr(),
30158 self.c_tensor,
30159 hx.c_tensor,
30160 w_ih.c_tensor,
30161 w_hh.c_tensor,
30162 b_ih.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
30163 b_hh.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
30164 ));
30165 Ok(Tensor { c_tensor: c_tensors[0] })
30166 }
30167
30168 pub fn f_rnn_relu_data<T: Borrow<Tensor>>(
30169 data: &Tensor,
30170 batch_sizes: &Tensor,
30171 hx: &Tensor,
30172 params: &[T],
30173 has_biases: bool,
30174 num_layers: i64,
30175 dropout: f64,
30176 train: bool,
30177 bidirectional: bool,
30178 ) -> Result<(Tensor, Tensor), TchError> {
30179 let mut c_tensors = [std::ptr::null_mut(); 2];
30180 unsafe_torch_err!(atg_rnn_relu_data(
30181 c_tensors.as_mut_ptr(),
30182 data.c_tensor,
30183 batch_sizes.c_tensor,
30184 hx.c_tensor,
30185 ptr_list(params).as_ptr(),
30186 params.len() as i32,
30187 if has_biases { 1 } else { 0 },
30188 num_layers,
30189 dropout,
30190 if train { 1 } else { 0 },
30191 if bidirectional { 1 } else { 0 }
30192 ));
30193 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
30194 }
30195
30196 pub fn f_rnn_tanh<T: Borrow<Tensor>>(
30197 &self,
30198 hx: &Tensor,
30199 params: &[T],
30200 has_biases: bool,
30201 num_layers: i64,
30202 dropout: f64,
30203 train: bool,
30204 bidirectional: bool,
30205 batch_first: bool,
30206 ) -> Result<(Tensor, Tensor), TchError> {
30207 let mut c_tensors = [std::ptr::null_mut(); 2];
30208 unsafe_torch_err!(atg_rnn_tanh(
30209 c_tensors.as_mut_ptr(),
30210 self.c_tensor,
30211 hx.c_tensor,
30212 ptr_list(params).as_ptr(),
30213 params.len() as i32,
30214 if has_biases { 1 } else { 0 },
30215 num_layers,
30216 dropout,
30217 if train { 1 } else { 0 },
30218 if bidirectional { 1 } else { 0 },
30219 if batch_first { 1 } else { 0 }
30220 ));
30221 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
30222 }
30223
30224 pub fn f_rnn_tanh_cell<T: Borrow<Tensor>>(
30225 &self,
30226 hx: &Tensor,
30227 w_ih: &Tensor,
30228 w_hh: &Tensor,
30229 b_ih: Option<T>,
30230 b_hh: Option<T>,
30231 ) -> Result<Tensor, TchError> {
30232 let mut c_tensors = [std::ptr::null_mut(); 1];
30233 unsafe_torch_err!(atg_rnn_tanh_cell(
30234 c_tensors.as_mut_ptr(),
30235 self.c_tensor,
30236 hx.c_tensor,
30237 w_ih.c_tensor,
30238 w_hh.c_tensor,
30239 b_ih.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
30240 b_hh.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
30241 ));
30242 Ok(Tensor { c_tensor: c_tensors[0] })
30243 }
30244
30245 pub fn f_rnn_tanh_data<T: Borrow<Tensor>>(
30246 data: &Tensor,
30247 batch_sizes: &Tensor,
30248 hx: &Tensor,
30249 params: &[T],
30250 has_biases: bool,
30251 num_layers: i64,
30252 dropout: f64,
30253 train: bool,
30254 bidirectional: bool,
30255 ) -> Result<(Tensor, Tensor), TchError> {
30256 let mut c_tensors = [std::ptr::null_mut(); 2];
30257 unsafe_torch_err!(atg_rnn_tanh_data(
30258 c_tensors.as_mut_ptr(),
30259 data.c_tensor,
30260 batch_sizes.c_tensor,
30261 hx.c_tensor,
30262 ptr_list(params).as_ptr(),
30263 params.len() as i32,
30264 if has_biases { 1 } else { 0 },
30265 num_layers,
30266 dropout,
30267 if train { 1 } else { 0 },
30268 if bidirectional { 1 } else { 0 }
30269 ));
30270 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
30271 }
30272
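    /// Rolls the tensor along the given dimensions; elements shifted past the end wrap around.
    ///
    /// Illustrative sketch only (not part of the generated bindings); the shift amounts and
    /// dimensions are arbitrary example values.
    ///
    /// ```ignore
    /// fn roll_rows_and_cols(x: &Tensor) -> Result<Tensor, TchError> {
    ///     // Shift by 1 along dim 0 and by -2 along dim 1.
    ///     x.f_roll(&[1i64, -2][..], &[0i64, 1][..])
    /// }
    /// ```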
30273 pub fn f_roll(&self, shifts: impl IntList, dims: impl IntList) -> Result<Tensor, TchError> {
30274 let mut c_tensors = [std::ptr::null_mut(); 1];
30275 unsafe_torch_err!(atg_roll(
30276 c_tensors.as_mut_ptr(),
30277 self.c_tensor,
30278 shifts.as_ptr(),
30279 shifts.len_i32(),
30280 dims.as_ptr(),
30281 dims.len_i32()
30282 ));
30283 Ok(Tensor { c_tensor: c_tensors[0] })
30284 }
30285
30286 pub fn f_roll_out(
30287 &self,
30288 out: &Tensor,
30289 shifts: impl IntList,
30290 dims: impl IntList,
30291 ) -> Result<Tensor, TchError> {
30292 let mut c_tensors = [std::ptr::null_mut(); 1];
30293 unsafe_torch_err!(atg_roll_out(
30294 c_tensors.as_mut_ptr(),
30295 out.c_tensor,
30296 self.c_tensor,
30297 shifts.as_ptr(),
30298 shifts.len_i32(),
30299 dims.as_ptr(),
30300 dims.len_i32()
30301 ));
30302 Ok(Tensor { c_tensor: c_tensors[0] })
30303 }
30304
30305 pub fn f_rot90(&self, k: i64, dims: impl IntList) -> Result<Tensor, TchError> {
30306 let mut c_tensors = [std::ptr::null_mut(); 1];
30307 unsafe_torch_err!(atg_rot90(
30308 c_tensors.as_mut_ptr(),
30309 self.c_tensor,
30310 k,
30311 dims.as_ptr(),
30312 dims.len_i32()
30313 ));
30314 Ok(Tensor { c_tensor: c_tensors[0] })
30315 }
30316
30317 pub fn f_rot90_out(
30318 &self,
30319 out: &Tensor,
30320 k: i64,
30321 dims: impl IntList,
30322 ) -> Result<Tensor, TchError> {
30323 let mut c_tensors = [std::ptr::null_mut(); 1];
30324 unsafe_torch_err!(atg_rot90_out(
30325 c_tensors.as_mut_ptr(),
30326 out.c_tensor,
30327 self.c_tensor,
30328 k,
30329 dims.as_ptr(),
30330 dims.len_i32()
30331 ));
30332 Ok(Tensor { c_tensor: c_tensors[0] })
30333 }
30334
30335 pub fn f_round(&self) -> Result<Tensor, TchError> {
30336 let mut c_tensors = [std::ptr::null_mut(); 1];
30337 unsafe_torch_err!(atg_round(c_tensors.as_mut_ptr(), self.c_tensor));
30338 Ok(Tensor { c_tensor: c_tensors[0] })
30339 }
30340
30341 pub fn f_round_(&mut self) -> Result<Tensor, TchError> {
30342 let mut c_tensors = [std::ptr::null_mut(); 1];
30343 unsafe_torch_err!(atg_round_(c_tensors.as_mut_ptr(), self.c_tensor));
30344 Ok(Tensor { c_tensor: c_tensors[0] })
30345 }
30346
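    /// Rounds to the given number of decimal places (a negative count rounds to tens, hundreds, ...).
    ///
    /// Illustrative sketch only (not part of the generated bindings).
    ///
    /// ```ignore
    /// fn round_to_cents(x: &Tensor) -> Result<Tensor, TchError> {
    ///     // Keep two decimal places, e.g. 3.14159 -> 3.14.
    ///     x.f_round_decimals(2)
    /// }
    /// ```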
30347 pub fn f_round_decimals(&self, decimals: i64) -> Result<Tensor, TchError> {
30348 let mut c_tensors = [std::ptr::null_mut(); 1];
30349 unsafe_torch_err!(atg_round_decimals(c_tensors.as_mut_ptr(), self.c_tensor, decimals));
30350 Ok(Tensor { c_tensor: c_tensors[0] })
30351 }
30352
30353 pub fn f_round_decimals_(&mut self, decimals: i64) -> Result<Tensor, TchError> {
30354 let mut c_tensors = [std::ptr::null_mut(); 1];
30355 unsafe_torch_err!(atg_round_decimals_(c_tensors.as_mut_ptr(), self.c_tensor, decimals));
30356 Ok(Tensor { c_tensor: c_tensors[0] })
30357 }
30358
30359 pub fn f_round_decimals_out(&self, out: &Tensor, decimals: i64) -> Result<Tensor, TchError> {
30360 let mut c_tensors = [std::ptr::null_mut(); 1];
30361 unsafe_torch_err!(atg_round_decimals_out(
30362 c_tensors.as_mut_ptr(),
30363 out.c_tensor,
30364 self.c_tensor,
30365 decimals
30366 ));
30367 Ok(Tensor { c_tensor: c_tensors[0] })
30368 }
30369
30370 pub fn f_round_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
30371 let mut c_tensors = [std::ptr::null_mut(); 1];
30372 unsafe_torch_err!(atg_round_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
30373 Ok(Tensor { c_tensor: c_tensors[0] })
30374 }
30375
30376 pub fn f_row_indices(&self) -> Result<Tensor, TchError> {
30377 let mut c_tensors = [std::ptr::null_mut(); 1];
30378 unsafe_torch_err!(atg_row_indices(c_tensors.as_mut_ptr(), self.c_tensor));
30379 Ok(Tensor { c_tensor: c_tensors[0] })
30380 }
30381
30382 pub fn f_row_indices_copy(&self) -> Result<Tensor, TchError> {
30383 let mut c_tensors = [std::ptr::null_mut(); 1];
30384 unsafe_torch_err!(atg_row_indices_copy(c_tensors.as_mut_ptr(), self.c_tensor));
30385 Ok(Tensor { c_tensor: c_tensors[0] })
30386 }
30387
30388 pub fn f_row_indices_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
30389 let mut c_tensors = [std::ptr::null_mut(); 1];
30390 unsafe_torch_err!(atg_row_indices_copy_out(
30391 c_tensors.as_mut_ptr(),
30392 out.c_tensor,
30393 self.c_tensor
30394 ));
30395 Ok(Tensor { c_tensor: c_tensors[0] })
30396 }
30397
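    /// Stacks tensors row-wise (an alias of `vstack`): 1-D inputs become rows of a 2-D result.
    ///
    /// Illustrative sketch only (not part of the generated bindings); the two caller-supplied
    /// row tensors are assumptions.
    ///
    /// ```ignore
    /// fn stack_two_rows(a: &Tensor, b: &Tensor) -> Result<Tensor, TchError> {
    ///     // Two 1-D tensors of length N become a [2, N] matrix.
    ///     Tensor::f_row_stack(&[a, b])
    /// }
    /// ```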
30398 pub fn f_row_stack<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
30399 let mut c_tensors = [std::ptr::null_mut(); 1];
30400 unsafe_torch_err!(atg_row_stack(
30401 c_tensors.as_mut_ptr(),
30402 ptr_list(tensors).as_ptr(),
30403 tensors.len() as i32
30404 ));
30405 Ok(Tensor { c_tensor: c_tensors[0] })
30406 }
30407
30408 pub fn f_row_stack_out<T: Borrow<Tensor>>(
30409 out: &Tensor,
30410 tensors: &[T],
30411 ) -> Result<Tensor, TchError> {
30412 let mut c_tensors = [std::ptr::null_mut(); 1];
30413 unsafe_torch_err!(atg_row_stack_out(
30414 c_tensors.as_mut_ptr(),
30415 out.c_tensor,
30416 ptr_list(tensors).as_ptr(),
30417 tensors.len() as i32
30418 ));
30419 Ok(Tensor { c_tensor: c_tensors[0] })
30420 }
30421
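    /// Randomized leaky ReLU; when `training` is false the fixed average negative slope is used
    /// instead of sampling one per element.
    ///
    /// Illustrative sketch only (not part of the generated bindings).
    ///
    /// ```ignore
    /// fn rrelu_eval(x: &Tensor) -> Result<Tensor, TchError> {
    ///     // Inference mode: deterministic negative slope.
    ///     x.f_rrelu(false)
    /// }
    /// ```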
30422 pub fn f_rrelu(&self, training: bool) -> Result<Tensor, TchError> {
30423 let mut c_tensors = [std::ptr::null_mut(); 1];
30424 unsafe_torch_err!(atg_rrelu(
30425 c_tensors.as_mut_ptr(),
30426 self.c_tensor,
30427 if training { 1 } else { 0 }
30428 ));
30429 Ok(Tensor { c_tensor: c_tensors[0] })
30430 }
30431
30432 pub fn f_rrelu_(&mut self, training: bool) -> Result<Tensor, TchError> {
30433 let mut c_tensors = [std::ptr::null_mut(); 1];
30434 unsafe_torch_err!(atg_rrelu_(
30435 c_tensors.as_mut_ptr(),
30436 self.c_tensor,
30437 if training { 1 } else { 0 }
30438 ));
30439 Ok(Tensor { c_tensor: c_tensors[0] })
30440 }
30441
30442 pub fn f_rrelu_with_noise(&self, noise: &Tensor, training: bool) -> Result<Tensor, TchError> {
30443 let mut c_tensors = [std::ptr::null_mut(); 1];
30444 unsafe_torch_err!(atg_rrelu_with_noise(
30445 c_tensors.as_mut_ptr(),
30446 self.c_tensor,
30447 noise.c_tensor,
30448 if training { 1 } else { 0 }
30449 ));
30450 Ok(Tensor { c_tensor: c_tensors[0] })
30451 }
30452
30453 pub fn f_rrelu_with_noise_(
30454 &mut self,
30455 noise: &Tensor,
30456 training: bool,
30457 ) -> Result<Tensor, TchError> {
30458 let mut c_tensors = [std::ptr::null_mut(); 1];
30459 unsafe_torch_err!(atg_rrelu_with_noise_(
30460 c_tensors.as_mut_ptr(),
30461 self.c_tensor,
30462 noise.c_tensor,
30463 if training { 1 } else { 0 }
30464 ));
30465 Ok(Tensor { c_tensor: c_tensors[0] })
30466 }
30467
30468 pub fn f_rrelu_with_noise_backward<S: Into<Scalar>>(
30469 &self,
30470 grad_output: &Tensor,
30471 noise: &Tensor,
30472 lower: S,
30473 upper: S,
30474 training: bool,
30475 self_is_result: bool,
30476 ) -> Result<Tensor, TchError> {
30477 let mut c_tensors = [std::ptr::null_mut(); 1];
30478 unsafe_torch_err!(atg_rrelu_with_noise_backward(
30479 c_tensors.as_mut_ptr(),
30480 grad_output.c_tensor,
30481 self.c_tensor,
30482 noise.c_tensor,
30483 lower.into().c_scalar,
30484 upper.into().c_scalar,
30485 if training { 1 } else { 0 },
30486 if self_is_result { 1 } else { 0 }
30487 ));
30488 Ok(Tensor { c_tensor: c_tensors[0] })
30489 }
30490
30491 pub fn f_rrelu_with_noise_backward_out<S: Into<Scalar>>(
30492 &self,
30493 out: &Tensor,
30494 grad_output: &Tensor,
30495 noise: &Tensor,
30496 lower: S,
30497 upper: S,
30498 training: bool,
30499 self_is_result: bool,
30500 ) -> Result<Tensor, TchError> {
30501 let mut c_tensors = [std::ptr::null_mut(); 1];
30502 unsafe_torch_err!(atg_rrelu_with_noise_backward_out(
30503 c_tensors.as_mut_ptr(),
30504 out.c_tensor,
30505 grad_output.c_tensor,
30506 self.c_tensor,
30507 noise.c_tensor,
30508 lower.into().c_scalar,
30509 upper.into().c_scalar,
30510 if training { 1 } else { 0 },
30511 if self_is_result { 1 } else { 0 }
30512 ));
30513 Ok(Tensor { c_tensor: c_tensors[0] })
30514 }
30515
30516 pub fn f_rrelu_with_noise_out(
30517 &self,
30518 out: &Tensor,
30519 noise: &Tensor,
30520 training: bool,
30521 ) -> Result<Tensor, TchError> {
30522 let mut c_tensors = [std::ptr::null_mut(); 1];
30523 unsafe_torch_err!(atg_rrelu_with_noise_out(
30524 c_tensors.as_mut_ptr(),
30525 out.c_tensor,
30526 self.c_tensor,
30527 noise.c_tensor,
30528 if training { 1 } else { 0 }
30529 ));
30530 Ok(Tensor { c_tensor: c_tensors[0] })
30531 }
30532
30533 pub fn f_rsqrt(&self) -> Result<Tensor, TchError> {
30534 let mut c_tensors = [std::ptr::null_mut(); 1];
30535 unsafe_torch_err!(atg_rsqrt(c_tensors.as_mut_ptr(), self.c_tensor));
30536 Ok(Tensor { c_tensor: c_tensors[0] })
30537 }
30538
30539 pub fn f_rsqrt_(&mut self) -> Result<Tensor, TchError> {
30540 let mut c_tensors = [std::ptr::null_mut(); 1];
30541 unsafe_torch_err!(atg_rsqrt_(c_tensors.as_mut_ptr(), self.c_tensor));
30542 Ok(Tensor { c_tensor: c_tensors[0] })
30543 }
30544
30545 pub fn f_rsqrt_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
30546 let mut c_tensors = [std::ptr::null_mut(); 1];
30547 unsafe_torch_err!(atg_rsqrt_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
30548 Ok(Tensor { c_tensor: c_tensors[0] })
30549 }
30550
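    /// Reverse subtraction: computes `other - self` rather than `self - other`.
    ///
    /// Illustrative sketch only (not part of the generated bindings); it uses the scalar variant
    /// below with 1.0 as an arbitrary example value.
    ///
    /// ```ignore
    /// fn one_minus(x: &Tensor) -> Result<Tensor, TchError> {
    ///     // 1.0 - x, elementwise.
    ///     x.f_rsub_scalar(1.0)
    /// }
    /// ```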
30551 pub fn f_rsub(&self, other: &Tensor) -> Result<Tensor, TchError> {
30552 let mut c_tensors = [std::ptr::null_mut(); 1];
30553 unsafe_torch_err!(atg_rsub(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
30554 Ok(Tensor { c_tensor: c_tensors[0] })
30555 }
30556
30557 pub fn f_rsub_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
30558 let mut c_tensors = [std::ptr::null_mut(); 1];
30559 unsafe_torch_err!(atg_rsub_scalar(
30560 c_tensors.as_mut_ptr(),
30561 self.c_tensor,
30562 other.into().c_scalar
30563 ));
30564 Ok(Tensor { c_tensor: c_tensors[0] })
30565 }
30566
30567 pub fn f_rsub_scalar_out<S: Into<Scalar>>(
30568 &self,
30569 out: &Tensor,
30570 other: S,
30571 ) -> Result<Tensor, TchError> {
30572 let mut c_tensors = [std::ptr::null_mut(); 1];
30573 unsafe_torch_err!(atg_rsub_scalar_out(
30574 c_tensors.as_mut_ptr(),
30575 out.c_tensor,
30576 self.c_tensor,
30577 other.into().c_scalar
30578 ));
30579 Ok(Tensor { c_tensor: c_tensors[0] })
30580 }
30581
30582 pub fn f_rsub_tensor_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
30583 let mut c_tensors = [std::ptr::null_mut(); 1];
30584 unsafe_torch_err!(atg_rsub_tensor_out(
30585 c_tensors.as_mut_ptr(),
30586 out.c_tensor,
30587 self.c_tensor,
30588 other.c_tensor
30589 ));
30590 Ok(Tensor { c_tensor: c_tensors[0] })
30591 }
30592
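    /// Builds a 0-dimensional tensor holding a single scalar value with the given kind and device.
    ///
    /// Illustrative sketch only (not part of the generated bindings); `Kind::Float` on the CPU is
    /// an arbitrary example choice.
    ///
    /// ```ignore
    /// fn pi_tensor() -> Result<Tensor, TchError> {
    ///     // A 0-dim f32 tensor on the CPU.
    ///     Tensor::f_scalar_tensor(3.14159, (Kind::Float, Device::Cpu))
    /// }
    /// ```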
30593 pub fn f_scalar_tensor<S: Into<Scalar>>(
30594 s: S,
30595 options: (Kind, Device),
30596 ) -> Result<Tensor, TchError> {
30597 let mut c_tensors = [std::ptr::null_mut(); 1];
30598 unsafe_torch_err!(atg_scalar_tensor(
30599 c_tensors.as_mut_ptr(),
30600 s.into().c_scalar,
30601 options.0.c_int(),
30602 options.1.c_int()
30603 ));
30604 Ok(Tensor { c_tensor: c_tensors[0] })
30605 }
30606
30607 pub fn f_scalar_tensor_out<S: Into<Scalar>>(out: &Tensor, s: S) -> Result<Tensor, TchError> {
30608 let mut c_tensors = [std::ptr::null_mut(); 1];
30609 unsafe_torch_err!(atg_scalar_tensor_out(
30610 c_tensors.as_mut_ptr(),
30611 out.c_tensor,
30612 s.into().c_scalar
30613 ));
30614 Ok(Tensor { c_tensor: c_tensors[0] })
30615 }
30616
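    /// Fused scaled dot-product attention over `(query, key, value)`; `attn_mask` and `scale`
    /// are optional, and `is_causal` applies a causal mask internally.
    ///
    /// Illustrative sketch only (not part of the generated bindings); causal self-attention with
    /// no explicit mask, no dropout, the default scale and no grouped-query attention is an
    /// assumed configuration.
    ///
    /// ```ignore
    /// fn causal_attention(q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor, TchError> {
    ///     Tensor::f_scaled_dot_product_attention(
    ///         q, k, v,
    ///         None::<Tensor>, // attn_mask
    ///         0.0,            // dropout_p
    ///         true,           // is_causal
    ///         None::<f64>,    // scale: use the default 1/sqrt(head_dim)
    ///         false,          // enable_gqa
    ///     )
    /// }
    /// ```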
30617 pub fn f_scaled_dot_product_attention<T: Borrow<Tensor>>(
30618 query: &Tensor,
30619 key: &Tensor,
30620 value: &Tensor,
30621 attn_mask: Option<T>,
30622 dropout_p: f64,
30623 is_causal: bool,
30624 scale: impl Into<Option<f64>>,
30625 enable_gqa: bool,
30626 ) -> Result<Tensor, TchError> {
30627 let scale = scale.into();
30628 let mut c_tensors = [std::ptr::null_mut(); 1];
30629 unsafe_torch_err!(atg_scaled_dot_product_attention(
30630 c_tensors.as_mut_ptr(),
30631 query.c_tensor,
30632 key.c_tensor,
30633 value.c_tensor,
30634 attn_mask.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
30635 dropout_p,
30636 if is_causal { 1 } else { 0 },
30637 scale.unwrap_or(std::f64::NAN),
30638 scale.is_none() as i8,
30639 if enable_gqa { 1 } else { 0 }
30640 ));
30641 Ok(Tensor { c_tensor: c_tensors[0] })
30642 }
30643
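    /// Writes values from `src` into a copy of `self` at the positions given by `index` along `dim`
    /// (the non-accumulating counterpart of `f_scatter_add`).
    ///
    /// Illustrative sketch only (not part of the generated bindings); dimension 0 is an arbitrary
    /// example choice.
    ///
    /// ```ignore
    /// fn scatter_rows(dst: &Tensor, index: &Tensor, src: &Tensor) -> Result<Tensor, TchError> {
    ///     // For a 2-D tensor and dim = 0: out[index[i][j]][j] = src[i][j].
    ///     dst.f_scatter(0, index, src)
    /// }
    /// ```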
30644 pub fn f_scatter(&self, dim: i64, index: &Tensor, src: &Tensor) -> Result<Tensor, TchError> {
30645 let mut c_tensors = [std::ptr::null_mut(); 1];
30646 unsafe_torch_err!(atg_scatter(
30647 c_tensors.as_mut_ptr(),
30648 self.c_tensor,
30649 dim,
30650 index.c_tensor,
30651 src.c_tensor
30652 ));
30653 Ok(Tensor { c_tensor: c_tensors[0] })
30654 }
30655
30656 pub fn f_scatter_(
30657 &mut self,
30658 dim: i64,
30659 index: &Tensor,
30660 src: &Tensor,
30661 ) -> Result<Tensor, TchError> {
30662 let mut c_tensors = [std::ptr::null_mut(); 1];
30663 unsafe_torch_err!(atg_scatter_(
30664 c_tensors.as_mut_ptr(),
30665 self.c_tensor,
30666 dim,
30667 index.c_tensor,
30668 src.c_tensor
30669 ));
30670 Ok(Tensor { c_tensor: c_tensors[0] })
30671 }
30672
30673 pub fn f_scatter_add(
30674 &self,
30675 dim: i64,
30676 index: &Tensor,
30677 src: &Tensor,
30678 ) -> Result<Tensor, TchError> {
30679 let mut c_tensors = [std::ptr::null_mut(); 1];
30680 unsafe_torch_err!(atg_scatter_add(
30681 c_tensors.as_mut_ptr(),
30682 self.c_tensor,
30683 dim,
30684 index.c_tensor,
30685 src.c_tensor
30686 ));
30687 Ok(Tensor { c_tensor: c_tensors[0] })
30688 }
30689
30690 pub fn f_scatter_add_(
30691 &mut self,
30692 dim: i64,
30693 index: &Tensor,
30694 src: &Tensor,
30695 ) -> Result<Tensor, TchError> {
30696 let mut c_tensors = [std::ptr::null_mut(); 1];
30697 unsafe_torch_err!(atg_scatter_add_(
30698 c_tensors.as_mut_ptr(),
30699 self.c_tensor,
30700 dim,
30701 index.c_tensor,
30702 src.c_tensor
30703 ));
30704 Ok(Tensor { c_tensor: c_tensors[0] })
30705 }
30706
30707 pub fn f_scatter_add_out(
30708 &self,
30709 out: &Tensor,
30710 dim: i64,
30711 index: &Tensor,
30712 src: &Tensor,
30713 ) -> Result<Tensor, TchError> {
30714 let mut c_tensors = [std::ptr::null_mut(); 1];
30715 unsafe_torch_err!(atg_scatter_add_out(
30716 c_tensors.as_mut_ptr(),
30717 out.c_tensor,
30718 self.c_tensor,
30719 dim,
30720 index.c_tensor,
30721 src.c_tensor
30722 ));
30723 Ok(Tensor { c_tensor: c_tensors[0] })
30724 }
30725
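    /// Like `f_scatter`, but combines colliding writes using the named reduction string
    /// (e.g. "add" or "multiply").
    ///
    /// Illustrative sketch only (not part of the generated bindings); the "add" reduction and
    /// dimension 0 are arbitrary example choices.
    ///
    /// ```ignore
    /// fn scatter_accumulate(
    ///     dst: &Tensor,
    ///     index: &Tensor,
    ///     src: &Tensor,
    /// ) -> Result<Tensor, TchError> {
    ///     // Accumulate src into dst along dim 0 wherever indices collide.
    ///     dst.f_scatter_reduce(0, index, src, "add")
    /// }
    /// ```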
30726 pub fn f_scatter_reduce(
30727 &self,
30728 dim: i64,
30729 index: &Tensor,
30730 src: &Tensor,
30731 reduce: &str,
30732 ) -> Result<Tensor, TchError> {
30733 let mut c_tensors = [std::ptr::null_mut(); 1];
30734 unsafe_torch_err!(atg_scatter_reduce(
30735 c_tensors.as_mut_ptr(),
30736 self.c_tensor,
30737 dim,
30738 index.c_tensor,
30739 src.c_tensor,
30740 reduce.as_ptr(),
30741 reduce.len() as i32
30742 ));
30743 Ok(Tensor { c_tensor: c_tensors[0] })
30744 }
30745
30746 pub fn f_scatter_reduce_(
30747 &mut self,
30748 dim: i64,
30749 index: &Tensor,
30750 src: &Tensor,
30751 reduce: &str,
30752 ) -> Result<Tensor, TchError> {
30753 let mut c_tensors = [std::ptr::null_mut(); 1];
30754 unsafe_torch_err!(atg_scatter_reduce_(
30755 c_tensors.as_mut_ptr(),
30756 self.c_tensor,
30757 dim,
30758 index.c_tensor,
30759 src.c_tensor,
30760 reduce.as_ptr(),
30761 reduce.len() as i32
30762 ));
30763 Ok(Tensor { c_tensor: c_tensors[0] })
30764 }
30765
30766 pub fn f_scatter_reduce_out(
30767 &self,
30768 out: &Tensor,
30769 dim: i64,
30770 index: &Tensor,
30771 src: &Tensor,
30772 reduce: &str,
30773 ) -> Result<Tensor, TchError> {
30774 let mut c_tensors = [std::ptr::null_mut(); 1];
30775 unsafe_torch_err!(atg_scatter_reduce_out(
30776 c_tensors.as_mut_ptr(),
30777 out.c_tensor,
30778 self.c_tensor,
30779 dim,
30780 index.c_tensor,
30781 src.c_tensor,
30782 reduce.as_ptr(),
30783 reduce.len() as i32
30784 ));
30785 Ok(Tensor { c_tensor: c_tensors[0] })
30786 }
30787
30788 pub fn f_scatter_src_out(
30789 &self,
30790 out: &Tensor,
30791 dim: i64,
30792 index: &Tensor,
30793 src: &Tensor,
30794 ) -> Result<Tensor, TchError> {
30795 let mut c_tensors = [std::ptr::null_mut(); 1];
30796 unsafe_torch_err!(atg_scatter_src_out(
30797 c_tensors.as_mut_ptr(),
30798 out.c_tensor,
30799 self.c_tensor,
30800 dim,
30801 index.c_tensor,
30802 src.c_tensor
30803 ));
30804 Ok(Tensor { c_tensor: c_tensors[0] })
30805 }
30806
30807 pub fn f_scatter_value<S: Into<Scalar>>(
30808 &self,
30809 dim: i64,
30810 index: &Tensor,
30811 value: S,
30812 ) -> Result<Tensor, TchError> {
30813 let mut c_tensors = [std::ptr::null_mut(); 1];
30814 unsafe_torch_err!(atg_scatter_value(
30815 c_tensors.as_mut_ptr(),
30816 self.c_tensor,
30817 dim,
30818 index.c_tensor,
30819 value.into().c_scalar
30820 ));
30821 Ok(Tensor { c_tensor: c_tensors[0] })
30822 }
30823
30824 pub fn f_scatter_value_<S: Into<Scalar>>(
30825 &mut self,
30826 dim: i64,
30827 index: &Tensor,
30828 value: S,
30829 ) -> Result<Tensor, TchError> {
30830 let mut c_tensors = [std::ptr::null_mut(); 1];
30831 unsafe_torch_err!(atg_scatter_value_(
30832 c_tensors.as_mut_ptr(),
30833 self.c_tensor,
30834 dim,
30835 index.c_tensor,
30836 value.into().c_scalar
30837 ));
30838 Ok(Tensor { c_tensor: c_tensors[0] })
30839 }
30840
30841 pub fn f_scatter_value_out<S: Into<Scalar>>(
30842 &self,
30843 out: &Tensor,
30844 dim: i64,
30845 index: &Tensor,
30846 value: S,
30847 ) -> Result<Tensor, TchError> {
30848 let mut c_tensors = [std::ptr::null_mut(); 1];
30849 unsafe_torch_err!(atg_scatter_value_out(
30850 c_tensors.as_mut_ptr(),
30851 out.c_tensor,
30852 self.c_tensor,
30853 dim,
30854 index.c_tensor,
30855 value.into().c_scalar
30856 ));
30857 Ok(Tensor { c_tensor: c_tensors[0] })
30858 }
30859
30860 pub fn f_scatter_value_reduce<S: Into<Scalar>>(
30861 &self,
30862 dim: i64,
30863 index: &Tensor,
30864 value: S,
30865 reduce: &str,
30866 ) -> Result<Tensor, TchError> {
30867 let mut c_tensors = [std::ptr::null_mut(); 1];
30868 unsafe_torch_err!(atg_scatter_value_reduce(
30869 c_tensors.as_mut_ptr(),
30870 self.c_tensor,
30871 dim,
30872 index.c_tensor,
30873 value.into().c_scalar,
30874 reduce.as_ptr(),
30875 reduce.len() as i32
30876 ));
30877 Ok(Tensor { c_tensor: c_tensors[0] })
30878 }
30879
30880 pub fn f_scatter_value_reduce_<S: Into<Scalar>>(
30881 &mut self,
30882 dim: i64,
30883 index: &Tensor,
30884 value: S,
30885 reduce: &str,
30886 ) -> Result<Tensor, TchError> {
30887 let mut c_tensors = [std::ptr::null_mut(); 1];
30888 unsafe_torch_err!(atg_scatter_value_reduce_(
30889 c_tensors.as_mut_ptr(),
30890 self.c_tensor,
30891 dim,
30892 index.c_tensor,
30893 value.into().c_scalar,
30894 reduce.as_ptr(),
30895 reduce.len() as i32
30896 ));
30897 Ok(Tensor { c_tensor: c_tensors[0] })
30898 }
30899
30900 pub fn f_scatter_value_reduce_out<S: Into<Scalar>>(
30901 &self,
30902 out: &Tensor,
30903 dim: i64,
30904 index: &Tensor,
30905 value: S,
30906 reduce: &str,
30907 ) -> Result<Tensor, TchError> {
30908 let mut c_tensors = [std::ptr::null_mut(); 1];
30909 unsafe_torch_err!(atg_scatter_value_reduce_out(
30910 c_tensors.as_mut_ptr(),
30911 out.c_tensor,
30912 self.c_tensor,
30913 dim,
30914 index.c_tensor,
30915 value.into().c_scalar,
30916 reduce.as_ptr(),
30917 reduce.len() as i32
30918 ));
30919 Ok(Tensor { c_tensor: c_tensors[0] })
30920 }
30921
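    /// Finds, for each element of `self`, the insertion index into `sorted_sequence` that keeps it
    /// sorted; `right`/`side` select which bound is returned and `out_int32` picks the index dtype.
    ///
    /// Illustrative sketch only (not part of the generated bindings); left-bound insertion with
    /// 64-bit indices and no sorter tensor is an assumed configuration.
    ///
    /// ```ignore
    /// fn bucketize(values: &Tensor, boundaries: &Tensor) -> Result<Tensor, TchError> {
    ///     values.f_searchsorted(boundaries, false, false, "left", None::<Tensor>)
    /// }
    /// ```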
30922 pub fn f_searchsorted<T: Borrow<Tensor>>(
30923 &self,
30924 sorted_sequence: &Tensor,
30925 out_int32: bool,
30926 right: bool,
30927 side: &str,
30928 sorter: Option<T>,
30929 ) -> Result<Tensor, TchError> {
30930 let mut c_tensors = [std::ptr::null_mut(); 1];
30931 unsafe_torch_err!(atg_searchsorted(
30932 c_tensors.as_mut_ptr(),
30933 sorted_sequence.c_tensor,
30934 self.c_tensor,
30935 if out_int32 { 1 } else { 0 },
30936 if right { 1 } else { 0 },
30937 side.as_ptr(),
30938 side.len() as i32,
30939 sorter.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
30940 ));
30941 Ok(Tensor { c_tensor: c_tensors[0] })
30942 }
30943
30944 pub fn f_searchsorted_scalar<T: Borrow<Tensor>, S: Into<Scalar>>(
30945 sorted_sequence: &Tensor,
30946 self_scalar: S,
30947 out_int32: bool,
30948 right: bool,
30949 side: &str,
30950 sorter: Option<T>,
30951 ) -> Result<Tensor, TchError> {
30952 let mut c_tensors = [std::ptr::null_mut(); 1];
30953 unsafe_torch_err!(atg_searchsorted_scalar(
30954 c_tensors.as_mut_ptr(),
30955 sorted_sequence.c_tensor,
30956 self_scalar.into().c_scalar,
30957 if out_int32 { 1 } else { 0 },
30958 if right { 1 } else { 0 },
30959 side.as_ptr(),
30960 side.len() as i32,
30961 sorter.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
30962 ));
30963 Ok(Tensor { c_tensor: c_tensors[0] })
30964 }
30965
30966 pub fn f_searchsorted_scalar_out<T: Borrow<Tensor>, S: Into<Scalar>>(
30967 out: &Tensor,
30968 sorted_sequence: &Tensor,
30969 self_scalar: S,
30970 out_int32: bool,
30971 right: bool,
30972 side: &str,
30973 sorter: Option<T>,
30974 ) -> Result<Tensor, TchError> {
30975 let mut c_tensors = [std::ptr::null_mut(); 1];
30976 unsafe_torch_err!(atg_searchsorted_scalar_out(
30977 c_tensors.as_mut_ptr(),
30978 out.c_tensor,
30979 sorted_sequence.c_tensor,
30980 self_scalar.into().c_scalar,
30981 if out_int32 { 1 } else { 0 },
30982 if right { 1 } else { 0 },
30983 side.as_ptr(),
30984 side.len() as i32,
30985 sorter.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
30986 ));
30987 Ok(Tensor { c_tensor: c_tensors[0] })
30988 }
30989
30990 pub fn f_searchsorted_tensor_out<T: Borrow<Tensor>>(
30991 &self,
30992 out: &Tensor,
30993 sorted_sequence: &Tensor,
30994 out_int32: bool,
30995 right: bool,
30996 side: &str,
30997 sorter: Option<T>,
30998 ) -> Result<Tensor, TchError> {
30999 let mut c_tensors = [std::ptr::null_mut(); 1];
31000 unsafe_torch_err!(atg_searchsorted_tensor_out(
31001 c_tensors.as_mut_ptr(),
31002 out.c_tensor,
31003 sorted_sequence.c_tensor,
31004 self.c_tensor,
31005 if out_int32 { 1 } else { 0 },
31006 if right { 1 } else { 0 },
31007 side.as_ptr(),
31008 side.len() as i32,
31009 sorter.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor)
31010 ));
31011 Ok(Tensor { c_tensor: c_tensors[0] })
31012 }
31013
31014 pub fn f_segment_reduce<T: Borrow<Tensor>, S: Into<Scalar>>(
31015 data: &Tensor,
31016 reduce: &str,
31017 lengths: Option<T>,
31018 indices: Option<T>,
31019 offsets: Option<T>,
31020 axis: i64,
31021 unsafe_: bool,
31022 initial: S,
31023 ) -> Result<Tensor, TchError> {
31024 let mut c_tensors = [std::ptr::null_mut(); 1];
31025 unsafe_torch_err!(atg_segment_reduce(
31026 c_tensors.as_mut_ptr(),
31027 data.c_tensor,
31028 reduce.as_ptr(),
31029 reduce.len() as i32,
31030 lengths.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
31031 indices.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
31032 offsets.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
31033 axis,
31034 if unsafe_ { 1 } else { 0 },
31035 initial.into().c_scalar
31036 ));
31037 Ok(Tensor { c_tensor: c_tensors[0] })
31038 }
31039
31040 pub fn f_segment_reduce_out<T: Borrow<Tensor>, S: Into<Scalar>>(
31041 out: &Tensor,
31042 data: &Tensor,
31043 reduce: &str,
31044 lengths: Option<T>,
31045 indices: Option<T>,
31046 offsets: Option<T>,
31047 axis: i64,
31048 unsafe_: bool,
31049 initial: S,
31050 ) -> Result<Tensor, TchError> {
31051 let mut c_tensors = [std::ptr::null_mut(); 1];
31052 unsafe_torch_err!(atg_segment_reduce_out(
31053 c_tensors.as_mut_ptr(),
31054 out.c_tensor,
31055 data.c_tensor,
31056 reduce.as_ptr(),
31057 reduce.len() as i32,
31058 lengths.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
31059 indices.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
31060 offsets.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
31061 axis,
31062 if unsafe_ { 1 } else { 0 },
31063 initial.into().c_scalar
31064 ));
31065 Ok(Tensor { c_tensor: c_tensors[0] })
31066 }
31067
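    /// Selects a single slice along `dim` at position `index`, dropping that dimension.
    ///
    /// Illustrative sketch only (not part of the generated bindings).
    ///
    /// ```ignore
    /// fn first_row(x: &Tensor) -> Result<Tensor, TchError> {
    ///     // Row 0 of a 2-D tensor, returned as a 1-D tensor.
    ///     x.f_select(0, 0)
    /// }
    /// ```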
31068 pub fn f_select(&self, dim: i64, index: i64) -> Result<Tensor, TchError> {
31069 let mut c_tensors = [std::ptr::null_mut(); 1];
31070 unsafe_torch_err!(atg_select(c_tensors.as_mut_ptr(), self.c_tensor, dim, index));
31071 Ok(Tensor { c_tensor: c_tensors[0] })
31072 }
31073
31074 pub fn f_select_backward(
31075 grad_output: &Tensor,
31076 input_sizes: impl IntList,
31077 dim: i64,
31078 index: i64,
31079 ) -> Result<Tensor, TchError> {
31080 let mut c_tensors = [std::ptr::null_mut(); 1];
31081 unsafe_torch_err!(atg_select_backward(
31082 c_tensors.as_mut_ptr(),
31083 grad_output.c_tensor,
31084 input_sizes.as_ptr(),
31085 input_sizes.len_i32(),
31086 dim,
31087 index
31088 ));
31089 Ok(Tensor { c_tensor: c_tensors[0] })
31090 }
31091
31092 pub fn f_select_backward_out(
31093 out: &Tensor,
31094 grad_output: &Tensor,
31095 input_sizes: impl IntList,
31096 dim: i64,
31097 index: i64,
31098 ) -> Result<Tensor, TchError> {
31099 let mut c_tensors = [std::ptr::null_mut(); 1];
31100 unsafe_torch_err!(atg_select_backward_out(
31101 c_tensors.as_mut_ptr(),
31102 out.c_tensor,
31103 grad_output.c_tensor,
31104 input_sizes.as_ptr(),
31105 input_sizes.len_i32(),
31106 dim,
31107 index
31108 ));
31109 Ok(Tensor { c_tensor: c_tensors[0] })
31110 }
31111
31112 pub fn f_select_copy(&self, dim: i64, index: i64) -> Result<Tensor, TchError> {
31113 let mut c_tensors = [std::ptr::null_mut(); 1];
31114 unsafe_torch_err!(atg_select_copy(c_tensors.as_mut_ptr(), self.c_tensor, dim, index));
31115 Ok(Tensor { c_tensor: c_tensors[0] })
31116 }
31117
31118 pub fn f_select_copy_int_out(
31119 &self,
31120 out: &Tensor,
31121 dim: i64,
31122 index: i64,
31123 ) -> Result<Tensor, TchError> {
31124 let mut c_tensors = [std::ptr::null_mut(); 1];
31125 unsafe_torch_err!(atg_select_copy_int_out(
31126 c_tensors.as_mut_ptr(),
31127 out.c_tensor,
31128 self.c_tensor,
31129 dim,
31130 index
31131 ));
31132 Ok(Tensor { c_tensor: c_tensors[0] })
31133 }
31134
31135 pub fn f_select_scatter(&self, src: &Tensor, dim: i64, index: i64) -> Result<Tensor, TchError> {
31136 let mut c_tensors = [std::ptr::null_mut(); 1];
31137 unsafe_torch_err!(atg_select_scatter(
31138 c_tensors.as_mut_ptr(),
31139 self.c_tensor,
31140 src.c_tensor,
31141 dim,
31142 index
31143 ));
31144 Ok(Tensor { c_tensor: c_tensors[0] })
31145 }
31146
31147 pub fn f_select_scatter_out(
31148 &self,
31149 out: &Tensor,
31150 src: &Tensor,
31151 dim: i64,
31152 index: i64,
31153 ) -> Result<Tensor, TchError> {
31154 let mut c_tensors = [std::ptr::null_mut(); 1];
31155 unsafe_torch_err!(atg_select_scatter_out(
31156 c_tensors.as_mut_ptr(),
31157 out.c_tensor,
31158 self.c_tensor,
31159 src.c_tensor,
31160 dim,
31161 index
31162 ));
31163 Ok(Tensor { c_tensor: c_tensors[0] })
31164 }
31165
31166 pub fn f_selu(&self) -> Result<Tensor, TchError> {
31167 let mut c_tensors = [std::ptr::null_mut(); 1];
31168 unsafe_torch_err!(atg_selu(c_tensors.as_mut_ptr(), self.c_tensor));
31169 Ok(Tensor { c_tensor: c_tensors[0] })
31170 }
31171
31172 pub fn f_selu_(&mut self) -> Result<Tensor, TchError> {
31173 let mut c_tensors = [std::ptr::null_mut(); 1];
31174 unsafe_torch_err!(atg_selu_(c_tensors.as_mut_ptr(), self.c_tensor));
31175 Ok(Tensor { c_tensor: c_tensors[0] })
31176 }
31177
31178 pub fn f_set(&self) -> Result<Tensor, TchError> {
31179 let mut c_tensors = [std::ptr::null_mut(); 1];
31180 unsafe_torch_err!(atg_set(c_tensors.as_mut_ptr(), self.c_tensor));
31181 Ok(Tensor { c_tensor: c_tensors[0] })
31182 }
31183
31184 pub fn f_set_(&mut self) -> Result<Tensor, TchError> {
31185 let mut c_tensors = [std::ptr::null_mut(); 1];
31186 unsafe_torch_err!(atg_set_(c_tensors.as_mut_ptr(), self.c_tensor));
31187 Ok(Tensor { c_tensor: c_tensors[0] })
31188 }
31189
31190 pub fn f_set_data(&mut self, new_data: &Tensor) -> Result<(), TchError> {
31191 unsafe_torch_err!(atg_set_data(self.c_tensor, new_data.c_tensor));
31192 Ok(())
31193 }
31194
31195 pub fn f_set_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
31196 let mut c_tensors = [std::ptr::null_mut(); 1];
31197 unsafe_torch_err!(atg_set_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
31198 Ok(Tensor { c_tensor: c_tensors[0] })
31199 }
31200
31201 pub fn f_set_requires_grad(&self, r: bool) -> Result<Tensor, TchError> {
31202 let mut c_tensors = [std::ptr::null_mut(); 1];
31203 unsafe_torch_err!(atg_set_requires_grad(
31204 c_tensors.as_mut_ptr(),
31205 self.c_tensor,
31206 if r { 1 } else { 0 }
31207 ));
31208 Ok(Tensor { c_tensor: c_tensors[0] })
31209 }
31210
31211 pub fn f_set_source_tensor(&self, source: &Tensor) -> Result<Tensor, TchError> {
31212 let mut c_tensors = [std::ptr::null_mut(); 1];
31213 unsafe_torch_err!(atg_set_source_tensor(
31214 c_tensors.as_mut_ptr(),
31215 self.c_tensor,
31216 source.c_tensor
31217 ));
31218 Ok(Tensor { c_tensor: c_tensors[0] })
31219 }
31220
31221 pub fn f_set_source_tensor_(&mut self, source: &Tensor) -> Result<Tensor, TchError> {
31222 let mut c_tensors = [std::ptr::null_mut(); 1];
31223 unsafe_torch_err!(atg_set_source_tensor_(
31224 c_tensors.as_mut_ptr(),
31225 self.c_tensor,
31226 source.c_tensor
31227 ));
31228 Ok(Tensor { c_tensor: c_tensors[0] })
31229 }
31230
31231 pub fn f_set_source_tensor_out(
31232 &self,
31233 out: &Tensor,
31234 source: &Tensor,
31235 ) -> Result<Tensor, TchError> {
31236 let mut c_tensors = [std::ptr::null_mut(); 1];
31237 unsafe_torch_err!(atg_set_source_tensor_out(
31238 c_tensors.as_mut_ptr(),
31239 out.c_tensor,
31240 self.c_tensor,
31241 source.c_tensor
31242 ));
31243 Ok(Tensor { c_tensor: c_tensors[0] })
31244 }
31245
31246 pub fn f_set_source_tensor_storage_offset_(
31247 &mut self,
31248 source: &Tensor,
31249 storage_offset: i64,
31250 size: impl IntList,
31251 stride: impl IntList,
31252 ) -> Result<Tensor, TchError> {
31253 let mut c_tensors = [std::ptr::null_mut(); 1];
31254 unsafe_torch_err!(atg_set_source_tensor_storage_offset_(
31255 c_tensors.as_mut_ptr(),
31256 self.c_tensor,
31257 source.c_tensor,
31258 storage_offset,
31259 size.as_ptr(),
31260 size.len_i32(),
31261 stride.as_ptr(),
31262 stride.len_i32()
31263 ));
31264 Ok(Tensor { c_tensor: c_tensors[0] })
31265 }
31266
31267 pub fn f_sgn(&self) -> Result<Tensor, TchError> {
31268 let mut c_tensors = [std::ptr::null_mut(); 1];
31269 unsafe_torch_err!(atg_sgn(c_tensors.as_mut_ptr(), self.c_tensor));
31270 Ok(Tensor { c_tensor: c_tensors[0] })
31271 }
31272
31273 pub fn f_sgn_(&mut self) -> Result<Tensor, TchError> {
31274 let mut c_tensors = [std::ptr::null_mut(); 1];
31275 unsafe_torch_err!(atg_sgn_(c_tensors.as_mut_ptr(), self.c_tensor));
31276 Ok(Tensor { c_tensor: c_tensors[0] })
31277 }
31278
31279 pub fn f_sgn_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
31280 let mut c_tensors = [std::ptr::null_mut(); 1];
31281 unsafe_torch_err!(atg_sgn_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
31282 Ok(Tensor { c_tensor: c_tensors[0] })
31283 }
31284
31285 pub fn f_sigmoid(&self) -> Result<Tensor, TchError> {
31286 let mut c_tensors = [std::ptr::null_mut(); 1];
31287 unsafe_torch_err!(atg_sigmoid(c_tensors.as_mut_ptr(), self.c_tensor));
31288 Ok(Tensor { c_tensor: c_tensors[0] })
31289 }
31290
31291 pub fn f_sigmoid_(&mut self) -> Result<Tensor, TchError> {
31292 let mut c_tensors = [std::ptr::null_mut(); 1];
31293 unsafe_torch_err!(atg_sigmoid_(c_tensors.as_mut_ptr(), self.c_tensor));
31294 Ok(Tensor { c_tensor: c_tensors[0] })
31295 }
31296
31297 pub fn f_sigmoid_backward(grad_output: &Tensor, output: &Tensor) -> Result<Tensor, TchError> {
31298 let mut c_tensors = [std::ptr::null_mut(); 1];
31299 unsafe_torch_err!(atg_sigmoid_backward(
31300 c_tensors.as_mut_ptr(),
31301 grad_output.c_tensor,
31302 output.c_tensor
31303 ));
31304 Ok(Tensor { c_tensor: c_tensors[0] })
31305 }
31306
31307 pub fn f_sigmoid_backward_grad_input(
31308 grad_input: &Tensor,
31309 grad_output: &Tensor,
31310 output: &Tensor,
31311 ) -> Result<Tensor, TchError> {
31312 let mut c_tensors = [std::ptr::null_mut(); 1];
31313 unsafe_torch_err!(atg_sigmoid_backward_grad_input(
31314 c_tensors.as_mut_ptr(),
31315 grad_input.c_tensor,
31316 grad_output.c_tensor,
31317 output.c_tensor
31318 ));
31319 Ok(Tensor { c_tensor: c_tensors[0] })
31320 }
31321
31322 pub fn f_sigmoid_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
31323 let mut c_tensors = [std::ptr::null_mut(); 1];
31324 unsafe_torch_err!(atg_sigmoid_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
31325 Ok(Tensor { c_tensor: c_tensors[0] })
31326 }
31327
31328 pub fn f_sign(&self) -> Result<Tensor, TchError> {
31329 let mut c_tensors = [std::ptr::null_mut(); 1];
31330 unsafe_torch_err!(atg_sign(c_tensors.as_mut_ptr(), self.c_tensor));
31331 Ok(Tensor { c_tensor: c_tensors[0] })
31332 }
31333
31334 pub fn f_sign_(&mut self) -> Result<Tensor, TchError> {
31335 let mut c_tensors = [std::ptr::null_mut(); 1];
31336 unsafe_torch_err!(atg_sign_(c_tensors.as_mut_ptr(), self.c_tensor));
31337 Ok(Tensor { c_tensor: c_tensors[0] })
31338 }
31339
31340 pub fn f_sign_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
31341 let mut c_tensors = [std::ptr::null_mut(); 1];
31342 unsafe_torch_err!(atg_sign_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
31343 Ok(Tensor { c_tensor: c_tensors[0] })
31344 }
31345
31346 pub fn f_signbit(&self) -> Result<Tensor, TchError> {
31347 let mut c_tensors = [std::ptr::null_mut(); 1];
31348 unsafe_torch_err!(atg_signbit(c_tensors.as_mut_ptr(), self.c_tensor));
31349 Ok(Tensor { c_tensor: c_tensors[0] })
31350 }
31351
31352 pub fn f_signbit_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
31353 let mut c_tensors = [std::ptr::null_mut(); 1];
31354 unsafe_torch_err!(atg_signbit_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
31355 Ok(Tensor { c_tensor: c_tensors[0] })
31356 }
31357
31358 pub fn f_silu(&self) -> Result<Tensor, TchError> {
31359 let mut c_tensors = [std::ptr::null_mut(); 1];
31360 unsafe_torch_err!(atg_silu(c_tensors.as_mut_ptr(), self.c_tensor));
31361 Ok(Tensor { c_tensor: c_tensors[0] })
31362 }
31363
31364 pub fn f_silu_(&mut self) -> Result<Tensor, TchError> {
31365 let mut c_tensors = [std::ptr::null_mut(); 1];
31366 unsafe_torch_err!(atg_silu_(c_tensors.as_mut_ptr(), self.c_tensor));
31367 Ok(Tensor { c_tensor: c_tensors[0] })
31368 }
31369
31370 pub fn f_silu_backward(&self, grad_output: &Tensor) -> Result<Tensor, TchError> {
31371 let mut c_tensors = [std::ptr::null_mut(); 1];
31372 unsafe_torch_err!(atg_silu_backward(
31373 c_tensors.as_mut_ptr(),
31374 grad_output.c_tensor,
31375 self.c_tensor
31376 ));
31377 Ok(Tensor { c_tensor: c_tensors[0] })
31378 }
31379
31380 pub fn f_silu_backward_grad_input(
31381 &self,
31382 grad_input: &Tensor,
31383 grad_output: &Tensor,
31384 ) -> Result<Tensor, TchError> {
31385 let mut c_tensors = [std::ptr::null_mut(); 1];
31386 unsafe_torch_err!(atg_silu_backward_grad_input(
31387 c_tensors.as_mut_ptr(),
31388 grad_input.c_tensor,
31389 grad_output.c_tensor,
31390 self.c_tensor
31391 ));
31392 Ok(Tensor { c_tensor: c_tensors[0] })
31393 }
31394
31395 pub fn f_silu_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
31396 let mut c_tensors = [std::ptr::null_mut(); 1];
31397 unsafe_torch_err!(atg_silu_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
31398 Ok(Tensor { c_tensor: c_tensors[0] })
31399 }
31400
31401 pub fn f_sin(&self) -> Result<Tensor, TchError> {
31402 let mut c_tensors = [std::ptr::null_mut(); 1];
31403 unsafe_torch_err!(atg_sin(c_tensors.as_mut_ptr(), self.c_tensor));
31404 Ok(Tensor { c_tensor: c_tensors[0] })
31405 }
31406
31407 pub fn f_sin_(&mut self) -> Result<Tensor, TchError> {
31408 let mut c_tensors = [std::ptr::null_mut(); 1];
31409 unsafe_torch_err!(atg_sin_(c_tensors.as_mut_ptr(), self.c_tensor));
31410 Ok(Tensor { c_tensor: c_tensors[0] })
31411 }
31412
31413 pub fn f_sin_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
31414 let mut c_tensors = [std::ptr::null_mut(); 1];
31415 unsafe_torch_err!(atg_sin_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
31416 Ok(Tensor { c_tensor: c_tensors[0] })
31417 }
31418
31419 pub fn f_sinc(&self) -> Result<Tensor, TchError> {
31420 let mut c_tensors = [std::ptr::null_mut(); 1];
31421 unsafe_torch_err!(atg_sinc(c_tensors.as_mut_ptr(), self.c_tensor));
31422 Ok(Tensor { c_tensor: c_tensors[0] })
31423 }
31424
31425 pub fn f_sinc_(&mut self) -> Result<Tensor, TchError> {
31426 let mut c_tensors = [std::ptr::null_mut(); 1];
31427 unsafe_torch_err!(atg_sinc_(c_tensors.as_mut_ptr(), self.c_tensor));
31428 Ok(Tensor { c_tensor: c_tensors[0] })
31429 }
31430
31431 pub fn f_sinc_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
31432 let mut c_tensors = [std::ptr::null_mut(); 1];
31433 unsafe_torch_err!(atg_sinc_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
31434 Ok(Tensor { c_tensor: c_tensors[0] })
31435 }
31436
31437 pub fn f_sinh(&self) -> Result<Tensor, TchError> {
31438 let mut c_tensors = [std::ptr::null_mut(); 1];
31439 unsafe_torch_err!(atg_sinh(c_tensors.as_mut_ptr(), self.c_tensor));
31440 Ok(Tensor { c_tensor: c_tensors[0] })
31441 }
31442
31443 pub fn f_sinh_(&mut self) -> Result<Tensor, TchError> {
31444 let mut c_tensors = [std::ptr::null_mut(); 1];
31445 unsafe_torch_err!(atg_sinh_(c_tensors.as_mut_ptr(), self.c_tensor));
31446 Ok(Tensor { c_tensor: c_tensors[0] })
31447 }
31448
31449 pub fn f_sinh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
31450 let mut c_tensors = [std::ptr::null_mut(); 1];
31451 unsafe_torch_err!(atg_sinh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
31452 Ok(Tensor { c_tensor: c_tensors[0] })
31453 }
31454
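    /// Slices along `dim` from `start` (inclusive) to `end` (exclusive) with the given `step`;
    /// a `None` bound means "from the beginning" / "to the end".
    ///
    /// Illustrative sketch only (not part of the generated bindings); the bounds and step are
    /// arbitrary example values.
    ///
    /// ```ignore
    /// fn even_prefix(x: &Tensor) -> Result<Tensor, TchError> {
    ///     // Elements 0, 2, 4, 6, 8 along the first dimension (clamped to its length).
    ///     x.f_slice(0, None::<i64>, Some(10), 2)
    /// }
    /// ```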
31455 pub fn f_slice(
31456 &self,
31457 dim: i64,
31458 start: impl Into<Option<i64>>,
31459 end: impl Into<Option<i64>>,
31460 step: i64,
31461 ) -> Result<Tensor, TchError> {
31462 let start = start.into();
31463 let end = end.into();
31464 let mut c_tensors = [std::ptr::null_mut(); 1];
31465 unsafe_torch_err!(atg_slice(
31466 c_tensors.as_mut_ptr(),
31467 self.c_tensor,
31468 dim,
31469 start.unwrap_or(0i64),
31470 start.is_none() as i8,
31471 end.unwrap_or(0i64),
31472 end.is_none() as i8,
31473 step
31474 ));
31475 Ok(Tensor { c_tensor: c_tensors[0] })
31476 }
31477
31478 pub fn f_slice_backward(
31479 grad_output: &Tensor,
31480 input_sizes: impl IntList,
31481 dim: i64,
31482 start: i64,
31483 end: i64,
31484 step: i64,
31485 ) -> Result<Tensor, TchError> {
31486 let mut c_tensors = [std::ptr::null_mut(); 1];
31487 unsafe_torch_err!(atg_slice_backward(
31488 c_tensors.as_mut_ptr(),
31489 grad_output.c_tensor,
31490 input_sizes.as_ptr(),
31491 input_sizes.len_i32(),
31492 dim,
31493 start,
31494 end,
31495 step
31496 ));
31497 Ok(Tensor { c_tensor: c_tensors[0] })
31498 }
31499
31500 pub fn f_slice_backward_out(
31501 out: &Tensor,
31502 grad_output: &Tensor,
31503 input_sizes: impl IntList,
31504 dim: i64,
31505 start: i64,
31506 end: i64,
31507 step: i64,
31508 ) -> Result<Tensor, TchError> {
31509 let mut c_tensors = [std::ptr::null_mut(); 1];
31510 unsafe_torch_err!(atg_slice_backward_out(
31511 c_tensors.as_mut_ptr(),
31512 out.c_tensor,
31513 grad_output.c_tensor,
31514 input_sizes.as_ptr(),
31515 input_sizes.len_i32(),
31516 dim,
31517 start,
31518 end,
31519 step
31520 ));
31521 Ok(Tensor { c_tensor: c_tensors[0] })
31522 }
31523
31524 pub fn f_slice_copy(
31525 &self,
31526 dim: i64,
31527 start: impl Into<Option<i64>>,
31528 end: impl Into<Option<i64>>,
31529 step: i64,
31530 ) -> Result<Tensor, TchError> {
31531 let start = start.into();
31532 let end = end.into();
31533 let mut c_tensors = [std::ptr::null_mut(); 1];
31534 unsafe_torch_err!(atg_slice_copy(
31535 c_tensors.as_mut_ptr(),
31536 self.c_tensor,
31537 dim,
31538 start.unwrap_or(0i64),
31539 start.is_none() as i8,
31540 end.unwrap_or(0i64),
31541 end.is_none() as i8,
31542 step
31543 ));
31544 Ok(Tensor { c_tensor: c_tensors[0] })
31545 }
31546
31547 pub fn f_slice_copy_tensor_out(
31548 &self,
31549 out: &Tensor,
31550 dim: i64,
31551 start: impl Into<Option<i64>>,
31552 end: impl Into<Option<i64>>,
31553 step: i64,
31554 ) -> Result<Tensor, TchError> {
31555 let start = start.into();
31556 let end = end.into();
31557 let mut c_tensors = [std::ptr::null_mut(); 1];
31558 unsafe_torch_err!(atg_slice_copy_tensor_out(
31559 c_tensors.as_mut_ptr(),
31560 out.c_tensor,
31561 self.c_tensor,
31562 dim,
31563 start.unwrap_or(0i64),
31564 start.is_none() as i8,
31565 end.unwrap_or(0i64),
31566 end.is_none() as i8,
31567 step
31568 ));
31569 Ok(Tensor { c_tensor: c_tensors[0] })
31570 }
31571
31572 pub fn f_slice_inverse(
31573 &self,
31574 src: &Tensor,
31575 dim: i64,
31576 start: impl Into<Option<i64>>,
31577 end: impl Into<Option<i64>>,
31578 step: i64,
31579 ) -> Result<Tensor, TchError> {
31580 let start = start.into();
31581 let end = end.into();
31582 let mut c_tensors = [std::ptr::null_mut(); 1];
31583 unsafe_torch_err!(atg_slice_inverse(
31584 c_tensors.as_mut_ptr(),
31585 self.c_tensor,
31586 src.c_tensor,
31587 dim,
31588 start.unwrap_or(0i64),
31589 start.is_none() as i8,
31590 end.unwrap_or(0i64),
31591 end.is_none() as i8,
31592 step
31593 ));
31594 Ok(Tensor { c_tensor: c_tensors[0] })
31595 }
31596
31597 pub fn f_slice_scatter(
31598 &self,
31599 src: &Tensor,
31600 dim: i64,
31601 start: impl Into<Option<i64>>,
31602 end: impl Into<Option<i64>>,
31603 step: i64,
31604 ) -> Result<Tensor, TchError> {
31605 let start = start.into();
31606 let end = end.into();
31607 let mut c_tensors = [std::ptr::null_mut(); 1];
31608 unsafe_torch_err!(atg_slice_scatter(
31609 c_tensors.as_mut_ptr(),
31610 self.c_tensor,
31611 src.c_tensor,
31612 dim,
31613 start.unwrap_or(0i64),
31614 start.is_none() as i8,
31615 end.unwrap_or(0i64),
31616 end.is_none() as i8,
31617 step
31618 ));
31619 Ok(Tensor { c_tensor: c_tensors[0] })
31620 }
31621
31622 pub fn f_slice_scatter_out(
31623 &self,
31624 out: &Tensor,
31625 src: &Tensor,
31626 dim: i64,
31627 start: impl Into<Option<i64>>,
31628 end: impl Into<Option<i64>>,
31629 step: i64,
31630 ) -> Result<Tensor, TchError> {
31631 let start = start.into();
31632 let end = end.into();
31633 let mut c_tensors = [std::ptr::null_mut(); 1];
31634 unsafe_torch_err!(atg_slice_scatter_out(
31635 c_tensors.as_mut_ptr(),
31636 out.c_tensor,
31637 self.c_tensor,
31638 src.c_tensor,
31639 dim,
31640 start.unwrap_or(0i64),
31641 start.is_none() as i8,
31642 end.unwrap_or(0i64),
31643 end.is_none() as i8,
31644 step
31645 ));
31646 Ok(Tensor { c_tensor: c_tensors[0] })
31647 }
31648
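    /// Returns `(sign, logabsdet)` for a square matrix, which is numerically safer than taking
    /// the log of the determinant directly.
    ///
    /// Illustrative sketch only (not part of the generated bindings); discarding the sign is
    /// shown purely for illustration.
    ///
    /// ```ignore
    /// fn log_abs_det(m: &Tensor) -> Result<Tensor, TchError> {
    ///     let (_sign, logabsdet) = m.f_slogdet()?;
    ///     Ok(logabsdet)
    /// }
    /// ```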
31649 pub fn f_slogdet(&self) -> Result<(Tensor, Tensor), TchError> {
31650 let mut c_tensors = [std::ptr::null_mut(); 2];
31651 unsafe_torch_err!(atg_slogdet(c_tensors.as_mut_ptr(), self.c_tensor));
31652 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
31653 }
31654
31655 pub fn f_slogdet_out(
31656 &self,
31657 sign: &Tensor,
31658 logabsdet: &Tensor,
31659 ) -> Result<(Tensor, Tensor), TchError> {
31660 let mut c_tensors = [std::ptr::null_mut(); 2];
31661 unsafe_torch_err!(atg_slogdet_out(
31662 c_tensors.as_mut_ptr(),
31663 sign.c_tensor,
31664 logabsdet.c_tensor,
31665 self.c_tensor
31666 ));
31667 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
31668 }
31669
31670 pub fn f_slow_conv3d<T: Borrow<Tensor>>(
31671 &self,
31672 weight: &Tensor,
31673 kernel_size: impl IntList,
31674 bias: Option<T>,
31675 stride: impl IntList,
31676 padding: impl IntList,
31677 ) -> Result<Tensor, TchError> {
31678 let mut c_tensors = [std::ptr::null_mut(); 1];
31679 unsafe_torch_err!(atg_slow_conv3d(
31680 c_tensors.as_mut_ptr(),
31681 self.c_tensor,
31682 weight.c_tensor,
31683 kernel_size.as_ptr(),
31684 kernel_size.len_i32(),
31685 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
31686 stride.as_ptr(),
31687 stride.len_i32(),
31688 padding.as_ptr(),
31689 padding.len_i32()
31690 ));
31691 Ok(Tensor { c_tensor: c_tensors[0] })
31692 }
31693
31694 pub fn f_slow_conv3d_out<T: Borrow<Tensor>>(
31695 &self,
31696 out: &Tensor,
31697 weight: &Tensor,
31698 kernel_size: impl IntList,
31699 bias: Option<T>,
31700 stride: impl IntList,
31701 padding: impl IntList,
31702 ) -> Result<Tensor, TchError> {
31703 let mut c_tensors = [std::ptr::null_mut(); 1];
31704 unsafe_torch_err!(atg_slow_conv3d_out(
31705 c_tensors.as_mut_ptr(),
31706 out.c_tensor,
31707 self.c_tensor,
31708 weight.c_tensor,
31709 kernel_size.as_ptr(),
31710 kernel_size.len_i32(),
31711 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
31712 stride.as_ptr(),
31713 stride.len_i32(),
31714 padding.as_ptr(),
31715 padding.len_i32()
31716 ));
31717 Ok(Tensor { c_tensor: c_tensors[0] })
31718 }
31719
31720 pub fn f_slow_conv_dilated2d<T: Borrow<Tensor>>(
31721 &self,
31722 weight: &Tensor,
31723 kernel_size: impl IntList,
31724 bias: Option<T>,
31725 stride: impl IntList,
31726 padding: impl IntList,
31727 dilation: impl IntList,
31728 ) -> Result<Tensor, TchError> {
31729 let mut c_tensors = [std::ptr::null_mut(); 1];
31730 unsafe_torch_err!(atg_slow_conv_dilated2d(
31731 c_tensors.as_mut_ptr(),
31732 self.c_tensor,
31733 weight.c_tensor,
31734 kernel_size.as_ptr(),
31735 kernel_size.len_i32(),
31736 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
31737 stride.as_ptr(),
31738 stride.len_i32(),
31739 padding.as_ptr(),
31740 padding.len_i32(),
31741 dilation.as_ptr(),
31742 dilation.len_i32()
31743 ));
31744 Ok(Tensor { c_tensor: c_tensors[0] })
31745 }
31746
31747 pub fn f_slow_conv_dilated2d_out<T: Borrow<Tensor>>(
31748 &self,
31749 out: &Tensor,
31750 weight: &Tensor,
31751 kernel_size: impl IntList,
31752 bias: Option<T>,
31753 stride: impl IntList,
31754 padding: impl IntList,
31755 dilation: impl IntList,
31756 ) -> Result<Tensor, TchError> {
31757 let mut c_tensors = [std::ptr::null_mut(); 1];
31758 unsafe_torch_err!(atg_slow_conv_dilated2d_out(
31759 c_tensors.as_mut_ptr(),
31760 out.c_tensor,
31761 self.c_tensor,
31762 weight.c_tensor,
31763 kernel_size.as_ptr(),
31764 kernel_size.len_i32(),
31765 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
31766 stride.as_ptr(),
31767 stride.len_i32(),
31768 padding.as_ptr(),
31769 padding.len_i32(),
31770 dilation.as_ptr(),
31771 dilation.len_i32()
31772 ));
31773 Ok(Tensor { c_tensor: c_tensors[0] })
31774 }
31775
31776 pub fn f_slow_conv_dilated3d<T: Borrow<Tensor>>(
31777 &self,
31778 weight: &Tensor,
31779 kernel_size: impl IntList,
31780 bias: Option<T>,
31781 stride: impl IntList,
31782 padding: impl IntList,
31783 dilation: impl IntList,
31784 ) -> Result<Tensor, TchError> {
31785 let mut c_tensors = [std::ptr::null_mut(); 1];
31786 unsafe_torch_err!(atg_slow_conv_dilated3d(
31787 c_tensors.as_mut_ptr(),
31788 self.c_tensor,
31789 weight.c_tensor,
31790 kernel_size.as_ptr(),
31791 kernel_size.len_i32(),
31792 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
31793 stride.as_ptr(),
31794 stride.len_i32(),
31795 padding.as_ptr(),
31796 padding.len_i32(),
31797 dilation.as_ptr(),
31798 dilation.len_i32()
31799 ));
31800 Ok(Tensor { c_tensor: c_tensors[0] })
31801 }
31802
31803 pub fn f_slow_conv_dilated3d_out<T: Borrow<Tensor>>(
31804 &self,
31805 out: &Tensor,
31806 weight: &Tensor,
31807 kernel_size: impl IntList,
31808 bias: Option<T>,
31809 stride: impl IntList,
31810 padding: impl IntList,
31811 dilation: impl IntList,
31812 ) -> Result<Tensor, TchError> {
31813 let mut c_tensors = [std::ptr::null_mut(); 1];
31814 unsafe_torch_err!(atg_slow_conv_dilated3d_out(
31815 c_tensors.as_mut_ptr(),
31816 out.c_tensor,
31817 self.c_tensor,
31818 weight.c_tensor,
31819 kernel_size.as_ptr(),
31820 kernel_size.len_i32(),
31821 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
31822 stride.as_ptr(),
31823 stride.len_i32(),
31824 padding.as_ptr(),
31825 padding.len_i32(),
31826 dilation.as_ptr(),
31827 dilation.len_i32()
31828 ));
31829 Ok(Tensor { c_tensor: c_tensors[0] })
31830 }
31831
31832 pub fn f_slow_conv_transpose2d<T: Borrow<Tensor>>(
31833 &self,
31834 weight: &Tensor,
31835 kernel_size: impl IntList,
31836 bias: Option<T>,
31837 stride: impl IntList,
31838 padding: impl IntList,
31839 output_padding: impl IntList,
31840 dilation: impl IntList,
31841 ) -> Result<Tensor, TchError> {
31842 let mut c_tensors = [std::ptr::null_mut(); 1];
31843 unsafe_torch_err!(atg_slow_conv_transpose2d(
31844 c_tensors.as_mut_ptr(),
31845 self.c_tensor,
31846 weight.c_tensor,
31847 kernel_size.as_ptr(),
31848 kernel_size.len_i32(),
31849 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
31850 stride.as_ptr(),
31851 stride.len_i32(),
31852 padding.as_ptr(),
31853 padding.len_i32(),
31854 output_padding.as_ptr(),
31855 output_padding.len_i32(),
31856 dilation.as_ptr(),
31857 dilation.len_i32()
31858 ));
31859 Ok(Tensor { c_tensor: c_tensors[0] })
31860 }
31861
31862 pub fn f_slow_conv_transpose2d_out<T: Borrow<Tensor>>(
31863 &self,
31864 out: &Tensor,
31865 weight: &Tensor,
31866 kernel_size: impl IntList,
31867 bias: Option<T>,
31868 stride: impl IntList,
31869 padding: impl IntList,
31870 output_padding: impl IntList,
31871 dilation: impl IntList,
31872 ) -> Result<Tensor, TchError> {
31873 let mut c_tensors = [std::ptr::null_mut(); 1];
31874 unsafe_torch_err!(atg_slow_conv_transpose2d_out(
31875 c_tensors.as_mut_ptr(),
31876 out.c_tensor,
31877 self.c_tensor,
31878 weight.c_tensor,
31879 kernel_size.as_ptr(),
31880 kernel_size.len_i32(),
31881 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
31882 stride.as_ptr(),
31883 stride.len_i32(),
31884 padding.as_ptr(),
31885 padding.len_i32(),
31886 output_padding.as_ptr(),
31887 output_padding.len_i32(),
31888 dilation.as_ptr(),
31889 dilation.len_i32()
31890 ));
31891 Ok(Tensor { c_tensor: c_tensors[0] })
31892 }
31893
31894 pub fn f_slow_conv_transpose3d<T: Borrow<Tensor>>(
31895 &self,
31896 weight: &Tensor,
31897 kernel_size: impl IntList,
31898 bias: Option<T>,
31899 stride: impl IntList,
31900 padding: impl IntList,
31901 output_padding: impl IntList,
31902 dilation: impl IntList,
31903 ) -> Result<Tensor, TchError> {
31904 let mut c_tensors = [std::ptr::null_mut(); 1];
31905 unsafe_torch_err!(atg_slow_conv_transpose3d(
31906 c_tensors.as_mut_ptr(),
31907 self.c_tensor,
31908 weight.c_tensor,
31909 kernel_size.as_ptr(),
31910 kernel_size.len_i32(),
31911 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
31912 stride.as_ptr(),
31913 stride.len_i32(),
31914 padding.as_ptr(),
31915 padding.len_i32(),
31916 output_padding.as_ptr(),
31917 output_padding.len_i32(),
31918 dilation.as_ptr(),
31919 dilation.len_i32()
31920 ));
31921 Ok(Tensor { c_tensor: c_tensors[0] })
31922 }
31923
31924 pub fn f_slow_conv_transpose3d_out<T: Borrow<Tensor>>(
31925 &self,
31926 out: &Tensor,
31927 weight: &Tensor,
31928 kernel_size: impl IntList,
31929 bias: Option<T>,
31930 stride: impl IntList,
31931 padding: impl IntList,
31932 output_padding: impl IntList,
31933 dilation: impl IntList,
31934 ) -> Result<Tensor, TchError> {
31935 let mut c_tensors = [std::ptr::null_mut(); 1];
31936 unsafe_torch_err!(atg_slow_conv_transpose3d_out(
31937 c_tensors.as_mut_ptr(),
31938 out.c_tensor,
31939 self.c_tensor,
31940 weight.c_tensor,
31941 kernel_size.as_ptr(),
31942 kernel_size.len_i32(),
31943 bias.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
31944 stride.as_ptr(),
31945 stride.len_i32(),
31946 padding.as_ptr(),
31947 padding.len_i32(),
31948 output_padding.as_ptr(),
31949 output_padding.len_i32(),
31950 dilation.as_ptr(),
31951 dilation.len_i32()
31952 ));
31953 Ok(Tensor { c_tensor: c_tensors[0] })
31954 }
31955
31956 pub fn f_smm(&self, mat2: &Tensor) -> Result<Tensor, TchError> {
31957 let mut c_tensors = [std::ptr::null_mut(); 1];
31958 unsafe_torch_err!(atg_smm(c_tensors.as_mut_ptr(), self.c_tensor, mat2.c_tensor));
31959 Ok(Tensor { c_tensor: c_tensors[0] })
31960 }
31961
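    /// Smooth L1 (Huber-style) loss between `self` and `target`; `beta` sets the quadratic/linear
    /// transition point and `reduction` selects none/mean/sum.
    ///
    /// Illustrative sketch only (not part of the generated bindings); the mean reduction and
    /// `beta = 1.0` are the commonly used defaults, assumed here for the example.
    ///
    /// ```ignore
    /// fn regression_loss(pred: &Tensor, target: &Tensor) -> Result<Tensor, TchError> {
    ///     pred.f_smooth_l1_loss(target, crate::Reduction::Mean, 1.0)
    /// }
    /// ```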
31962 pub fn f_smooth_l1_loss(
31963 &self,
31964 target: &Tensor,
31965 reduction: crate::Reduction,
31966 beta: f64,
31967 ) -> Result<Tensor, TchError> {
31968 let mut c_tensors = [std::ptr::null_mut(); 1];
31969 unsafe_torch_err!(atg_smooth_l1_loss(
31970 c_tensors.as_mut_ptr(),
31971 self.c_tensor,
31972 target.c_tensor,
31973 reduction.to_int(),
31974 beta
31975 ));
31976 Ok(Tensor { c_tensor: c_tensors[0] })
31977 }
31978
31979 pub fn f_smooth_l1_loss_backward(
31980 &self,
31981 grad_output: &Tensor,
31982 target: &Tensor,
31983 reduction: crate::Reduction,
31984 beta: f64,
31985 ) -> Result<Tensor, TchError> {
31986 let mut c_tensors = [std::ptr::null_mut(); 1];
31987 unsafe_torch_err!(atg_smooth_l1_loss_backward(
31988 c_tensors.as_mut_ptr(),
31989 grad_output.c_tensor,
31990 self.c_tensor,
31991 target.c_tensor,
31992 reduction.to_int(),
31993 beta
31994 ));
31995 Ok(Tensor { c_tensor: c_tensors[0] })
31996 }
31997
31998 pub fn f_smooth_l1_loss_backward_grad_input(
31999 &self,
32000 grad_input: &Tensor,
32001 grad_output: &Tensor,
32002 target: &Tensor,
32003 reduction: crate::Reduction,
32004 beta: f64,
32005 ) -> Result<Tensor, TchError> {
32006 let mut c_tensors = [std::ptr::null_mut(); 1];
32007 unsafe_torch_err!(atg_smooth_l1_loss_backward_grad_input(
32008 c_tensors.as_mut_ptr(),
32009 grad_input.c_tensor,
32010 grad_output.c_tensor,
32011 self.c_tensor,
32012 target.c_tensor,
32013 reduction.to_int(),
32014 beta
32015 ));
32016 Ok(Tensor { c_tensor: c_tensors[0] })
32017 }
32018
32019 pub fn f_smooth_l1_loss_out(
32020 &self,
32021 out: &Tensor,
32022 target: &Tensor,
32023 reduction: crate::Reduction,
32024 beta: f64,
32025 ) -> Result<Tensor, TchError> {
32026 let mut c_tensors = [std::ptr::null_mut(); 1];
32027 unsafe_torch_err!(atg_smooth_l1_loss_out(
32028 c_tensors.as_mut_ptr(),
32029 out.c_tensor,
32030 self.c_tensor,
32031 target.c_tensor,
32032 reduction.to_int(),
32033 beta
32034 ));
32035 Ok(Tensor { c_tensor: c_tensors[0] })
32036 }
32037
32038 pub fn f_soft_margin_loss(
32039 &self,
32040 target: &Tensor,
32041 reduction: crate::Reduction,
32042 ) -> Result<Tensor, TchError> {
32043 let mut c_tensors = [std::ptr::null_mut(); 1];
32044 unsafe_torch_err!(atg_soft_margin_loss(
32045 c_tensors.as_mut_ptr(),
32046 self.c_tensor,
32047 target.c_tensor,
32048 reduction.to_int()
32049 ));
32050 Ok(Tensor { c_tensor: c_tensors[0] })
32051 }
32052
32053 pub fn f_soft_margin_loss_backward(
32054 &self,
32055 grad_output: &Tensor,
32056 target: &Tensor,
32057 reduction: crate::Reduction,
32058 ) -> Result<Tensor, TchError> {
32059 let mut c_tensors = [std::ptr::null_mut(); 1];
32060 unsafe_torch_err!(atg_soft_margin_loss_backward(
32061 c_tensors.as_mut_ptr(),
32062 grad_output.c_tensor,
32063 self.c_tensor,
32064 target.c_tensor,
32065 reduction.to_int()
32066 ));
32067 Ok(Tensor { c_tensor: c_tensors[0] })
32068 }
32069
32070 pub fn f_soft_margin_loss_backward_grad_input(
32071 &self,
32072 grad_input: &Tensor,
32073 grad_output: &Tensor,
32074 target: &Tensor,
32075 reduction: crate::Reduction,
32076 ) -> Result<Tensor, TchError> {
32077 let mut c_tensors = [std::ptr::null_mut(); 1];
32078 unsafe_torch_err!(atg_soft_margin_loss_backward_grad_input(
32079 c_tensors.as_mut_ptr(),
32080 grad_input.c_tensor,
32081 grad_output.c_tensor,
32082 self.c_tensor,
32083 target.c_tensor,
32084 reduction.to_int()
32085 ));
32086 Ok(Tensor { c_tensor: c_tensors[0] })
32087 }
32088
32089 pub fn f_soft_margin_loss_out(
32090 &self,
32091 out: &Tensor,
32092 target: &Tensor,
32093 reduction: crate::Reduction,
32094 ) -> Result<Tensor, TchError> {
32095 let mut c_tensors = [std::ptr::null_mut(); 1];
32096 unsafe_torch_err!(atg_soft_margin_loss_out(
32097 c_tensors.as_mut_ptr(),
32098 out.c_tensor,
32099 self.c_tensor,
32100 target.c_tensor,
32101 reduction.to_int()
32102 ));
32103 Ok(Tensor { c_tensor: c_tensors[0] })
32104 }
32105
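    /// Softmax along `dim`; passing a `dtype` upcasts before the exponentiation, which helps
    /// numerical stability for half-precision inputs.
    ///
    /// Illustrative sketch only (not part of the generated bindings); computing in `Kind::Float`
    /// over the last dimension is an assumed configuration.
    ///
    /// ```ignore
    /// fn stable_softmax(logits: &Tensor) -> Result<Tensor, TchError> {
    ///     logits.f_softmax(-1, Kind::Float)
    /// }
    /// ```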
32106 pub fn f_softmax(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Result<Tensor, TchError> {
32107 let mut c_tensors = [std::ptr::null_mut(); 1];
32108 unsafe_torch_err!(atg_softmax(
32109 c_tensors.as_mut_ptr(),
32110 self.c_tensor,
32111 dim,
32112 dtype.into().map_or(-1, |s| s.c_int())
32113 ));
32114 Ok(Tensor { c_tensor: c_tensors[0] })
32115 }
32116
32117 pub fn f_softmax_int_out(
32118 &self,
32119 out: &Tensor,
32120 dim: i64,
32121 dtype: impl Into<Option<Kind>>,
32122 ) -> Result<Tensor, TchError> {
32123 let mut c_tensors = [std::ptr::null_mut(); 1];
32124 unsafe_torch_err!(atg_softmax_int_out(
32125 c_tensors.as_mut_ptr(),
32126 out.c_tensor,
32127 self.c_tensor,
32128 dim,
32129 dtype.into().map_or(-1, |s| s.c_int())
32130 ));
32131 Ok(Tensor { c_tensor: c_tensors[0] })
32132 }
32133
32134 pub fn f_softplus(&self) -> Result<Tensor, TchError> {
32135 let mut c_tensors = [std::ptr::null_mut(); 1];
32136 unsafe_torch_err!(atg_softplus(c_tensors.as_mut_ptr(), self.c_tensor));
32137 Ok(Tensor { c_tensor: c_tensors[0] })
32138 }
32139
32140 pub fn f_softplus_backward<S: Into<Scalar>>(
32141 &self,
32142 grad_output: &Tensor,
32143 beta: S,
32144 threshold: S,
32145 ) -> Result<Tensor, TchError> {
32146 let mut c_tensors = [std::ptr::null_mut(); 1];
32147 unsafe_torch_err!(atg_softplus_backward(
32148 c_tensors.as_mut_ptr(),
32149 grad_output.c_tensor,
32150 self.c_tensor,
32151 beta.into().c_scalar,
32152 threshold.into().c_scalar
32153 ));
32154 Ok(Tensor { c_tensor: c_tensors[0] })
32155 }
32156
32157 pub fn f_softplus_backward_grad_input<S: Into<Scalar>>(
32158 &self,
32159 grad_input: &Tensor,
32160 grad_output: &Tensor,
32161 beta: S,
32162 threshold: S,
32163 ) -> Result<Tensor, TchError> {
32164 let mut c_tensors = [std::ptr::null_mut(); 1];
32165 unsafe_torch_err!(atg_softplus_backward_grad_input(
32166 c_tensors.as_mut_ptr(),
32167 grad_input.c_tensor,
32168 grad_output.c_tensor,
32169 self.c_tensor,
32170 beta.into().c_scalar,
32171 threshold.into().c_scalar
32172 ));
32173 Ok(Tensor { c_tensor: c_tensors[0] })
32174 }
32175
32176 pub fn f_softplus_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
32177 let mut c_tensors = [std::ptr::null_mut(); 1];
32178 unsafe_torch_err!(atg_softplus_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
32179 Ok(Tensor { c_tensor: c_tensors[0] })
32180 }
32181
32182 pub fn f_softshrink(&self) -> Result<Tensor, TchError> {
32183 let mut c_tensors = [std::ptr::null_mut(); 1];
32184 unsafe_torch_err!(atg_softshrink(c_tensors.as_mut_ptr(), self.c_tensor));
32185 Ok(Tensor { c_tensor: c_tensors[0] })
32186 }
32187
32188 pub fn f_softshrink_backward<S: Into<Scalar>>(
32189 &self,
32190 grad_output: &Tensor,
32191 lambd: S,
32192 ) -> Result<Tensor, TchError> {
32193 let mut c_tensors = [std::ptr::null_mut(); 1];
32194 unsafe_torch_err!(atg_softshrink_backward(
32195 c_tensors.as_mut_ptr(),
32196 grad_output.c_tensor,
32197 self.c_tensor,
32198 lambd.into().c_scalar
32199 ));
32200 Ok(Tensor { c_tensor: c_tensors[0] })
32201 }
32202
32203 pub fn f_softshrink_backward_grad_input<S: Into<Scalar>>(
32204 &self,
32205 grad_input: &Tensor,
32206 grad_output: &Tensor,
32207 lambd: S,
32208 ) -> Result<Tensor, TchError> {
32209 let mut c_tensors = [std::ptr::null_mut(); 1];
32210 unsafe_torch_err!(atg_softshrink_backward_grad_input(
32211 c_tensors.as_mut_ptr(),
32212 grad_input.c_tensor,
32213 grad_output.c_tensor,
32214 self.c_tensor,
32215 lambd.into().c_scalar
32216 ));
32217 Ok(Tensor { c_tensor: c_tensors[0] })
32218 }
32219
32220 pub fn f_softshrink_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
32221 let mut c_tensors = [std::ptr::null_mut(); 1];
32222 unsafe_torch_err!(atg_softshrink_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
32223 Ok(Tensor { c_tensor: c_tensors[0] })
32224 }
32225
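    // Illustrative sketch: the fallible sort returns a (values, indices) pair,
    // mirroring the two C tensors filled below; `descending` is passed to C as
    // 0/1. `t` is assumed to be a tensor built elsewhere.
    //
    //     let (values, indices) = t.f_sort(-1, /* descending = */ true)?;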
32226 pub fn f_sort(&self, dim: i64, descending: bool) -> Result<(Tensor, Tensor), TchError> {
32227 let mut c_tensors = [std::ptr::null_mut(); 2];
32228 unsafe_torch_err!(atg_sort(
32229 c_tensors.as_mut_ptr(),
32230 self.c_tensor,
32231 dim,
32232 if descending { 1 } else { 0 }
32233 ));
32234 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
32235 }
32236
32237 pub fn f_sort_stable(
32238 &self,
32239 stable: bool,
32240 dim: i64,
32241 descending: bool,
32242 ) -> Result<(Tensor, Tensor), TchError> {
32243 let mut c_tensors = [std::ptr::null_mut(); 2];
32244 unsafe_torch_err!(atg_sort_stable(
32245 c_tensors.as_mut_ptr(),
32246 self.c_tensor,
32247 if stable { 1 } else { 0 },
32248 dim,
32249 if descending { 1 } else { 0 }
32250 ));
32251 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
32252 }
32253
32254 pub fn f_sort_values(
32255 &self,
32256 values: &Tensor,
32257 indices: &Tensor,
32258 dim: i64,
32259 descending: bool,
32260 ) -> Result<(Tensor, Tensor), TchError> {
32261 let mut c_tensors = [std::ptr::null_mut(); 2];
32262 unsafe_torch_err!(atg_sort_values(
32263 c_tensors.as_mut_ptr(),
32264 values.c_tensor,
32265 indices.c_tensor,
32266 self.c_tensor,
32267 dim,
32268 if descending { 1 } else { 0 }
32269 ));
32270 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
32271 }
32272
32273 pub fn f_sort_values_stable(
32274 &self,
32275 values: &Tensor,
32276 indices: &Tensor,
32277 stable: bool,
32278 dim: i64,
32279 descending: bool,
32280 ) -> Result<(Tensor, Tensor), TchError> {
32281 let mut c_tensors = [std::ptr::null_mut(); 2];
32282 unsafe_torch_err!(atg_sort_values_stable(
32283 c_tensors.as_mut_ptr(),
32284 values.c_tensor,
32285 indices.c_tensor,
32286 self.c_tensor,
32287 if stable { 1 } else { 0 },
32288 dim,
32289 if descending { 1 } else { 0 }
32290 ));
32291 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
32292 }
32293
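    // Illustrative note: the sparse constructors below are associated functions
    // (no `self`) and take the target dtype/device as an `options: (Kind, Device)`
    // tuple, forwarded to C as two ints. A hedged CSR-style sketch, assuming the
    // index and value tensors were built elsewhere in the layout LibTorch expects:
    //
    //     let csr = Tensor::f_sparse_csr_tensor(
    //         &crow_indices, &col_indices, &values, (Kind::Float, Device::Cpu))?;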
32294 pub fn f_sparse_bsc_tensor(
32295 ccol_indices: &Tensor,
32296 row_indices: &Tensor,
32297 values: &Tensor,
32298 options: (Kind, Device),
32299 ) -> Result<Tensor, TchError> {
32300 let mut c_tensors = [std::ptr::null_mut(); 1];
32301 unsafe_torch_err!(atg_sparse_bsc_tensor(
32302 c_tensors.as_mut_ptr(),
32303 ccol_indices.c_tensor,
32304 row_indices.c_tensor,
32305 values.c_tensor,
32306 options.0.c_int(),
32307 options.1.c_int()
32308 ));
32309 Ok(Tensor { c_tensor: c_tensors[0] })
32310 }
32311
32312 pub fn f_sparse_bsc_tensor_ccol_row_value_size(
32313 ccol_indices: &Tensor,
32314 row_indices: &Tensor,
32315 values: &Tensor,
32316 size: impl IntList,
32317 options: (Kind, Device),
32318 ) -> Result<Tensor, TchError> {
32319 let mut c_tensors = [std::ptr::null_mut(); 1];
32320 unsafe_torch_err!(atg_sparse_bsc_tensor_ccol_row_value_size(
32321 c_tensors.as_mut_ptr(),
32322 ccol_indices.c_tensor,
32323 row_indices.c_tensor,
32324 values.c_tensor,
32325 size.as_ptr(),
32326 size.len_i32(),
32327 options.0.c_int(),
32328 options.1.c_int()
32329 ));
32330 Ok(Tensor { c_tensor: c_tensors[0] })
32331 }
32332
32333 pub fn f_sparse_bsr_tensor(
32334 crow_indices: &Tensor,
32335 col_indices: &Tensor,
32336 values: &Tensor,
32337 options: (Kind, Device),
32338 ) -> Result<Tensor, TchError> {
32339 let mut c_tensors = [std::ptr::null_mut(); 1];
32340 unsafe_torch_err!(atg_sparse_bsr_tensor(
32341 c_tensors.as_mut_ptr(),
32342 crow_indices.c_tensor,
32343 col_indices.c_tensor,
32344 values.c_tensor,
32345 options.0.c_int(),
32346 options.1.c_int()
32347 ));
32348 Ok(Tensor { c_tensor: c_tensors[0] })
32349 }
32350
32351 pub fn f_sparse_bsr_tensor_crow_col_value_size(
32352 crow_indices: &Tensor,
32353 col_indices: &Tensor,
32354 values: &Tensor,
32355 size: impl IntList,
32356 options: (Kind, Device),
32357 ) -> Result<Tensor, TchError> {
32358 let mut c_tensors = [std::ptr::null_mut(); 1];
32359 unsafe_torch_err!(atg_sparse_bsr_tensor_crow_col_value_size(
32360 c_tensors.as_mut_ptr(),
32361 crow_indices.c_tensor,
32362 col_indices.c_tensor,
32363 values.c_tensor,
32364 size.as_ptr(),
32365 size.len_i32(),
32366 options.0.c_int(),
32367 options.1.c_int()
32368 ));
32369 Ok(Tensor { c_tensor: c_tensors[0] })
32370 }
32371
32372 pub fn f_sparse_compressed_tensor(
32373 compressed_indices: &Tensor,
32374 plain_indices: &Tensor,
32375 values: &Tensor,
32376 options: (Kind, Device),
32377 ) -> Result<Tensor, TchError> {
32378 let mut c_tensors = [std::ptr::null_mut(); 1];
32379 unsafe_torch_err!(atg_sparse_compressed_tensor(
32380 c_tensors.as_mut_ptr(),
32381 compressed_indices.c_tensor,
32382 plain_indices.c_tensor,
32383 values.c_tensor,
32384 options.0.c_int(),
32385 options.1.c_int()
32386 ));
32387 Ok(Tensor { c_tensor: c_tensors[0] })
32388 }
32389
32390 pub fn f_sparse_compressed_tensor_comp_plain_value_size(
32391 compressed_indices: &Tensor,
32392 plain_indices: &Tensor,
32393 values: &Tensor,
32394 size: impl IntList,
32395 options: (Kind, Device),
32396 ) -> Result<Tensor, TchError> {
32397 let mut c_tensors = [std::ptr::null_mut(); 1];
32398 unsafe_torch_err!(atg_sparse_compressed_tensor_comp_plain_value_size(
32399 c_tensors.as_mut_ptr(),
32400 compressed_indices.c_tensor,
32401 plain_indices.c_tensor,
32402 values.c_tensor,
32403 size.as_ptr(),
32404 size.len_i32(),
32405 options.0.c_int(),
32406 options.1.c_int()
32407 ));
32408 Ok(Tensor { c_tensor: c_tensors[0] })
32409 }
32410
32411 pub fn f_sparse_coo_tensor(
32412 size: impl IntList,
32413 options: (Kind, Device),
32414 ) -> Result<Tensor, TchError> {
32415 let mut c_tensors = [std::ptr::null_mut(); 1];
32416 unsafe_torch_err!(atg_sparse_coo_tensor(
32417 c_tensors.as_mut_ptr(),
32418 size.as_ptr(),
32419 size.len_i32(),
32420 options.0.c_int(),
32421 options.1.c_int()
32422 ));
32423 Ok(Tensor { c_tensor: c_tensors[0] })
32424 }
32425
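    // Illustrative sketch for the COO constructors: `indices` is an integer
    // tensor of shape (sparse_dim, nnz) and `values` holds the nnz entries,
    // following the usual LibTorch COO convention; `is_coalesced` is passed to
    // C as 0/1. `indices` and `values` are assumed built elsewhere.
    //
    //     let sp = Tensor::f_sparse_coo_tensor_indices(
    //         &indices, &values, (Kind::Float, Device::Cpu), false)?;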
32426 pub fn f_sparse_coo_tensor_indices(
32427 indices: &Tensor,
32428 values: &Tensor,
32429 options: (Kind, Device),
32430 is_coalesced: bool,
32431 ) -> Result<Tensor, TchError> {
32432 let mut c_tensors = [std::ptr::null_mut(); 1];
32433 unsafe_torch_err!(atg_sparse_coo_tensor_indices(
32434 c_tensors.as_mut_ptr(),
32435 indices.c_tensor,
32436 values.c_tensor,
32437 options.0.c_int(),
32438 options.1.c_int(),
32439 if is_coalesced { 1 } else { 0 }
32440 ));
32441 Ok(Tensor { c_tensor: c_tensors[0] })
32442 }
32443
32444 pub fn f_sparse_coo_tensor_indices_size(
32445 indices: &Tensor,
32446 values: &Tensor,
32447 size: impl IntList,
32448 options: (Kind, Device),
32449 is_coalesced: bool,
32450 ) -> Result<Tensor, TchError> {
32451 let mut c_tensors = [std::ptr::null_mut(); 1];
32452 unsafe_torch_err!(atg_sparse_coo_tensor_indices_size(
32453 c_tensors.as_mut_ptr(),
32454 indices.c_tensor,
32455 values.c_tensor,
32456 size.as_ptr(),
32457 size.len_i32(),
32458 options.0.c_int(),
32459 options.1.c_int(),
32460 if is_coalesced { 1 } else { 0 }
32461 ));
32462 Ok(Tensor { c_tensor: c_tensors[0] })
32463 }
32464
32465 pub fn f_sparse_coo_tensor_size_out(
32466 out: &Tensor,
32467 size: impl IntList,
32468 ) -> Result<Tensor, TchError> {
32469 let mut c_tensors = [std::ptr::null_mut(); 1];
32470 unsafe_torch_err!(atg_sparse_coo_tensor_size_out(
32471 c_tensors.as_mut_ptr(),
32472 out.c_tensor,
32473 size.as_ptr(),
32474 size.len_i32()
32475 ));
32476 Ok(Tensor { c_tensor: c_tensors[0] })
32477 }
32478
32479 pub fn f_sparse_csc_tensor(
32480 ccol_indices: &Tensor,
32481 row_indices: &Tensor,
32482 values: &Tensor,
32483 options: (Kind, Device),
32484 ) -> Result<Tensor, TchError> {
32485 let mut c_tensors = [std::ptr::null_mut(); 1];
32486 unsafe_torch_err!(atg_sparse_csc_tensor(
32487 c_tensors.as_mut_ptr(),
32488 ccol_indices.c_tensor,
32489 row_indices.c_tensor,
32490 values.c_tensor,
32491 options.0.c_int(),
32492 options.1.c_int()
32493 ));
32494 Ok(Tensor { c_tensor: c_tensors[0] })
32495 }
32496
32497 pub fn f_sparse_csc_tensor_ccol_row_value_size(
32498 ccol_indices: &Tensor,
32499 row_indices: &Tensor,
32500 values: &Tensor,
32501 size: impl IntList,
32502 options: (Kind, Device),
32503 ) -> Result<Tensor, TchError> {
32504 let mut c_tensors = [std::ptr::null_mut(); 1];
32505 unsafe_torch_err!(atg_sparse_csc_tensor_ccol_row_value_size(
32506 c_tensors.as_mut_ptr(),
32507 ccol_indices.c_tensor,
32508 row_indices.c_tensor,
32509 values.c_tensor,
32510 size.as_ptr(),
32511 size.len_i32(),
32512 options.0.c_int(),
32513 options.1.c_int()
32514 ));
32515 Ok(Tensor { c_tensor: c_tensors[0] })
32516 }
32517
32518 pub fn f_sparse_csr_tensor(
32519 crow_indices: &Tensor,
32520 col_indices: &Tensor,
32521 values: &Tensor,
32522 options: (Kind, Device),
32523 ) -> Result<Tensor, TchError> {
32524 let mut c_tensors = [std::ptr::null_mut(); 1];
32525 unsafe_torch_err!(atg_sparse_csr_tensor(
32526 c_tensors.as_mut_ptr(),
32527 crow_indices.c_tensor,
32528 col_indices.c_tensor,
32529 values.c_tensor,
32530 options.0.c_int(),
32531 options.1.c_int()
32532 ));
32533 Ok(Tensor { c_tensor: c_tensors[0] })
32534 }
32535
32536 pub fn f_sparse_csr_tensor_crow_col_value_size(
32537 crow_indices: &Tensor,
32538 col_indices: &Tensor,
32539 values: &Tensor,
32540 size: impl IntList,
32541 options: (Kind, Device),
32542 ) -> Result<Tensor, TchError> {
32543 let mut c_tensors = [std::ptr::null_mut(); 1];
32544 unsafe_torch_err!(atg_sparse_csr_tensor_crow_col_value_size(
32545 c_tensors.as_mut_ptr(),
32546 crow_indices.c_tensor,
32547 col_indices.c_tensor,
32548 values.c_tensor,
32549 size.as_ptr(),
32550 size.len_i32(),
32551 options.0.c_int(),
32552 options.1.c_int()
32553 ));
32554 Ok(Tensor { c_tensor: c_tensors[0] })
32555 }
32556
32557 pub fn f_sparse_dim(&self) -> Result<i64, TchError> {
32558 let return_;
32559 unsafe_torch_err!(return_ = atg_sparse_dim(self.c_tensor));
32560 Ok(return_)
32561 }
32562
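    // Illustrative sketch: `f_sparse_mask` keeps the entries of a dense tensor
    // that fall on the sparsity pattern of `mask`, returning a sparse tensor.
    // `dense` and the sparse `mask` are hypothetical tensors built elsewhere.
    //
    //     let masked = dense.f_sparse_mask(&mask)?;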
32563 pub fn f_sparse_mask(&self, mask: &Tensor) -> Result<Tensor, TchError> {
32564 let mut c_tensors = [std::ptr::null_mut(); 1];
32565 unsafe_torch_err!(atg_sparse_mask(c_tensors.as_mut_ptr(), self.c_tensor, mask.c_tensor));
32566 Ok(Tensor { c_tensor: c_tensors[0] })
32567 }
32568
32569 pub fn f_sparse_mask_out(&self, out: &Tensor, mask: &Tensor) -> Result<Tensor, TchError> {
32570 let mut c_tensors = [std::ptr::null_mut(); 1];
32571 unsafe_torch_err!(atg_sparse_mask_out(
32572 c_tensors.as_mut_ptr(),
32573 out.c_tensor,
32574 self.c_tensor,
32575 mask.c_tensor
32576 ));
32577 Ok(Tensor { c_tensor: c_tensors[0] })
32578 }
32579
32580 pub fn f_sparse_resize(
32581 &self,
32582 size: impl IntList,
32583 sparse_dim: i64,
32584 dense_dim: i64,
32585 ) -> Result<Tensor, TchError> {
32586 let mut c_tensors = [std::ptr::null_mut(); 1];
32587 unsafe_torch_err!(atg_sparse_resize(
32588 c_tensors.as_mut_ptr(),
32589 self.c_tensor,
32590 size.as_ptr(),
32591 size.len_i32(),
32592 sparse_dim,
32593 dense_dim
32594 ));
32595 Ok(Tensor { c_tensor: c_tensors[0] })
32596 }
32597
32598 pub fn f_sparse_resize_(
32599 &mut self,
32600 size: impl IntList,
32601 sparse_dim: i64,
32602 dense_dim: i64,
32603 ) -> Result<Tensor, TchError> {
32604 let mut c_tensors = [std::ptr::null_mut(); 1];
32605 unsafe_torch_err!(atg_sparse_resize_(
32606 c_tensors.as_mut_ptr(),
32607 self.c_tensor,
32608 size.as_ptr(),
32609 size.len_i32(),
32610 sparse_dim,
32611 dense_dim
32612 ));
32613 Ok(Tensor { c_tensor: c_tensors[0] })
32614 }
32615
32616 pub fn f_sparse_resize_and_clear(
32617 &self,
32618 size: impl IntList,
32619 sparse_dim: i64,
32620 dense_dim: i64,
32621 ) -> Result<Tensor, TchError> {
32622 let mut c_tensors = [std::ptr::null_mut(); 1];
32623 unsafe_torch_err!(atg_sparse_resize_and_clear(
32624 c_tensors.as_mut_ptr(),
32625 self.c_tensor,
32626 size.as_ptr(),
32627 size.len_i32(),
32628 sparse_dim,
32629 dense_dim
32630 ));
32631 Ok(Tensor { c_tensor: c_tensors[0] })
32632 }
32633
32634 pub fn f_sparse_resize_and_clear_(
32635 &mut self,
32636 size: impl IntList,
32637 sparse_dim: i64,
32638 dense_dim: i64,
32639 ) -> Result<Tensor, TchError> {
32640 let mut c_tensors = [std::ptr::null_mut(); 1];
32641 unsafe_torch_err!(atg_sparse_resize_and_clear_(
32642 c_tensors.as_mut_ptr(),
32643 self.c_tensor,
32644 size.as_ptr(),
32645 size.len_i32(),
32646 sparse_dim,
32647 dense_dim
32648 ));
32649 Ok(Tensor { c_tensor: c_tensors[0] })
32650 }
32651
32652 pub fn f_sparse_resize_and_clear_out(
32653 &self,
32654 out: &Tensor,
32655 size: impl IntList,
32656 sparse_dim: i64,
32657 dense_dim: i64,
32658 ) -> Result<Tensor, TchError> {
32659 let mut c_tensors = [std::ptr::null_mut(); 1];
32660 unsafe_torch_err!(atg_sparse_resize_and_clear_out(
32661 c_tensors.as_mut_ptr(),
32662 out.c_tensor,
32663 self.c_tensor,
32664 size.as_ptr(),
32665 size.len_i32(),
32666 sparse_dim,
32667 dense_dim
32668 ));
32669 Ok(Tensor { c_tensor: c_tensors[0] })
32670 }
32671
32672 pub fn f_sparse_resize_out(
32673 &self,
32674 out: &Tensor,
32675 size: impl IntList,
32676 sparse_dim: i64,
32677 dense_dim: i64,
32678 ) -> Result<Tensor, TchError> {
32679 let mut c_tensors = [std::ptr::null_mut(); 1];
32680 unsafe_torch_err!(atg_sparse_resize_out(
32681 c_tensors.as_mut_ptr(),
32682 out.c_tensor,
32683 self.c_tensor,
32684 size.as_ptr(),
32685 size.len_i32(),
32686 sparse_dim,
32687 dense_dim
32688 ));
32689 Ok(Tensor { c_tensor: c_tensors[0] })
32690 }
32691
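    // Illustrative sketch: sampled addmm evaluates mat1 @ mat2 only at the
    // non-zero positions of `self` (a sparse CSR tensor per the LibTorch docs)
    // and adds the result to it. All three tensors are assumed built elsewhere.
    //
    //     let out = sparse_csr.f_sparse_sampled_addmm(&mat1, &mat2)?;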
32692 pub fn f_sparse_sampled_addmm(&self, mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
32693 let mut c_tensors = [std::ptr::null_mut(); 1];
32694 unsafe_torch_err!(atg_sparse_sampled_addmm(
32695 c_tensors.as_mut_ptr(),
32696 self.c_tensor,
32697 mat1.c_tensor,
32698 mat2.c_tensor
32699 ));
32700 Ok(Tensor { c_tensor: c_tensors[0] })
32701 }
32702
32703 pub fn f_sparse_sampled_addmm_out(
32704 &self,
32705 out: &Tensor,
32706 mat1: &Tensor,
32707 mat2: &Tensor,
32708 ) -> Result<Tensor, TchError> {
32709 let mut c_tensors = [std::ptr::null_mut(); 1];
32710 unsafe_torch_err!(atg_sparse_sampled_addmm_out(
32711 c_tensors.as_mut_ptr(),
32712 out.c_tensor,
32713 self.c_tensor,
32714 mat1.c_tensor,
32715 mat2.c_tensor
32716 ));
32717 Ok(Tensor { c_tensor: c_tensors[0] })
32718 }
32719
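    // Illustrative note: many of the `f_special_*` wrappers below are associated
    // functions taking their input as an explicit `x: &Tensor` rather than
    // `&self`, matching the free-function form of the underlying ATen ops.
    // A hedged sketch with a hypothetical tensor `x`:
    //
    //     let ai = Tensor::f_special_airy_ai(&x)?;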
32720 pub fn f_special_airy_ai(x: &Tensor) -> Result<Tensor, TchError> {
32721 let mut c_tensors = [std::ptr::null_mut(); 1];
32722 unsafe_torch_err!(atg_special_airy_ai(c_tensors.as_mut_ptr(), x.c_tensor));
32723 Ok(Tensor { c_tensor: c_tensors[0] })
32724 }
32725
32726 pub fn f_special_airy_ai_out(out: &Tensor, x: &Tensor) -> Result<Tensor, TchError> {
32727 let mut c_tensors = [std::ptr::null_mut(); 1];
32728 unsafe_torch_err!(atg_special_airy_ai_out(
32729 c_tensors.as_mut_ptr(),
32730 out.c_tensor,
32731 x.c_tensor
32732 ));
32733 Ok(Tensor { c_tensor: c_tensors[0] })
32734 }
32735
32736 pub fn f_special_bessel_j0(&self) -> Result<Tensor, TchError> {
32737 let mut c_tensors = [std::ptr::null_mut(); 1];
32738 unsafe_torch_err!(atg_special_bessel_j0(c_tensors.as_mut_ptr(), self.c_tensor));
32739 Ok(Tensor { c_tensor: c_tensors[0] })
32740 }
32741
32742 pub fn f_special_bessel_j0_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
32743 let mut c_tensors = [std::ptr::null_mut(); 1];
32744 unsafe_torch_err!(atg_special_bessel_j0_out(
32745 c_tensors.as_mut_ptr(),
32746 out.c_tensor,
32747 self.c_tensor
32748 ));
32749 Ok(Tensor { c_tensor: c_tensors[0] })
32750 }
32751
32752 pub fn f_special_bessel_j1(&self) -> Result<Tensor, TchError> {
32753 let mut c_tensors = [std::ptr::null_mut(); 1];
32754 unsafe_torch_err!(atg_special_bessel_j1(c_tensors.as_mut_ptr(), self.c_tensor));
32755 Ok(Tensor { c_tensor: c_tensors[0] })
32756 }
32757
32758 pub fn f_special_bessel_j1_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
32759 let mut c_tensors = [std::ptr::null_mut(); 1];
32760 unsafe_torch_err!(atg_special_bessel_j1_out(
32761 c_tensors.as_mut_ptr(),
32762 out.c_tensor,
32763 self.c_tensor
32764 ));
32765 Ok(Tensor { c_tensor: c_tensors[0] })
32766 }
32767
32768 pub fn f_special_bessel_y0(&self) -> Result<Tensor, TchError> {
32769 let mut c_tensors = [std::ptr::null_mut(); 1];
32770 unsafe_torch_err!(atg_special_bessel_y0(c_tensors.as_mut_ptr(), self.c_tensor));
32771 Ok(Tensor { c_tensor: c_tensors[0] })
32772 }
32773
32774 pub fn f_special_bessel_y0_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
32775 let mut c_tensors = [std::ptr::null_mut(); 1];
32776 unsafe_torch_err!(atg_special_bessel_y0_out(
32777 c_tensors.as_mut_ptr(),
32778 out.c_tensor,
32779 self.c_tensor
32780 ));
32781 Ok(Tensor { c_tensor: c_tensors[0] })
32782 }
32783
32784 pub fn f_special_bessel_y1(&self) -> Result<Tensor, TchError> {
32785 let mut c_tensors = [std::ptr::null_mut(); 1];
32786 unsafe_torch_err!(atg_special_bessel_y1(c_tensors.as_mut_ptr(), self.c_tensor));
32787 Ok(Tensor { c_tensor: c_tensors[0] })
32788 }
32789
32790 pub fn f_special_bessel_y1_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
32791 let mut c_tensors = [std::ptr::null_mut(); 1];
32792 unsafe_torch_err!(atg_special_bessel_y1_out(
32793 c_tensors.as_mut_ptr(),
32794 out.c_tensor,
32795 self.c_tensor
32796 ));
32797 Ok(Tensor { c_tensor: c_tensors[0] })
32798 }
32799
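    // Illustrative note on the naming scheme used by the polynomial wrappers
    // below: the plain variant takes both `x` and `n` as tensors, `_n_scalar`
    // takes the order `n` as a scalar, `_x_scalar` takes the evaluation point
    // `x` as a scalar, and the `_out` forms write into an existing tensor.
    // A hedged sketch with a hypothetical tensor `x`:
    //
    //     let t3 = Tensor::f_special_chebyshev_polynomial_t_n_scalar(&x, 3)?;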
32800 pub fn f_special_chebyshev_polynomial_t(x: &Tensor, n: &Tensor) -> Result<Tensor, TchError> {
32801 let mut c_tensors = [std::ptr::null_mut(); 1];
32802 unsafe_torch_err!(atg_special_chebyshev_polynomial_t(
32803 c_tensors.as_mut_ptr(),
32804 x.c_tensor,
32805 n.c_tensor
32806 ));
32807 Ok(Tensor { c_tensor: c_tensors[0] })
32808 }
32809
32810 pub fn f_special_chebyshev_polynomial_t_n_scalar<S: Into<Scalar>>(
32811 x: &Tensor,
32812 n: S,
32813 ) -> Result<Tensor, TchError> {
32814 let mut c_tensors = [std::ptr::null_mut(); 1];
32815 unsafe_torch_err!(atg_special_chebyshev_polynomial_t_n_scalar(
32816 c_tensors.as_mut_ptr(),
32817 x.c_tensor,
32818 n.into().c_scalar
32819 ));
32820 Ok(Tensor { c_tensor: c_tensors[0] })
32821 }
32822
32823 pub fn f_special_chebyshev_polynomial_t_n_scalar_out<S: Into<Scalar>>(
32824 out: &Tensor,
32825 x: &Tensor,
32826 n: S,
32827 ) -> Result<Tensor, TchError> {
32828 let mut c_tensors = [std::ptr::null_mut(); 1];
32829 unsafe_torch_err!(atg_special_chebyshev_polynomial_t_n_scalar_out(
32830 c_tensors.as_mut_ptr(),
32831 out.c_tensor,
32832 x.c_tensor,
32833 n.into().c_scalar
32834 ));
32835 Ok(Tensor { c_tensor: c_tensors[0] })
32836 }
32837
32838 pub fn f_special_chebyshev_polynomial_t_out(
32839 out: &Tensor,
32840 x: &Tensor,
32841 n: &Tensor,
32842 ) -> Result<Tensor, TchError> {
32843 let mut c_tensors = [std::ptr::null_mut(); 1];
32844 unsafe_torch_err!(atg_special_chebyshev_polynomial_t_out(
32845 c_tensors.as_mut_ptr(),
32846 out.c_tensor,
32847 x.c_tensor,
32848 n.c_tensor
32849 ));
32850 Ok(Tensor { c_tensor: c_tensors[0] })
32851 }
32852
32853 pub fn f_special_chebyshev_polynomial_t_x_scalar<S: Into<Scalar>>(
32854 x: S,
32855 n: &Tensor,
32856 ) -> Result<Tensor, TchError> {
32857 let mut c_tensors = [std::ptr::null_mut(); 1];
32858 unsafe_torch_err!(atg_special_chebyshev_polynomial_t_x_scalar(
32859 c_tensors.as_mut_ptr(),
32860 x.into().c_scalar,
32861 n.c_tensor
32862 ));
32863 Ok(Tensor { c_tensor: c_tensors[0] })
32864 }
32865
32866 pub fn f_special_chebyshev_polynomial_t_x_scalar_out<S: Into<Scalar>>(
32867 out: &Tensor,
32868 x: S,
32869 n: &Tensor,
32870 ) -> Result<Tensor, TchError> {
32871 let mut c_tensors = [std::ptr::null_mut(); 1];
32872 unsafe_torch_err!(atg_special_chebyshev_polynomial_t_x_scalar_out(
32873 c_tensors.as_mut_ptr(),
32874 out.c_tensor,
32875 x.into().c_scalar,
32876 n.c_tensor
32877 ));
32878 Ok(Tensor { c_tensor: c_tensors[0] })
32879 }
32880
32881 pub fn f_special_chebyshev_polynomial_u(x: &Tensor, n: &Tensor) -> Result<Tensor, TchError> {
32882 let mut c_tensors = [std::ptr::null_mut(); 1];
32883 unsafe_torch_err!(atg_special_chebyshev_polynomial_u(
32884 c_tensors.as_mut_ptr(),
32885 x.c_tensor,
32886 n.c_tensor
32887 ));
32888 Ok(Tensor { c_tensor: c_tensors[0] })
32889 }
32890
32891 pub fn f_special_chebyshev_polynomial_u_n_scalar<S: Into<Scalar>>(
32892 x: &Tensor,
32893 n: S,
32894 ) -> Result<Tensor, TchError> {
32895 let mut c_tensors = [std::ptr::null_mut(); 1];
32896 unsafe_torch_err!(atg_special_chebyshev_polynomial_u_n_scalar(
32897 c_tensors.as_mut_ptr(),
32898 x.c_tensor,
32899 n.into().c_scalar
32900 ));
32901 Ok(Tensor { c_tensor: c_tensors[0] })
32902 }
32903
32904 pub fn f_special_chebyshev_polynomial_u_n_scalar_out<S: Into<Scalar>>(
32905 out: &Tensor,
32906 x: &Tensor,
32907 n: S,
32908 ) -> Result<Tensor, TchError> {
32909 let mut c_tensors = [std::ptr::null_mut(); 1];
32910 unsafe_torch_err!(atg_special_chebyshev_polynomial_u_n_scalar_out(
32911 c_tensors.as_mut_ptr(),
32912 out.c_tensor,
32913 x.c_tensor,
32914 n.into().c_scalar
32915 ));
32916 Ok(Tensor { c_tensor: c_tensors[0] })
32917 }
32918
32919 pub fn f_special_chebyshev_polynomial_u_out(
32920 out: &Tensor,
32921 x: &Tensor,
32922 n: &Tensor,
32923 ) -> Result<Tensor, TchError> {
32924 let mut c_tensors = [std::ptr::null_mut(); 1];
32925 unsafe_torch_err!(atg_special_chebyshev_polynomial_u_out(
32926 c_tensors.as_mut_ptr(),
32927 out.c_tensor,
32928 x.c_tensor,
32929 n.c_tensor
32930 ));
32931 Ok(Tensor { c_tensor: c_tensors[0] })
32932 }
32933
32934 pub fn f_special_chebyshev_polynomial_u_x_scalar<S: Into<Scalar>>(
32935 x: S,
32936 n: &Tensor,
32937 ) -> Result<Tensor, TchError> {
32938 let mut c_tensors = [std::ptr::null_mut(); 1];
32939 unsafe_torch_err!(atg_special_chebyshev_polynomial_u_x_scalar(
32940 c_tensors.as_mut_ptr(),
32941 x.into().c_scalar,
32942 n.c_tensor
32943 ));
32944 Ok(Tensor { c_tensor: c_tensors[0] })
32945 }
32946
32947 pub fn f_special_chebyshev_polynomial_u_x_scalar_out<S: Into<Scalar>>(
32948 out: &Tensor,
32949 x: S,
32950 n: &Tensor,
32951 ) -> Result<Tensor, TchError> {
32952 let mut c_tensors = [std::ptr::null_mut(); 1];
32953 unsafe_torch_err!(atg_special_chebyshev_polynomial_u_x_scalar_out(
32954 c_tensors.as_mut_ptr(),
32955 out.c_tensor,
32956 x.into().c_scalar,
32957 n.c_tensor
32958 ));
32959 Ok(Tensor { c_tensor: c_tensors[0] })
32960 }
32961
32962 pub fn f_special_chebyshev_polynomial_v(x: &Tensor, n: &Tensor) -> Result<Tensor, TchError> {
32963 let mut c_tensors = [std::ptr::null_mut(); 1];
32964 unsafe_torch_err!(atg_special_chebyshev_polynomial_v(
32965 c_tensors.as_mut_ptr(),
32966 x.c_tensor,
32967 n.c_tensor
32968 ));
32969 Ok(Tensor { c_tensor: c_tensors[0] })
32970 }
32971
32972 pub fn f_special_chebyshev_polynomial_v_n_scalar<S: Into<Scalar>>(
32973 x: &Tensor,
32974 n: S,
32975 ) -> Result<Tensor, TchError> {
32976 let mut c_tensors = [std::ptr::null_mut(); 1];
32977 unsafe_torch_err!(atg_special_chebyshev_polynomial_v_n_scalar(
32978 c_tensors.as_mut_ptr(),
32979 x.c_tensor,
32980 n.into().c_scalar
32981 ));
32982 Ok(Tensor { c_tensor: c_tensors[0] })
32983 }
32984
32985 pub fn f_special_chebyshev_polynomial_v_n_scalar_out<S: Into<Scalar>>(
32986 out: &Tensor,
32987 x: &Tensor,
32988 n: S,
32989 ) -> Result<Tensor, TchError> {
32990 let mut c_tensors = [std::ptr::null_mut(); 1];
32991 unsafe_torch_err!(atg_special_chebyshev_polynomial_v_n_scalar_out(
32992 c_tensors.as_mut_ptr(),
32993 out.c_tensor,
32994 x.c_tensor,
32995 n.into().c_scalar
32996 ));
32997 Ok(Tensor { c_tensor: c_tensors[0] })
32998 }
32999
33000 pub fn f_special_chebyshev_polynomial_v_out(
33001 out: &Tensor,
33002 x: &Tensor,
33003 n: &Tensor,
33004 ) -> Result<Tensor, TchError> {
33005 let mut c_tensors = [std::ptr::null_mut(); 1];
33006 unsafe_torch_err!(atg_special_chebyshev_polynomial_v_out(
33007 c_tensors.as_mut_ptr(),
33008 out.c_tensor,
33009 x.c_tensor,
33010 n.c_tensor
33011 ));
33012 Ok(Tensor { c_tensor: c_tensors[0] })
33013 }
33014
33015 pub fn f_special_chebyshev_polynomial_v_x_scalar<S: Into<Scalar>>(
33016 x: S,
33017 n: &Tensor,
33018 ) -> Result<Tensor, TchError> {
33019 let mut c_tensors = [std::ptr::null_mut(); 1];
33020 unsafe_torch_err!(atg_special_chebyshev_polynomial_v_x_scalar(
33021 c_tensors.as_mut_ptr(),
33022 x.into().c_scalar,
33023 n.c_tensor
33024 ));
33025 Ok(Tensor { c_tensor: c_tensors[0] })
33026 }
33027
33028 pub fn f_special_chebyshev_polynomial_v_x_scalar_out<S: Into<Scalar>>(
33029 out: &Tensor,
33030 x: S,
33031 n: &Tensor,
33032 ) -> Result<Tensor, TchError> {
33033 let mut c_tensors = [std::ptr::null_mut(); 1];
33034 unsafe_torch_err!(atg_special_chebyshev_polynomial_v_x_scalar_out(
33035 c_tensors.as_mut_ptr(),
33036 out.c_tensor,
33037 x.into().c_scalar,
33038 n.c_tensor
33039 ));
33040 Ok(Tensor { c_tensor: c_tensors[0] })
33041 }
33042
33043 pub fn f_special_chebyshev_polynomial_w(x: &Tensor, n: &Tensor) -> Result<Tensor, TchError> {
33044 let mut c_tensors = [std::ptr::null_mut(); 1];
33045 unsafe_torch_err!(atg_special_chebyshev_polynomial_w(
33046 c_tensors.as_mut_ptr(),
33047 x.c_tensor,
33048 n.c_tensor
33049 ));
33050 Ok(Tensor { c_tensor: c_tensors[0] })
33051 }
33052
33053 pub fn f_special_chebyshev_polynomial_w_n_scalar<S: Into<Scalar>>(
33054 x: &Tensor,
33055 n: S,
33056 ) -> Result<Tensor, TchError> {
33057 let mut c_tensors = [std::ptr::null_mut(); 1];
33058 unsafe_torch_err!(atg_special_chebyshev_polynomial_w_n_scalar(
33059 c_tensors.as_mut_ptr(),
33060 x.c_tensor,
33061 n.into().c_scalar
33062 ));
33063 Ok(Tensor { c_tensor: c_tensors[0] })
33064 }
33065
33066 pub fn f_special_chebyshev_polynomial_w_n_scalar_out<S: Into<Scalar>>(
33067 out: &Tensor,
33068 x: &Tensor,
33069 n: S,
33070 ) -> Result<Tensor, TchError> {
33071 let mut c_tensors = [std::ptr::null_mut(); 1];
33072 unsafe_torch_err!(atg_special_chebyshev_polynomial_w_n_scalar_out(
33073 c_tensors.as_mut_ptr(),
33074 out.c_tensor,
33075 x.c_tensor,
33076 n.into().c_scalar
33077 ));
33078 Ok(Tensor { c_tensor: c_tensors[0] })
33079 }
33080
33081 pub fn f_special_chebyshev_polynomial_w_out(
33082 out: &Tensor,
33083 x: &Tensor,
33084 n: &Tensor,
33085 ) -> Result<Tensor, TchError> {
33086 let mut c_tensors = [std::ptr::null_mut(); 1];
33087 unsafe_torch_err!(atg_special_chebyshev_polynomial_w_out(
33088 c_tensors.as_mut_ptr(),
33089 out.c_tensor,
33090 x.c_tensor,
33091 n.c_tensor
33092 ));
33093 Ok(Tensor { c_tensor: c_tensors[0] })
33094 }
33095
33096 pub fn f_special_chebyshev_polynomial_w_x_scalar<S: Into<Scalar>>(
33097 x: S,
33098 n: &Tensor,
33099 ) -> Result<Tensor, TchError> {
33100 let mut c_tensors = [std::ptr::null_mut(); 1];
33101 unsafe_torch_err!(atg_special_chebyshev_polynomial_w_x_scalar(
33102 c_tensors.as_mut_ptr(),
33103 x.into().c_scalar,
33104 n.c_tensor
33105 ));
33106 Ok(Tensor { c_tensor: c_tensors[0] })
33107 }
33108
33109 pub fn f_special_chebyshev_polynomial_w_x_scalar_out<S: Into<Scalar>>(
33110 out: &Tensor,
33111 x: S,
33112 n: &Tensor,
33113 ) -> Result<Tensor, TchError> {
33114 let mut c_tensors = [std::ptr::null_mut(); 1];
33115 unsafe_torch_err!(atg_special_chebyshev_polynomial_w_x_scalar_out(
33116 c_tensors.as_mut_ptr(),
33117 out.c_tensor,
33118 x.into().c_scalar,
33119 n.c_tensor
33120 ));
33121 Ok(Tensor { c_tensor: c_tensors[0] })
33122 }
33123
33124 pub fn f_special_digamma(&self) -> Result<Tensor, TchError> {
33125 let mut c_tensors = [std::ptr::null_mut(); 1];
33126 unsafe_torch_err!(atg_special_digamma(c_tensors.as_mut_ptr(), self.c_tensor));
33127 Ok(Tensor { c_tensor: c_tensors[0] })
33128 }
33129
33130 pub fn f_special_digamma_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33131 let mut c_tensors = [std::ptr::null_mut(); 1];
33132 unsafe_torch_err!(atg_special_digamma_out(
33133 c_tensors.as_mut_ptr(),
33134 out.c_tensor,
33135 self.c_tensor
33136 ));
33137 Ok(Tensor { c_tensor: c_tensors[0] })
33138 }
33139
33140 pub fn f_special_entr(&self) -> Result<Tensor, TchError> {
33141 let mut c_tensors = [std::ptr::null_mut(); 1];
33142 unsafe_torch_err!(atg_special_entr(c_tensors.as_mut_ptr(), self.c_tensor));
33143 Ok(Tensor { c_tensor: c_tensors[0] })
33144 }
33145
33146 pub fn f_special_entr_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33147 let mut c_tensors = [std::ptr::null_mut(); 1];
33148 unsafe_torch_err!(atg_special_entr_out(
33149 c_tensors.as_mut_ptr(),
33150 out.c_tensor,
33151 self.c_tensor
33152 ));
33153 Ok(Tensor { c_tensor: c_tensors[0] })
33154 }
33155
33156 pub fn f_special_erf(&self) -> Result<Tensor, TchError> {
33157 let mut c_tensors = [std::ptr::null_mut(); 1];
33158 unsafe_torch_err!(atg_special_erf(c_tensors.as_mut_ptr(), self.c_tensor));
33159 Ok(Tensor { c_tensor: c_tensors[0] })
33160 }
33161
33162 pub fn f_special_erf_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33163 let mut c_tensors = [std::ptr::null_mut(); 1];
33164 unsafe_torch_err!(atg_special_erf_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
33165 Ok(Tensor { c_tensor: c_tensors[0] })
33166 }
33167
33168 pub fn f_special_erfc(&self) -> Result<Tensor, TchError> {
33169 let mut c_tensors = [std::ptr::null_mut(); 1];
33170 unsafe_torch_err!(atg_special_erfc(c_tensors.as_mut_ptr(), self.c_tensor));
33171 Ok(Tensor { c_tensor: c_tensors[0] })
33172 }
33173
33174 pub fn f_special_erfc_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33175 let mut c_tensors = [std::ptr::null_mut(); 1];
33176 unsafe_torch_err!(atg_special_erfc_out(
33177 c_tensors.as_mut_ptr(),
33178 out.c_tensor,
33179 self.c_tensor
33180 ));
33181 Ok(Tensor { c_tensor: c_tensors[0] })
33182 }
33183
33184 pub fn f_special_erfcx(&self) -> Result<Tensor, TchError> {
33185 let mut c_tensors = [std::ptr::null_mut(); 1];
33186 unsafe_torch_err!(atg_special_erfcx(c_tensors.as_mut_ptr(), self.c_tensor));
33187 Ok(Tensor { c_tensor: c_tensors[0] })
33188 }
33189
33190 pub fn f_special_erfcx_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33191 let mut c_tensors = [std::ptr::null_mut(); 1];
33192 unsafe_torch_err!(atg_special_erfcx_out(
33193 c_tensors.as_mut_ptr(),
33194 out.c_tensor,
33195 self.c_tensor
33196 ));
33197 Ok(Tensor { c_tensor: c_tensors[0] })
33198 }
33199
33200 pub fn f_special_erfinv(&self) -> Result<Tensor, TchError> {
33201 let mut c_tensors = [std::ptr::null_mut(); 1];
33202 unsafe_torch_err!(atg_special_erfinv(c_tensors.as_mut_ptr(), self.c_tensor));
33203 Ok(Tensor { c_tensor: c_tensors[0] })
33204 }
33205
33206 pub fn f_special_erfinv_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33207 let mut c_tensors = [std::ptr::null_mut(); 1];
33208 unsafe_torch_err!(atg_special_erfinv_out(
33209 c_tensors.as_mut_ptr(),
33210 out.c_tensor,
33211 self.c_tensor
33212 ));
33213 Ok(Tensor { c_tensor: c_tensors[0] })
33214 }
33215
33216 pub fn f_special_exp2(&self) -> Result<Tensor, TchError> {
33217 let mut c_tensors = [std::ptr::null_mut(); 1];
33218 unsafe_torch_err!(atg_special_exp2(c_tensors.as_mut_ptr(), self.c_tensor));
33219 Ok(Tensor { c_tensor: c_tensors[0] })
33220 }
33221
33222 pub fn f_special_exp2_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33223 let mut c_tensors = [std::ptr::null_mut(); 1];
33224 unsafe_torch_err!(atg_special_exp2_out(
33225 c_tensors.as_mut_ptr(),
33226 out.c_tensor,
33227 self.c_tensor
33228 ));
33229 Ok(Tensor { c_tensor: c_tensors[0] })
33230 }
33231
33232 pub fn f_special_expit(&self) -> Result<Tensor, TchError> {
33233 let mut c_tensors = [std::ptr::null_mut(); 1];
33234 unsafe_torch_err!(atg_special_expit(c_tensors.as_mut_ptr(), self.c_tensor));
33235 Ok(Tensor { c_tensor: c_tensors[0] })
33236 }
33237
33238 pub fn f_special_expit_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33239 let mut c_tensors = [std::ptr::null_mut(); 1];
33240 unsafe_torch_err!(atg_special_expit_out(
33241 c_tensors.as_mut_ptr(),
33242 out.c_tensor,
33243 self.c_tensor
33244 ));
33245 Ok(Tensor { c_tensor: c_tensors[0] })
33246 }
33247
33248 pub fn f_special_expm1(&self) -> Result<Tensor, TchError> {
33249 let mut c_tensors = [std::ptr::null_mut(); 1];
33250 unsafe_torch_err!(atg_special_expm1(c_tensors.as_mut_ptr(), self.c_tensor));
33251 Ok(Tensor { c_tensor: c_tensors[0] })
33252 }
33253
33254 pub fn f_special_expm1_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33255 let mut c_tensors = [std::ptr::null_mut(); 1];
33256 unsafe_torch_err!(atg_special_expm1_out(
33257 c_tensors.as_mut_ptr(),
33258 out.c_tensor,
33259 self.c_tensor
33260 ));
33261 Ok(Tensor { c_tensor: c_tensors[0] })
33262 }
33263
33264 pub fn f_special_gammainc(&self, other: &Tensor) -> Result<Tensor, TchError> {
33265 let mut c_tensors = [std::ptr::null_mut(); 1];
33266 unsafe_torch_err!(atg_special_gammainc(
33267 c_tensors.as_mut_ptr(),
33268 self.c_tensor,
33269 other.c_tensor
33270 ));
33271 Ok(Tensor { c_tensor: c_tensors[0] })
33272 }
33273
33274 pub fn f_special_gammainc_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
33275 let mut c_tensors = [std::ptr::null_mut(); 1];
33276 unsafe_torch_err!(atg_special_gammainc_out(
33277 c_tensors.as_mut_ptr(),
33278 out.c_tensor,
33279 self.c_tensor,
33280 other.c_tensor
33281 ));
33282 Ok(Tensor { c_tensor: c_tensors[0] })
33283 }
33284
33285 pub fn f_special_gammaincc(&self, other: &Tensor) -> Result<Tensor, TchError> {
33286 let mut c_tensors = [std::ptr::null_mut(); 1];
33287 unsafe_torch_err!(atg_special_gammaincc(
33288 c_tensors.as_mut_ptr(),
33289 self.c_tensor,
33290 other.c_tensor
33291 ));
33292 Ok(Tensor { c_tensor: c_tensors[0] })
33293 }
33294
33295 pub fn f_special_gammaincc_out(
33296 &self,
33297 out: &Tensor,
33298 other: &Tensor,
33299 ) -> Result<Tensor, TchError> {
33300 let mut c_tensors = [std::ptr::null_mut(); 1];
33301 unsafe_torch_err!(atg_special_gammaincc_out(
33302 c_tensors.as_mut_ptr(),
33303 out.c_tensor,
33304 self.c_tensor,
33305 other.c_tensor
33306 ));
33307 Ok(Tensor { c_tensor: c_tensors[0] })
33308 }
33309
33310 pub fn f_special_gammaln(&self) -> Result<Tensor, TchError> {
33311 let mut c_tensors = [std::ptr::null_mut(); 1];
33312 unsafe_torch_err!(atg_special_gammaln(c_tensors.as_mut_ptr(), self.c_tensor));
33313 Ok(Tensor { c_tensor: c_tensors[0] })
33314 }
33315
33316 pub fn f_special_gammaln_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33317 let mut c_tensors = [std::ptr::null_mut(); 1];
33318 unsafe_torch_err!(atg_special_gammaln_out(
33319 c_tensors.as_mut_ptr(),
33320 out.c_tensor,
33321 self.c_tensor
33322 ));
33323 Ok(Tensor { c_tensor: c_tensors[0] })
33324 }
33325
33326 pub fn f_special_hermite_polynomial_h(x: &Tensor, n: &Tensor) -> Result<Tensor, TchError> {
33327 let mut c_tensors = [std::ptr::null_mut(); 1];
33328 unsafe_torch_err!(atg_special_hermite_polynomial_h(
33329 c_tensors.as_mut_ptr(),
33330 x.c_tensor,
33331 n.c_tensor
33332 ));
33333 Ok(Tensor { c_tensor: c_tensors[0] })
33334 }
33335
33336 pub fn f_special_hermite_polynomial_h_n_scalar<S: Into<Scalar>>(
33337 x: &Tensor,
33338 n: S,
33339 ) -> Result<Tensor, TchError> {
33340 let mut c_tensors = [std::ptr::null_mut(); 1];
33341 unsafe_torch_err!(atg_special_hermite_polynomial_h_n_scalar(
33342 c_tensors.as_mut_ptr(),
33343 x.c_tensor,
33344 n.into().c_scalar
33345 ));
33346 Ok(Tensor { c_tensor: c_tensors[0] })
33347 }
33348
33349 pub fn f_special_hermite_polynomial_h_n_scalar_out<S: Into<Scalar>>(
33350 out: &Tensor,
33351 x: &Tensor,
33352 n: S,
33353 ) -> Result<Tensor, TchError> {
33354 let mut c_tensors = [std::ptr::null_mut(); 1];
33355 unsafe_torch_err!(atg_special_hermite_polynomial_h_n_scalar_out(
33356 c_tensors.as_mut_ptr(),
33357 out.c_tensor,
33358 x.c_tensor,
33359 n.into().c_scalar
33360 ));
33361 Ok(Tensor { c_tensor: c_tensors[0] })
33362 }
33363
33364 pub fn f_special_hermite_polynomial_h_out(
33365 out: &Tensor,
33366 x: &Tensor,
33367 n: &Tensor,
33368 ) -> Result<Tensor, TchError> {
33369 let mut c_tensors = [std::ptr::null_mut(); 1];
33370 unsafe_torch_err!(atg_special_hermite_polynomial_h_out(
33371 c_tensors.as_mut_ptr(),
33372 out.c_tensor,
33373 x.c_tensor,
33374 n.c_tensor
33375 ));
33376 Ok(Tensor { c_tensor: c_tensors[0] })
33377 }
33378
33379 pub fn f_special_hermite_polynomial_h_x_scalar<S: Into<Scalar>>(
33380 x: S,
33381 n: &Tensor,
33382 ) -> Result<Tensor, TchError> {
33383 let mut c_tensors = [std::ptr::null_mut(); 1];
33384 unsafe_torch_err!(atg_special_hermite_polynomial_h_x_scalar(
33385 c_tensors.as_mut_ptr(),
33386 x.into().c_scalar,
33387 n.c_tensor
33388 ));
33389 Ok(Tensor { c_tensor: c_tensors[0] })
33390 }
33391
33392 pub fn f_special_hermite_polynomial_h_x_scalar_out<S: Into<Scalar>>(
33393 out: &Tensor,
33394 x: S,
33395 n: &Tensor,
33396 ) -> Result<Tensor, TchError> {
33397 let mut c_tensors = [std::ptr::null_mut(); 1];
33398 unsafe_torch_err!(atg_special_hermite_polynomial_h_x_scalar_out(
33399 c_tensors.as_mut_ptr(),
33400 out.c_tensor,
33401 x.into().c_scalar,
33402 n.c_tensor
33403 ));
33404 Ok(Tensor { c_tensor: c_tensors[0] })
33405 }
33406
33407 pub fn f_special_hermite_polynomial_he(x: &Tensor, n: &Tensor) -> Result<Tensor, TchError> {
33408 let mut c_tensors = [std::ptr::null_mut(); 1];
33409 unsafe_torch_err!(atg_special_hermite_polynomial_he(
33410 c_tensors.as_mut_ptr(),
33411 x.c_tensor,
33412 n.c_tensor
33413 ));
33414 Ok(Tensor { c_tensor: c_tensors[0] })
33415 }
33416
33417 pub fn f_special_hermite_polynomial_he_n_scalar<S: Into<Scalar>>(
33418 x: &Tensor,
33419 n: S,
33420 ) -> Result<Tensor, TchError> {
33421 let mut c_tensors = [std::ptr::null_mut(); 1];
33422 unsafe_torch_err!(atg_special_hermite_polynomial_he_n_scalar(
33423 c_tensors.as_mut_ptr(),
33424 x.c_tensor,
33425 n.into().c_scalar
33426 ));
33427 Ok(Tensor { c_tensor: c_tensors[0] })
33428 }
33429
33430 pub fn f_special_hermite_polynomial_he_n_scalar_out<S: Into<Scalar>>(
33431 out: &Tensor,
33432 x: &Tensor,
33433 n: S,
33434 ) -> Result<Tensor, TchError> {
33435 let mut c_tensors = [std::ptr::null_mut(); 1];
33436 unsafe_torch_err!(atg_special_hermite_polynomial_he_n_scalar_out(
33437 c_tensors.as_mut_ptr(),
33438 out.c_tensor,
33439 x.c_tensor,
33440 n.into().c_scalar
33441 ));
33442 Ok(Tensor { c_tensor: c_tensors[0] })
33443 }
33444
33445 pub fn f_special_hermite_polynomial_he_out(
33446 out: &Tensor,
33447 x: &Tensor,
33448 n: &Tensor,
33449 ) -> Result<Tensor, TchError> {
33450 let mut c_tensors = [std::ptr::null_mut(); 1];
33451 unsafe_torch_err!(atg_special_hermite_polynomial_he_out(
33452 c_tensors.as_mut_ptr(),
33453 out.c_tensor,
33454 x.c_tensor,
33455 n.c_tensor
33456 ));
33457 Ok(Tensor { c_tensor: c_tensors[0] })
33458 }
33459
33460 pub fn f_special_hermite_polynomial_he_x_scalar<S: Into<Scalar>>(
33461 x: S,
33462 n: &Tensor,
33463 ) -> Result<Tensor, TchError> {
33464 let mut c_tensors = [std::ptr::null_mut(); 1];
33465 unsafe_torch_err!(atg_special_hermite_polynomial_he_x_scalar(
33466 c_tensors.as_mut_ptr(),
33467 x.into().c_scalar,
33468 n.c_tensor
33469 ));
33470 Ok(Tensor { c_tensor: c_tensors[0] })
33471 }
33472
33473 pub fn f_special_hermite_polynomial_he_x_scalar_out<S: Into<Scalar>>(
33474 out: &Tensor,
33475 x: S,
33476 n: &Tensor,
33477 ) -> Result<Tensor, TchError> {
33478 let mut c_tensors = [std::ptr::null_mut(); 1];
33479 unsafe_torch_err!(atg_special_hermite_polynomial_he_x_scalar_out(
33480 c_tensors.as_mut_ptr(),
33481 out.c_tensor,
33482 x.into().c_scalar,
33483 n.c_tensor
33484 ));
33485 Ok(Tensor { c_tensor: c_tensors[0] })
33486 }
33487
33488 pub fn f_special_i0(&self) -> Result<Tensor, TchError> {
33489 let mut c_tensors = [std::ptr::null_mut(); 1];
33490 unsafe_torch_err!(atg_special_i0(c_tensors.as_mut_ptr(), self.c_tensor));
33491 Ok(Tensor { c_tensor: c_tensors[0] })
33492 }
33493
33494 pub fn f_special_i0_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33495 let mut c_tensors = [std::ptr::null_mut(); 1];
33496 unsafe_torch_err!(atg_special_i0_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
33497 Ok(Tensor { c_tensor: c_tensors[0] })
33498 }
33499
33500 pub fn f_special_i0e(&self) -> Result<Tensor, TchError> {
33501 let mut c_tensors = [std::ptr::null_mut(); 1];
33502 unsafe_torch_err!(atg_special_i0e(c_tensors.as_mut_ptr(), self.c_tensor));
33503 Ok(Tensor { c_tensor: c_tensors[0] })
33504 }
33505
33506 pub fn f_special_i0e_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33507 let mut c_tensors = [std::ptr::null_mut(); 1];
33508 unsafe_torch_err!(atg_special_i0e_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
33509 Ok(Tensor { c_tensor: c_tensors[0] })
33510 }
33511
33512 pub fn f_special_i1(&self) -> Result<Tensor, TchError> {
33513 let mut c_tensors = [std::ptr::null_mut(); 1];
33514 unsafe_torch_err!(atg_special_i1(c_tensors.as_mut_ptr(), self.c_tensor));
33515 Ok(Tensor { c_tensor: c_tensors[0] })
33516 }
33517
33518 pub fn f_special_i1_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33519 let mut c_tensors = [std::ptr::null_mut(); 1];
33520 unsafe_torch_err!(atg_special_i1_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
33521 Ok(Tensor { c_tensor: c_tensors[0] })
33522 }
33523
33524 pub fn f_special_i1e(&self) -> Result<Tensor, TchError> {
33525 let mut c_tensors = [std::ptr::null_mut(); 1];
33526 unsafe_torch_err!(atg_special_i1e(c_tensors.as_mut_ptr(), self.c_tensor));
33527 Ok(Tensor { c_tensor: c_tensors[0] })
33528 }
33529
33530 pub fn f_special_i1e_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33531 let mut c_tensors = [std::ptr::null_mut(); 1];
33532 unsafe_torch_err!(atg_special_i1e_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
33533 Ok(Tensor { c_tensor: c_tensors[0] })
33534 }
33535
33536 pub fn f_special_laguerre_polynomial_l(x: &Tensor, n: &Tensor) -> Result<Tensor, TchError> {
33537 let mut c_tensors = [std::ptr::null_mut(); 1];
33538 unsafe_torch_err!(atg_special_laguerre_polynomial_l(
33539 c_tensors.as_mut_ptr(),
33540 x.c_tensor,
33541 n.c_tensor
33542 ));
33543 Ok(Tensor { c_tensor: c_tensors[0] })
33544 }
33545
33546 pub fn f_special_laguerre_polynomial_l_n_scalar<S: Into<Scalar>>(
33547 x: &Tensor,
33548 n: S,
33549 ) -> Result<Tensor, TchError> {
33550 let mut c_tensors = [std::ptr::null_mut(); 1];
33551 unsafe_torch_err!(atg_special_laguerre_polynomial_l_n_scalar(
33552 c_tensors.as_mut_ptr(),
33553 x.c_tensor,
33554 n.into().c_scalar
33555 ));
33556 Ok(Tensor { c_tensor: c_tensors[0] })
33557 }
33558
33559 pub fn f_special_laguerre_polynomial_l_n_scalar_out<S: Into<Scalar>>(
33560 out: &Tensor,
33561 x: &Tensor,
33562 n: S,
33563 ) -> Result<Tensor, TchError> {
33564 let mut c_tensors = [std::ptr::null_mut(); 1];
33565 unsafe_torch_err!(atg_special_laguerre_polynomial_l_n_scalar_out(
33566 c_tensors.as_mut_ptr(),
33567 out.c_tensor,
33568 x.c_tensor,
33569 n.into().c_scalar
33570 ));
33571 Ok(Tensor { c_tensor: c_tensors[0] })
33572 }
33573
33574 pub fn f_special_laguerre_polynomial_l_out(
33575 out: &Tensor,
33576 x: &Tensor,
33577 n: &Tensor,
33578 ) -> Result<Tensor, TchError> {
33579 let mut c_tensors = [std::ptr::null_mut(); 1];
33580 unsafe_torch_err!(atg_special_laguerre_polynomial_l_out(
33581 c_tensors.as_mut_ptr(),
33582 out.c_tensor,
33583 x.c_tensor,
33584 n.c_tensor
33585 ));
33586 Ok(Tensor { c_tensor: c_tensors[0] })
33587 }
33588
33589 pub fn f_special_laguerre_polynomial_l_x_scalar<S: Into<Scalar>>(
33590 x: S,
33591 n: &Tensor,
33592 ) -> Result<Tensor, TchError> {
33593 let mut c_tensors = [std::ptr::null_mut(); 1];
33594 unsafe_torch_err!(atg_special_laguerre_polynomial_l_x_scalar(
33595 c_tensors.as_mut_ptr(),
33596 x.into().c_scalar,
33597 n.c_tensor
33598 ));
33599 Ok(Tensor { c_tensor: c_tensors[0] })
33600 }
33601
33602 pub fn f_special_laguerre_polynomial_l_x_scalar_out<S: Into<Scalar>>(
33603 out: &Tensor,
33604 x: S,
33605 n: &Tensor,
33606 ) -> Result<Tensor, TchError> {
33607 let mut c_tensors = [std::ptr::null_mut(); 1];
33608 unsafe_torch_err!(atg_special_laguerre_polynomial_l_x_scalar_out(
33609 c_tensors.as_mut_ptr(),
33610 out.c_tensor,
33611 x.into().c_scalar,
33612 n.c_tensor
33613 ));
33614 Ok(Tensor { c_tensor: c_tensors[0] })
33615 }
33616
33617 pub fn f_special_legendre_polynomial_p(x: &Tensor, n: &Tensor) -> Result<Tensor, TchError> {
33618 let mut c_tensors = [std::ptr::null_mut(); 1];
33619 unsafe_torch_err!(atg_special_legendre_polynomial_p(
33620 c_tensors.as_mut_ptr(),
33621 x.c_tensor,
33622 n.c_tensor
33623 ));
33624 Ok(Tensor { c_tensor: c_tensors[0] })
33625 }
33626
33627 pub fn f_special_legendre_polynomial_p_n_scalar<S: Into<Scalar>>(
33628 x: &Tensor,
33629 n: S,
33630 ) -> Result<Tensor, TchError> {
33631 let mut c_tensors = [std::ptr::null_mut(); 1];
33632 unsafe_torch_err!(atg_special_legendre_polynomial_p_n_scalar(
33633 c_tensors.as_mut_ptr(),
33634 x.c_tensor,
33635 n.into().c_scalar
33636 ));
33637 Ok(Tensor { c_tensor: c_tensors[0] })
33638 }
33639
33640 pub fn f_special_legendre_polynomial_p_n_scalar_out<S: Into<Scalar>>(
33641 out: &Tensor,
33642 x: &Tensor,
33643 n: S,
33644 ) -> Result<Tensor, TchError> {
33645 let mut c_tensors = [std::ptr::null_mut(); 1];
33646 unsafe_torch_err!(atg_special_legendre_polynomial_p_n_scalar_out(
33647 c_tensors.as_mut_ptr(),
33648 out.c_tensor,
33649 x.c_tensor,
33650 n.into().c_scalar
33651 ));
33652 Ok(Tensor { c_tensor: c_tensors[0] })
33653 }
33654
33655 pub fn f_special_legendre_polynomial_p_out(
33656 out: &Tensor,
33657 x: &Tensor,
33658 n: &Tensor,
33659 ) -> Result<Tensor, TchError> {
33660 let mut c_tensors = [std::ptr::null_mut(); 1];
33661 unsafe_torch_err!(atg_special_legendre_polynomial_p_out(
33662 c_tensors.as_mut_ptr(),
33663 out.c_tensor,
33664 x.c_tensor,
33665 n.c_tensor
33666 ));
33667 Ok(Tensor { c_tensor: c_tensors[0] })
33668 }
33669
33670 pub fn f_special_legendre_polynomial_p_x_scalar<S: Into<Scalar>>(
33671 x: S,
33672 n: &Tensor,
33673 ) -> Result<Tensor, TchError> {
33674 let mut c_tensors = [std::ptr::null_mut(); 1];
33675 unsafe_torch_err!(atg_special_legendre_polynomial_p_x_scalar(
33676 c_tensors.as_mut_ptr(),
33677 x.into().c_scalar,
33678 n.c_tensor
33679 ));
33680 Ok(Tensor { c_tensor: c_tensors[0] })
33681 }
33682
33683 pub fn f_special_legendre_polynomial_p_x_scalar_out<S: Into<Scalar>>(
33684 out: &Tensor,
33685 x: S,
33686 n: &Tensor,
33687 ) -> Result<Tensor, TchError> {
33688 let mut c_tensors = [std::ptr::null_mut(); 1];
33689 unsafe_torch_err!(atg_special_legendre_polynomial_p_x_scalar_out(
33690 c_tensors.as_mut_ptr(),
33691 out.c_tensor,
33692 x.into().c_scalar,
33693 n.c_tensor
33694 ));
33695 Ok(Tensor { c_tensor: c_tensors[0] })
33696 }
33697
33698 pub fn f_special_log1p(&self) -> Result<Tensor, TchError> {
33699 let mut c_tensors = [std::ptr::null_mut(); 1];
33700 unsafe_torch_err!(atg_special_log1p(c_tensors.as_mut_ptr(), self.c_tensor));
33701 Ok(Tensor { c_tensor: c_tensors[0] })
33702 }
33703
33704 pub fn f_special_log1p_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33705 let mut c_tensors = [std::ptr::null_mut(); 1];
33706 unsafe_torch_err!(atg_special_log1p_out(
33707 c_tensors.as_mut_ptr(),
33708 out.c_tensor,
33709 self.c_tensor
33710 ));
33711 Ok(Tensor { c_tensor: c_tensors[0] })
33712 }
33713
33714 pub fn f_special_log_ndtr(&self) -> Result<Tensor, TchError> {
33715 let mut c_tensors = [std::ptr::null_mut(); 1];
33716 unsafe_torch_err!(atg_special_log_ndtr(c_tensors.as_mut_ptr(), self.c_tensor));
33717 Ok(Tensor { c_tensor: c_tensors[0] })
33718 }
33719
33720 pub fn f_special_log_ndtr_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33721 let mut c_tensors = [std::ptr::null_mut(); 1];
33722 unsafe_torch_err!(atg_special_log_ndtr_out(
33723 c_tensors.as_mut_ptr(),
33724 out.c_tensor,
33725 self.c_tensor
33726 ));
33727 Ok(Tensor { c_tensor: c_tensors[0] })
33728 }
33729
33730 pub fn f_special_log_softmax(
33731 &self,
33732 dim: i64,
33733 dtype: impl Into<Option<Kind>>,
33734 ) -> Result<Tensor, TchError> {
33735 let mut c_tensors = [std::ptr::null_mut(); 1];
33736 unsafe_torch_err!(atg_special_log_softmax(
33737 c_tensors.as_mut_ptr(),
33738 self.c_tensor,
33739 dim,
33740 dtype.into().map_or(-1, |s| s.c_int())
33741 ));
33742 Ok(Tensor { c_tensor: c_tensors[0] })
33743 }
33744
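    // Illustrative sketch: `eps` is `impl Into<Option<f64>>`; a `None` value is
    // encoded on the C side as a NaN sentinel plus a "no eps" flag, as the call
    // below shows. `p` is a hypothetical tensor of probabilities in [0, 1].
    //
    //     let logits = p.f_special_logit(1e-6)?;            // clamp with eps
    //     let logits_raw = p.f_special_logit(None::<f64>)?; // no clamping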
33745 pub fn f_special_logit(&self, eps: impl Into<Option<f64>>) -> Result<Tensor, TchError> {
33746 let eps = eps.into();
33747 let mut c_tensors = [std::ptr::null_mut(); 1];
33748 unsafe_torch_err!(atg_special_logit(
33749 c_tensors.as_mut_ptr(),
33750 self.c_tensor,
33751 eps.unwrap_or(std::f64::NAN),
33752 eps.is_none() as i8
33753 ));
33754 Ok(Tensor { c_tensor: c_tensors[0] })
33755 }
33756
33757 pub fn f_special_logit_out(
33758 &self,
33759 out: &Tensor,
33760 eps: impl Into<Option<f64>>,
33761 ) -> Result<Tensor, TchError> {
33762 let eps = eps.into();
33763 let mut c_tensors = [std::ptr::null_mut(); 1];
33764 unsafe_torch_err!(atg_special_logit_out(
33765 c_tensors.as_mut_ptr(),
33766 out.c_tensor,
33767 self.c_tensor,
33768 eps.unwrap_or(std::f64::NAN),
33769 eps.is_none() as i8
33770 ));
33771 Ok(Tensor { c_tensor: c_tensors[0] })
33772 }
33773
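    // Illustrative sketch: `dim` is an `impl IntList`, so a slice of i64 axes
    // works, and `keepdim` is passed to C as 0/1. Assumes a 2-D tensor `t`
    // built elsewhere and that `&[i64]` implements `IntList` as it does in the
    // rest of this crate.
    //
    //     let lse = t.f_special_logsumexp(&[0i64, 1][..], true)?;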
33774 pub fn f_special_logsumexp(
33775 &self,
33776 dim: impl IntList,
33777 keepdim: bool,
33778 ) -> Result<Tensor, TchError> {
33779 let mut c_tensors = [std::ptr::null_mut(); 1];
33780 unsafe_torch_err!(atg_special_logsumexp(
33781 c_tensors.as_mut_ptr(),
33782 self.c_tensor,
33783 dim.as_ptr(),
33784 dim.len_i32(),
33785 if keepdim { 1 } else { 0 }
33786 ));
33787 Ok(Tensor { c_tensor: c_tensors[0] })
33788 }
33789
33790 pub fn f_special_logsumexp_out(
33791 &self,
33792 out: &Tensor,
33793 dim: impl IntList,
33794 keepdim: bool,
33795 ) -> Result<Tensor, TchError> {
33796 let mut c_tensors = [std::ptr::null_mut(); 1];
33797 unsafe_torch_err!(atg_special_logsumexp_out(
33798 c_tensors.as_mut_ptr(),
33799 out.c_tensor,
33800 self.c_tensor,
33801 dim.as_ptr(),
33802 dim.len_i32(),
33803 if keepdim { 1 } else { 0 }
33804 ));
33805 Ok(Tensor { c_tensor: c_tensors[0] })
33806 }
33807
33808 pub fn f_special_modified_bessel_i0(&self) -> Result<Tensor, TchError> {
33809 let mut c_tensors = [std::ptr::null_mut(); 1];
33810 unsafe_torch_err!(atg_special_modified_bessel_i0(c_tensors.as_mut_ptr(), self.c_tensor));
33811 Ok(Tensor { c_tensor: c_tensors[0] })
33812 }
33813
33814 pub fn f_special_modified_bessel_i0_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33815 let mut c_tensors = [std::ptr::null_mut(); 1];
33816 unsafe_torch_err!(atg_special_modified_bessel_i0_out(
33817 c_tensors.as_mut_ptr(),
33818 out.c_tensor,
33819 self.c_tensor
33820 ));
33821 Ok(Tensor { c_tensor: c_tensors[0] })
33822 }
33823
33824 pub fn f_special_modified_bessel_i1(&self) -> Result<Tensor, TchError> {
33825 let mut c_tensors = [std::ptr::null_mut(); 1];
33826 unsafe_torch_err!(atg_special_modified_bessel_i1(c_tensors.as_mut_ptr(), self.c_tensor));
33827 Ok(Tensor { c_tensor: c_tensors[0] })
33828 }
33829
33830 pub fn f_special_modified_bessel_i1_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33831 let mut c_tensors = [std::ptr::null_mut(); 1];
33832 unsafe_torch_err!(atg_special_modified_bessel_i1_out(
33833 c_tensors.as_mut_ptr(),
33834 out.c_tensor,
33835 self.c_tensor
33836 ));
33837 Ok(Tensor { c_tensor: c_tensors[0] })
33838 }
33839
33840 pub fn f_special_modified_bessel_k0(&self) -> Result<Tensor, TchError> {
33841 let mut c_tensors = [std::ptr::null_mut(); 1];
33842 unsafe_torch_err!(atg_special_modified_bessel_k0(c_tensors.as_mut_ptr(), self.c_tensor));
33843 Ok(Tensor { c_tensor: c_tensors[0] })
33844 }
33845
33846 pub fn f_special_modified_bessel_k0_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33847 let mut c_tensors = [std::ptr::null_mut(); 1];
33848 unsafe_torch_err!(atg_special_modified_bessel_k0_out(
33849 c_tensors.as_mut_ptr(),
33850 out.c_tensor,
33851 self.c_tensor
33852 ));
33853 Ok(Tensor { c_tensor: c_tensors[0] })
33854 }
33855
33856 pub fn f_special_modified_bessel_k1(&self) -> Result<Tensor, TchError> {
33857 let mut c_tensors = [std::ptr::null_mut(); 1];
33858 unsafe_torch_err!(atg_special_modified_bessel_k1(c_tensors.as_mut_ptr(), self.c_tensor));
33859 Ok(Tensor { c_tensor: c_tensors[0] })
33860 }
33861
33862 pub fn f_special_modified_bessel_k1_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33863 let mut c_tensors = [std::ptr::null_mut(); 1];
33864 unsafe_torch_err!(atg_special_modified_bessel_k1_out(
33865 c_tensors.as_mut_ptr(),
33866 out.c_tensor,
33867 self.c_tensor
33868 ));
33869 Ok(Tensor { c_tensor: c_tensors[0] })
33870 }
33871
33872 pub fn f_special_multigammaln(&self, p: i64) -> Result<Tensor, TchError> {
33873 let mut c_tensors = [std::ptr::null_mut(); 1];
33874 unsafe_torch_err!(atg_special_multigammaln(c_tensors.as_mut_ptr(), self.c_tensor, p));
33875 Ok(Tensor { c_tensor: c_tensors[0] })
33876 }
33877
33878 pub fn f_special_multigammaln_out(&self, out: &Tensor, p: i64) -> Result<Tensor, TchError> {
33879 let mut c_tensors = [std::ptr::null_mut(); 1];
33880 unsafe_torch_err!(atg_special_multigammaln_out(
33881 c_tensors.as_mut_ptr(),
33882 out.c_tensor,
33883 self.c_tensor,
33884 p
33885 ));
33886 Ok(Tensor { c_tensor: c_tensors[0] })
33887 }
33888
33889 pub fn f_special_ndtr(&self) -> Result<Tensor, TchError> {
33890 let mut c_tensors = [std::ptr::null_mut(); 1];
33891 unsafe_torch_err!(atg_special_ndtr(c_tensors.as_mut_ptr(), self.c_tensor));
33892 Ok(Tensor { c_tensor: c_tensors[0] })
33893 }
33894
33895 pub fn f_special_ndtr_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33896 let mut c_tensors = [std::ptr::null_mut(); 1];
33897 unsafe_torch_err!(atg_special_ndtr_out(
33898 c_tensors.as_mut_ptr(),
33899 out.c_tensor,
33900 self.c_tensor
33901 ));
33902 Ok(Tensor { c_tensor: c_tensors[0] })
33903 }
33904
33905 pub fn f_special_ndtri(&self) -> Result<Tensor, TchError> {
33906 let mut c_tensors = [std::ptr::null_mut(); 1];
33907 unsafe_torch_err!(atg_special_ndtri(c_tensors.as_mut_ptr(), self.c_tensor));
33908 Ok(Tensor { c_tensor: c_tensors[0] })
33909 }
33910
33911 pub fn f_special_ndtri_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33912 let mut c_tensors = [std::ptr::null_mut(); 1];
33913 unsafe_torch_err!(atg_special_ndtri_out(
33914 c_tensors.as_mut_ptr(),
33915 out.c_tensor,
33916 self.c_tensor
33917 ));
33918 Ok(Tensor { c_tensor: c_tensors[0] })
33919 }
33920
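    /// Elementwise n-th derivative of the digamma function (torch.special.polygamma).
    /// Note that the underlying C call takes `n` before the input tensor, matching the ATen signature.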
33921 pub fn f_special_polygamma(&self, n: i64) -> Result<Tensor, TchError> {
33922 let mut c_tensors = [std::ptr::null_mut(); 1];
33923 unsafe_torch_err!(atg_special_polygamma(c_tensors.as_mut_ptr(), n, self.c_tensor));
33924 Ok(Tensor { c_tensor: c_tensors[0] })
33925 }
33926
33927 pub fn f_special_polygamma_out(&self, out: &Tensor, n: i64) -> Result<Tensor, TchError> {
33928 let mut c_tensors = [std::ptr::null_mut(); 1];
33929 unsafe_torch_err!(atg_special_polygamma_out(
33930 c_tensors.as_mut_ptr(),
33931 out.c_tensor,
33932 n,
33933 self.c_tensor
33934 ));
33935 Ok(Tensor { c_tensor: c_tensors[0] })
33936 }
33937
33938 pub fn f_special_psi(&self) -> Result<Tensor, TchError> {
33939 let mut c_tensors = [std::ptr::null_mut(); 1];
33940 unsafe_torch_err!(atg_special_psi(c_tensors.as_mut_ptr(), self.c_tensor));
33941 Ok(Tensor { c_tensor: c_tensors[0] })
33942 }
33943
33944 pub fn f_special_psi_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
33945 let mut c_tensors = [std::ptr::null_mut(); 1];
33946 unsafe_torch_err!(atg_special_psi_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
33947 Ok(Tensor { c_tensor: c_tensors[0] })
33948 }
33949
33950 pub fn f_special_round(&self, decimals: i64) -> Result<Tensor, TchError> {
33951 let mut c_tensors = [std::ptr::null_mut(); 1];
33952 unsafe_torch_err!(atg_special_round(c_tensors.as_mut_ptr(), self.c_tensor, decimals));
33953 Ok(Tensor { c_tensor: c_tensors[0] })
33954 }
33955
33956 pub fn f_special_round_out(&self, out: &Tensor, decimals: i64) -> Result<Tensor, TchError> {
33957 let mut c_tensors = [std::ptr::null_mut(); 1];
33958 unsafe_torch_err!(atg_special_round_out(
33959 c_tensors.as_mut_ptr(),
33960 out.c_tensor,
33961 self.c_tensor,
33962 decimals
33963 ));
33964 Ok(Tensor { c_tensor: c_tensors[0] })
33965 }
33966
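    // Exponentially scaled modified Bessel functions of the second kind, i.e. roughly
    // exp(x) * K0(x) and exp(x) * K1(x). These are associated functions that take the input `x`
    // explicitly rather than `self`.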
33967 pub fn f_special_scaled_modified_bessel_k0(x: &Tensor) -> Result<Tensor, TchError> {
33968 let mut c_tensors = [std::ptr::null_mut(); 1];
33969 unsafe_torch_err!(atg_special_scaled_modified_bessel_k0(
33970 c_tensors.as_mut_ptr(),
33971 x.c_tensor
33972 ));
33973 Ok(Tensor { c_tensor: c_tensors[0] })
33974 }
33975
33976 pub fn f_special_scaled_modified_bessel_k0_out(
33977 out: &Tensor,
33978 x: &Tensor,
33979 ) -> Result<Tensor, TchError> {
33980 let mut c_tensors = [std::ptr::null_mut(); 1];
33981 unsafe_torch_err!(atg_special_scaled_modified_bessel_k0_out(
33982 c_tensors.as_mut_ptr(),
33983 out.c_tensor,
33984 x.c_tensor
33985 ));
33986 Ok(Tensor { c_tensor: c_tensors[0] })
33987 }
33988
33989 pub fn f_special_scaled_modified_bessel_k1(x: &Tensor) -> Result<Tensor, TchError> {
33990 let mut c_tensors = [std::ptr::null_mut(); 1];
33991 unsafe_torch_err!(atg_special_scaled_modified_bessel_k1(
33992 c_tensors.as_mut_ptr(),
33993 x.c_tensor
33994 ));
33995 Ok(Tensor { c_tensor: c_tensors[0] })
33996 }
33997
33998 pub fn f_special_scaled_modified_bessel_k1_out(
33999 out: &Tensor,
34000 x: &Tensor,
34001 ) -> Result<Tensor, TchError> {
34002 let mut c_tensors = [std::ptr::null_mut(); 1];
34003 unsafe_torch_err!(atg_special_scaled_modified_bessel_k1_out(
34004 c_tensors.as_mut_ptr(),
34005 out.c_tensor,
34006 x.c_tensor
34007 ));
34008 Ok(Tensor { c_tensor: c_tensors[0] })
34009 }
34010
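    // Shifted Chebyshev polynomial wrappers: the `_t`, `_u`, `_v` and `_w` families evaluate the
    // shifted polynomials of the first, second, third and fourth kind at `x` for order `n`.
    // `_n_scalar` / `_x_scalar` variants take the corresponding argument as a scalar, and `_out`
    // variants write into the provided output tensor.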
34011 pub fn f_special_shifted_chebyshev_polynomial_t(
34012 x: &Tensor,
34013 n: &Tensor,
34014 ) -> Result<Tensor, TchError> {
34015 let mut c_tensors = [std::ptr::null_mut(); 1];
34016 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_t(
34017 c_tensors.as_mut_ptr(),
34018 x.c_tensor,
34019 n.c_tensor
34020 ));
34021 Ok(Tensor { c_tensor: c_tensors[0] })
34022 }
34023
34024 pub fn f_special_shifted_chebyshev_polynomial_t_n_scalar<S: Into<Scalar>>(
34025 x: &Tensor,
34026 n: S,
34027 ) -> Result<Tensor, TchError> {
34028 let mut c_tensors = [std::ptr::null_mut(); 1];
34029 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_t_n_scalar(
34030 c_tensors.as_mut_ptr(),
34031 x.c_tensor,
34032 n.into().c_scalar
34033 ));
34034 Ok(Tensor { c_tensor: c_tensors[0] })
34035 }
34036
34037 pub fn f_special_shifted_chebyshev_polynomial_t_n_scalar_out<S: Into<Scalar>>(
34038 out: &Tensor,
34039 x: &Tensor,
34040 n: S,
34041 ) -> Result<Tensor, TchError> {
34042 let mut c_tensors = [std::ptr::null_mut(); 1];
34043 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_t_n_scalar_out(
34044 c_tensors.as_mut_ptr(),
34045 out.c_tensor,
34046 x.c_tensor,
34047 n.into().c_scalar
34048 ));
34049 Ok(Tensor { c_tensor: c_tensors[0] })
34050 }
34051
34052 pub fn f_special_shifted_chebyshev_polynomial_t_out(
34053 out: &Tensor,
34054 x: &Tensor,
34055 n: &Tensor,
34056 ) -> Result<Tensor, TchError> {
34057 let mut c_tensors = [std::ptr::null_mut(); 1];
34058 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_t_out(
34059 c_tensors.as_mut_ptr(),
34060 out.c_tensor,
34061 x.c_tensor,
34062 n.c_tensor
34063 ));
34064 Ok(Tensor { c_tensor: c_tensors[0] })
34065 }
34066
34067 pub fn f_special_shifted_chebyshev_polynomial_t_x_scalar<S: Into<Scalar>>(
34068 x: S,
34069 n: &Tensor,
34070 ) -> Result<Tensor, TchError> {
34071 let mut c_tensors = [std::ptr::null_mut(); 1];
34072 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_t_x_scalar(
34073 c_tensors.as_mut_ptr(),
34074 x.into().c_scalar,
34075 n.c_tensor
34076 ));
34077 Ok(Tensor { c_tensor: c_tensors[0] })
34078 }
34079
34080 pub fn f_special_shifted_chebyshev_polynomial_t_x_scalar_out<S: Into<Scalar>>(
34081 out: &Tensor,
34082 x: S,
34083 n: &Tensor,
34084 ) -> Result<Tensor, TchError> {
34085 let mut c_tensors = [std::ptr::null_mut(); 1];
34086 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_t_x_scalar_out(
34087 c_tensors.as_mut_ptr(),
34088 out.c_tensor,
34089 x.into().c_scalar,
34090 n.c_tensor
34091 ));
34092 Ok(Tensor { c_tensor: c_tensors[0] })
34093 }
34094
34095 pub fn f_special_shifted_chebyshev_polynomial_u(
34096 x: &Tensor,
34097 n: &Tensor,
34098 ) -> Result<Tensor, TchError> {
34099 let mut c_tensors = [std::ptr::null_mut(); 1];
34100 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_u(
34101 c_tensors.as_mut_ptr(),
34102 x.c_tensor,
34103 n.c_tensor
34104 ));
34105 Ok(Tensor { c_tensor: c_tensors[0] })
34106 }
34107
34108 pub fn f_special_shifted_chebyshev_polynomial_u_n_scalar<S: Into<Scalar>>(
34109 x: &Tensor,
34110 n: S,
34111 ) -> Result<Tensor, TchError> {
34112 let mut c_tensors = [std::ptr::null_mut(); 1];
34113 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_u_n_scalar(
34114 c_tensors.as_mut_ptr(),
34115 x.c_tensor,
34116 n.into().c_scalar
34117 ));
34118 Ok(Tensor { c_tensor: c_tensors[0] })
34119 }
34120
34121 pub fn f_special_shifted_chebyshev_polynomial_u_n_scalar_out<S: Into<Scalar>>(
34122 out: &Tensor,
34123 x: &Tensor,
34124 n: S,
34125 ) -> Result<Tensor, TchError> {
34126 let mut c_tensors = [std::ptr::null_mut(); 1];
34127 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_u_n_scalar_out(
34128 c_tensors.as_mut_ptr(),
34129 out.c_tensor,
34130 x.c_tensor,
34131 n.into().c_scalar
34132 ));
34133 Ok(Tensor { c_tensor: c_tensors[0] })
34134 }
34135
34136 pub fn f_special_shifted_chebyshev_polynomial_u_out(
34137 out: &Tensor,
34138 x: &Tensor,
34139 n: &Tensor,
34140 ) -> Result<Tensor, TchError> {
34141 let mut c_tensors = [std::ptr::null_mut(); 1];
34142 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_u_out(
34143 c_tensors.as_mut_ptr(),
34144 out.c_tensor,
34145 x.c_tensor,
34146 n.c_tensor
34147 ));
34148 Ok(Tensor { c_tensor: c_tensors[0] })
34149 }
34150
34151 pub fn f_special_shifted_chebyshev_polynomial_u_x_scalar<S: Into<Scalar>>(
34152 x: S,
34153 n: &Tensor,
34154 ) -> Result<Tensor, TchError> {
34155 let mut c_tensors = [std::ptr::null_mut(); 1];
34156 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_u_x_scalar(
34157 c_tensors.as_mut_ptr(),
34158 x.into().c_scalar,
34159 n.c_tensor
34160 ));
34161 Ok(Tensor { c_tensor: c_tensors[0] })
34162 }
34163
34164 pub fn f_special_shifted_chebyshev_polynomial_u_x_scalar_out<S: Into<Scalar>>(
34165 out: &Tensor,
34166 x: S,
34167 n: &Tensor,
34168 ) -> Result<Tensor, TchError> {
34169 let mut c_tensors = [std::ptr::null_mut(); 1];
34170 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_u_x_scalar_out(
34171 c_tensors.as_mut_ptr(),
34172 out.c_tensor,
34173 x.into().c_scalar,
34174 n.c_tensor
34175 ));
34176 Ok(Tensor { c_tensor: c_tensors[0] })
34177 }
34178
34179 pub fn f_special_shifted_chebyshev_polynomial_v(
34180 x: &Tensor,
34181 n: &Tensor,
34182 ) -> Result<Tensor, TchError> {
34183 let mut c_tensors = [std::ptr::null_mut(); 1];
34184 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_v(
34185 c_tensors.as_mut_ptr(),
34186 x.c_tensor,
34187 n.c_tensor
34188 ));
34189 Ok(Tensor { c_tensor: c_tensors[0] })
34190 }
34191
34192 pub fn f_special_shifted_chebyshev_polynomial_v_n_scalar<S: Into<Scalar>>(
34193 x: &Tensor,
34194 n: S,
34195 ) -> Result<Tensor, TchError> {
34196 let mut c_tensors = [std::ptr::null_mut(); 1];
34197 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_v_n_scalar(
34198 c_tensors.as_mut_ptr(),
34199 x.c_tensor,
34200 n.into().c_scalar
34201 ));
34202 Ok(Tensor { c_tensor: c_tensors[0] })
34203 }
34204
34205 pub fn f_special_shifted_chebyshev_polynomial_v_n_scalar_out<S: Into<Scalar>>(
34206 out: &Tensor,
34207 x: &Tensor,
34208 n: S,
34209 ) -> Result<Tensor, TchError> {
34210 let mut c_tensors = [std::ptr::null_mut(); 1];
34211 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_v_n_scalar_out(
34212 c_tensors.as_mut_ptr(),
34213 out.c_tensor,
34214 x.c_tensor,
34215 n.into().c_scalar
34216 ));
34217 Ok(Tensor { c_tensor: c_tensors[0] })
34218 }
34219
34220 pub fn f_special_shifted_chebyshev_polynomial_v_out(
34221 out: &Tensor,
34222 x: &Tensor,
34223 n: &Tensor,
34224 ) -> Result<Tensor, TchError> {
34225 let mut c_tensors = [std::ptr::null_mut(); 1];
34226 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_v_out(
34227 c_tensors.as_mut_ptr(),
34228 out.c_tensor,
34229 x.c_tensor,
34230 n.c_tensor
34231 ));
34232 Ok(Tensor { c_tensor: c_tensors[0] })
34233 }
34234
34235 pub fn f_special_shifted_chebyshev_polynomial_v_x_scalar<S: Into<Scalar>>(
34236 x: S,
34237 n: &Tensor,
34238 ) -> Result<Tensor, TchError> {
34239 let mut c_tensors = [std::ptr::null_mut(); 1];
34240 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_v_x_scalar(
34241 c_tensors.as_mut_ptr(),
34242 x.into().c_scalar,
34243 n.c_tensor
34244 ));
34245 Ok(Tensor { c_tensor: c_tensors[0] })
34246 }
34247
34248 pub fn f_special_shifted_chebyshev_polynomial_v_x_scalar_out<S: Into<Scalar>>(
34249 out: &Tensor,
34250 x: S,
34251 n: &Tensor,
34252 ) -> Result<Tensor, TchError> {
34253 let mut c_tensors = [std::ptr::null_mut(); 1];
34254 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_v_x_scalar_out(
34255 c_tensors.as_mut_ptr(),
34256 out.c_tensor,
34257 x.into().c_scalar,
34258 n.c_tensor
34259 ));
34260 Ok(Tensor { c_tensor: c_tensors[0] })
34261 }
34262
34263 pub fn f_special_shifted_chebyshev_polynomial_w(
34264 x: &Tensor,
34265 n: &Tensor,
34266 ) -> Result<Tensor, TchError> {
34267 let mut c_tensors = [std::ptr::null_mut(); 1];
34268 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_w(
34269 c_tensors.as_mut_ptr(),
34270 x.c_tensor,
34271 n.c_tensor
34272 ));
34273 Ok(Tensor { c_tensor: c_tensors[0] })
34274 }
34275
34276 pub fn f_special_shifted_chebyshev_polynomial_w_n_scalar<S: Into<Scalar>>(
34277 x: &Tensor,
34278 n: S,
34279 ) -> Result<Tensor, TchError> {
34280 let mut c_tensors = [std::ptr::null_mut(); 1];
34281 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_w_n_scalar(
34282 c_tensors.as_mut_ptr(),
34283 x.c_tensor,
34284 n.into().c_scalar
34285 ));
34286 Ok(Tensor { c_tensor: c_tensors[0] })
34287 }
34288
34289 pub fn f_special_shifted_chebyshev_polynomial_w_n_scalar_out<S: Into<Scalar>>(
34290 out: &Tensor,
34291 x: &Tensor,
34292 n: S,
34293 ) -> Result<Tensor, TchError> {
34294 let mut c_tensors = [std::ptr::null_mut(); 1];
34295 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_w_n_scalar_out(
34296 c_tensors.as_mut_ptr(),
34297 out.c_tensor,
34298 x.c_tensor,
34299 n.into().c_scalar
34300 ));
34301 Ok(Tensor { c_tensor: c_tensors[0] })
34302 }
34303
34304 pub fn f_special_shifted_chebyshev_polynomial_w_out(
34305 out: &Tensor,
34306 x: &Tensor,
34307 n: &Tensor,
34308 ) -> Result<Tensor, TchError> {
34309 let mut c_tensors = [std::ptr::null_mut(); 1];
34310 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_w_out(
34311 c_tensors.as_mut_ptr(),
34312 out.c_tensor,
34313 x.c_tensor,
34314 n.c_tensor
34315 ));
34316 Ok(Tensor { c_tensor: c_tensors[0] })
34317 }
34318
34319 pub fn f_special_shifted_chebyshev_polynomial_w_x_scalar<S: Into<Scalar>>(
34320 x: S,
34321 n: &Tensor,
34322 ) -> Result<Tensor, TchError> {
34323 let mut c_tensors = [std::ptr::null_mut(); 1];
34324 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_w_x_scalar(
34325 c_tensors.as_mut_ptr(),
34326 x.into().c_scalar,
34327 n.c_tensor
34328 ));
34329 Ok(Tensor { c_tensor: c_tensors[0] })
34330 }
34331
34332 pub fn f_special_shifted_chebyshev_polynomial_w_x_scalar_out<S: Into<Scalar>>(
34333 out: &Tensor,
34334 x: S,
34335 n: &Tensor,
34336 ) -> Result<Tensor, TchError> {
34337 let mut c_tensors = [std::ptr::null_mut(); 1];
34338 unsafe_torch_err!(atg_special_shifted_chebyshev_polynomial_w_x_scalar_out(
34339 c_tensors.as_mut_ptr(),
34340 out.c_tensor,
34341 x.into().c_scalar,
34342 n.c_tensor
34343 ));
34344 Ok(Tensor { c_tensor: c_tensors[0] })
34345 }
34346
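    /// Normalized sinc, sin(pi * x) / (pi * x) elementwise, with the value 1 at x = 0.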
34347 pub fn f_special_sinc(&self) -> Result<Tensor, TchError> {
34348 let mut c_tensors = [std::ptr::null_mut(); 1];
34349 unsafe_torch_err!(atg_special_sinc(c_tensors.as_mut_ptr(), self.c_tensor));
34350 Ok(Tensor { c_tensor: c_tensors[0] })
34351 }
34352
34353 pub fn f_special_sinc_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
34354 let mut c_tensors = [std::ptr::null_mut(); 1];
34355 unsafe_torch_err!(atg_special_sinc_out(
34356 c_tensors.as_mut_ptr(),
34357 out.c_tensor,
34358 self.c_tensor
34359 ));
34360 Ok(Tensor { c_tensor: c_tensors[0] })
34361 }
34362
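    /// Softmax along `dim`; when `dtype` is `Some(kind)` the input is cast to that kind before the
    /// computation (a `None` dtype is encoded as -1 for the C call, as elsewhere in this file).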
34363 pub fn f_special_softmax(
34364 &self,
34365 dim: i64,
34366 dtype: impl Into<Option<Kind>>,
34367 ) -> Result<Tensor, TchError> {
34368 let mut c_tensors = [std::ptr::null_mut(); 1];
34369 unsafe_torch_err!(atg_special_softmax(
34370 c_tensors.as_mut_ptr(),
34371 self.c_tensor,
34372 dim,
34373 dtype.into().map_or(-1, |s| s.c_int())
34374 ));
34375 Ok(Tensor { c_tensor: c_tensors[0] })
34376 }
34377
34378 pub fn f_special_spherical_bessel_j0(x: &Tensor) -> Result<Tensor, TchError> {
34379 let mut c_tensors = [std::ptr::null_mut(); 1];
34380 unsafe_torch_err!(atg_special_spherical_bessel_j0(c_tensors.as_mut_ptr(), x.c_tensor));
34381 Ok(Tensor { c_tensor: c_tensors[0] })
34382 }
34383
34384 pub fn f_special_spherical_bessel_j0_out(out: &Tensor, x: &Tensor) -> Result<Tensor, TchError> {
34385 let mut c_tensors = [std::ptr::null_mut(); 1];
34386 unsafe_torch_err!(atg_special_spherical_bessel_j0_out(
34387 c_tensors.as_mut_ptr(),
34388 out.c_tensor,
34389 x.c_tensor
34390 ));
34391 Ok(Tensor { c_tensor: c_tensors[0] })
34392 }
34393
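    // xlog1py / xlogy wrappers: elementwise x * log1p(y) and x * log(y), with the x = 0 case
    // defined to be 0. The `_other_scalar` / `_self_scalar` variants take one operand as a scalar.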
34394 pub fn f_special_xlog1py(&self, other: &Tensor) -> Result<Tensor, TchError> {
34395 let mut c_tensors = [std::ptr::null_mut(); 1];
34396 unsafe_torch_err!(atg_special_xlog1py(
34397 c_tensors.as_mut_ptr(),
34398 self.c_tensor,
34399 other.c_tensor
34400 ));
34401 Ok(Tensor { c_tensor: c_tensors[0] })
34402 }
34403
34404 pub fn f_special_xlog1py_other_scalar<S: Into<Scalar>>(
34405 &self,
34406 other: S,
34407 ) -> Result<Tensor, TchError> {
34408 let mut c_tensors = [std::ptr::null_mut(); 1];
34409 unsafe_torch_err!(atg_special_xlog1py_other_scalar(
34410 c_tensors.as_mut_ptr(),
34411 self.c_tensor,
34412 other.into().c_scalar
34413 ));
34414 Ok(Tensor { c_tensor: c_tensors[0] })
34415 }
34416
34417 pub fn f_special_xlog1py_other_scalar_out<S: Into<Scalar>>(
34418 &self,
34419 out: &Tensor,
34420 other: S,
34421 ) -> Result<Tensor, TchError> {
34422 let mut c_tensors = [std::ptr::null_mut(); 1];
34423 unsafe_torch_err!(atg_special_xlog1py_other_scalar_out(
34424 c_tensors.as_mut_ptr(),
34425 out.c_tensor,
34426 self.c_tensor,
34427 other.into().c_scalar
34428 ));
34429 Ok(Tensor { c_tensor: c_tensors[0] })
34430 }
34431
34432 pub fn f_special_xlog1py_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
34433 let mut c_tensors = [std::ptr::null_mut(); 1];
34434 unsafe_torch_err!(atg_special_xlog1py_out(
34435 c_tensors.as_mut_ptr(),
34436 out.c_tensor,
34437 self.c_tensor,
34438 other.c_tensor
34439 ));
34440 Ok(Tensor { c_tensor: c_tensors[0] })
34441 }
34442
34443 pub fn f_special_xlog1py_self_scalar<S: Into<Scalar>>(
34444 self_scalar: S,
34445 other: &Tensor,
34446 ) -> Result<Tensor, TchError> {
34447 let mut c_tensors = [std::ptr::null_mut(); 1];
34448 unsafe_torch_err!(atg_special_xlog1py_self_scalar(
34449 c_tensors.as_mut_ptr(),
34450 self_scalar.into().c_scalar,
34451 other.c_tensor
34452 ));
34453 Ok(Tensor { c_tensor: c_tensors[0] })
34454 }
34455
34456 pub fn f_special_xlog1py_self_scalar_out<S: Into<Scalar>>(
34457 out: &Tensor,
34458 self_scalar: S,
34459 other: &Tensor,
34460 ) -> Result<Tensor, TchError> {
34461 let mut c_tensors = [std::ptr::null_mut(); 1];
34462 unsafe_torch_err!(atg_special_xlog1py_self_scalar_out(
34463 c_tensors.as_mut_ptr(),
34464 out.c_tensor,
34465 self_scalar.into().c_scalar,
34466 other.c_tensor
34467 ));
34468 Ok(Tensor { c_tensor: c_tensors[0] })
34469 }
34470
34471 pub fn f_special_xlogy(&self, other: &Tensor) -> Result<Tensor, TchError> {
34472 let mut c_tensors = [std::ptr::null_mut(); 1];
34473 unsafe_torch_err!(atg_special_xlogy(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
34474 Ok(Tensor { c_tensor: c_tensors[0] })
34475 }
34476
34477 pub fn f_special_xlogy_other_scalar<S: Into<Scalar>>(
34478 &self,
34479 other: S,
34480 ) -> Result<Tensor, TchError> {
34481 let mut c_tensors = [std::ptr::null_mut(); 1];
34482 unsafe_torch_err!(atg_special_xlogy_other_scalar(
34483 c_tensors.as_mut_ptr(),
34484 self.c_tensor,
34485 other.into().c_scalar
34486 ));
34487 Ok(Tensor { c_tensor: c_tensors[0] })
34488 }
34489
34490 pub fn f_special_xlogy_other_scalar_out<S: Into<Scalar>>(
34491 &self,
34492 out: &Tensor,
34493 other: S,
34494 ) -> Result<Tensor, TchError> {
34495 let mut c_tensors = [std::ptr::null_mut(); 1];
34496 unsafe_torch_err!(atg_special_xlogy_other_scalar_out(
34497 c_tensors.as_mut_ptr(),
34498 out.c_tensor,
34499 self.c_tensor,
34500 other.into().c_scalar
34501 ));
34502 Ok(Tensor { c_tensor: c_tensors[0] })
34503 }
34504
34505 pub fn f_special_xlogy_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
34506 let mut c_tensors = [std::ptr::null_mut(); 1];
34507 unsafe_torch_err!(atg_special_xlogy_out(
34508 c_tensors.as_mut_ptr(),
34509 out.c_tensor,
34510 self.c_tensor,
34511 other.c_tensor
34512 ));
34513 Ok(Tensor { c_tensor: c_tensors[0] })
34514 }
34515
34516 pub fn f_special_xlogy_self_scalar<S: Into<Scalar>>(
34517 self_scalar: S,
34518 other: &Tensor,
34519 ) -> Result<Tensor, TchError> {
34520 let mut c_tensors = [std::ptr::null_mut(); 1];
34521 unsafe_torch_err!(atg_special_xlogy_self_scalar(
34522 c_tensors.as_mut_ptr(),
34523 self_scalar.into().c_scalar,
34524 other.c_tensor
34525 ));
34526 Ok(Tensor { c_tensor: c_tensors[0] })
34527 }
34528
34529 pub fn f_special_xlogy_self_scalar_out<S: Into<Scalar>>(
34530 out: &Tensor,
34531 self_scalar: S,
34532 other: &Tensor,
34533 ) -> Result<Tensor, TchError> {
34534 let mut c_tensors = [std::ptr::null_mut(); 1];
34535 unsafe_torch_err!(atg_special_xlogy_self_scalar_out(
34536 c_tensors.as_mut_ptr(),
34537 out.c_tensor,
34538 self_scalar.into().c_scalar,
34539 other.c_tensor
34540 ));
34541 Ok(Tensor { c_tensor: c_tensors[0] })
34542 }
34543
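    /// Elementwise Hurwitz zeta function zeta(x, q), with `self` as x and `other` as q
    /// (torch.special.zeta); scalar and `_out` variants follow below.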
34544 pub fn f_special_zeta(&self, other: &Tensor) -> Result<Tensor, TchError> {
34545 let mut c_tensors = [std::ptr::null_mut(); 1];
34546 unsafe_torch_err!(atg_special_zeta(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
34547 Ok(Tensor { c_tensor: c_tensors[0] })
34548 }
34549
34550 pub fn f_special_zeta_other_scalar<S: Into<Scalar>>(
34551 &self,
34552 other: S,
34553 ) -> Result<Tensor, TchError> {
34554 let mut c_tensors = [std::ptr::null_mut(); 1];
34555 unsafe_torch_err!(atg_special_zeta_other_scalar(
34556 c_tensors.as_mut_ptr(),
34557 self.c_tensor,
34558 other.into().c_scalar
34559 ));
34560 Ok(Tensor { c_tensor: c_tensors[0] })
34561 }
34562
34563 pub fn f_special_zeta_other_scalar_out<S: Into<Scalar>>(
34564 &self,
34565 out: &Tensor,
34566 other: S,
34567 ) -> Result<Tensor, TchError> {
34568 let mut c_tensors = [std::ptr::null_mut(); 1];
34569 unsafe_torch_err!(atg_special_zeta_other_scalar_out(
34570 c_tensors.as_mut_ptr(),
34571 out.c_tensor,
34572 self.c_tensor,
34573 other.into().c_scalar
34574 ));
34575 Ok(Tensor { c_tensor: c_tensors[0] })
34576 }
34577
34578 pub fn f_special_zeta_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
34579 let mut c_tensors = [std::ptr::null_mut(); 1];
34580 unsafe_torch_err!(atg_special_zeta_out(
34581 c_tensors.as_mut_ptr(),
34582 out.c_tensor,
34583 self.c_tensor,
34584 other.c_tensor
34585 ));
34586 Ok(Tensor { c_tensor: c_tensors[0] })
34587 }
34588
34589 pub fn f_special_zeta_self_scalar<S: Into<Scalar>>(
34590 self_scalar: S,
34591 other: &Tensor,
34592 ) -> Result<Tensor, TchError> {
34593 let mut c_tensors = [std::ptr::null_mut(); 1];
34594 unsafe_torch_err!(atg_special_zeta_self_scalar(
34595 c_tensors.as_mut_ptr(),
34596 self_scalar.into().c_scalar,
34597 other.c_tensor
34598 ));
34599 Ok(Tensor { c_tensor: c_tensors[0] })
34600 }
34601
34602 pub fn f_special_zeta_self_scalar_out<S: Into<Scalar>>(
34603 out: &Tensor,
34604 self_scalar: S,
34605 other: &Tensor,
34606 ) -> Result<Tensor, TchError> {
34607 let mut c_tensors = [std::ptr::null_mut(); 1];
34608 unsafe_torch_err!(atg_special_zeta_self_scalar_out(
34609 c_tensors.as_mut_ptr(),
34610 out.c_tensor,
34611 self_scalar.into().c_scalar,
34612 other.c_tensor
34613 ));
34614 Ok(Tensor { c_tensor: c_tensors[0] })
34615 }
34616
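    /// Splits the tensor into chunks of `split_size` along `dim` (the last chunk may be smaller).
    /// The C call returns a NULL-terminated, malloc'd array of tensor pointers, which the loop
    /// below drains into a `Vec<Tensor>` before freeing the array.
    ///
    /// A minimal usage sketch, assuming a `tch`-style `Tensor::from_slice` constructor is available
    /// in this crate:
    ///
    /// ```ignore
    /// let t = Tensor::from_slice(&[1i64, 2, 3, 4, 5]);
    /// let chunks = t.f_split(2, 0)?; // three chunks: [1, 2], [3, 4], [5]
    /// assert_eq!(chunks.len(), 3);
    /// ```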
34617 pub fn f_split(&self, split_size: i64, dim: i64) -> Result<Vec<Tensor>, TchError> {
34618 let c_tensors = unsafe_torch_err!(atg_split(self.c_tensor, split_size, dim)); // NULL-terminated array allocated on the C side
34619 let mut r__ = vec![];
34620 let mut i = 0;
34621 loop {
34622 let c__ = unsafe { *c_tensors.add(i) };
34623 if c__.is_null() {
34624 break; // a NULL pointer marks the end of the returned list
34625 }
34626 r__.push(Tensor { c_tensor: c__ }); // take ownership of each returned tensor
34627 i += 1;
34628 }
34629 unsafe { libc::free(c_tensors as *mut libc::c_void) } // free only the array; the tensors now live in r__
34630 Ok(r__)
34631 }
34632
34633 pub fn f_split_copy(&self, split_size: i64, dim: i64) -> Result<Vec<Tensor>, TchError> {
34634 let c_tensors = unsafe_torch_err!(atg_split_copy(self.c_tensor, split_size, dim));
34635 let mut r__ = vec![];
34636 let mut i = 0;
34637 loop {
34638 let c__ = unsafe { *c_tensors.add(i) };
34639 if c__.is_null() {
34640 break;
34641 }
34642 r__.push(Tensor { c_tensor: c__ });
34643 i += 1;
34644 }
34645 unsafe { libc::free(c_tensors as *mut libc::c_void) }
34646 Ok(r__)
34647 }
34648
34649 pub fn f_split_copy_tensor_out<T: Borrow<Tensor>>(
34650 &self,
34651 out: &[T],
34652 split_size: i64,
34653 dim: i64,
34654 ) -> Result<(), TchError> {
34655 unsafe_torch_err!(atg_split_copy_tensor_out(
34656 ptr_list(out).as_ptr(),
34657 out.len() as i32,
34658 self.c_tensor,
34659 split_size,
34660 dim
34661 ));
34662 Ok(())
34663 }
34664
34665 pub fn f_split_sizes(
34666 &self,
34667 split_size: impl IntList,
34668 dim: i64,
34669 ) -> Result<Vec<Tensor>, TchError> {
34670 let c_tensors = unsafe_torch_err!(atg_split_sizes(
34671 self.c_tensor,
34672 split_size.as_ptr(),
34673 split_size.len_i32(),
34674 dim
34675 ));
34676 let mut r__ = vec![];
34677 let mut i = 0;
34678 loop {
34679 let c__ = unsafe { *c_tensors.add(i) };
34680 if c__.is_null() {
34681 break;
34682 }
34683 r__.push(Tensor { c_tensor: c__ });
34684 i += 1;
34685 }
34686 unsafe { libc::free(c_tensors as *mut libc::c_void) }
34687 Ok(r__)
34688 }
34689
34690 pub fn f_split_with_sizes(
34691 &self,
34692 split_sizes: impl IntList,
34693 dim: i64,
34694 ) -> Result<Vec<Tensor>, TchError> {
34695 let c_tensors = unsafe_torch_err!(atg_split_with_sizes(
34696 self.c_tensor,
34697 split_sizes.as_ptr(),
34698 split_sizes.len_i32(),
34699 dim
34700 ));
34701 let mut r__ = vec![];
34702 let mut i = 0;
34703 loop {
34704 let c__ = unsafe { *c_tensors.add(i) };
34705 if c__.is_null() {
34706 break;
34707 }
34708 r__.push(Tensor { c_tensor: c__ });
34709 i += 1;
34710 }
34711 unsafe { libc::free(c_tensors as *mut libc::c_void) }
34712 Ok(r__)
34713 }
34714
34715 pub fn f_split_with_sizes_copy(
34716 &self,
34717 split_sizes: impl IntList,
34718 dim: i64,
34719 ) -> Result<Vec<Tensor>, TchError> {
34720 let c_tensors = unsafe_torch_err!(atg_split_with_sizes_copy(
34721 self.c_tensor,
34722 split_sizes.as_ptr(),
34723 split_sizes.len_i32(),
34724 dim
34725 ));
34726 let mut r__ = vec![];
34727 let mut i = 0;
34728 loop {
34729 let c__ = unsafe { *c_tensors.add(i) };
34730 if c__.is_null() {
34731 break;
34732 }
34733 r__.push(Tensor { c_tensor: c__ });
34734 i += 1;
34735 }
34736 unsafe { libc::free(c_tensors as *mut libc::c_void) }
34737 Ok(r__)
34738 }
34739
34740 pub fn f_split_with_sizes_copy_out<T: Borrow<Tensor>>(
34741 &self,
34742 out: &[T],
34743 split_sizes: impl IntList,
34744 dim: i64,
34745 ) -> Result<(), TchError> {
34746 unsafe_torch_err!(atg_split_with_sizes_copy_out(
34747 ptr_list(out).as_ptr(),
34748 out.len() as i32,
34749 self.c_tensor,
34750 split_sizes.as_ptr(),
34751 split_sizes.len_i32(),
34752 dim
34753 ));
34754 Ok(())
34755 }
34756
34757 pub fn f_sqrt(&self) -> Result<Tensor, TchError> {
34758 let mut c_tensors = [std::ptr::null_mut(); 1];
34759 unsafe_torch_err!(atg_sqrt(c_tensors.as_mut_ptr(), self.c_tensor));
34760 Ok(Tensor { c_tensor: c_tensors[0] })
34761 }
34762
34763 pub fn f_sqrt_(&mut self) -> Result<Tensor, TchError> {
34764 let mut c_tensors = [std::ptr::null_mut(); 1];
34765 unsafe_torch_err!(atg_sqrt_(c_tensors.as_mut_ptr(), self.c_tensor));
34766 Ok(Tensor { c_tensor: c_tensors[0] })
34767 }
34768
34769 pub fn f_sqrt_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
34770 let mut c_tensors = [std::ptr::null_mut(); 1];
34771 unsafe_torch_err!(atg_sqrt_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
34772 Ok(Tensor { c_tensor: c_tensors[0] })
34773 }
34774
34775 pub fn f_square(&self) -> Result<Tensor, TchError> {
34776 let mut c_tensors = [std::ptr::null_mut(); 1];
34777 unsafe_torch_err!(atg_square(c_tensors.as_mut_ptr(), self.c_tensor));
34778 Ok(Tensor { c_tensor: c_tensors[0] })
34779 }
34780
34781 pub fn f_square_(&mut self) -> Result<Tensor, TchError> {
34782 let mut c_tensors = [std::ptr::null_mut(); 1];
34783 unsafe_torch_err!(atg_square_(c_tensors.as_mut_ptr(), self.c_tensor));
34784 Ok(Tensor { c_tensor: c_tensors[0] })
34785 }
34786
34787 pub fn f_square_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
34788 let mut c_tensors = [std::ptr::null_mut(); 1];
34789 unsafe_torch_err!(atg_square_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
34790 Ok(Tensor { c_tensor: c_tensors[0] })
34791 }
34792
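    // Squeeze wrappers: remove size-1 dimensions (all of them, a single `dim`, or a list of `dims`).
    // The `_copy` variants always materialise a new tensor instead of returning a view, and the
    // trailing-underscore variants operate in place.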
34793 pub fn f_squeeze(&self) -> Result<Tensor, TchError> {
34794 let mut c_tensors = [std::ptr::null_mut(); 1];
34795 unsafe_torch_err!(atg_squeeze(c_tensors.as_mut_ptr(), self.c_tensor));
34796 Ok(Tensor { c_tensor: c_tensors[0] })
34797 }
34798
34799 pub fn f_squeeze_(&mut self) -> Result<Tensor, TchError> {
34800 let mut c_tensors = [std::ptr::null_mut(); 1];
34801 unsafe_torch_err!(atg_squeeze_(c_tensors.as_mut_ptr(), self.c_tensor));
34802 Ok(Tensor { c_tensor: c_tensors[0] })
34803 }
34804
34805 pub fn f_squeeze_copy(&self) -> Result<Tensor, TchError> {
34806 let mut c_tensors = [std::ptr::null_mut(); 1];
34807 unsafe_torch_err!(atg_squeeze_copy(c_tensors.as_mut_ptr(), self.c_tensor));
34808 Ok(Tensor { c_tensor: c_tensors[0] })
34809 }
34810
34811 pub fn f_squeeze_copy_dim(&self, dim: i64) -> Result<Tensor, TchError> {
34812 let mut c_tensors = [std::ptr::null_mut(); 1];
34813 unsafe_torch_err!(atg_squeeze_copy_dim(c_tensors.as_mut_ptr(), self.c_tensor, dim));
34814 Ok(Tensor { c_tensor: c_tensors[0] })
34815 }
34816
34817 pub fn f_squeeze_copy_dim_out(&self, out: &Tensor, dim: i64) -> Result<Tensor, TchError> {
34818 let mut c_tensors = [std::ptr::null_mut(); 1];
34819 unsafe_torch_err!(atg_squeeze_copy_dim_out(
34820 c_tensors.as_mut_ptr(),
34821 out.c_tensor,
34822 self.c_tensor,
34823 dim
34824 ));
34825 Ok(Tensor { c_tensor: c_tensors[0] })
34826 }
34827
34828 pub fn f_squeeze_copy_dims(&self, dim: impl IntList) -> Result<Tensor, TchError> {
34829 let mut c_tensors = [std::ptr::null_mut(); 1];
34830 unsafe_torch_err!(atg_squeeze_copy_dims(
34831 c_tensors.as_mut_ptr(),
34832 self.c_tensor,
34833 dim.as_ptr(),
34834 dim.len_i32()
34835 ));
34836 Ok(Tensor { c_tensor: c_tensors[0] })
34837 }
34838
34839 pub fn f_squeeze_copy_dims_out(
34840 &self,
34841 out: &Tensor,
34842 dim: impl IntList,
34843 ) -> Result<Tensor, TchError> {
34844 let mut c_tensors = [std::ptr::null_mut(); 1];
34845 unsafe_torch_err!(atg_squeeze_copy_dims_out(
34846 c_tensors.as_mut_ptr(),
34847 out.c_tensor,
34848 self.c_tensor,
34849 dim.as_ptr(),
34850 dim.len_i32()
34851 ));
34852 Ok(Tensor { c_tensor: c_tensors[0] })
34853 }
34854
34855 pub fn f_squeeze_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
34856 let mut c_tensors = [std::ptr::null_mut(); 1];
34857 unsafe_torch_err!(atg_squeeze_copy_out(
34858 c_tensors.as_mut_ptr(),
34859 out.c_tensor,
34860 self.c_tensor
34861 ));
34862 Ok(Tensor { c_tensor: c_tensors[0] })
34863 }
34864
34865 pub fn f_squeeze_dim(&self, dim: i64) -> Result<Tensor, TchError> {
34866 let mut c_tensors = [std::ptr::null_mut(); 1];
34867 unsafe_torch_err!(atg_squeeze_dim(c_tensors.as_mut_ptr(), self.c_tensor, dim));
34868 Ok(Tensor { c_tensor: c_tensors[0] })
34869 }
34870
34871 pub fn f_squeeze_dim_(&mut self, dim: i64) -> Result<Tensor, TchError> {
34872 let mut c_tensors = [std::ptr::null_mut(); 1];
34873 unsafe_torch_err!(atg_squeeze_dim_(c_tensors.as_mut_ptr(), self.c_tensor, dim));
34874 Ok(Tensor { c_tensor: c_tensors[0] })
34875 }
34876
34877 pub fn f_squeeze_dims(&self, dim: impl IntList) -> Result<Tensor, TchError> {
34878 let mut c_tensors = [std::ptr::null_mut(); 1];
34879 unsafe_torch_err!(atg_squeeze_dims(
34880 c_tensors.as_mut_ptr(),
34881 self.c_tensor,
34882 dim.as_ptr(),
34883 dim.len_i32()
34884 ));
34885 Ok(Tensor { c_tensor: c_tensors[0] })
34886 }
34887
34888 pub fn f_squeeze_dims_(&mut self, dim: impl IntList) -> Result<Tensor, TchError> {
34889 let mut c_tensors = [std::ptr::null_mut(); 1];
34890 unsafe_torch_err!(atg_squeeze_dims_(
34891 c_tensors.as_mut_ptr(),
34892 self.c_tensor,
34893 dim.as_ptr(),
34894 dim.len_i32()
34895 ));
34896 Ok(Tensor { c_tensor: c_tensors[0] })
34897 }
34898
34899 pub fn f_sspaddmm(&self, mat1: &Tensor, mat2: &Tensor) -> Result<Tensor, TchError> {
34900 let mut c_tensors = [std::ptr::null_mut(); 1];
34901 unsafe_torch_err!(atg_sspaddmm(
34902 c_tensors.as_mut_ptr(),
34903 self.c_tensor,
34904 mat1.c_tensor,
34905 mat2.c_tensor
34906 ));
34907 Ok(Tensor { c_tensor: c_tensors[0] })
34908 }
34909
34910 pub fn f_sspaddmm_out(
34911 &self,
34912 out: &Tensor,
34913 mat1: &Tensor,
34914 mat2: &Tensor,
34915 ) -> Result<Tensor, TchError> {
34916 let mut c_tensors = [std::ptr::null_mut(); 1];
34917 unsafe_torch_err!(atg_sspaddmm_out(
34918 c_tensors.as_mut_ptr(),
34919 out.c_tensor,
34920 self.c_tensor,
34921 mat1.c_tensor,
34922 mat2.c_tensor
34923 ));
34924 Ok(Tensor { c_tensor: c_tensors[0] })
34925 }
34926
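    /// Stacks a slice of same-shaped tensors along a new dimension `dim`.
    ///
    /// A minimal sketch, assuming a `tch`-style `Tensor::from_slice` constructor:
    ///
    /// ```ignore
    /// let a = Tensor::from_slice(&[1i64, 2, 3]);
    /// let b = Tensor::from_slice(&[4i64, 5, 6]);
    /// let s = Tensor::f_stack(&[a, b], 0)?; // shape [2, 3]
    /// ```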
34927 pub fn f_stack<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Result<Tensor, TchError> {
34928 let mut c_tensors = [std::ptr::null_mut(); 1];
34929 unsafe_torch_err!(atg_stack(
34930 c_tensors.as_mut_ptr(),
34931 ptr_list(tensors).as_ptr(),
34932 tensors.len() as i32,
34933 dim
34934 ));
34935 Ok(Tensor { c_tensor: c_tensors[0] })
34936 }
34937
34938 pub fn f_stack_out<T: Borrow<Tensor>>(
34939 out: &Tensor,
34940 tensors: &[T],
34941 dim: i64,
34942 ) -> Result<Tensor, TchError> {
34943 let mut c_tensors = [std::ptr::null_mut(); 1];
34944 unsafe_torch_err!(atg_stack_out(
34945 c_tensors.as_mut_ptr(),
34946 out.c_tensor,
34947 ptr_list(tensors).as_ptr(),
34948 tensors.len() as i32,
34949 dim
34950 ));
34951 Ok(Tensor { c_tensor: c_tensors[0] })
34952 }
34953
34954 pub fn f_std(&self, unbiased: bool) -> Result<Tensor, TchError> {
34955 let mut c_tensors = [std::ptr::null_mut(); 1];
34956 unsafe_torch_err!(atg_std(
34957 c_tensors.as_mut_ptr(),
34958 self.c_tensor,
34959 if unbiased { 1 } else { 0 }
34960 ));
34961 Ok(Tensor { c_tensor: c_tensors[0] })
34962 }
34963
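    /// Standard deviation over `dim` with an explicit `correction`: the divisor is N - correction,
    /// so correction = 1 reproduces the unbiased (Bessel-corrected) estimate and correction = 0 the
    /// population estimate. A `None` dim list reduces over all dimensions.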
34964 pub fn f_std_correction<S: Into<Scalar>>(
34965 &self,
34966 dim: impl IntListOption,
34967 correction: S,
34968 keepdim: bool,
34969 ) -> Result<Tensor, TchError> {
34970 let mut c_tensors = [std::ptr::null_mut(); 1];
34971 unsafe_torch_err!(atg_std_correction(
34972 c_tensors.as_mut_ptr(),
34973 self.c_tensor,
34974 dim.as_ptr(),
34975 dim.len_i32(),
34976 correction.into().c_scalar,
34977 if keepdim { 1 } else { 0 }
34978 ));
34979 Ok(Tensor { c_tensor: c_tensors[0] })
34980 }
34981
34982 pub fn f_std_correction_out<S: Into<Scalar>>(
34983 &self,
34984 out: &Tensor,
34985 dim: impl IntListOption,
34986 correction: S,
34987 keepdim: bool,
34988 ) -> Result<Tensor, TchError> {
34989 let mut c_tensors = [std::ptr::null_mut(); 1];
34990 unsafe_torch_err!(atg_std_correction_out(
34991 c_tensors.as_mut_ptr(),
34992 out.c_tensor,
34993 self.c_tensor,
34994 dim.as_ptr(),
34995 dim.len_i32(),
34996 correction.into().c_scalar,
34997 if keepdim { 1 } else { 0 }
34998 ));
34999 Ok(Tensor { c_tensor: c_tensors[0] })
35000 }
35001
35002 pub fn f_std_dim(
35003 &self,
35004 dim: impl IntListOption,
35005 unbiased: bool,
35006 keepdim: bool,
35007 ) -> Result<Tensor, TchError> {
35008 let mut c_tensors = [std::ptr::null_mut(); 1];
35009 unsafe_torch_err!(atg_std_dim(
35010 c_tensors.as_mut_ptr(),
35011 self.c_tensor,
35012 dim.as_ptr(),
35013 dim.len_i32(),
35014 if unbiased { 1 } else { 0 },
35015 if keepdim { 1 } else { 0 }
35016 ));
35017 Ok(Tensor { c_tensor: c_tensors[0] })
35018 }
35019
35020 pub fn f_std_mean(&self, unbiased: bool) -> Result<(Tensor, Tensor), TchError> {
35021 let mut c_tensors = [std::ptr::null_mut(); 2];
35022 unsafe_torch_err!(atg_std_mean(
35023 c_tensors.as_mut_ptr(),
35024 self.c_tensor,
35025 if unbiased { 1 } else { 0 }
35026 ));
35027 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
35028 }
35029
35030 pub fn f_std_mean_correction<S: Into<Scalar>>(
35031 &self,
35032 dim: impl IntListOption,
35033 correction: S,
35034 keepdim: bool,
35035 ) -> Result<(Tensor, Tensor), TchError> {
35036 let mut c_tensors = [std::ptr::null_mut(); 2];
35037 unsafe_torch_err!(atg_std_mean_correction(
35038 c_tensors.as_mut_ptr(),
35039 self.c_tensor,
35040 dim.as_ptr(),
35041 dim.len_i32(),
35042 correction.into().c_scalar,
35043 if keepdim { 1 } else { 0 }
35044 ));
35045 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
35046 }
35047
35048 pub fn f_std_mean_correction_out<S: Into<Scalar>>(
35049 &self,
35050 out0: &Tensor,
35051 out1: &Tensor,
35052 dim: impl IntListOption,
35053 correction: S,
35054 keepdim: bool,
35055 ) -> Result<(Tensor, Tensor), TchError> {
35056 let mut c_tensors = [std::ptr::null_mut(); 2];
35057 unsafe_torch_err!(atg_std_mean_correction_out(
35058 c_tensors.as_mut_ptr(),
35059 out0.c_tensor,
35060 out1.c_tensor,
35061 self.c_tensor,
35062 dim.as_ptr(),
35063 dim.len_i32(),
35064 correction.into().c_scalar,
35065 if keepdim { 1 } else { 0 }
35066 ));
35067 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
35068 }
35069
35070 pub fn f_std_mean_dim(
35071 &self,
35072 dim: impl IntListOption,
35073 unbiased: bool,
35074 keepdim: bool,
35075 ) -> Result<(Tensor, Tensor), TchError> {
35076 let mut c_tensors = [std::ptr::null_mut(); 2];
35077 unsafe_torch_err!(atg_std_mean_dim(
35078 c_tensors.as_mut_ptr(),
35079 self.c_tensor,
35080 dim.as_ptr(),
35081 dim.len_i32(),
35082 if unbiased { 1 } else { 0 },
35083 if keepdim { 1 } else { 0 }
35084 ));
35085 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
35086 }
35087
35088 pub fn f_std_out(
35089 &self,
35090 out: &Tensor,
35091 dim: impl IntListOption,
35092 unbiased: bool,
35093 keepdim: bool,
35094 ) -> Result<Tensor, TchError> {
35095 let mut c_tensors = [std::ptr::null_mut(); 1];
35096 unsafe_torch_err!(atg_std_out(
35097 c_tensors.as_mut_ptr(),
35098 out.c_tensor,
35099 self.c_tensor,
35100 dim.as_ptr(),
35101 dim.len_i32(),
35102 if unbiased { 1 } else { 0 },
35103 if keepdim { 1 } else { 0 }
35104 ));
35105 Ok(Tensor { c_tensor: c_tensors[0] })
35106 }
35107
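    // Short-time Fourier transform wrappers. Optional `hop_length` / `win_length` are encoded for
    // the C call as a value plus an `is_none` flag, and the optional `window` tensor as a null
    // pointer. `f_stft_center` additionally pads the input according to `pad_mode` when `center`
    // is true.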
35108 pub fn f_stft<T: Borrow<Tensor>>(
35109 &self,
35110 n_fft: i64,
35111 hop_length: impl Into<Option<i64>>,
35112 win_length: impl Into<Option<i64>>,
35113 window: Option<T>,
35114 normalized: bool,
35115 onesided: bool,
35116 return_complex: bool,
35117 ) -> Result<Tensor, TchError> {
35118 let hop_length = hop_length.into();
35119 let win_length = win_length.into();
35120 let mut c_tensors = [std::ptr::null_mut(); 1];
35121 unsafe_torch_err!(atg_stft(
35122 c_tensors.as_mut_ptr(),
35123 self.c_tensor,
35124 n_fft,
35125 hop_length.unwrap_or(0i64),
35126 hop_length.is_none() as i8,
35127 win_length.unwrap_or(0i64),
35128 win_length.is_none() as i8,
35129 window.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
35130 if normalized { 1 } else { 0 },
35131 if onesided { 1 } else { 0 },
35132 if return_complex { 1 } else { 0 }
35133 ));
35134 Ok(Tensor { c_tensor: c_tensors[0] })
35135 }
35136
35137 pub fn f_stft_center<T: Borrow<Tensor>>(
35138 &self,
35139 n_fft: i64,
35140 hop_length: impl Into<Option<i64>>,
35141 win_length: impl Into<Option<i64>>,
35142 window: Option<T>,
35143 center: bool,
35144 pad_mode: &str,
35145 normalized: bool,
35146 onesided: bool,
35147 return_complex: bool,
35148 ) -> Result<Tensor, TchError> {
35149 let hop_length = hop_length.into();
35150 let win_length = win_length.into();
35151 let mut c_tensors = [std::ptr::null_mut(); 1];
35152 unsafe_torch_err!(atg_stft_center(
35153 c_tensors.as_mut_ptr(),
35154 self.c_tensor,
35155 n_fft,
35156 hop_length.unwrap_or(0i64),
35157 hop_length.is_none() as i8,
35158 win_length.unwrap_or(0i64),
35159 win_length.is_none() as i8,
35160 window.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
35161 if center { 1 } else { 0 },
35162 pad_mode.as_ptr(),
35163 pad_mode.len() as i32,
35164 if normalized { 1 } else { 0 },
35165 if onesided { 1 } else { 0 },
35166 if return_complex { 1 } else { 0 }
35167 ));
35168 Ok(Tensor { c_tensor: c_tensors[0] })
35169 }
35170
35171 pub fn f_sub(&self, other: &Tensor) -> Result<Tensor, TchError> {
35172 let mut c_tensors = [std::ptr::null_mut(); 1];
35173 unsafe_torch_err!(atg_sub(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
35174 Ok(Tensor { c_tensor: c_tensors[0] })
35175 }
35176
35177 pub fn f_sub_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
35178 let mut c_tensors = [std::ptr::null_mut(); 1];
35179 unsafe_torch_err!(atg_sub_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
35180 Ok(Tensor { c_tensor: c_tensors[0] })
35181 }
35182
35183 pub fn f_sub_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
35184 let mut c_tensors = [std::ptr::null_mut(); 1];
35185 unsafe_torch_err!(atg_sub_out(
35186 c_tensors.as_mut_ptr(),
35187 out.c_tensor,
35188 self.c_tensor,
35189 other.c_tensor
35190 ));
35191 Ok(Tensor { c_tensor: c_tensors[0] })
35192 }
35193
35194 pub fn f_sub_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
35195 let mut c_tensors = [std::ptr::null_mut(); 1];
35196 unsafe_torch_err!(atg_sub_scalar(
35197 c_tensors.as_mut_ptr(),
35198 self.c_tensor,
35199 other.into().c_scalar
35200 ));
35201 Ok(Tensor { c_tensor: c_tensors[0] })
35202 }
35203
35204 pub fn f_sub_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
35205 let mut c_tensors = [std::ptr::null_mut(); 1];
35206 unsafe_torch_err!(atg_sub_scalar_(
35207 c_tensors.as_mut_ptr(),
35208 self.c_tensor,
35209 other.into().c_scalar
35210 ));
35211 Ok(Tensor { c_tensor: c_tensors[0] })
35212 }
35213
35214 pub fn f_sub_scalar_out<S: Into<Scalar>>(
35215 &self,
35216 out: &Tensor,
35217 other: S,
35218 ) -> Result<Tensor, TchError> {
35219 let mut c_tensors = [std::ptr::null_mut(); 1];
35220 unsafe_torch_err!(atg_sub_scalar_out(
35221 c_tensors.as_mut_ptr(),
35222 out.c_tensor,
35223 self.c_tensor,
35224 other.into().c_scalar
35225 ));
35226 Ok(Tensor { c_tensor: c_tensors[0] })
35227 }
35228
35229 pub fn f_subtract(&self, other: &Tensor) -> Result<Tensor, TchError> {
35230 let mut c_tensors = [std::ptr::null_mut(); 1];
35231 unsafe_torch_err!(atg_subtract(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
35232 Ok(Tensor { c_tensor: c_tensors[0] })
35233 }
35234
35235 pub fn f_subtract_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
35236 let mut c_tensors = [std::ptr::null_mut(); 1];
35237 unsafe_torch_err!(atg_subtract_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
35238 Ok(Tensor { c_tensor: c_tensors[0] })
35239 }
35240
35241 pub fn f_subtract_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
35242 let mut c_tensors = [std::ptr::null_mut(); 1];
35243 unsafe_torch_err!(atg_subtract_out(
35244 c_tensors.as_mut_ptr(),
35245 out.c_tensor,
35246 self.c_tensor,
35247 other.c_tensor
35248 ));
35249 Ok(Tensor { c_tensor: c_tensors[0] })
35250 }
35251
35252 pub fn f_subtract_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
35253 let mut c_tensors = [std::ptr::null_mut(); 1];
35254 unsafe_torch_err!(atg_subtract_scalar(
35255 c_tensors.as_mut_ptr(),
35256 self.c_tensor,
35257 other.into().c_scalar
35258 ));
35259 Ok(Tensor { c_tensor: c_tensors[0] })
35260 }
35261
35262 pub fn f_subtract_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
35263 let mut c_tensors = [std::ptr::null_mut(); 1];
35264 unsafe_torch_err!(atg_subtract_scalar_(
35265 c_tensors.as_mut_ptr(),
35266 self.c_tensor,
35267 other.into().c_scalar
35268 ));
35269 Ok(Tensor { c_tensor: c_tensors[0] })
35270 }
35271
35272 pub fn f_sum(&self, dtype: impl Into<Option<Kind>>) -> Result<Tensor, TchError> {
35273 let mut c_tensors = [std::ptr::null_mut(); 1];
35274 unsafe_torch_err!(atg_sum(
35275 c_tensors.as_mut_ptr(),
35276 self.c_tensor,
35277 dtype.into().map_or(-1, |s| s.c_int())
35278 ));
35279 Ok(Tensor { c_tensor: c_tensors[0] })
35280 }
35281
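    /// Sum over the dimensions listed in `dim`, optionally keeping the reduced dimensions and
    /// casting to `dtype` first (a `None` dtype is passed to the C call as -1).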
35282 pub fn f_sum_dim_intlist(
35283 &self,
35284 dim: impl IntListOption,
35285 keepdim: bool,
35286 dtype: impl Into<Option<Kind>>,
35287 ) -> Result<Tensor, TchError> {
35288 let mut c_tensors = [std::ptr::null_mut(); 1];
35289 unsafe_torch_err!(atg_sum_dim_intlist(
35290 c_tensors.as_mut_ptr(),
35291 self.c_tensor,
35292 dim.as_ptr(),
35293 dim.len_i32(),
35294 if keepdim { 1 } else { 0 },
35295 dtype.into().map_or(-1, |s| s.c_int())
35296 ));
35297 Ok(Tensor { c_tensor: c_tensors[0] })
35298 }
35299
35300 pub fn f_sum_intlist_out(
35301 &self,
35302 out: &Tensor,
35303 dim: impl IntListOption,
35304 keepdim: bool,
35305 dtype: impl Into<Option<Kind>>,
35306 ) -> Result<Tensor, TchError> {
35307 let mut c_tensors = [std::ptr::null_mut(); 1];
35308 unsafe_torch_err!(atg_sum_intlist_out(
35309 c_tensors.as_mut_ptr(),
35310 out.c_tensor,
35311 self.c_tensor,
35312 dim.as_ptr(),
35313 dim.len_i32(),
35314 if keepdim { 1 } else { 0 },
35315 dtype.into().map_or(-1, |s| s.c_int())
35316 ));
35317 Ok(Tensor { c_tensor: c_tensors[0] })
35318 }
35319
35320 pub fn f_sum_out(
35321 &self,
35322 out: &Tensor,
35323 dtype: impl Into<Option<Kind>>,
35324 ) -> Result<Tensor, TchError> {
35325 let mut c_tensors = [std::ptr::null_mut(); 1];
35326 unsafe_torch_err!(atg_sum_out(
35327 c_tensors.as_mut_ptr(),
35328 out.c_tensor,
35329 self.c_tensor,
35330 dtype.into().map_or(-1, |s| s.c_int())
35331 ));
35332 Ok(Tensor { c_tensor: c_tensors[0] })
35333 }
35334
35335 pub fn f_sum_to_size(&self, size: impl IntList) -> Result<Tensor, TchError> {
35336 let mut c_tensors = [std::ptr::null_mut(); 1];
35337 unsafe_torch_err!(atg_sum_to_size(
35338 c_tensors.as_mut_ptr(),
35339 self.c_tensor,
35340 size.as_ptr(),
35341 size.len_i32()
35342 ));
35343 Ok(Tensor { c_tensor: c_tensors[0] })
35344 }
35345
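    /// Singular value decomposition returning `(U, S, V)`; `some` selects the reduced
    /// factorisation and `compute_uv` controls whether U and V are actually computed. This mirrors
    /// the legacy `torch.svd` (V is returned untransposed, unlike `linalg.svd`).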
35346 pub fn f_svd(
35347 &self,
35348 some: bool,
35349 compute_uv: bool,
35350 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
35351 let mut c_tensors = [std::ptr::null_mut(); 3];
35352 unsafe_torch_err!(atg_svd(
35353 c_tensors.as_mut_ptr(),
35354 self.c_tensor,
35355 if some { 1 } else { 0 },
35356 if compute_uv { 1 } else { 0 }
35357 ));
35358 Ok((
35359 Tensor { c_tensor: c_tensors[0] },
35360 Tensor { c_tensor: c_tensors[1] },
35361 Tensor { c_tensor: c_tensors[2] },
35362 ))
35363 }
35364
35365 pub fn f_svd_u(
35366 &self,
35367 u: &Tensor,
35368 s: &Tensor,
35369 v: &Tensor,
35370 some: bool,
35371 compute_uv: bool,
35372 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
35373 let mut c_tensors = [std::ptr::null_mut(); 3];
35374 unsafe_torch_err!(atg_svd_u(
35375 c_tensors.as_mut_ptr(),
35376 u.c_tensor,
35377 s.c_tensor,
35378 v.c_tensor,
35379 self.c_tensor,
35380 if some { 1 } else { 0 },
35381 if compute_uv { 1 } else { 0 }
35382 ));
35383 Ok((
35384 Tensor { c_tensor: c_tensors[0] },
35385 Tensor { c_tensor: c_tensors[1] },
35386 Tensor { c_tensor: c_tensors[2] },
35387 ))
35388 }
35389
35390 pub fn f_swapaxes(&self, axis0: i64, axis1: i64) -> Result<Tensor, TchError> {
35391 let mut c_tensors = [std::ptr::null_mut(); 1];
35392 unsafe_torch_err!(atg_swapaxes(c_tensors.as_mut_ptr(), self.c_tensor, axis0, axis1));
35393 Ok(Tensor { c_tensor: c_tensors[0] })
35394 }
35395
35396 pub fn f_swapaxes_(&mut self, axis0: i64, axis1: i64) -> Result<Tensor, TchError> {
35397 let mut c_tensors = [std::ptr::null_mut(); 1];
35398 unsafe_torch_err!(atg_swapaxes_(c_tensors.as_mut_ptr(), self.c_tensor, axis0, axis1));
35399 Ok(Tensor { c_tensor: c_tensors[0] })
35400 }
35401
35402 pub fn f_swapdims(&self, dim0: i64, dim1: i64) -> Result<Tensor, TchError> {
35403 let mut c_tensors = [std::ptr::null_mut(); 1];
35404 unsafe_torch_err!(atg_swapdims(c_tensors.as_mut_ptr(), self.c_tensor, dim0, dim1));
35405 Ok(Tensor { c_tensor: c_tensors[0] })
35406 }
35407
35408 pub fn f_swapdims_(&mut self, dim0: i64, dim1: i64) -> Result<Tensor, TchError> {
35409 let mut c_tensors = [std::ptr::null_mut(); 1];
35410 unsafe_torch_err!(atg_swapdims_(c_tensors.as_mut_ptr(), self.c_tensor, dim0, dim1));
35411 Ok(Tensor { c_tensor: c_tensors[0] })
35412 }
35413
35414 pub fn f_tr(&self) -> Result<Tensor, TchError> {
35415 let mut c_tensors = [std::ptr::null_mut(); 1];
35416 unsafe_torch_err!(atg_t(c_tensors.as_mut_ptr(), self.c_tensor));
35417 Ok(Tensor { c_tensor: c_tensors[0] })
35418 }
35419
35420 pub fn f_t_(&mut self) -> Result<Tensor, TchError> {
35421 let mut c_tensors = [std::ptr::null_mut(); 1];
35422 unsafe_torch_err!(atg_t_(c_tensors.as_mut_ptr(), self.c_tensor));
35423 Ok(Tensor { c_tensor: c_tensors[0] })
35424 }
35425
35426 pub fn f_t_copy(&self) -> Result<Tensor, TchError> {
35427 let mut c_tensors = [std::ptr::null_mut(); 1];
35428 unsafe_torch_err!(atg_t_copy(c_tensors.as_mut_ptr(), self.c_tensor));
35429 Ok(Tensor { c_tensor: c_tensors[0] })
35430 }
35431
35432 pub fn f_t_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
35433 let mut c_tensors = [std::ptr::null_mut(); 1];
35434 unsafe_torch_err!(atg_t_copy_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
35435 Ok(Tensor { c_tensor: c_tensors[0] })
35436 }
35437
35438 pub fn f_take(&self, index: &Tensor) -> Result<Tensor, TchError> {
35439 let mut c_tensors = [std::ptr::null_mut(); 1];
35440 unsafe_torch_err!(atg_take(c_tensors.as_mut_ptr(), self.c_tensor, index.c_tensor));
35441 Ok(Tensor { c_tensor: c_tensors[0] })
35442 }
35443
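    /// Gathers values along `dim` using an index tensor, analogous to NumPy's `take_along_axis`;
    /// a `None` dim (encoded via the extra `is_none` flag below) treats the inputs as flattened
    /// 1-D tensors.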
35444 pub fn f_take_along_dim(
35445 &self,
35446 indices: &Tensor,
35447 dim: impl Into<Option<i64>>,
35448 ) -> Result<Tensor, TchError> {
35449 let dim = dim.into();
35450 let mut c_tensors = [std::ptr::null_mut(); 1];
35451 unsafe_torch_err!(atg_take_along_dim(
35452 c_tensors.as_mut_ptr(),
35453 self.c_tensor,
35454 indices.c_tensor,
35455 dim.unwrap_or(0i64),
35456 dim.is_none() as i8
35457 ));
35458 Ok(Tensor { c_tensor: c_tensors[0] })
35459 }
35460
35461 pub fn f_take_along_dim_out(
35462 &self,
35463 out: &Tensor,
35464 indices: &Tensor,
35465 dim: impl Into<Option<i64>>,
35466 ) -> Result<Tensor, TchError> {
35467 let dim = dim.into();
35468 let mut c_tensors = [std::ptr::null_mut(); 1];
35469 unsafe_torch_err!(atg_take_along_dim_out(
35470 c_tensors.as_mut_ptr(),
35471 out.c_tensor,
35472 self.c_tensor,
35473 indices.c_tensor,
35474 dim.unwrap_or(0i64),
35475 dim.is_none() as i8
35476 ));
35477 Ok(Tensor { c_tensor: c_tensors[0] })
35478 }
35479
35480 pub fn f_take_out(&self, out: &Tensor, index: &Tensor) -> Result<Tensor, TchError> {
35481 let mut c_tensors = [std::ptr::null_mut(); 1];
35482 unsafe_torch_err!(atg_take_out(
35483 c_tensors.as_mut_ptr(),
35484 out.c_tensor,
35485 self.c_tensor,
35486 index.c_tensor
35487 ));
35488 Ok(Tensor { c_tensor: c_tensors[0] })
35489 }
35490
35491 pub fn f_tan(&self) -> Result<Tensor, TchError> {
35492 let mut c_tensors = [std::ptr::null_mut(); 1];
35493 unsafe_torch_err!(atg_tan(c_tensors.as_mut_ptr(), self.c_tensor));
35494 Ok(Tensor { c_tensor: c_tensors[0] })
35495 }
35496
35497 pub fn f_tan_(&mut self) -> Result<Tensor, TchError> {
35498 let mut c_tensors = [std::ptr::null_mut(); 1];
35499 unsafe_torch_err!(atg_tan_(c_tensors.as_mut_ptr(), self.c_tensor));
35500 Ok(Tensor { c_tensor: c_tensors[0] })
35501 }
35502
35503 pub fn f_tan_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
35504 let mut c_tensors = [std::ptr::null_mut(); 1];
35505 unsafe_torch_err!(atg_tan_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
35506 Ok(Tensor { c_tensor: c_tensors[0] })
35507 }
35508
35509 pub fn f_tanh(&self) -> Result<Tensor, TchError> {
35510 let mut c_tensors = [std::ptr::null_mut(); 1];
35511 unsafe_torch_err!(atg_tanh(c_tensors.as_mut_ptr(), self.c_tensor));
35512 Ok(Tensor { c_tensor: c_tensors[0] })
35513 }
35514
35515 pub fn f_tanh_(&mut self) -> Result<Tensor, TchError> {
35516 let mut c_tensors = [std::ptr::null_mut(); 1];
35517 unsafe_torch_err!(atg_tanh_(c_tensors.as_mut_ptr(), self.c_tensor));
35518 Ok(Tensor { c_tensor: c_tensors[0] })
35519 }
35520
35521 pub fn f_tanh_backward(grad_output: &Tensor, output: &Tensor) -> Result<Tensor, TchError> {
35522 let mut c_tensors = [std::ptr::null_mut(); 1];
35523 unsafe_torch_err!(atg_tanh_backward(
35524 c_tensors.as_mut_ptr(),
35525 grad_output.c_tensor,
35526 output.c_tensor
35527 ));
35528 Ok(Tensor { c_tensor: c_tensors[0] })
35529 }
35530
35531 pub fn f_tanh_backward_grad_input(
35532 grad_input: &Tensor,
35533 grad_output: &Tensor,
35534 output: &Tensor,
35535 ) -> Result<Tensor, TchError> {
35536 let mut c_tensors = [std::ptr::null_mut(); 1];
35537 unsafe_torch_err!(atg_tanh_backward_grad_input(
35538 c_tensors.as_mut_ptr(),
35539 grad_input.c_tensor,
35540 grad_output.c_tensor,
35541 output.c_tensor
35542 ));
35543 Ok(Tensor { c_tensor: c_tensors[0] })
35544 }
35545
35546 pub fn f_tanh_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
35547 let mut c_tensors = [std::ptr::null_mut(); 1];
35548 unsafe_torch_err!(atg_tanh_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
35549 Ok(Tensor { c_tensor: c_tensors[0] })
35550 }
35551
35552 pub fn f_tensor_split(&self, sections: i64, dim: i64) -> Result<Vec<Tensor>, TchError> {
35553 let c_tensors = unsafe_torch_err!(atg_tensor_split(self.c_tensor, sections, dim));
35554 let mut r__ = vec![];
35555 let mut i = 0;
35556 loop {
35557 let c__ = unsafe { *c_tensors.add(i) };
35558 if c__.is_null() {
35559 break;
35560 }
35561 r__.push(Tensor { c_tensor: c__ });
35562 i += 1;
35563 }
35564 unsafe { libc::free(c_tensors as *mut libc::c_void) }
35565 Ok(r__)
35566 }
35567
35568 pub fn f_tensor_split_indices(
35569 &self,
35570 indices: impl IntList,
35571 dim: i64,
35572 ) -> Result<Vec<Tensor>, TchError> {
35573 let c_tensors = unsafe_torch_err!(atg_tensor_split_indices(
35574 self.c_tensor,
35575 indices.as_ptr(),
35576 indices.len_i32(),
35577 dim
35578 ));
35579 let mut r__ = vec![];
35580 let mut i = 0;
35581 loop {
35582 let c__ = unsafe { *c_tensors.add(i) };
35583 if c__.is_null() {
35584 break;
35585 }
35586 r__.push(Tensor { c_tensor: c__ });
35587 i += 1;
35588 }
35589 unsafe { libc::free(c_tensors as *mut libc::c_void) }
35590 Ok(r__)
35591 }
35592
35593 pub fn f_tensor_split_tensor_indices_or_sections(
35594 &self,
35595 tensor_indices_or_sections: &Tensor,
35596 dim: i64,
35597 ) -> Result<Vec<Tensor>, TchError> {
35598 let c_tensors = unsafe_torch_err!(atg_tensor_split_tensor_indices_or_sections(
35599 self.c_tensor,
35600 tensor_indices_or_sections.c_tensor,
35601 dim
35602 ));
35603 let mut r__ = vec![];
35604 let mut i = 0;
35605 loop {
35606 let c__ = unsafe { *c_tensors.add(i) };
35607 if c__.is_null() {
35608 break;
35609 }
35610 r__.push(Tensor { c_tensor: c__ });
35611 i += 1;
35612 }
35613 unsafe { libc::free(c_tensors as *mut libc::c_void) }
35614 Ok(r__)
35615 }
35616
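    /// Generalised tensor contraction: sums products over the dimensions of `self` listed in
    /// `dims_self` and the matching dimensions of `other` listed in `dims_other`.
    ///
    /// A minimal sketch contracting a 3x4 with a 4x5 matrix over the shared dimension, assuming a
    /// `tch`-style `Tensor::ones` constructor taking a size slice and `(Kind, Device)` options:
    ///
    /// ```ignore
    /// let a = Tensor::ones(&[3, 4], (Kind::Float, Device::Cpu));
    /// let b = Tensor::ones(&[4, 5], (Kind::Float, Device::Cpu));
    /// let c = a.f_tensordot(&b, &[1], &[0])?; // shape [3, 5], same as a.matmul(&b)
    /// ```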
35617 pub fn f_tensordot(
35618 &self,
35619 other: &Tensor,
35620 dims_self: impl IntList,
35621 dims_other: impl IntList,
35622 ) -> Result<Tensor, TchError> {
35623 let mut c_tensors = [std::ptr::null_mut(); 1];
35624 unsafe_torch_err!(atg_tensordot(
35625 c_tensors.as_mut_ptr(),
35626 self.c_tensor,
35627 other.c_tensor,
35628 dims_self.as_ptr(),
35629 dims_self.len_i32(),
35630 dims_other.as_ptr(),
35631 dims_other.len_i32()
35632 ));
35633 Ok(Tensor { c_tensor: c_tensors[0] })
35634 }
35635
35636 pub fn f_tensordot_out(
35637 &self,
35638 out: &Tensor,
35639 other: &Tensor,
35640 dims_self: impl IntList,
35641 dims_other: impl IntList,
35642 ) -> Result<Tensor, TchError> {
35643 let mut c_tensors = [std::ptr::null_mut(); 1];
35644 unsafe_torch_err!(atg_tensordot_out(
35645 c_tensors.as_mut_ptr(),
35646 out.c_tensor,
35647 self.c_tensor,
35648 other.c_tensor,
35649 dims_self.as_ptr(),
35650 dims_self.len_i32(),
35651 dims_other.as_ptr(),
35652 dims_other.len_i32()
35653 ));
35654 Ok(Tensor { c_tensor: c_tensors[0] })
35655 }
35656
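    /// Elementwise threshold: values greater than `threshold` are kept, all others are replaced by
    /// `value` (the same semantics as `torch.nn.functional.threshold`).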
35657 pub fn f_threshold<S: Into<Scalar>>(&self, threshold: S, value: S) -> Result<Tensor, TchError> {
35658 let mut c_tensors = [std::ptr::null_mut(); 1];
35659 unsafe_torch_err!(atg_threshold(
35660 c_tensors.as_mut_ptr(),
35661 self.c_tensor,
35662 threshold.into().c_scalar,
35663 value.into().c_scalar
35664 ));
35665 Ok(Tensor { c_tensor: c_tensors[0] })
35666 }
35667
35668 pub fn f_threshold_<S: Into<Scalar>>(
35669 &mut self,
35670 threshold: S,
35671 value: S,
35672 ) -> Result<Tensor, TchError> {
35673 let mut c_tensors = [std::ptr::null_mut(); 1];
35674 unsafe_torch_err!(atg_threshold_(
35675 c_tensors.as_mut_ptr(),
35676 self.c_tensor,
35677 threshold.into().c_scalar,
35678 value.into().c_scalar
35679 ));
35680 Ok(Tensor { c_tensor: c_tensors[0] })
35681 }
35682
35683 pub fn f_threshold_backward<S: Into<Scalar>>(
35684 &self,
35685 grad_output: &Tensor,
35686 threshold: S,
35687 ) -> Result<Tensor, TchError> {
35688 let mut c_tensors = [std::ptr::null_mut(); 1];
35689 unsafe_torch_err!(atg_threshold_backward(
35690 c_tensors.as_mut_ptr(),
35691 grad_output.c_tensor,
35692 self.c_tensor,
35693 threshold.into().c_scalar
35694 ));
35695 Ok(Tensor { c_tensor: c_tensors[0] })
35696 }
35697
35698 pub fn f_threshold_backward_grad_input<S: Into<Scalar>>(
35699 &self,
35700 grad_input: &Tensor,
35701 grad_output: &Tensor,
35702 threshold: S,
35703 ) -> Result<Tensor, TchError> {
35704 let mut c_tensors = [std::ptr::null_mut(); 1];
35705 unsafe_torch_err!(atg_threshold_backward_grad_input(
35706 c_tensors.as_mut_ptr(),
35707 grad_input.c_tensor,
35708 grad_output.c_tensor,
35709 self.c_tensor,
35710 threshold.into().c_scalar
35711 ));
35712 Ok(Tensor { c_tensor: c_tensors[0] })
35713 }
35714
35715 pub fn f_threshold_out<S: Into<Scalar>>(
35716 &self,
35717 out: &Tensor,
35718 threshold: S,
35719 value: S,
35720 ) -> Result<Tensor, TchError> {
35721 let mut c_tensors = [std::ptr::null_mut(); 1];
35722 unsafe_torch_err!(atg_threshold_out(
35723 c_tensors.as_mut_ptr(),
35724 out.c_tensor,
35725 self.c_tensor,
35726 threshold.into().c_scalar,
35727 value.into().c_scalar
35728 ));
35729 Ok(Tensor { c_tensor: c_tensors[0] })
35730 }
35731
35732 pub fn f_tile(&self, dims: impl IntList) -> Result<Tensor, TchError> {
35733 let mut c_tensors = [std::ptr::null_mut(); 1];
35734 unsafe_torch_err!(atg_tile(
35735 c_tensors.as_mut_ptr(),
35736 self.c_tensor,
35737 dims.as_ptr(),
35738 dims.len_i32()
35739 ));
35740 Ok(Tensor { c_tensor: c_tensors[0] })
35741 }
35742
35743 pub fn f_to(&self, device: Device) -> Result<Tensor, TchError> {
35744 let mut c_tensors = [std::ptr::null_mut(); 1];
35745 unsafe_torch_err!(atg_to(c_tensors.as_mut_ptr(), self.c_tensor, device.c_int()));
35746 Ok(Tensor { c_tensor: c_tensors[0] })
35747 }
35748
35749 pub fn f_to_dense(
35750 &self,
35751 dtype: impl Into<Option<Kind>>,
35752 masked_grad: bool,
35753 ) -> Result<Tensor, TchError> {
35754 let mut c_tensors = [std::ptr::null_mut(); 1];
35755 unsafe_torch_err!(atg_to_dense(
35756 c_tensors.as_mut_ptr(),
35757 self.c_tensor,
35758 dtype.into().map_or(-1, |s| s.c_int()),
35759 if masked_grad { 1 } else { 0 }
35760 ));
35761 Ok(Tensor { c_tensor: c_tensors[0] })
35762 }
35763
35764 pub fn f_to_dense_backward(
35765 &self,
35766 grad: &Tensor,
35767 masked_grad: bool,
35768 ) -> Result<Tensor, TchError> {
35769 let mut c_tensors = [std::ptr::null_mut(); 1];
35770 unsafe_torch_err!(atg_to_dense_backward(
35771 c_tensors.as_mut_ptr(),
35772 grad.c_tensor,
35773 self.c_tensor,
35774 if masked_grad { 1 } else { 0 }
35775 ));
35776 Ok(Tensor { c_tensor: c_tensors[0] })
35777 }
35778
35779 pub fn f_to_device_(
35780 &self,
35781 device: Device,
35782 dtype: Kind,
35783 non_blocking: bool,
35784 copy: bool,
35785 ) -> Result<Tensor, TchError> {
35786 let mut c_tensors = [std::ptr::null_mut(); 1];
35787 unsafe_torch_err!(atg_to_device(
35788 c_tensors.as_mut_ptr(),
35789 self.c_tensor,
35790 device.c_int(),
35791 dtype.c_int(),
35792 if non_blocking { 1 } else { 0 },
35793 if copy { 1 } else { 0 }
35794 ));
35795 Ok(Tensor { c_tensor: c_tensors[0] })
35796 }
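// Illustrative usage sketch for `f_to_device_` above: moves the tensor to
// `device`, casts it to `dtype`, and forwards the `non_blocking`/`copy` flags
// to C as 0/1 ints. Given some tensor `t`:
//
//     let moved = t.f_to_device_(Device::Cpu, Kind::Float, false, false)?;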
35797
35798 pub fn f_to_dtype(
35799 &self,
35800 dtype: Kind,
35801 non_blocking: bool,
35802 copy: bool,
35803 ) -> Result<Tensor, TchError> {
35804 let mut c_tensors = [std::ptr::null_mut(); 1];
35805 unsafe_torch_err!(atg_to_dtype(
35806 c_tensors.as_mut_ptr(),
35807 self.c_tensor,
35808 dtype.c_int(),
35809 if non_blocking { 1 } else { 0 },
35810 if copy { 1 } else { 0 }
35811 ));
35812 Ok(Tensor { c_tensor: c_tensors[0] })
35813 }
35814
35815 pub fn f_to_dtype_layout(
35816 &self,
35817 options: (Kind, Device),
35818 non_blocking: bool,
35819 copy: bool,
35820 ) -> Result<Tensor, TchError> {
35821 let mut c_tensors = [std::ptr::null_mut(); 1];
35822 unsafe_torch_err!(atg_to_dtype_layout(
35823 c_tensors.as_mut_ptr(),
35824 self.c_tensor,
35825 options.0.c_int(),
35826 options.1.c_int(),
35827 if non_blocking { 1 } else { 0 },
35828 if copy { 1 } else { 0 }
35829 ));
35830 Ok(Tensor { c_tensor: c_tensors[0] })
35831 }
35832
35833 pub fn f_to_mkldnn(&self, dtype: impl Into<Option<Kind>>) -> Result<Tensor, TchError> {
35834 let mut c_tensors = [std::ptr::null_mut(); 1];
35835 unsafe_torch_err!(atg_to_mkldnn(
35836 c_tensors.as_mut_ptr(),
35837 self.c_tensor,
35838 dtype.into().map_or(-1, |s| s.c_int())
35839 ));
35840 Ok(Tensor { c_tensor: c_tensors[0] })
35841 }
35842
35843 pub fn f_to_mkldnn_backward(&self, grad: &Tensor) -> Result<Tensor, TchError> {
35844 let mut c_tensors = [std::ptr::null_mut(); 1];
35845 unsafe_torch_err!(atg_to_mkldnn_backward(
35846 c_tensors.as_mut_ptr(),
35847 grad.c_tensor,
35848 self.c_tensor
35849 ));
35850 Ok(Tensor { c_tensor: c_tensors[0] })
35851 }
35852
35853 pub fn f_to_mkldnn_out(
35854 &self,
35855 out: &Tensor,
35856 dtype: impl Into<Option<Kind>>,
35857 ) -> Result<Tensor, TchError> {
35858 let mut c_tensors = [std::ptr::null_mut(); 1];
35859 unsafe_torch_err!(atg_to_mkldnn_out(
35860 c_tensors.as_mut_ptr(),
35861 out.c_tensor,
35862 self.c_tensor,
35863 dtype.into().map_or(-1, |s| s.c_int())
35864 ));
35865 Ok(Tensor { c_tensor: c_tensors[0] })
35866 }
35867
35868 pub fn f_to_other(
35869 &self,
35870 other: &Tensor,
35871 non_blocking: bool,
35872 copy: bool,
35873 ) -> Result<Tensor, TchError> {
35874 let mut c_tensors = [std::ptr::null_mut(); 1];
35875 unsafe_torch_err!(atg_to_other(
35876 c_tensors.as_mut_ptr(),
35877 self.c_tensor,
35878 other.c_tensor,
35879 if non_blocking { 1 } else { 0 },
35880 if copy { 1 } else { 0 }
35881 ));
35882 Ok(Tensor { c_tensor: c_tensors[0] })
35883 }
35884
35885 pub fn f_to_padded_tensor(
35886 &self,
35887 padding: f64,
35888 output_size: impl IntListOption,
35889 ) -> Result<Tensor, TchError> {
35890 let mut c_tensors = [std::ptr::null_mut(); 1];
35891 unsafe_torch_err!(atg_to_padded_tensor(
35892 c_tensors.as_mut_ptr(),
35893 self.c_tensor,
35894 padding,
35895 output_size.as_ptr(),
35896 output_size.len_i32()
35897 ));
35898 Ok(Tensor { c_tensor: c_tensors[0] })
35899 }
35900
35901 pub fn f_to_padded_tensor_out(
35902 &self,
35903 out: &Tensor,
35904 padding: f64,
35905 output_size: impl IntListOption,
35906 ) -> Result<Tensor, TchError> {
35907 let mut c_tensors = [std::ptr::null_mut(); 1];
35908 unsafe_torch_err!(atg_to_padded_tensor_out(
35909 c_tensors.as_mut_ptr(),
35910 out.c_tensor,
35911 self.c_tensor,
35912 padding,
35913 output_size.as_ptr(),
35914 output_size.len_i32()
35915 ));
35916 Ok(Tensor { c_tensor: c_tensors[0] })
35917 }
35918
35919 pub fn f_to_sparse(
35920 &self,
35921 layout: Option<Layout>,
35922 blocksize: impl IntListOption,
35923 dense_dim: impl Into<Option<i64>>,
35924 ) -> Result<Tensor, TchError> {
35925 let dense_dim = dense_dim.into();
35926 let mut c_tensors = [std::ptr::null_mut(); 1];
35927 unsafe_torch_err!(atg_to_sparse(
35928 c_tensors.as_mut_ptr(),
35929 self.c_tensor,
35930 layout.map_or(-1, |s| s.to_i8()),
35931 blocksize.as_ptr(),
35932 blocksize.len_i32(),
35933 dense_dim.unwrap_or(0i64),
35934 dense_dim.is_none() as i8
35935 ));
35936 Ok(Tensor { c_tensor: c_tensors[0] })
35937 }
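// Note on the optional arguments above (shared by the other `f_to_sparse_*`
// variants): a missing `layout` is passed to C as -1, and a missing `dense_dim`
// is passed as a placeholder value together with a trailing `is_none` flag, so
// `None` lets LibTorch fall back to its own defaults.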
35938
35939 pub fn f_to_sparse_bsc(
35940 &self,
35941 blocksize: impl IntList,
35942 dense_dim: impl Into<Option<i64>>,
35943 ) -> Result<Tensor, TchError> {
35944 let dense_dim = dense_dim.into();
35945 let mut c_tensors = [std::ptr::null_mut(); 1];
35946 unsafe_torch_err!(atg_to_sparse_bsc(
35947 c_tensors.as_mut_ptr(),
35948 self.c_tensor,
35949 blocksize.as_ptr(),
35950 blocksize.len_i32(),
35951 dense_dim.unwrap_or(0i64),
35952 dense_dim.is_none() as i8
35953 ));
35954 Ok(Tensor { c_tensor: c_tensors[0] })
35955 }
35956
35957 pub fn f_to_sparse_bsr(
35958 &self,
35959 blocksize: impl IntList,
35960 dense_dim: impl Into<Option<i64>>,
35961 ) -> Result<Tensor, TchError> {
35962 let dense_dim = dense_dim.into();
35963 let mut c_tensors = [std::ptr::null_mut(); 1];
35964 unsafe_torch_err!(atg_to_sparse_bsr(
35965 c_tensors.as_mut_ptr(),
35966 self.c_tensor,
35967 blocksize.as_ptr(),
35968 blocksize.len_i32(),
35969 dense_dim.unwrap_or(0i64),
35970 dense_dim.is_none() as i8
35971 ));
35972 Ok(Tensor { c_tensor: c_tensors[0] })
35973 }
35974
35975 pub fn f_to_sparse_csc(&self, dense_dim: impl Into<Option<i64>>) -> Result<Tensor, TchError> {
35976 let dense_dim = dense_dim.into();
35977 let mut c_tensors = [std::ptr::null_mut(); 1];
35978 unsafe_torch_err!(atg_to_sparse_csc(
35979 c_tensors.as_mut_ptr(),
35980 self.c_tensor,
35981 dense_dim.unwrap_or(0i64),
35982 dense_dim.is_none() as i8
35983 ));
35984 Ok(Tensor { c_tensor: c_tensors[0] })
35985 }
35986
35987 pub fn f_to_sparse_csr(&self, dense_dim: impl Into<Option<i64>>) -> Result<Tensor, TchError> {
35988 let dense_dim = dense_dim.into();
35989 let mut c_tensors = [std::ptr::null_mut(); 1];
35990 unsafe_torch_err!(atg_to_sparse_csr(
35991 c_tensors.as_mut_ptr(),
35992 self.c_tensor,
35993 dense_dim.unwrap_or(0i64),
35994 dense_dim.is_none() as i8
35995 ));
35996 Ok(Tensor { c_tensor: c_tensors[0] })
35997 }
35998
35999 pub fn f_to_sparse_sparse_dim(&self, sparse_dim: i64) -> Result<Tensor, TchError> {
36000 let mut c_tensors = [std::ptr::null_mut(); 1];
36001 unsafe_torch_err!(atg_to_sparse_sparse_dim(
36002 c_tensors.as_mut_ptr(),
36003 self.c_tensor,
36004 sparse_dim
36005 ));
36006 Ok(Tensor { c_tensor: c_tensors[0] })
36007 }
36008
36009 pub fn f_topk(
36010 &self,
36011 k: i64,
36012 dim: i64,
36013 largest: bool,
36014 sorted: bool,
36015 ) -> Result<(Tensor, Tensor), TchError> {
36016 let mut c_tensors = [std::ptr::null_mut(); 2];
36017 unsafe_torch_err!(atg_topk(
36018 c_tensors.as_mut_ptr(),
36019 self.c_tensor,
36020 k,
36021 dim,
36022 if largest { 1 } else { 0 },
36023 if sorted { 1 } else { 0 }
36024 ));
36025 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
36026 }
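// Illustrative usage sketch for `f_topk` above: returns the `k` largest (or
// smallest, with `largest = false`) entries along `dim` plus their indices.
// `Tensor::from_slice` is assumed to exist in this crate.
//
//     let t = Tensor::from_slice(&[3.0f32, 1.0, 4.0, 1.0, 5.0]);
//     let (values, indices) = t.f_topk(2, 0, true, true)?; // values [5.0, 4.0]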
36027
36028 pub fn f_topk_values(
36029 &self,
36030 values: &Tensor,
36031 indices: &Tensor,
36032 k: i64,
36033 dim: i64,
36034 largest: bool,
36035 sorted: bool,
36036 ) -> Result<(Tensor, Tensor), TchError> {
36037 let mut c_tensors = [std::ptr::null_mut(); 2];
36038 unsafe_torch_err!(atg_topk_values(
36039 c_tensors.as_mut_ptr(),
36040 values.c_tensor,
36041 indices.c_tensor,
36042 self.c_tensor,
36043 k,
36044 dim,
36045 if largest { 1 } else { 0 },
36046 if sorted { 1 } else { 0 }
36047 ));
36048 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
36049 }
36050
36051 pub fn f_totype(&self, scalar_type: Kind) -> Result<Tensor, TchError> {
36052 let mut c_tensors = [std::ptr::null_mut(); 1];
36053 unsafe_torch_err!(atg_totype(c_tensors.as_mut_ptr(), self.c_tensor, scalar_type.c_int()));
36054 Ok(Tensor { c_tensor: c_tensors[0] })
36055 }
36056
36057 pub fn f_trace(&self) -> Result<Tensor, TchError> {
36058 let mut c_tensors = [std::ptr::null_mut(); 1];
36059 unsafe_torch_err!(atg_trace(c_tensors.as_mut_ptr(), self.c_tensor));
36060 Ok(Tensor { c_tensor: c_tensors[0] })
36061 }
36062
36063 pub fn f_trace_backward(grad: &Tensor, sizes: impl IntList) -> Result<Tensor, TchError> {
36064 let mut c_tensors = [std::ptr::null_mut(); 1];
36065 unsafe_torch_err!(atg_trace_backward(
36066 c_tensors.as_mut_ptr(),
36067 grad.c_tensor,
36068 sizes.as_ptr(),
36069 sizes.len_i32()
36070 ));
36071 Ok(Tensor { c_tensor: c_tensors[0] })
36072 }
36073
36074 pub fn f_trace_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
36075 let mut c_tensors = [std::ptr::null_mut(); 1];
36076 unsafe_torch_err!(atg_trace_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
36077 Ok(Tensor { c_tensor: c_tensors[0] })
36078 }
36079
36080 pub fn f_transpose(&self, dim0: i64, dim1: i64) -> Result<Tensor, TchError> {
36081 let mut c_tensors = [std::ptr::null_mut(); 1];
36082 unsafe_torch_err!(atg_transpose(c_tensors.as_mut_ptr(), self.c_tensor, dim0, dim1));
36083 Ok(Tensor { c_tensor: c_tensors[0] })
36084 }
36085
36086 pub fn f_transpose_(&mut self, dim0: i64, dim1: i64) -> Result<Tensor, TchError> {
36087 let mut c_tensors = [std::ptr::null_mut(); 1];
36088 unsafe_torch_err!(atg_transpose_(c_tensors.as_mut_ptr(), self.c_tensor, dim0, dim1));
36089 Ok(Tensor { c_tensor: c_tensors[0] })
36090 }
36091
36092 pub fn f_transpose_copy(&self, dim0: i64, dim1: i64) -> Result<Tensor, TchError> {
36093 let mut c_tensors = [std::ptr::null_mut(); 1];
36094 unsafe_torch_err!(atg_transpose_copy(c_tensors.as_mut_ptr(), self.c_tensor, dim0, dim1));
36095 Ok(Tensor { c_tensor: c_tensors[0] })
36096 }
36097
36098 pub fn f_transpose_copy_int_out(
36099 &self,
36100 out: &Tensor,
36101 dim0: i64,
36102 dim1: i64,
36103 ) -> Result<Tensor, TchError> {
36104 let mut c_tensors = [std::ptr::null_mut(); 1];
36105 unsafe_torch_err!(atg_transpose_copy_int_out(
36106 c_tensors.as_mut_ptr(),
36107 out.c_tensor,
36108 self.c_tensor,
36109 dim0,
36110 dim1
36111 ));
36112 Ok(Tensor { c_tensor: c_tensors[0] })
36113 }
36114
36115 pub fn f_trapezoid(y: &Tensor, dim: i64) -> Result<Tensor, TchError> {
36116 let mut c_tensors = [std::ptr::null_mut(); 1];
36117 unsafe_torch_err!(atg_trapezoid(c_tensors.as_mut_ptr(), y.c_tensor, dim));
36118 Ok(Tensor { c_tensor: c_tensors[0] })
36119 }
36120
36121 pub fn f_trapezoid_x(y: &Tensor, x: &Tensor, dim: i64) -> Result<Tensor, TchError> {
36122 let mut c_tensors = [std::ptr::null_mut(); 1];
36123 unsafe_torch_err!(atg_trapezoid_x(c_tensors.as_mut_ptr(), y.c_tensor, x.c_tensor, dim));
36124 Ok(Tensor { c_tensor: c_tensors[0] })
36125 }
36126
36127 pub fn f_trapz(y: &Tensor, x: &Tensor, dim: i64) -> Result<Tensor, TchError> {
36128 let mut c_tensors = [std::ptr::null_mut(); 1];
36129 unsafe_torch_err!(atg_trapz(c_tensors.as_mut_ptr(), y.c_tensor, x.c_tensor, dim));
36130 Ok(Tensor { c_tensor: c_tensors[0] })
36131 }
36132
36133 pub fn f_trapz_dx(y: &Tensor, dx: f64, dim: i64) -> Result<Tensor, TchError> {
36134 let mut c_tensors = [std::ptr::null_mut(); 1];
36135 unsafe_torch_err!(atg_trapz_dx(c_tensors.as_mut_ptr(), y.c_tensor, dx, dim));
36136 Ok(Tensor { c_tensor: c_tensors[0] })
36137 }
36138
36139 pub fn f_triangular_solve(
36140 &self,
36141 a: &Tensor,
36142 upper: bool,
36143 transpose: bool,
36144 unitriangular: bool,
36145 ) -> Result<(Tensor, Tensor), TchError> {
36146 let mut c_tensors = [std::ptr::null_mut(); 2];
36147 unsafe_torch_err!(atg_triangular_solve(
36148 c_tensors.as_mut_ptr(),
36149 self.c_tensor,
36150 a.c_tensor,
36151 if upper { 1 } else { 0 },
36152 if transpose { 1 } else { 0 },
36153 if unitriangular { 1 } else { 0 }
36154 ));
36155 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
36156 }
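// Illustrative usage sketch for `f_triangular_solve` above: given a right-hand
// side `b` (= `self`) and a triangular coefficient matrix `a`, it solves
// `a * x = b`; the second element of the returned pair is the (cloned)
// coefficient matrix.
//
//     let (x, a_clone) = b.f_triangular_solve(&a, true, false, false)?;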
36157
36158 pub fn f_triangular_solve_x(
36159 &self,
36160 x: &Tensor,
36161 m: &Tensor,
36162 a: &Tensor,
36163 upper: bool,
36164 transpose: bool,
36165 unitriangular: bool,
36166 ) -> Result<(Tensor, Tensor), TchError> {
36167 let mut c_tensors = [std::ptr::null_mut(); 2];
36168 unsafe_torch_err!(atg_triangular_solve_x(
36169 c_tensors.as_mut_ptr(),
36170 x.c_tensor,
36171 m.c_tensor,
36172 self.c_tensor,
36173 a.c_tensor,
36174 if upper { 1 } else { 0 },
36175 if transpose { 1 } else { 0 },
36176 if unitriangular { 1 } else { 0 }
36177 ));
36178 Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
36179 }
36180
36181 pub fn f_tril(&self, diagonal: i64) -> Result<Tensor, TchError> {
36182 let mut c_tensors = [std::ptr::null_mut(); 1];
36183 unsafe_torch_err!(atg_tril(c_tensors.as_mut_ptr(), self.c_tensor, diagonal));
36184 Ok(Tensor { c_tensor: c_tensors[0] })
36185 }
36186
36187 pub fn f_tril_(&mut self, diagonal: i64) -> Result<Tensor, TchError> {
36188 let mut c_tensors = [std::ptr::null_mut(); 1];
36189 unsafe_torch_err!(atg_tril_(c_tensors.as_mut_ptr(), self.c_tensor, diagonal));
36190 Ok(Tensor { c_tensor: c_tensors[0] })
36191 }
36192
36193 pub fn f_tril_indices(
36194 row: i64,
36195 col: i64,
36196 offset: i64,
36197 options: (Kind, Device),
36198 ) -> Result<Tensor, TchError> {
36199 let mut c_tensors = [std::ptr::null_mut(); 1];
36200 unsafe_torch_err!(atg_tril_indices(
36201 c_tensors.as_mut_ptr(),
36202 row,
36203 col,
36204 offset,
36205 options.0.c_int(),
36206 options.1.c_int()
36207 ));
36208 Ok(Tensor { c_tensor: c_tensors[0] })
36209 }
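// Illustrative usage sketch for `f_tril_indices` above: builds the 2 x N index
// tensor of the lower-triangular positions of a `row` x `col` matrix on the
// requested kind/device (N = 6 for a 3 x 3 matrix with offset 0).
//
//     let idx = Tensor::f_tril_indices(3, 3, 0, (Kind::Int64, Device::Cpu))?;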
36210
36211 pub fn f_tril_indices_out(
36212 out: &Tensor,
36213 row: i64,
36214 col: i64,
36215 offset: i64,
36216 ) -> Result<Tensor, TchError> {
36217 let mut c_tensors = [std::ptr::null_mut(); 1];
36218 unsafe_torch_err!(atg_tril_indices_out(
36219 c_tensors.as_mut_ptr(),
36220 out.c_tensor,
36221 row,
36222 col,
36223 offset
36224 ));
36225 Ok(Tensor { c_tensor: c_tensors[0] })
36226 }
36227
36228 pub fn f_tril_out(&self, out: &Tensor, diagonal: i64) -> Result<Tensor, TchError> {
36229 let mut c_tensors = [std::ptr::null_mut(); 1];
36230 unsafe_torch_err!(atg_tril_out(
36231 c_tensors.as_mut_ptr(),
36232 out.c_tensor,
36233 self.c_tensor,
36234 diagonal
36235 ));
36236 Ok(Tensor { c_tensor: c_tensors[0] })
36237 }
36238
36239 pub fn f_triplet_margin_loss(
36240 anchor: &Tensor,
36241 positive: &Tensor,
36242 negative: &Tensor,
36243 margin: f64,
36244 p: f64,
36245 eps: f64,
36246 swap: bool,
36247 reduction: crate::Reduction,
36248 ) -> Result<Tensor, TchError> {
36249 let mut c_tensors = [std::ptr::null_mut(); 1];
36250 unsafe_torch_err!(atg_triplet_margin_loss(
36251 c_tensors.as_mut_ptr(),
36252 anchor.c_tensor,
36253 positive.c_tensor,
36254 negative.c_tensor,
36255 margin,
36256 p,
36257 eps,
36258 if swap { 1 } else { 0 },
36259 reduction.to_int()
36260 ));
36261 Ok(Tensor { c_tensor: c_tensors[0] })
36262 }
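// Illustrative usage sketch for `f_triplet_margin_loss` above: computes
// max(d(anchor, positive) - d(anchor, negative) + margin, 0) under the p-norm
// distance and reduces it according to `reduction`.
//
//     let loss = Tensor::f_triplet_margin_loss(
//         &anchor, &positive, &negative, 1.0, 2.0, 1e-6, false, crate::Reduction::Mean)?;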
36263
36264 pub fn f_triu(&self, diagonal: i64) -> Result<Tensor, TchError> {
36265 let mut c_tensors = [std::ptr::null_mut(); 1];
36266 unsafe_torch_err!(atg_triu(c_tensors.as_mut_ptr(), self.c_tensor, diagonal));
36267 Ok(Tensor { c_tensor: c_tensors[0] })
36268 }
36269
36270 pub fn f_triu_(&mut self, diagonal: i64) -> Result<Tensor, TchError> {
36271 let mut c_tensors = [std::ptr::null_mut(); 1];
36272 unsafe_torch_err!(atg_triu_(c_tensors.as_mut_ptr(), self.c_tensor, diagonal));
36273 Ok(Tensor { c_tensor: c_tensors[0] })
36274 }
36275
36276 pub fn f_triu_indices(
36277 row: i64,
36278 col: i64,
36279 offset: i64,
36280 options: (Kind, Device),
36281 ) -> Result<Tensor, TchError> {
36282 let mut c_tensors = [std::ptr::null_mut(); 1];
36283 unsafe_torch_err!(atg_triu_indices(
36284 c_tensors.as_mut_ptr(),
36285 row,
36286 col,
36287 offset,
36288 options.0.c_int(),
36289 options.1.c_int()
36290 ));
36291 Ok(Tensor { c_tensor: c_tensors[0] })
36292 }
36293
36294 pub fn f_triu_indices_out(
36295 out: &Tensor,
36296 row: i64,
36297 col: i64,
36298 offset: i64,
36299 ) -> Result<Tensor, TchError> {
36300 let mut c_tensors = [std::ptr::null_mut(); 1];
36301 unsafe_torch_err!(atg_triu_indices_out(
36302 c_tensors.as_mut_ptr(),
36303 out.c_tensor,
36304 row,
36305 col,
36306 offset
36307 ));
36308 Ok(Tensor { c_tensor: c_tensors[0] })
36309 }
36310
36311 pub fn f_triu_out(&self, out: &Tensor, diagonal: i64) -> Result<Tensor, TchError> {
36312 let mut c_tensors = [std::ptr::null_mut(); 1];
36313 unsafe_torch_err!(atg_triu_out(
36314 c_tensors.as_mut_ptr(),
36315 out.c_tensor,
36316 self.c_tensor,
36317 diagonal
36318 ));
36319 Ok(Tensor { c_tensor: c_tensors[0] })
36320 }
36321
36322 pub fn f_true_divide(&self, other: &Tensor) -> Result<Tensor, TchError> {
36323 let mut c_tensors = [std::ptr::null_mut(); 1];
36324 unsafe_torch_err!(atg_true_divide(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
36325 Ok(Tensor { c_tensor: c_tensors[0] })
36326 }
36327
36328 pub fn f_true_divide_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
36329 let mut c_tensors = [std::ptr::null_mut(); 1];
36330 unsafe_torch_err!(atg_true_divide_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
36331 Ok(Tensor { c_tensor: c_tensors[0] })
36332 }
36333
36334 pub fn f_true_divide_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
36335 let mut c_tensors = [std::ptr::null_mut(); 1];
36336 unsafe_torch_err!(atg_true_divide_out(
36337 c_tensors.as_mut_ptr(),
36338 out.c_tensor,
36339 self.c_tensor,
36340 other.c_tensor
36341 ));
36342 Ok(Tensor { c_tensor: c_tensors[0] })
36343 }
36344
36345 pub fn f_true_divide_scalar<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
36346 let mut c_tensors = [std::ptr::null_mut(); 1];
36347 unsafe_torch_err!(atg_true_divide_scalar(
36348 c_tensors.as_mut_ptr(),
36349 self.c_tensor,
36350 other.into().c_scalar
36351 ));
36352 Ok(Tensor { c_tensor: c_tensors[0] })
36353 }
36354
36355 pub fn f_true_divide_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
36356 let mut c_tensors = [std::ptr::null_mut(); 1];
36357 unsafe_torch_err!(atg_true_divide_scalar_(
36358 c_tensors.as_mut_ptr(),
36359 self.c_tensor,
36360 other.into().c_scalar
36361 ));
36362 Ok(Tensor { c_tensor: c_tensors[0] })
36363 }
36364
36365 pub fn f_trunc(&self) -> Result<Tensor, TchError> {
36366 let mut c_tensors = [std::ptr::null_mut(); 1];
36367 unsafe_torch_err!(atg_trunc(c_tensors.as_mut_ptr(), self.c_tensor));
36368 Ok(Tensor { c_tensor: c_tensors[0] })
36369 }
36370
36371 pub fn f_trunc_(&mut self) -> Result<Tensor, TchError> {
36372 let mut c_tensors = [std::ptr::null_mut(); 1];
36373 unsafe_torch_err!(atg_trunc_(c_tensors.as_mut_ptr(), self.c_tensor));
36374 Ok(Tensor { c_tensor: c_tensors[0] })
36375 }
36376
36377 pub fn f_trunc_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
36378 let mut c_tensors = [std::ptr::null_mut(); 1];
36379 unsafe_torch_err!(atg_trunc_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
36380 Ok(Tensor { c_tensor: c_tensors[0] })
36381 }
36382
36383 pub fn f_type_as(&self, other: &Tensor) -> Result<Tensor, TchError> {
36384 let mut c_tensors = [std::ptr::null_mut(); 1];
36385 unsafe_torch_err!(atg_type_as(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
36386 Ok(Tensor { c_tensor: c_tensors[0] })
36387 }
36388
36389 pub fn f_unbind(&self, dim: i64) -> Result<Vec<Tensor>, TchError> {
36390 let c_tensors = unsafe_torch_err!(atg_unbind(self.c_tensor, dim));
36391 let mut r__ = vec![];
36392 let mut i = 0;
36393 loop {
36394 let c__ = unsafe { *c_tensors.add(i) };
36395 if c__.is_null() {
36396 break;
36397 }
36398 r__.push(Tensor { c_tensor: c__ });
36399 i += 1;
36400 }
36401 unsafe { libc::free(c_tensors as *mut libc::c_void) }
36402 Ok(r__)
36403 }
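// Note on the `Vec<Tensor>` returns used here and in the surrounding wrappers:
// the C shim hands back a null-terminated array of tensor pointers, which the
// loop walks until the terminating null, wrapping each entry into an owned
// `Tensor` before releasing the array itself with `libc::free`. For a `[2, 3]`
// tensor `t`, unbinding along dim 0 yields two `[3]` tensors:
//
//     let rows = t.f_unbind(0)?; // rows.len() == 2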
36404
36405 pub fn f_unbind_copy(&self, dim: i64) -> Result<Vec<Tensor>, TchError> {
36406 let c_tensors = unsafe_torch_err!(atg_unbind_copy(self.c_tensor, dim));
36407 let mut r__ = vec![];
36408 let mut i = 0;
36409 loop {
36410 let c__ = unsafe { *c_tensors.add(i) };
36411 if c__.is_null() {
36412 break;
36413 }
36414 r__.push(Tensor { c_tensor: c__ });
36415 i += 1;
36416 }
36417 unsafe { libc::free(c_tensors as *mut libc::c_void) }
36418 Ok(r__)
36419 }
36420
36421 pub fn f_unbind_copy_int_out<T: Borrow<Tensor>>(
36422 &self,
36423 out: &[T],
36424 dim: i64,
36425 ) -> Result<(), TchError> {
36426 unsafe_torch_err!(atg_unbind_copy_int_out(
36427 ptr_list(out).as_ptr(),
36428 out.len() as i32,
36429 self.c_tensor,
36430 dim
36431 ));
36432 Ok(())
36433 }
36434
36435 pub fn f_unflatten(&self, dim: i64, sizes: impl IntList) -> Result<Tensor, TchError> {
36436 let mut c_tensors = [std::ptr::null_mut(); 1];
36437 unsafe_torch_err!(atg_unflatten(
36438 c_tensors.as_mut_ptr(),
36439 self.c_tensor,
36440 dim,
36441 sizes.as_ptr(),
36442 sizes.len_i32()
36443 ));
36444 Ok(Tensor { c_tensor: c_tensors[0] })
36445 }
36446
36447 pub fn f_unflatten_dense_tensors<T: Borrow<Tensor>>(
36448 flat: &Tensor,
36449 tensors: &[T],
36450 ) -> Result<Vec<Tensor>, TchError> {
36451 let c_tensors = unsafe_torch_err!(atg_unflatten_dense_tensors(
36452 flat.c_tensor,
36453 ptr_list(tensors).as_ptr(),
36454 tensors.len() as i32
36455 ));
36456 let mut r__ = vec![];
36457 let mut i = 0;
36458 loop {
36459 let c__ = unsafe { *c_tensors.add(i) };
36460 if c__.is_null() {
36461 break;
36462 }
36463 r__.push(Tensor { c_tensor: c__ });
36464 i += 1;
36465 }
36466 unsafe { libc::free(c_tensors as *mut libc::c_void) }
36467 Ok(r__)
36468 }
36469
36470 pub fn f_unfold(&self, dimension: i64, size: i64, step: i64) -> Result<Tensor, TchError> {
36471 let mut c_tensors = [std::ptr::null_mut(); 1];
36472 unsafe_torch_err!(atg_unfold(c_tensors.as_mut_ptr(), self.c_tensor, dimension, size, step));
36473 Ok(Tensor { c_tensor: c_tensors[0] })
36474 }
36475
36476 pub fn f_unfold_backward(
36477 grad_in: &Tensor,
36478 input_sizes: impl IntList,
36479 dim: i64,
36480 size: i64,
36481 step: i64,
36482 ) -> Result<Tensor, TchError> {
36483 let mut c_tensors = [std::ptr::null_mut(); 1];
36484 unsafe_torch_err!(atg_unfold_backward(
36485 c_tensors.as_mut_ptr(),
36486 grad_in.c_tensor,
36487 input_sizes.as_ptr(),
36488 input_sizes.len_i32(),
36489 dim,
36490 size,
36491 step
36492 ));
36493 Ok(Tensor { c_tensor: c_tensors[0] })
36494 }
36495
36496 pub fn f_unfold_backward_out(
36497 out: &Tensor,
36498 grad_in: &Tensor,
36499 input_sizes: impl IntList,
36500 dim: i64,
36501 size: i64,
36502 step: i64,
36503 ) -> Result<Tensor, TchError> {
36504 let mut c_tensors = [std::ptr::null_mut(); 1];
36505 unsafe_torch_err!(atg_unfold_backward_out(
36506 c_tensors.as_mut_ptr(),
36507 out.c_tensor,
36508 grad_in.c_tensor,
36509 input_sizes.as_ptr(),
36510 input_sizes.len_i32(),
36511 dim,
36512 size,
36513 step
36514 ));
36515 Ok(Tensor { c_tensor: c_tensors[0] })
36516 }
36517
36518 pub fn f_unfold_copy(&self, dimension: i64, size: i64, step: i64) -> Result<Tensor, TchError> {
36519 let mut c_tensors = [std::ptr::null_mut(); 1];
36520 unsafe_torch_err!(atg_unfold_copy(
36521 c_tensors.as_mut_ptr(),
36522 self.c_tensor,
36523 dimension,
36524 size,
36525 step
36526 ));
36527 Ok(Tensor { c_tensor: c_tensors[0] })
36528 }
36529
36530 pub fn f_unfold_copy_out(
36531 &self,
36532 out: &Tensor,
36533 dimension: i64,
36534 size: i64,
36535 step: i64,
36536 ) -> Result<Tensor, TchError> {
36537 let mut c_tensors = [std::ptr::null_mut(); 1];
36538 unsafe_torch_err!(atg_unfold_copy_out(
36539 c_tensors.as_mut_ptr(),
36540 out.c_tensor,
36541 self.c_tensor,
36542 dimension,
36543 size,
36544 step
36545 ));
36546 Ok(Tensor { c_tensor: c_tensors[0] })
36547 }
36548
36549 pub fn f_uniform(&self, from: f64, to: f64) -> Result<Tensor, TchError> {
36550 let mut c_tensors = [std::ptr::null_mut(); 1];
36551 unsafe_torch_err!(atg_uniform(c_tensors.as_mut_ptr(), self.c_tensor, from, to));
36552 Ok(Tensor { c_tensor: c_tensors[0] })
36553 }
36554
36555 pub fn f_uniform_(&mut self, from: f64, to: f64) -> Result<Tensor, TchError> {
36556 let mut c_tensors = [std::ptr::null_mut(); 1];
36557 unsafe_torch_err!(atg_uniform_(c_tensors.as_mut_ptr(), self.c_tensor, from, to));
36558 Ok(Tensor { c_tensor: c_tensors[0] })
36559 }
36560
36561 pub fn f_uniform_out(&self, out: &Tensor, from: f64, to: f64) -> Result<Tensor, TchError> {
36562 let mut c_tensors = [std::ptr::null_mut(); 1];
36563 unsafe_torch_err!(atg_uniform_out(
36564 c_tensors.as_mut_ptr(),
36565 out.c_tensor,
36566 self.c_tensor,
36567 from,
36568 to
36569 ));
36570 Ok(Tensor { c_tensor: c_tensors[0] })
36571 }
36572
36573 pub fn f_unique_consecutive(
36574 &self,
36575 return_inverse: bool,
36576 return_counts: bool,
36577 dim: impl Into<Option<i64>>,
36578 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
36579 let dim = dim.into();
36580 let mut c_tensors = [std::ptr::null_mut(); 3];
36581 unsafe_torch_err!(atg_unique_consecutive(
36582 c_tensors.as_mut_ptr(),
36583 self.c_tensor,
36584 if return_inverse { 1 } else { 0 },
36585 if return_counts { 1 } else { 0 },
36586 dim.unwrap_or(0i64),
36587 dim.is_none() as i8
36588 ));
36589 Ok((
36590 Tensor { c_tensor: c_tensors[0] },
36591 Tensor { c_tensor: c_tensors[1] },
36592 Tensor { c_tensor: c_tensors[2] },
36593 ))
36594 }
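// Illustrative usage sketch for `f_unique_consecutive` above: collapses runs of
// consecutive duplicates without sorting globally; the inverse-index and counts
// outputs are only meaningful when the matching flags are set, and `dim: None`
// treats the input as flattened.
//
//     let (uniq, _inverse, counts) = t.f_unique_consecutive(false, true, None)?;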
36595
36596 pub fn f_unique_consecutive_out(
36597 &self,
36598 out0: &Tensor,
36599 out1: &Tensor,
36600 out2: &Tensor,
36601 return_inverse: bool,
36602 return_counts: bool,
36603 dim: impl Into<Option<i64>>,
36604 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
36605 let dim = dim.into();
36606 let mut c_tensors = [std::ptr::null_mut(); 3];
36607 unsafe_torch_err!(atg_unique_consecutive_out(
36608 c_tensors.as_mut_ptr(),
36609 out0.c_tensor,
36610 out1.c_tensor,
36611 out2.c_tensor,
36612 self.c_tensor,
36613 if return_inverse { 1 } else { 0 },
36614 if return_counts { 1 } else { 0 },
36615 dim.unwrap_or(0i64),
36616 dim.is_none() as i8
36617 ));
36618 Ok((
36619 Tensor { c_tensor: c_tensors[0] },
36620 Tensor { c_tensor: c_tensors[1] },
36621 Tensor { c_tensor: c_tensors[2] },
36622 ))
36623 }
36624
36625 pub fn f_unique_dim(
36626 &self,
36627 dim: i64,
36628 sorted: bool,
36629 return_inverse: bool,
36630 return_counts: bool,
36631 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
36632 let mut c_tensors = [std::ptr::null_mut(); 3];
36633 unsafe_torch_err!(atg_unique_dim(
36634 c_tensors.as_mut_ptr(),
36635 self.c_tensor,
36636 dim,
36637 if sorted { 1 } else { 0 },
36638 if return_inverse { 1 } else { 0 },
36639 if return_counts { 1 } else { 0 }
36640 ));
36641 Ok((
36642 Tensor { c_tensor: c_tensors[0] },
36643 Tensor { c_tensor: c_tensors[1] },
36644 Tensor { c_tensor: c_tensors[2] },
36645 ))
36646 }
36647
36648 pub fn f_unique_dim_consecutive(
36649 &self,
36650 dim: i64,
36651 return_inverse: bool,
36652 return_counts: bool,
36653 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
36654 let mut c_tensors = [std::ptr::null_mut(); 3];
36655 unsafe_torch_err!(atg_unique_dim_consecutive(
36656 c_tensors.as_mut_ptr(),
36657 self.c_tensor,
36658 dim,
36659 if return_inverse { 1 } else { 0 },
36660 if return_counts { 1 } else { 0 }
36661 ));
36662 Ok((
36663 Tensor { c_tensor: c_tensors[0] },
36664 Tensor { c_tensor: c_tensors[1] },
36665 Tensor { c_tensor: c_tensors[2] },
36666 ))
36667 }
36668
36669 pub fn f_unique_dim_consecutive_out(
36670 &self,
36671 out0: &Tensor,
36672 out1: &Tensor,
36673 out2: &Tensor,
36674 dim: i64,
36675 return_inverse: bool,
36676 return_counts: bool,
36677 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
36678 let mut c_tensors = [std::ptr::null_mut(); 3];
36679 unsafe_torch_err!(atg_unique_dim_consecutive_out(
36680 c_tensors.as_mut_ptr(),
36681 out0.c_tensor,
36682 out1.c_tensor,
36683 out2.c_tensor,
36684 self.c_tensor,
36685 dim,
36686 if return_inverse { 1 } else { 0 },
36687 if return_counts { 1 } else { 0 }
36688 ));
36689 Ok((
36690 Tensor { c_tensor: c_tensors[0] },
36691 Tensor { c_tensor: c_tensors[1] },
36692 Tensor { c_tensor: c_tensors[2] },
36693 ))
36694 }
36695
36696 pub fn f_unique_dim_out(
36697 &self,
36698 out0: &Tensor,
36699 out1: &Tensor,
36700 out2: &Tensor,
36701 dim: i64,
36702 sorted: bool,
36703 return_inverse: bool,
36704 return_counts: bool,
36705 ) -> Result<(Tensor, Tensor, Tensor), TchError> {
36706 let mut c_tensors = [std::ptr::null_mut(); 3];
36707 unsafe_torch_err!(atg_unique_dim_out(
36708 c_tensors.as_mut_ptr(),
36709 out0.c_tensor,
36710 out1.c_tensor,
36711 out2.c_tensor,
36712 self.c_tensor,
36713 dim,
36714 if sorted { 1 } else { 0 },
36715 if return_inverse { 1 } else { 0 },
36716 if return_counts { 1 } else { 0 }
36717 ));
36718 Ok((
36719 Tensor { c_tensor: c_tensors[0] },
36720 Tensor { c_tensor: c_tensors[1] },
36721 Tensor { c_tensor: c_tensors[2] },
36722 ))
36723 }
36724
36725 pub fn f_unsafe_chunk(&self, chunks: i64, dim: i64) -> Result<Vec<Tensor>, TchError> {
36726 let c_tensors = unsafe_torch_err!(atg_unsafe_chunk(self.c_tensor, chunks, dim));
36727 let mut r__ = vec![];
36728 let mut i = 0;
36729 loop {
36730 let c__ = unsafe { *c_tensors.add(i) };
36731 if c__.is_null() {
36732 break;
36733 }
36734 r__.push(Tensor { c_tensor: c__ });
36735 i += 1;
36736 }
36737 unsafe { libc::free(c_tensors as *mut libc::c_void) }
36738 Ok(r__)
36739 }
36740
36741 pub fn f_unsafe_split(&self, split_size: i64, dim: i64) -> Result<Vec<Tensor>, TchError> {
36742 let c_tensors = unsafe_torch_err!(atg_unsafe_split(self.c_tensor, split_size, dim));
36743 let mut r__ = vec![];
36744 let mut i = 0;
36745 loop {
36746 let c__ = unsafe { *c_tensors.add(i) };
36747 if c__.is_null() {
36748 break;
36749 }
36750 r__.push(Tensor { c_tensor: c__ });
36751 i += 1;
36752 }
36753 unsafe { libc::free(c_tensors as *mut libc::c_void) }
36754 Ok(r__)
36755 }
36756
36757 pub fn f_unsafe_split_tensor_out<T: Borrow<Tensor>>(
36758 &self,
36759 out: &[T],
36760 split_size: i64,
36761 dim: i64,
36762 ) -> Result<(), TchError> {
36763 unsafe_torch_err!(atg_unsafe_split_tensor_out(
36764 ptr_list(out).as_ptr(),
36765 out.len() as i32,
36766 self.c_tensor,
36767 split_size,
36768 dim
36769 ));
36770 Ok(())
36771 }
36772
36773 pub fn f_unsafe_split_with_sizes(
36774 &self,
36775 split_sizes: impl IntList,
36776 dim: i64,
36777 ) -> Result<Vec<Tensor>, TchError> {
36778 let c_tensors = unsafe_torch_err!(atg_unsafe_split_with_sizes(
36779 self.c_tensor,
36780 split_sizes.as_ptr(),
36781 split_sizes.len_i32(),
36782 dim
36783 ));
36784 let mut r__ = vec![];
36785 let mut i = 0;
36786 loop {
36787 let c__ = unsafe { *c_tensors.add(i) };
36788 if c__.is_null() {
36789 break;
36790 }
36791 r__.push(Tensor { c_tensor: c__ });
36792 i += 1;
36793 }
36794 unsafe { libc::free(c_tensors as *mut libc::c_void) }
36795 Ok(r__)
36796 }
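// Illustrative usage sketch for `f_unsafe_split_with_sizes` above: splits along
// `dim` into chunks of the requested sizes (which must sum to the dimension's
// length); per upstream PyTorch, the `unsafe_` variants skip some autograd
// safety checks on the returned views. Assuming `IntList` accepts `Vec<i64>`:
//
//     let parts = t.f_unsafe_split_with_sizes(vec![2, 4], 0)?; // for a length-6 dim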
36797
36798 pub fn f_unsafe_split_with_sizes_out<T: Borrow<Tensor>>(
36799 &self,
36800 out: &[T],
36801 split_sizes: impl IntList,
36802 dim: i64,
36803 ) -> Result<(), TchError> {
36804 unsafe_torch_err!(atg_unsafe_split_with_sizes_out(
36805 ptr_list(out).as_ptr(),
36806 out.len() as i32,
36807 self.c_tensor,
36808 split_sizes.as_ptr(),
36809 split_sizes.len_i32(),
36810 dim
36811 ));
36812 Ok(())
36813 }
36814
36815 pub fn f_unsqueeze(&self, dim: i64) -> Result<Tensor, TchError> {
36816 let mut c_tensors = [std::ptr::null_mut(); 1];
36817 unsafe_torch_err!(atg_unsqueeze(c_tensors.as_mut_ptr(), self.c_tensor, dim));
36818 Ok(Tensor { c_tensor: c_tensors[0] })
36819 }
36820
36821 pub fn f_unsqueeze_(&mut self, dim: i64) -> Result<Tensor, TchError> {
36822 let mut c_tensors = [std::ptr::null_mut(); 1];
36823 unsafe_torch_err!(atg_unsqueeze_(c_tensors.as_mut_ptr(), self.c_tensor, dim));
36824 Ok(Tensor { c_tensor: c_tensors[0] })
36825 }
36826
36827 pub fn f_unsqueeze_copy(&self, dim: i64) -> Result<Tensor, TchError> {
36828 let mut c_tensors = [std::ptr::null_mut(); 1];
36829 unsafe_torch_err!(atg_unsqueeze_copy(c_tensors.as_mut_ptr(), self.c_tensor, dim));
36830 Ok(Tensor { c_tensor: c_tensors[0] })
36831 }
36832
36833 pub fn f_unsqueeze_copy_out(&self, out: &Tensor, dim: i64) -> Result<Tensor, TchError> {
36834 let mut c_tensors = [std::ptr::null_mut(); 1];
36835 unsafe_torch_err!(atg_unsqueeze_copy_out(
36836 c_tensors.as_mut_ptr(),
36837 out.c_tensor,
36838 self.c_tensor,
36839 dim
36840 ));
36841 Ok(Tensor { c_tensor: c_tensors[0] })
36842 }
36843
36844 pub fn f_upsample_bicubic2d(
36845 &self,
36846 output_size: impl IntList,
36847 align_corners: bool,
36848 scales_h: impl Into<Option<f64>>,
36849 scales_w: impl Into<Option<f64>>,
36850 ) -> Result<Tensor, TchError> {
36851 let scales_h = scales_h.into();
36852 let scales_w = scales_w.into();
36853 let mut c_tensors = [std::ptr::null_mut(); 1];
36854 unsafe_torch_err!(atg_upsample_bicubic2d(
36855 c_tensors.as_mut_ptr(),
36856 self.c_tensor,
36857 output_size.as_ptr(),
36858 output_size.len_i32(),
36859 if align_corners { 1 } else { 0 },
36860 scales_h.unwrap_or(std::f64::NAN),
36861 scales_h.is_none() as i8,
36862 scales_w.unwrap_or(std::f64::NAN),
36863 scales_w.is_none() as i8
36864 ));
36865 Ok(Tensor { c_tensor: c_tensors[0] })
36866 }
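// Note on the `scales_*` arguments used throughout the `f_upsample_*` family:
// an absent scale is encoded as NaN plus a trailing `is_none` flag, so passing
// `None` lets LibTorch derive the scale from `output_size`. A typical call only
// fixes the output size (here [H, W]):
//
//     let up = t.f_upsample_bicubic2d(vec![64, 64], false, None, None)?;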
36867
36868 pub fn f_upsample_bicubic2d_backward(
36869 grad_output: &Tensor,
36870 output_size: impl IntList,
36871 input_size: impl IntList,
36872 align_corners: bool,
36873 scales_h: impl Into<Option<f64>>,
36874 scales_w: impl Into<Option<f64>>,
36875 ) -> Result<Tensor, TchError> {
36876 let scales_h = scales_h.into();
36877 let scales_w = scales_w.into();
36878 let mut c_tensors = [std::ptr::null_mut(); 1];
36879 unsafe_torch_err!(atg_upsample_bicubic2d_backward(
36880 c_tensors.as_mut_ptr(),
36881 grad_output.c_tensor,
36882 output_size.as_ptr(),
36883 output_size.len_i32(),
36884 input_size.as_ptr(),
36885 input_size.len_i32(),
36886 if align_corners { 1 } else { 0 },
36887 scales_h.unwrap_or(std::f64::NAN),
36888 scales_h.is_none() as i8,
36889 scales_w.unwrap_or(std::f64::NAN),
36890 scales_w.is_none() as i8
36891 ));
36892 Ok(Tensor { c_tensor: c_tensors[0] })
36893 }
36894
36895 pub fn f_upsample_bicubic2d_backward_grad_input(
36896 grad_input: &Tensor,
36897 grad_output: &Tensor,
36898 output_size: impl IntList,
36899 input_size: impl IntList,
36900 align_corners: bool,
36901 scales_h: impl Into<Option<f64>>,
36902 scales_w: impl Into<Option<f64>>,
36903 ) -> Result<Tensor, TchError> {
36904 let scales_h = scales_h.into();
36905 let scales_w = scales_w.into();
36906 let mut c_tensors = [std::ptr::null_mut(); 1];
36907 unsafe_torch_err!(atg_upsample_bicubic2d_backward_grad_input(
36908 c_tensors.as_mut_ptr(),
36909 grad_input.c_tensor,
36910 grad_output.c_tensor,
36911 output_size.as_ptr(),
36912 output_size.len_i32(),
36913 input_size.as_ptr(),
36914 input_size.len_i32(),
36915 if align_corners { 1 } else { 0 },
36916 scales_h.unwrap_or(std::f64::NAN),
36917 scales_h.is_none() as i8,
36918 scales_w.unwrap_or(std::f64::NAN),
36919 scales_w.is_none() as i8
36920 ));
36921 Ok(Tensor { c_tensor: c_tensors[0] })
36922 }
36923
36924 pub fn f_upsample_bicubic2d_out(
36925 &self,
36926 out: &Tensor,
36927 output_size: impl IntList,
36928 align_corners: bool,
36929 scales_h: impl Into<Option<f64>>,
36930 scales_w: impl Into<Option<f64>>,
36931 ) -> Result<Tensor, TchError> {
36932 let scales_h = scales_h.into();
36933 let scales_w = scales_w.into();
36934 let mut c_tensors = [std::ptr::null_mut(); 1];
36935 unsafe_torch_err!(atg_upsample_bicubic2d_out(
36936 c_tensors.as_mut_ptr(),
36937 out.c_tensor,
36938 self.c_tensor,
36939 output_size.as_ptr(),
36940 output_size.len_i32(),
36941 if align_corners { 1 } else { 0 },
36942 scales_h.unwrap_or(std::f64::NAN),
36943 scales_h.is_none() as i8,
36944 scales_w.unwrap_or(std::f64::NAN),
36945 scales_w.is_none() as i8
36946 ));
36947 Ok(Tensor { c_tensor: c_tensors[0] })
36948 }
36949
36950 pub fn f_upsample_bicubic2d_vec(
36951 &self,
36952 output_size: impl IntListOption,
36953 align_corners: bool,
36954 scale_factors: impl DoubleList,
36955 ) -> Result<Tensor, TchError> {
36956 let mut c_tensors = [std::ptr::null_mut(); 1];
36957 unsafe_torch_err!(atg_upsample_bicubic2d_vec(
36958 c_tensors.as_mut_ptr(),
36959 self.c_tensor,
36960 output_size.as_ptr(),
36961 output_size.len_i32(),
36962 if align_corners { 1 } else { 0 },
36963 scale_factors.as_ptr(),
36964 scale_factors.len_i32()
36965 ));
36966 Ok(Tensor { c_tensor: c_tensors[0] })
36967 }
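// Illustrative usage sketch for the `_vec` upsample variants such as
// `f_upsample_bicubic2d_vec` above: they accept either an explicit
// `output_size` or per-dimension `scale_factors` (assuming the crate's
// `IntListOption` and `DoubleList` traits cover `Option<&[i64]>` and `Vec<f64>`).
//
//     let up = t.f_upsample_bicubic2d_vec(None::<&[i64]>, false, vec![2.0, 2.0])?;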
36968
36969 pub fn f_upsample_bilinear2d(
36970 &self,
36971 output_size: impl IntList,
36972 align_corners: bool,
36973 scales_h: impl Into<Option<f64>>,
36974 scales_w: impl Into<Option<f64>>,
36975 ) -> Result<Tensor, TchError> {
36976 let scales_h = scales_h.into();
36977 let scales_w = scales_w.into();
36978 let mut c_tensors = [std::ptr::null_mut(); 1];
36979 unsafe_torch_err!(atg_upsample_bilinear2d(
36980 c_tensors.as_mut_ptr(),
36981 self.c_tensor,
36982 output_size.as_ptr(),
36983 output_size.len_i32(),
36984 if align_corners { 1 } else { 0 },
36985 scales_h.unwrap_or(std::f64::NAN),
36986 scales_h.is_none() as i8,
36987 scales_w.unwrap_or(std::f64::NAN),
36988 scales_w.is_none() as i8
36989 ));
36990 Ok(Tensor { c_tensor: c_tensors[0] })
36991 }
36992
36993 pub fn f_upsample_bilinear2d_backward(
36994 grad_output: &Tensor,
36995 output_size: impl IntList,
36996 input_size: impl IntList,
36997 align_corners: bool,
36998 scales_h: impl Into<Option<f64>>,
36999 scales_w: impl Into<Option<f64>>,
37000 ) -> Result<Tensor, TchError> {
37001 let scales_h = scales_h.into();
37002 let scales_w = scales_w.into();
37003 let mut c_tensors = [std::ptr::null_mut(); 1];
37004 unsafe_torch_err!(atg_upsample_bilinear2d_backward(
37005 c_tensors.as_mut_ptr(),
37006 grad_output.c_tensor,
37007 output_size.as_ptr(),
37008 output_size.len_i32(),
37009 input_size.as_ptr(),
37010 input_size.len_i32(),
37011 if align_corners { 1 } else { 0 },
37012 scales_h.unwrap_or(std::f64::NAN),
37013 scales_h.is_none() as i8,
37014 scales_w.unwrap_or(std::f64::NAN),
37015 scales_w.is_none() as i8
37016 ));
37017 Ok(Tensor { c_tensor: c_tensors[0] })
37018 }
37019
37020 pub fn f_upsample_bilinear2d_backward_grad_input(
37021 grad_input: &Tensor,
37022 grad_output: &Tensor,
37023 output_size: impl IntList,
37024 input_size: impl IntList,
37025 align_corners: bool,
37026 scales_h: impl Into<Option<f64>>,
37027 scales_w: impl Into<Option<f64>>,
37028 ) -> Result<Tensor, TchError> {
37029 let scales_h = scales_h.into();
37030 let scales_w = scales_w.into();
37031 let mut c_tensors = [std::ptr::null_mut(); 1];
37032 unsafe_torch_err!(atg_upsample_bilinear2d_backward_grad_input(
37033 c_tensors.as_mut_ptr(),
37034 grad_input.c_tensor,
37035 grad_output.c_tensor,
37036 output_size.as_ptr(),
37037 output_size.len_i32(),
37038 input_size.as_ptr(),
37039 input_size.len_i32(),
37040 if align_corners { 1 } else { 0 },
37041 scales_h.unwrap_or(std::f64::NAN),
37042 scales_h.is_none() as i8,
37043 scales_w.unwrap_or(std::f64::NAN),
37044 scales_w.is_none() as i8
37045 ));
37046 Ok(Tensor { c_tensor: c_tensors[0] })
37047 }
37048
37049 pub fn f_upsample_bilinear2d_out(
37050 &self,
37051 out: &Tensor,
37052 output_size: impl IntList,
37053 align_corners: bool,
37054 scales_h: impl Into<Option<f64>>,
37055 scales_w: impl Into<Option<f64>>,
37056 ) -> Result<Tensor, TchError> {
37057 let scales_h = scales_h.into();
37058 let scales_w = scales_w.into();
37059 let mut c_tensors = [std::ptr::null_mut(); 1];
37060 unsafe_torch_err!(atg_upsample_bilinear2d_out(
37061 c_tensors.as_mut_ptr(),
37062 out.c_tensor,
37063 self.c_tensor,
37064 output_size.as_ptr(),
37065 output_size.len_i32(),
37066 if align_corners { 1 } else { 0 },
37067 scales_h.unwrap_or(std::f64::NAN),
37068 scales_h.is_none() as i8,
37069 scales_w.unwrap_or(std::f64::NAN),
37070 scales_w.is_none() as i8
37071 ));
37072 Ok(Tensor { c_tensor: c_tensors[0] })
37073 }
37074
37075 pub fn f_upsample_bilinear2d_vec(
37076 &self,
37077 output_size: impl IntListOption,
37078 align_corners: bool,
37079 scale_factors: impl DoubleList,
37080 ) -> Result<Tensor, TchError> {
37081 let mut c_tensors = [std::ptr::null_mut(); 1];
37082 unsafe_torch_err!(atg_upsample_bilinear2d_vec(
37083 c_tensors.as_mut_ptr(),
37084 self.c_tensor,
37085 output_size.as_ptr(),
37086 output_size.len_i32(),
37087 if align_corners { 1 } else { 0 },
37088 scale_factors.as_ptr(),
37089 scale_factors.len_i32()
37090 ));
37091 Ok(Tensor { c_tensor: c_tensors[0] })
37092 }
37093
37094 pub fn f_upsample_linear1d(
37095 &self,
37096 output_size: impl IntList,
37097 align_corners: bool,
37098 scales: impl Into<Option<f64>>,
37099 ) -> Result<Tensor, TchError> {
37100 let scales = scales.into();
37101 let mut c_tensors = [std::ptr::null_mut(); 1];
37102 unsafe_torch_err!(atg_upsample_linear1d(
37103 c_tensors.as_mut_ptr(),
37104 self.c_tensor,
37105 output_size.as_ptr(),
37106 output_size.len_i32(),
37107 if align_corners { 1 } else { 0 },
37108 scales.unwrap_or(std::f64::NAN),
37109 scales.is_none() as i8
37110 ));
37111 Ok(Tensor { c_tensor: c_tensors[0] })
37112 }
37113
37114 pub fn f_upsample_linear1d_backward(
37115 grad_output: &Tensor,
37116 output_size: impl IntList,
37117 input_size: impl IntList,
37118 align_corners: bool,
37119 scales: impl Into<Option<f64>>,
37120 ) -> Result<Tensor, TchError> {
37121 let scales = scales.into();
37122 let mut c_tensors = [std::ptr::null_mut(); 1];
37123 unsafe_torch_err!(atg_upsample_linear1d_backward(
37124 c_tensors.as_mut_ptr(),
37125 grad_output.c_tensor,
37126 output_size.as_ptr(),
37127 output_size.len_i32(),
37128 input_size.as_ptr(),
37129 input_size.len_i32(),
37130 if align_corners { 1 } else { 0 },
37131 scales.unwrap_or(std::f64::NAN),
37132 scales.is_none() as i8
37133 ));
37134 Ok(Tensor { c_tensor: c_tensors[0] })
37135 }
37136
37137 pub fn f_upsample_linear1d_backward_grad_input(
37138 grad_input: &Tensor,
37139 grad_output: &Tensor,
37140 output_size: impl IntList,
37141 input_size: impl IntList,
37142 align_corners: bool,
37143 scales: impl Into<Option<f64>>,
37144 ) -> Result<Tensor, TchError> {
37145 let scales = scales.into();
37146 let mut c_tensors = [std::ptr::null_mut(); 1];
37147 unsafe_torch_err!(atg_upsample_linear1d_backward_grad_input(
37148 c_tensors.as_mut_ptr(),
37149 grad_input.c_tensor,
37150 grad_output.c_tensor,
37151 output_size.as_ptr(),
37152 output_size.len_i32(),
37153 input_size.as_ptr(),
37154 input_size.len_i32(),
37155 if align_corners { 1 } else { 0 },
37156 scales.unwrap_or(std::f64::NAN),
37157 scales.is_none() as i8
37158 ));
37159 Ok(Tensor { c_tensor: c_tensors[0] })
37160 }
37161
37162 pub fn f_upsample_linear1d_out(
37163 &self,
37164 out: &Tensor,
37165 output_size: impl IntList,
37166 align_corners: bool,
37167 scales: impl Into<Option<f64>>,
37168 ) -> Result<Tensor, TchError> {
37169 let scales = scales.into();
37170 let mut c_tensors = [std::ptr::null_mut(); 1];
37171 unsafe_torch_err!(atg_upsample_linear1d_out(
37172 c_tensors.as_mut_ptr(),
37173 out.c_tensor,
37174 self.c_tensor,
37175 output_size.as_ptr(),
37176 output_size.len_i32(),
37177 if align_corners { 1 } else { 0 },
37178 scales.unwrap_or(std::f64::NAN),
37179 scales.is_none() as i8
37180 ));
37181 Ok(Tensor { c_tensor: c_tensors[0] })
37182 }
37183
37184 pub fn f_upsample_linear1d_vec(
37185 &self,
37186 output_size: impl IntListOption,
37187 align_corners: bool,
37188 scale_factors: impl DoubleList,
37189 ) -> Result<Tensor, TchError> {
37190 let mut c_tensors = [std::ptr::null_mut(); 1];
37191 unsafe_torch_err!(atg_upsample_linear1d_vec(
37192 c_tensors.as_mut_ptr(),
37193 self.c_tensor,
37194 output_size.as_ptr(),
37195 output_size.len_i32(),
37196 if align_corners { 1 } else { 0 },
37197 scale_factors.as_ptr(),
37198 scale_factors.len_i32()
37199 ));
37200 Ok(Tensor { c_tensor: c_tensors[0] })
37201 }
37202
37203 pub fn f_upsample_nearest1d(
37204 &self,
37205 output_size: impl IntList,
37206 scales: impl Into<Option<f64>>,
37207 ) -> Result<Tensor, TchError> {
37208 let scales = scales.into();
37209 let mut c_tensors = [std::ptr::null_mut(); 1];
37210 unsafe_torch_err!(atg_upsample_nearest1d(
37211 c_tensors.as_mut_ptr(),
37212 self.c_tensor,
37213 output_size.as_ptr(),
37214 output_size.len_i32(),
37215 scales.unwrap_or(std::f64::NAN),
37216 scales.is_none() as i8
37217 ));
37218 Ok(Tensor { c_tensor: c_tensors[0] })
37219 }
37220
37221 pub fn f_upsample_nearest1d_backward(
37222 grad_output: &Tensor,
37223 output_size: impl IntList,
37224 input_size: impl IntList,
37225 scales: impl Into<Option<f64>>,
37226 ) -> Result<Tensor, TchError> {
37227 let scales = scales.into();
37228 let mut c_tensors = [std::ptr::null_mut(); 1];
37229 unsafe_torch_err!(atg_upsample_nearest1d_backward(
37230 c_tensors.as_mut_ptr(),
37231 grad_output.c_tensor,
37232 output_size.as_ptr(),
37233 output_size.len_i32(),
37234 input_size.as_ptr(),
37235 input_size.len_i32(),
37236 scales.unwrap_or(std::f64::NAN),
37237 scales.is_none() as i8
37238 ));
37239 Ok(Tensor { c_tensor: c_tensors[0] })
37240 }
37241
37242 pub fn f_upsample_nearest1d_backward_grad_input(
37243 grad_input: &Tensor,
37244 grad_output: &Tensor,
37245 output_size: impl IntList,
37246 input_size: impl IntList,
37247 scales: impl Into<Option<f64>>,
37248 ) -> Result<Tensor, TchError> {
37249 let scales = scales.into();
37250 let mut c_tensors = [std::ptr::null_mut(); 1];
37251 unsafe_torch_err!(atg_upsample_nearest1d_backward_grad_input(
37252 c_tensors.as_mut_ptr(),
37253 grad_input.c_tensor,
37254 grad_output.c_tensor,
37255 output_size.as_ptr(),
37256 output_size.len_i32(),
37257 input_size.as_ptr(),
37258 input_size.len_i32(),
37259 scales.unwrap_or(std::f64::NAN),
37260 scales.is_none() as i8
37261 ));
37262 Ok(Tensor { c_tensor: c_tensors[0] })
37263 }
37264
37265 pub fn f_upsample_nearest1d_out(
37266 &self,
37267 out: &Tensor,
37268 output_size: impl IntList,
37269 scales: impl Into<Option<f64>>,
37270 ) -> Result<Tensor, TchError> {
37271 let scales = scales.into();
37272 let mut c_tensors = [std::ptr::null_mut(); 1];
37273 unsafe_torch_err!(atg_upsample_nearest1d_out(
37274 c_tensors.as_mut_ptr(),
37275 out.c_tensor,
37276 self.c_tensor,
37277 output_size.as_ptr(),
37278 output_size.len_i32(),
37279 scales.unwrap_or(std::f64::NAN),
37280 scales.is_none() as i8
37281 ));
37282 Ok(Tensor { c_tensor: c_tensors[0] })
37283 }
37284
37285 pub fn f_upsample_nearest1d_vec(
37286 &self,
37287 output_size: impl IntListOption,
37288 scale_factors: impl DoubleList,
37289 ) -> Result<Tensor, TchError> {
37290 let mut c_tensors = [std::ptr::null_mut(); 1];
37291 unsafe_torch_err!(atg_upsample_nearest1d_vec(
37292 c_tensors.as_mut_ptr(),
37293 self.c_tensor,
37294 output_size.as_ptr(),
37295 output_size.len_i32(),
37296 scale_factors.as_ptr(),
37297 scale_factors.len_i32()
37298 ));
37299 Ok(Tensor { c_tensor: c_tensors[0] })
37300 }
37301
37302 pub fn f_upsample_nearest2d(
37303 &self,
37304 output_size: impl IntList,
37305 scales_h: impl Into<Option<f64>>,
37306 scales_w: impl Into<Option<f64>>,
37307 ) -> Result<Tensor, TchError> {
37308 let scales_h = scales_h.into();
37309 let scales_w = scales_w.into();
37310 let mut c_tensors = [std::ptr::null_mut(); 1];
37311 unsafe_torch_err!(atg_upsample_nearest2d(
37312 c_tensors.as_mut_ptr(),
37313 self.c_tensor,
37314 output_size.as_ptr(),
37315 output_size.len_i32(),
37316 scales_h.unwrap_or(std::f64::NAN),
37317 scales_h.is_none() as i8,
37318 scales_w.unwrap_or(std::f64::NAN),
37319 scales_w.is_none() as i8
37320 ));
37321 Ok(Tensor { c_tensor: c_tensors[0] })
37322 }
37323
37324 pub fn f_upsample_nearest2d_backward(
37325 grad_output: &Tensor,
37326 output_size: impl IntList,
37327 input_size: impl IntList,
37328 scales_h: impl Into<Option<f64>>,
37329 scales_w: impl Into<Option<f64>>,
37330 ) -> Result<Tensor, TchError> {
37331 let scales_h = scales_h.into();
37332 let scales_w = scales_w.into();
37333 let mut c_tensors = [std::ptr::null_mut(); 1];
37334 unsafe_torch_err!(atg_upsample_nearest2d_backward(
37335 c_tensors.as_mut_ptr(),
37336 grad_output.c_tensor,
37337 output_size.as_ptr(),
37338 output_size.len_i32(),
37339 input_size.as_ptr(),
37340 input_size.len_i32(),
37341 scales_h.unwrap_or(std::f64::NAN),
37342 scales_h.is_none() as i8,
37343 scales_w.unwrap_or(std::f64::NAN),
37344 scales_w.is_none() as i8
37345 ));
37346 Ok(Tensor { c_tensor: c_tensors[0] })
37347 }
37348
37349 pub fn f_upsample_nearest2d_backward_grad_input(
37350 grad_input: &Tensor,
37351 grad_output: &Tensor,
37352 output_size: impl IntList,
37353 input_size: impl IntList,
37354 scales_h: impl Into<Option<f64>>,
37355 scales_w: impl Into<Option<f64>>,
37356 ) -> Result<Tensor, TchError> {
37357 let scales_h = scales_h.into();
37358 let scales_w = scales_w.into();
37359 let mut c_tensors = [std::ptr::null_mut(); 1];
37360 unsafe_torch_err!(atg_upsample_nearest2d_backward_grad_input(
37361 c_tensors.as_mut_ptr(),
37362 grad_input.c_tensor,
37363 grad_output.c_tensor,
37364 output_size.as_ptr(),
37365 output_size.len_i32(),
37366 input_size.as_ptr(),
37367 input_size.len_i32(),
37368 scales_h.unwrap_or(std::f64::NAN),
37369 scales_h.is_none() as i8,
37370 scales_w.unwrap_or(std::f64::NAN),
37371 scales_w.is_none() as i8
37372 ));
37373 Ok(Tensor { c_tensor: c_tensors[0] })
37374 }
37375
37376 pub fn f_upsample_nearest2d_out(
37377 &self,
37378 out: &Tensor,
37379 output_size: impl IntList,
37380 scales_h: impl Into<Option<f64>>,
37381 scales_w: impl Into<Option<f64>>,
37382 ) -> Result<Tensor, TchError> {
37383 let scales_h = scales_h.into();
37384 let scales_w = scales_w.into();
37385 let mut c_tensors = [std::ptr::null_mut(); 1];
37386 unsafe_torch_err!(atg_upsample_nearest2d_out(
37387 c_tensors.as_mut_ptr(),
37388 out.c_tensor,
37389 self.c_tensor,
37390 output_size.as_ptr(),
37391 output_size.len_i32(),
37392 scales_h.unwrap_or(std::f64::NAN),
37393 scales_h.is_none() as i8,
37394 scales_w.unwrap_or(std::f64::NAN),
37395 scales_w.is_none() as i8
37396 ));
37397 Ok(Tensor { c_tensor: c_tensors[0] })
37398 }
37399
37400 pub fn f_upsample_nearest2d_vec(
37401 &self,
37402 output_size: impl IntListOption,
37403 scale_factors: impl DoubleList,
37404 ) -> Result<Tensor, TchError> {
37405 let mut c_tensors = [std::ptr::null_mut(); 1];
37406 unsafe_torch_err!(atg_upsample_nearest2d_vec(
37407 c_tensors.as_mut_ptr(),
37408 self.c_tensor,
37409 output_size.as_ptr(),
37410 output_size.len_i32(),
37411 scale_factors.as_ptr(),
37412 scale_factors.len_i32()
37413 ));
37414 Ok(Tensor { c_tensor: c_tensors[0] })
37415 }
37416
37417 pub fn f_upsample_nearest3d(
37418 &self,
37419 output_size: impl IntList,
37420 scales_d: impl Into<Option<f64>>,
37421 scales_h: impl Into<Option<f64>>,
37422 scales_w: impl Into<Option<f64>>,
37423 ) -> Result<Tensor, TchError> {
37424 let scales_d = scales_d.into();
37425 let scales_h = scales_h.into();
37426 let scales_w = scales_w.into();
37427 let mut c_tensors = [std::ptr::null_mut(); 1];
37428 unsafe_torch_err!(atg_upsample_nearest3d(
37429 c_tensors.as_mut_ptr(),
37430 self.c_tensor,
37431 output_size.as_ptr(),
37432 output_size.len_i32(),
37433 scales_d.unwrap_or(std::f64::NAN),
37434 scales_d.is_none() as i8,
37435 scales_h.unwrap_or(std::f64::NAN),
37436 scales_h.is_none() as i8,
37437 scales_w.unwrap_or(std::f64::NAN),
37438 scales_w.is_none() as i8
37439 ));
37440 Ok(Tensor { c_tensor: c_tensors[0] })
37441 }
37442
37443 pub fn f_upsample_nearest3d_backward(
37444 grad_output: &Tensor,
37445 output_size: impl IntList,
37446 input_size: impl IntList,
37447 scales_d: impl Into<Option<f64>>,
37448 scales_h: impl Into<Option<f64>>,
37449 scales_w: impl Into<Option<f64>>,
37450 ) -> Result<Tensor, TchError> {
37451 let scales_d = scales_d.into();
37452 let scales_h = scales_h.into();
37453 let scales_w = scales_w.into();
37454 let mut c_tensors = [std::ptr::null_mut(); 1];
37455 unsafe_torch_err!(atg_upsample_nearest3d_backward(
37456 c_tensors.as_mut_ptr(),
37457 grad_output.c_tensor,
37458 output_size.as_ptr(),
37459 output_size.len_i32(),
37460 input_size.as_ptr(),
37461 input_size.len_i32(),
37462 scales_d.unwrap_or(std::f64::NAN),
37463 scales_d.is_none() as i8,
37464 scales_h.unwrap_or(std::f64::NAN),
37465 scales_h.is_none() as i8,
37466 scales_w.unwrap_or(std::f64::NAN),
37467 scales_w.is_none() as i8
37468 ));
37469 Ok(Tensor { c_tensor: c_tensors[0] })
37470 }
37471
37472 pub fn f_upsample_nearest3d_backward_grad_input(
37473 grad_input: &Tensor,
37474 grad_output: &Tensor,
37475 output_size: impl IntList,
37476 input_size: impl IntList,
37477 scales_d: impl Into<Option<f64>>,
37478 scales_h: impl Into<Option<f64>>,
37479 scales_w: impl Into<Option<f64>>,
37480 ) -> Result<Tensor, TchError> {
37481 let scales_d = scales_d.into();
37482 let scales_h = scales_h.into();
37483 let scales_w = scales_w.into();
37484 let mut c_tensors = [std::ptr::null_mut(); 1];
37485 unsafe_torch_err!(atg_upsample_nearest3d_backward_grad_input(
37486 c_tensors.as_mut_ptr(),
37487 grad_input.c_tensor,
37488 grad_output.c_tensor,
37489 output_size.as_ptr(),
37490 output_size.len_i32(),
37491 input_size.as_ptr(),
37492 input_size.len_i32(),
37493 scales_d.unwrap_or(std::f64::NAN),
37494 scales_d.is_none() as i8,
37495 scales_h.unwrap_or(std::f64::NAN),
37496 scales_h.is_none() as i8,
37497 scales_w.unwrap_or(std::f64::NAN),
37498 scales_w.is_none() as i8
37499 ));
37500 Ok(Tensor { c_tensor: c_tensors[0] })
37501 }
37502
37503 pub fn f_upsample_nearest3d_out(
37504 &self,
37505 out: &Tensor,
37506 output_size: impl IntList,
37507 scales_d: impl Into<Option<f64>>,
37508 scales_h: impl Into<Option<f64>>,
37509 scales_w: impl Into<Option<f64>>,
37510 ) -> Result<Tensor, TchError> {
37511 let scales_d = scales_d.into();
37512 let scales_h = scales_h.into();
37513 let scales_w = scales_w.into();
37514 let mut c_tensors = [std::ptr::null_mut(); 1];
37515 unsafe_torch_err!(atg_upsample_nearest3d_out(
37516 c_tensors.as_mut_ptr(),
37517 out.c_tensor,
37518 self.c_tensor,
37519 output_size.as_ptr(),
37520 output_size.len_i32(),
37521 scales_d.unwrap_or(std::f64::NAN),
37522 scales_d.is_none() as i8,
37523 scales_h.unwrap_or(std::f64::NAN),
37524 scales_h.is_none() as i8,
37525 scales_w.unwrap_or(std::f64::NAN),
37526 scales_w.is_none() as i8
37527 ));
37528 Ok(Tensor { c_tensor: c_tensors[0] })
37529 }
37530
37531 pub fn f_upsample_nearest3d_vec(
37532 &self,
37533 output_size: impl IntListOption,
37534 scale_factors: impl DoubleList,
37535 ) -> Result<Tensor, TchError> {
37536 let mut c_tensors = [std::ptr::null_mut(); 1];
37537 unsafe_torch_err!(atg_upsample_nearest3d_vec(
37538 c_tensors.as_mut_ptr(),
37539 self.c_tensor,
37540 output_size.as_ptr(),
37541 output_size.len_i32(),
37542 scale_factors.as_ptr(),
37543 scale_factors.len_i32()
37544 ));
37545 Ok(Tensor { c_tensor: c_tensors[0] })
37546 }
37547
    pub fn f_upsample_trilinear3d(
        &self,
        output_size: impl IntList,
        align_corners: bool,
        scales_d: impl Into<Option<f64>>,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Result<Tensor, TchError> {
        let scales_d = scales_d.into();
        let scales_h = scales_h.into();
        let scales_w = scales_w.into();
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_upsample_trilinear3d(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            output_size.as_ptr(),
            output_size.len_i32(),
            if align_corners { 1 } else { 0 },
            scales_d.unwrap_or(std::f64::NAN),
            scales_d.is_none() as i8,
            scales_h.unwrap_or(std::f64::NAN),
            scales_h.is_none() as i8,
            scales_w.unwrap_or(std::f64::NAN),
            scales_w.is_none() as i8
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_upsample_trilinear3d_backward(
        grad_output: &Tensor,
        output_size: impl IntList,
        input_size: impl IntList,
        align_corners: bool,
        scales_d: impl Into<Option<f64>>,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Result<Tensor, TchError> {
        let scales_d = scales_d.into();
        let scales_h = scales_h.into();
        let scales_w = scales_w.into();
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_upsample_trilinear3d_backward(
            c_tensors.as_mut_ptr(),
            grad_output.c_tensor,
            output_size.as_ptr(),
            output_size.len_i32(),
            input_size.as_ptr(),
            input_size.len_i32(),
            if align_corners { 1 } else { 0 },
            scales_d.unwrap_or(std::f64::NAN),
            scales_d.is_none() as i8,
            scales_h.unwrap_or(std::f64::NAN),
            scales_h.is_none() as i8,
            scales_w.unwrap_or(std::f64::NAN),
            scales_w.is_none() as i8
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_upsample_trilinear3d_backward_grad_input(
        grad_input: &Tensor,
        grad_output: &Tensor,
        output_size: impl IntList,
        input_size: impl IntList,
        align_corners: bool,
        scales_d: impl Into<Option<f64>>,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Result<Tensor, TchError> {
        let scales_d = scales_d.into();
        let scales_h = scales_h.into();
        let scales_w = scales_w.into();
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_upsample_trilinear3d_backward_grad_input(
            c_tensors.as_mut_ptr(),
            grad_input.c_tensor,
            grad_output.c_tensor,
            output_size.as_ptr(),
            output_size.len_i32(),
            input_size.as_ptr(),
            input_size.len_i32(),
            if align_corners { 1 } else { 0 },
            scales_d.unwrap_or(std::f64::NAN),
            scales_d.is_none() as i8,
            scales_h.unwrap_or(std::f64::NAN),
            scales_h.is_none() as i8,
            scales_w.unwrap_or(std::f64::NAN),
            scales_w.is_none() as i8
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_upsample_trilinear3d_out(
        &self,
        out: &Tensor,
        output_size: impl IntList,
        align_corners: bool,
        scales_d: impl Into<Option<f64>>,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Result<Tensor, TchError> {
        let scales_d = scales_d.into();
        let scales_h = scales_h.into();
        let scales_w = scales_w.into();
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_upsample_trilinear3d_out(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            self.c_tensor,
            output_size.as_ptr(),
            output_size.len_i32(),
            if align_corners { 1 } else { 0 },
            scales_d.unwrap_or(std::f64::NAN),
            scales_d.is_none() as i8,
            scales_h.unwrap_or(std::f64::NAN),
            scales_h.is_none() as i8,
            scales_w.unwrap_or(std::f64::NAN),
            scales_w.is_none() as i8
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_upsample_trilinear3d_vec(
        &self,
        output_size: impl IntListOption,
        align_corners: bool,
        scale_factors: impl DoubleList,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_upsample_trilinear3d_vec(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            output_size.as_ptr(),
            output_size.len_i32(),
            if align_corners { 1 } else { 0 },
            scale_factors.as_ptr(),
            scale_factors.len_i32()
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_value_selecting_reduction_backward(
        grad: &Tensor,
        dim: i64,
        indices: &Tensor,
        sizes: impl IntList,
        keepdim: bool,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_value_selecting_reduction_backward(
            c_tensors.as_mut_ptr(),
            grad.c_tensor,
            dim,
            indices.c_tensor,
            sizes.as_ptr(),
            sizes.len_i32(),
            if keepdim { 1 } else { 0 }
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_values(&self) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_values(c_tensors.as_mut_ptr(), self.c_tensor));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_values_copy(&self) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_values_copy(c_tensors.as_mut_ptr(), self.c_tensor));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_values_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_values_copy_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

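    /// Builds a Vandermonde matrix from the 1-D tensor `x`, mirroring `torch.vander`:
    /// with `increasing = true` column `i` holds `x^i`, otherwise the column order is
    /// reversed; `n` is the number of columns and defaults to `x.numel()` when `None`
    /// is passed.
    ///
    /// A minimal sketch (hedged: `Kind::Float` / `Device::Cpu` are assumed to be the
    /// CPU float variants of this crate's enums and `IntList` to accept `&[i64]`):
    ///
    /// ```ignore
    /// let x = Tensor::f_zeros(&[3i64][..], (Kind::Float, Device::Cpu))?;
    /// let v = Tensor::f_vander(&x, 4, true)?; // shape [3, 4]
    /// ```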
    pub fn f_vander(
        x: &Tensor,
        n: impl Into<Option<i64>>,
        increasing: bool,
    ) -> Result<Tensor, TchError> {
        let n = n.into();
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_vander(
            c_tensors.as_mut_ptr(),
            x.c_tensor,
            n.unwrap_or(0i64),
            n.is_none() as i8,
            if increasing { 1 } else { 0 }
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_var(&self, unbiased: bool) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_var(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            if unbiased { 1 } else { 0 }
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

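    /// Variance with an explicit `correction` term: `correction = 1` matches
    /// `unbiased = true` in `f_var` (Bessel's correction), `correction = 0` the
    /// biased estimator. `dim` selects the reduced dimensions and `keepdim` keeps
    /// them as size-1 dimensions.
    ///
    /// ```ignore
    /// // Hedged sketch; `&[0i64][..]` is assumed to satisfy `IntListOption`.
    /// let v = x.f_var_correction(&[0i64][..], 1, true)?;
    /// ```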
    pub fn f_var_correction<S: Into<Scalar>>(
        &self,
        dim: impl IntListOption,
        correction: S,
        keepdim: bool,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_var_correction(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            dim.as_ptr(),
            dim.len_i32(),
            correction.into().c_scalar,
            if keepdim { 1 } else { 0 }
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_var_correction_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        dim: impl IntListOption,
        correction: S,
        keepdim: bool,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_var_correction_out(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            self.c_tensor,
            dim.as_ptr(),
            dim.len_i32(),
            correction.into().c_scalar,
            if keepdim { 1 } else { 0 }
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_var_dim(
        &self,
        dim: impl IntListOption,
        unbiased: bool,
        keepdim: bool,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_var_dim(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            dim.as_ptr(),
            dim.len_i32(),
            if unbiased { 1 } else { 0 },
            if keepdim { 1 } else { 0 }
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_var_mean(&self, unbiased: bool) -> Result<(Tensor, Tensor), TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 2];
        unsafe_torch_err!(atg_var_mean(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            if unbiased { 1 } else { 0 }
        ));
        Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
    }

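    /// Like `f_var_correction`, but also returns the mean: the result is a
    /// `(variance, mean)` pair computed in a single pass over `self`.
    ///
    /// ```ignore
    /// // Hedged sketch; `&[0i64][..]` is assumed to satisfy `IntListOption`.
    /// let (var, mean) = x.f_var_mean_correction(&[0i64][..], 1, false)?;
    /// ```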
    pub fn f_var_mean_correction<S: Into<Scalar>>(
        &self,
        dim: impl IntListOption,
        correction: S,
        keepdim: bool,
    ) -> Result<(Tensor, Tensor), TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 2];
        unsafe_torch_err!(atg_var_mean_correction(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            dim.as_ptr(),
            dim.len_i32(),
            correction.into().c_scalar,
            if keepdim { 1 } else { 0 }
        ));
        Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
    }

    pub fn f_var_mean_correction_out<S: Into<Scalar>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        dim: impl IntListOption,
        correction: S,
        keepdim: bool,
    ) -> Result<(Tensor, Tensor), TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 2];
        unsafe_torch_err!(atg_var_mean_correction_out(
            c_tensors.as_mut_ptr(),
            out0.c_tensor,
            out1.c_tensor,
            self.c_tensor,
            dim.as_ptr(),
            dim.len_i32(),
            correction.into().c_scalar,
            if keepdim { 1 } else { 0 }
        ));
        Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
    }

    pub fn f_var_mean_dim(
        &self,
        dim: impl IntListOption,
        unbiased: bool,
        keepdim: bool,
    ) -> Result<(Tensor, Tensor), TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 2];
        unsafe_torch_err!(atg_var_mean_dim(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            dim.as_ptr(),
            dim.len_i32(),
            if unbiased { 1 } else { 0 },
            if keepdim { 1 } else { 0 }
        ));
        Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
    }

    pub fn f_var_out(
        &self,
        out: &Tensor,
        dim: impl IntListOption,
        unbiased: bool,
        keepdim: bool,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_var_out(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            self.c_tensor,
            dim.as_ptr(),
            dim.len_i32(),
            if unbiased { 1 } else { 0 },
            if keepdim { 1 } else { 0 }
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_vdot(&self, other: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_vdot(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_vdot_out(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_vdot_out(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            self.c_tensor,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

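    /// Returns a reshaped view of `self` with the requested `size`, mirroring ATen's
    /// `view`; a single `-1` entry lets that dimension be inferred, and the call fails
    /// if the new shape is not compatible with the tensor's layout.
    ///
    /// ```ignore
    /// // Hedged sketch; `IntList` is assumed to accept `&[i64]` slices.
    /// let flat = x.f_view_(&[-1i64][..])?;
    /// ```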
    pub fn f_view_(&self, size: impl IntList) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_view(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            size.as_ptr(),
            size.len_i32()
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_view_as(&self, other: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_view_as(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_view_as_complex(&self) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_view_as_complex(c_tensors.as_mut_ptr(), self.c_tensor));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_view_as_complex_copy(&self) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_view_as_complex_copy(c_tensors.as_mut_ptr(), self.c_tensor));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_view_as_complex_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_view_as_complex_copy_out(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            self.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_view_as_real(&self) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_view_as_real(c_tensors.as_mut_ptr(), self.c_tensor));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_view_as_real_copy(&self) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_view_as_real_copy(c_tensors.as_mut_ptr(), self.c_tensor));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_view_as_real_copy_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_view_as_real_copy_out(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            self.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_view_copy(&self, size: impl IntList) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_view_copy(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            size.as_ptr(),
            size.len_i32()
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_view_copy_dtype(&self, dtype: Kind) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_view_copy_dtype(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            dtype.c_int()
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_view_copy_dtype_out(&self, out: &Tensor, dtype: Kind) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_view_copy_dtype_out(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            self.c_tensor,
            dtype.c_int()
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_view_copy_out(&self, out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_view_copy_out(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            self.c_tensor,
            size.as_ptr(),
            size.len_i32()
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_view_dtype(&self, dtype: Kind) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_view_dtype(c_tensors.as_mut_ptr(), self.c_tensor, dtype.c_int()));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_vsplit(&self, sections: i64) -> Result<Vec<Tensor>, TchError> {
        let c_tensors = unsafe_torch_err!(atg_vsplit(self.c_tensor, sections));
        let mut r__ = vec![];
        let mut i = 0;
        loop {
            let c__ = unsafe { *c_tensors.add(i) };
            if c__.is_null() {
                break;
            }
            r__.push(Tensor { c_tensor: c__ });
            i += 1;
        }
        unsafe { libc::free(c_tensors as *mut libc::c_void) }
        Ok(r__)
    }

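    /// Splits `self` along dimension 0 at the given `indices` (the multi-point
    /// counterpart of `f_vsplit`); the null-terminated C tensor list returned by the
    /// binding is copied into a `Vec<Tensor>` and its backing buffer freed.
    ///
    /// ```ignore
    /// // Hedged sketch; rows [0, 1), [1, 3) and [3, ..) of `x`.
    /// let parts = x.f_vsplit_array(&[1i64, 3][..])?;
    /// ```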
    pub fn f_vsplit_array(&self, indices: impl IntList) -> Result<Vec<Tensor>, TchError> {
        let c_tensors =
            unsafe_torch_err!(atg_vsplit_array(self.c_tensor, indices.as_ptr(), indices.len_i32()));
        let mut r__ = vec![];
        let mut i = 0;
        loop {
            let c__ = unsafe { *c_tensors.add(i) };
            if c__.is_null() {
                break;
            }
            r__.push(Tensor { c_tensor: c__ });
            i += 1;
        }
        unsafe { libc::free(c_tensors as *mut libc::c_void) }
        Ok(r__)
    }

    pub fn f_vstack<T: Borrow<Tensor>>(tensors: &[T]) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_vstack(
            c_tensors.as_mut_ptr(),
            ptr_list(tensors).as_ptr(),
            tensors.len() as i32
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_vstack_out<T: Borrow<Tensor>>(
        out: &Tensor,
        tensors: &[T],
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_vstack_out(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            ptr_list(tensors).as_ptr(),
            tensors.len() as i32
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

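    /// The condition-only form of `where`, equivalent to `torch.where(condition)`
    /// (i.e. `nonzero` in tuple form): one index tensor is returned per dimension
    /// of `condition`.
    ///
    /// ```ignore
    /// let idx = Tensor::f_where_(&mask)?; // Vec with one tensor per dimension
    /// ```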
    pub fn f_where_(condition: &Tensor) -> Result<Vec<Tensor>, TchError> {
        let c_tensors = unsafe_torch_err!(atg_where(condition.c_tensor));
        let mut r__ = vec![];
        let mut i = 0;
        loop {
            let c__ = unsafe { *c_tensors.add(i) };
            if c__.is_null() {
                break;
            }
            r__.push(Tensor { c_tensor: c__ });
            i += 1;
        }
        unsafe { libc::free(c_tensors as *mut libc::c_void) }
        Ok(r__)
    }

    pub fn f_where_scalar<S: Into<Scalar>>(
        condition: &Tensor,
        self_scalar: S,
        other: S,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_where_scalar(
            c_tensors.as_mut_ptr(),
            condition.c_tensor,
            self_scalar.into().c_scalar,
            other.into().c_scalar
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_where_scalarother<S: Into<Scalar>>(
        &self,
        condition: &Tensor,
        other: S,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_where_scalarother(
            c_tensors.as_mut_ptr(),
            condition.c_tensor,
            self.c_tensor,
            other.into().c_scalar
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_where_scalarself<S: Into<Scalar>>(
        condition: &Tensor,
        self_scalar: S,
        other: &Tensor,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_where_scalarself(
            c_tensors.as_mut_ptr(),
            condition.c_tensor,
            self_scalar.into().c_scalar,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

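    /// Elementwise select, `torch.where(condition, self, other)`: picks values from
    /// `self` where `condition` is true and from `other` elsewhere, with the usual
    /// broadcasting rules.
    ///
    /// ```ignore
    /// let blended = a.f_where_self(&mask, &b)?;
    /// ```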
    pub fn f_where_self(&self, condition: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_where_self(
            c_tensors.as_mut_ptr(),
            condition.c_tensor,
            self.c_tensor,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_where_self_out(
        &self,
        out: &Tensor,
        condition: &Tensor,
        other: &Tensor,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_where_self_out(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            condition.c_tensor,
            self.c_tensor,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_xlogy(&self, other: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_xlogy(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_xlogy_(&mut self, other: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_xlogy_(c_tensors.as_mut_ptr(), self.c_tensor, other.c_tensor));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_xlogy_outscalar_other<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        other: S,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_xlogy_outscalar_other(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            self.c_tensor,
            other.into().c_scalar
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_xlogy_outscalar_self<S: Into<Scalar>>(
        out: &Tensor,
        self_scalar: S,
        other: &Tensor,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_xlogy_outscalar_self(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            self_scalar.into().c_scalar,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_xlogy_outtensor(&self, out: &Tensor, other: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_xlogy_outtensor(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            self.c_tensor,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_xlogy_scalar_other<S: Into<Scalar>>(&self, other: S) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_xlogy_scalar_other(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.into().c_scalar
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_xlogy_scalar_other_<S: Into<Scalar>>(&mut self, other: S) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_xlogy_scalar_other_(
            c_tensors.as_mut_ptr(),
            self.c_tensor,
            other.into().c_scalar
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_xlogy_scalar_self<S: Into<Scalar>>(
        self_scalar: S,
        other: &Tensor,
    ) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_xlogy_scalar_self(
            c_tensors.as_mut_ptr(),
            self_scalar.into().c_scalar,
            other.c_tensor
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_zero(&self) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_zero(c_tensors.as_mut_ptr(), self.c_tensor));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_zero_(&mut self) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_zero_(c_tensors.as_mut_ptr(), self.c_tensor));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_zero_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_zero_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

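    /// Creates a tensor of zeros with the given `size` and `(Kind, Device)` options.
    ///
    /// ```ignore
    /// // Hedged sketch; `Kind::Float` / `Device::Cpu` are assumed to be the CPU
    /// // float variants of this crate's enums and `IntList` to accept `&[i64]`.
    /// let z = Tensor::f_zeros(&[2i64, 3][..], (Kind::Float, Device::Cpu))?;
    /// ```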
    pub fn f_zeros(size: impl IntList, options: (Kind, Device)) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_zeros(
            c_tensors.as_mut_ptr(),
            size.as_ptr(),
            size.len_i32(),
            options.0.c_int(),
            options.1.c_int()
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_zeros_like(&self) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_zeros_like(c_tensors.as_mut_ptr(), self.c_tensor));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_zeros_like_out(&self, out: &Tensor) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_zeros_like_out(c_tensors.as_mut_ptr(), out.c_tensor, self.c_tensor));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }

    pub fn f_zeros_out(out: &Tensor, size: impl IntList) -> Result<Tensor, TchError> {
        let mut c_tensors = [std::ptr::null_mut(); 1];
        unsafe_torch_err!(atg_zeros_out(
            c_tensors.as_mut_ptr(),
            out.c_tensor,
            size.as_ptr(),
            size.len_i32()
        ));
        Ok(Tensor { c_tensor: c_tensors[0] })
    }
}