tch_plus/wrappers/
tensor_generated.rs

1/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND! */
2#![allow(clippy::all)]
3use crate::{Device, Kind, Layout, Scalar, Tensor};
4use std::borrow::Borrow;
5use std::convert::Into;
6use torch_sys_plus::*;
7
8impl Tensor {
    /// Infallible variant of `f_internal_and_`; panics if the underlying call fails.
    pub fn internal_and_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_internal_and_(other).unwrap()
    }
12
    /// Infallible variant of `f_internal_and_tensor_`; panics if the underlying call fails.
    pub fn internal_and_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_internal_and_tensor_(other).unwrap()
    }
16
    /// Infallible variant of `f_internal_iand_`; panics if the underlying call fails.
    pub fn internal_iand_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_internal_iand_(other).unwrap()
    }
20
    /// Infallible variant of `f_internal_iand_tensor_`; panics if the underlying call fails.
    pub fn internal_iand_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_internal_iand_tensor_(other).unwrap()
    }
24
    /// Infallible variant of `f_internal_ilshift_`; panics if the underlying call fails.
    pub fn internal_ilshift_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_internal_ilshift_(other).unwrap()
    }
28
    /// Infallible variant of `f_internal_ilshift_tensor_`; panics if the underlying call fails.
    pub fn internal_ilshift_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_internal_ilshift_tensor_(other).unwrap()
    }
32
    /// Infallible variant of `f_internal_ior_`; panics if the underlying call fails.
    pub fn internal_ior_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_internal_ior_(other).unwrap()
    }
36
    /// Infallible variant of `f_internal_ior_tensor_`; panics if the underlying call fails.
    pub fn internal_ior_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_internal_ior_tensor_(other).unwrap()
    }
40
    /// Infallible variant of `f_internal_irshift_`; panics if the underlying call fails.
    pub fn internal_irshift_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_internal_irshift_(other).unwrap()
    }
44
    /// Infallible variant of `f_internal_irshift_tensor_`; panics if the underlying call fails.
    pub fn internal_irshift_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_internal_irshift_tensor_(other).unwrap()
    }
48
    /// Infallible variant of `f_internal_ixor_`; panics if the underlying call fails.
    pub fn internal_ixor_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_internal_ixor_(other).unwrap()
    }
52
    /// Infallible variant of `f_internal_ixor_tensor_`; panics if the underlying call fails.
    pub fn internal_ixor_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_internal_ixor_tensor_(other).unwrap()
    }
56
    /// Infallible variant of `f_internal_lshift_`; panics if the underlying call fails.
    pub fn internal_lshift_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_internal_lshift_(other).unwrap()
    }
60
    /// Infallible variant of `f_internal_lshift_scalar_out_`; panics if the underlying call fails.
    /// NOTE(review): `out` is presumably the destination tensor — confirm against PyTorch docs.
    pub fn internal_lshift_scalar_out_<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_internal_lshift_scalar_out_(out, other).unwrap()
    }
64
    /// Infallible variant of `f_internal_lshift_tensor_`; panics if the underlying call fails.
    pub fn internal_lshift_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_internal_lshift_tensor_(other).unwrap()
    }
68
    /// Infallible variant of `f_internal_lshift_tensor_out_`; panics if the underlying call fails.
    pub fn internal_lshift_tensor_out_(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_internal_lshift_tensor_out_(out, other).unwrap()
    }
72
    /// Infallible variant of `f_internal_or_`; panics if the underlying call fails.
    pub fn internal_or_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_internal_or_(other).unwrap()
    }
76
    /// Infallible variant of `f_internal_or_tensor_`; panics if the underlying call fails.
    pub fn internal_or_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_internal_or_tensor_(other).unwrap()
    }
80
    /// Infallible variant of `f_internal_rshift_`; panics if the underlying call fails.
    pub fn internal_rshift_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_internal_rshift_(other).unwrap()
    }
84
    /// Infallible variant of `f_internal_rshift_scalar_out_`; panics if the underlying call fails.
    pub fn internal_rshift_scalar_out_<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_internal_rshift_scalar_out_(out, other).unwrap()
    }
88
    /// Infallible variant of `f_internal_rshift_tensor_`; panics if the underlying call fails.
    pub fn internal_rshift_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_internal_rshift_tensor_(other).unwrap()
    }
92
    /// Infallible variant of `f_internal_rshift_tensor_out_`; panics if the underlying call fails.
    pub fn internal_rshift_tensor_out_(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_internal_rshift_tensor_out_(out, other).unwrap()
    }
96
    /// Infallible variant of `f_internal_xor_`; panics if the underlying call fails.
    pub fn internal_xor_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_internal_xor_(other).unwrap()
    }
100
    /// Infallible variant of `f_internal_xor_tensor_`; panics if the underlying call fails.
    pub fn internal_xor_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_internal_xor_tensor_(other).unwrap()
    }
104
    /// Infallible variant of `f_internal_adaptive_avg_pool2d`; panics if the underlying call fails.
    pub fn internal_adaptive_avg_pool2d(&self, output_size: impl IntList) -> Tensor {
        self.f_internal_adaptive_avg_pool2d(output_size).unwrap()
    }
108
    /// Infallible variant of `f_internal_adaptive_avg_pool2d_backward`; panics if the underlying call fails.
    pub fn internal_adaptive_avg_pool2d_backward(&self, grad_output: &Tensor) -> Tensor {
        self.f_internal_adaptive_avg_pool2d_backward(grad_output).unwrap()
    }
112
    /// Infallible variant of `f_internal_adaptive_avg_pool2d_backward_out`; panics if the underlying call fails.
    pub fn internal_adaptive_avg_pool2d_backward_out(
        &self,
        out: &Tensor,
        grad_output: &Tensor,
    ) -> Tensor {
        self.f_internal_adaptive_avg_pool2d_backward_out(out, grad_output).unwrap()
    }
120
    /// Infallible variant of `f_internal_adaptive_avg_pool2d_out`; panics if the underlying call fails.
    pub fn internal_adaptive_avg_pool2d_out(
        &self,
        out: &Tensor,
        output_size: impl IntList,
    ) -> Tensor {
        self.f_internal_adaptive_avg_pool2d_out(out, output_size).unwrap()
    }
128
    /// Infallible variant of `f_internal_adaptive_avg_pool3d`; panics if the underlying call fails.
    pub fn internal_adaptive_avg_pool3d(&self, output_size: impl IntList) -> Tensor {
        self.f_internal_adaptive_avg_pool3d(output_size).unwrap()
    }
132
    /// Infallible variant of `f_internal_adaptive_avg_pool3d_backward`; panics if the underlying call fails.
    pub fn internal_adaptive_avg_pool3d_backward(&self, grad_output: &Tensor) -> Tensor {
        self.f_internal_adaptive_avg_pool3d_backward(grad_output).unwrap()
    }
136
    /// Infallible variant of `f_internal_adaptive_avg_pool3d_backward_out`; panics if the underlying call fails.
    pub fn internal_adaptive_avg_pool3d_backward_out(
        &self,
        out: &Tensor,
        grad_output: &Tensor,
    ) -> Tensor {
        self.f_internal_adaptive_avg_pool3d_backward_out(out, grad_output).unwrap()
    }
144
    /// Infallible variant of `f_internal_adaptive_avg_pool3d_out`; panics if the underlying call fails.
    pub fn internal_adaptive_avg_pool3d_out(
        &self,
        out: &Tensor,
        output_size: impl IntList,
    ) -> Tensor {
        self.f_internal_adaptive_avg_pool3d_out(out, output_size).unwrap()
    }
152
    /// Infallible variant of `f_internal_add_batch_dim`; panics if the underlying call fails.
    pub fn internal_add_batch_dim(&self, batch_dim: i64, level: i64) -> Tensor {
        self.f_internal_add_batch_dim(batch_dim, level).unwrap()
    }
156
    /// Infallible variant of `f_internal_add_relu`; panics if the underlying call fails.
    pub fn internal_add_relu(&self, other: &Tensor) -> Tensor {
        self.f_internal_add_relu(other).unwrap()
    }
160
    /// Infallible variant of `f_internal_add_relu_`; panics if the underlying call fails.
    pub fn internal_add_relu_(&mut self, other: &Tensor) -> Tensor {
        self.f_internal_add_relu_(other).unwrap()
    }
164
    /// Infallible variant of `f_internal_add_relu_out`; panics if the underlying call fails.
    pub fn internal_add_relu_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_internal_add_relu_out(out, other).unwrap()
    }
168
    /// Infallible variant of `f_internal_add_relu_scalar`; panics if the underlying call fails.
    pub fn internal_add_relu_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_internal_add_relu_scalar(other).unwrap()
    }
172
    /// Infallible variant of `f_internal_add_relu_scalar_`; panics if the underlying call fails.
    pub fn internal_add_relu_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_internal_add_relu_scalar_(other).unwrap()
    }
176
    /// Infallible variant of `f_internal_add_relu_scalar_out`; panics if the underlying call fails.
    pub fn internal_add_relu_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_internal_add_relu_scalar_out(out, other).unwrap()
    }
180
    /// Infallible variant of `f_internal_addmm_activation`; panics if the underlying call fails.
    pub fn internal_addmm_activation(
        &self,
        mat1: &Tensor,
        mat2: &Tensor,
        use_gelu: bool,
    ) -> Tensor {
        self.f_internal_addmm_activation(mat1, mat2, use_gelu).unwrap()
    }
189
    /// Infallible variant of `f_internal_addmm_activation_out`; panics if the underlying call fails.
    pub fn internal_addmm_activation_out(
        &self,
        out: &Tensor,
        mat1: &Tensor,
        mat2: &Tensor,
        use_gelu: bool,
    ) -> Tensor {
        self.f_internal_addmm_activation_out(out, mat1, mat2, use_gelu).unwrap()
    }
199
    /// Infallible variant of `f_internal_aminmax`; panics if the underlying call fails.
    pub fn internal_aminmax(&self) -> (Tensor, Tensor) {
        self.f_internal_aminmax().unwrap()
    }
203
    /// Infallible variant of `f_internal_aminmax_dim`; panics if the underlying call fails.
    pub fn internal_aminmax_dim(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor) {
        self.f_internal_aminmax_dim(dim, keepdim).unwrap()
    }
207
    /// Infallible variant of `f_internal_aminmax_dim_out`; panics if the underlying call fails.
    pub fn internal_aminmax_dim_out(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        dim: i64,
        keepdim: bool,
    ) -> (Tensor, Tensor) {
        self.f_internal_aminmax_dim_out(out0, out1, dim, keepdim).unwrap()
    }
217
    /// Infallible variant of `f_internal_aminmax_out`; panics if the underlying call fails.
    pub fn internal_aminmax_out(&self, out0: &Tensor, out1: &Tensor) -> (Tensor, Tensor) {
        self.f_internal_aminmax_out(out0, out1).unwrap()
    }
221
    /// Infallible variant of `f_internal_amp_update_scale`; panics if the underlying call fails.
    pub fn internal_amp_update_scale(
        &self,
        growth_tracker: &Tensor,
        found_inf: &Tensor,
        scale_growth_factor: f64,
        scale_backoff_factor: f64,
        growth_interval: i64,
    ) -> (Tensor, Tensor) {
        self.f_internal_amp_update_scale(
            growth_tracker,
            found_inf,
            scale_growth_factor,
            scale_backoff_factor,
            growth_interval,
        )
        .unwrap()
    }
239
    /// Infallible variant of `f_internal_amp_update_scale_`; panics if the underlying call fails.
    pub fn internal_amp_update_scale_(
        &mut self,
        growth_tracker: &Tensor,
        found_inf: &Tensor,
        scale_growth_factor: f64,
        scale_backoff_factor: f64,
        growth_interval: i64,
    ) -> Tensor {
        self.f_internal_amp_update_scale_(
            growth_tracker,
            found_inf,
            scale_growth_factor,
            scale_backoff_factor,
            growth_interval,
        )
        .unwrap()
    }
257
    /// Infallible variant of `f_internal_amp_update_scale_out`; panics if the underlying call fails.
    pub fn internal_amp_update_scale_out(
        &self,
        out: &Tensor,
        growth_tracker: &Tensor,
        found_inf: &Tensor,
        scale_growth_factor: f64,
        scale_backoff_factor: f64,
        growth_interval: i64,
    ) -> Tensor {
        self.f_internal_amp_update_scale_out(
            out,
            growth_tracker,
            found_inf,
            scale_growth_factor,
            scale_backoff_factor,
            growth_interval,
        )
        .unwrap()
    }
277
    /// Infallible variant of `Tensor::f_internal_assert_scalar`; panics if the underlying call fails.
    pub fn internal_assert_scalar<S: Into<Scalar>>(self_scalar: S, assert_msg: &str) {
        Tensor::f_internal_assert_scalar(self_scalar, assert_msg).unwrap()
    }
281
    /// Infallible variant of `Tensor::f_internal_assert_tensor_metadata`; panics if the underlying call fails.
    pub fn internal_assert_tensor_metadata(
        a: &Tensor,
        size: impl IntListOption,
        stride: impl IntListOption,
        dtype: impl Into<Option<Kind>>,
    ) {
        Tensor::f_internal_assert_tensor_metadata(a, size, stride, dtype).unwrap()
    }
290
    /// Infallible variant of `f_internal_autocast_to_full_precision`; panics if the underlying call fails.
    pub fn internal_autocast_to_full_precision(
        &self,
        cuda_enabled: bool,
        cpu_enabled: bool,
    ) -> Tensor {
        self.f_internal_autocast_to_full_precision(cuda_enabled, cpu_enabled).unwrap()
    }
298
    /// Infallible variant of `f_internal_autocast_to_reduced_precision`; panics if the underlying call fails.
    pub fn internal_autocast_to_reduced_precision(
        &self,
        cuda_enabled: bool,
        cpu_enabled: bool,
        cuda_dtype: Kind,
        cpu_dtype: Kind,
    ) -> Tensor {
        self.f_internal_autocast_to_reduced_precision(
            cuda_enabled,
            cpu_enabled,
            cuda_dtype,
            cpu_dtype,
        )
        .unwrap()
    }
314
    /// Infallible variant of `f_internal_batch_norm_no_update`; panics if the underlying call fails.
    pub fn internal_batch_norm_no_update<T: Borrow<Tensor>>(
        &self,
        weight: Option<T>,
        bias: Option<T>,
        running_mean: Option<T>,
        running_var: Option<T>,
        momentum: f64,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        self.f_internal_batch_norm_no_update(weight, bias, running_mean, running_var, momentum, eps)
            .unwrap()
    }
327
    /// Infallible variant of `f_internal_batch_norm_no_update_out`; panics if the underlying call fails.
    pub fn internal_batch_norm_no_update_out<T: Borrow<Tensor>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        out3: &Tensor,
        weight: Option<T>,
        bias: Option<T>,
        running_mean: Option<T>,
        running_var: Option<T>,
        momentum: f64,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        self.f_internal_batch_norm_no_update_out(
            out0,
            out1,
            out2,
            out3,
            weight,
            bias,
            running_mean,
            running_var,
            momentum,
            eps,
        )
        .unwrap()
    }
355
    /// Infallible variant of `f_internal_batch_norm_with_update`; panics if the underlying call fails.
    pub fn internal_batch_norm_with_update<T: Borrow<Tensor>>(
        &self,
        weight: Option<T>,
        bias: Option<T>,
        running_mean: &Tensor,
        running_var: &Tensor,
        momentum: f64,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        self.f_internal_batch_norm_with_update(
            weight,
            bias,
            running_mean,
            running_var,
            momentum,
            eps,
        )
        .unwrap()
    }
375
    /// Infallible variant of `f_internal_batch_norm_with_update_functional`; panics if the underlying call fails.
    pub fn internal_batch_norm_with_update_functional<T: Borrow<Tensor>>(
        &self,
        weight: Option<T>,
        bias: Option<T>,
        running_mean: &Tensor,
        running_var: &Tensor,
        momentum: f64,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) {
        self.f_internal_batch_norm_with_update_functional(
            weight,
            bias,
            running_mean,
            running_var,
            momentum,
            eps,
        )
        .unwrap()
    }
395
    /// Infallible variant of `f_internal_batch_norm_with_update_out`; panics if the underlying call fails.
    pub fn internal_batch_norm_with_update_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        save_mean: &Tensor,
        save_invstd: &Tensor,
        reserve: &Tensor,
        weight: Option<T>,
        bias: Option<T>,
        running_mean: &Tensor,
        running_var: &Tensor,
        momentum: f64,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        self.f_internal_batch_norm_with_update_out(
            out,
            save_mean,
            save_invstd,
            reserve,
            weight,
            bias,
            running_mean,
            running_var,
            momentum,
            eps,
        )
        .unwrap()
    }
423
    /// Infallible variant of `f_internal_cast_byte`; panics if the underlying call fails.
    pub fn internal_cast_byte(&self, non_blocking: bool) -> Tensor {
        self.f_internal_cast_byte(non_blocking).unwrap()
    }
427
    /// Infallible variant of `f_internal_cast_char`; panics if the underlying call fails.
    pub fn internal_cast_char(&self, non_blocking: bool) -> Tensor {
        self.f_internal_cast_char(non_blocking).unwrap()
    }
431
    /// Infallible variant of `f_internal_cast_double`; panics if the underlying call fails.
    pub fn internal_cast_double(&self, non_blocking: bool) -> Tensor {
        self.f_internal_cast_double(non_blocking).unwrap()
    }
435
    /// Infallible variant of `f_internal_cast_float`; panics if the underlying call fails.
    pub fn internal_cast_float(&self, non_blocking: bool) -> Tensor {
        self.f_internal_cast_float(non_blocking).unwrap()
    }
439
    /// Infallible variant of `f_internal_cast_half`; panics if the underlying call fails.
    pub fn internal_cast_half(&self, non_blocking: bool) -> Tensor {
        self.f_internal_cast_half(non_blocking).unwrap()
    }
443
    /// Infallible variant of `f_internal_cast_int`; panics if the underlying call fails.
    pub fn internal_cast_int(&self, non_blocking: bool) -> Tensor {
        self.f_internal_cast_int(non_blocking).unwrap()
    }
447
    /// Infallible variant of `f_internal_cast_long`; panics if the underlying call fails.
    pub fn internal_cast_long(&self, non_blocking: bool) -> Tensor {
        self.f_internal_cast_long(non_blocking).unwrap()
    }
451
    /// Infallible variant of `f_internal_cast_short`; panics if the underlying call fails.
    pub fn internal_cast_short(&self, non_blocking: bool) -> Tensor {
        self.f_internal_cast_short(non_blocking).unwrap()
    }
455
    /// Infallible variant of `Tensor::f_internal_cdist_backward`; panics if the underlying call fails.
    pub fn internal_cdist_backward(
        grad: &Tensor,
        x1: &Tensor,
        x2: &Tensor,
        p: f64,
        cdist: &Tensor,
    ) -> Tensor {
        Tensor::f_internal_cdist_backward(grad, x1, x2, p, cdist).unwrap()
    }
465
    /// Infallible variant of `Tensor::f_internal_cdist_backward_out`; panics if the underlying call fails.
    pub fn internal_cdist_backward_out(
        out: &Tensor,
        grad: &Tensor,
        x1: &Tensor,
        x2: &Tensor,
        p: f64,
        cdist: &Tensor,
    ) -> Tensor {
        Tensor::f_internal_cdist_backward_out(out, grad, x1, x2, p, cdist).unwrap()
    }
476
    /// Infallible variant of `f_internal_cholesky_solve_helper`; panics if the underlying call fails.
    pub fn internal_cholesky_solve_helper(&self, a: &Tensor, upper: bool) -> Tensor {
        self.f_internal_cholesky_solve_helper(a, upper).unwrap()
    }
480
    /// Infallible variant of `f_internal_cholesky_solve_helper_out`; panics if the underlying call fails.
    pub fn internal_cholesky_solve_helper_out(
        &self,
        out: &Tensor,
        a: &Tensor,
        upper: bool,
    ) -> Tensor {
        self.f_internal_cholesky_solve_helper_out(out, a, upper).unwrap()
    }
489
    /// Infallible variant of `Tensor::f_internal_chunk_cat`; panics if the underlying call fails.
    pub fn internal_chunk_cat<T: Borrow<Tensor>>(
        tensors: &[T],
        dim: i64,
        num_chunks: i64,
    ) -> Tensor {
        Tensor::f_internal_chunk_cat(tensors, dim, num_chunks).unwrap()
    }
497
    /// Infallible variant of `Tensor::f_internal_chunk_cat_out`; panics if the underlying call fails.
    pub fn internal_chunk_cat_out<T: Borrow<Tensor>>(
        out: &Tensor,
        tensors: &[T],
        dim: i64,
        num_chunks: i64,
    ) -> Tensor {
        Tensor::f_internal_chunk_cat_out(out, tensors, dim, num_chunks).unwrap()
    }
506
    /// Infallible variant of `f_internal_coalesce`; panics if the underlying call fails.
    pub fn internal_coalesce(&self) -> Tensor {
        self.f_internal_coalesce().unwrap()
    }
510
    /// Infallible variant of `f_internal_coalesce_out`; panics if the underlying call fails.
    pub fn internal_coalesce_out(&self, out: &Tensor) -> Tensor {
        self.f_internal_coalesce_out(out).unwrap()
    }
514
    /// Infallible variant of `f_internal_coalesced`; panics if the underlying call fails.
    pub fn internal_coalesced(&self, coalesced: bool) -> Tensor {
        self.f_internal_coalesced(coalesced).unwrap()
    }
518
    /// Infallible variant of `f_internal_coalesced_`; panics if the underlying call fails.
    pub fn internal_coalesced_(&mut self, coalesced: bool) -> Tensor {
        self.f_internal_coalesced_(coalesced).unwrap()
    }
522
    /// Infallible variant of `f_internal_coalesced_out`; panics if the underlying call fails.
    pub fn internal_coalesced_out(&self, out: &Tensor, coalesced: bool) -> Tensor {
        self.f_internal_coalesced_out(out, coalesced).unwrap()
    }
526
    /// Infallible variant of `f_internal_compute_linear_combination`; panics if the underlying call fails.
    pub fn internal_compute_linear_combination(&self, coefficients: &Tensor) -> Tensor {
        self.f_internal_compute_linear_combination(coefficients).unwrap()
    }
530
    /// Infallible variant of `f_internal_compute_linear_combination_out`; panics if the underlying call fails.
    pub fn internal_compute_linear_combination_out(
        &self,
        out: &Tensor,
        coefficients: &Tensor,
    ) -> Tensor {
        self.f_internal_compute_linear_combination_out(out, coefficients).unwrap()
    }
538
    /// Infallible variant of `f_internal_conj`; panics if the underlying call fails.
    pub fn internal_conj(&self) -> Tensor {
        self.f_internal_conj().unwrap()
    }
542
    /// Infallible variant of `f_internal_conj_copy`; panics if the underlying call fails.
    pub fn internal_conj_copy(&self) -> Tensor {
        self.f_internal_conj_copy().unwrap()
    }
546
    /// Infallible variant of `f_internal_conj_copy_out`; panics if the underlying call fails.
    pub fn internal_conj_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_internal_conj_copy_out(out).unwrap()
    }
550
    /// Infallible variant of `f_internal_conj_physical`; panics if the underlying call fails.
    pub fn internal_conj_physical(&self) -> Tensor {
        self.f_internal_conj_physical().unwrap()
    }
554
    /// Infallible variant of `f_internal_conj_physical_out`; panics if the underlying call fails.
    pub fn internal_conj_physical_out(&self, out: &Tensor) -> Tensor {
        self.f_internal_conj_physical_out(out).unwrap()
    }
558
    /// Infallible variant of `f_internal_conv_depthwise2d`; panics if the underlying call fails.
    pub fn internal_conv_depthwise2d<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        kernel_size: impl IntList,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
    ) -> Tensor {
        self.f_internal_conv_depthwise2d(weight, kernel_size, bias, stride, padding, dilation)
            .unwrap()
    }
571
    /// Infallible variant of `f_internal_conv_depthwise2d_out`; panics if the underlying call fails.
    pub fn internal_conv_depthwise2d_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        kernel_size: impl IntList,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
    ) -> Tensor {
        self.f_internal_conv_depthwise2d_out(
            out,
            weight,
            kernel_size,
            bias,
            stride,
            padding,
            dilation,
        )
        .unwrap()
    }
593
    /// Infallible variant of `f_internal_convert_indices_from_coo_to_csr`; panics if the underlying call fails.
    pub fn internal_convert_indices_from_coo_to_csr(&self, size: i64, out_int32: bool) -> Tensor {
        self.f_internal_convert_indices_from_coo_to_csr(size, out_int32).unwrap()
    }
597
    /// Infallible variant of `f_internal_convert_indices_from_coo_to_csr_out`; panics if the underlying call fails.
    pub fn internal_convert_indices_from_coo_to_csr_out(
        &self,
        out: &Tensor,
        size: i64,
        out_int32: bool,
    ) -> Tensor {
        self.f_internal_convert_indices_from_coo_to_csr_out(out, size, out_int32).unwrap()
    }
606
    /// Infallible variant of `Tensor::f_internal_convert_indices_from_csr_to_coo`; panics if the underlying call fails.
    pub fn internal_convert_indices_from_csr_to_coo(
        crow_indices: &Tensor,
        col_indices: &Tensor,
        out_int32: bool,
        transpose: bool,
    ) -> Tensor {
        Tensor::f_internal_convert_indices_from_csr_to_coo(
            crow_indices,
            col_indices,
            out_int32,
            transpose,
        )
        .unwrap()
    }
621
    /// Infallible variant of `Tensor::f_internal_convert_indices_from_csr_to_coo_out`; panics if the underlying call fails.
    pub fn internal_convert_indices_from_csr_to_coo_out(
        out: &Tensor,
        crow_indices: &Tensor,
        col_indices: &Tensor,
        out_int32: bool,
        transpose: bool,
    ) -> Tensor {
        Tensor::f_internal_convert_indices_from_csr_to_coo_out(
            out,
            crow_indices,
            col_indices,
            out_int32,
            transpose,
        )
        .unwrap()
    }
638
    /// Infallible variant of `f_internal_convert_weight_to_int4pack`; panics if the underlying call fails.
    pub fn internal_convert_weight_to_int4pack(&self, innerktiles: i64) -> Tensor {
        self.f_internal_convert_weight_to_int4pack(innerktiles).unwrap()
    }
642
    /// Infallible variant of `f_internal_convolution`; panics if the underlying call fails.
    pub fn internal_convolution<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        transposed: bool,
        output_padding: impl IntList,
        groups: i64,
        benchmark: bool,
        deterministic: bool,
        cudnn_enabled: bool,
        allow_tf32: bool,
    ) -> Tensor {
        self.f_internal_convolution(
            weight,
            bias,
            stride,
            padding,
            dilation,
            transposed,
            output_padding,
            groups,
            benchmark,
            deterministic,
            cudnn_enabled,
            allow_tf32,
        )
        .unwrap()
    }
674
    /// Infallible variant of `f_internal_convolution_deprecated`; panics if the underlying call fails.
    pub fn internal_convolution_deprecated<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        transposed: bool,
        output_padding: impl IntList,
        groups: i64,
        benchmark: bool,
        deterministic: bool,
        cudnn_enabled: bool,
    ) -> Tensor {
        self.f_internal_convolution_deprecated(
            weight,
            bias,
            stride,
            padding,
            dilation,
            transposed,
            output_padding,
            groups,
            benchmark,
            deterministic,
            cudnn_enabled,
        )
        .unwrap()
    }
704
    /// Infallible variant of `f_internal_convolution_mode`; panics if the underlying call fails.
    pub fn internal_convolution_mode<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: &str,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_internal_convolution_mode(weight, bias, stride, padding, dilation, groups).unwrap()
    }
716
    /// Infallible variant of `f_internal_convolution_out`; panics if the underlying call fails.
    pub fn internal_convolution_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        transposed: bool,
        output_padding: impl IntList,
        groups: i64,
        benchmark: bool,
        deterministic: bool,
        cudnn_enabled: bool,
        allow_tf32: bool,
    ) -> Tensor {
        self.f_internal_convolution_out(
            out,
            weight,
            bias,
            stride,
            padding,
            dilation,
            transposed,
            output_padding,
            groups,
            benchmark,
            deterministic,
            cudnn_enabled,
            allow_tf32,
        )
        .unwrap()
    }
750
    /// Infallible variant of `f_internal_copy_from`; panics if the underlying call fails.
    pub fn internal_copy_from(&self, dst: &Tensor, non_blocking: bool) -> Tensor {
        self.f_internal_copy_from(dst, non_blocking).unwrap()
    }
754
    /// Infallible variant of `f_internal_copy_from_and_resize`; panics if the underlying call fails.
    pub fn internal_copy_from_and_resize(&self, dst: &Tensor) -> Tensor {
        self.f_internal_copy_from_and_resize(dst).unwrap()
    }
758
    /// Infallible variant of `f_internal_copy_from_and_resize_out`; panics if the underlying call fails.
    pub fn internal_copy_from_and_resize_out(&self, out: &Tensor, dst: &Tensor) -> Tensor {
        self.f_internal_copy_from_and_resize_out(out, dst).unwrap()
    }
762
    /// Infallible variant of `f_internal_copy_from_out`; panics if the underlying call fails.
    pub fn internal_copy_from_out(&self, out: &Tensor, dst: &Tensor, non_blocking: bool) -> Tensor {
        self.f_internal_copy_from_out(out, dst, non_blocking).unwrap()
    }
766
    /// Infallible variant of `f_internal_cslt_compress`; panics if the underlying call fails.
    pub fn internal_cslt_compress(&self) -> Tensor {
        self.f_internal_cslt_compress().unwrap()
    }
770
    /// Infallible variant of `Tensor::f_internal_cslt_sparse_mm`; panics if the underlying call fails.
    pub fn internal_cslt_sparse_mm<T: Borrow<Tensor>>(
        compressed_a: &Tensor,
        dense_b: &Tensor,
        bias: Option<T>,
        alpha: Option<T>,
        out_dtype: impl Into<Option<Kind>>,
        transpose_result: bool,
        alg_id: i64,
    ) -> Tensor {
        Tensor::f_internal_cslt_sparse_mm(
            compressed_a,
            dense_b,
            bias,
            alpha,
            out_dtype,
            transpose_result,
            alg_id,
        )
        .unwrap()
    }
791
    /// Infallible variant of `Tensor::f_internal_cslt_sparse_mm_search`; panics if the underlying call fails.
    pub fn internal_cslt_sparse_mm_search<T: Borrow<Tensor>>(
        compressed_a: &Tensor,
        dense_b: &Tensor,
        bias: Option<T>,
        alpha: Option<T>,
        out_dtype: impl Into<Option<Kind>>,
        transpose_result: bool,
    ) -> i64 {
        Tensor::f_internal_cslt_sparse_mm_search(
            compressed_a,
            dense_b,
            bias,
            alpha,
            out_dtype,
            transpose_result,
        )
        .unwrap()
    }
810
    /// Infallible variant of `Tensor::f_internal_ctc_loss`; panics if the underlying call fails.
    pub fn internal_ctc_loss(
        log_probs: &Tensor,
        targets: &Tensor,
        input_lengths: impl IntList,
        target_lengths: impl IntList,
        blank: i64,
        zero_infinity: bool,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_ctc_loss(
            log_probs,
            targets,
            input_lengths,
            target_lengths,
            blank,
            zero_infinity,
        )
        .unwrap()
    }
829
    /// Infallible variant of `Tensor::f_internal_ctc_loss_backward`; panics if the underlying call fails.
    pub fn internal_ctc_loss_backward(
        grad: &Tensor,
        log_probs: &Tensor,
        targets: &Tensor,
        input_lengths: impl IntList,
        target_lengths: impl IntList,
        neg_log_likelihood: &Tensor,
        log_alpha: &Tensor,
        blank: i64,
        zero_infinity: bool,
    ) -> Tensor {
        Tensor::f_internal_ctc_loss_backward(
            grad,
            log_probs,
            targets,
            input_lengths,
            target_lengths,
            neg_log_likelihood,
            log_alpha,
            blank,
            zero_infinity,
        )
        .unwrap()
    }
854
    /// Infallible variant of `Tensor::f_internal_ctc_loss_backward_out`; panics if the underlying call fails.
    pub fn internal_ctc_loss_backward_out(
        out: &Tensor,
        grad: &Tensor,
        log_probs: &Tensor,
        targets: &Tensor,
        input_lengths: impl IntList,
        target_lengths: impl IntList,
        neg_log_likelihood: &Tensor,
        log_alpha: &Tensor,
        blank: i64,
        zero_infinity: bool,
    ) -> Tensor {
        Tensor::f_internal_ctc_loss_backward_out(
            out,
            grad,
            log_probs,
            targets,
            input_lengths,
            target_lengths,
            neg_log_likelihood,
            log_alpha,
            blank,
            zero_infinity,
        )
        .unwrap()
    }
881
    /// Infallible variant of `Tensor::f_internal_ctc_loss_backward_tensor`; panics if the underlying call fails.
    pub fn internal_ctc_loss_backward_tensor(
        grad: &Tensor,
        log_probs: &Tensor,
        targets: &Tensor,
        input_lengths: &Tensor,
        target_lengths: &Tensor,
        neg_log_likelihood: &Tensor,
        log_alpha: &Tensor,
        blank: i64,
        zero_infinity: bool,
    ) -> Tensor {
        Tensor::f_internal_ctc_loss_backward_tensor(
            grad,
            log_probs,
            targets,
            input_lengths,
            target_lengths,
            neg_log_likelihood,
            log_alpha,
            blank,
            zero_infinity,
        )
        .unwrap()
    }
906
    /// Infallible variant of `Tensor::f_internal_ctc_loss_out`; panics if the underlying call fails.
    pub fn internal_ctc_loss_out(
        out0: &Tensor,
        out1: &Tensor,
        log_probs: &Tensor,
        targets: &Tensor,
        input_lengths: impl IntList,
        target_lengths: impl IntList,
        blank: i64,
        zero_infinity: bool,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_ctc_loss_out(
            out0,
            out1,
            log_probs,
            targets,
            input_lengths,
            target_lengths,
            blank,
            zero_infinity,
        )
        .unwrap()
    }
929
    /// Infallible variant of `Tensor::f_internal_ctc_loss_tensor`; panics if the underlying call fails.
    pub fn internal_ctc_loss_tensor(
        log_probs: &Tensor,
        targets: &Tensor,
        input_lengths: &Tensor,
        target_lengths: &Tensor,
        blank: i64,
        zero_infinity: bool,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_ctc_loss_tensor(
            log_probs,
            targets,
            input_lengths,
            target_lengths,
            blank,
            zero_infinity,
        )
        .unwrap()
    }
948
    /// Infallible variant of `Tensor::f_internal_ctc_loss_tensor_out`; panics if the underlying call fails.
    pub fn internal_ctc_loss_tensor_out(
        out0: &Tensor,
        out1: &Tensor,
        log_probs: &Tensor,
        targets: &Tensor,
        input_lengths: &Tensor,
        target_lengths: &Tensor,
        blank: i64,
        zero_infinity: bool,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_ctc_loss_tensor_out(
            out0,
            out1,
            log_probs,
            targets,
            input_lengths,
            target_lengths,
            blank,
            zero_infinity,
        )
        .unwrap()
    }
971
    /// Infallible variant of `Tensor::f_internal_cudnn_ctc_loss`; panics if the underlying call fails.
    pub fn internal_cudnn_ctc_loss(
        log_probs: &Tensor,
        targets: &Tensor,
        input_lengths: impl IntList,
        target_lengths: impl IntList,
        blank: i64,
        deterministic: bool,
        zero_infinity: bool,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_cudnn_ctc_loss(
            log_probs,
            targets,
            input_lengths,
            target_lengths,
            blank,
            deterministic,
            zero_infinity,
        )
        .unwrap()
    }
992
    /// Infallible variant of `Tensor::f_internal_cudnn_ctc_loss_out`; panics if the underlying call fails.
    pub fn internal_cudnn_ctc_loss_out(
        out0: &Tensor,
        out1: &Tensor,
        log_probs: &Tensor,
        targets: &Tensor,
        input_lengths: impl IntList,
        target_lengths: impl IntList,
        blank: i64,
        deterministic: bool,
        zero_infinity: bool,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_cudnn_ctc_loss_out(
            out0,
            out1,
            log_probs,
            targets,
            input_lengths,
            target_lengths,
            blank,
            deterministic,
            zero_infinity,
        )
        .unwrap()
    }
1017
    /// Infallible variant of `Tensor::f_internal_cudnn_ctc_loss_tensor`; panics if the underlying call fails.
    pub fn internal_cudnn_ctc_loss_tensor(
        log_probs: &Tensor,
        targets: &Tensor,
        input_lengths: &Tensor,
        target_lengths: &Tensor,
        blank: i64,
        deterministic: bool,
        zero_infinity: bool,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_cudnn_ctc_loss_tensor(
            log_probs,
            targets,
            input_lengths,
            target_lengths,
            blank,
            deterministic,
            zero_infinity,
        )
        .unwrap()
    }
1038
    /// Infallible variant of `Tensor::f_internal_cudnn_init_dropout_state`; panics if the underlying call fails.
    pub fn internal_cudnn_init_dropout_state(
        dropout: f64,
        train: bool,
        dropout_seed: i64,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_internal_cudnn_init_dropout_state(dropout, train, dropout_seed, options).unwrap()
    }
1047
1048    pub fn internal_cudnn_init_dropout_state_out(
1049        out: &Tensor,
1050        dropout: f64,
1051        train: bool,
1052        dropout_seed: i64,
1053    ) -> Tensor {
1054        Tensor::f_internal_cudnn_init_dropout_state_out(out, dropout, train, dropout_seed).unwrap()
1055    }
1056
    /// Panicking wrapper around `f_internal_cudnn_rnn`; unwraps the fallible call.
    pub fn internal_cudnn_rnn<T: Borrow<Tensor>>(
        &self,
        weight: &[T],
        weight_stride0: i64,
        weight_buf: Option<T>,
        hx: &Tensor,
        cx: Option<T>,
        mode: i64,
        hidden_size: i64,
        proj_size: i64,
        num_layers: i64,
        batch_first: bool,
        dropout: f64,
        train: bool,
        bidirectional: bool,
        batch_sizes: impl IntList,
        dropout_state: Option<T>,
    ) -> (Tensor, Tensor, Tensor, Tensor, Tensor) {
        self.f_internal_cudnn_rnn(
            weight,
            weight_stride0,
            weight_buf,
            hx,
            cx,
            mode,
            hidden_size,
            proj_size,
            num_layers,
            batch_first,
            dropout,
            train,
            bidirectional,
            batch_sizes,
            dropout_state,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_cudnn_rnn_flatten_weight`; unwraps the fallible call.
    pub fn internal_cudnn_rnn_flatten_weight<T: Borrow<Tensor>>(
        weight_arr: &[T],
        weight_stride0: i64,
        input_size: i64,
        mode: i64,
        hidden_size: i64,
        proj_size: i64,
        num_layers: i64,
        batch_first: bool,
        bidirectional: bool,
    ) -> Tensor {
        Tensor::f_internal_cudnn_rnn_flatten_weight(
            weight_arr,
            weight_stride0,
            input_size,
            mode,
            hidden_size,
            proj_size,
            num_layers,
            batch_first,
            bidirectional,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_cudnn_rnn_flatten_weight_out`; unwraps the fallible call.
    pub fn internal_cudnn_rnn_flatten_weight_out<T: Borrow<Tensor>>(
        out: &Tensor,
        weight_arr: &[T],
        weight_stride0: i64,
        input_size: i64,
        mode: i64,
        hidden_size: i64,
        proj_size: i64,
        num_layers: i64,
        batch_first: bool,
        bidirectional: bool,
    ) -> Tensor {
        Tensor::f_internal_cudnn_rnn_flatten_weight_out(
            out,
            weight_arr,
            weight_stride0,
            input_size,
            mode,
            hidden_size,
            proj_size,
            num_layers,
            batch_first,
            bidirectional,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_cudnn_rnn_out`; unwraps the fallible call.
    pub fn internal_cudnn_rnn_out<T: Borrow<Tensor>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        out3: &Tensor,
        out4: &Tensor,
        weight: &[T],
        weight_stride0: i64,
        weight_buf: Option<T>,
        hx: &Tensor,
        cx: Option<T>,
        mode: i64,
        hidden_size: i64,
        proj_size: i64,
        num_layers: i64,
        batch_first: bool,
        dropout: f64,
        train: bool,
        bidirectional: bool,
        batch_sizes: impl IntList,
        dropout_state: Option<T>,
    ) -> (Tensor, Tensor, Tensor, Tensor, Tensor) {
        self.f_internal_cudnn_rnn_out(
            out0,
            out1,
            out2,
            out3,
            out4,
            weight,
            weight_stride0,
            weight_buf,
            hx,
            cx,
            mode,
            hidden_size,
            proj_size,
            num_layers,
            batch_first,
            dropout,
            train,
            bidirectional,
            batch_sizes,
            dropout_state,
        )
        .unwrap()
    }
1194
    /// Panicking wrapper around `f_internal_debug_has_internal_overlap`; unwraps the fallible call.
    pub fn internal_debug_has_internal_overlap(&self) -> i64 {
        self.f_internal_debug_has_internal_overlap().unwrap()
    }

    /// Panicking wrapper around `f_internal_dim_arange`; unwraps the fallible call.
    pub fn internal_dim_arange(like: &Tensor, dim: i64) -> Tensor {
        Tensor::f_internal_dim_arange(like, dim).unwrap()
    }

    /// Panicking wrapper around `f_internal_dimi`; unwraps the fallible call.
    pub fn internal_dimi(&self) -> i64 {
        self.f_internal_dimi().unwrap()
    }

    /// Panicking wrapper around `f_internal_dimv`; unwraps the fallible call.
    pub fn internal_dimv(&self) -> i64 {
        self.f_internal_dimv().unwrap()
    }

    /// Panicking wrapper around `f_internal_dirichlet_grad`; unwraps the fallible call.
    pub fn internal_dirichlet_grad(x: &Tensor, alpha: &Tensor, total: &Tensor) -> Tensor {
        Tensor::f_internal_dirichlet_grad(x, alpha, total).unwrap()
    }

    /// Panicking wrapper around `f_internal_dirichlet_grad_out`; unwraps the fallible call.
    pub fn internal_dirichlet_grad_out(
        out: &Tensor,
        x: &Tensor,
        alpha: &Tensor,
        total: &Tensor,
    ) -> Tensor {
        Tensor::f_internal_dirichlet_grad_out(out, x, alpha, total).unwrap()
    }
1223
    /// Panicking wrapper around `f_internal_efficient_attention_backward`; unwraps the fallible call.
    pub fn internal_efficient_attention_backward<T: Borrow<Tensor>>(
        grad_out_: &Tensor,
        query: &Tensor,
        key: &Tensor,
        value: &Tensor,
        bias: Option<T>,
        out: &Tensor,
        cu_seqlens_q: Option<T>,
        cu_seqlens_k: Option<T>,
        max_seqlen_q: i64,
        max_seqlen_k: i64,
        logsumexp: &Tensor,
        dropout_p: f64,
        philox_seed: &Tensor,
        philox_offset: &Tensor,
        custom_mask_type: i64,
        bias_requires_grad: bool,
        scale: impl Into<Option<f64>>,
        num_splits_key: impl Into<Option<i64>>,
        window_size: impl Into<Option<i64>>,
        shared_storage_dqdkdv: bool,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        Tensor::f_internal_efficient_attention_backward(
            grad_out_,
            query,
            key,
            value,
            bias,
            out,
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            logsumexp,
            dropout_p,
            philox_seed,
            philox_offset,
            custom_mask_type,
            bias_requires_grad,
            scale,
            num_splits_key,
            window_size,
            shared_storage_dqdkdv,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_efficientzerotensor`; unwraps the fallible call.
    pub fn internal_efficientzerotensor(size: impl IntList, options: (Kind, Device)) -> Tensor {
        Tensor::f_internal_efficientzerotensor(size, options).unwrap()
    }

    /// Panicking wrapper around `f_internal_efficientzerotensor_out`; unwraps the fallible call.
    pub fn internal_efficientzerotensor_out(out: &Tensor, size: impl IntList) -> Tensor {
        Tensor::f_internal_efficientzerotensor_out(out, size).unwrap()
    }
1278
    /// Panicking wrapper around `f_internal_embedding_bag`; unwraps the fallible call.
    pub fn internal_embedding_bag<T: Borrow<Tensor>>(
        weight: &Tensor,
        indices: &Tensor,
        offsets: &Tensor,
        scale_grad_by_freq: bool,
        mode: i64,
        sparse: bool,
        per_sample_weights: Option<T>,
        include_last_offset: bool,
        padding_idx: i64,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        Tensor::f_internal_embedding_bag(
            weight,
            indices,
            offsets,
            scale_grad_by_freq,
            mode,
            sparse,
            per_sample_weights,
            include_last_offset,
            padding_idx,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_embedding_bag_backward`; unwraps the fallible call.
    pub fn internal_embedding_bag_backward<T: Borrow<Tensor>>(
        grad: &Tensor,
        indices: &Tensor,
        offsets: &Tensor,
        offset2bag: &Tensor,
        bag_size: &Tensor,
        maximum_indices: &Tensor,
        num_weights: i64,
        scale_grad_by_freq: bool,
        mode: i64,
        sparse: bool,
        per_sample_weights: Option<T>,
        padding_idx: i64,
    ) -> Tensor {
        Tensor::f_internal_embedding_bag_backward(
            grad,
            indices,
            offsets,
            offset2bag,
            bag_size,
            maximum_indices,
            num_weights,
            scale_grad_by_freq,
            mode,
            sparse,
            per_sample_weights,
            padding_idx,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_embedding_bag_dense_backward`; unwraps the fallible call.
    pub fn internal_embedding_bag_dense_backward<T: Borrow<Tensor>>(
        grad: &Tensor,
        indices: &Tensor,
        offset2bag: &Tensor,
        bag_size: &Tensor,
        maximum_indices: &Tensor,
        num_weights: i64,
        scale_grad_by_freq: bool,
        mode: i64,
        per_sample_weights: Option<T>,
        padding_idx: i64,
    ) -> Tensor {
        Tensor::f_internal_embedding_bag_dense_backward(
            grad,
            indices,
            offset2bag,
            bag_size,
            maximum_indices,
            num_weights,
            scale_grad_by_freq,
            mode,
            per_sample_weights,
            padding_idx,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_embedding_bag_dense_backward_out`; unwraps the fallible call.
    pub fn internal_embedding_bag_dense_backward_out<T: Borrow<Tensor>>(
        out: &Tensor,
        grad: &Tensor,
        indices: &Tensor,
        offset2bag: &Tensor,
        bag_size: &Tensor,
        maximum_indices: &Tensor,
        num_weights: i64,
        scale_grad_by_freq: bool,
        mode: i64,
        per_sample_weights: Option<T>,
        padding_idx: i64,
    ) -> Tensor {
        Tensor::f_internal_embedding_bag_dense_backward_out(
            out,
            grad,
            indices,
            offset2bag,
            bag_size,
            maximum_indices,
            num_weights,
            scale_grad_by_freq,
            mode,
            per_sample_weights,
            padding_idx,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_embedding_bag_forward_only`; unwraps the fallible call.
    pub fn internal_embedding_bag_forward_only<T: Borrow<Tensor>>(
        weight: &Tensor,
        indices: &Tensor,
        offsets: &Tensor,
        scale_grad_by_freq: bool,
        mode: i64,
        sparse: bool,
        per_sample_weights: Option<T>,
        include_last_offset: bool,
        padding_idx: i64,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        Tensor::f_internal_embedding_bag_forward_only(
            weight,
            indices,
            offsets,
            scale_grad_by_freq,
            mode,
            sparse,
            per_sample_weights,
            include_last_offset,
            padding_idx,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_embedding_bag_forward_only_out`; unwraps the fallible call.
    pub fn internal_embedding_bag_forward_only_out<T: Borrow<Tensor>>(
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        out3: &Tensor,
        weight: &Tensor,
        indices: &Tensor,
        offsets: &Tensor,
        scale_grad_by_freq: bool,
        mode: i64,
        sparse: bool,
        per_sample_weights: Option<T>,
        include_last_offset: bool,
        padding_idx: i64,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        Tensor::f_internal_embedding_bag_forward_only_out(
            out0,
            out1,
            out2,
            out3,
            weight,
            indices,
            offsets,
            scale_grad_by_freq,
            mode,
            sparse,
            per_sample_weights,
            include_last_offset,
            padding_idx,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_embedding_bag_out`; unwraps the fallible call.
    pub fn internal_embedding_bag_out<T: Borrow<Tensor>>(
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        out3: &Tensor,
        weight: &Tensor,
        indices: &Tensor,
        offsets: &Tensor,
        scale_grad_by_freq: bool,
        mode: i64,
        sparse: bool,
        per_sample_weights: Option<T>,
        include_last_offset: bool,
        padding_idx: i64,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        Tensor::f_internal_embedding_bag_out(
            out0,
            out1,
            out2,
            out3,
            weight,
            indices,
            offsets,
            scale_grad_by_freq,
            mode,
            sparse,
            per_sample_weights,
            include_last_offset,
            padding_idx,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_embedding_bag_per_sample_weights_backward`.
    pub fn internal_embedding_bag_per_sample_weights_backward(
        grad: &Tensor,
        weight: &Tensor,
        indices: &Tensor,
        offsets: &Tensor,
        offset2bag: &Tensor,
        mode: i64,
        padding_idx: i64,
    ) -> Tensor {
        Tensor::f_internal_embedding_bag_per_sample_weights_backward(
            grad,
            weight,
            indices,
            offsets,
            offset2bag,
            mode,
            padding_idx,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_embedding_bag_per_sample_weights_backward_out`.
    pub fn internal_embedding_bag_per_sample_weights_backward_out(
        out: &Tensor,
        grad: &Tensor,
        weight: &Tensor,
        indices: &Tensor,
        offsets: &Tensor,
        offset2bag: &Tensor,
        mode: i64,
        padding_idx: i64,
    ) -> Tensor {
        Tensor::f_internal_embedding_bag_per_sample_weights_backward_out(
            out,
            grad,
            weight,
            indices,
            offsets,
            offset2bag,
            mode,
            padding_idx,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_embedding_bag_sparse_backward`; unwraps the fallible call.
    pub fn internal_embedding_bag_sparse_backward<T: Borrow<Tensor>>(
        grad: &Tensor,
        indices: &Tensor,
        offsets: &Tensor,
        offset2bag: &Tensor,
        bag_size: &Tensor,
        num_weights: i64,
        scale_grad_by_freq: bool,
        mode: i64,
        per_sample_weights: Option<T>,
        padding_idx: i64,
    ) -> Tensor {
        Tensor::f_internal_embedding_bag_sparse_backward(
            grad,
            indices,
            offsets,
            offset2bag,
            bag_size,
            num_weights,
            scale_grad_by_freq,
            mode,
            per_sample_weights,
            padding_idx,
        )
        .unwrap()
    }
1552
    /// Panicking wrapper around `f_internal_empty_affine_quantized`; unwraps the fallible call.
    pub fn internal_empty_affine_quantized(
        size: impl IntList,
        options: (Kind, Device),
        scale: f64,
        zero_point: i64,
    ) -> Tensor {
        Tensor::f_internal_empty_affine_quantized(size, options, scale, zero_point).unwrap()
    }

    /// Panicking wrapper around `f_internal_empty_affine_quantized_out`; unwraps the fallible call.
    pub fn internal_empty_affine_quantized_out(
        out: &Tensor,
        size: impl IntList,
        scale: f64,
        zero_point: i64,
    ) -> Tensor {
        Tensor::f_internal_empty_affine_quantized_out(out, size, scale, zero_point).unwrap()
    }

    /// Panicking wrapper around `f_internal_empty_per_channel_affine_quantized`.
    pub fn internal_empty_per_channel_affine_quantized(
        size: impl IntList,
        scales: &Tensor,
        zero_points: &Tensor,
        axis: i64,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_internal_empty_per_channel_affine_quantized(
            size,
            scales,
            zero_points,
            axis,
            options,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_empty_per_channel_affine_quantized_out`.
    pub fn internal_empty_per_channel_affine_quantized_out(
        out: &Tensor,
        size: impl IntList,
        scales: &Tensor,
        zero_points: &Tensor,
        axis: i64,
    ) -> Tensor {
        Tensor::f_internal_empty_per_channel_affine_quantized_out(
            out,
            size,
            scales,
            zero_points,
            axis,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_euclidean_dist`; unwraps the fallible call.
    pub fn internal_euclidean_dist(x1: &Tensor, x2: &Tensor) -> Tensor {
        Tensor::f_internal_euclidean_dist(x1, x2).unwrap()
    }

    /// Panicking wrapper around `f_internal_euclidean_dist_out`; unwraps the fallible call.
    pub fn internal_euclidean_dist_out(out: &Tensor, x1: &Tensor, x2: &Tensor) -> Tensor {
        Tensor::f_internal_euclidean_dist_out(out, x1, x2).unwrap()
    }
1612
    /// Panicking wrapper around `f_internal_fake_quantize_learnable_per_channel_affine`.
    pub fn internal_fake_quantize_learnable_per_channel_affine(
        &self,
        scale: &Tensor,
        zero_point: &Tensor,
        axis: i64,
        quant_min: i64,
        quant_max: i64,
        grad_factor: f64,
    ) -> Tensor {
        self.f_internal_fake_quantize_learnable_per_channel_affine(
            scale,
            zero_point,
            axis,
            quant_min,
            quant_max,
            grad_factor,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_fake_quantize_learnable_per_channel_affine_backward`.
    pub fn internal_fake_quantize_learnable_per_channel_affine_backward(
        &self,
        grad: &Tensor,
        scale: &Tensor,
        zero_point: &Tensor,
        axis: i64,
        quant_min: i64,
        quant_max: i64,
        grad_factor: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_internal_fake_quantize_learnable_per_channel_affine_backward(
            grad,
            scale,
            zero_point,
            axis,
            quant_min,
            quant_max,
            grad_factor,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_fake_quantize_learnable_per_channel_affine_out`.
    pub fn internal_fake_quantize_learnable_per_channel_affine_out(
        &self,
        out: &Tensor,
        scale: &Tensor,
        zero_point: &Tensor,
        axis: i64,
        quant_min: i64,
        quant_max: i64,
        grad_factor: f64,
    ) -> Tensor {
        self.f_internal_fake_quantize_learnable_per_channel_affine_out(
            out,
            scale,
            zero_point,
            axis,
            quant_min,
            quant_max,
            grad_factor,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_fake_quantize_learnable_per_tensor_affine`.
    pub fn internal_fake_quantize_learnable_per_tensor_affine(
        &self,
        scale: &Tensor,
        zero_point: &Tensor,
        quant_min: i64,
        quant_max: i64,
        grad_factor: f64,
    ) -> Tensor {
        self.f_internal_fake_quantize_learnable_per_tensor_affine(
            scale,
            zero_point,
            quant_min,
            quant_max,
            grad_factor,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_fake_quantize_learnable_per_tensor_affine_backward`.
    pub fn internal_fake_quantize_learnable_per_tensor_affine_backward(
        &self,
        grad: &Tensor,
        scale: &Tensor,
        zero_point: &Tensor,
        quant_min: i64,
        quant_max: i64,
        grad_factor: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_internal_fake_quantize_learnable_per_tensor_affine_backward(
            grad,
            scale,
            zero_point,
            quant_min,
            quant_max,
            grad_factor,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_fake_quantize_learnable_per_tensor_affine_out`.
    pub fn internal_fake_quantize_learnable_per_tensor_affine_out(
        &self,
        out: &Tensor,
        scale: &Tensor,
        zero_point: &Tensor,
        quant_min: i64,
        quant_max: i64,
        grad_factor: f64,
    ) -> Tensor {
        self.f_internal_fake_quantize_learnable_per_tensor_affine_out(
            out,
            scale,
            zero_point,
            quant_min,
            quant_max,
            grad_factor,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_fake_quantize_per_tensor_affine_cachemask_tensor_qparams`.
    pub fn internal_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(
        &self,
        scale: &Tensor,
        zero_point: &Tensor,
        fake_quant_enabled: &Tensor,
        quant_min: i64,
        quant_max: i64,
    ) -> (Tensor, Tensor) {
        self.f_internal_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(
            scale,
            zero_point,
            fake_quant_enabled,
            quant_min,
            quant_max,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out`.
    pub fn internal_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        scale: &Tensor,
        zero_point: &Tensor,
        fake_quant_enabled: &Tensor,
        quant_min: i64,
        quant_max: i64,
    ) -> (Tensor, Tensor) {
        self.f_internal_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(
            out0,
            out1,
            scale,
            zero_point,
            fake_quant_enabled,
            quant_min,
            quant_max,
        )
        .unwrap()
    }
1774
    /// Panicking wrapper around `f_internal_fft_c2c`; unwraps the fallible call.
    pub fn internal_fft_c2c(&self, dim: impl IntList, normalization: i64, forward: bool) -> Tensor {
        self.f_internal_fft_c2c(dim, normalization, forward).unwrap()
    }

    /// Panicking wrapper around `f_internal_fft_c2c_out`; unwraps the fallible call.
    pub fn internal_fft_c2c_out(
        &self,
        out: &Tensor,
        dim: impl IntList,
        normalization: i64,
        forward: bool,
    ) -> Tensor {
        self.f_internal_fft_c2c_out(out, dim, normalization, forward).unwrap()
    }

    /// Panicking wrapper around `f_internal_fft_c2r`; unwraps the fallible call.
    pub fn internal_fft_c2r(
        &self,
        dim: impl IntList,
        normalization: i64,
        last_dim_size: i64,
    ) -> Tensor {
        self.f_internal_fft_c2r(dim, normalization, last_dim_size).unwrap()
    }

    /// Panicking wrapper around `f_internal_fft_c2r_out`; unwraps the fallible call.
    pub fn internal_fft_c2r_out(
        &self,
        out: &Tensor,
        dim: impl IntList,
        normalization: i64,
        last_dim_size: i64,
    ) -> Tensor {
        self.f_internal_fft_c2r_out(out, dim, normalization, last_dim_size).unwrap()
    }

    /// Panicking wrapper around `f_internal_fft_r2c`; unwraps the fallible call.
    pub fn internal_fft_r2c(
        &self,
        dim: impl IntList,
        normalization: i64,
        onesided: bool,
    ) -> Tensor {
        self.f_internal_fft_r2c(dim, normalization, onesided).unwrap()
    }

    /// Panicking wrapper around `f_internal_fft_r2c_out`; unwraps the fallible call.
    pub fn internal_fft_r2c_out(
        &self,
        out: &Tensor,
        dim: impl IntList,
        normalization: i64,
        onesided: bool,
    ) -> Tensor {
        self.f_internal_fft_r2c_out(out, dim, normalization, onesided).unwrap()
    }

    /// Panicking wrapper around `f_internal_fill_mem_eff_dropout_mask_` (in-place `_` variant).
    pub fn internal_fill_mem_eff_dropout_mask_(
        &mut self,
        dropout_p: f64,
        seed: i64,
        offset: i64,
    ) -> Tensor {
        self.f_internal_fill_mem_eff_dropout_mask_(dropout_p, seed, offset).unwrap()
    }
1835
    /// Panicking wrapper around `f_internal_flash_attention_backward`; unwraps the fallible call.
    pub fn internal_flash_attention_backward(
        grad_out: &Tensor,
        query: &Tensor,
        key: &Tensor,
        value: &Tensor,
        out: &Tensor,
        logsumexp: &Tensor,
        cum_seq_q: &Tensor,
        cum_seq_k: &Tensor,
        max_q: i64,
        max_k: i64,
        dropout_p: f64,
        is_causal: bool,
        philox_seed: &Tensor,
        philox_offset: &Tensor,
        scale: impl Into<Option<f64>>,
        window_size_left: impl Into<Option<i64>>,
        window_size_right: impl Into<Option<i64>>,
    ) -> (Tensor, Tensor, Tensor) {
        Tensor::f_internal_flash_attention_backward(
            grad_out,
            query,
            key,
            value,
            out,
            logsumexp,
            cum_seq_q,
            cum_seq_k,
            max_q,
            max_k,
            dropout_p,
            is_causal,
            philox_seed,
            philox_offset,
            scale,
            window_size_left,
            window_size_right,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_foobar`; unwraps the fallible call.
    pub fn internal_foobar(&self, arg1: bool, arg2: bool, arg3: bool) -> Tensor {
        self.f_internal_foobar(arg1, arg2, arg3).unwrap()
    }

    /// Panicking wrapper around `f_internal_foobar_out`; unwraps the fallible call.
    pub fn internal_foobar_out(&self, out: &Tensor, arg1: bool, arg2: bool, arg3: bool) -> Tensor {
        self.f_internal_foobar_out(out, arg1, arg2, arg3).unwrap()
    }

    /// Panicking wrapper around `f_internal_functional_assert_async`; unwraps the fallible call.
    pub fn internal_functional_assert_async(&self, assert_msg: &str, dep_token: &Tensor) -> Tensor {
        self.f_internal_functional_assert_async(assert_msg, dep_token).unwrap()
    }

    /// Panicking wrapper around `f_internal_functional_assert_scalar`; unwraps the fallible call.
    pub fn internal_functional_assert_scalar<S: Into<Scalar>>(
        self_scalar: S,
        assert_msg: &str,
        dep_token: &Tensor,
    ) -> Tensor {
        Tensor::f_internal_functional_assert_scalar(self_scalar, assert_msg, dep_token).unwrap()
    }

    /// Panicking wrapper around `f_internal_functional_sym_constrain_range`; unwraps the fallible call.
    pub fn internal_functional_sym_constrain_range<S: Into<Scalar>>(
        size: S,
        min: impl Into<Option<i64>>,
        max: impl Into<Option<i64>>,
        dep_token: &Tensor,
    ) -> Tensor {
        Tensor::f_internal_functional_sym_constrain_range(size, min, max, dep_token).unwrap()
    }

    /// Panicking wrapper around `f_internal_functional_sym_constrain_range_for_size`.
    pub fn internal_functional_sym_constrain_range_for_size<S: Into<Scalar>>(
        size: S,
        min: impl Into<Option<i64>>,
        max: impl Into<Option<i64>>,
        dep_token: &Tensor,
    ) -> Tensor {
        Tensor::f_internal_functional_sym_constrain_range_for_size(size, min, max, dep_token)
            .unwrap()
    }
1915
    /// Panicking wrapper around `f_internal_fused_dropout`; unwraps the fallible call.
    pub fn internal_fused_dropout(&self, p: f64) -> (Tensor, Tensor) {
        self.f_internal_fused_dropout(p).unwrap()
    }

    /// Panicking wrapper around `f_internal_fused_dropout_out`; unwraps the fallible call.
    pub fn internal_fused_dropout_out(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        p: f64,
    ) -> (Tensor, Tensor) {
        self.f_internal_fused_dropout_out(out0, out1, p).unwrap()
    }

    /// Panicking wrapper around `f_internal_fused_moving_avg_obs_fq_helper`; unwraps the fallible call.
    pub fn internal_fused_moving_avg_obs_fq_helper(
        &self,
        observer_on: &Tensor,
        fake_quant_on: &Tensor,
        running_min: &Tensor,
        running_max: &Tensor,
        scale: &Tensor,
        zero_point: &Tensor,
        averaging_const: f64,
        quant_min: i64,
        quant_max: i64,
        ch_axis: i64,
        per_row_fake_quant: bool,
        symmetric_quant: bool,
    ) -> (Tensor, Tensor) {
        self.f_internal_fused_moving_avg_obs_fq_helper(
            observer_on,
            fake_quant_on,
            running_min,
            running_max,
            scale,
            zero_point,
            averaging_const,
            quant_min,
            quant_max,
            ch_axis,
            per_row_fake_quant,
            symmetric_quant,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_fused_moving_avg_obs_fq_helper_functional`.
    pub fn internal_fused_moving_avg_obs_fq_helper_functional(
        &self,
        observer_on: &Tensor,
        fake_quant_on: &Tensor,
        running_min: &Tensor,
        running_max: &Tensor,
        scale: &Tensor,
        zero_point: &Tensor,
        averaging_const: f64,
        quant_min: i64,
        quant_max: i64,
        ch_axis: i64,
        per_row_fake_quant: bool,
        symmetric_quant: bool,
    ) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) {
        self.f_internal_fused_moving_avg_obs_fq_helper_functional(
            observer_on,
            fake_quant_on,
            running_min,
            running_max,
            scale,
            zero_point,
            averaging_const,
            quant_min,
            quant_max,
            ch_axis,
            per_row_fake_quant,
            symmetric_quant,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_fused_moving_avg_obs_fq_helper_out`.
    pub fn internal_fused_moving_avg_obs_fq_helper_out(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        observer_on: &Tensor,
        fake_quant_on: &Tensor,
        running_min: &Tensor,
        running_max: &Tensor,
        scale: &Tensor,
        zero_point: &Tensor,
        averaging_const: f64,
        quant_min: i64,
        quant_max: i64,
        ch_axis: i64,
        per_row_fake_quant: bool,
        symmetric_quant: bool,
    ) -> (Tensor, Tensor) {
        self.f_internal_fused_moving_avg_obs_fq_helper_out(
            out0,
            out1,
            observer_on,
            fake_quant_on,
            running_min,
            running_max,
            scale,
            zero_point,
            averaging_const,
            quant_min,
            quant_max,
            ch_axis,
            per_row_fake_quant,
            symmetric_quant,
        )
        .unwrap()
    }

    /// Panicking wrapper around `f_internal_fused_sdp_choice`; unwraps the fallible call.
    pub fn internal_fused_sdp_choice<T: Borrow<Tensor>>(
        query: &Tensor,
        key: &Tensor,
        value: &Tensor,
        attn_mask: Option<T>,
        dropout_p: f64,
        is_causal: bool,
        scale: impl Into<Option<f64>>,
        enable_gqa: bool,
    ) -> i64 {
        Tensor::f_internal_fused_sdp_choice(
            query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa,
        )
        .unwrap()
    }
2044
2045    pub fn internal_fw_primal(&self, level: i64) -> Tensor {
2046        self.f_internal_fw_primal(level).unwrap()
2047    }
2048
2049    pub fn internal_fw_primal_copy(&self, level: i64) -> Tensor {
2050        self.f_internal_fw_primal_copy(level).unwrap()
2051    }
2052
2053    pub fn internal_fw_primal_copy_out(&self, out: &Tensor, level: i64) -> Tensor {
2054        self.f_internal_fw_primal_copy_out(out, level).unwrap()
2055    }
2056
2057    pub fn internal_gather_sparse_backward(
2058        &self,
2059        dim: i64,
2060        index: &Tensor,
2061        grad: &Tensor,
2062    ) -> Tensor {
2063        self.f_internal_gather_sparse_backward(dim, index, grad).unwrap()
2064    }
2065
    /// Panicking wrapper over `f_internal_grid_sampler_2d_cpu_fallback`.
    pub fn internal_grid_sampler_2d_cpu_fallback(
        &self,
        grid: &Tensor,
        interpolation_mode: i64,
        padding_mode: i64,
        align_corners: bool,
    ) -> Tensor {
        self.f_internal_grid_sampler_2d_cpu_fallback(
            grid,
            interpolation_mode,
            padding_mode,
            align_corners,
        )
        .unwrap()
    }

    /// Backward pass of the CPU grid-sampler fallback; returns the two gradient
    /// tensors. Panicking wrapper over `f_internal_grid_sampler_2d_cpu_fallback_backward`.
    pub fn internal_grid_sampler_2d_cpu_fallback_backward(
        &self,
        grad_output: &Tensor,
        grid: &Tensor,
        interpolation_mode: i64,
        padding_mode: i64,
        align_corners: bool,
    ) -> (Tensor, Tensor) {
        self.f_internal_grid_sampler_2d_cpu_fallback_backward(
            grad_output,
            grid,
            interpolation_mode,
            padding_mode,
            align_corners,
        )
        .unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over
    /// `f_internal_grid_sampler_2d_cpu_fallback_out`.
    pub fn internal_grid_sampler_2d_cpu_fallback_out(
        &self,
        out: &Tensor,
        grid: &Tensor,
        interpolation_mode: i64,
        padding_mode: i64,
        align_corners: bool,
    ) -> Tensor {
        self.f_internal_grid_sampler_2d_cpu_fallback_out(
            out,
            grid,
            interpolation_mode,
            padding_mode,
            align_corners,
        )
        .unwrap()
    }

    /// Panicking wrapper over `f_internal_has_compatible_shallow_copy_type`.
    pub fn internal_has_compatible_shallow_copy_type(&self, from: &Tensor) -> bool {
        self.f_internal_has_compatible_shallow_copy_type(from).unwrap()
    }

    /// Panicking wrapper over `f_internal_has_same_storage_numel`.
    pub fn internal_has_same_storage_numel(&self, other: &Tensor) -> bool {
        self.f_internal_has_same_storage_numel(other).unwrap()
    }
2125
    /// Panicking wrapper over `f_internal_histogramdd_bin_edges`; returns one
    /// bin-edge tensor per dimension.
    pub fn internal_histogramdd_bin_edges<T: Borrow<Tensor>>(
        &self,
        bins: impl IntList,
        range: impl DoubleList,
        weight: Option<T>,
        density: bool,
    ) -> Vec<Tensor> {
        self.f_internal_histogramdd_bin_edges(bins, range, weight, density).unwrap()
    }

    /// Out-variant filling the `out` slice in place; panicking wrapper over
    /// `f_internal_histogramdd_bin_edges_out`.
    pub fn internal_histogramdd_bin_edges_out<T: Borrow<Tensor>>(
        &self,
        out: &[T],
        bins: impl IntList,
        range: impl DoubleList,
        weight: Option<T>,
        density: bool,
    ) {
        self.f_internal_histogramdd_bin_edges_out(out, bins, range, weight, density).unwrap()
    }

    /// Panicking wrapper over `f_internal_histogramdd_from_bin_cts`.
    pub fn internal_histogramdd_from_bin_cts<T: Borrow<Tensor>>(
        &self,
        bins: impl IntList,
        range: impl DoubleList,
        weight: Option<T>,
        density: bool,
    ) -> Tensor {
        self.f_internal_histogramdd_from_bin_cts(bins, range, weight, density).unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over
    /// `f_internal_histogramdd_from_bin_cts_out`.
    pub fn internal_histogramdd_from_bin_cts_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        bins: impl IntList,
        range: impl DoubleList,
        weight: Option<T>,
        density: bool,
    ) -> Tensor {
        self.f_internal_histogramdd_from_bin_cts_out(out, bins, range, weight, density).unwrap()
    }

    /// Panicking wrapper over `f_internal_histogramdd_from_bin_tensors`.
    pub fn internal_histogramdd_from_bin_tensors<T: Borrow<Tensor>>(
        &self,
        bins: &[T],
        weight: Option<T>,
        density: bool,
    ) -> Tensor {
        self.f_internal_histogramdd_from_bin_tensors(bins, weight, density).unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over
    /// `f_internal_histogramdd_from_bin_tensors_out`.
    pub fn internal_histogramdd_from_bin_tensors_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        bins: &[T],
        weight: Option<T>,
        density: bool,
    ) -> Tensor {
        self.f_internal_histogramdd_from_bin_tensors_out(out, bins, weight, density).unwrap()
    }
2186
    /// Panicking wrapper over `f_internal_index_put_impl`.
    pub fn internal_index_put_impl<T: Borrow<Tensor>>(
        &self,
        indices: &[Option<T>],
        values: &Tensor,
        accumulate: bool,
        unsafe_: bool,
    ) -> Tensor {
        self.f_internal_index_put_impl(indices, values, accumulate, unsafe_).unwrap()
    }

    /// In-place variant (`&mut self`); panicking wrapper over `f_internal_index_put_impl_`.
    pub fn internal_index_put_impl_<T: Borrow<Tensor>>(
        &mut self,
        indices: &[Option<T>],
        values: &Tensor,
        accumulate: bool,
        unsafe_: bool,
    ) -> Tensor {
        self.f_internal_index_put_impl_(indices, values, accumulate, unsafe_).unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over `f_internal_index_put_impl_out`.
    pub fn internal_index_put_impl_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        indices: &[Option<T>],
        values: &Tensor,
        accumulate: bool,
        unsafe_: bool,
    ) -> Tensor {
        self.f_internal_index_put_impl_out(out, indices, values, accumulate, unsafe_).unwrap()
    }

    /// Panicking wrapper over `f_internal_indices`.
    pub fn internal_indices(&self) -> Tensor {
        self.f_internal_indices().unwrap()
    }

    /// Panicking wrapper over `f_internal_indices_copy`.
    pub fn internal_indices_copy(&self) -> Tensor {
        self.f_internal_indices_copy().unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over `f_internal_indices_copy_out`.
    pub fn internal_indices_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_internal_indices_copy_out(out).unwrap()
    }

    /// Panicking wrapper over `f_internal_int_mm`.
    pub fn internal_int_mm(&self, mat2: &Tensor) -> Tensor {
        self.f_internal_int_mm(mat2).unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over `f_internal_int_mm_out`.
    pub fn internal_int_mm_out(&self, out: &Tensor, mat2: &Tensor) -> Tensor {
        self.f_internal_int_mm_out(out, mat2).unwrap()
    }

    /// Panicking wrapper over `f_internal_is_all_true`.
    pub fn internal_is_all_true(&self) -> Tensor {
        self.f_internal_is_all_true().unwrap()
    }

    /// Panicking wrapper over `f_internal_is_any_true`.
    pub fn internal_is_any_true(&self) -> Tensor {
        self.f_internal_is_any_true().unwrap()
    }

    /// Panicking wrapper over `f_internal_is_zerotensor`.
    pub fn internal_is_zerotensor(&self) -> bool {
        self.f_internal_is_zerotensor().unwrap()
    }

    /// Panicking wrapper over `f_internal_lazy_clone`.
    pub fn internal_lazy_clone(&self) -> Tensor {
        self.f_internal_lazy_clone().unwrap()
    }

    /// Validates `info` codes from a linalg call; panics (via unwrap) if the
    /// underlying check errors. Wrapper over `f_internal_linalg_check_errors`.
    pub fn internal_linalg_check_errors(info: &Tensor, api_name: &str, is_matrix: bool) {
        Tensor::f_internal_linalg_check_errors(info, api_name, is_matrix).unwrap()
    }
2257
    /// Determinant plus LU factors/pivots; panicking wrapper over `f_internal_linalg_det`.
    pub fn internal_linalg_det(a: &Tensor) -> (Tensor, Tensor, Tensor) {
        Tensor::f_internal_linalg_det(a).unwrap()
    }

    /// Out-variant writing into the provided result/lu/pivots tensors; panicking
    /// wrapper over `f_internal_linalg_det_result`.
    pub fn internal_linalg_det_result(
        result: &Tensor,
        lu: &Tensor,
        pivots: &Tensor,
        a: &Tensor,
    ) -> (Tensor, Tensor, Tensor) {
        Tensor::f_internal_linalg_det_result(result, lu, pivots, a).unwrap()
    }

    /// Hermitian eigendecomposition; panicking wrapper over `f_internal_linalg_eigh`.
    pub fn internal_linalg_eigh(a: &Tensor, uplo: &str, compute_v: bool) -> (Tensor, Tensor) {
        Tensor::f_internal_linalg_eigh(a, uplo, compute_v).unwrap()
    }

    /// Out-variant writing into `eigenvalues`/`eigenvectors`; panicking wrapper
    /// over `f_internal_linalg_eigh_eigenvalues`.
    pub fn internal_linalg_eigh_eigenvalues(
        eigenvalues: &Tensor,
        eigenvectors: &Tensor,
        a: &Tensor,
        uplo: &str,
        compute_v: bool,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_linalg_eigh_eigenvalues(eigenvalues, eigenvectors, a, uplo, compute_v)
            .unwrap()
    }

    /// Panicking wrapper over `f_internal_linalg_eigvals`.
    pub fn internal_linalg_eigvals(&self) -> Tensor {
        self.f_internal_linalg_eigvals().unwrap()
    }

    /// Sign/log-abs-det plus LU factors/pivots; panicking wrapper over
    /// `f_internal_linalg_slogdet`.
    pub fn internal_linalg_slogdet(a: &Tensor) -> (Tensor, Tensor, Tensor, Tensor) {
        Tensor::f_internal_linalg_slogdet(a).unwrap()
    }

    /// Out-variant of slogdet writing into the provided tensors; panicking
    /// wrapper over `f_internal_linalg_slogdet_sign`.
    pub fn internal_linalg_slogdet_sign(
        sign: &Tensor,
        logabsdet: &Tensor,
        lu: &Tensor,
        pivots: &Tensor,
        a: &Tensor,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        Tensor::f_internal_linalg_slogdet_sign(sign, logabsdet, lu, pivots, a).unwrap()
    }

    /// Solve `a x = b` (or `x a = b` when `left` is false) returning solution,
    /// LU, pivots and info; panicking wrapper over `f_internal_linalg_solve_ex`.
    pub fn internal_linalg_solve_ex(
        a: &Tensor,
        b: &Tensor,
        left: bool,
        check_errors: bool,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        Tensor::f_internal_linalg_solve_ex(a, b, left, check_errors).unwrap()
    }

    /// Out-variant of `internal_linalg_solve_ex` writing into the provided
    /// tensors; panicking wrapper over `f_internal_linalg_solve_ex_result`.
    pub fn internal_linalg_solve_ex_result(
        result: &Tensor,
        lu: &Tensor,
        pivots: &Tensor,
        info: &Tensor,
        a: &Tensor,
        b: &Tensor,
        left: bool,
        check_errors: bool,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        Tensor::f_internal_linalg_solve_ex_result(
            result,
            lu,
            pivots,
            info,
            a,
            b,
            left,
            check_errors,
        )
        .unwrap()
    }

    /// SVD returning (U, S, Vh); panicking wrapper over `f_internal_linalg_svd`.
    pub fn internal_linalg_svd(
        a: &Tensor,
        full_matrices: bool,
        compute_uv: bool,
        driver: &str,
    ) -> (Tensor, Tensor, Tensor) {
        Tensor::f_internal_linalg_svd(a, full_matrices, compute_uv, driver).unwrap()
    }

    /// Out-variant of SVD writing into `u`/`s`/`vh`; panicking wrapper over
    /// `f_internal_linalg_svd_u`.
    pub fn internal_linalg_svd_u(
        u: &Tensor,
        s: &Tensor,
        vh: &Tensor,
        a: &Tensor,
        full_matrices: bool,
        compute_uv: bool,
        driver: &str,
    ) -> (Tensor, Tensor, Tensor) {
        Tensor::f_internal_linalg_svd_u(u, s, vh, a, full_matrices, compute_uv, driver).unwrap()
    }
2356
    /// Panicking wrapper over `f_internal_log_softmax`.
    pub fn internal_log_softmax(&self, dim: i64, half_to_float: bool) -> Tensor {
        self.f_internal_log_softmax(dim, half_to_float).unwrap()
    }

    /// Backward pass for log_softmax; panicking wrapper over
    /// `f_internal_log_softmax_backward_data`.
    pub fn internal_log_softmax_backward_data(
        grad_output: &Tensor,
        output: &Tensor,
        dim: i64,
        input_dtype: Kind,
    ) -> Tensor {
        Tensor::f_internal_log_softmax_backward_data(grad_output, output, dim, input_dtype).unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over
    /// `f_internal_log_softmax_backward_data_out`.
    pub fn internal_log_softmax_backward_data_out(
        out: &Tensor,
        grad_output: &Tensor,
        output: &Tensor,
        dim: i64,
        input_dtype: Kind,
    ) -> Tensor {
        Tensor::f_internal_log_softmax_backward_data_out(out, grad_output, output, dim, input_dtype)
            .unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over `f_internal_log_softmax_out`.
    pub fn internal_log_softmax_out(&self, out: &Tensor, dim: i64, half_to_float: bool) -> Tensor {
        self.f_internal_log_softmax_out(out, dim, half_to_float).unwrap()
    }

    /// Panicking wrapper over `f_internal_logcumsumexp`.
    pub fn internal_logcumsumexp(&self, dim: i64) -> Tensor {
        self.f_internal_logcumsumexp(dim).unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over `f_internal_logcumsumexp_out`.
    pub fn internal_logcumsumexp_out(&self, out: &Tensor, dim: i64) -> Tensor {
        self.f_internal_logcumsumexp_out(out, dim).unwrap()
    }
2392
    /// MPS LSTM forward; panicking wrapper over `f_internal_lstm_mps`.
    pub fn internal_lstm_mps<T: Borrow<Tensor>>(
        &self,
        hx: &[T],
        params: &[T],
        has_biases: bool,
        num_layers: i64,
        dropout: f64,
        train: bool,
        bidirectional: bool,
        batch_first: bool,
    ) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) {
        self.f_internal_lstm_mps(
            hx,
            params,
            has_biases,
            num_layers,
            dropout,
            train,
            bidirectional,
            batch_first,
        )
        .unwrap()
    }

    /// Out-variant writing into `out0`..`out5`; panicking wrapper over
    /// `f_internal_lstm_mps_out`.
    pub fn internal_lstm_mps_out<T: Borrow<Tensor>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        out3: &Tensor,
        out4: &Tensor,
        out5: &Tensor,
        hx: &[T],
        params: &[T],
        has_biases: bool,
        num_layers: i64,
        dropout: f64,
        train: bool,
        bidirectional: bool,
        batch_first: bool,
    ) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) {
        self.f_internal_lstm_mps_out(
            out0,
            out1,
            out2,
            out3,
            out4,
            out5,
            hx,
            params,
            has_biases,
            num_layers,
            dropout,
            train,
            bidirectional,
            batch_first,
        )
        .unwrap()
    }

    /// LU factorization with info tensor; panicking wrapper over `f_internal_lu_with_info`.
    pub fn internal_lu_with_info(
        &self,
        pivot: bool,
        check_errors: bool,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_internal_lu_with_info(pivot, check_errors).unwrap()
    }
2460
    /// Panicking wrapper over `f_internal_make_dep_token`.
    pub fn internal_make_dep_token(options: (Kind, Device)) -> Tensor {
        Tensor::f_internal_make_dep_token(options).unwrap()
    }

    /// Builds a forward-AD dual from `primal`/`tangent`; panicking wrapper over
    /// `f_internal_make_dual`.
    pub fn internal_make_dual(primal: &Tensor, tangent: &Tensor, level: i64) -> Tensor {
        Tensor::f_internal_make_dual(primal, tangent, level).unwrap()
    }

    /// Panicking wrapper over `f_internal_make_dual_copy`.
    pub fn internal_make_dual_copy(primal: &Tensor, tangent: &Tensor, level: i64) -> Tensor {
        Tensor::f_internal_make_dual_copy(primal, tangent, level).unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over `f_internal_make_dual_copy_out`.
    pub fn internal_make_dual_copy_out(
        out: &Tensor,
        primal: &Tensor,
        tangent: &Tensor,
        level: i64,
    ) -> Tensor {
        Tensor::f_internal_make_dual_copy_out(out, primal, tangent, level).unwrap()
    }

    /// Panicking wrapper over `f_internal_make_per_channel_quantized_tensor`.
    pub fn internal_make_per_channel_quantized_tensor(
        &self,
        scale: &Tensor,
        zero_point: &Tensor,
        axis: i64,
    ) -> Tensor {
        self.f_internal_make_per_channel_quantized_tensor(scale, zero_point, axis).unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over
    /// `f_internal_make_per_channel_quantized_tensor_out`.
    pub fn internal_make_per_channel_quantized_tensor_out(
        &self,
        out: &Tensor,
        scale: &Tensor,
        zero_point: &Tensor,
        axis: i64,
    ) -> Tensor {
        self.f_internal_make_per_channel_quantized_tensor_out(out, scale, zero_point, axis).unwrap()
    }

    /// Panicking wrapper over `f_internal_make_per_tensor_quantized_tensor`.
    pub fn internal_make_per_tensor_quantized_tensor(&self, scale: f64, zero_point: i64) -> Tensor {
        self.f_internal_make_per_tensor_quantized_tensor(scale, zero_point).unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over
    /// `f_internal_make_per_tensor_quantized_tensor_out`.
    pub fn internal_make_per_tensor_quantized_tensor_out(
        &self,
        out: &Tensor,
        scale: f64,
        zero_point: i64,
    ) -> Tensor {
        self.f_internal_make_per_tensor_quantized_tensor_out(out, scale, zero_point).unwrap()
    }

    /// Panicking wrapper over `f_internal_masked_scale`.
    pub fn internal_masked_scale(&self, mask: &Tensor, scale: f64) -> Tensor {
        self.f_internal_masked_scale(mask, scale).unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over `f_internal_masked_scale_out`.
    pub fn internal_masked_scale_out(&self, out: &Tensor, mask: &Tensor, scale: f64) -> Tensor {
        self.f_internal_masked_scale_out(out, mask, scale).unwrap()
    }
2521
    /// Panicking wrapper over `f_internal_masked_softmax`.
    pub fn internal_masked_softmax(
        &self,
        mask: &Tensor,
        dim: impl Into<Option<i64>>,
        mask_type: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_internal_masked_softmax(mask, dim, mask_type).unwrap()
    }

    /// Backward pass for masked softmax; panicking wrapper over
    /// `f_internal_masked_softmax_backward`.
    pub fn internal_masked_softmax_backward(
        grad_output: &Tensor,
        output: &Tensor,
        mask: &Tensor,
        dim: impl Into<Option<i64>>,
    ) -> Tensor {
        Tensor::f_internal_masked_softmax_backward(grad_output, output, mask, dim).unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over
    /// `f_internal_masked_softmax_backward_out`.
    pub fn internal_masked_softmax_backward_out(
        out: &Tensor,
        grad_output: &Tensor,
        output: &Tensor,
        mask: &Tensor,
        dim: impl Into<Option<i64>>,
    ) -> Tensor {
        Tensor::f_internal_masked_softmax_backward_out(out, grad_output, output, mask, dim).unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over `f_internal_masked_softmax_out`.
    pub fn internal_masked_softmax_out(
        &self,
        out: &Tensor,
        mask: &Tensor,
        dim: impl Into<Option<i64>>,
        mask_type: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_internal_masked_softmax_out(out, mask, dim, mask_type).unwrap()
    }

    /// Panicking wrapper over `f_internal_mixed_dtypes_linear`.
    pub fn internal_mixed_dtypes_linear<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        scale: &Tensor,
        bias: Option<T>,
        activation: &str,
    ) -> Tensor {
        self.f_internal_mixed_dtypes_linear(weight, scale, bias, activation).unwrap()
    }

    /// Panicking wrapper over `f_internal_mkldnn_reshape`.
    pub fn internal_mkldnn_reshape(&self, shape: impl IntList) -> Tensor {
        self.f_internal_mkldnn_reshape(shape).unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over `f_internal_mkldnn_reshape_out`.
    pub fn internal_mkldnn_reshape_out(&self, out: &Tensor, shape: impl IntList) -> Tensor {
        self.f_internal_mkldnn_reshape_out(out, shape).unwrap()
    }

    /// Panicking wrapper over `f_internal_mkldnn_transpose`.
    pub fn internal_mkldnn_transpose(&self, dim0: i64, dim1: i64) -> Tensor {
        self.f_internal_mkldnn_transpose(dim0, dim1).unwrap()
    }

    /// In-place variant (`&mut self`); panicking wrapper over `f_internal_mkldnn_transpose_`.
    pub fn internal_mkldnn_transpose_(&mut self, dim0: i64, dim1: i64) -> Tensor {
        self.f_internal_mkldnn_transpose_(dim0, dim1).unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over `f_internal_mkldnn_transpose_out`.
    pub fn internal_mkldnn_transpose_out(&self, out: &Tensor, dim0: i64, dim1: i64) -> Tensor {
        self.f_internal_mkldnn_transpose_out(out, dim0, dim1).unwrap()
    }
2589
    /// MPS convolution forward; panicking wrapper over `f_internal_mps_convolution`.
    pub fn internal_mps_convolution<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        padding: impl IntList,
        stride: impl IntList,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_internal_mps_convolution(weight, bias, padding, stride, dilation, groups).unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over `f_internal_mps_convolution_out`.
    pub fn internal_mps_convolution_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        bias: Option<T>,
        padding: impl IntList,
        stride: impl IntList,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_internal_mps_convolution_out(out, weight, bias, padding, stride, dilation, groups)
            .unwrap()
    }

    /// MPS transposed convolution; panicking wrapper over
    /// `f_internal_mps_convolution_transpose`.
    pub fn internal_mps_convolution_transpose(
        &self,
        weight: &Tensor,
        padding: impl IntList,
        output_padding: impl IntList,
        stride: impl IntList,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_internal_mps_convolution_transpose(
            weight,
            padding,
            output_padding,
            stride,
            dilation,
            groups,
        )
        .unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over
    /// `f_internal_mps_convolution_transpose_out`.
    pub fn internal_mps_convolution_transpose_out(
        &self,
        out: &Tensor,
        weight: &Tensor,
        padding: impl IntList,
        output_padding: impl IntList,
        stride: impl IntList,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_internal_mps_convolution_transpose_out(
            out,
            weight,
            padding,
            output_padding,
            stride,
            dilation,
            groups,
        )
        .unwrap()
    }
2657
    /// Batch norm with running stats; panicking wrapper over
    /// `f_internal_native_batch_norm_legit`.
    pub fn internal_native_batch_norm_legit<T: Borrow<Tensor>>(
        &self,
        weight: Option<T>,
        bias: Option<T>,
        running_mean: &Tensor,
        running_var: &Tensor,
        training: bool,
        momentum: f64,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_internal_native_batch_norm_legit(
            weight,
            bias,
            running_mean,
            running_var,
            training,
            momentum,
            eps,
        )
        .unwrap()
    }

    /// Functional variant returning updated stats in the tuple instead of
    /// mutating them; panicking wrapper over
    /// `f_internal_native_batch_norm_legit_functional`.
    pub fn internal_native_batch_norm_legit_functional<T: Borrow<Tensor>>(
        &self,
        weight: Option<T>,
        bias: Option<T>,
        running_mean: &Tensor,
        running_var: &Tensor,
        training: bool,
        momentum: f64,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor, Tensor, Tensor) {
        self.f_internal_native_batch_norm_legit_functional(
            weight,
            bias,
            running_mean,
            running_var,
            training,
            momentum,
            eps,
        )
        .unwrap()
    }

    /// Variant without running stats; panicking wrapper over
    /// `f_internal_native_batch_norm_legit_no_stats`.
    pub fn internal_native_batch_norm_legit_no_stats<T: Borrow<Tensor>>(
        &self,
        weight: Option<T>,
        bias: Option<T>,
        training: bool,
        momentum: f64,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_internal_native_batch_norm_legit_no_stats(weight, bias, training, momentum, eps)
            .unwrap()
    }

    /// Out-variant writing into `out`/`save_mean`/`save_invstd`; panicking
    /// wrapper over `f_internal_native_batch_norm_legit_no_stats_out`.
    pub fn internal_native_batch_norm_legit_no_stats_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        save_mean: &Tensor,
        save_invstd: &Tensor,
        weight: Option<T>,
        bias: Option<T>,
        training: bool,
        momentum: f64,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_internal_native_batch_norm_legit_no_stats_out(
            out,
            save_mean,
            save_invstd,
            weight,
            bias,
            training,
            momentum,
            eps,
        )
        .unwrap()
    }

    /// Inference-only variant (no `training` flag); panicking wrapper over
    /// `f_internal_native_batch_norm_legit_no_training`.
    pub fn internal_native_batch_norm_legit_no_training<T: Borrow<Tensor>>(
        &self,
        weight: Option<T>,
        bias: Option<T>,
        running_mean: &Tensor,
        running_var: &Tensor,
        momentum: f64,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_internal_native_batch_norm_legit_no_training(
            weight,
            bias,
            running_mean,
            running_var,
            momentum,
            eps,
        )
        .unwrap()
    }

    /// Out-variant writing into `out0`..`out2`; panicking wrapper over
    /// `f_internal_native_batch_norm_legit_no_training_out`.
    pub fn internal_native_batch_norm_legit_no_training_out<T: Borrow<Tensor>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        weight: Option<T>,
        bias: Option<T>,
        running_mean: &Tensor,
        running_var: &Tensor,
        momentum: f64,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_internal_native_batch_norm_legit_no_training_out(
            out0,
            out1,
            out2,
            weight,
            bias,
            running_mean,
            running_var,
            momentum,
            eps,
        )
        .unwrap()
    }

    /// Out-variant writing into `out`/`save_mean`/`save_invstd`; panicking
    /// wrapper over `f_internal_native_batch_norm_legit_out`.
    pub fn internal_native_batch_norm_legit_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        save_mean: &Tensor,
        save_invstd: &Tensor,
        weight: Option<T>,
        bias: Option<T>,
        running_mean: &Tensor,
        running_var: &Tensor,
        training: bool,
        momentum: f64,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_internal_native_batch_norm_legit_out(
            out,
            save_mean,
            save_invstd,
            weight,
            bias,
            running_mean,
            running_var,
            training,
            momentum,
            eps,
        )
        .unwrap()
    }
2811
    /// Native multi-head attention forward; panicking wrapper over
    /// `f_internal_native_multi_head_attention`.
    pub fn internal_native_multi_head_attention<T: Borrow<Tensor>>(
        query: &Tensor,
        key: &Tensor,
        value: &Tensor,
        embed_dim: i64,
        num_head: i64,
        qkv_weight: &Tensor,
        qkv_bias: &Tensor,
        proj_weight: &Tensor,
        proj_bias: &Tensor,
        mask: Option<T>,
        need_weights: bool,
        average_attn_weights: bool,
        mask_type: impl Into<Option<i64>>,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_native_multi_head_attention(
            query,
            key,
            value,
            embed_dim,
            num_head,
            qkv_weight,
            qkv_bias,
            proj_weight,
            proj_bias,
            mask,
            need_weights,
            average_attn_weights,
            mask_type,
        )
        .unwrap()
    }

    /// Out-variant writing into `out0`/`out1`; panicking wrapper over
    /// `f_internal_native_multi_head_attention_out`.
    pub fn internal_native_multi_head_attention_out<T: Borrow<Tensor>>(
        out0: &Tensor,
        out1: &Tensor,
        query: &Tensor,
        key: &Tensor,
        value: &Tensor,
        embed_dim: i64,
        num_head: i64,
        qkv_weight: &Tensor,
        qkv_bias: &Tensor,
        proj_weight: &Tensor,
        proj_bias: &Tensor,
        mask: Option<T>,
        need_weights: bool,
        average_attn_weights: bool,
        mask_type: impl Into<Option<i64>>,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_native_multi_head_attention_out(
            out0,
            out1,
            query,
            key,
            value,
            embed_dim,
            num_head,
            qkv_weight,
            qkv_bias,
            proj_weight,
            proj_bias,
            mask,
            need_weights,
            average_attn_weights,
            mask_type,
        )
        .unwrap()
    }

    /// Panicking wrapper over `f_internal_neg_view`.
    pub fn internal_neg_view(&self) -> Tensor {
        self.f_internal_neg_view().unwrap()
    }

    /// Panicking wrapper over `f_internal_neg_view_copy`.
    pub fn internal_neg_view_copy(&self) -> Tensor {
        self.f_internal_neg_view_copy().unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over `f_internal_neg_view_copy_out`.
    pub fn internal_neg_view_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_internal_neg_view_copy_out(out).unwrap()
    }
2893
    /// Panicking wrapper over `f_internal_nested_compute_contiguous_strides_offsets`.
    pub fn internal_nested_compute_contiguous_strides_offsets(
        nested_size: &Tensor,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_nested_compute_contiguous_strides_offsets(nested_size).unwrap()
    }

    /// Builds a nested tensor from a padded one; panicking wrapper over
    /// `f_internal_nested_from_padded`.
    pub fn internal_nested_from_padded(
        padded: &Tensor,
        cpu_nested_shape_example: &Tensor,
        fuse_transform_0213: bool,
    ) -> Tensor {
        Tensor::f_internal_nested_from_padded(padded, cpu_nested_shape_example, fuse_transform_0213)
            .unwrap()
    }

    /// Panicking wrapper over `f_internal_nested_from_padded_and_nested_example`.
    pub fn internal_nested_from_padded_and_nested_example(
        padded: &Tensor,
        nt_example: &Tensor,
    ) -> Tensor {
        Tensor::f_internal_nested_from_padded_and_nested_example(padded, nt_example).unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over
    /// `f_internal_nested_from_padded_and_nested_example_out`.
    pub fn internal_nested_from_padded_and_nested_example_out(
        out: &Tensor,
        padded: &Tensor,
        nt_example: &Tensor,
    ) -> Tensor {
        Tensor::f_internal_nested_from_padded_and_nested_example_out(out, padded, nt_example)
            .unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over
    /// `f_internal_nested_from_padded_out`.
    pub fn internal_nested_from_padded_out(
        out: &Tensor,
        padded: &Tensor,
        cpu_nested_shape_example: &Tensor,
        fuse_transform_0213: bool,
    ) -> Tensor {
        Tensor::f_internal_nested_from_padded_out(
            out,
            padded,
            cpu_nested_shape_example,
            fuse_transform_0213,
        )
        .unwrap()
    }

    /// Panicking wrapper over `f_internal_nested_get_jagged_dummy`.
    pub fn internal_nested_get_jagged_dummy(any: &Tensor) -> Tensor {
        Tensor::f_internal_nested_get_jagged_dummy(any).unwrap()
    }

    /// Panicking wrapper over `f_internal_nested_get_lengths`.
    pub fn internal_nested_get_lengths(&self) -> Tensor {
        self.f_internal_nested_get_lengths().unwrap()
    }

    /// Panicking wrapper over `f_internal_nested_get_max_seqlen`.
    pub fn internal_nested_get_max_seqlen(&self) -> Tensor {
        self.f_internal_nested_get_max_seqlen().unwrap()
    }

    /// Panicking wrapper over `f_internal_nested_get_min_seqlen`.
    pub fn internal_nested_get_min_seqlen(&self) -> Tensor {
        self.f_internal_nested_get_min_seqlen().unwrap()
    }

    /// Panicking wrapper over `f_internal_nested_get_offsets`.
    pub fn internal_nested_get_offsets(&self) -> Tensor {
        self.f_internal_nested_get_offsets().unwrap()
    }

    /// Panicking wrapper over `f_internal_nested_get_ragged_idx`.
    pub fn internal_nested_get_ragged_idx(&self) -> i64 {
        self.f_internal_nested_get_ragged_idx().unwrap()
    }

    /// Panicking wrapper over `f_internal_nested_get_values`.
    pub fn internal_nested_get_values(&self) -> Tensor {
        self.f_internal_nested_get_values().unwrap()
    }

    /// Panicking wrapper over `f_internal_nested_get_values_copy`.
    pub fn internal_nested_get_values_copy(&self) -> Tensor {
        self.f_internal_nested_get_values_copy().unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over
    /// `f_internal_nested_get_values_copy_out`.
    pub fn internal_nested_get_values_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_internal_nested_get_values_copy_out(out).unwrap()
    }
2975
    /// Panicking wrapper over `f_internal_nested_select_backward`.
    pub fn internal_nested_select_backward(
        &self,
        grad_output: &Tensor,
        dim: i64,
        index: i64,
    ) -> Tensor {
        self.f_internal_nested_select_backward(grad_output, dim, index).unwrap()
    }

    /// Panicking wrapper over `f_internal_nested_sum_backward`.
    pub fn internal_nested_sum_backward(
        &self,
        grad: &Tensor,
        dim: impl IntListOption,
        keepdim: bool,
    ) -> Tensor {
        self.f_internal_nested_sum_backward(grad, dim, keepdim).unwrap()
    }

    /// Views `self`'s buffer as a nested tensor with the given metadata;
    /// panicking wrapper over `f_internal_nested_view_from_buffer`.
    pub fn internal_nested_view_from_buffer(
        &self,
        nested_size: &Tensor,
        nested_strides: &Tensor,
        offsets: &Tensor,
    ) -> Tensor {
        self.f_internal_nested_view_from_buffer(nested_size, nested_strides, offsets).unwrap()
    }

    /// Copying variant; panicking wrapper over `f_internal_nested_view_from_buffer_copy`.
    pub fn internal_nested_view_from_buffer_copy(
        &self,
        nested_size: &Tensor,
        nested_strides: &Tensor,
        offsets: &Tensor,
    ) -> Tensor {
        self.f_internal_nested_view_from_buffer_copy(nested_size, nested_strides, offsets).unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over
    /// `f_internal_nested_view_from_buffer_copy_out`.
    pub fn internal_nested_view_from_buffer_copy_out(
        &self,
        out: &Tensor,
        nested_size: &Tensor,
        nested_strides: &Tensor,
        offsets: &Tensor,
    ) -> Tensor {
        self.f_internal_nested_view_from_buffer_copy_out(out, nested_size, nested_strides, offsets)
            .unwrap()
    }

    /// Views `self` (jagged layout) as a nested tensor; panicking wrapper over
    /// `f_internal_nested_view_from_jagged`.
    pub fn internal_nested_view_from_jagged<T: Borrow<Tensor>>(
        &self,
        offsets: &Tensor,
        dummy: &Tensor,
        lengths: Option<T>,
        ragged_idx: i64,
        min_seqlen: Option<T>,
        max_seqlen: Option<T>,
    ) -> Tensor {
        self.f_internal_nested_view_from_jagged(
            offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen,
        )
        .unwrap()
    }

    /// Copying variant; panicking wrapper over `f_internal_nested_view_from_jagged_copy`.
    pub fn internal_nested_view_from_jagged_copy<T: Borrow<Tensor>>(
        &self,
        offsets: &Tensor,
        dummy: &Tensor,
        lengths: Option<T>,
        ragged_idx: i64,
        min_seqlen: Option<T>,
        max_seqlen: Option<T>,
    ) -> Tensor {
        self.f_internal_nested_view_from_jagged_copy(
            offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen,
        )
        .unwrap()
    }

    /// Out-variant writing into `out`; panicking wrapper over
    /// `f_internal_nested_view_from_jagged_copy_out`.
    pub fn internal_nested_view_from_jagged_copy_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        offsets: &Tensor,
        dummy: &Tensor,
        lengths: Option<T>,
        ragged_idx: i64,
        min_seqlen: Option<T>,
        max_seqlen: Option<T>,
    ) -> Tensor {
        self.f_internal_nested_view_from_jagged_copy_out(
            out, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen,
        )
        .unwrap()
    }
3068
3069    pub fn internal_new_zeros_with_same_feature_meta(
3070        &self,
3071        other: &Tensor,
3072        self_num_batch_dims: i64,
3073    ) -> Tensor {
3074        self.f_internal_new_zeros_with_same_feature_meta(other, self_num_batch_dims).unwrap()
3075    }
3076
    /// Panicking variant of [`Tensor::f_internal_new_zeros_with_same_feature_meta_out`].
    pub fn internal_new_zeros_with_same_feature_meta_out(
        &self,
        out: &Tensor,
        other: &Tensor,
        self_num_batch_dims: i64,
    ) -> Tensor {
        self.f_internal_new_zeros_with_same_feature_meta_out(out, other, self_num_batch_dims)
            .unwrap()
    }
3086
    /// Panicking variant of [`Tensor::f_internal_nnpack_available`] (associated fn, no receiver).
    pub fn internal_nnpack_available() -> bool {
        Tensor::f_internal_nnpack_available().unwrap()
    }
3090
    /// Panicking variant of [`Tensor::f_internal_nnpack_spatial_convolution`].
    pub fn internal_nnpack_spatial_convolution<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        padding: impl IntList,
        stride: impl IntList,
    ) -> Tensor {
        self.f_internal_nnpack_spatial_convolution(weight, bias, padding, stride).unwrap()
    }
3100
    /// Panicking variant of [`Tensor::f_internal_nnpack_spatial_convolution_out`].
    pub fn internal_nnpack_spatial_convolution_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        bias: Option<T>,
        padding: impl IntList,
        stride: impl IntList,
    ) -> Tensor {
        self.f_internal_nnpack_spatial_convolution_out(out, weight, bias, padding, stride).unwrap()
    }
3111
    /// Panicking variant of [`Tensor::f_internal_nnz`]; returns the unwrapped `i64`.
    pub fn internal_nnz(&self) -> i64 {
        self.f_internal_nnz().unwrap()
    }
3115
    /// Panicking variant of [`Tensor::f_internal_pack_padded_sequence`]; returns the tensor pair.
    pub fn internal_pack_padded_sequence(
        &self,
        lengths: &Tensor,
        batch_first: bool,
    ) -> (Tensor, Tensor) {
        self.f_internal_pack_padded_sequence(lengths, batch_first).unwrap()
    }
3123
    /// Panicking variant of [`Tensor::f_internal_pack_padded_sequence_backward`] (associated fn).
    pub fn internal_pack_padded_sequence_backward(
        grad: &Tensor,
        input_size: impl IntList,
        batch_sizes: &Tensor,
        batch_first: bool,
    ) -> Tensor {
        Tensor::f_internal_pack_padded_sequence_backward(grad, input_size, batch_sizes, batch_first)
            .unwrap()
    }
3133
    /// Panicking variant of [`Tensor::f_internal_pack_padded_sequence_out`]; returns the tensor pair.
    pub fn internal_pack_padded_sequence_out(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        lengths: &Tensor,
        batch_first: bool,
    ) -> (Tensor, Tensor) {
        self.f_internal_pack_padded_sequence_out(out0, out1, lengths, batch_first).unwrap()
    }
3143
    /// Panicking variant of [`Tensor::f_internal_pad_circular`].
    pub fn internal_pad_circular(&self, pad: impl IntList) -> Tensor {
        self.f_internal_pad_circular(pad).unwrap()
    }
3147
    /// Panicking variant of [`Tensor::f_internal_pad_enum`]; `value` is optional (`None` allowed).
    pub fn internal_pad_enum(
        &self,
        pad: impl IntList,
        mode: i64,
        value: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_internal_pad_enum(pad, mode, value).unwrap()
    }
3156
    /// Panicking variant of [`Tensor::f_internal_pad_packed_sequence`] (associated fn); returns the tensor pair.
    pub fn internal_pad_packed_sequence<S: Into<Scalar>>(
        data: &Tensor,
        batch_sizes: &Tensor,
        batch_first: bool,
        padding_value: S,
        total_length: i64,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_pad_packed_sequence(
            data,
            batch_sizes,
            batch_first,
            padding_value,
            total_length,
        )
        .unwrap()
    }
3173
    /// Panicking variant of [`Tensor::f_internal_pdist_backward`].
    pub fn internal_pdist_backward(&self, grad: &Tensor, p: f64, pdist: &Tensor) -> Tensor {
        self.f_internal_pdist_backward(grad, p, pdist).unwrap()
    }
3177
    /// Panicking variant of [`Tensor::f_internal_pdist_backward_out`].
    pub fn internal_pdist_backward_out(
        &self,
        out: &Tensor,
        grad: &Tensor,
        p: f64,
        pdist: &Tensor,
    ) -> Tensor {
        self.f_internal_pdist_backward_out(out, grad, p, pdist).unwrap()
    }
3187
    /// Panicking variant of [`Tensor::f_internal_pin_memory`].
    pub fn internal_pin_memory(&self, device: Device) -> Tensor {
        self.f_internal_pin_memory(device).unwrap()
    }
3191
    /// Panicking variant of [`Tensor::f_internal_pin_memory_out`].
    pub fn internal_pin_memory_out(&self, out: &Tensor, device: Device) -> Tensor {
        self.f_internal_pin_memory_out(out, device).unwrap()
    }
3195
    /// Panicking variant of [`Tensor::f_internal_prelu_kernel`].
    pub fn internal_prelu_kernel(&self, weight: &Tensor) -> Tensor {
        self.f_internal_prelu_kernel(weight).unwrap()
    }
3199
    /// Panicking variant of [`Tensor::f_internal_prelu_kernel_backward`]; returns the tensor pair.
    pub fn internal_prelu_kernel_backward(
        &self,
        grad_output: &Tensor,
        weight: &Tensor,
    ) -> (Tensor, Tensor) {
        self.f_internal_prelu_kernel_backward(grad_output, weight).unwrap()
    }
3207
    /// Panicking variant of [`Tensor::f_internal_print`] (associated fn); no return value.
    pub fn internal_print(s: &str) {
        Tensor::f_internal_print(s).unwrap()
    }
3211
    /// Panicking variant of [`Tensor::f_internal_propagate_xla_data`]; no return value.
    pub fn internal_propagate_xla_data(&self, output: &Tensor) {
        self.f_internal_propagate_xla_data(output).unwrap()
    }
3215
    /// Panicking variant of [`Tensor::f_internal_remove_batch_dim`].
    pub fn internal_remove_batch_dim(&self, level: i64, batch_size: i64, out_dim: i64) -> Tensor {
        self.f_internal_remove_batch_dim(level, batch_size, out_dim).unwrap()
    }
3219
    /// Panicking variant of [`Tensor::f_internal_reshape_alias`].
    pub fn internal_reshape_alias(&self, size: impl IntList, stride: impl IntList) -> Tensor {
        self.f_internal_reshape_alias(size, stride).unwrap()
    }
3223
    /// Panicking variant of [`Tensor::f_internal_reshape_alias_copy`].
    pub fn internal_reshape_alias_copy(&self, size: impl IntList, stride: impl IntList) -> Tensor {
        self.f_internal_reshape_alias_copy(size, stride).unwrap()
    }
3227
    /// Panicking variant of [`Tensor::f_internal_reshape_alias_copy_out`].
    pub fn internal_reshape_alias_copy_out(
        &self,
        out: &Tensor,
        size: impl IntList,
        stride: impl IntList,
    ) -> Tensor {
        self.f_internal_reshape_alias_copy_out(out, size, stride).unwrap()
    }
3236
    /// Panicking variant of [`Tensor::f_internal_reshape_copy`].
    pub fn internal_reshape_copy(&self, size: impl IntList) -> Tensor {
        self.f_internal_reshape_copy(size).unwrap()
    }
3240
    /// Panicking variant of [`Tensor::f_internal_reshape_from_tensor`].
    pub fn internal_reshape_from_tensor(&self, shape: &Tensor) -> Tensor {
        self.f_internal_reshape_from_tensor(shape).unwrap()
    }
3244
    /// Panicking variant of [`Tensor::f_internal_resize_output`].
    pub fn internal_resize_output(&self, size: impl IntList, device: Device) -> Tensor {
        self.f_internal_resize_output(size, device).unwrap()
    }
3248
    /// Panicking variant of [`Tensor::f_internal_resize_output_`]; takes `&mut self` like its fallible counterpart.
    pub fn internal_resize_output_(&mut self, size: impl IntList, device: Device) -> Tensor {
        self.f_internal_resize_output_(size, device).unwrap()
    }
3252
    /// Panicking variant of [`Tensor::f_internal_resize_output_out`].
    pub fn internal_resize_output_out(
        &self,
        out: &Tensor,
        size: impl IntList,
        device: Device,
    ) -> Tensor {
        self.f_internal_resize_output_out(out, size, device).unwrap()
    }
3261
    /// Panicking variant of [`Tensor::f_internal_rowwise_prune`] (associated fn); returns the tensor pair.
    pub fn internal_rowwise_prune(
        weight: &Tensor,
        mask: &Tensor,
        compressed_indices_dtype: Kind,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_rowwise_prune(weight, mask, compressed_indices_dtype).unwrap()
    }
3269
    /// Panicking variant of [`Tensor::f_internal_safe_softmax`]; `dtype` is optional.
    pub fn internal_safe_softmax(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_internal_safe_softmax(dim, dtype).unwrap()
    }
3273
    /// Panicking variant of [`Tensor::f_internal_sample_dirichlet`].
    pub fn internal_sample_dirichlet(&self) -> Tensor {
        self.f_internal_sample_dirichlet().unwrap()
    }
3277
    /// Panicking variant of [`Tensor::f_internal_sample_dirichlet_out`].
    pub fn internal_sample_dirichlet_out(&self, out: &Tensor) -> Tensor {
        self.f_internal_sample_dirichlet_out(out).unwrap()
    }
3281
    /// Panicking variant of [`Tensor::f_internal_saturate_weight_to_fp16`] (associated fn).
    pub fn internal_saturate_weight_to_fp16(weight: &Tensor) -> Tensor {
        Tensor::f_internal_saturate_weight_to_fp16(weight).unwrap()
    }
3285
    /// Panicking variant of [`Tensor::f_internal_scaled_dot_product_attention_math`] (associated fn).
    pub fn internal_scaled_dot_product_attention_math<T: Borrow<Tensor>>(
        query: &Tensor,
        key: &Tensor,
        value: &Tensor,
        attn_mask: Option<T>,
        dropout_p: f64,
        is_causal: bool,
        dropout_mask: Option<T>,
        scale: impl Into<Option<f64>>,
        enable_gqa: bool,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_scaled_dot_product_attention_math(
            query,
            key,
            value,
            attn_mask,
            dropout_p,
            is_causal,
            dropout_mask,
            scale,
            enable_gqa,
        )
        .unwrap()
    }
3310
    /// Panicking variant of [`Tensor::f_internal_scaled_dot_product_attention_math_for_mps`] (associated fn).
    pub fn internal_scaled_dot_product_attention_math_for_mps<T: Borrow<Tensor>>(
        query: &Tensor,
        key: &Tensor,
        value: &Tensor,
        attn_mask: Option<T>,
        dropout_p: f64,
        is_causal: bool,
        dropout_mask: Option<T>,
        scale: impl Into<Option<f64>>,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_scaled_dot_product_attention_math_for_mps(
            query,
            key,
            value,
            attn_mask,
            dropout_p,
            is_causal,
            dropout_mask,
            scale,
        )
        .unwrap()
    }
3333
    /// Panicking variant of [`Tensor::f_internal_scaled_dot_product_cudnn_attention_backward`] (associated fn);
    /// returns the unwrapped tensor triple.
    pub fn internal_scaled_dot_product_cudnn_attention_backward(
        grad_out: &Tensor,
        query: &Tensor,
        key: &Tensor,
        value: &Tensor,
        out: &Tensor,
        logsumexp: &Tensor,
        philox_seed: &Tensor,
        philox_offset: &Tensor,
        attn_bias: &Tensor,
        cum_seq_q: &Tensor,
        cum_seq_k: &Tensor,
        max_q: i64,
        max_k: i64,
        dropout_p: f64,
        is_causal: bool,
        scale: impl Into<Option<f64>>,
    ) -> (Tensor, Tensor, Tensor) {
        Tensor::f_internal_scaled_dot_product_cudnn_attention_backward(
            grad_out,
            query,
            key,
            value,
            out,
            logsumexp,
            philox_seed,
            philox_offset,
            attn_bias,
            cum_seq_q,
            cum_seq_k,
            max_q,
            max_k,
            dropout_p,
            is_causal,
            scale,
        )
        .unwrap()
    }
3372
    /// Panicking variant of [`Tensor::f_internal_scaled_dot_product_efficient_attention`] (associated fn);
    /// returns the unwrapped 4-tuple of tensors.
    pub fn internal_scaled_dot_product_efficient_attention<T: Borrow<Tensor>>(
        query: &Tensor,
        key: &Tensor,
        value: &Tensor,
        attn_bias: Option<T>,
        compute_log_sumexp: bool,
        dropout_p: f64,
        is_causal: bool,
        scale: impl Into<Option<f64>>,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        Tensor::f_internal_scaled_dot_product_efficient_attention(
            query,
            key,
            value,
            attn_bias,
            compute_log_sumexp,
            dropout_p,
            is_causal,
            scale,
        )
        .unwrap()
    }
3395
    /// Panicking variant of [`Tensor::f_internal_scaled_dot_product_flash_attention_backward`] (associated fn);
    /// returns the unwrapped tensor triple.
    pub fn internal_scaled_dot_product_flash_attention_backward(
        grad_out: &Tensor,
        query: &Tensor,
        key: &Tensor,
        value: &Tensor,
        out: &Tensor,
        logsumexp: &Tensor,
        cum_seq_q: &Tensor,
        cum_seq_k: &Tensor,
        max_q: i64,
        max_k: i64,
        dropout_p: f64,
        is_causal: bool,
        philox_seed: &Tensor,
        philox_offset: &Tensor,
        scale: impl Into<Option<f64>>,
    ) -> (Tensor, Tensor, Tensor) {
        Tensor::f_internal_scaled_dot_product_flash_attention_backward(
            grad_out,
            query,
            key,
            value,
            out,
            logsumexp,
            cum_seq_q,
            cum_seq_k,
            max_q,
            max_k,
            dropout_p,
            is_causal,
            philox_seed,
            philox_offset,
            scale,
        )
        .unwrap()
    }
3432
    /// Panicking variant of [`Tensor::f_internal_scaled_dot_product_flash_attention_for_cpu`] (associated fn).
    pub fn internal_scaled_dot_product_flash_attention_for_cpu<T: Borrow<Tensor>>(
        query: &Tensor,
        key: &Tensor,
        value: &Tensor,
        dropout_p: f64,
        is_causal: bool,
        attn_mask: Option<T>,
        scale: impl Into<Option<f64>>,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_scaled_dot_product_flash_attention_for_cpu(
            query, key, value, dropout_p, is_causal, attn_mask, scale,
        )
        .unwrap()
    }
3447
    /// Panicking variant of [`Tensor::f_internal_scaled_dot_product_flash_attention_for_cpu_backward`] (associated fn).
    pub fn internal_scaled_dot_product_flash_attention_for_cpu_backward<T: Borrow<Tensor>>(
        grad_out: &Tensor,
        query: &Tensor,
        key: &Tensor,
        value: &Tensor,
        out: &Tensor,
        logsumexp: &Tensor,
        dropout_p: f64,
        is_causal: bool,
        attn_mask: Option<T>,
        scale: impl Into<Option<f64>>,
    ) -> (Tensor, Tensor, Tensor) {
        Tensor::f_internal_scaled_dot_product_flash_attention_for_cpu_backward(
            grad_out, query, key, value, out, logsumexp, dropout_p, is_causal, attn_mask, scale,
        )
        .unwrap()
    }
3465
    /// Panicking variant of [`Tensor::f_internal_scaled_mm`].
    pub fn internal_scaled_mm<T: Borrow<Tensor>>(
        &self,
        mat2: &Tensor,
        scale_a: &Tensor,
        scale_b: &Tensor,
        bias: Option<T>,
        scale_result: Option<T>,
        out_dtype: impl Into<Option<Kind>>,
        use_fast_accum: bool,
    ) -> Tensor {
        self.f_internal_scaled_mm(
            mat2,
            scale_a,
            scale_b,
            bias,
            scale_result,
            out_dtype,
            use_fast_accum,
        )
        .unwrap()
    }
3487
    /// Panicking variant of [`Tensor::f_internal_scaled_mm_out`].
    pub fn internal_scaled_mm_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        mat2: &Tensor,
        scale_a: &Tensor,
        scale_b: &Tensor,
        bias: Option<T>,
        scale_result: Option<T>,
        out_dtype: impl Into<Option<Kind>>,
        use_fast_accum: bool,
    ) -> Tensor {
        self.f_internal_scaled_mm_out(
            out,
            mat2,
            scale_a,
            scale_b,
            bias,
            scale_result,
            out_dtype,
            use_fast_accum,
        )
        .unwrap()
    }
3511
    /// Panicking variant of [`Tensor::f_internal_scatter_reduce`].
    pub fn internal_scatter_reduce(
        &self,
        dim: i64,
        index: &Tensor,
        src: &Tensor,
        reduce: &str,
        include_self: bool,
    ) -> Tensor {
        self.f_internal_scatter_reduce(dim, index, src, reduce, include_self).unwrap()
    }
3522
    /// Panicking variant of [`Tensor::f_internal_scatter_reduce_`]; takes `&mut self` like its fallible counterpart.
    pub fn internal_scatter_reduce_(
        &mut self,
        dim: i64,
        index: &Tensor,
        src: &Tensor,
        reduce: &str,
        include_self: bool,
    ) -> Tensor {
        self.f_internal_scatter_reduce_(dim, index, src, reduce, include_self).unwrap()
    }
3533
    /// Panicking variant of [`Tensor::f_internal_scatter_reduce_two_out`].
    pub fn internal_scatter_reduce_two_out(
        &self,
        out: &Tensor,
        dim: i64,
        index: &Tensor,
        src: &Tensor,
        reduce: &str,
        include_self: bool,
    ) -> Tensor {
        self.f_internal_scatter_reduce_two_out(out, dim, index, src, reduce, include_self).unwrap()
    }
3545
    /// Panicking variant of [`Tensor::f_internal_segment_reduce_backward`] (associated fn).
    pub fn internal_segment_reduce_backward<T: Borrow<Tensor>, S: Into<Scalar>>(
        grad: &Tensor,
        output: &Tensor,
        data: &Tensor,
        reduce: &str,
        lengths: Option<T>,
        offsets: Option<T>,
        axis: i64,
        initial: S,
    ) -> Tensor {
        Tensor::f_internal_segment_reduce_backward(
            grad, output, data, reduce, lengths, offsets, axis, initial,
        )
        .unwrap()
    }
3561
    /// Panicking variant of [`Tensor::f_internal_segment_reduce_backward_out`] (associated fn).
    pub fn internal_segment_reduce_backward_out<T: Borrow<Tensor>, S: Into<Scalar>>(
        out: &Tensor,
        grad: &Tensor,
        output: &Tensor,
        data: &Tensor,
        reduce: &str,
        lengths: Option<T>,
        offsets: Option<T>,
        axis: i64,
        initial: S,
    ) -> Tensor {
        Tensor::f_internal_segment_reduce_backward_out(
            out, grad, output, data, reduce, lengths, offsets, axis, initial,
        )
        .unwrap()
    }
3578
    /// Panicking variant of [`Tensor::f_internal_shape_as_tensor`].
    pub fn internal_shape_as_tensor(&self) -> Tensor {
        self.f_internal_shape_as_tensor().unwrap()
    }
3582
    /// Panicking variant of [`Tensor::f_internal_slow_conv2d_backward`]; returns the unwrapped tensor triple.
    pub fn internal_slow_conv2d_backward(
        &self,
        grad_input: &Tensor,
        grad_weight: &Tensor,
        grad_bias: &Tensor,
        grad_output: &Tensor,
        weight: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_internal_slow_conv2d_backward(
            grad_input,
            grad_weight,
            grad_bias,
            grad_output,
            weight,
            kernel_size,
            stride,
            padding,
        )
        .unwrap()
    }
3606
    /// Panicking variant of [`Tensor::f_internal_sobol_engine_draw`] (associated fn); returns the tensor pair.
    pub fn internal_sobol_engine_draw(
        quasi: &Tensor,
        n: i64,
        sobolstate: &Tensor,
        dimension: i64,
        num_generated: i64,
        dtype: impl Into<Option<Kind>>,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_sobol_engine_draw(quasi, n, sobolstate, dimension, num_generated, dtype)
            .unwrap()
    }
3618
    /// Panicking variant of [`Tensor::f_internal_sobol_engine_ff_`]; takes `&mut self` like its fallible counterpart.
    pub fn internal_sobol_engine_ff_(
        &mut self,
        n: i64,
        sobolstate: &Tensor,
        dimension: i64,
        num_generated: i64,
    ) -> Tensor {
        self.f_internal_sobol_engine_ff_(n, sobolstate, dimension, num_generated).unwrap()
    }
3628
    /// Panicking variant of [`Tensor::f_internal_sobol_engine_initialize_state_`].
    pub fn internal_sobol_engine_initialize_state_(&mut self, dimension: i64) -> Tensor {
        self.f_internal_sobol_engine_initialize_state_(dimension).unwrap()
    }
3632
    /// Panicking variant of [`Tensor::f_internal_sobol_engine_scramble_`].
    pub fn internal_sobol_engine_scramble_(&mut self, ltm: &Tensor, dimension: i64) -> Tensor {
        self.f_internal_sobol_engine_scramble_(ltm, dimension).unwrap()
    }
3636
    /// Panicking variant of [`Tensor::f_internal_softmax`].
    pub fn internal_softmax(&self, dim: i64, half_to_float: bool) -> Tensor {
        self.f_internal_softmax(dim, half_to_float).unwrap()
    }
3640
    /// Panicking variant of [`Tensor::f_internal_softmax_backward_data`] (associated fn).
    pub fn internal_softmax_backward_data(
        grad_output: &Tensor,
        output: &Tensor,
        dim: i64,
        input_dtype: Kind,
    ) -> Tensor {
        Tensor::f_internal_softmax_backward_data(grad_output, output, dim, input_dtype).unwrap()
    }
3649
    /// Panicking variant of [`Tensor::f_internal_softmax_backward_data_out`] (associated fn).
    pub fn internal_softmax_backward_data_out(
        grad_input: &Tensor,
        grad_output: &Tensor,
        output: &Tensor,
        dim: i64,
        input_dtype: Kind,
    ) -> Tensor {
        Tensor::f_internal_softmax_backward_data_out(
            grad_input,
            grad_output,
            output,
            dim,
            input_dtype,
        )
        .unwrap()
    }
3666
    /// Panicking variant of [`Tensor::f_internal_softmax_out`].
    pub fn internal_softmax_out(&self, out: &Tensor, dim: i64, half_to_float: bool) -> Tensor {
        self.f_internal_softmax_out(out, dim, half_to_float).unwrap()
    }
3670
    /// Panicking variant of [`Tensor::f_internal_sparse_addmm`].
    pub fn internal_sparse_addmm(&self, mat1: &Tensor, mat2: &Tensor) -> Tensor {
        self.f_internal_sparse_addmm(mat1, mat2).unwrap()
    }
3674
    /// Panicking variant of [`Tensor::f_internal_sparse_addmm_out`].
    pub fn internal_sparse_addmm_out(&self, out: &Tensor, mat1: &Tensor, mat2: &Tensor) -> Tensor {
        self.f_internal_sparse_addmm_out(out, mat1, mat2).unwrap()
    }
3678
    /// Panicking variant of [`Tensor::f_internal_sparse_broadcast_to`].
    pub fn internal_sparse_broadcast_to(&self, size: impl IntList) -> Tensor {
        self.f_internal_sparse_broadcast_to(size).unwrap()
    }
3682
    /// Panicking variant of [`Tensor::f_internal_sparse_broadcast_to_copy`].
    pub fn internal_sparse_broadcast_to_copy(&self, size: impl IntList) -> Tensor {
        self.f_internal_sparse_broadcast_to_copy(size).unwrap()
    }
3686
    /// Panicking variant of [`Tensor::f_internal_sparse_broadcast_to_copy_out`].
    pub fn internal_sparse_broadcast_to_copy_out(
        &self,
        out: &Tensor,
        size: impl IntList,
    ) -> Tensor {
        self.f_internal_sparse_broadcast_to_copy_out(out, size).unwrap()
    }
3694
    /// Panicking variant of [`Tensor::f_internal_sparse_bsc_tensor_unsafe`] (associated fn);
    /// `options` is the usual `(Kind, Device)` pair used throughout this generated file.
    pub fn internal_sparse_bsc_tensor_unsafe(
        ccol_indices: &Tensor,
        row_indices: &Tensor,
        values: &Tensor,
        size: impl IntList,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_internal_sparse_bsc_tensor_unsafe(
            ccol_indices,
            row_indices,
            values,
            size,
            options,
        )
        .unwrap()
    }
3711
    /// Panicking variant of [`Tensor::f_internal_sparse_bsr_tensor_unsafe`] (associated fn).
    pub fn internal_sparse_bsr_tensor_unsafe(
        crow_indices: &Tensor,
        col_indices: &Tensor,
        values: &Tensor,
        size: impl IntList,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_internal_sparse_bsr_tensor_unsafe(
            crow_indices,
            col_indices,
            values,
            size,
            options,
        )
        .unwrap()
    }
3728
    /// Panicking variant of [`Tensor::f_internal_sparse_compressed_tensor_unsafe`] (associated fn).
    pub fn internal_sparse_compressed_tensor_unsafe(
        compressed_indices: &Tensor,
        plain_indices: &Tensor,
        values: &Tensor,
        size: impl IntList,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_internal_sparse_compressed_tensor_unsafe(
            compressed_indices,
            plain_indices,
            values,
            size,
            options,
        )
        .unwrap()
    }
3745
    /// Panicking variant of [`Tensor::f_internal_sparse_compressed_tensor_with_dims`] (associated fn).
    pub fn internal_sparse_compressed_tensor_with_dims(
        nnz: i64,
        dense_dim: i64,
        size: impl IntList,
        blocksize: impl IntList,
        index_dtype: Kind,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_internal_sparse_compressed_tensor_with_dims(
            nnz,
            dense_dim,
            size,
            blocksize,
            index_dtype,
            options,
        )
        .unwrap()
    }
3764
    /// Panicking variant of [`Tensor::f_internal_sparse_coo_tensor_unsafe`] (associated fn).
    pub fn internal_sparse_coo_tensor_unsafe(
        indices: &Tensor,
        values: &Tensor,
        size: impl IntList,
        options: (Kind, Device),
        is_coalesced: bool,
    ) -> Tensor {
        Tensor::f_internal_sparse_coo_tensor_unsafe(indices, values, size, options, is_coalesced)
            .unwrap()
    }
3775
    /// Panicking variant of [`Tensor::f_internal_sparse_coo_tensor_with_dims`] (associated fn).
    pub fn internal_sparse_coo_tensor_with_dims(
        sparse_dim: i64,
        dense_dim: i64,
        size: impl IntList,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_internal_sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, options)
            .unwrap()
    }
3785
    /// Panicking variant of [`Tensor::f_internal_sparse_coo_tensor_with_dims_and_tensors`] (associated fn).
    pub fn internal_sparse_coo_tensor_with_dims_and_tensors(
        sparse_dim: i64,
        dense_dim: i64,
        size: impl IntList,
        indices: &Tensor,
        values: &Tensor,
        options: (Kind, Device),
        is_coalesced: bool,
    ) -> Tensor {
        Tensor::f_internal_sparse_coo_tensor_with_dims_and_tensors(
            sparse_dim,
            dense_dim,
            size,
            indices,
            values,
            options,
            is_coalesced,
        )
        .unwrap()
    }
3806
    /// Panicking variant of [`Tensor::f_internal_sparse_coo_tensor_with_dims_and_tensors_out`] (associated fn).
    pub fn internal_sparse_coo_tensor_with_dims_and_tensors_out(
        out: &Tensor,
        sparse_dim: i64,
        dense_dim: i64,
        size: impl IntList,
        indices: &Tensor,
        values: &Tensor,
        is_coalesced: bool,
    ) -> Tensor {
        Tensor::f_internal_sparse_coo_tensor_with_dims_and_tensors_out(
            out,
            sparse_dim,
            dense_dim,
            size,
            indices,
            values,
            is_coalesced,
        )
        .unwrap()
    }
3827
    /// Panicking variant of [`Tensor::f_internal_sparse_coo_tensor_with_dims_out`] (associated fn).
    pub fn internal_sparse_coo_tensor_with_dims_out(
        out: &Tensor,
        sparse_dim: i64,
        dense_dim: i64,
        size: impl IntList,
    ) -> Tensor {
        Tensor::f_internal_sparse_coo_tensor_with_dims_out(out, sparse_dim, dense_dim, size)
            .unwrap()
    }
3837
    /// Panicking variant of [`Tensor::f_internal_sparse_csc_tensor_unsafe`] (associated fn).
    pub fn internal_sparse_csc_tensor_unsafe(
        ccol_indices: &Tensor,
        row_indices: &Tensor,
        values: &Tensor,
        size: impl IntList,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_internal_sparse_csc_tensor_unsafe(
            ccol_indices,
            row_indices,
            values,
            size,
            options,
        )
        .unwrap()
    }
3854
    /// Panicking variant of [`Tensor::f_internal_sparse_csr_prod`]; `dtype` is optional.
    pub fn internal_sparse_csr_prod(
        &self,
        dim: impl IntList,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_internal_sparse_csr_prod(dim, keepdim, dtype).unwrap()
    }
3863
    /// Panicking variant of [`Tensor::f_internal_sparse_csr_prod_dim_dtype_out`].
    pub fn internal_sparse_csr_prod_dim_dtype_out(
        &self,
        out: &Tensor,
        dim: impl IntList,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_internal_sparse_csr_prod_dim_dtype_out(out, dim, keepdim, dtype).unwrap()
    }
3873
    /// Panicking variant of [`Tensor::f_internal_sparse_csr_sum`]; `dtype` is optional.
    pub fn internal_sparse_csr_sum(
        &self,
        dim: impl IntList,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_internal_sparse_csr_sum(dim, keepdim, dtype).unwrap()
    }
3882
    /// Panicking variant of [`Tensor::f_internal_sparse_csr_sum_dim_dtype_out`].
    pub fn internal_sparse_csr_sum_dim_dtype_out(
        &self,
        out: &Tensor,
        dim: impl IntList,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_internal_sparse_csr_sum_dim_dtype_out(out, dim, keepdim, dtype).unwrap()
    }
3892
    /// Panicking variant of [`Tensor::f_internal_sparse_csr_tensor_unsafe`] (associated fn).
    pub fn internal_sparse_csr_tensor_unsafe(
        crow_indices: &Tensor,
        col_indices: &Tensor,
        values: &Tensor,
        size: impl IntList,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_internal_sparse_csr_tensor_unsafe(
            crow_indices,
            col_indices,
            values,
            size,
            options,
        )
        .unwrap()
    }
3909
    /// Panicking variant of [`Tensor::f_internal_sparse_log_softmax`].
    pub fn internal_sparse_log_softmax(&self, dim: i64, half_to_float: bool) -> Tensor {
        self.f_internal_sparse_log_softmax(dim, half_to_float).unwrap()
    }
3913
    /// Panicking variant of [`Tensor::f_internal_sparse_log_softmax_backward_data`].
    pub fn internal_sparse_log_softmax_backward_data(
        &self,
        grad_output: &Tensor,
        output: &Tensor,
        dim: i64,
    ) -> Tensor {
        self.f_internal_sparse_log_softmax_backward_data(grad_output, output, dim).unwrap()
    }
3922
    /// Panicking variant of [`Tensor::f_internal_sparse_log_softmax_backward_data_out`].
    pub fn internal_sparse_log_softmax_backward_data_out(
        &self,
        out: &Tensor,
        grad_output: &Tensor,
        output: &Tensor,
        dim: i64,
    ) -> Tensor {
        self.f_internal_sparse_log_softmax_backward_data_out(out, grad_output, output, dim).unwrap()
    }
3932
    /// Panicking variant of [`Tensor::f_internal_sparse_log_softmax_int`]; `dtype` is optional.
    pub fn internal_sparse_log_softmax_int(
        &self,
        dim: i64,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_internal_sparse_log_softmax_int(dim, dtype).unwrap()
    }
3940
    /// Panicking variant of [`Tensor::f_internal_sparse_log_softmax_out`].
    pub fn internal_sparse_log_softmax_out(
        &self,
        out: &Tensor,
        dim: i64,
        half_to_float: bool,
    ) -> Tensor {
        self.f_internal_sparse_log_softmax_out(out, dim, half_to_float).unwrap()
    }
3949
    /// Panicking variant of [`Tensor::f_internal_sparse_mask_projection`].
    pub fn internal_sparse_mask_projection(
        &self,
        mask: &Tensor,
        accumulate_matches: bool,
    ) -> Tensor {
        self.f_internal_sparse_mask_projection(mask, accumulate_matches).unwrap()
    }
3957
    /// Panicking variant of [`Tensor::f_internal_sparse_mask_projection_out`].
    pub fn internal_sparse_mask_projection_out(
        &self,
        out: &Tensor,
        mask: &Tensor,
        accumulate_matches: bool,
    ) -> Tensor {
        self.f_internal_sparse_mask_projection_out(out, mask, accumulate_matches).unwrap()
    }
3966
    /// Panicking variant of [`Tensor::f_internal_sparse_mm`] (associated fn).
    pub fn internal_sparse_mm(sparse: &Tensor, dense: &Tensor) -> Tensor {
        Tensor::f_internal_sparse_mm(sparse, dense).unwrap()
    }
3970
    /// Panicking variant of [`Tensor::f_internal_sparse_mm_reduce`] (associated fn).
    pub fn internal_sparse_mm_reduce(sparse: &Tensor, dense: &Tensor, reduce: &str) -> Tensor {
        Tensor::f_internal_sparse_mm_reduce(sparse, dense, reduce).unwrap()
    }
3974
    /// Panicking variant of [`Tensor::f_internal_sparse_mm_reduce_impl`]; returns the tensor pair.
    pub fn internal_sparse_mm_reduce_impl(&self, other: &Tensor, reduce: &str) -> (Tensor, Tensor) {
        self.f_internal_sparse_mm_reduce_impl(other, reduce).unwrap()
    }
3978
    /// Panicking variant of [`Tensor::f_internal_sparse_semi_structured_apply`]; returns the tensor pair.
    pub fn internal_sparse_semi_structured_apply(&self, thread_masks: &Tensor) -> (Tensor, Tensor) {
        self.f_internal_sparse_semi_structured_apply(thread_masks).unwrap()
    }
3982
    /// Panicking variant of [`Tensor::f_internal_sparse_semi_structured_apply_dense`].
    pub fn internal_sparse_semi_structured_apply_dense(&self, thread_masks: &Tensor) -> Tensor {
        self.f_internal_sparse_semi_structured_apply_dense(thread_masks).unwrap()
    }
3986
    /// Panicking variant of [`Tensor::f_internal_sparse_semi_structured_linear`].
    pub fn internal_sparse_semi_structured_linear<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        meta: &Tensor,
        bias: Option<T>,
        activation: &str,
        out_dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_internal_sparse_semi_structured_linear(weight, meta, bias, activation, out_dtype)
            .unwrap()
    }
3998
    /// Panicking variant of [`Tensor::f_internal_sparse_semi_structured_mm`] (associated fn).
    pub fn internal_sparse_semi_structured_mm(
        mat1: &Tensor,
        mat1_meta: &Tensor,
        mat2: &Tensor,
        out_dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        Tensor::f_internal_sparse_semi_structured_mm(mat1, mat1_meta, mat2, out_dtype).unwrap()
    }
4007
    /// Panicking variant of [`Tensor::f_internal_sparse_semi_structured_tile`]; returns the unwrapped 5-tuple.
    pub fn internal_sparse_semi_structured_tile(
        &self,
        algorithm: &str,
        use_cutlass: bool,
    ) -> (Tensor, Tensor, Tensor, Tensor, Tensor) {
        self.f_internal_sparse_semi_structured_tile(algorithm, use_cutlass).unwrap()
    }
4015
    /// Panicking variant of [`Tensor::f_internal_sparse_softmax`].
    pub fn internal_sparse_softmax(&self, dim: i64, half_to_float: bool) -> Tensor {
        self.f_internal_sparse_softmax(dim, half_to_float).unwrap()
    }
4019
    /// Panicking variant of [`Tensor::f_internal_sparse_softmax_backward_data`].
    pub fn internal_sparse_softmax_backward_data(
        &self,
        grad_output: &Tensor,
        output: &Tensor,
        dim: i64,
    ) -> Tensor {
        self.f_internal_sparse_softmax_backward_data(grad_output, output, dim).unwrap()
    }
4028
    /// Panicking variant of [`Tensor::f_internal_sparse_softmax_backward_data_out`].
    pub fn internal_sparse_softmax_backward_data_out(
        &self,
        out: &Tensor,
        grad_output: &Tensor,
        output: &Tensor,
        dim: i64,
    ) -> Tensor {
        self.f_internal_sparse_softmax_backward_data_out(out, grad_output, output, dim).unwrap()
    }
4038
    /// Panicking variant of [`Tensor::f_internal_sparse_softmax_int`]; `dtype` is optional.
    pub fn internal_sparse_softmax_int(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_internal_sparse_softmax_int(dim, dtype).unwrap()
    }
4042
    /// Panicking variant of [`Tensor::f_internal_sparse_softmax_out`].
    pub fn internal_sparse_softmax_out(
        &self,
        out: &Tensor,
        dim: i64,
        half_to_float: bool,
    ) -> Tensor {
        self.f_internal_sparse_softmax_out(out, dim, half_to_float).unwrap()
    }
4051
    /// Panicking variant of [`Tensor::f_internal_sparse_sparse_matmul`].
    pub fn internal_sparse_sparse_matmul(&self, other: &Tensor) -> Tensor {
        self.f_internal_sparse_sparse_matmul(other).unwrap()
    }
4055
    /// Panicking variant of [`Tensor::f_internal_sparse_sparse_matmul_out`].
    pub fn internal_sparse_sparse_matmul_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_internal_sparse_sparse_matmul_out(out, other).unwrap()
    }
4059
    /// Panicking variant of [`Tensor::f_internal_sparse_sum`].
    pub fn internal_sparse_sum(&self) -> Tensor {
        self.f_internal_sparse_sum().unwrap()
    }
4063
    /// Panicking variant of [`Tensor::f_internal_sparse_sum_backward`].
    pub fn internal_sparse_sum_backward(&self, grad: &Tensor, dim: impl IntList) -> Tensor {
        self.f_internal_sparse_sum_backward(grad, dim).unwrap()
    }
4067
    /// Panicking variant of [`Tensor::f_internal_sparse_sum_backward_out`].
    pub fn internal_sparse_sum_backward_out(
        &self,
        out: &Tensor,
        grad: &Tensor,
        dim: impl IntList,
    ) -> Tensor {
        self.f_internal_sparse_sum_backward_out(out, grad, dim).unwrap()
    }
4076
4077    pub fn internal_sparse_sum_dim(&self, dim: impl IntList) -> Tensor {
4078        self.f_internal_sparse_sum_dim(dim).unwrap()
4079    }
4080
4081    pub fn internal_sparse_sum_dim_dtype(&self, dim: impl IntList, dtype: Kind) -> Tensor {
4082        self.f_internal_sparse_sum_dim_dtype(dim, dtype).unwrap()
4083    }
4084
4085    pub fn internal_sparse_sum_dim_out(&self, out: &Tensor, dim: impl IntList) -> Tensor {
4086        self.f_internal_sparse_sum_dim_out(out, dim).unwrap()
4087    }
4088
4089    pub fn internal_sparse_sum_dtype(&self, dtype: Kind) -> Tensor {
4090        self.f_internal_sparse_sum_dtype(dtype).unwrap()
4091    }
4092
    /// Infallible form of [`Tensor::f_internal_spdiags`]; panics on error.
    pub fn internal_spdiags(
        diagonals: &Tensor,
        offsets: &Tensor,
        shape: impl IntList,
        layout: Option<Layout>,
    ) -> Tensor {
        Tensor::f_internal_spdiags(diagonals, offsets, shape, layout).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_spdiags_out`]; panics on error.
    pub fn internal_spdiags_out(
        out: &Tensor,
        diagonals: &Tensor,
        offsets: &Tensor,
        shape: impl IntList,
        layout: Option<Layout>,
    ) -> Tensor {
        Tensor::f_internal_spdiags_out(out, diagonals, offsets, shape, layout).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_spsolve`]; panics on error.
    pub fn internal_spsolve(a: &Tensor, b: &Tensor, left: bool) -> Tensor {
        Tensor::f_internal_spsolve(a, b, left).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_stack`]; panics on error.
    pub fn internal_stack<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Tensor {
        Tensor::f_internal_stack(tensors, dim).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_stack_out`]; panics on error.
    pub fn internal_stack_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T], dim: i64) -> Tensor {
        Tensor::f_internal_stack_out(out, tensors, dim).unwrap()
    }
4123
    /// Infallible form of [`Tensor::f_internal_standard_gamma`]; panics on error.
    pub fn internal_standard_gamma(&self) -> Tensor {
        self.f_internal_standard_gamma().unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_standard_gamma_grad`]; panics on error.
    pub fn internal_standard_gamma_grad(&self, output: &Tensor) -> Tensor {
        self.f_internal_standard_gamma_grad(output).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_standard_gamma_grad_out`]; panics on error.
    pub fn internal_standard_gamma_grad_out(&self, out: &Tensor, output: &Tensor) -> Tensor {
        self.f_internal_standard_gamma_grad_out(out, output).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_standard_gamma_out`]; panics on error.
    pub fn internal_standard_gamma_out(&self, out: &Tensor) -> Tensor {
        self.f_internal_standard_gamma_out(out).unwrap()
    }
4139
    /// Infallible form of [`Tensor::f_internal_test_ambiguous_defaults`]; panics on error.
    pub fn internal_test_ambiguous_defaults(dummy: &Tensor, a: i64, b: i64) -> Tensor {
        Tensor::f_internal_test_ambiguous_defaults(dummy, a, b).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_ambiguous_defaults_b`]; panics on error.
    pub fn internal_test_ambiguous_defaults_b(dummy: &Tensor, a: i64, b: &str) -> Tensor {
        Tensor::f_internal_test_ambiguous_defaults_b(dummy, a, b).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_autograd_multiple_dispatch`]; panics on error.
    pub fn internal_test_autograd_multiple_dispatch(&self) -> Tensor {
        self.f_internal_test_autograd_multiple_dispatch().unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_autograd_multiple_dispatch_fullcoverage_out`]; panics on error.
    pub fn internal_test_autograd_multiple_dispatch_fullcoverage_out(
        &self,
        out: &Tensor,
    ) -> Tensor {
        self.f_internal_test_autograd_multiple_dispatch_fullcoverage_out(out).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_autograd_multiple_dispatch_ntonly`]; panics on error.
    pub fn internal_test_autograd_multiple_dispatch_ntonly(&self, b: bool) -> Tensor {
        self.f_internal_test_autograd_multiple_dispatch_ntonly(b).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_autograd_multiple_dispatch_view`]; panics on error.
    pub fn internal_test_autograd_multiple_dispatch_view(&self) -> Tensor {
        self.f_internal_test_autograd_multiple_dispatch_view().unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_autograd_multiple_dispatch_view_copy`]; panics on error.
    pub fn internal_test_autograd_multiple_dispatch_view_copy(&self) -> Tensor {
        self.f_internal_test_autograd_multiple_dispatch_view_copy().unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_autograd_multiple_dispatch_view_copy_out`]; panics on error.
    pub fn internal_test_autograd_multiple_dispatch_view_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_internal_test_autograd_multiple_dispatch_view_copy_out(out).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_check_tensor`]; panics on error.
    pub fn internal_test_check_tensor(&self) -> Tensor {
        self.f_internal_test_check_tensor().unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_functorch_fallback`]; panics on error.
    pub fn internal_test_functorch_fallback(&self, other: &Tensor) -> Tensor {
        self.f_internal_test_functorch_fallback(other).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_functorch_fallback_out`]; panics on error.
    pub fn internal_test_functorch_fallback_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_internal_test_functorch_fallback_out(out, other).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_optional_filled_intlist`]; panics on error.
    pub fn internal_test_optional_filled_intlist(
        values: &Tensor,
        addends: impl IntListOption,
    ) -> Tensor {
        Tensor::f_internal_test_optional_filled_intlist(values, addends).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_optional_filled_intlist_out`]; panics on error.
    pub fn internal_test_optional_filled_intlist_out(
        out: &Tensor,
        values: &Tensor,
        addends: impl IntListOption,
    ) -> Tensor {
        Tensor::f_internal_test_optional_filled_intlist_out(out, values, addends).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_optional_floatlist`]; panics on error.
    pub fn internal_test_optional_floatlist(values: &Tensor, addends: impl DoubleList) -> Tensor {
        Tensor::f_internal_test_optional_floatlist(values, addends).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_optional_floatlist_out`]; panics on error.
    pub fn internal_test_optional_floatlist_out(
        out: &Tensor,
        values: &Tensor,
        addends: impl DoubleList,
    ) -> Tensor {
        Tensor::f_internal_test_optional_floatlist_out(out, values, addends).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_optional_intlist`]; panics on error.
    pub fn internal_test_optional_intlist(values: &Tensor, addends: impl IntListOption) -> Tensor {
        Tensor::f_internal_test_optional_intlist(values, addends).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_optional_intlist_out`]; panics on error.
    pub fn internal_test_optional_intlist_out(
        out: &Tensor,
        values: &Tensor,
        addends: impl IntListOption,
    ) -> Tensor {
        Tensor::f_internal_test_optional_intlist_out(out, values, addends).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_parallel_materialize`]; panics on error.
    pub fn internal_test_parallel_materialize(
        &self,
        num_parallel: i64,
        skip_first: bool,
    ) -> Tensor {
        self.f_internal_test_parallel_materialize(num_parallel, skip_first).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_serialization_subcmul`]; panics on error.
    pub fn internal_test_serialization_subcmul(&self, other: &Tensor) -> Tensor {
        self.f_internal_test_serialization_subcmul(other).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_string_default`]; panics on error.
    pub fn internal_test_string_default(dummy: &Tensor, a: &str, b: &str) -> Tensor {
        Tensor::f_internal_test_string_default(dummy, a, b).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_warn_in_autograd`]; panics on error.
    pub fn internal_test_warn_in_autograd(&self) -> Tensor {
        self.f_internal_test_warn_in_autograd().unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_test_warn_in_autograd_out`]; panics on error.
    pub fn internal_test_warn_in_autograd_out(&self, out: &Tensor) -> Tensor {
        self.f_internal_test_warn_in_autograd_out(out).unwrap()
    }
4249
    /// Infallible form of [`Tensor::f_internal_to_copy`]; panics on error.
    pub fn internal_to_copy(&self, options: (Kind, Device), non_blocking: bool) -> Tensor {
        self.f_internal_to_copy(options, non_blocking).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_to_copy_out`]; panics on error.
    pub fn internal_to_copy_out(&self, out: &Tensor, non_blocking: bool) -> Tensor {
        self.f_internal_to_copy_out(out, non_blocking).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_to_cpu`]; panics on error.
    pub fn internal_to_cpu<T: Borrow<Tensor>>(tensors: &[T]) -> Vec<Tensor> {
        Tensor::f_internal_to_cpu(tensors).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_to_dense`]; panics on error.
    pub fn internal_to_dense(&self, dtype: impl Into<Option<Kind>>, masked_grad: bool) -> Tensor {
        self.f_internal_to_dense(dtype, masked_grad).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_to_dense_out`]; panics on error.
    pub fn internal_to_dense_out(
        &self,
        out: &Tensor,
        dtype: impl Into<Option<Kind>>,
        masked_grad: bool,
    ) -> Tensor {
        self.f_internal_to_dense_out(out, dtype, masked_grad).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_to_sparse`]; panics on error.
    pub fn internal_to_sparse(
        &self,
        layout: Option<Layout>,
        blocksize: impl IntListOption,
        dense_dim: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_internal_to_sparse(layout, blocksize, dense_dim).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_to_sparse_bsc`]; panics on error.
    pub fn internal_to_sparse_bsc(
        &self,
        blocksize: impl IntList,
        dense_dim: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_internal_to_sparse_bsc(blocksize, dense_dim).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_to_sparse_bsc_out`]; panics on error.
    pub fn internal_to_sparse_bsc_out(
        &self,
        out: &Tensor,
        blocksize: impl IntList,
        dense_dim: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_internal_to_sparse_bsc_out(out, blocksize, dense_dim).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_to_sparse_bsr`]; panics on error.
    pub fn internal_to_sparse_bsr(
        &self,
        blocksize: impl IntList,
        dense_dim: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_internal_to_sparse_bsr(blocksize, dense_dim).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_to_sparse_bsr_out`]; panics on error.
    pub fn internal_to_sparse_bsr_out(
        &self,
        out: &Tensor,
        blocksize: impl IntList,
        dense_dim: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_internal_to_sparse_bsr_out(out, blocksize, dense_dim).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_to_sparse_csc`]; panics on error.
    pub fn internal_to_sparse_csc(&self, dense_dim: impl Into<Option<i64>>) -> Tensor {
        self.f_internal_to_sparse_csc(dense_dim).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_to_sparse_csc_out`]; panics on error.
    pub fn internal_to_sparse_csc_out(
        &self,
        out: &Tensor,
        dense_dim: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_internal_to_sparse_csc_out(out, dense_dim).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_to_sparse_csr`]; panics on error.
    pub fn internal_to_sparse_csr(&self, dense_dim: impl Into<Option<i64>>) -> Tensor {
        self.f_internal_to_sparse_csr(dense_dim).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_to_sparse_csr_out`]; panics on error.
    pub fn internal_to_sparse_csr_out(
        &self,
        out: &Tensor,
        dense_dim: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_internal_to_sparse_csr_out(out, dense_dim).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_to_sparse_out`]; panics on error.
    pub fn internal_to_sparse_out(
        &self,
        out: &Tensor,
        layout: Option<Layout>,
        blocksize: impl IntListOption,
        dense_dim: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_internal_to_sparse_out(out, layout, blocksize, dense_dim).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_to_sparse_semi_structured`]; panics on error.
    pub fn internal_to_sparse_semi_structured(dense: &Tensor) -> (Tensor, Tensor) {
        Tensor::f_internal_to_sparse_semi_structured(dense).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_to_sparse_sparse_dim`]; panics on error.
    pub fn internal_to_sparse_sparse_dim(&self, sparse_dim: i64) -> Tensor {
        self.f_internal_to_sparse_sparse_dim(sparse_dim).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_to_sparse_sparse_dim_out`]; panics on error.
    pub fn internal_to_sparse_sparse_dim_out(&self, out: &Tensor, sparse_dim: i64) -> Tensor {
        self.f_internal_to_sparse_sparse_dim_out(out, sparse_dim).unwrap()
    }
4363
    /// Infallible form of [`Tensor::f_internal_transform_bias_rescale_qkv`]; panics on error.
    pub fn internal_transform_bias_rescale_qkv(
        qkv: &Tensor,
        qkv_bias: &Tensor,
        num_heads: i64,
    ) -> (Tensor, Tensor, Tensor) {
        Tensor::f_internal_transform_bias_rescale_qkv(qkv, qkv_bias, num_heads).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_transform_bias_rescale_qkv_out`]; panics on error.
    pub fn internal_transform_bias_rescale_qkv_out(
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        qkv: &Tensor,
        qkv_bias: &Tensor,
        num_heads: i64,
    ) -> (Tensor, Tensor, Tensor) {
        Tensor::f_internal_transform_bias_rescale_qkv_out(
            out0, out1, out2, qkv, qkv_bias, num_heads,
        )
        .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_transformer_encoder_layer_fwd`]; panics on error.
    pub fn internal_transformer_encoder_layer_fwd<T: Borrow<Tensor>>(
        src: &Tensor,
        embed_dim: i64,
        num_heads: i64,
        qkv_weight: &Tensor,
        qkv_bias: &Tensor,
        proj_weight: &Tensor,
        proj_bias: &Tensor,
        use_gelu: bool,
        norm_first: bool,
        eps: f64,
        norm_weight_1: &Tensor,
        norm_bias_1: &Tensor,
        norm_weight_2: &Tensor,
        norm_bias_2: &Tensor,
        ffn_weight_1: &Tensor,
        ffn_bias_1: &Tensor,
        ffn_weight_2: &Tensor,
        ffn_bias_2: &Tensor,
        mask: Option<T>,
        mask_type: impl Into<Option<i64>>,
    ) -> Tensor {
        Tensor::f_internal_transformer_encoder_layer_fwd(
            src,
            embed_dim,
            num_heads,
            qkv_weight,
            qkv_bias,
            proj_weight,
            proj_bias,
            use_gelu,
            norm_first,
            eps,
            norm_weight_1,
            norm_bias_1,
            norm_weight_2,
            norm_bias_2,
            ffn_weight_1,
            ffn_bias_1,
            ffn_weight_2,
            ffn_bias_2,
            mask,
            mask_type,
        )
        .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_transformer_encoder_layer_fwd_out`]; panics on error.
    pub fn internal_transformer_encoder_layer_fwd_out<T: Borrow<Tensor>>(
        out: &Tensor,
        src: &Tensor,
        embed_dim: i64,
        num_heads: i64,
        qkv_weight: &Tensor,
        qkv_bias: &Tensor,
        proj_weight: &Tensor,
        proj_bias: &Tensor,
        use_gelu: bool,
        norm_first: bool,
        eps: f64,
        norm_weight_1: &Tensor,
        norm_bias_1: &Tensor,
        norm_weight_2: &Tensor,
        norm_bias_2: &Tensor,
        ffn_weight_1: &Tensor,
        ffn_bias_1: &Tensor,
        ffn_weight_2: &Tensor,
        ffn_bias_2: &Tensor,
        mask: Option<T>,
        mask_type: impl Into<Option<i64>>,
    ) -> Tensor {
        Tensor::f_internal_transformer_encoder_layer_fwd_out(
            out,
            src,
            embed_dim,
            num_heads,
            qkv_weight,
            qkv_bias,
            proj_weight,
            proj_bias,
            use_gelu,
            norm_first,
            eps,
            norm_weight_1,
            norm_bias_1,
            norm_weight_2,
            norm_bias_2,
            ffn_weight_1,
            ffn_bias_1,
            ffn_weight_2,
            ffn_bias_2,
            mask,
            mask_type,
        )
        .unwrap()
    }
4481
    /// Infallible form of [`Tensor::f_internal_trilinear`]; panics on error.
    pub fn internal_trilinear(
        i1: &Tensor,
        i2: &Tensor,
        i3: &Tensor,
        expand1: impl IntList,
        expand2: impl IntList,
        expand3: impl IntList,
        sumdim: impl IntList,
        unroll_dim: i64,
    ) -> Tensor {
        Tensor::f_internal_trilinear(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim)
            .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_trilinear_out`]; panics on error.
    pub fn internal_trilinear_out(
        out: &Tensor,
        i1: &Tensor,
        i2: &Tensor,
        i3: &Tensor,
        expand1: impl IntList,
        expand2: impl IntList,
        expand3: impl IntList,
        sumdim: impl IntList,
        unroll_dim: i64,
    ) -> Tensor {
        Tensor::f_internal_trilinear_out(
            out, i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim,
        )
        .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_triton_multi_head_attention`]; panics on error.
    pub fn internal_triton_multi_head_attention<T: Borrow<Tensor>>(
        query: &Tensor,
        key: &Tensor,
        value: &Tensor,
        embed_dim: i64,
        num_head: i64,
        qkv_weight: &Tensor,
        qkv_bias: &Tensor,
        proj_weight: &Tensor,
        proj_bias: &Tensor,
        mask: Option<T>,
    ) -> Tensor {
        Tensor::f_internal_triton_multi_head_attention(
            query,
            key,
            value,
            embed_dim,
            num_head,
            qkv_weight,
            qkv_bias,
            proj_weight,
            proj_bias,
            mask,
        )
        .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_triton_multi_head_attention_out`]; panics on error.
    pub fn internal_triton_multi_head_attention_out<T: Borrow<Tensor>>(
        out: &Tensor,
        query: &Tensor,
        key: &Tensor,
        value: &Tensor,
        embed_dim: i64,
        num_head: i64,
        qkv_weight: &Tensor,
        qkv_bias: &Tensor,
        proj_weight: &Tensor,
        proj_bias: &Tensor,
        mask: Option<T>,
    ) -> Tensor {
        Tensor::f_internal_triton_multi_head_attention_out(
            out,
            query,
            key,
            value,
            embed_dim,
            num_head,
            qkv_weight,
            qkv_bias,
            proj_weight,
            proj_bias,
            mask,
        )
        .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_triton_scaled_dot_attention`]; panics on error.
    pub fn internal_triton_scaled_dot_attention(
        q: &Tensor,
        k: &Tensor,
        v: &Tensor,
        dropout_p: f64,
    ) -> Tensor {
        Tensor::f_internal_triton_scaled_dot_attention(q, k, v, dropout_p).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_triton_scaled_dot_attention_out`]; panics on error.
    pub fn internal_triton_scaled_dot_attention_out(
        out: &Tensor,
        q: &Tensor,
        k: &Tensor,
        v: &Tensor,
        dropout_p: f64,
    ) -> Tensor {
        Tensor::f_internal_triton_scaled_dot_attention_out(out, q, k, v, dropout_p).unwrap()
    }
4587
    /// Infallible form of [`Tensor::f_internal_unique`]; panics on error.
    pub fn internal_unique(&self, sorted: bool, return_inverse: bool) -> (Tensor, Tensor) {
        self.f_internal_unique(sorted, return_inverse).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_unique2`]; panics on error.
    pub fn internal_unique2(
        &self,
        sorted: bool,
        return_inverse: bool,
        return_counts: bool,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_internal_unique2(sorted, return_inverse, return_counts).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_unique2_out`]; panics on error.
    pub fn internal_unique2_out(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        sorted: bool,
        return_inverse: bool,
        return_counts: bool,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_internal_unique2_out(out0, out1, out2, sorted, return_inverse, return_counts)
            .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_unique_out`]; panics on error.
    pub fn internal_unique_out(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        sorted: bool,
        return_inverse: bool,
    ) -> (Tensor, Tensor) {
        self.f_internal_unique_out(out0, out1, sorted, return_inverse).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_unpack_dual`]; panics on error.
    pub fn internal_unpack_dual(dual: &Tensor, level: i64) -> (Tensor, Tensor) {
        Tensor::f_internal_unpack_dual(dual, level).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_unsafe_index`]; panics on error.
    pub fn internal_unsafe_index<T: Borrow<Tensor>>(&self, indices: &[Option<T>]) -> Tensor {
        self.f_internal_unsafe_index(indices).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_unsafe_index_put`]; panics on error.
    pub fn internal_unsafe_index_put<T: Borrow<Tensor>>(
        &self,
        indices: &[Option<T>],
        values: &Tensor,
        accumulate: bool,
    ) -> Tensor {
        self.f_internal_unsafe_index_put(indices, values, accumulate).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_unsafe_masked_index`]; panics on error.
    pub fn internal_unsafe_masked_index<T: Borrow<Tensor>, S: Into<Scalar>>(
        &self,
        mask: &Tensor,
        indices: &[Option<T>],
        fill: S,
    ) -> Tensor {
        self.f_internal_unsafe_masked_index(mask, indices, fill).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_unsafe_masked_index_put_accumulate`]; panics on error.
    pub fn internal_unsafe_masked_index_put_accumulate<T: Borrow<Tensor>>(
        &self,
        mask: &Tensor,
        indices: &[Option<T>],
        values: &Tensor,
    ) -> Tensor {
        self.f_internal_unsafe_masked_index_put_accumulate(mask, indices, values).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_unsafe_view`]; panics on error.
    pub fn internal_unsafe_view(&self, size: impl IntList) -> Tensor {
        self.f_internal_unsafe_view(size).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_unsafe_view_out`]; panics on error.
    pub fn internal_unsafe_view_out(&self, out: &Tensor, size: impl IntList) -> Tensor {
        self.f_internal_unsafe_view_out(out, size).unwrap()
    }
4666
    /// Infallible form of [`Tensor::f_internal_upsample_bicubic2d_aa`]; panics on error.
    pub fn internal_upsample_bicubic2d_aa(
        &self,
        output_size: impl IntList,
        align_corners: bool,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_internal_upsample_bicubic2d_aa(output_size, align_corners, scales_h, scales_w)
            .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_bicubic2d_aa_backward`]; panics on error.
    pub fn internal_upsample_bicubic2d_aa_backward(
        grad_output: &Tensor,
        output_size: impl IntList,
        input_size: impl IntList,
        align_corners: bool,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        Tensor::f_internal_upsample_bicubic2d_aa_backward(
            grad_output,
            output_size,
            input_size,
            align_corners,
            scales_h,
            scales_w,
        )
        .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_bicubic2d_aa_backward_grad_input`]; panics on error.
    pub fn internal_upsample_bicubic2d_aa_backward_grad_input(
        grad_input: &Tensor,
        grad_output: &Tensor,
        output_size: impl IntList,
        input_size: impl IntList,
        align_corners: bool,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        Tensor::f_internal_upsample_bicubic2d_aa_backward_grad_input(
            grad_input,
            grad_output,
            output_size,
            input_size,
            align_corners,
            scales_h,
            scales_w,
        )
        .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_bicubic2d_aa_out`]; panics on error.
    pub fn internal_upsample_bicubic2d_aa_out(
        &self,
        out: &Tensor,
        output_size: impl IntList,
        align_corners: bool,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_internal_upsample_bicubic2d_aa_out(
            out,
            output_size,
            align_corners,
            scales_h,
            scales_w,
        )
        .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_bicubic2d_aa_vec`]; panics on error.
    pub fn internal_upsample_bicubic2d_aa_vec(
        &self,
        output_size: impl IntListOption,
        align_corners: bool,
        scale_factors: impl DoubleList,
    ) -> Tensor {
        self.f_internal_upsample_bicubic2d_aa_vec(output_size, align_corners, scale_factors)
            .unwrap()
    }
4745
    /// Infallible form of [`Tensor::f_internal_upsample_bilinear2d_aa`]; panics on error.
    pub fn internal_upsample_bilinear2d_aa(
        &self,
        output_size: impl IntList,
        align_corners: bool,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_internal_upsample_bilinear2d_aa(output_size, align_corners, scales_h, scales_w)
            .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_bilinear2d_aa_backward`]; panics on error.
    pub fn internal_upsample_bilinear2d_aa_backward(
        grad_output: &Tensor,
        output_size: impl IntList,
        input_size: impl IntList,
        align_corners: bool,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        Tensor::f_internal_upsample_bilinear2d_aa_backward(
            grad_output,
            output_size,
            input_size,
            align_corners,
            scales_h,
            scales_w,
        )
        .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_bilinear2d_aa_backward_grad_input`]; panics on error.
    pub fn internal_upsample_bilinear2d_aa_backward_grad_input(
        grad_input: &Tensor,
        grad_output: &Tensor,
        output_size: impl IntList,
        input_size: impl IntList,
        align_corners: bool,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        Tensor::f_internal_upsample_bilinear2d_aa_backward_grad_input(
            grad_input,
            grad_output,
            output_size,
            input_size,
            align_corners,
            scales_h,
            scales_w,
        )
        .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_bilinear2d_aa_out`]; panics on error.
    pub fn internal_upsample_bilinear2d_aa_out(
        &self,
        out: &Tensor,
        output_size: impl IntList,
        align_corners: bool,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_internal_upsample_bilinear2d_aa_out(
            out,
            output_size,
            align_corners,
            scales_h,
            scales_w,
        )
        .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_bilinear2d_aa_vec`]; panics on error.
    pub fn internal_upsample_bilinear2d_aa_vec(
        &self,
        output_size: impl IntListOption,
        align_corners: bool,
        scale_factors: impl DoubleList,
    ) -> Tensor {
        self.f_internal_upsample_bilinear2d_aa_vec(output_size, align_corners, scale_factors)
            .unwrap()
    }
4824
    /// Infallible form of [`Tensor::f_internal_upsample_nearest_exact1d`]; panics on error.
    pub fn internal_upsample_nearest_exact1d(
        &self,
        output_size: impl IntList,
        scales: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_internal_upsample_nearest_exact1d(output_size, scales).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_nearest_exact1d_backward`]; panics on error.
    pub fn internal_upsample_nearest_exact1d_backward(
        grad_output: &Tensor,
        output_size: impl IntList,
        input_size: impl IntList,
        scales: impl Into<Option<f64>>,
    ) -> Tensor {
        Tensor::f_internal_upsample_nearest_exact1d_backward(
            grad_output,
            output_size,
            input_size,
            scales,
        )
        .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_nearest_exact1d_backward_grad_input`]; panics on error.
    pub fn internal_upsample_nearest_exact1d_backward_grad_input(
        grad_input: &Tensor,
        grad_output: &Tensor,
        output_size: impl IntList,
        input_size: impl IntList,
        scales: impl Into<Option<f64>>,
    ) -> Tensor {
        Tensor::f_internal_upsample_nearest_exact1d_backward_grad_input(
            grad_input,
            grad_output,
            output_size,
            input_size,
            scales,
        )
        .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_nearest_exact1d_out`]; panics on error.
    pub fn internal_upsample_nearest_exact1d_out(
        &self,
        out: &Tensor,
        output_size: impl IntList,
        scales: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_internal_upsample_nearest_exact1d_out(out, output_size, scales).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_nearest_exact1d_vec`]; panics on error.
    pub fn internal_upsample_nearest_exact1d_vec(
        &self,
        output_size: impl IntListOption,
        scale_factors: impl DoubleList,
    ) -> Tensor {
        self.f_internal_upsample_nearest_exact1d_vec(output_size, scale_factors).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_nearest_exact2d`]; panics on error.
    pub fn internal_upsample_nearest_exact2d(
        &self,
        output_size: impl IntList,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_internal_upsample_nearest_exact2d(output_size, scales_h, scales_w).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_nearest_exact2d_backward`]; panics on error.
    pub fn internal_upsample_nearest_exact2d_backward(
        grad_output: &Tensor,
        output_size: impl IntList,
        input_size: impl IntList,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        Tensor::f_internal_upsample_nearest_exact2d_backward(
            grad_output,
            output_size,
            input_size,
            scales_h,
            scales_w,
        )
        .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_nearest_exact2d_backward_grad_input`]; panics on error.
    pub fn internal_upsample_nearest_exact2d_backward_grad_input(
        grad_input: &Tensor,
        grad_output: &Tensor,
        output_size: impl IntList,
        input_size: impl IntList,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        Tensor::f_internal_upsample_nearest_exact2d_backward_grad_input(
            grad_input,
            grad_output,
            output_size,
            input_size,
            scales_h,
            scales_w,
        )
        .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_nearest_exact2d_out`]; panics on error.
    pub fn internal_upsample_nearest_exact2d_out(
        &self,
        out: &Tensor,
        output_size: impl IntList,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_internal_upsample_nearest_exact2d_out(out, output_size, scales_h, scales_w).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_nearest_exact2d_vec`]; panics on error.
    pub fn internal_upsample_nearest_exact2d_vec(
        &self,
        output_size: impl IntListOption,
        scale_factors: impl DoubleList,
    ) -> Tensor {
        self.f_internal_upsample_nearest_exact2d_vec(output_size, scale_factors).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_nearest_exact3d`]; panics on error.
    pub fn internal_upsample_nearest_exact3d(
        &self,
        output_size: impl IntList,
        scales_d: impl Into<Option<f64>>,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_internal_upsample_nearest_exact3d(output_size, scales_d, scales_h, scales_w).unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_nearest_exact3d_backward`]; panics on error.
    pub fn internal_upsample_nearest_exact3d_backward(
        grad_output: &Tensor,
        output_size: impl IntList,
        input_size: impl IntList,
        scales_d: impl Into<Option<f64>>,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        Tensor::f_internal_upsample_nearest_exact3d_backward(
            grad_output,
            output_size,
            input_size,
            scales_d,
            scales_h,
            scales_w,
        )
        .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_nearest_exact3d_backward_grad_input`]; panics on error.
    pub fn internal_upsample_nearest_exact3d_backward_grad_input(
        grad_input: &Tensor,
        grad_output: &Tensor,
        output_size: impl IntList,
        input_size: impl IntList,
        scales_d: impl Into<Option<f64>>,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        Tensor::f_internal_upsample_nearest_exact3d_backward_grad_input(
            grad_input,
            grad_output,
            output_size,
            input_size,
            scales_d,
            scales_h,
            scales_w,
        )
        .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_nearest_exact3d_out`]; panics on error.
    pub fn internal_upsample_nearest_exact3d_out(
        &self,
        out: &Tensor,
        output_size: impl IntList,
        scales_d: impl Into<Option<f64>>,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_internal_upsample_nearest_exact3d_out(out, output_size, scales_d, scales_h, scales_w)
            .unwrap()
    }

    /// Infallible form of [`Tensor::f_internal_upsample_nearest_exact3d_vec`]; panics on error.
    pub fn internal_upsample_nearest_exact3d_vec(
        &self,
        output_size: impl IntListOption,
        scale_factors: impl DoubleList,
    ) -> Tensor {
        self.f_internal_upsample_nearest_exact3d_vec(output_size, scale_factors).unwrap()
    }
5014
    // Generated panicking wrappers around the fallible `f_*` twins. These three
    // return `bool` predicates (backend-capability queries), not tensors.

    /// Whether the cuDNN CTC-loss kernel can be used for these inputs
    /// (lengths given as host int lists). Panics on error.
    pub fn internal_use_cudnn_ctc_loss(
        log_probs: &Tensor,
        targets: &Tensor,
        input_lengths: impl IntList,
        target_lengths: impl IntList,
        blank: i64,
    ) -> bool {
        Tensor::f_internal_use_cudnn_ctc_loss(
            log_probs,
            targets,
            input_lengths,
            target_lengths,
            blank,
        )
        .unwrap()
    }

    /// Same predicate, but with the lengths supplied as tensors.
    pub fn internal_use_cudnn_ctc_loss_tensor(
        log_probs: &Tensor,
        targets: &Tensor,
        input_lengths: &Tensor,
        target_lengths: &Tensor,
        blank: i64,
    ) -> bool {
        Tensor::f_internal_use_cudnn_ctc_loss_tensor(
            log_probs,
            targets,
            input_lengths,
            target_lengths,
            blank,
        )
        .unwrap()
    }

    /// Whether cuDNN RNN weight flattening is in use. Panics on error.
    pub fn internal_use_cudnn_rnn_flatten_weight() -> bool {
        Tensor::f_internal_use_cudnn_rnn_flatten_weight().unwrap()
    }
5052
    // Generated panicking wrappers for the sparse-tensor argument validators.
    // Each returns `()` and forwards to its fallible `f_*` twin; validation
    // failure surfaces as a panic via `.unwrap()`.

    /// Validate compressed (CSR/CSC-style) sparse indices; `is_crow` selects
    /// row- vs column-compressed interpretation.
    pub fn internal_validate_compressed_sparse_indices(
        is_crow: bool,
        compressed_idx: &Tensor,
        plain_idx: &Tensor,
        cdim: i64,
        dim: i64,
        nnz: i64,
    ) {
        Tensor::f_internal_validate_compressed_sparse_indices(
            is_crow,
            compressed_idx,
            plain_idx,
            cdim,
            dim,
            nnz,
        )
        .unwrap()
    }

    /// Validate BSC (block sparse column) constructor arguments.
    pub fn internal_validate_sparse_bsc_tensor_args(
        ccol_indices: &Tensor,
        row_indices: &Tensor,
        values: &Tensor,
        size: impl IntList,
    ) {
        Tensor::f_internal_validate_sparse_bsc_tensor_args(ccol_indices, row_indices, values, size)
            .unwrap()
    }

    /// Validate BSR (block sparse row) constructor arguments.
    pub fn internal_validate_sparse_bsr_tensor_args(
        crow_indices: &Tensor,
        col_indices: &Tensor,
        values: &Tensor,
        size: impl IntList,
    ) {
        Tensor::f_internal_validate_sparse_bsr_tensor_args(crow_indices, col_indices, values, size)
            .unwrap()
    }

    /// Validate generic sparse-compressed constructor arguments for the given `layout`.
    pub fn internal_validate_sparse_compressed_tensor_args(
        compressed_indices: &Tensor,
        plain_indices: &Tensor,
        values: &Tensor,
        size: impl IntList,
        layout: Layout,
    ) {
        Tensor::f_internal_validate_sparse_compressed_tensor_args(
            compressed_indices,
            plain_indices,
            values,
            size,
            layout,
        )
        .unwrap()
    }

    /// Validate CSC constructor arguments.
    pub fn internal_validate_sparse_csc_tensor_args(
        ccol_indices: &Tensor,
        row_indices: &Tensor,
        values: &Tensor,
        size: impl IntList,
    ) {
        Tensor::f_internal_validate_sparse_csc_tensor_args(ccol_indices, row_indices, values, size)
            .unwrap()
    }

    /// Validate CSR constructor arguments.
    pub fn internal_validate_sparse_csr_tensor_args(
        crow_indices: &Tensor,
        col_indices: &Tensor,
        values: &Tensor,
        size: impl IntList,
    ) {
        Tensor::f_internal_validate_sparse_csr_tensor_args(crow_indices, col_indices, values, size)
            .unwrap()
    }
5128
    // Generated panicking wrappers over the fallible `f_*` twins.

    /// `_values` of a sparse tensor (view — presumably shares storage; confirm in ATen docs).
    pub fn internal_values(&self) -> Tensor {
        self.f_internal_values().unwrap()
    }

    /// Copying variant of `_values`.
    pub fn internal_values_copy(&self) -> Tensor {
        self.f_internal_values_copy().unwrap()
    }

    /// `_values_copy` writing into the provided `out` tensor.
    pub fn internal_values_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_internal_values_copy_out(out).unwrap()
    }

    /// The tensor's internal version counter.
    pub fn internal_version(&self) -> i64 {
        self.f_internal_version().unwrap()
    }
5144
    // Generated panicking wrappers over the fallible `f_*` twins.

    /// Matmul against an int4-packed weight matrix (`mat2`), with group size and
    /// scale/zero-point metadata. Panics on error.
    pub fn internal_weight_int4pack_mm(
        &self,
        mat2: &Tensor,
        qgroupsize: i64,
        qscaleandzeros: &Tensor,
    ) -> Tensor {
        self.f_internal_weight_int4pack_mm(mat2, qgroupsize, qscaleandzeros).unwrap()
    }

    /// Matmul against an int8-packed weight matrix with per-channel `scales`.
    pub fn internal_weight_int8pack_mm(&self, mat2: &Tensor, scales: &Tensor) -> Tensor {
        self.f_internal_weight_int8pack_mm(mat2, scales).unwrap()
    }

    /// `_weight_norm(v, g, dim)` — weight normalization forward.
    pub fn internal_weight_norm(v: &Tensor, g: &Tensor, dim: i64) -> Tensor {
        Tensor::f_internal_weight_norm(v, g, dim).unwrap()
    }
5161
    // Generated panicking wrappers for the weight-norm backward/interface ops
    // and the wrapped (pre-packed) quantized-linear ops. Each forwards to its
    // fallible `f_*` twin and unwraps.

    /// Differentiable backward of weight norm; returns (grad_v, grad_g).
    pub fn internal_weight_norm_differentiable_backward(
        grad_w: &Tensor,
        saved_v: &Tensor,
        saved_g: &Tensor,
        saved_norms: &Tensor,
        dim: i64,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_weight_norm_differentiable_backward(
            grad_w,
            saved_v,
            saved_g,
            saved_norms,
            dim,
        )
        .unwrap()
    }

    /// Weight-norm interface forward; returns (w, norms).
    pub fn internal_weight_norm_interface(v: &Tensor, g: &Tensor, dim: i64) -> (Tensor, Tensor) {
        Tensor::f_internal_weight_norm_interface(v, g, dim).unwrap()
    }

    /// Weight-norm interface backward; returns (grad_v, grad_g).
    pub fn internal_weight_norm_interface_backward(
        grad_w: &Tensor,
        saved_v: &Tensor,
        saved_g: &Tensor,
        saved_norms: &Tensor,
        dim: i64,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_weight_norm_interface_backward(
            grad_w,
            saved_v,
            saved_g,
            saved_norms,
            dim,
        )
        .unwrap()
    }

    /// Out-variant of the interface backward, writing into `out0`/`out1`.
    pub fn internal_weight_norm_interface_backward_out(
        out0: &Tensor,
        out1: &Tensor,
        grad_w: &Tensor,
        saved_v: &Tensor,
        saved_g: &Tensor,
        saved_norms: &Tensor,
        dim: i64,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_weight_norm_interface_backward_out(
            out0,
            out1,
            grad_w,
            saved_v,
            saved_g,
            saved_norms,
            dim,
        )
        .unwrap()
    }

    /// Out-variant of the interface forward, writing into `out0`/`out1`.
    pub fn internal_weight_norm_interface_out(
        out0: &Tensor,
        out1: &Tensor,
        v: &Tensor,
        g: &Tensor,
        dim: i64,
    ) -> (Tensor, Tensor) {
        Tensor::f_internal_weight_norm_interface_out(out0, out1, v, g, dim).unwrap()
    }

    /// Pre-pack a quantized linear weight (with scale/zero-point and bias).
    pub fn internal_wrapped_linear_prepack(
        weight: &Tensor,
        weight_scale: &Tensor,
        weight_zero_point: &Tensor,
        bias: &Tensor,
    ) -> Tensor {
        Tensor::f_internal_wrapped_linear_prepack(weight, weight_scale, weight_zero_point, bias)
            .unwrap()
    }

    /// Quantized linear using a weight previously packed by
    /// `internal_wrapped_linear_prepack`; `self` is the input activation.
    pub fn internal_wrapped_quantized_linear_prepacked(
        &self,
        input_scale: &Tensor,
        input_zero_point: &Tensor,
        packed_weight: &Tensor,
        output_scale: &Tensor,
        output_zero_point: &Tensor,
        out_channel: i64,
    ) -> Tensor {
        self.f_internal_wrapped_quantized_linear_prepacked(
            input_scale,
            input_zero_point,
            packed_weight,
            output_scale,
            output_zero_point,
            out_channel,
        )
        .unwrap()
    }
5260
    // Generated panicking wrappers over the fallible `f_*` twins. Each op comes
    // as a triple: plain (new tensor), `_`-suffixed (in-place, takes `&mut self`),
    // and `_out` (writes into the caller-provided `out` tensor).

    pub fn abs(&self) -> Tensor {
        self.f_abs().unwrap()
    }

    pub fn abs_(&mut self) -> Tensor {
        self.f_abs_().unwrap()
    }

    pub fn abs_out(&self, out: &Tensor) -> Tensor {
        self.f_abs_out(out).unwrap()
    }

    // `absolute*` are aliases of `abs*` at the ATen level.
    pub fn absolute(&self) -> Tensor {
        self.f_absolute().unwrap()
    }

    pub fn absolute_(&mut self) -> Tensor {
        self.f_absolute_().unwrap()
    }

    pub fn absolute_out(&self, out: &Tensor) -> Tensor {
        self.f_absolute_out(out).unwrap()
    }

    pub fn acos(&self) -> Tensor {
        self.f_acos().unwrap()
    }

    pub fn acos_(&mut self) -> Tensor {
        self.f_acos_().unwrap()
    }

    pub fn acos_out(&self, out: &Tensor) -> Tensor {
        self.f_acos_out(out).unwrap()
    }

    pub fn acosh(&self) -> Tensor {
        self.f_acosh().unwrap()
    }

    pub fn acosh_(&mut self) -> Tensor {
        self.f_acosh_().unwrap()
    }

    pub fn acosh_out(&self, out: &Tensor) -> Tensor {
        self.f_acosh_out(out).unwrap()
    }
5308
    // Generated panicking wrappers for the adaptive pooling ops. Each forwards
    // to its fallible `f_*` twin and unwraps. The `adaptive_max_*` forwards
    // return (output, indices); the indices feed the matching `_backward` ops.

    pub fn adaptive_avg_pool1d(&self, output_size: impl IntList) -> Tensor {
        self.f_adaptive_avg_pool1d(output_size).unwrap()
    }

    pub fn adaptive_avg_pool2d(&self, output_size: impl IntList) -> Tensor {
        self.f_adaptive_avg_pool2d(output_size).unwrap()
    }

    pub fn adaptive_avg_pool2d_out(&self, out: &Tensor, output_size: impl IntList) -> Tensor {
        self.f_adaptive_avg_pool2d_out(out, output_size).unwrap()
    }

    pub fn adaptive_avg_pool3d(&self, output_size: impl IntList) -> Tensor {
        self.f_adaptive_avg_pool3d(output_size).unwrap()
    }

    pub fn adaptive_avg_pool3d_backward(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
    ) -> Tensor {
        self.f_adaptive_avg_pool3d_backward(grad_input, grad_output).unwrap()
    }

    pub fn adaptive_avg_pool3d_out(&self, out: &Tensor, output_size: impl IntList) -> Tensor {
        self.f_adaptive_avg_pool3d_out(out, output_size).unwrap()
    }

    pub fn adaptive_max_pool1d(&self, output_size: impl IntList) -> (Tensor, Tensor) {
        self.f_adaptive_max_pool1d(output_size).unwrap()
    }

    pub fn adaptive_max_pool2d(&self, output_size: impl IntList) -> (Tensor, Tensor) {
        self.f_adaptive_max_pool2d(output_size).unwrap()
    }

    pub fn adaptive_max_pool2d_backward(&self, grad_output: &Tensor, indices: &Tensor) -> Tensor {
        self.f_adaptive_max_pool2d_backward(grad_output, indices).unwrap()
    }

    pub fn adaptive_max_pool2d_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        indices: &Tensor,
    ) -> Tensor {
        self.f_adaptive_max_pool2d_backward_grad_input(grad_input, grad_output, indices).unwrap()
    }

    pub fn adaptive_max_pool2d_out(
        &self,
        out: &Tensor,
        indices: &Tensor,
        output_size: impl IntList,
    ) -> (Tensor, Tensor) {
        self.f_adaptive_max_pool2d_out(out, indices, output_size).unwrap()
    }

    pub fn adaptive_max_pool3d(&self, output_size: impl IntList) -> (Tensor, Tensor) {
        self.f_adaptive_max_pool3d(output_size).unwrap()
    }

    pub fn adaptive_max_pool3d_backward(&self, grad_output: &Tensor, indices: &Tensor) -> Tensor {
        self.f_adaptive_max_pool3d_backward(grad_output, indices).unwrap()
    }

    pub fn adaptive_max_pool3d_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        indices: &Tensor,
    ) -> Tensor {
        self.f_adaptive_max_pool3d_backward_grad_input(grad_input, grad_output, indices).unwrap()
    }

    pub fn adaptive_max_pool3d_out(
        &self,
        out: &Tensor,
        indices: &Tensor,
        output_size: impl IntList,
    ) -> (Tensor, Tensor) {
        self.f_adaptive_max_pool3d_out(out, indices, output_size).unwrap()
    }
5392
    // Generated panicking wrappers for elementwise add. The `g_` prefix avoids
    // clashing with the `std::ops::Add` trait methods on `Tensor`.

    pub fn g_add(&self, other: &Tensor) -> Tensor {
        self.f_add(other).unwrap()
    }

    /// In-place add.
    pub fn g_add_(&mut self, other: &Tensor) -> Tensor {
        self.f_add_(other).unwrap()
    }

    pub fn add_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_add_out(out, other).unwrap()
    }

    /// Add a scalar (anything convertible via `Into<Scalar>`).
    pub fn g_add_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_add_scalar(other).unwrap()
    }

    pub fn g_add_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_add_scalar_(other).unwrap()
    }

    pub fn add_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_add_scalar_out(out, other).unwrap()
    }
5416
    // Generated panicking wrappers for the fused add-multiply ops
    // (addbmm/addcdiv/addcmul/addmm/addmv/addr), each in plain, in-place (`_`)
    // and `_out` variants, forwarding to the fallible `f_*` twins.

    pub fn addbmm(&self, batch1: &Tensor, batch2: &Tensor) -> Tensor {
        self.f_addbmm(batch1, batch2).unwrap()
    }

    pub fn addbmm_(&mut self, batch1: &Tensor, batch2: &Tensor) -> Tensor {
        self.f_addbmm_(batch1, batch2).unwrap()
    }

    pub fn addbmm_out(&self, out: &Tensor, batch1: &Tensor, batch2: &Tensor) -> Tensor {
        self.f_addbmm_out(out, batch1, batch2).unwrap()
    }

    pub fn addcdiv(&self, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
        self.f_addcdiv(tensor1, tensor2).unwrap()
    }

    pub fn addcdiv_(&mut self, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
        self.f_addcdiv_(tensor1, tensor2).unwrap()
    }

    pub fn addcdiv_out(&self, out: &Tensor, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
        self.f_addcdiv_out(out, tensor1, tensor2).unwrap()
    }

    pub fn addcmul(&self, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
        self.f_addcmul(tensor1, tensor2).unwrap()
    }

    pub fn addcmul_(&mut self, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
        self.f_addcmul_(tensor1, tensor2).unwrap()
    }

    pub fn addcmul_out(&self, out: &Tensor, tensor1: &Tensor, tensor2: &Tensor) -> Tensor {
        self.f_addcmul_out(out, tensor1, tensor2).unwrap()
    }

    pub fn addmm(&self, mat1: &Tensor, mat2: &Tensor) -> Tensor {
        self.f_addmm(mat1, mat2).unwrap()
    }

    pub fn addmm_(&mut self, mat1: &Tensor, mat2: &Tensor) -> Tensor {
        self.f_addmm_(mat1, mat2).unwrap()
    }

    pub fn addmm_out(&self, out: &Tensor, mat1: &Tensor, mat2: &Tensor) -> Tensor {
        self.f_addmm_out(out, mat1, mat2).unwrap()
    }

    pub fn addmv(&self, mat: &Tensor, vec: &Tensor) -> Tensor {
        self.f_addmv(mat, vec).unwrap()
    }

    pub fn addmv_(&mut self, mat: &Tensor, vec: &Tensor) -> Tensor {
        self.f_addmv_(mat, vec).unwrap()
    }

    pub fn addmv_out(&self, out: &Tensor, mat: &Tensor, vec: &Tensor) -> Tensor {
        self.f_addmv_out(out, mat, vec).unwrap()
    }

    pub fn addr(&self, vec1: &Tensor, vec2: &Tensor) -> Tensor {
        self.f_addr(vec1, vec2).unwrap()
    }

    pub fn addr_(&mut self, vec1: &Tensor, vec2: &Tensor) -> Tensor {
        self.f_addr_(vec1, vec2).unwrap()
    }

    pub fn addr_out(&self, out: &Tensor, vec1: &Tensor, vec2: &Tensor) -> Tensor {
        self.f_addr_out(out, vec1, vec2).unwrap()
    }
5488
    // Generated panicking wrappers over the fallible `f_*` twins.

    pub fn adjoint(&self) -> Tensor {
        self.f_adjoint().unwrap()
    }

    /// Generate an affine sampling grid from parameter batch `theta` for the
    /// given output `size`.
    pub fn affine_grid_generator(
        theta: &Tensor,
        size: impl IntList,
        align_corners: bool,
    ) -> Tensor {
        Tensor::f_affine_grid_generator(theta, size, align_corners).unwrap()
    }

    pub fn affine_grid_generator_backward(
        grad: &Tensor,
        size: impl IntList,
        align_corners: bool,
    ) -> Tensor {
        Tensor::f_affine_grid_generator_backward(grad, size, align_corners).unwrap()
    }

    pub fn affine_grid_generator_out(
        out: &Tensor,
        theta: &Tensor,
        size: impl IntList,
        align_corners: bool,
    ) -> Tensor {
        Tensor::f_affine_grid_generator_out(out, theta, size, align_corners).unwrap()
    }

    pub fn alias(&self) -> Tensor {
        self.f_alias().unwrap()
    }

    pub fn alias_copy(&self) -> Tensor {
        self.f_alias_copy().unwrap()
    }

    pub fn alias_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_alias_copy_out(out).unwrap()
    }
5529
    // Generated panicking wrappers over the fallible `f_*` twins.

    pub fn align_as(&self, other: &Tensor) -> Tensor {
        self.f_align_as(other).unwrap()
    }

    pub fn align_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Vec<Tensor> {
        Tensor::f_align_tensors(tensors).unwrap()
    }

    /// Reduction over all elements (no dim argument).
    pub fn all(&self) -> Tensor {
        self.f_all().unwrap()
    }

    pub fn all_all_out(&self, out: &Tensor) -> Tensor {
        self.f_all_all_out(out).unwrap()
    }

    pub fn all_dim(&self, dim: i64, keepdim: bool) -> Tensor {
        self.f_all_dim(dim, keepdim).unwrap()
    }

    pub fn all_dims(&self, dim: impl IntListOption, keepdim: bool) -> Tensor {
        self.f_all_dims(dim, keepdim).unwrap()
    }

    pub fn all_dims_out(&self, out: &Tensor, dim: impl IntListOption, keepdim: bool) -> Tensor {
        self.f_all_dims_out(out, dim, keepdim).unwrap()
    }

    pub fn all_out(&self, out: &Tensor, dim: i64, keepdim: bool) -> Tensor {
        self.f_all_out(out, dim, keepdim).unwrap()
    }

    /// Elementwise approximate equality within `rtol`/`atol` tolerances.
    pub fn allclose(&self, other: &Tensor, rtol: f64, atol: f64, equal_nan: bool) -> bool {
        self.f_allclose(other, rtol, atol, equal_nan).unwrap()
    }
5564    }
5565
    // Generated panicking wrappers over the fallible `f_*` twins.

    pub fn alpha_dropout(&self, p: f64, train: bool) -> Tensor {
        self.f_alpha_dropout(p, train).unwrap()
    }

    pub fn alpha_dropout_(&mut self, p: f64, train: bool) -> Tensor {
        self.f_alpha_dropout_(p, train).unwrap()
    }

    pub fn amax(&self, dim: impl IntList, keepdim: bool) -> Tensor {
        self.f_amax(dim, keepdim).unwrap()
    }

    pub fn amax_out(&self, out: &Tensor, dim: impl IntList, keepdim: bool) -> Tensor {
        self.f_amax_out(out, dim, keepdim).unwrap()
    }

    pub fn amin(&self, dim: impl IntList, keepdim: bool) -> Tensor {
        self.f_amin(dim, keepdim).unwrap()
    }

    pub fn amin_out(&self, out: &Tensor, dim: impl IntList, keepdim: bool) -> Tensor {
        self.f_amin_out(out, dim, keepdim).unwrap()
    }

    /// Min and max in one pass; `dim: None` reduces over all elements.
    pub fn aminmax(&self, dim: impl Into<Option<i64>>, keepdim: bool) -> (Tensor, Tensor) {
        self.f_aminmax(dim, keepdim).unwrap()
    }

    pub fn aminmax_out(
        &self,
        min: &Tensor,
        max: &Tensor,
        dim: impl Into<Option<i64>>,
        keepdim: bool,
    ) -> (Tensor, Tensor) {
        self.f_aminmax_out(min, max, dim, keepdim).unwrap()
    }
5602    }
5603
    // Generated panicking wrappers over the fallible `f_*` twins.

    pub fn angle(&self) -> Tensor {
        self.f_angle().unwrap()
    }

    pub fn angle_out(&self, out: &Tensor) -> Tensor {
        self.f_angle_out(out).unwrap()
    }

    /// Reduction over all elements (no dim argument).
    pub fn any(&self) -> Tensor {
        self.f_any().unwrap()
    }

    pub fn any_all_out(&self, out: &Tensor) -> Tensor {
        self.f_any_all_out(out).unwrap()
    }

    pub fn any_dim(&self, dim: i64, keepdim: bool) -> Tensor {
        self.f_any_dim(dim, keepdim).unwrap()
    }

    pub fn any_dims(&self, dim: impl IntListOption, keepdim: bool) -> Tensor {
        self.f_any_dims(dim, keepdim).unwrap()
    }

    pub fn any_dims_out(&self, out: &Tensor, dim: impl IntListOption, keepdim: bool) -> Tensor {
        self.f_any_dims_out(out, dim, keepdim).unwrap()
    }

    pub fn any_out(&self, out: &Tensor, dim: i64, keepdim: bool) -> Tensor {
        self.f_any_out(out, dim, keepdim).unwrap()
    }

    // `arange*` are constructors: `options` selects the (Kind, Device) of the
    // new tensor.
    pub fn arange<S: Into<Scalar>>(end: S, options: (Kind, Device)) -> Tensor {
        Tensor::f_arange(end, options).unwrap()
    }

    pub fn arange_start<S: Into<Scalar>>(start: S, end: S, options: (Kind, Device)) -> Tensor {
        Tensor::f_arange_start(start, end, options).unwrap()
    }

    pub fn arange_start_step<S: Into<Scalar>>(
        start: S,
        end: S,
        step: S,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_arange_start_step(start, end, step, options).unwrap()
    }
5652
    // Generated panicking wrappers. The `arc*` names are ATen aliases of the
    // `a*` inverse-trig ops; each comes in plain / in-place (`_`) / `_out` form.

    pub fn arccos(&self) -> Tensor {
        self.f_arccos().unwrap()
    }

    pub fn arccos_(&mut self) -> Tensor {
        self.f_arccos_().unwrap()
    }

    pub fn arccos_out(&self, out: &Tensor) -> Tensor {
        self.f_arccos_out(out).unwrap()
    }

    pub fn arccosh(&self) -> Tensor {
        self.f_arccosh().unwrap()
    }

    pub fn arccosh_(&mut self) -> Tensor {
        self.f_arccosh_().unwrap()
    }

    pub fn arccosh_out(&self, out: &Tensor) -> Tensor {
        self.f_arccosh_out(out).unwrap()
    }

    pub fn arcsin(&self) -> Tensor {
        self.f_arcsin().unwrap()
    }

    pub fn arcsin_(&mut self) -> Tensor {
        self.f_arcsin_().unwrap()
    }

    pub fn arcsin_out(&self, out: &Tensor) -> Tensor {
        self.f_arcsin_out(out).unwrap()
    }

    pub fn arcsinh(&self) -> Tensor {
        self.f_arcsinh().unwrap()
    }

    pub fn arcsinh_(&mut self) -> Tensor {
        self.f_arcsinh_().unwrap()
    }

    pub fn arcsinh_out(&self, out: &Tensor) -> Tensor {
        self.f_arcsinh_out(out).unwrap()
    }

    pub fn arctan(&self) -> Tensor {
        self.f_arctan().unwrap()
    }

    /// Two-argument arctangent; `self` is the y coordinate, `other` the x.
    pub fn arctan2(&self, other: &Tensor) -> Tensor {
        self.f_arctan2(other).unwrap()
    }

    pub fn arctan2_(&mut self, other: &Tensor) -> Tensor {
        self.f_arctan2_(other).unwrap()
    }

    pub fn arctan2_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_arctan2_out(out, other).unwrap()
    }

    pub fn arctan_(&mut self) -> Tensor {
        self.f_arctan_().unwrap()
    }

    pub fn arctan_out(&self, out: &Tensor) -> Tensor {
        self.f_arctan_out(out).unwrap()
    }

    pub fn arctanh(&self) -> Tensor {
        self.f_arctanh().unwrap()
    }

    pub fn arctanh_(&mut self) -> Tensor {
        self.f_arctanh_().unwrap()
    }

    pub fn arctanh_out(&self, out: &Tensor) -> Tensor {
        self.f_arctanh_out(out).unwrap()
    }
5736
    // Generated panicking wrappers over the fallible `f_*` twins.
    // `dim: None` on argmax/argmin reduces over the flattened tensor.

    pub fn argmax(&self, dim: impl Into<Option<i64>>, keepdim: bool) -> Tensor {
        self.f_argmax(dim, keepdim).unwrap()
    }

    pub fn argmax_out(&self, out: &Tensor, dim: impl Into<Option<i64>>, keepdim: bool) -> Tensor {
        self.f_argmax_out(out, dim, keepdim).unwrap()
    }

    pub fn argmin(&self, dim: impl Into<Option<i64>>, keepdim: bool) -> Tensor {
        self.f_argmin(dim, keepdim).unwrap()
    }

    pub fn argmin_out(&self, out: &Tensor, dim: impl Into<Option<i64>>, keepdim: bool) -> Tensor {
        self.f_argmin_out(out, dim, keepdim).unwrap()
    }

    pub fn argsort(&self, dim: i64, descending: bool) -> Tensor {
        self.f_argsort(dim, descending).unwrap()
    }

    /// `argsort` with an explicit `stable` flag controlling sort stability.
    pub fn argsort_stable(&self, stable: bool, dim: i64, descending: bool) -> Tensor {
        self.f_argsort_stable(stable, dim, descending).unwrap()
    }

    pub fn argsort_stable_out(
        &self,
        out: &Tensor,
        stable: bool,
        dim: i64,
        descending: bool,
    ) -> Tensor {
        self.f_argsort_stable_out(out, stable, dim, descending).unwrap()
    }

    pub fn argwhere(&self) -> Tensor {
        self.f_argwhere().unwrap()
    }
5774
    // Generated panicking wrappers for the `as_strided` family. All take an
    // explicit size/stride pair plus an optional storage offset (`None`
    // presumably keeps the current offset — confirm in torch docs).

    pub fn as_strided(
        &self,
        size: impl IntList,
        stride: impl IntList,
        storage_offset: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_as_strided(size, stride, storage_offset).unwrap()
    }

    pub fn as_strided_(
        &mut self,
        size: impl IntList,
        stride: impl IntList,
        storage_offset: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_as_strided_(size, stride, storage_offset).unwrap()
    }

    pub fn as_strided_copy(
        &self,
        size: impl IntList,
        stride: impl IntList,
        storage_offset: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_as_strided_copy(size, stride, storage_offset).unwrap()
    }

    pub fn as_strided_copy_out(
        &self,
        out: &Tensor,
        size: impl IntList,
        stride: impl IntList,
        storage_offset: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_as_strided_copy_out(out, size, stride, storage_offset).unwrap()
    }

    pub fn as_strided_scatter(
        &self,
        src: &Tensor,
        size: impl IntList,
        stride: impl IntList,
        storage_offset: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_as_strided_scatter(src, size, stride, storage_offset).unwrap()
    }

    pub fn as_strided_scatter_out(
        &self,
        out: &Tensor,
        src: &Tensor,
        size: impl IntList,
        stride: impl IntList,
        storage_offset: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_as_strided_scatter_out(out, src, size, stride, storage_offset).unwrap()
    }
5832
    // Generated panicking wrappers over the fallible `f_*` twins. The
    // `atleast_*_sequence` variants apply the op across a slice of tensors.

    pub fn asin(&self) -> Tensor {
        self.f_asin().unwrap()
    }

    pub fn asin_(&mut self) -> Tensor {
        self.f_asin_().unwrap()
    }

    pub fn asin_out(&self, out: &Tensor) -> Tensor {
        self.f_asin_out(out).unwrap()
    }

    pub fn asinh(&self) -> Tensor {
        self.f_asinh().unwrap()
    }

    pub fn asinh_(&mut self) -> Tensor {
        self.f_asinh_().unwrap()
    }

    pub fn asinh_out(&self, out: &Tensor) -> Tensor {
        self.f_asinh_out(out).unwrap()
    }

    pub fn atan(&self) -> Tensor {
        self.f_atan().unwrap()
    }

    /// Two-argument arctangent; `self` is the y coordinate, `other` the x.
    pub fn atan2(&self, other: &Tensor) -> Tensor {
        self.f_atan2(other).unwrap()
    }

    pub fn atan2_(&mut self, other: &Tensor) -> Tensor {
        self.f_atan2_(other).unwrap()
    }

    pub fn atan2_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_atan2_out(out, other).unwrap()
    }

    pub fn atan_(&mut self) -> Tensor {
        self.f_atan_().unwrap()
    }

    pub fn atan_out(&self, out: &Tensor) -> Tensor {
        self.f_atan_out(out).unwrap()
    }

    pub fn atanh(&self) -> Tensor {
        self.f_atanh().unwrap()
    }

    pub fn atanh_(&mut self) -> Tensor {
        self.f_atanh_().unwrap()
    }

    pub fn atanh_out(&self, out: &Tensor) -> Tensor {
        self.f_atanh_out(out).unwrap()
    }

    pub fn atleast_1d(&self) -> Tensor {
        self.f_atleast_1d().unwrap()
    }

    pub fn atleast_1d_sequence<T: Borrow<Tensor>>(tensors: &[T]) -> Vec<Tensor> {
        Tensor::f_atleast_1d_sequence(tensors).unwrap()
    }

    pub fn atleast_2d(&self) -> Tensor {
        self.f_atleast_2d().unwrap()
    }

    pub fn atleast_2d_sequence<T: Borrow<Tensor>>(tensors: &[T]) -> Vec<Tensor> {
        Tensor::f_atleast_2d_sequence(tensors).unwrap()
    }

    pub fn atleast_3d(&self) -> Tensor {
        self.f_atleast_3d().unwrap()
    }

    pub fn atleast_3d_sequence<T: Borrow<Tensor>>(tensors: &[T]) -> Vec<Tensor> {
        Tensor::f_atleast_3d_sequence(tensors).unwrap()
    }
5916
    // Generated panicking wrappers for average pooling (1d/2d/3d) and their
    // backward / grad-input / out variants, forwarding to the fallible `f_*`
    // twins. 2d/3d additionally accept an optional `divisor_override`
    // (`None` uses the pooling-window element count as the divisor —
    // presumably; confirm in torch docs).

    pub fn avg_pool1d(
        &self,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        ceil_mode: bool,
        count_include_pad: bool,
    ) -> Tensor {
        self.f_avg_pool1d(kernel_size, stride, padding, ceil_mode, count_include_pad).unwrap()
    }

    pub fn avg_pool2d(
        &self,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        ceil_mode: bool,
        count_include_pad: bool,
        divisor_override: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_avg_pool2d(
            kernel_size,
            stride,
            padding,
            ceil_mode,
            count_include_pad,
            divisor_override,
        )
        .unwrap()
    }

    pub fn avg_pool2d_backward(
        &self,
        grad_output: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        ceil_mode: bool,
        count_include_pad: bool,
        divisor_override: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_avg_pool2d_backward(
            grad_output,
            kernel_size,
            stride,
            padding,
            ceil_mode,
            count_include_pad,
            divisor_override,
        )
        .unwrap()
    }

    pub fn avg_pool2d_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        ceil_mode: bool,
        count_include_pad: bool,
        divisor_override: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_avg_pool2d_backward_grad_input(
            grad_input,
            grad_output,
            kernel_size,
            stride,
            padding,
            ceil_mode,
            count_include_pad,
            divisor_override,
        )
        .unwrap()
    }

    pub fn avg_pool2d_out(
        &self,
        out: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        ceil_mode: bool,
        count_include_pad: bool,
        divisor_override: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_avg_pool2d_out(
            out,
            kernel_size,
            stride,
            padding,
            ceil_mode,
            count_include_pad,
            divisor_override,
        )
        .unwrap()
    }

    pub fn avg_pool3d(
        &self,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        ceil_mode: bool,
        count_include_pad: bool,
        divisor_override: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_avg_pool3d(
            kernel_size,
            stride,
            padding,
            ceil_mode,
            count_include_pad,
            divisor_override,
        )
        .unwrap()
    }

    pub fn avg_pool3d_backward(
        &self,
        grad_output: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        ceil_mode: bool,
        count_include_pad: bool,
        divisor_override: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_avg_pool3d_backward(
            grad_output,
            kernel_size,
            stride,
            padding,
            ceil_mode,
            count_include_pad,
            divisor_override,
        )
        .unwrap()
    }

    pub fn avg_pool3d_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        ceil_mode: bool,
        count_include_pad: bool,
        divisor_override: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_avg_pool3d_backward_grad_input(
            grad_input,
            grad_output,
            kernel_size,
            stride,
            padding,
            ceil_mode,
            count_include_pad,
            divisor_override,
        )
        .unwrap()
    }

    pub fn avg_pool3d_out(
        &self,
        out: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        ceil_mode: bool,
        count_include_pad: bool,
        divisor_override: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_avg_pool3d_out(
            out,
            kernel_size,
            stride,
            padding,
            ceil_mode,
            count_include_pad,
            divisor_override,
        )
        .unwrap()
    }
6103
    /// Panicking form of [`Self::f_baddbmm`]; unwraps the `Result`.
    pub fn baddbmm<S: Into<Scalar>>(
        &self,
        batch1: &Tensor,
        batch2: &Tensor,
        beta: S,
        alpha: S,
    ) -> Tensor {
        self.f_baddbmm(batch1, batch2, beta, alpha).unwrap()
    }

    /// In-place variant; panicking form of [`Self::f_baddbmm_`].
    /// NOTE(review): unlike `baddbmm`, no `beta`/`alpha` parameters are exposed here —
    /// mirrors the generated fallible signature; confirm against the generator if surprising.
    pub fn baddbmm_(&mut self, batch1: &Tensor, batch2: &Tensor) -> Tensor {
        self.f_baddbmm_(batch1, batch2).unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_baddbmm_out`].
    pub fn baddbmm_out(&self, out: &Tensor, batch1: &Tensor, batch2: &Tensor) -> Tensor {
        self.f_baddbmm_out(out, batch1, batch2).unwrap()
    }

    /// Panicking form of [`Tensor::f_bartlett_window`]; unwraps the `Result`.
    pub fn bartlett_window(window_length: i64, options: (Kind, Device)) -> Tensor {
        Tensor::f_bartlett_window(window_length, options).unwrap()
    }

    /// Panicking form of [`Tensor::f_bartlett_window_out`]; unwraps the `Result`.
    pub fn bartlett_window_out(out: &Tensor, window_length: i64) -> Tensor {
        Tensor::f_bartlett_window_out(out, window_length).unwrap()
    }

    /// Panicking form of [`Tensor::f_bartlett_window_periodic`]; unwraps the `Result`.
    pub fn bartlett_window_periodic(
        window_length: i64,
        periodic: bool,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_bartlett_window_periodic(window_length, periodic, options).unwrap()
    }

    /// Panicking form of [`Tensor::f_bartlett_window_periodic_out`]; unwraps the `Result`.
    pub fn bartlett_window_periodic_out(
        out: &Tensor,
        window_length: i64,
        periodic: bool,
    ) -> Tensor {
        Tensor::f_bartlett_window_periodic_out(out, window_length, periodic).unwrap()
    }
6145
    /// Panicking form of [`Self::f_batch_norm`]; unwraps the `Result`.
    pub fn batch_norm<T: Borrow<Tensor>>(
        &self,
        weight: Option<T>,
        bias: Option<T>,
        running_mean: Option<T>,
        running_var: Option<T>,
        training: bool,
        momentum: f64,
        eps: f64,
        cudnn_enabled: bool,
    ) -> Tensor {
        self.f_batch_norm(
            weight,
            bias,
            running_mean,
            running_var,
            training,
            momentum,
            eps,
            cudnn_enabled,
        )
        .unwrap()
    }

    /// Panicking form of [`Self::f_batch_norm_backward_elemt`]; unwraps the `Result`.
    pub fn batch_norm_backward_elemt<T: Borrow<Tensor>>(
        &self,
        grad_out: &Tensor,
        mean: &Tensor,
        invstd: &Tensor,
        weight: Option<T>,
        sum_dy: &Tensor,
        sum_dy_xmu: &Tensor,
        count: &Tensor,
    ) -> Tensor {
        self.f_batch_norm_backward_elemt(grad_out, mean, invstd, weight, sum_dy, sum_dy_xmu, count)
            .unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_batch_norm_backward_elemt_out`].
    pub fn batch_norm_backward_elemt_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        grad_out: &Tensor,
        mean: &Tensor,
        invstd: &Tensor,
        weight: Option<T>,
        sum_dy: &Tensor,
        sum_dy_xmu: &Tensor,
        count: &Tensor,
    ) -> Tensor {
        self.f_batch_norm_backward_elemt_out(
            out, grad_out, mean, invstd, weight, sum_dy, sum_dy_xmu, count,
        )
        .unwrap()
    }

    /// Panicking form of [`Self::f_batch_norm_backward_reduce`]; returns the 4-tuple on success.
    pub fn batch_norm_backward_reduce<T: Borrow<Tensor>>(
        &self,
        grad_out: &Tensor,
        mean: &Tensor,
        invstd: &Tensor,
        weight: Option<T>,
        input_g: bool,
        weight_g: bool,
        bias_g: bool,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        self.f_batch_norm_backward_reduce(grad_out, mean, invstd, weight, input_g, weight_g, bias_g)
            .unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_batch_norm_backward_reduce_out`].
    pub fn batch_norm_backward_reduce_out<T: Borrow<Tensor>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        out3: &Tensor,
        grad_out: &Tensor,
        mean: &Tensor,
        invstd: &Tensor,
        weight: Option<T>,
        input_g: bool,
        weight_g: bool,
        bias_g: bool,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        self.f_batch_norm_backward_reduce_out(
            out0, out1, out2, out3, grad_out, mean, invstd, weight, input_g, weight_g, bias_g,
        )
        .unwrap()
    }

    /// Panicking form of [`Self::f_batch_norm_elemt`]; unwraps the `Result`.
    pub fn batch_norm_elemt<T: Borrow<Tensor>>(
        &self,
        weight: Option<T>,
        bias: Option<T>,
        mean: &Tensor,
        invstd: &Tensor,
        eps: f64,
    ) -> Tensor {
        self.f_batch_norm_elemt(weight, bias, mean, invstd, eps).unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_batch_norm_elemt_out`].
    pub fn batch_norm_elemt_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: Option<T>,
        bias: Option<T>,
        mean: &Tensor,
        invstd: &Tensor,
        eps: f64,
    ) -> Tensor {
        self.f_batch_norm_elemt_out(out, weight, bias, mean, invstd, eps).unwrap()
    }

    /// Panicking form of [`Self::f_batch_norm_gather_stats`]; returns `(mean, invstd)`-style pair on success.
    pub fn batch_norm_gather_stats<T: Borrow<Tensor>>(
        &self,
        mean: &Tensor,
        invstd: &Tensor,
        running_mean: Option<T>,
        running_var: Option<T>,
        momentum: f64,
        eps: f64,
        count: i64,
    ) -> (Tensor, Tensor) {
        self.f_batch_norm_gather_stats(
            mean,
            invstd,
            running_mean,
            running_var,
            momentum,
            eps,
            count,
        )
        .unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_batch_norm_gather_stats_out`].
    pub fn batch_norm_gather_stats_out<T: Borrow<Tensor>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        mean: &Tensor,
        invstd: &Tensor,
        running_mean: Option<T>,
        running_var: Option<T>,
        momentum: f64,
        eps: f64,
        count: i64,
    ) -> (Tensor, Tensor) {
        self.f_batch_norm_gather_stats_out(
            out0,
            out1,
            mean,
            invstd,
            running_mean,
            running_var,
            momentum,
            eps,
            count,
        )
        .unwrap()
    }

    /// Like `batch_norm_gather_stats` but takes per-replica `counts` as a tensor; panics on error.
    pub fn batch_norm_gather_stats_with_counts<T: Borrow<Tensor>>(
        &self,
        mean: &Tensor,
        invstd: &Tensor,
        running_mean: Option<T>,
        running_var: Option<T>,
        momentum: f64,
        eps: f64,
        counts: &Tensor,
    ) -> (Tensor, Tensor) {
        self.f_batch_norm_gather_stats_with_counts(
            mean,
            invstd,
            running_mean,
            running_var,
            momentum,
            eps,
            counts,
        )
        .unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_batch_norm_gather_stats_with_counts_out`].
    pub fn batch_norm_gather_stats_with_counts_out<T: Borrow<Tensor>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        mean: &Tensor,
        invstd: &Tensor,
        running_mean: Option<T>,
        running_var: Option<T>,
        momentum: f64,
        eps: f64,
        counts: &Tensor,
    ) -> (Tensor, Tensor) {
        self.f_batch_norm_gather_stats_with_counts_out(
            out0,
            out1,
            mean,
            invstd,
            running_mean,
            running_var,
            momentum,
            eps,
            counts,
        )
        .unwrap()
    }

    /// Panicking form of [`Self::f_batch_norm_stats`]; unwraps the `Result`.
    pub fn batch_norm_stats(&self, eps: f64) -> (Tensor, Tensor) {
        self.f_batch_norm_stats(eps).unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_batch_norm_stats_out`].
    pub fn batch_norm_stats_out(&self, out0: &Tensor, out1: &Tensor, eps: f64) -> (Tensor, Tensor) {
        self.f_batch_norm_stats_out(out0, out1, eps).unwrap()
    }

    /// Panicking form of [`Self::f_batch_norm_update_stats`]; unwraps the `Result`.
    pub fn batch_norm_update_stats<T: Borrow<Tensor>>(
        &self,
        running_mean: Option<T>,
        running_var: Option<T>,
        momentum: f64,
    ) -> (Tensor, Tensor) {
        self.f_batch_norm_update_stats(running_mean, running_var, momentum).unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_batch_norm_update_stats_out`].
    pub fn batch_norm_update_stats_out<T: Borrow<Tensor>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        running_mean: Option<T>,
        running_var: Option<T>,
        momentum: f64,
    ) -> (Tensor, Tensor) {
        self.f_batch_norm_update_stats_out(out0, out1, running_mean, running_var, momentum).unwrap()
    }

    /// Panicking form of [`Self::f_bernoulli`]; unwraps the `Result`.
    pub fn bernoulli(&self) -> Tensor {
        self.f_bernoulli().unwrap()
    }

    /// In-place variant taking probabilities from tensor `p`; panics on error.
    pub fn bernoulli_(&mut self, p: &Tensor) -> Tensor {
        self.f_bernoulli_(p).unwrap()
    }

    /// In-place variant with a scalar probability `p`; panics on error.
    pub fn bernoulli_float_(&mut self, p: f64) -> Tensor {
        self.f_bernoulli_float_(p).unwrap()
    }

    /// Panicking form of [`Self::f_bernoulli_p`]; unwraps the `Result`.
    pub fn bernoulli_p(&self, p: f64) -> Tensor {
        self.f_bernoulli_p(p).unwrap()
    }

    /// Panicking form of [`Self::f_bernoulli_tensor`]; unwraps the `Result`.
    pub fn bernoulli_tensor(&self, p: &Tensor) -> Tensor {
        self.f_bernoulli_tensor(p).unwrap()
    }
6401
    /// Panicking form of [`Tensor::f_bilinear`]; unwraps the `Result`.
    pub fn bilinear<T: Borrow<Tensor>>(
        input1: &Tensor,
        input2: &Tensor,
        weight: &Tensor,
        bias: Option<T>,
    ) -> Tensor {
        Tensor::f_bilinear(input1, input2, weight, bias).unwrap()
    }

    /// Panicking form of [`Self::f_binary_cross_entropy`]; unwraps the `Result`.
    pub fn binary_cross_entropy<T: Borrow<Tensor>>(
        &self,
        target: &Tensor,
        weight: Option<T>,
        reduction: crate::Reduction,
    ) -> Tensor {
        self.f_binary_cross_entropy(target, weight, reduction).unwrap()
    }

    /// Panicking form of [`Self::f_binary_cross_entropy_backward`]; unwraps the `Result`.
    pub fn binary_cross_entropy_backward<T: Borrow<Tensor>>(
        &self,
        grad_output: &Tensor,
        target: &Tensor,
        weight: Option<T>,
        reduction: crate::Reduction,
    ) -> Tensor {
        self.f_binary_cross_entropy_backward(grad_output, target, weight, reduction).unwrap()
    }

    /// Backward into a preallocated `grad_input`; panicking form of the fallible variant.
    pub fn binary_cross_entropy_backward_grad_input<T: Borrow<Tensor>>(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        target: &Tensor,
        weight: Option<T>,
        reduction: crate::Reduction,
    ) -> Tensor {
        self.f_binary_cross_entropy_backward_grad_input(
            grad_input,
            grad_output,
            target,
            weight,
            reduction,
        )
        .unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_binary_cross_entropy_out`].
    pub fn binary_cross_entropy_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        target: &Tensor,
        weight: Option<T>,
        reduction: crate::Reduction,
    ) -> Tensor {
        self.f_binary_cross_entropy_out(out, target, weight, reduction).unwrap()
    }

    /// Panicking form of [`Self::f_binary_cross_entropy_with_logits`]; unwraps the `Result`.
    pub fn binary_cross_entropy_with_logits<T: Borrow<Tensor>>(
        &self,
        target: &Tensor,
        weight: Option<T>,
        pos_weight: Option<T>,
        reduction: crate::Reduction,
    ) -> Tensor {
        self.f_binary_cross_entropy_with_logits(target, weight, pos_weight, reduction).unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_binary_cross_entropy_with_logits_out`].
    pub fn binary_cross_entropy_with_logits_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        target: &Tensor,
        weight: Option<T>,
        pos_weight: Option<T>,
        reduction: crate::Reduction,
    ) -> Tensor {
        self.f_binary_cross_entropy_with_logits_out(out, target, weight, pos_weight, reduction)
            .unwrap()
    }

    /// Panicking form of [`Self::f_bincount`]; unwraps the `Result`.
    pub fn bincount<T: Borrow<Tensor>>(&self, weights: Option<T>, minlength: i64) -> Tensor {
        self.f_bincount(weights, minlength).unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_bincount_out`].
    pub fn bincount_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weights: Option<T>,
        minlength: i64,
    ) -> Tensor {
        self.f_bincount_out(out, weights, minlength).unwrap()
    }

    /// Panicking form of [`Tensor::f_binomial`]; unwraps the `Result`.
    pub fn binomial(count: &Tensor, prob: &Tensor) -> Tensor {
        Tensor::f_binomial(count, prob).unwrap()
    }

    /// Out-variant; panicking form of [`Tensor::f_binomial_out`].
    pub fn binomial_out(out: &Tensor, count: &Tensor, prob: &Tensor) -> Tensor {
        Tensor::f_binomial_out(out, count, prob).unwrap()
    }
6500
    /// Panicking form of [`Self::f_bitwise_and`] (scalar rhs); unwraps the `Result`.
    pub fn bitwise_and<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_bitwise_and(other).unwrap()
    }

    /// In-place variant; panicking form of [`Self::f_bitwise_and_`].
    pub fn bitwise_and_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_bitwise_and_(other).unwrap()
    }

    /// Out-variant with a scalar rhs; panics on error.
    pub fn bitwise_and_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_bitwise_and_scalar_out(out, other).unwrap()
    }

    /// Scalar-lhs / tensor-rhs variant; panicking form of the fallible call.
    pub fn bitwise_and_scalar_tensor<S: Into<Scalar>>(self_scalar: S, other: &Tensor) -> Tensor {
        Tensor::f_bitwise_and_scalar_tensor(self_scalar, other).unwrap()
    }

    /// Out-variant of the scalar-lhs / tensor-rhs form; panics on error.
    pub fn bitwise_and_scalar_tensor_out<S: Into<Scalar>>(
        out: &Tensor,
        self_scalar: S,
        other: &Tensor,
    ) -> Tensor {
        Tensor::f_bitwise_and_scalar_tensor_out(out, self_scalar, other).unwrap()
    }

    /// Tensor-rhs variant; panicking form of [`Self::f_bitwise_and_tensor`].
    pub fn bitwise_and_tensor(&self, other: &Tensor) -> Tensor {
        self.f_bitwise_and_tensor(other).unwrap()
    }

    /// In-place tensor-rhs variant; panics on error.
    pub fn bitwise_and_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_bitwise_and_tensor_(other).unwrap()
    }

    /// Out-variant of the tensor-rhs form; panics on error.
    pub fn bitwise_and_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_bitwise_and_tensor_out(out, other).unwrap()
    }

    /// Panicking form of [`Self::f_bitwise_left_shift`]; unwraps the `Result`.
    pub fn bitwise_left_shift(&self, other: &Tensor) -> Tensor {
        self.f_bitwise_left_shift(other).unwrap()
    }

    /// In-place variant; panicking form of [`Self::f_bitwise_left_shift_`].
    pub fn bitwise_left_shift_(&mut self, other: &Tensor) -> Tensor {
        self.f_bitwise_left_shift_(other).unwrap()
    }

    /// Scalar-lhs / tensor-rhs variant; panicking form of the fallible call.
    pub fn bitwise_left_shift_scalar_tensor<S: Into<Scalar>>(
        self_scalar: S,
        other: &Tensor,
    ) -> Tensor {
        Tensor::f_bitwise_left_shift_scalar_tensor(self_scalar, other).unwrap()
    }

    /// Out-variant of the scalar-lhs / tensor-rhs form; panics on error.
    pub fn bitwise_left_shift_scalar_tensor_out<S: Into<Scalar>>(
        out: &Tensor,
        self_scalar: S,
        other: &Tensor,
    ) -> Tensor {
        Tensor::f_bitwise_left_shift_scalar_tensor_out(out, self_scalar, other).unwrap()
    }

    /// Out-variant of the tensor-rhs form; panics on error.
    pub fn bitwise_left_shift_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_bitwise_left_shift_tensor_out(out, other).unwrap()
    }

    /// Tensor-lhs / scalar-rhs variant; panics on error.
    pub fn bitwise_left_shift_tensor_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_bitwise_left_shift_tensor_scalar(other).unwrap()
    }

    /// In-place tensor-lhs / scalar-rhs variant; panics on error.
    pub fn bitwise_left_shift_tensor_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_bitwise_left_shift_tensor_scalar_(other).unwrap()
    }

    /// Out-variant of the tensor-lhs / scalar-rhs form; panics on error.
    pub fn bitwise_left_shift_tensor_scalar_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        other: S,
    ) -> Tensor {
        self.f_bitwise_left_shift_tensor_scalar_out(out, other).unwrap()
    }

    /// Panicking form of [`Self::f_bitwise_not`]; unwraps the `Result`.
    pub fn bitwise_not(&self) -> Tensor {
        self.f_bitwise_not().unwrap()
    }

    /// In-place variant; panicking form of [`Self::f_bitwise_not_`].
    pub fn bitwise_not_(&mut self) -> Tensor {
        self.f_bitwise_not_().unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_bitwise_not_out`].
    pub fn bitwise_not_out(&self, out: &Tensor) -> Tensor {
        self.f_bitwise_not_out(out).unwrap()
    }

    /// Panicking form of [`Self::f_bitwise_or`] (scalar rhs); unwraps the `Result`.
    pub fn bitwise_or<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_bitwise_or(other).unwrap()
    }

    /// In-place variant; panicking form of [`Self::f_bitwise_or_`].
    pub fn bitwise_or_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_bitwise_or_(other).unwrap()
    }

    /// Out-variant with a scalar rhs; panics on error.
    pub fn bitwise_or_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_bitwise_or_scalar_out(out, other).unwrap()
    }

    /// Scalar-lhs / tensor-rhs variant; panicking form of the fallible call.
    pub fn bitwise_or_scalar_tensor<S: Into<Scalar>>(self_scalar: S, other: &Tensor) -> Tensor {
        Tensor::f_bitwise_or_scalar_tensor(self_scalar, other).unwrap()
    }

    /// Out-variant of the scalar-lhs / tensor-rhs form; panics on error.
    pub fn bitwise_or_scalar_tensor_out<S: Into<Scalar>>(
        out: &Tensor,
        self_scalar: S,
        other: &Tensor,
    ) -> Tensor {
        Tensor::f_bitwise_or_scalar_tensor_out(out, self_scalar, other).unwrap()
    }

    /// Tensor-rhs variant; panicking form of [`Self::f_bitwise_or_tensor`].
    pub fn bitwise_or_tensor(&self, other: &Tensor) -> Tensor {
        self.f_bitwise_or_tensor(other).unwrap()
    }

    /// In-place tensor-rhs variant; panics on error.
    pub fn bitwise_or_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_bitwise_or_tensor_(other).unwrap()
    }

    /// Out-variant of the tensor-rhs form; panics on error.
    pub fn bitwise_or_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_bitwise_or_tensor_out(out, other).unwrap()
    }

    /// Panicking form of [`Self::f_bitwise_right_shift`]; unwraps the `Result`.
    pub fn bitwise_right_shift(&self, other: &Tensor) -> Tensor {
        self.f_bitwise_right_shift(other).unwrap()
    }

    /// In-place variant; panicking form of [`Self::f_bitwise_right_shift_`].
    pub fn bitwise_right_shift_(&mut self, other: &Tensor) -> Tensor {
        self.f_bitwise_right_shift_(other).unwrap()
    }

    /// Scalar-lhs / tensor-rhs variant; panicking form of the fallible call.
    pub fn bitwise_right_shift_scalar_tensor<S: Into<Scalar>>(
        self_scalar: S,
        other: &Tensor,
    ) -> Tensor {
        Tensor::f_bitwise_right_shift_scalar_tensor(self_scalar, other).unwrap()
    }

    /// Out-variant of the scalar-lhs / tensor-rhs form; panics on error.
    pub fn bitwise_right_shift_scalar_tensor_out<S: Into<Scalar>>(
        out: &Tensor,
        self_scalar: S,
        other: &Tensor,
    ) -> Tensor {
        Tensor::f_bitwise_right_shift_scalar_tensor_out(out, self_scalar, other).unwrap()
    }

    /// Out-variant of the tensor-rhs form; panics on error.
    pub fn bitwise_right_shift_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_bitwise_right_shift_tensor_out(out, other).unwrap()
    }

    /// Tensor-lhs / scalar-rhs variant; panics on error.
    pub fn bitwise_right_shift_tensor_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_bitwise_right_shift_tensor_scalar(other).unwrap()
    }

    /// In-place tensor-lhs / scalar-rhs variant; panics on error.
    pub fn bitwise_right_shift_tensor_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_bitwise_right_shift_tensor_scalar_(other).unwrap()
    }

    /// Out-variant of the tensor-lhs / scalar-rhs form; panics on error.
    pub fn bitwise_right_shift_tensor_scalar_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        other: S,
    ) -> Tensor {
        self.f_bitwise_right_shift_tensor_scalar_out(out, other).unwrap()
    }

    /// Panicking form of [`Self::f_bitwise_xor`] (scalar rhs); unwraps the `Result`.
    pub fn bitwise_xor<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_bitwise_xor(other).unwrap()
    }

    /// In-place variant; panicking form of [`Self::f_bitwise_xor_`].
    pub fn bitwise_xor_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_bitwise_xor_(other).unwrap()
    }

    /// Out-variant with a scalar rhs; panics on error.
    pub fn bitwise_xor_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_bitwise_xor_scalar_out(out, other).unwrap()
    }

    /// Scalar-lhs / tensor-rhs variant; panicking form of the fallible call.
    pub fn bitwise_xor_scalar_tensor<S: Into<Scalar>>(self_scalar: S, other: &Tensor) -> Tensor {
        Tensor::f_bitwise_xor_scalar_tensor(self_scalar, other).unwrap()
    }

    /// Out-variant of the scalar-lhs / tensor-rhs form; panics on error.
    pub fn bitwise_xor_scalar_tensor_out<S: Into<Scalar>>(
        out: &Tensor,
        self_scalar: S,
        other: &Tensor,
    ) -> Tensor {
        Tensor::f_bitwise_xor_scalar_tensor_out(out, self_scalar, other).unwrap()
    }

    /// Tensor-rhs variant; panicking form of [`Self::f_bitwise_xor_tensor`].
    pub fn bitwise_xor_tensor(&self, other: &Tensor) -> Tensor {
        self.f_bitwise_xor_tensor(other).unwrap()
    }

    /// In-place tensor-rhs variant; panics on error.
    pub fn bitwise_xor_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_bitwise_xor_tensor_(other).unwrap()
    }

    /// Out-variant of the tensor-rhs form; panics on error.
    pub fn bitwise_xor_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_bitwise_xor_tensor_out(out, other).unwrap()
    }
6706
    /// Panicking form of [`Tensor::f_blackman_window`]; unwraps the `Result`.
    pub fn blackman_window(window_length: i64, options: (Kind, Device)) -> Tensor {
        Tensor::f_blackman_window(window_length, options).unwrap()
    }

    /// Out-variant; panicking form of [`Tensor::f_blackman_window_out`].
    pub fn blackman_window_out(out: &Tensor, window_length: i64) -> Tensor {
        Tensor::f_blackman_window_out(out, window_length).unwrap()
    }

    /// Panicking form of [`Tensor::f_blackman_window_periodic`]; unwraps the `Result`.
    pub fn blackman_window_periodic(
        window_length: i64,
        periodic: bool,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_blackman_window_periodic(window_length, periodic, options).unwrap()
    }

    /// Out-variant; panicking form of [`Tensor::f_blackman_window_periodic_out`].
    pub fn blackman_window_periodic_out(
        out: &Tensor,
        window_length: i64,
        periodic: bool,
    ) -> Tensor {
        Tensor::f_blackman_window_periodic_out(out, window_length, periodic).unwrap()
    }

    /// Panicking form of [`Tensor::f_block_diag`]; unwraps the `Result`.
    pub fn block_diag<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
        Tensor::f_block_diag(tensors).unwrap()
    }

    /// Out-variant; panicking form of [`Tensor::f_block_diag_out`].
    pub fn block_diag_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T]) -> Tensor {
        Tensor::f_block_diag_out(out, tensors).unwrap()
    }

    /// Panicking form of [`Self::f_bmm`]; unwraps the `Result`.
    pub fn bmm(&self, mat2: &Tensor) -> Tensor {
        self.f_bmm(mat2).unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_bmm_out`].
    pub fn bmm_out(&self, out: &Tensor, mat2: &Tensor) -> Tensor {
        self.f_bmm_out(out, mat2).unwrap()
    }

    /// Panicking form of [`Tensor::f_broadcast_tensors`]; unwraps the `Result`.
    pub fn broadcast_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Vec<Tensor> {
        Tensor::f_broadcast_tensors(tensors).unwrap()
    }

    /// Panicking form of [`Self::f_broadcast_to`]; unwraps the `Result`.
    pub fn broadcast_to(&self, size: impl IntList) -> Tensor {
        self.f_broadcast_to(size).unwrap()
    }

    /// Panicking form of [`Self::f_bucketize`]; unwraps the `Result`.
    pub fn bucketize(&self, boundaries: &Tensor, out_int32: bool, right: bool) -> Tensor {
        self.f_bucketize(boundaries, out_int32, right).unwrap()
    }
6758
    /// Scalar-lhs variant; panicking form of [`Tensor::f_bucketize_scalar`].
    pub fn bucketize_scalar<S: Into<Scalar>>(
        self_scalar: S,
        boundaries: &Tensor,
        out_int32: bool,
        right: bool,
    ) -> Tensor {
        Tensor::f_bucketize_scalar(self_scalar, boundaries, out_int32, right).unwrap()
    }

    /// Out-variant of the scalar-lhs form; panics on error.
    pub fn bucketize_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        self_scalar: S,
        boundaries: &Tensor,
        out_int32: bool,
        right: bool,
    ) -> Tensor {
        Tensor::f_bucketize_scalar_out(out, self_scalar, boundaries, out_int32, right).unwrap()
    }

    /// Out-variant of the tensor form; panics on error.
    pub fn bucketize_tensor_out(
        &self,
        out: &Tensor,
        boundaries: &Tensor,
        out_int32: bool,
        right: bool,
    ) -> Tensor {
        self.f_bucketize_tensor_out(out, boundaries, out_int32, right).unwrap()
    }

    /// Panicking form of [`Tensor::f_can_cast`]; unwraps the `Result`.
    pub fn can_cast(from_: Kind, to: Kind) -> bool {
        Tensor::f_can_cast(from_, to).unwrap()
    }

    /// Panicking form of [`Tensor::f_cartesian_prod`]; unwraps the `Result`.
    pub fn cartesian_prod<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
        Tensor::f_cartesian_prod(tensors).unwrap()
    }

    /// Panicking form of [`Tensor::f_cat`]; unwraps the `Result`.
    pub fn cat<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Tensor {
        Tensor::f_cat(tensors, dim).unwrap()
    }

    /// Out-variant; panicking form of [`Tensor::f_cat_out`].
    pub fn cat_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T], dim: i64) -> Tensor {
        Tensor::f_cat_out(out, tensors, dim).unwrap()
    }

    /// Panicking form of [`Self::f_cauchy`]; unwraps the `Result`.
    pub fn cauchy(&self, median: f64, sigma: f64) -> Tensor {
        self.f_cauchy(median, sigma).unwrap()
    }

    /// In-place variant; panicking form of [`Self::f_cauchy_`].
    pub fn cauchy_(&mut self, median: f64, sigma: f64) -> Tensor {
        self.f_cauchy_(median, sigma).unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_cauchy_out`].
    pub fn cauchy_out(&self, out: &Tensor, median: f64, sigma: f64) -> Tensor {
        self.f_cauchy_out(out, median, sigma).unwrap()
    }

    /// Panicking form of [`Self::f_ccol_indices`]; unwraps the `Result`.
    pub fn ccol_indices(&self) -> Tensor {
        self.f_ccol_indices().unwrap()
    }

    /// Panicking form of [`Self::f_ccol_indices_copy`]; unwraps the `Result`.
    pub fn ccol_indices_copy(&self) -> Tensor {
        self.f_ccol_indices_copy().unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_ccol_indices_copy_out`].
    pub fn ccol_indices_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_ccol_indices_copy_out(out).unwrap()
    }

    /// Panicking form of [`Tensor::f_cdist`]; unwraps the `Result`.
    pub fn cdist(x1: &Tensor, x2: &Tensor, p: f64, compute_mode: impl Into<Option<i64>>) -> Tensor {
        Tensor::f_cdist(x1, x2, p, compute_mode).unwrap()
    }
6831
    /// Panicking form of [`Self::f_ceil`]; unwraps the `Result`.
    pub fn ceil(&self) -> Tensor {
        self.f_ceil().unwrap()
    }

    /// In-place variant; panicking form of [`Self::f_ceil_`].
    pub fn ceil_(&mut self) -> Tensor {
        self.f_ceil_().unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_ceil_out`].
    pub fn ceil_out(&self, out: &Tensor) -> Tensor {
        self.f_ceil_out(out).unwrap()
    }

    /// Panicking form of [`Self::f_celu`]; unwraps the `Result`.
    pub fn celu(&self) -> Tensor {
        self.f_celu().unwrap()
    }

    /// In-place variant; panicking form of [`Self::f_celu_`].
    pub fn celu_(&mut self) -> Tensor {
        self.f_celu_().unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_celu_out`].
    pub fn celu_out(&self, out: &Tensor) -> Tensor {
        self.f_celu_out(out).unwrap()
    }

    /// Panicking form of [`Tensor::f_chain_matmul`]; unwraps the `Result`.
    pub fn chain_matmul<T: Borrow<Tensor>>(matrices: &[T]) -> Tensor {
        Tensor::f_chain_matmul(matrices).unwrap()
    }

    /// Out-variant; panicking form of [`Tensor::f_chain_matmul_out`].
    pub fn chain_matmul_out<T: Borrow<Tensor>>(out: &Tensor, matrices: &[T]) -> Tensor {
        Tensor::f_chain_matmul_out(out, matrices).unwrap()
    }

    /// Panicking form of [`Self::f_chalf`]; unwraps the `Result`.
    pub fn chalf(&self) -> Tensor {
        self.f_chalf().unwrap()
    }

    /// Panicking form of [`Self::f_channel_shuffle`]; unwraps the `Result`.
    pub fn channel_shuffle(&self, groups: i64) -> Tensor {
        self.f_channel_shuffle(groups).unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_channel_shuffle_out`].
    pub fn channel_shuffle_out(&self, out: &Tensor, groups: i64) -> Tensor {
        self.f_channel_shuffle_out(out, groups).unwrap()
    }

    /// Panicking form of [`Self::f_cholesky`]; unwraps the `Result`.
    pub fn cholesky(&self, upper: bool) -> Tensor {
        self.f_cholesky(upper).unwrap()
    }

    /// Panicking form of [`Self::f_cholesky_inverse`]; unwraps the `Result`.
    pub fn cholesky_inverse(&self, upper: bool) -> Tensor {
        self.f_cholesky_inverse(upper).unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_cholesky_inverse_out`].
    pub fn cholesky_inverse_out(&self, out: &Tensor, upper: bool) -> Tensor {
        self.f_cholesky_inverse_out(out, upper).unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_cholesky_out`].
    pub fn cholesky_out(&self, out: &Tensor, upper: bool) -> Tensor {
        self.f_cholesky_out(out, upper).unwrap()
    }

    /// Panicking form of [`Self::f_cholesky_solve`]; unwraps the `Result`.
    pub fn cholesky_solve(&self, input2: &Tensor, upper: bool) -> Tensor {
        self.f_cholesky_solve(input2, upper).unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_cholesky_solve_out`].
    pub fn cholesky_solve_out(&self, out: &Tensor, input2: &Tensor, upper: bool) -> Tensor {
        self.f_cholesky_solve_out(out, input2, upper).unwrap()
    }

    /// Panicking form of [`Self::f_choose_qparams_optimized`]; unwraps the `Result`.
    pub fn choose_qparams_optimized(
        &self,
        numel: i64,
        n_bins: i64,
        ratio: f64,
        bit_width: i64,
    ) -> (Tensor, Tensor) {
        self.f_choose_qparams_optimized(numel, n_bins, ratio, bit_width).unwrap()
    }
6909
    /// Panicking form of [`Self::f_chunk`]; unwraps the `Result`.
    pub fn chunk(&self, chunks: i64, dim: i64) -> Vec<Tensor> {
        self.f_chunk(chunks, dim).unwrap()
    }

    /// Panicking form of [`Self::f_clamp`] with scalar bounds; unwraps the `Result`.
    pub fn clamp<S: Into<Scalar>>(&self, min: S, max: S) -> Tensor {
        self.f_clamp(min, max).unwrap()
    }

    /// In-place variant; panicking form of [`Self::f_clamp_`].
    pub fn clamp_<S: Into<Scalar>>(&mut self, min: S, max: S) -> Tensor {
        self.f_clamp_(min, max).unwrap()
    }

    /// Upper-bound-only clamp; panicking form of [`Self::f_clamp_max`].
    pub fn clamp_max<S: Into<Scalar>>(&self, max: S) -> Tensor {
        self.f_clamp_max(max).unwrap()
    }

    /// In-place variant; panicking form of [`Self::f_clamp_max_`].
    pub fn clamp_max_<S: Into<Scalar>>(&mut self, max: S) -> Tensor {
        self.f_clamp_max_(max).unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_clamp_max_out`].
    pub fn clamp_max_out<S: Into<Scalar>>(&self, out: &Tensor, max: S) -> Tensor {
        self.f_clamp_max_out(out, max).unwrap()
    }

    /// Tensor upper bound; panicking form of [`Self::f_clamp_max_tensor`].
    pub fn clamp_max_tensor(&self, max: &Tensor) -> Tensor {
        self.f_clamp_max_tensor(max).unwrap()
    }

    /// In-place tensor-bound variant; panics on error.
    pub fn clamp_max_tensor_(&mut self, max: &Tensor) -> Tensor {
        self.f_clamp_max_tensor_(max).unwrap()
    }

    /// Out-variant of the tensor-bound form; panics on error.
    pub fn clamp_max_tensor_out(&self, out: &Tensor, max: &Tensor) -> Tensor {
        self.f_clamp_max_tensor_out(out, max).unwrap()
    }

    /// Lower-bound-only clamp; panicking form of [`Self::f_clamp_min`].
    pub fn clamp_min<S: Into<Scalar>>(&self, min: S) -> Tensor {
        self.f_clamp_min(min).unwrap()
    }

    /// In-place variant; panicking form of [`Self::f_clamp_min_`].
    pub fn clamp_min_<S: Into<Scalar>>(&mut self, min: S) -> Tensor {
        self.f_clamp_min_(min).unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_clamp_min_out`].
    pub fn clamp_min_out<S: Into<Scalar>>(&self, out: &Tensor, min: S) -> Tensor {
        self.f_clamp_min_out(out, min).unwrap()
    }

    /// Tensor lower bound; panicking form of [`Self::f_clamp_min_tensor`].
    pub fn clamp_min_tensor(&self, min: &Tensor) -> Tensor {
        self.f_clamp_min_tensor(min).unwrap()
    }

    /// In-place tensor-bound variant; panics on error.
    pub fn clamp_min_tensor_(&mut self, min: &Tensor) -> Tensor {
        self.f_clamp_min_tensor_(min).unwrap()
    }

    /// Out-variant of the tensor-bound form; panics on error.
    pub fn clamp_min_tensor_out(&self, out: &Tensor, min: &Tensor) -> Tensor {
        self.f_clamp_min_tensor_out(out, min).unwrap()
    }

    /// Out-variant of the scalar-bound clamp; panics on error.
    pub fn clamp_out<S: Into<Scalar>>(&self, out: &Tensor, min: S, max: S) -> Tensor {
        self.f_clamp_out(out, min, max).unwrap()
    }

    /// Clamp with optional tensor bounds; panicking form of [`Self::f_clamp_tensor`].
    pub fn clamp_tensor<T: Borrow<Tensor>>(&self, min: Option<T>, max: Option<T>) -> Tensor {
        self.f_clamp_tensor(min, max).unwrap()
    }

    /// In-place variant with optional tensor bounds; panics on error.
    pub fn clamp_tensor_<T: Borrow<Tensor>>(&mut self, min: Option<T>, max: Option<T>) -> Tensor {
        self.f_clamp_tensor_(min, max).unwrap()
    }

    /// Out-variant with optional tensor bounds; panics on error.
    pub fn clamp_tensor_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        min: Option<T>,
        max: Option<T>,
    ) -> Tensor {
        self.f_clamp_tensor_out(out, min, max).unwrap()
    }

    /// Panicking form of [`Self::f_clip`]; unwraps the `Result`.
    pub fn clip<S: Into<Scalar>>(&self, min: S, max: S) -> Tensor {
        self.f_clip(min, max).unwrap()
    }

    /// In-place variant; panicking form of [`Self::f_clip_`].
    pub fn clip_<S: Into<Scalar>>(&mut self, min: S, max: S) -> Tensor {
        self.f_clip_(min, max).unwrap()
    }

    /// Out-variant; panicking form of [`Self::f_clip_out`].
    pub fn clip_out<S: Into<Scalar>>(&self, out: &Tensor, min: S, max: S) -> Tensor {
        self.f_clip_out(out, min, max).unwrap()
    }

    /// Clip with optional tensor bounds; panicking form of [`Self::f_clip_tensor`].
    pub fn clip_tensor<T: Borrow<Tensor>>(&self, min: Option<T>, max: Option<T>) -> Tensor {
        self.f_clip_tensor(min, max).unwrap()
    }

    /// In-place variant with optional tensor bounds; panics on error.
    pub fn clip_tensor_<T: Borrow<Tensor>>(&mut self, min: Option<T>, max: Option<T>) -> Tensor {
        self.f_clip_tensor_(min, max).unwrap()
    }

    /// Out-variant with optional tensor bounds; panics on error.
    pub fn clip_tensor_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        min: Option<T>,
        max: Option<T>,
    ) -> Tensor {
        self.f_clip_tensor_out(out, min, max).unwrap()
    }
7019
7020    pub fn clone(&self, out: &Tensor) -> Tensor {
7021        self.f_clone(out).unwrap()
7022    }
7023
7024    pub fn coalesce(&self) -> Tensor {
7025        self.f_coalesce().unwrap()
7026    }
7027
    /// Panicking form of `f_col2im`.
    pub fn col2im(
        &self,
        output_size: impl IntList,
        kernel_size: impl IntList,
        dilation: impl IntList,
        padding: impl IntList,
        stride: impl IntList,
    ) -> Tensor {
        self.f_col2im(output_size, kernel_size, dilation, padding, stride).unwrap()
    }

    /// Panicking form of `f_col2im_out`.
    pub fn col2im_out(
        &self,
        out: &Tensor,
        output_size: impl IntList,
        kernel_size: impl IntList,
        dilation: impl IntList,
        padding: impl IntList,
        stride: impl IntList,
    ) -> Tensor {
        self.f_col2im_out(out, output_size, kernel_size, dilation, padding, stride).unwrap()
    }

    /// Panicking form of `f_col_indices`.
    pub fn col_indices(&self) -> Tensor {
        self.f_col_indices().unwrap()
    }

    /// Panicking form of `f_col_indices_copy`.
    pub fn col_indices_copy(&self) -> Tensor {
        self.f_col_indices_copy().unwrap()
    }

    /// Panicking form of `f_col_indices_copy_out`.
    pub fn col_indices_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_col_indices_copy_out(out).unwrap()
    }

    /// Panicking form of the associated function `Tensor::f_column_stack`.
    pub fn column_stack<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
        Tensor::f_column_stack(tensors).unwrap()
    }

    /// Panicking form of `Tensor::f_column_stack_out`.
    pub fn column_stack_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T]) -> Tensor {
        Tensor::f_column_stack_out(out, tensors).unwrap()
    }

    /// Panicking form of `f_combinations`.
    pub fn combinations(&self, r: i64, with_replacement: bool) -> Tensor {
        self.f_combinations(r, with_replacement).unwrap()
    }
7074
    /// Panicking form of the associated function `Tensor::f_complex`.
    pub fn complex(real: &Tensor, imag: &Tensor) -> Tensor {
        Tensor::f_complex(real, imag).unwrap()
    }

    /// Panicking form of `Tensor::f_complex_out`.
    pub fn complex_out(out: &Tensor, real: &Tensor, imag: &Tensor) -> Tensor {
        Tensor::f_complex_out(out, real, imag).unwrap()
    }

    /// Panicking form of `Tensor::f_concat`.
    pub fn concat<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Tensor {
        Tensor::f_concat(tensors, dim).unwrap()
    }

    /// Panicking form of `Tensor::f_concat_out`.
    pub fn concat_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T], dim: i64) -> Tensor {
        Tensor::f_concat_out(out, tensors, dim).unwrap()
    }

    /// Panicking form of `Tensor::f_concatenate`.
    pub fn concatenate<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Tensor {
        Tensor::f_concatenate(tensors, dim).unwrap()
    }

    /// Panicking form of `Tensor::f_concatenate_out`.
    pub fn concatenate_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T], dim: i64) -> Tensor {
        Tensor::f_concatenate_out(out, tensors, dim).unwrap()
    }

    /// Panicking form of `f_conj`.
    pub fn conj(&self) -> Tensor {
        self.f_conj().unwrap()
    }

    /// Panicking form of `f_conj_physical`.
    pub fn conj_physical(&self) -> Tensor {
        self.f_conj_physical().unwrap()
    }

    /// Panicking form of `f_conj_physical_`.
    pub fn conj_physical_(&mut self) -> Tensor {
        self.f_conj_physical_().unwrap()
    }

    /// Panicking form of `f_conj_physical_out`.
    pub fn conj_physical_out(&self, out: &Tensor) -> Tensor {
        self.f_conj_physical_out(out).unwrap()
    }

    /// Panicking form of `f_constant_pad_nd`.
    pub fn constant_pad_nd(&self, pad: impl IntList) -> Tensor {
        self.f_constant_pad_nd(pad).unwrap()
    }

    /// Panicking form of `f_constant_pad_nd_out`.
    pub fn constant_pad_nd_out(&self, out: &Tensor, pad: impl IntList) -> Tensor {
        self.f_constant_pad_nd_out(out, pad).unwrap()
    }

    /// Panicking form of `f_contiguous`.
    pub fn contiguous(&self) -> Tensor {
        self.f_contiguous().unwrap()
    }
7126
    /// Panicking form of `f_conv1d`; `bias` is optional.
    pub fn conv1d<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_conv1d(weight, bias, stride, padding, dilation, groups).unwrap()
    }

    /// Panicking form of `f_conv1d_padding`; padding is given as a string mode.
    pub fn conv1d_padding<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: &str,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_conv1d_padding(weight, bias, stride, padding, dilation, groups).unwrap()
    }

    /// Panicking form of `f_conv2d`.
    pub fn conv2d<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_conv2d(weight, bias, stride, padding, dilation, groups).unwrap()
    }

    /// Panicking form of `f_conv2d_padding`.
    pub fn conv2d_padding<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: &str,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_conv2d_padding(weight, bias, stride, padding, dilation, groups).unwrap()
    }

    /// Panicking form of `f_conv3d`.
    pub fn conv3d<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_conv3d(weight, bias, stride, padding, dilation, groups).unwrap()
    }

    /// Panicking form of `f_conv3d_padding`.
    pub fn conv3d_padding<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: &str,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_conv3d_padding(weight, bias, stride, padding, dilation, groups).unwrap()
    }

    /// Panicking form of `f_conv_depthwise3d`.
    pub fn conv_depthwise3d<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        kernel_size: impl IntList,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
    ) -> Tensor {
        self.f_conv_depthwise3d(weight, kernel_size, bias, stride, padding, dilation).unwrap()
    }

    /// Panicking form of `f_conv_depthwise3d_out`.
    pub fn conv_depthwise3d_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        kernel_size: impl IntList,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
    ) -> Tensor {
        self.f_conv_depthwise3d_out(out, weight, kernel_size, bias, stride, padding, dilation)
            .unwrap()
    }

    /// Panicking form of `f_conv_tbc`.
    pub fn conv_tbc(&self, weight: &Tensor, bias: &Tensor, pad: i64) -> Tensor {
        self.f_conv_tbc(weight, bias, pad).unwrap()
    }
7228
    /// Panicking form of `f_conv_tbc_backward`; returns the three gradient tensors.
    pub fn conv_tbc_backward(
        &self,
        input: &Tensor,
        weight: &Tensor,
        bias: &Tensor,
        pad: i64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_conv_tbc_backward(input, weight, bias, pad).unwrap()
    }

    /// Panicking form of `f_conv_tbc_out`.
    pub fn conv_tbc_out(&self, out: &Tensor, weight: &Tensor, bias: &Tensor, pad: i64) -> Tensor {
        self.f_conv_tbc_out(out, weight, bias, pad).unwrap()
    }

    /// Panicking form of `f_conv_transpose1d`.
    pub fn conv_transpose1d<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        output_padding: impl IntList,
        groups: i64,
        dilation: impl IntList,
    ) -> Tensor {
        self.f_conv_transpose1d(weight, bias, stride, padding, output_padding, groups, dilation)
            .unwrap()
    }

    /// Panicking form of `f_conv_transpose2d`.
    pub fn conv_transpose2d<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        output_padding: impl IntList,
        groups: i64,
        dilation: impl IntList,
    ) -> Tensor {
        self.f_conv_transpose2d(weight, bias, stride, padding, output_padding, groups, dilation)
            .unwrap()
    }

    /// Panicking form of `f_conv_transpose3d`.
    pub fn conv_transpose3d<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        output_padding: impl IntList,
        groups: i64,
        dilation: impl IntList,
    ) -> Tensor {
        self.f_conv_transpose3d(weight, bias, stride, padding, output_padding, groups, dilation)
            .unwrap()
    }

    /// Panicking form of `f_convolution`.
    pub fn convolution<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        transposed: bool,
        output_padding: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_convolution(
            weight,
            bias,
            stride,
            padding,
            dilation,
            transposed,
            output_padding,
            groups,
        )
        .unwrap()
    }
7308
    /// Panicking form of `f_convolution_out`.
    pub fn convolution_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        transposed: bool,
        output_padding: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_convolution_out(
            out,
            weight,
            bias,
            stride,
            padding,
            dilation,
            transposed,
            output_padding,
            groups,
        )
        .unwrap()
    }

    /// Panicking form of `f_convolution_overrideable`.
    pub fn convolution_overrideable<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        transposed: bool,
        output_padding: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_convolution_overrideable(
            weight,
            bias,
            stride,
            padding,
            dilation,
            transposed,
            output_padding,
            groups,
        )
        .unwrap()
    }

    /// Panicking form of `f_convolution_overrideable_out`.
    pub fn convolution_overrideable_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        transposed: bool,
        output_padding: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_convolution_overrideable_out(
            out,
            weight,
            bias,
            stride,
            padding,
            dilation,
            transposed,
            output_padding,
            groups,
        )
        .unwrap()
    }

    /// Panicking form of `f_copy_sparse_to_sparse`.
    pub fn copy_sparse_to_sparse(&self, src: &Tensor, non_blocking: bool) -> Tensor {
        self.f_copy_sparse_to_sparse(src, non_blocking).unwrap()
    }

    /// Panicking form of `f_copy_sparse_to_sparse_`.
    pub fn copy_sparse_to_sparse_(&mut self, src: &Tensor, non_blocking: bool) -> Tensor {
        self.f_copy_sparse_to_sparse_(src, non_blocking).unwrap()
    }

    /// Panicking form of `f_copy_sparse_to_sparse_out`.
    pub fn copy_sparse_to_sparse_out(
        &self,
        out: &Tensor,
        src: &Tensor,
        non_blocking: bool,
    ) -> Tensor {
        self.f_copy_sparse_to_sparse_out(out, src, non_blocking).unwrap()
    }
7401
    /// Panicking form of `f_copysign`.
    pub fn copysign(&self, other: &Tensor) -> Tensor {
        self.f_copysign(other).unwrap()
    }

    /// Panicking form of `f_copysign_`.
    pub fn copysign_(&mut self, other: &Tensor) -> Tensor {
        self.f_copysign_(other).unwrap()
    }

    /// Panicking form of `f_copysign_out`.
    pub fn copysign_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_copysign_out(out, other).unwrap()
    }

    /// Panicking form of `f_copysign_scalar`.
    pub fn copysign_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_copysign_scalar(other).unwrap()
    }

    /// Panicking form of `f_copysign_scalar_`.
    pub fn copysign_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_copysign_scalar_(other).unwrap()
    }

    /// Panicking form of `f_copysign_scalar_out`.
    pub fn copysign_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_copysign_scalar_out(out, other).unwrap()
    }

    /// Panicking form of `f_corrcoef`.
    pub fn corrcoef(&self) -> Tensor {
        self.f_corrcoef().unwrap()
    }

    /// Panicking form of `f_cos`.
    pub fn cos(&self) -> Tensor {
        self.f_cos().unwrap()
    }

    /// Panicking form of `f_cos_`.
    pub fn cos_(&mut self) -> Tensor {
        self.f_cos_().unwrap()
    }

    /// Panicking form of `f_cos_out`.
    pub fn cos_out(&self, out: &Tensor) -> Tensor {
        self.f_cos_out(out).unwrap()
    }

    /// Panicking form of `f_cosh`.
    pub fn cosh(&self) -> Tensor {
        self.f_cosh().unwrap()
    }

    /// Panicking form of `f_cosh_`.
    pub fn cosh_(&mut self) -> Tensor {
        self.f_cosh_().unwrap()
    }

    /// Panicking form of `f_cosh_out`.
    pub fn cosh_out(&self, out: &Tensor) -> Tensor {
        self.f_cosh_out(out).unwrap()
    }
7453
    /// Panicking form of the associated function `Tensor::f_cosine_embedding_loss`.
    pub fn cosine_embedding_loss(
        input1: &Tensor,
        input2: &Tensor,
        target: &Tensor,
        margin: f64,
        reduction: crate::Reduction,
    ) -> Tensor {
        Tensor::f_cosine_embedding_loss(input1, input2, target, margin, reduction).unwrap()
    }

    /// Panicking form of `Tensor::f_cosine_similarity`.
    pub fn cosine_similarity(x1: &Tensor, x2: &Tensor, dim: i64, eps: f64) -> Tensor {
        Tensor::f_cosine_similarity(x1, x2, dim, eps).unwrap()
    }

    /// Panicking form of `f_count_nonzero`; `dim` is optional.
    pub fn count_nonzero(&self, dim: impl Into<Option<i64>>) -> Tensor {
        self.f_count_nonzero(dim).unwrap()
    }

    /// Panicking form of `f_count_nonzero_dim_intlist`.
    pub fn count_nonzero_dim_intlist(&self, dim: impl IntList) -> Tensor {
        self.f_count_nonzero_dim_intlist(dim).unwrap()
    }

    /// Panicking form of `f_count_nonzero_dim_intlist_out`.
    pub fn count_nonzero_dim_intlist_out(&self, out: &Tensor, dim: impl IntList) -> Tensor {
        self.f_count_nonzero_dim_intlist_out(out, dim).unwrap()
    }

    /// Panicking form of `f_count_nonzero_out`.
    pub fn count_nonzero_out(&self, out: &Tensor, dim: impl Into<Option<i64>>) -> Tensor {
        self.f_count_nonzero_out(out, dim).unwrap()
    }

    /// Panicking form of `f_cov`; `fweights`/`aweights` are optional.
    pub fn cov<T: Borrow<Tensor>>(
        &self,
        correction: i64,
        fweights: Option<T>,
        aweights: Option<T>,
    ) -> Tensor {
        self.f_cov(correction, fweights, aweights).unwrap()
    }

    /// Panicking form of `f_cross`; `dim` is optional.
    pub fn cross(&self, other: &Tensor, dim: impl Into<Option<i64>>) -> Tensor {
        self.f_cross(other, dim).unwrap()
    }

    /// Panicking form of `f_cross_entropy_loss`.
    pub fn cross_entropy_loss<T: Borrow<Tensor>>(
        &self,
        target: &Tensor,
        weight: Option<T>,
        reduction: crate::Reduction,
        ignore_index: i64,
        label_smoothing: f64,
    ) -> Tensor {
        self.f_cross_entropy_loss(target, weight, reduction, ignore_index, label_smoothing).unwrap()
    }

    /// Panicking form of `f_cross_out`.
    pub fn cross_out(&self, out: &Tensor, other: &Tensor, dim: impl Into<Option<i64>>) -> Tensor {
        self.f_cross_out(out, other, dim).unwrap()
    }

    /// Panicking form of `f_crow_indices`.
    pub fn crow_indices(&self) -> Tensor {
        self.f_crow_indices().unwrap()
    }

    /// Panicking form of `f_crow_indices_copy`.
    pub fn crow_indices_copy(&self) -> Tensor {
        self.f_crow_indices_copy().unwrap()
    }

    /// Panicking form of `f_crow_indices_copy_out`.
    pub fn crow_indices_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_crow_indices_copy_out(out).unwrap()
    }
7523
    /// Panicking form of the associated function `Tensor::f_ctc_loss`.
    pub fn ctc_loss(
        log_probs: &Tensor,
        targets: &Tensor,
        input_lengths: impl IntList,
        target_lengths: impl IntList,
        blank: i64,
        reduction: crate::Reduction,
        zero_infinity: bool,
    ) -> Tensor {
        Tensor::f_ctc_loss(
            log_probs,
            targets,
            input_lengths,
            target_lengths,
            blank,
            reduction,
            zero_infinity,
        )
        .unwrap()
    }

    /// Panicking form of `Tensor::f_ctc_loss_tensor`; lengths are given as tensors.
    pub fn ctc_loss_tensor(
        log_probs: &Tensor,
        targets: &Tensor,
        input_lengths: &Tensor,
        target_lengths: &Tensor,
        blank: i64,
        reduction: crate::Reduction,
        zero_infinity: bool,
    ) -> Tensor {
        Tensor::f_ctc_loss_tensor(
            log_probs,
            targets,
            input_lengths,
            target_lengths,
            blank,
            reduction,
            zero_infinity,
        )
        .unwrap()
    }

    /// Panicking form of `Tensor::f_cudnn_affine_grid_generator`.
    pub fn cudnn_affine_grid_generator(theta: &Tensor, n: i64, c: i64, h: i64, w: i64) -> Tensor {
        Tensor::f_cudnn_affine_grid_generator(theta, n, c, h, w).unwrap()
    }

    /// Panicking form of `Tensor::f_cudnn_affine_grid_generator_backward`.
    pub fn cudnn_affine_grid_generator_backward(
        grad: &Tensor,
        n: i64,
        c: i64,
        h: i64,
        w: i64,
    ) -> Tensor {
        Tensor::f_cudnn_affine_grid_generator_backward(grad, n, c, h, w).unwrap()
    }

    /// Panicking form of `Tensor::f_cudnn_affine_grid_generator_backward_out`.
    pub fn cudnn_affine_grid_generator_backward_out(
        out: &Tensor,
        grad: &Tensor,
        n: i64,
        c: i64,
        h: i64,
        w: i64,
    ) -> Tensor {
        Tensor::f_cudnn_affine_grid_generator_backward_out(out, grad, n, c, h, w).unwrap()
    }

    /// Panicking form of `Tensor::f_cudnn_affine_grid_generator_out`.
    pub fn cudnn_affine_grid_generator_out(
        out: &Tensor,
        theta: &Tensor,
        n: i64,
        c: i64,
        h: i64,
        w: i64,
    ) -> Tensor {
        Tensor::f_cudnn_affine_grid_generator_out(out, theta, n, c, h, w).unwrap()
    }
7601
    /// Panicking form of `f_cudnn_batch_norm`; returns four output tensors.
    pub fn cudnn_batch_norm<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        running_mean: Option<T>,
        running_var: Option<T>,
        training: bool,
        exponential_average_factor: f64,
        epsilon: f64,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        self.f_cudnn_batch_norm(
            weight,
            bias,
            running_mean,
            running_var,
            training,
            exponential_average_factor,
            epsilon,
        )
        .unwrap()
    }

    /// Panicking form of `f_cudnn_batch_norm_backward`; returns three gradient tensors.
    pub fn cudnn_batch_norm_backward<T: Borrow<Tensor>>(
        &self,
        grad_output: &Tensor,
        weight: &Tensor,
        running_mean: Option<T>,
        running_var: Option<T>,
        save_mean: Option<T>,
        save_var: Option<T>,
        epsilon: f64,
        reservespace: &Tensor,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_cudnn_batch_norm_backward(
            grad_output,
            weight,
            running_mean,
            running_var,
            save_mean,
            save_var,
            epsilon,
            reservespace,
        )
        .unwrap()
    }

    /// Panicking form of `f_cudnn_batch_norm_backward_out`.
    pub fn cudnn_batch_norm_backward_out<T: Borrow<Tensor>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        grad_output: &Tensor,
        weight: &Tensor,
        running_mean: Option<T>,
        running_var: Option<T>,
        save_mean: Option<T>,
        save_var: Option<T>,
        epsilon: f64,
        reservespace: &Tensor,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_cudnn_batch_norm_backward_out(
            out0,
            out1,
            out2,
            grad_output,
            weight,
            running_mean,
            running_var,
            save_mean,
            save_var,
            epsilon,
            reservespace,
        )
        .unwrap()
    }

    /// Panicking form of `f_cudnn_batch_norm_out`.
    pub fn cudnn_batch_norm_out<T: Borrow<Tensor>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        out3: &Tensor,
        weight: &Tensor,
        bias: Option<T>,
        running_mean: Option<T>,
        running_var: Option<T>,
        training: bool,
        exponential_average_factor: f64,
        epsilon: f64,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        self.f_cudnn_batch_norm_out(
            out0,
            out1,
            out2,
            out3,
            weight,
            bias,
            running_mean,
            running_var,
            training,
            exponential_average_factor,
            epsilon,
        )
        .unwrap()
    }

    /// Panicking form of `f_cudnn_convolution`.
    pub fn cudnn_convolution(
        &self,
        weight: &Tensor,
        padding: impl IntList,
        stride: impl IntList,
        dilation: impl IntList,
        groups: i64,
        benchmark: bool,
        deterministic: bool,
        allow_tf32: bool,
    ) -> Tensor {
        self.f_cudnn_convolution(
            weight,
            padding,
            stride,
            dilation,
            groups,
            benchmark,
            deterministic,
            allow_tf32,
        )
        .unwrap()
    }
7731
    /// Panicking form of `f_cudnn_convolution_add_relu`.
    pub fn cudnn_convolution_add_relu<T: Borrow<Tensor>, S: Into<Scalar>>(
        &self,
        weight: &Tensor,
        z: &Tensor,
        alpha: S,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_cudnn_convolution_add_relu(weight, z, alpha, bias, stride, padding, dilation, groups)
            .unwrap()
    }

    /// Panicking form of `f_cudnn_convolution_add_relu_out`.
    pub fn cudnn_convolution_add_relu_out<T: Borrow<Tensor>, S: Into<Scalar>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        z: &Tensor,
        alpha: S,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_cudnn_convolution_add_relu_out(
            out, weight, z, alpha, bias, stride, padding, dilation, groups,
        )
        .unwrap()
    }

    /// Panicking form of `f_cudnn_convolution_out`.
    pub fn cudnn_convolution_out(
        &self,
        out: &Tensor,
        weight: &Tensor,
        padding: impl IntList,
        stride: impl IntList,
        dilation: impl IntList,
        groups: i64,
        benchmark: bool,
        deterministic: bool,
        allow_tf32: bool,
    ) -> Tensor {
        self.f_cudnn_convolution_out(
            out,
            weight,
            padding,
            stride,
            dilation,
            groups,
            benchmark,
            deterministic,
            allow_tf32,
        )
        .unwrap()
    }

    /// Panicking form of `f_cudnn_convolution_relu`.
    pub fn cudnn_convolution_relu<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_cudnn_convolution_relu(weight, bias, stride, padding, dilation, groups).unwrap()
    }

    /// Panicking form of `f_cudnn_convolution_relu_out`.
    pub fn cudnn_convolution_relu_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_cudnn_convolution_relu_out(out, weight, bias, stride, padding, dilation, groups)
            .unwrap()
    }

    /// Panicking form of `f_cudnn_convolution_transpose`.
    pub fn cudnn_convolution_transpose(
        &self,
        weight: &Tensor,
        padding: impl IntList,
        output_padding: impl IntList,
        stride: impl IntList,
        dilation: impl IntList,
        groups: i64,
        benchmark: bool,
        deterministic: bool,
        allow_tf32: bool,
    ) -> Tensor {
        self.f_cudnn_convolution_transpose(
            weight,
            padding,
            output_padding,
            stride,
            dilation,
            groups,
            benchmark,
            deterministic,
            allow_tf32,
        )
        .unwrap()
    }

    /// Panicking form of `f_cudnn_convolution_transpose_out`.
    pub fn cudnn_convolution_transpose_out(
        &self,
        out: &Tensor,
        weight: &Tensor,
        padding: impl IntList,
        output_padding: impl IntList,
        stride: impl IntList,
        dilation: impl IntList,
        groups: i64,
        benchmark: bool,
        deterministic: bool,
        allow_tf32: bool,
    ) -> Tensor {
        self.f_cudnn_convolution_transpose_out(
            out,
            weight,
            padding,
            output_padding,
            stride,
            dilation,
            groups,
            benchmark,
            deterministic,
            allow_tf32,
        )
        .unwrap()
    }
7870
    /// Panicking form of `f_cudnn_grid_sampler`.
    pub fn cudnn_grid_sampler(&self, grid: &Tensor) -> Tensor {
        self.f_cudnn_grid_sampler(grid).unwrap()
    }

    /// Panicking form of `f_cudnn_grid_sampler_backward`.
    pub fn cudnn_grid_sampler_backward(
        &self,
        grid: &Tensor,
        grad_output: &Tensor,
    ) -> (Tensor, Tensor) {
        self.f_cudnn_grid_sampler_backward(grid, grad_output).unwrap()
    }

    /// Panicking form of `f_cudnn_grid_sampler_backward_out`.
    pub fn cudnn_grid_sampler_backward_out(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        grid: &Tensor,
        grad_output: &Tensor,
    ) -> (Tensor, Tensor) {
        self.f_cudnn_grid_sampler_backward_out(out0, out1, grid, grad_output).unwrap()
    }

    /// Panicking form of `f_cudnn_grid_sampler_out`.
    pub fn cudnn_grid_sampler_out(&self, out: &Tensor, grid: &Tensor) -> Tensor {
        self.f_cudnn_grid_sampler_out(out, grid).unwrap()
    }

    /// Panicking form of `f_cudnn_is_acceptable`; returns a plain `bool`.
    pub fn cudnn_is_acceptable(&self) -> bool {
        self.f_cudnn_is_acceptable().unwrap()
    }

    /// Panicking form of `f_cummax`; returns `(values, indices)`.
    pub fn cummax(&self, dim: i64) -> (Tensor, Tensor) {
        self.f_cummax(dim).unwrap()
    }

    /// Panicking form of `f_cummax_out`.
    pub fn cummax_out(&self, values: &Tensor, indices: &Tensor, dim: i64) -> (Tensor, Tensor) {
        self.f_cummax_out(values, indices, dim).unwrap()
    }

    /// Panicking form of `f_cummaxmin_backward`.
    pub fn cummaxmin_backward(&self, grad: &Tensor, indices: &Tensor, dim: i64) -> Tensor {
        self.f_cummaxmin_backward(grad, indices, dim).unwrap()
    }

    /// Panicking form of `f_cummin`; returns `(values, indices)`.
    pub fn cummin(&self, dim: i64) -> (Tensor, Tensor) {
        self.f_cummin(dim).unwrap()
    }

    /// Panicking form of `f_cummin_out`.
    pub fn cummin_out(&self, values: &Tensor, indices: &Tensor, dim: i64) -> (Tensor, Tensor) {
        self.f_cummin_out(values, indices, dim).unwrap()
    }

    /// Panicking form of `f_cumprod`; `dtype` is optional.
    pub fn cumprod(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_cumprod(dim, dtype).unwrap()
    }

    /// Panicking form of `f_cumprod_`.
    pub fn cumprod_(&mut self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_cumprod_(dim, dtype).unwrap()
    }

    /// Panicking form of `f_cumprod_backward`.
    pub fn cumprod_backward(&self, grad: &Tensor, dim: i64, output: &Tensor) -> Tensor {
        self.f_cumprod_backward(grad, dim, output).unwrap()
    }

    /// Panicking form of `f_cumprod_out`.
    pub fn cumprod_out(&self, out: &Tensor, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_cumprod_out(out, dim, dtype).unwrap()
    }

    /// Panicking form of `f_cumsum`; `dtype` is optional.
    pub fn cumsum(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_cumsum(dim, dtype).unwrap()
    }

    /// Panicking form of `f_cumsum_`.
    pub fn cumsum_(&mut self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_cumsum_(dim, dtype).unwrap()
    }

    /// Panicking form of `f_cumsum_out`.
    pub fn cumsum_out(&self, out: &Tensor, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_cumsum_out(out, dim, dtype).unwrap()
    }
7948
    /// Panicking form of the associated function `Tensor::f_cumulative_trapezoid`.
    pub fn cumulative_trapezoid(y: &Tensor, dim: i64) -> Tensor {
        Tensor::f_cumulative_trapezoid(y, dim).unwrap()
    }

    /// Panicking form of `Tensor::f_cumulative_trapezoid_x`.
    pub fn cumulative_trapezoid_x(y: &Tensor, x: &Tensor, dim: i64) -> Tensor {
        Tensor::f_cumulative_trapezoid_x(y, x, dim).unwrap()
    }

    /// Panicking form of `f_data`.
    pub fn data(&self) -> Tensor {
        self.f_data().unwrap()
    }

    /// Panicking form of `f_deg2rad`.
    pub fn deg2rad(&self) -> Tensor {
        self.f_deg2rad().unwrap()
    }

    /// Panicking form of `f_deg2rad_`.
    pub fn deg2rad_(&mut self) -> Tensor {
        self.f_deg2rad_().unwrap()
    }

    /// Panicking form of `f_deg2rad_out`.
    pub fn deg2rad_out(&self, out: &Tensor) -> Tensor {
        self.f_deg2rad_out(out).unwrap()
    }

    /// Panicking form of `f_dense_dim`; returns a plain `i64`.
    pub fn dense_dim(&self) -> i64 {
        self.f_dense_dim().unwrap()
    }

    /// Panicking form of `f_dequantize`.
    pub fn dequantize(&self) -> Tensor {
        self.f_dequantize().unwrap()
    }

    /// Panicking form of `f_dequantize_self_out`.
    pub fn dequantize_self_out(&self, out: &Tensor) -> Tensor {
        self.f_dequantize_self_out(out).unwrap()
    }

    /// Panicking form of `Tensor::f_dequantize_tensors`; returns a `Vec<Tensor>`.
    pub fn dequantize_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Vec<Tensor> {
        Tensor::f_dequantize_tensors(tensors).unwrap()
    }

    /// Panicking form of `Tensor::f_dequantize_tensors_out`; writes into `out`, returns nothing.
    pub fn dequantize_tensors_out<T: Borrow<Tensor>>(out: &[T], tensors: &[T]) {
        Tensor::f_dequantize_tensors_out(out, tensors).unwrap()
    }

    /// Panicking form of `f_det`.
    pub fn det(&self) -> Tensor {
        self.f_det().unwrap()
    }

    /// Panicking form of `f_detach`.
    pub fn detach(&self) -> Tensor {
        self.f_detach().unwrap()
    }

    /// Panicking form of `f_detach_`.
    pub fn detach_(&mut self) -> Tensor {
        self.f_detach_().unwrap()
    }

    /// Panicking form of `f_detach_copy`.
    pub fn detach_copy(&self) -> Tensor {
        self.f_detach_copy().unwrap()
    }

    /// Panicking form of `f_detach_copy_out`.
    pub fn detach_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_detach_copy_out(out).unwrap()
    }

    /// Panicking form of `f_diag`.
    pub fn diag(&self, diagonal: i64) -> Tensor {
        self.f_diag(diagonal).unwrap()
    }

    /// Panicking form of `f_diag_embed`.
    pub fn diag_embed(&self, offset: i64, dim1: i64, dim2: i64) -> Tensor {
        self.f_diag_embed(offset, dim1, dim2).unwrap()
    }

    /// Panicking form of `f_diag_embed_out`.
    pub fn diag_embed_out(&self, out: &Tensor, offset: i64, dim1: i64, dim2: i64) -> Tensor {
        self.f_diag_embed_out(out, offset, dim1, dim2).unwrap()
    }

    /// Panicking form of `f_diag_out`.
    pub fn diag_out(&self, out: &Tensor, diagonal: i64) -> Tensor {
        self.f_diag_out(out, diagonal).unwrap()
    }

    /// Panicking form of `f_diagflat`.
    pub fn diagflat(&self, offset: i64) -> Tensor {
        self.f_diagflat(offset).unwrap()
    }
8032
8033    pub fn diagonal(&self, offset: i64, dim1: i64, dim2: i64) -> Tensor {
8034        self.f_diagonal(offset, dim1, dim2).unwrap()
8035    }
8036
8037    pub fn diagonal_backward(
8038        grad_output: &Tensor,
8039        input_sizes: impl IntList,
8040        offset: i64,
8041        dim1: i64,
8042        dim2: i64,
8043    ) -> Tensor {
8044        Tensor::f_diagonal_backward(grad_output, input_sizes, offset, dim1, dim2).unwrap()
8045    }
8046
    /// Panicking wrapper for `f_diagonal_backward_out` (associated function).
    pub fn diagonal_backward_out(
        out: &Tensor,
        grad_output: &Tensor,
        input_sizes: impl IntList,
        offset: i64,
        dim1: i64,
        dim2: i64,
    ) -> Tensor {
        Tensor::f_diagonal_backward_out(out, grad_output, input_sizes, offset, dim1, dim2).unwrap()
    }

    /// Panicking wrapper for `f_diagonal_copy`.
    pub fn diagonal_copy(&self, offset: i64, dim1: i64, dim2: i64) -> Tensor {
        self.f_diagonal_copy(offset, dim1, dim2).unwrap()
    }

    /// Panicking wrapper for `f_diagonal_copy_out`.
    pub fn diagonal_copy_out(&self, out: &Tensor, offset: i64, dim1: i64, dim2: i64) -> Tensor {
        self.f_diagonal_copy_out(out, offset, dim1, dim2).unwrap()
    }

    /// Panicking wrapper for `f_diagonal_scatter`.
    pub fn diagonal_scatter(&self, src: &Tensor, offset: i64, dim1: i64, dim2: i64) -> Tensor {
        self.f_diagonal_scatter(src, offset, dim1, dim2).unwrap()
    }

    /// Panicking wrapper for `f_diagonal_scatter_out`.
    pub fn diagonal_scatter_out(
        &self,
        out: &Tensor,
        src: &Tensor,
        offset: i64,
        dim1: i64,
        dim2: i64,
    ) -> Tensor {
        self.f_diagonal_scatter_out(out, src, offset, dim1, dim2).unwrap()
    }

    /// Panicking wrapper for `f_diff`.
    pub fn diff<T: Borrow<Tensor>>(
        &self,
        n: i64,
        dim: i64,
        prepend: Option<T>,
        append: Option<T>,
    ) -> Tensor {
        self.f_diff(n, dim, prepend, append).unwrap()
    }

    /// Panicking wrapper for `f_diff_out`.
    pub fn diff_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        n: i64,
        dim: i64,
        prepend: Option<T>,
        append: Option<T>,
    ) -> Tensor {
        self.f_diff_out(out, n, dim, prepend, append).unwrap()
    }

    /// Panicking wrapper for `f_digamma`.
    pub fn digamma(&self) -> Tensor {
        self.f_digamma().unwrap()
    }

    /// Panicking wrapper for `f_digamma_`.
    pub fn digamma_(&mut self) -> Tensor {
        self.f_digamma_().unwrap()
    }

    /// Panicking wrapper for `f_digamma_out`.
    pub fn digamma_out(&self, out: &Tensor) -> Tensor {
        self.f_digamma_out(out).unwrap()
    }

    /// Panicking wrapper for `f_dist`.
    pub fn dist(&self, other: &Tensor) -> Tensor {
        self.f_dist(other).unwrap()
    }

    /// Panicking wrapper for `f_dist_out`.
    pub fn dist_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_dist_out(out, other).unwrap()
    }

    /// Panicking wrapper for `f_div`. `g_`-prefixed to avoid clashing with `std::ops::Div`.
    pub fn g_div(&self, other: &Tensor) -> Tensor {
        self.f_div(other).unwrap()
    }

    /// Panicking wrapper for `f_div_`.
    pub fn g_div_(&mut self, other: &Tensor) -> Tensor {
        self.f_div_(other).unwrap()
    }

    /// Panicking wrapper for `f_div_out`.
    pub fn div_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_div_out(out, other).unwrap()
    }

    /// Panicking wrapper for `f_div_out_mode`.
    pub fn div_out_mode(&self, out: &Tensor, other: &Tensor, rounding_mode: &str) -> Tensor {
        self.f_div_out_mode(out, other, rounding_mode).unwrap()
    }

    /// Panicking wrapper for `f_div_scalar`.
    pub fn g_div_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_div_scalar(other).unwrap()
    }

    /// Panicking wrapper for `f_div_scalar_`.
    pub fn g_div_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_div_scalar_(other).unwrap()
    }

    /// Panicking wrapper for `f_div_scalar_mode`.
    pub fn g_div_scalar_mode<S: Into<Scalar>>(&self, other: S, rounding_mode: &str) -> Tensor {
        self.f_div_scalar_mode(other, rounding_mode).unwrap()
    }

    /// Panicking wrapper for `f_div_scalar_mode_`.
    pub fn g_div_scalar_mode_<S: Into<Scalar>>(&mut self, other: S, rounding_mode: &str) -> Tensor {
        self.f_div_scalar_mode_(other, rounding_mode).unwrap()
    }
8153
    /// Panicking wrapper for `f_div_scalar_mode_out`.
    pub fn div_scalar_mode_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        other: S,
        rounding_mode: &str,
    ) -> Tensor {
        self.f_div_scalar_mode_out(out, other, rounding_mode).unwrap()
    }

    /// Panicking wrapper for `f_div_scalar_out`.
    pub fn div_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_div_scalar_out(out, other).unwrap()
    }

    /// Panicking wrapper for `f_div_tensor_mode`.
    pub fn g_div_tensor_mode(&self, other: &Tensor, rounding_mode: &str) -> Tensor {
        self.f_div_tensor_mode(other, rounding_mode).unwrap()
    }

    /// Panicking wrapper for `f_div_tensor_mode_`.
    pub fn g_div_tensor_mode_(&mut self, other: &Tensor, rounding_mode: &str) -> Tensor {
        self.f_div_tensor_mode_(other, rounding_mode).unwrap()
    }

    /// Panicking wrapper for `f_divide`.
    pub fn divide(&self, other: &Tensor) -> Tensor {
        self.f_divide(other).unwrap()
    }

    /// Panicking wrapper for `f_divide_`.
    pub fn divide_(&mut self, other: &Tensor) -> Tensor {
        self.f_divide_(other).unwrap()
    }

    /// Panicking wrapper for `f_divide_out`.
    pub fn divide_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_divide_out(out, other).unwrap()
    }

    /// Panicking wrapper for `f_divide_out_mode`.
    pub fn divide_out_mode(&self, out: &Tensor, other: &Tensor, rounding_mode: &str) -> Tensor {
        self.f_divide_out_mode(out, other, rounding_mode).unwrap()
    }

    /// Panicking wrapper for `f_divide_scalar`.
    pub fn divide_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_divide_scalar(other).unwrap()
    }

    /// Panicking wrapper for `f_divide_scalar_`.
    pub fn divide_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_divide_scalar_(other).unwrap()
    }

    /// Panicking wrapper for `f_divide_scalar_mode`.
    pub fn divide_scalar_mode<S: Into<Scalar>>(&self, other: S, rounding_mode: &str) -> Tensor {
        self.f_divide_scalar_mode(other, rounding_mode).unwrap()
    }

    /// Panicking wrapper for `f_divide_scalar_mode_`.
    pub fn divide_scalar_mode_<S: Into<Scalar>>(
        &mut self,
        other: S,
        rounding_mode: &str,
    ) -> Tensor {
        self.f_divide_scalar_mode_(other, rounding_mode).unwrap()
    }

    /// Panicking wrapper for `f_divide_tensor_mode`.
    pub fn divide_tensor_mode(&self, other: &Tensor, rounding_mode: &str) -> Tensor {
        self.f_divide_tensor_mode(other, rounding_mode).unwrap()
    }

    /// Panicking wrapper for `f_divide_tensor_mode_`.
    pub fn divide_tensor_mode_(&mut self, other: &Tensor, rounding_mode: &str) -> Tensor {
        self.f_divide_tensor_mode_(other, rounding_mode).unwrap()
    }

    /// Panicking wrapper for `f_dot`.
    pub fn dot(&self, tensor: &Tensor) -> Tensor {
        self.f_dot(tensor).unwrap()
    }

    /// Panicking wrapper for `f_dot_out`.
    pub fn dot_out(&self, out: &Tensor, tensor: &Tensor) -> Tensor {
        self.f_dot_out(out, tensor).unwrap()
    }

    /// Panicking wrapper for `f_dropout`.
    pub fn dropout(&self, p: f64, train: bool) -> Tensor {
        self.f_dropout(p, train).unwrap()
    }

    /// Panicking wrapper for `f_dropout_`.
    pub fn dropout_(&mut self, p: f64, train: bool) -> Tensor {
        self.f_dropout_(p, train).unwrap()
    }

    /// Panicking wrapper for `f_dsplit`.
    pub fn dsplit(&self, sections: i64) -> Vec<Tensor> {
        self.f_dsplit(sections).unwrap()
    }

    /// Panicking wrapper for `f_dsplit_array`.
    pub fn dsplit_array(&self, indices: impl IntList) -> Vec<Tensor> {
        self.f_dsplit_array(indices).unwrap()
    }

    /// Panicking wrapper for `f_dstack` (associated function).
    pub fn dstack<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
        Tensor::f_dstack(tensors).unwrap()
    }

    /// Panicking wrapper for `f_dstack_out` (associated function).
    pub fn dstack_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T]) -> Tensor {
        Tensor::f_dstack_out(out, tensors).unwrap()
    }

    /// Panicking wrapper for `f_einsum` (associated function).
    pub fn einsum<T: Borrow<Tensor>>(
        equation: &str,
        tensors: &[T],
        path: impl IntListOption,
    ) -> Tensor {
        Tensor::f_einsum(equation, tensors, path).unwrap()
    }
8258
    /// Panicking wrapper for `f_elu`.
    pub fn elu(&self) -> Tensor {
        self.f_elu().unwrap()
    }

    /// Panicking wrapper for `f_elu_`.
    pub fn elu_(&mut self) -> Tensor {
        self.f_elu_().unwrap()
    }

    /// Panicking wrapper for `f_elu_backward` (associated function).
    pub fn elu_backward<S: Into<Scalar>>(
        grad_output: &Tensor,
        alpha: S,
        scale: S,
        input_scale: S,
        is_result: bool,
        self_or_result: &Tensor,
    ) -> Tensor {
        Tensor::f_elu_backward(grad_output, alpha, scale, input_scale, is_result, self_or_result)
            .unwrap()
    }

    /// Panicking wrapper for `f_elu_backward_grad_input` (associated function).
    pub fn elu_backward_grad_input<S: Into<Scalar>>(
        grad_input: &Tensor,
        grad_output: &Tensor,
        alpha: S,
        scale: S,
        input_scale: S,
        is_result: bool,
        self_or_result: &Tensor,
    ) -> Tensor {
        Tensor::f_elu_backward_grad_input(
            grad_input,
            grad_output,
            alpha,
            scale,
            input_scale,
            is_result,
            self_or_result,
        )
        .unwrap()
    }

    /// Panicking wrapper for `f_elu_out`.
    pub fn elu_out(&self, out: &Tensor) -> Tensor {
        self.f_elu_out(out).unwrap()
    }

    /// Panicking wrapper for `f_embedding` (associated function).
    pub fn embedding(
        weight: &Tensor,
        indices: &Tensor,
        padding_idx: i64,
        scale_grad_by_freq: bool,
        sparse: bool,
    ) -> Tensor {
        Tensor::f_embedding(weight, indices, padding_idx, scale_grad_by_freq, sparse).unwrap()
    }

    /// Panicking wrapper for `f_embedding_backward` (associated function).
    pub fn embedding_backward(
        grad: &Tensor,
        indices: &Tensor,
        num_weights: i64,
        padding_idx: i64,
        scale_grad_by_freq: bool,
        sparse: bool,
    ) -> Tensor {
        Tensor::f_embedding_backward(
            grad,
            indices,
            num_weights,
            padding_idx,
            scale_grad_by_freq,
            sparse,
        )
        .unwrap()
    }

    /// Panicking wrapper for `f_embedding_bag` (associated function).
    pub fn embedding_bag<T: Borrow<Tensor>>(
        weight: &Tensor,
        indices: &Tensor,
        offsets: &Tensor,
        scale_grad_by_freq: bool,
        mode: i64,
        sparse: bool,
        per_sample_weights: Option<T>,
        include_last_offset: bool,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        Tensor::f_embedding_bag(
            weight,
            indices,
            offsets,
            scale_grad_by_freq,
            mode,
            sparse,
            per_sample_weights,
            include_last_offset,
        )
        .unwrap()
    }

    /// Panicking wrapper for `f_embedding_bag_padding_idx` (associated function).
    pub fn embedding_bag_padding_idx<T: Borrow<Tensor>>(
        weight: &Tensor,
        indices: &Tensor,
        offsets: &Tensor,
        scale_grad_by_freq: bool,
        mode: i64,
        sparse: bool,
        per_sample_weights: Option<T>,
        include_last_offset: bool,
        padding_idx: impl Into<Option<i64>>,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        Tensor::f_embedding_bag_padding_idx(
            weight,
            indices,
            offsets,
            scale_grad_by_freq,
            mode,
            sparse,
            per_sample_weights,
            include_last_offset,
            padding_idx,
        )
        .unwrap()
    }
8380
    /// Panicking wrapper for `f_embedding_dense_backward` (associated function).
    pub fn embedding_dense_backward(
        grad_output: &Tensor,
        indices: &Tensor,
        num_weights: i64,
        padding_idx: i64,
        scale_grad_by_freq: bool,
    ) -> Tensor {
        Tensor::f_embedding_dense_backward(
            grad_output,
            indices,
            num_weights,
            padding_idx,
            scale_grad_by_freq,
        )
        .unwrap()
    }

    /// Panicking wrapper for `f_embedding_dense_backward_out` (associated function).
    pub fn embedding_dense_backward_out(
        out: &Tensor,
        grad_output: &Tensor,
        indices: &Tensor,
        num_weights: i64,
        padding_idx: i64,
        scale_grad_by_freq: bool,
    ) -> Tensor {
        Tensor::f_embedding_dense_backward_out(
            out,
            grad_output,
            indices,
            num_weights,
            padding_idx,
            scale_grad_by_freq,
        )
        .unwrap()
    }

    /// Panicking wrapper for `f_embedding_out` (associated function).
    pub fn embedding_out(
        out: &Tensor,
        weight: &Tensor,
        indices: &Tensor,
        padding_idx: i64,
        scale_grad_by_freq: bool,
        sparse: bool,
    ) -> Tensor {
        Tensor::f_embedding_out(out, weight, indices, padding_idx, scale_grad_by_freq, sparse)
            .unwrap()
    }

    /// Panicking wrapper for `f_embedding_renorm`.
    pub fn embedding_renorm(&self, indices: &Tensor, max_norm: f64, norm_type: f64) -> Tensor {
        self.f_embedding_renorm(indices, max_norm, norm_type).unwrap()
    }

    /// Panicking wrapper for `f_embedding_renorm_`.
    pub fn embedding_renorm_(&mut self, indices: &Tensor, max_norm: f64, norm_type: f64) -> Tensor {
        self.f_embedding_renorm_(indices, max_norm, norm_type).unwrap()
    }

    /// Panicking wrapper for `f_embedding_renorm_out`.
    pub fn embedding_renorm_out(
        &self,
        out: &Tensor,
        indices: &Tensor,
        max_norm: f64,
        norm_type: f64,
    ) -> Tensor {
        self.f_embedding_renorm_out(out, indices, max_norm, norm_type).unwrap()
    }

    /// Panicking wrapper for `f_embedding_sparse_backward` (associated function).
    pub fn embedding_sparse_backward(
        grad: &Tensor,
        indices: &Tensor,
        num_weights: i64,
        padding_idx: i64,
        scale_grad_by_freq: bool,
    ) -> Tensor {
        Tensor::f_embedding_sparse_backward(
            grad,
            indices,
            num_weights,
            padding_idx,
            scale_grad_by_freq,
        )
        .unwrap()
    }

    /// Panicking wrapper for `f_empty` (associated function).
    pub fn empty(size: impl IntList, options: (Kind, Device)) -> Tensor {
        Tensor::f_empty(size, options).unwrap()
    }

    /// Panicking wrapper for `f_empty_like`.
    pub fn empty_like(&self) -> Tensor {
        self.f_empty_like().unwrap()
    }

    /// Panicking wrapper for `f_empty_like_out`.
    pub fn empty_like_out(&self, out: &Tensor) -> Tensor {
        self.f_empty_like_out(out).unwrap()
    }

    /// Panicking wrapper for `f_empty_out` (associated function).
    pub fn empty_out(out: &Tensor, size: impl IntList) -> Tensor {
        Tensor::f_empty_out(out, size).unwrap()
    }

    /// Panicking wrapper for `f_empty_permuted` (associated function).
    pub fn empty_permuted(
        size: impl IntList,
        physical_layout: impl IntList,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_empty_permuted(size, physical_layout, options).unwrap()
    }

    /// Panicking wrapper for `f_empty_permuted_out` (associated function).
    pub fn empty_permuted_out(
        out: &Tensor,
        size: impl IntList,
        physical_layout: impl IntList,
    ) -> Tensor {
        Tensor::f_empty_permuted_out(out, size, physical_layout).unwrap()
    }

    /// Panicking wrapper for `f_empty_quantized` (associated function).
    pub fn empty_quantized(
        size: impl IntList,
        qtensor: &Tensor,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_empty_quantized(size, qtensor, options).unwrap()
    }

    /// Panicking wrapper for `f_empty_quantized_out` (associated function).
    pub fn empty_quantized_out(out: &Tensor, size: impl IntList, qtensor: &Tensor) -> Tensor {
        Tensor::f_empty_quantized_out(out, size, qtensor).unwrap()
    }

    /// Panicking wrapper for `f_empty_strided` (associated function).
    pub fn empty_strided(
        size: impl IntList,
        stride: impl IntList,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_empty_strided(size, stride, options).unwrap()
    }

    /// Panicking wrapper for `f_empty_strided_out` (associated function).
    pub fn empty_strided_out(out: &Tensor, size: impl IntList, stride: impl IntList) -> Tensor {
        Tensor::f_empty_strided_out(out, size, stride).unwrap()
    }
8519
    /// Panicking wrapper for `f_eq`.
    pub fn eq<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_eq(other).unwrap()
    }

    /// Panicking wrapper for `f_eq_`.
    pub fn eq_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_eq_(other).unwrap()
    }

    /// Panicking wrapper for `f_eq_scalar_out`.
    pub fn eq_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_eq_scalar_out(out, other).unwrap()
    }

    /// Panicking wrapper for `f_eq_tensor`.
    pub fn eq_tensor(&self, other: &Tensor) -> Tensor {
        self.f_eq_tensor(other).unwrap()
    }

    /// Panicking wrapper for `f_eq_tensor_`.
    pub fn eq_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_eq_tensor_(other).unwrap()
    }

    /// Panicking wrapper for `f_eq_tensor_out`.
    pub fn eq_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_eq_tensor_out(out, other).unwrap()
    }

    /// Panicking wrapper for `f_equal`; note this returns a `bool`, not a `Tensor`.
    pub fn equal(&self, other: &Tensor) -> bool {
        self.f_equal(other).unwrap()
    }

    /// Panicking wrapper for `f_erf`.
    pub fn erf(&self) -> Tensor {
        self.f_erf().unwrap()
    }

    /// Panicking wrapper for `f_erf_`.
    pub fn erf_(&mut self) -> Tensor {
        self.f_erf_().unwrap()
    }

    /// Panicking wrapper for `f_erf_out`.
    pub fn erf_out(&self, out: &Tensor) -> Tensor {
        self.f_erf_out(out).unwrap()
    }

    /// Panicking wrapper for `f_erfc`.
    pub fn erfc(&self) -> Tensor {
        self.f_erfc().unwrap()
    }

    /// Panicking wrapper for `f_erfc_`.
    pub fn erfc_(&mut self) -> Tensor {
        self.f_erfc_().unwrap()
    }

    /// Panicking wrapper for `f_erfc_out`.
    pub fn erfc_out(&self, out: &Tensor) -> Tensor {
        self.f_erfc_out(out).unwrap()
    }

    /// Panicking wrapper for `f_erfinv`.
    pub fn erfinv(&self) -> Tensor {
        self.f_erfinv().unwrap()
    }

    /// Panicking wrapper for `f_erfinv_`.
    pub fn erfinv_(&mut self) -> Tensor {
        self.f_erfinv_().unwrap()
    }

    /// Panicking wrapper for `f_erfinv_out`.
    pub fn erfinv_out(&self, out: &Tensor) -> Tensor {
        self.f_erfinv_out(out).unwrap()
    }

    /// Panicking wrapper for `f_exp`.
    pub fn exp(&self) -> Tensor {
        self.f_exp().unwrap()
    }

    /// Panicking wrapper for `f_exp2`.
    pub fn exp2(&self) -> Tensor {
        self.f_exp2().unwrap()
    }

    /// Panicking wrapper for `f_exp2_`.
    pub fn exp2_(&mut self) -> Tensor {
        self.f_exp2_().unwrap()
    }

    /// Panicking wrapper for `f_exp2_out`.
    pub fn exp2_out(&self, out: &Tensor) -> Tensor {
        self.f_exp2_out(out).unwrap()
    }

    /// Panicking wrapper for `f_exp_`.
    pub fn exp_(&mut self) -> Tensor {
        self.f_exp_().unwrap()
    }

    /// Panicking wrapper for `f_exp_out`.
    pub fn exp_out(&self, out: &Tensor) -> Tensor {
        self.f_exp_out(out).unwrap()
    }

    /// Panicking wrapper for `f_expand`.
    pub fn expand(&self, size: impl IntList, implicit: bool) -> Tensor {
        self.f_expand(size, implicit).unwrap()
    }

    /// Panicking wrapper for `f_expand_as`.
    pub fn expand_as(&self, other: &Tensor) -> Tensor {
        self.f_expand_as(other).unwrap()
    }

    /// Panicking wrapper for `f_expand_copy`.
    pub fn expand_copy(&self, size: impl IntList, implicit: bool) -> Tensor {
        self.f_expand_copy(size, implicit).unwrap()
    }

    /// Panicking wrapper for `f_expand_copy_out`.
    pub fn expand_copy_out(&self, out: &Tensor, size: impl IntList, implicit: bool) -> Tensor {
        self.f_expand_copy_out(out, size, implicit).unwrap()
    }

    /// Panicking wrapper for `f_expm1`.
    pub fn expm1(&self) -> Tensor {
        self.f_expm1().unwrap()
    }

    /// Panicking wrapper for `f_expm1_`.
    pub fn expm1_(&mut self) -> Tensor {
        self.f_expm1_().unwrap()
    }

    /// Panicking wrapper for `f_expm1_out`.
    pub fn expm1_out(&self, out: &Tensor) -> Tensor {
        self.f_expm1_out(out).unwrap()
    }

    /// Panicking wrapper for `f_exponential`.
    pub fn exponential(&self, lambd: f64) -> Tensor {
        self.f_exponential(lambd).unwrap()
    }

    /// Panicking wrapper for `f_exponential_`.
    pub fn exponential_(&mut self, lambd: f64) -> Tensor {
        self.f_exponential_(lambd).unwrap()
    }

    /// Panicking wrapper for `f_exponential_out`.
    pub fn exponential_out(&self, out: &Tensor, lambd: f64) -> Tensor {
        self.f_exponential_out(out, lambd).unwrap()
    }

    /// Panicking wrapper for `f_eye` (associated function).
    pub fn eye(n: i64, options: (Kind, Device)) -> Tensor {
        Tensor::f_eye(n, options).unwrap()
    }

    /// Panicking wrapper for `f_eye_m` (associated function).
    pub fn eye_m(n: i64, m: i64, options: (Kind, Device)) -> Tensor {
        Tensor::f_eye_m(n, m, options).unwrap()
    }

    /// Panicking wrapper for `f_eye_m_out` (associated function).
    pub fn eye_m_out(out: &Tensor, n: i64, m: i64) -> Tensor {
        Tensor::f_eye_m_out(out, n, m).unwrap()
    }

    /// Panicking wrapper for `f_eye_out` (associated function).
    pub fn eye_out(out: &Tensor, n: i64) -> Tensor {
        Tensor::f_eye_out(out, n).unwrap()
    }
8663
    /// Panicking wrapper for `f_fake_quantize_per_channel_affine`.
    pub fn fake_quantize_per_channel_affine(
        &self,
        scale: &Tensor,
        zero_point: &Tensor,
        axis: i64,
        quant_min: i64,
        quant_max: i64,
    ) -> Tensor {
        self.f_fake_quantize_per_channel_affine(scale, zero_point, axis, quant_min, quant_max)
            .unwrap()
    }

    /// Panicking wrapper for `f_fake_quantize_per_channel_affine_cachemask`.
    pub fn fake_quantize_per_channel_affine_cachemask(
        &self,
        scale: &Tensor,
        zero_point: &Tensor,
        axis: i64,
        quant_min: i64,
        quant_max: i64,
    ) -> (Tensor, Tensor) {
        self.f_fake_quantize_per_channel_affine_cachemask(
            scale, zero_point, axis, quant_min, quant_max,
        )
        .unwrap()
    }

    /// Panicking wrapper for `f_fake_quantize_per_channel_affine_cachemask_backward` (associated function).
    pub fn fake_quantize_per_channel_affine_cachemask_backward(
        grad: &Tensor,
        mask: &Tensor,
    ) -> Tensor {
        Tensor::f_fake_quantize_per_channel_affine_cachemask_backward(grad, mask).unwrap()
    }

    /// Panicking wrapper for `f_fake_quantize_per_channel_affine_cachemask_out`.
    pub fn fake_quantize_per_channel_affine_cachemask_out(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        scale: &Tensor,
        zero_point: &Tensor,
        axis: i64,
        quant_min: i64,
        quant_max: i64,
    ) -> (Tensor, Tensor) {
        self.f_fake_quantize_per_channel_affine_cachemask_out(
            out0, out1, scale, zero_point, axis, quant_min, quant_max,
        )
        .unwrap()
    }

    /// Panicking wrapper for `f_fake_quantize_per_tensor_affine`.
    pub fn fake_quantize_per_tensor_affine(
        &self,
        scale: f64,
        zero_point: i64,
        quant_min: i64,
        quant_max: i64,
    ) -> Tensor {
        self.f_fake_quantize_per_tensor_affine(scale, zero_point, quant_min, quant_max).unwrap()
    }

    /// Panicking wrapper for `f_fake_quantize_per_tensor_affine_cachemask`.
    pub fn fake_quantize_per_tensor_affine_cachemask(
        &self,
        scale: f64,
        zero_point: i64,
        quant_min: i64,
        quant_max: i64,
    ) -> (Tensor, Tensor) {
        self.f_fake_quantize_per_tensor_affine_cachemask(scale, zero_point, quant_min, quant_max)
            .unwrap()
    }

    /// Panicking wrapper for `f_fake_quantize_per_tensor_affine_cachemask_backward` (associated function).
    pub fn fake_quantize_per_tensor_affine_cachemask_backward(
        grad: &Tensor,
        mask: &Tensor,
    ) -> Tensor {
        Tensor::f_fake_quantize_per_tensor_affine_cachemask_backward(grad, mask).unwrap()
    }

    /// Panicking wrapper for `f_fake_quantize_per_tensor_affine_cachemask_out`.
    pub fn fake_quantize_per_tensor_affine_cachemask_out(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        scale: f64,
        zero_point: i64,
        quant_min: i64,
        quant_max: i64,
    ) -> (Tensor, Tensor) {
        self.f_fake_quantize_per_tensor_affine_cachemask_out(
            out0, out1, scale, zero_point, quant_min, quant_max,
        )
        .unwrap()
    }

    /// Panicking wrapper for `f_fake_quantize_per_tensor_affine_tensor_qparams`.
    pub fn fake_quantize_per_tensor_affine_tensor_qparams(
        &self,
        scale: &Tensor,
        zero_point: &Tensor,
        quant_min: i64,
        quant_max: i64,
    ) -> Tensor {
        self.f_fake_quantize_per_tensor_affine_tensor_qparams(
            scale, zero_point, quant_min, quant_max,
        )
        .unwrap()
    }

    /// Panicking wrapper for `f_fbgemm_linear_fp16_weight`.
    pub fn fbgemm_linear_fp16_weight(&self, packed_weight: &Tensor, bias: &Tensor) -> Tensor {
        self.f_fbgemm_linear_fp16_weight(packed_weight, bias).unwrap()
    }

    /// Panicking wrapper for `f_fbgemm_linear_fp16_weight_fp32_activation`.
    pub fn fbgemm_linear_fp16_weight_fp32_activation(
        &self,
        packed_weight: &Tensor,
        bias: &Tensor,
    ) -> Tensor {
        self.f_fbgemm_linear_fp16_weight_fp32_activation(packed_weight, bias).unwrap()
    }
8780
    /// Panicking wrapper for `f_fbgemm_linear_int8_weight`.
    pub fn fbgemm_linear_int8_weight<S: Into<Scalar>>(
        &self,
        weight: &Tensor,
        packed: &Tensor,
        col_offsets: &Tensor,
        weight_scale: S,
        weight_zero_point: S,
        bias: &Tensor,
    ) -> Tensor {
        self.f_fbgemm_linear_int8_weight(
            weight,
            packed,
            col_offsets,
            weight_scale,
            weight_zero_point,
            bias,
        )
        .unwrap()
    }

    /// Panicking wrapper for `f_fbgemm_linear_int8_weight_fp32_activation`.
    pub fn fbgemm_linear_int8_weight_fp32_activation<S: Into<Scalar>>(
        &self,
        weight: &Tensor,
        packed: &Tensor,
        col_offsets: &Tensor,
        weight_scale: S,
        weight_zero_point: S,
        bias: &Tensor,
    ) -> Tensor {
        self.f_fbgemm_linear_int8_weight_fp32_activation(
            weight,
            packed,
            col_offsets,
            weight_scale,
            weight_zero_point,
            bias,
        )
        .unwrap()
    }

    /// Panicking wrapper for `f_fbgemm_pack_gemm_matrix_fp16`.
    pub fn fbgemm_pack_gemm_matrix_fp16(&self) -> Tensor {
        self.f_fbgemm_pack_gemm_matrix_fp16().unwrap()
    }

    /// Panicking wrapper for `f_fbgemm_pack_quantized_matrix`.
    pub fn fbgemm_pack_quantized_matrix(&self) -> Tensor {
        self.f_fbgemm_pack_quantized_matrix().unwrap()
    }

    /// Panicking wrapper for `f_fbgemm_pack_quantized_matrix_kn`.
    pub fn fbgemm_pack_quantized_matrix_kn(&self, k: i64, n: i64) -> Tensor {
        self.f_fbgemm_pack_quantized_matrix_kn(k, n).unwrap()
    }

    /// Panicking wrapper for `f_feature_alpha_dropout`.
    pub fn feature_alpha_dropout(&self, p: f64, train: bool) -> Tensor {
        self.f_feature_alpha_dropout(p, train).unwrap()
    }

    /// Panicking wrapper for `f_feature_alpha_dropout_`.
    pub fn feature_alpha_dropout_(&mut self, p: f64, train: bool) -> Tensor {
        self.f_feature_alpha_dropout_(p, train).unwrap()
    }

    /// Panicking wrapper for `f_feature_dropout`.
    pub fn feature_dropout(&self, p: f64, train: bool) -> Tensor {
        self.f_feature_dropout(p, train).unwrap()
    }

    /// Panicking wrapper for `f_feature_dropout_`.
    pub fn feature_dropout_(&mut self, p: f64, train: bool) -> Tensor {
        self.f_feature_dropout_(p, train).unwrap()
    }

    /// Panicking wrapper for `f_fft_fft`.
    pub fn fft_fft(&self, n: impl Into<Option<i64>>, dim: i64, norm: &str) -> Tensor {
        self.f_fft_fft(n, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_fft2`.
    pub fn fft_fft2(&self, s: impl IntListOption, dim: impl IntList, norm: &str) -> Tensor {
        self.f_fft_fft2(s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_fft2_out`.
    pub fn fft_fft2_out(
        &self,
        out: &Tensor,
        s: impl IntListOption,
        dim: impl IntList,
        norm: &str,
    ) -> Tensor {
        self.f_fft_fft2_out(out, s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_fft_out`.
    pub fn fft_fft_out(
        &self,
        out: &Tensor,
        n: impl Into<Option<i64>>,
        dim: i64,
        norm: &str,
    ) -> Tensor {
        self.f_fft_fft_out(out, n, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_fftfreq` (associated function).
    pub fn fft_fftfreq(n: i64, d: f64, options: (Kind, Device)) -> Tensor {
        Tensor::f_fft_fftfreq(n, d, options).unwrap()
    }

    /// Panicking wrapper for `f_fft_fftfreq_out` (associated function).
    pub fn fft_fftfreq_out(out: &Tensor, n: i64, d: f64) -> Tensor {
        Tensor::f_fft_fftfreq_out(out, n, d).unwrap()
    }

    /// Panicking wrapper for `f_fft_fftn`.
    pub fn fft_fftn(&self, s: impl IntListOption, dim: impl IntListOption, norm: &str) -> Tensor {
        self.f_fft_fftn(s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_fftn_out`.
    pub fn fft_fftn_out(
        &self,
        out: &Tensor,
        s: impl IntListOption,
        dim: impl IntListOption,
        norm: &str,
    ) -> Tensor {
        self.f_fft_fftn_out(out, s, dim, norm).unwrap()
    }
8898
    /// Panicking wrapper for `f_fft_fftshift`.
    pub fn fft_fftshift(&self, dim: impl IntListOption) -> Tensor {
        self.f_fft_fftshift(dim).unwrap()
    }

    /// Panicking wrapper for `f_fft_hfft`.
    pub fn fft_hfft(&self, n: impl Into<Option<i64>>, dim: i64, norm: &str) -> Tensor {
        self.f_fft_hfft(n, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_hfft2`.
    pub fn fft_hfft2(&self, s: impl IntListOption, dim: impl IntList, norm: &str) -> Tensor {
        self.f_fft_hfft2(s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_hfft2_out`.
    pub fn fft_hfft2_out(
        &self,
        out: &Tensor,
        s: impl IntListOption,
        dim: impl IntList,
        norm: &str,
    ) -> Tensor {
        self.f_fft_hfft2_out(out, s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_hfft_out`.
    pub fn fft_hfft_out(
        &self,
        out: &Tensor,
        n: impl Into<Option<i64>>,
        dim: i64,
        norm: &str,
    ) -> Tensor {
        self.f_fft_hfft_out(out, n, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_hfftn`.
    pub fn fft_hfftn(&self, s: impl IntListOption, dim: impl IntListOption, norm: &str) -> Tensor {
        self.f_fft_hfftn(s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_hfftn_out`.
    pub fn fft_hfftn_out(
        &self,
        out: &Tensor,
        s: impl IntListOption,
        dim: impl IntListOption,
        norm: &str,
    ) -> Tensor {
        self.f_fft_hfftn_out(out, s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_ifft`.
    pub fn fft_ifft(&self, n: impl Into<Option<i64>>, dim: i64, norm: &str) -> Tensor {
        self.f_fft_ifft(n, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_ifft2`.
    pub fn fft_ifft2(&self, s: impl IntListOption, dim: impl IntList, norm: &str) -> Tensor {
        self.f_fft_ifft2(s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_ifft2_out`.
    pub fn fft_ifft2_out(
        &self,
        out: &Tensor,
        s: impl IntListOption,
        dim: impl IntList,
        norm: &str,
    ) -> Tensor {
        self.f_fft_ifft2_out(out, s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_ifft_out`.
    pub fn fft_ifft_out(
        &self,
        out: &Tensor,
        n: impl Into<Option<i64>>,
        dim: i64,
        norm: &str,
    ) -> Tensor {
        self.f_fft_ifft_out(out, n, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_ifftn`.
    pub fn fft_ifftn(&self, s: impl IntListOption, dim: impl IntListOption, norm: &str) -> Tensor {
        self.f_fft_ifftn(s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_ifftn_out`.
    pub fn fft_ifftn_out(
        &self,
        out: &Tensor,
        s: impl IntListOption,
        dim: impl IntListOption,
        norm: &str,
    ) -> Tensor {
        self.f_fft_ifftn_out(out, s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_ifftshift`.
    pub fn fft_ifftshift(&self, dim: impl IntListOption) -> Tensor {
        self.f_fft_ifftshift(dim).unwrap()
    }

    /// Panicking wrapper for `f_fft_ihfft`.
    pub fn fft_ihfft(&self, n: impl Into<Option<i64>>, dim: i64, norm: &str) -> Tensor {
        self.f_fft_ihfft(n, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_ihfft2`.
    pub fn fft_ihfft2(&self, s: impl IntListOption, dim: impl IntList, norm: &str) -> Tensor {
        self.f_fft_ihfft2(s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_ihfft2_out`.
    pub fn fft_ihfft2_out(
        &self,
        out: &Tensor,
        s: impl IntListOption,
        dim: impl IntList,
        norm: &str,
    ) -> Tensor {
        self.f_fft_ihfft2_out(out, s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_ihfft_out`.
    pub fn fft_ihfft_out(
        &self,
        out: &Tensor,
        n: impl Into<Option<i64>>,
        dim: i64,
        norm: &str,
    ) -> Tensor {
        self.f_fft_ihfft_out(out, n, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_ihfftn`.
    pub fn fft_ihfftn(&self, s: impl IntListOption, dim: impl IntListOption, norm: &str) -> Tensor {
        self.f_fft_ihfftn(s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_ihfftn_out`.
    pub fn fft_ihfftn_out(
        &self,
        out: &Tensor,
        s: impl IntListOption,
        dim: impl IntListOption,
        norm: &str,
    ) -> Tensor {
        self.f_fft_ihfftn_out(out, s, dim, norm).unwrap()
    }
9032
    /// Panicking wrapper for `f_fft_irfft`.
    pub fn fft_irfft(&self, n: impl Into<Option<i64>>, dim: i64, norm: &str) -> Tensor {
        self.f_fft_irfft(n, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_irfft2`.
    pub fn fft_irfft2(&self, s: impl IntListOption, dim: impl IntList, norm: &str) -> Tensor {
        self.f_fft_irfft2(s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_irfft2_out`.
    pub fn fft_irfft2_out(
        &self,
        out: &Tensor,
        s: impl IntListOption,
        dim: impl IntList,
        norm: &str,
    ) -> Tensor {
        self.f_fft_irfft2_out(out, s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_irfft_out`.
    pub fn fft_irfft_out(
        &self,
        out: &Tensor,
        n: impl Into<Option<i64>>,
        dim: i64,
        norm: &str,
    ) -> Tensor {
        self.f_fft_irfft_out(out, n, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_irfftn`.
    pub fn fft_irfftn(&self, s: impl IntListOption, dim: impl IntListOption, norm: &str) -> Tensor {
        self.f_fft_irfftn(s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_irfftn_out`.
    pub fn fft_irfftn_out(
        &self,
        out: &Tensor,
        s: impl IntListOption,
        dim: impl IntListOption,
        norm: &str,
    ) -> Tensor {
        self.f_fft_irfftn_out(out, s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_rfft`.
    pub fn fft_rfft(&self, n: impl Into<Option<i64>>, dim: i64, norm: &str) -> Tensor {
        self.f_fft_rfft(n, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_rfft2`.
    pub fn fft_rfft2(&self, s: impl IntListOption, dim: impl IntList, norm: &str) -> Tensor {
        self.f_fft_rfft2(s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_rfft2_out`.
    pub fn fft_rfft2_out(
        &self,
        out: &Tensor,
        s: impl IntListOption,
        dim: impl IntList,
        norm: &str,
    ) -> Tensor {
        self.f_fft_rfft2_out(out, s, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_rfft_out`.
    pub fn fft_rfft_out(
        &self,
        out: &Tensor,
        n: impl Into<Option<i64>>,
        dim: i64,
        norm: &str,
    ) -> Tensor {
        self.f_fft_rfft_out(out, n, dim, norm).unwrap()
    }

    /// Panicking wrapper for `f_fft_rfftfreq` (associated function).
    pub fn fft_rfftfreq(n: i64, d: f64, options: (Kind, Device)) -> Tensor {
        Tensor::f_fft_rfftfreq(n, d, options).unwrap()
    }

    /// Panicking wrapper for `f_fft_rfftfreq_out` (associated function).
    pub fn fft_rfftfreq_out(out: &Tensor, n: i64, d: f64) -> Tensor {
        Tensor::f_fft_rfftfreq_out(out, n, d).unwrap()
    }

    /// Panicking wrapper for `f_fft_rfftn`.
    pub fn fft_rfftn(&self, s: impl IntListOption, dim: impl IntListOption, norm: &str) -> Tensor {
        self.f_fft_rfftn(s, dim, norm).unwrap()
    }
9114
9115    pub fn fft_rfftn_out(
9116        &self,
9117        out: &Tensor,
9118        s: impl IntListOption,
9119        dim: impl IntListOption,
9120        norm: &str,
9121    ) -> Tensor {
9122        self.f_fft_rfftn_out(out, s, dim, norm).unwrap()
9123    }
9124
    /// Panicking variant of [`Tensor::f_fill`].
    pub fn fill<S: Into<Scalar>>(&self, value: S) -> Tensor {
        self.f_fill(value).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fill_`] (takes `&mut self`).
    pub fn fill_<S: Into<Scalar>>(&mut self, value: S) -> Tensor {
        self.f_fill_(value).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fill_diagonal_`] (takes `&mut self`).
    pub fn fill_diagonal_<S: Into<Scalar>>(&mut self, fill_value: S, wrap: bool) -> Tensor {
        self.f_fill_diagonal_(fill_value, wrap).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fill_scalar_out`], writing into `out`.
    pub fn fill_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, value: S) -> Tensor {
        self.f_fill_scalar_out(out, value).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fill_tensor`].
    pub fn fill_tensor(&self, value: &Tensor) -> Tensor {
        self.f_fill_tensor(value).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fill_tensor_`] (takes `&mut self`).
    pub fn fill_tensor_(&mut self, value: &Tensor) -> Tensor {
        self.f_fill_tensor_(value).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fill_tensor_out`], writing into `out`.
    pub fn fill_tensor_out(&self, out: &Tensor, value: &Tensor) -> Tensor {
        self.f_fill_tensor_out(out, value).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fix`].
    pub fn fix(&self) -> Tensor {
        self.f_fix().unwrap()
    }

    /// Panicking variant of [`Tensor::f_fix_`] (takes `&mut self`).
    pub fn fix_(&mut self) -> Tensor {
        self.f_fix_().unwrap()
    }

    /// Panicking variant of [`Tensor::f_fix_out`], writing into `out`.
    pub fn fix_out(&self, out: &Tensor) -> Tensor {
        self.f_fix_out(out).unwrap()
    }
9164
    /// Panicking variant of [`Tensor::f_flatten`].
    pub fn flatten(&self, start_dim: i64, end_dim: i64) -> Tensor {
        self.f_flatten(start_dim, end_dim).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_flatten_dense_tensors`].
    pub fn flatten_dense_tensors<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
        Tensor::f_flatten_dense_tensors(tensors).unwrap()
    }

    /// Panicking variant of [`Tensor::f_flip`].
    pub fn flip(&self, dims: impl IntList) -> Tensor {
        self.f_flip(dims).unwrap()
    }

    /// Panicking variant of [`Tensor::f_flip_out`], writing into `out`.
    pub fn flip_out(&self, out: &Tensor, dims: impl IntList) -> Tensor {
        self.f_flip_out(out, dims).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fliplr`].
    pub fn fliplr(&self) -> Tensor {
        self.f_fliplr().unwrap()
    }

    /// Panicking variant of [`Tensor::f_flipud`].
    pub fn flipud(&self) -> Tensor {
        self.f_flipud().unwrap()
    }
9188
    /// Panicking variant of [`Tensor::f_float_power`].
    pub fn float_power(&self, exponent: &Tensor) -> Tensor {
        self.f_float_power(exponent).unwrap()
    }

    /// Panicking variant of [`Tensor::f_float_power_`] (takes `&mut self`).
    pub fn float_power_<S: Into<Scalar>>(&mut self, exponent: S) -> Tensor {
        self.f_float_power_(exponent).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_float_power_scalar`].
    pub fn float_power_scalar<S: Into<Scalar>>(self_scalar: S, exponent: &Tensor) -> Tensor {
        Tensor::f_float_power_scalar(self_scalar, exponent).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_float_power_scalar_out`].
    pub fn float_power_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        self_scalar: S,
        exponent: &Tensor,
    ) -> Tensor {
        Tensor::f_float_power_scalar_out(out, self_scalar, exponent).unwrap()
    }

    /// Panicking variant of [`Tensor::f_float_power_tensor_`] (takes `&mut self`).
    pub fn float_power_tensor_(&mut self, exponent: &Tensor) -> Tensor {
        self.f_float_power_tensor_(exponent).unwrap()
    }

    /// Panicking variant of [`Tensor::f_float_power_tensor_scalar`].
    pub fn float_power_tensor_scalar<S: Into<Scalar>>(&self, exponent: S) -> Tensor {
        self.f_float_power_tensor_scalar(exponent).unwrap()
    }

    /// Panicking variant of [`Tensor::f_float_power_tensor_scalar_out`], writing into `out`.
    pub fn float_power_tensor_scalar_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        exponent: S,
    ) -> Tensor {
        self.f_float_power_tensor_scalar_out(out, exponent).unwrap()
    }

    /// Panicking variant of [`Tensor::f_float_power_tensor_tensor_out`], writing into `out`.
    pub fn float_power_tensor_tensor_out(&self, out: &Tensor, exponent: &Tensor) -> Tensor {
        self.f_float_power_tensor_tensor_out(out, exponent).unwrap()
    }
9228
    /// Panicking variant of [`Tensor::f_floor`].
    pub fn floor(&self) -> Tensor {
        self.f_floor().unwrap()
    }

    /// Panicking variant of [`Tensor::f_floor_`] (takes `&mut self`).
    pub fn floor_(&mut self) -> Tensor {
        self.f_floor_().unwrap()
    }

    /// Panicking variant of [`Tensor::f_floor_divide`].
    pub fn floor_divide(&self, other: &Tensor) -> Tensor {
        self.f_floor_divide(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_floor_divide_`] (takes `&mut self`).
    pub fn floor_divide_(&mut self, other: &Tensor) -> Tensor {
        self.f_floor_divide_(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_floor_divide_out`], writing into `out`.
    pub fn floor_divide_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_floor_divide_out(out, other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_floor_divide_scalar`].
    pub fn floor_divide_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_floor_divide_scalar(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_floor_divide_scalar_`] (takes `&mut self`).
    pub fn floor_divide_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_floor_divide_scalar_(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_floor_divide_scalar_out`], writing into `out`.
    pub fn floor_divide_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_floor_divide_scalar_out(out, other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_floor_out`], writing into `out`.
    pub fn floor_out(&self, out: &Tensor) -> Tensor {
        self.f_floor_out(out).unwrap()
    }
9264
    /// Panicking variant of [`Tensor::f_fmax`].
    pub fn fmax(&self, other: &Tensor) -> Tensor {
        self.f_fmax(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fmax_out`], writing into `out`.
    pub fn fmax_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_fmax_out(out, other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fmin`].
    pub fn fmin(&self, other: &Tensor) -> Tensor {
        self.f_fmin(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fmin_out`], writing into `out`.
    pub fn fmin_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_fmin_out(out, other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fmod`].
    pub fn fmod<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_fmod(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fmod_`] (takes `&mut self`).
    pub fn fmod_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_fmod_(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fmod_scalar_out`], writing into `out`.
    pub fn fmod_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_fmod_scalar_out(out, other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fmod_tensor`].
    pub fn fmod_tensor(&self, other: &Tensor) -> Tensor {
        self.f_fmod_tensor(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fmod_tensor_`] (takes `&mut self`).
    pub fn fmod_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_fmod_tensor_(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fmod_tensor_out`], writing into `out`.
    pub fn fmod_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_fmod_tensor_out(out, other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_frac`].
    pub fn frac(&self) -> Tensor {
        self.f_frac().unwrap()
    }

    /// Panicking variant of [`Tensor::f_frac_`] (takes `&mut self`).
    pub fn frac_(&mut self) -> Tensor {
        self.f_frac_().unwrap()
    }

    /// Panicking variant of [`Tensor::f_frac_out`], writing into `out`.
    pub fn frac_out(&self, out: &Tensor) -> Tensor {
        self.f_frac_out(out).unwrap()
    }
9316
    /// Panicking variant of [`Tensor::f_fractional_max_pool2d`]; returns a pair of tensors.
    pub fn fractional_max_pool2d(
        &self,
        kernel_size: impl IntList,
        output_size: impl IntList,
        random_samples: &Tensor,
    ) -> (Tensor, Tensor) {
        self.f_fractional_max_pool2d(kernel_size, output_size, random_samples).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fractional_max_pool2d_backward`].
    pub fn fractional_max_pool2d_backward(
        &self,
        grad_output: &Tensor,
        kernel_size: impl IntList,
        output_size: impl IntList,
        indices: &Tensor,
    ) -> Tensor {
        self.f_fractional_max_pool2d_backward(grad_output, kernel_size, output_size, indices)
            .unwrap()
    }

    /// Panicking variant of [`Tensor::f_fractional_max_pool2d_backward_grad_input`].
    pub fn fractional_max_pool2d_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        kernel_size: impl IntList,
        output_size: impl IntList,
        indices: &Tensor,
    ) -> Tensor {
        self.f_fractional_max_pool2d_backward_grad_input(
            grad_input,
            grad_output,
            kernel_size,
            output_size,
            indices,
        )
        .unwrap()
    }

    /// Panicking variant of [`Tensor::f_fractional_max_pool2d_output`].
    pub fn fractional_max_pool2d_output(
        &self,
        output: &Tensor,
        indices: &Tensor,
        kernel_size: impl IntList,
        output_size: impl IntList,
        random_samples: &Tensor,
    ) -> (Tensor, Tensor) {
        self.f_fractional_max_pool2d_output(
            output,
            indices,
            kernel_size,
            output_size,
            random_samples,
        )
        .unwrap()
    }

    /// Panicking variant of [`Tensor::f_fractional_max_pool3d`]; returns a pair of tensors.
    pub fn fractional_max_pool3d(
        &self,
        kernel_size: impl IntList,
        output_size: impl IntList,
        random_samples: &Tensor,
    ) -> (Tensor, Tensor) {
        self.f_fractional_max_pool3d(kernel_size, output_size, random_samples).unwrap()
    }

    /// Panicking variant of [`Tensor::f_fractional_max_pool3d_backward`].
    pub fn fractional_max_pool3d_backward(
        &self,
        grad_output: &Tensor,
        kernel_size: impl IntList,
        output_size: impl IntList,
        indices: &Tensor,
    ) -> Tensor {
        self.f_fractional_max_pool3d_backward(grad_output, kernel_size, output_size, indices)
            .unwrap()
    }

    /// Panicking variant of [`Tensor::f_fractional_max_pool3d_backward_grad_input`].
    pub fn fractional_max_pool3d_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        kernel_size: impl IntList,
        output_size: impl IntList,
        indices: &Tensor,
    ) -> Tensor {
        self.f_fractional_max_pool3d_backward_grad_input(
            grad_input,
            grad_output,
            kernel_size,
            output_size,
            indices,
        )
        .unwrap()
    }

    /// Panicking variant of [`Tensor::f_fractional_max_pool3d_output`].
    pub fn fractional_max_pool3d_output(
        &self,
        output: &Tensor,
        indices: &Tensor,
        kernel_size: impl IntList,
        output_size: impl IntList,
        random_samples: &Tensor,
    ) -> (Tensor, Tensor) {
        self.f_fractional_max_pool3d_output(
            output,
            indices,
            kernel_size,
            output_size,
            random_samples,
        )
        .unwrap()
    }
9428
    /// Panicking variant of [`Tensor::f_frexp`]; returns a pair of tensors.
    pub fn frexp(&self) -> (Tensor, Tensor) {
        self.f_frexp().unwrap()
    }

    /// Panicking variant of [`Tensor::f_frexp_tensor_out`], writing into `mantissa`/`exponent`.
    pub fn frexp_tensor_out(&self, mantissa: &Tensor, exponent: &Tensor) -> (Tensor, Tensor) {
        self.f_frexp_tensor_out(mantissa, exponent).unwrap()
    }

    /// Panicking variant of [`Tensor::f_frobenius_norm`].
    pub fn frobenius_norm(&self, dim: impl IntList, keepdim: bool) -> Tensor {
        self.f_frobenius_norm(dim, keepdim).unwrap()
    }

    /// Panicking variant of [`Tensor::f_frobenius_norm_out`], writing into `out`.
    pub fn frobenius_norm_out(&self, out: &Tensor, dim: impl IntList, keepdim: bool) -> Tensor {
        self.f_frobenius_norm_out(out, dim, keepdim).unwrap()
    }
9444
    /// Associated-function panicking variant of [`Tensor::f_from_file`].
    pub fn from_file(
        filename: &str,
        shared: bool,
        size: impl Into<Option<i64>>,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_from_file(filename, shared, size, options).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_from_file_out`].
    pub fn from_file_out(
        out: &Tensor,
        filename: &str,
        shared: bool,
        size: impl Into<Option<i64>>,
    ) -> Tensor {
        Tensor::f_from_file_out(out, filename, shared, size).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_full`].
    pub fn full<S: Into<Scalar>>(
        size: impl IntList,
        fill_value: S,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_full(size, fill_value, options).unwrap()
    }

    /// Panicking variant of [`Tensor::f_full_like`].
    pub fn full_like<S: Into<Scalar>>(&self, fill_value: S) -> Tensor {
        self.f_full_like(fill_value).unwrap()
    }

    /// Panicking variant of [`Tensor::f_full_like_out`], writing into `out`.
    pub fn full_like_out<S: Into<Scalar>>(&self, out: &Tensor, fill_value: S) -> Tensor {
        self.f_full_like_out(out, fill_value).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_full_out`].
    pub fn full_out<S: Into<Scalar>>(out: &Tensor, size: impl IntList, fill_value: S) -> Tensor {
        Tensor::f_full_out(out, size, fill_value).unwrap()
    }
9482
    /// Panicking variant of [`Tensor::f_fused_moving_avg_obs_fake_quant`]; forwards all
    /// quantization parameters unchanged.
    pub fn fused_moving_avg_obs_fake_quant(
        &self,
        observer_on: &Tensor,
        fake_quant_on: &Tensor,
        running_min: &Tensor,
        running_max: &Tensor,
        scale: &Tensor,
        zero_point: &Tensor,
        averaging_const: f64,
        quant_min: i64,
        quant_max: i64,
        ch_axis: i64,
        per_row_fake_quant: bool,
        symmetric_quant: bool,
    ) -> Tensor {
        self.f_fused_moving_avg_obs_fake_quant(
            observer_on,
            fake_quant_on,
            running_min,
            running_max,
            scale,
            zero_point,
            averaging_const,
            quant_min,
            quant_max,
            ch_axis,
            per_row_fake_quant,
            symmetric_quant,
        )
        .unwrap()
    }

    /// Panicking variant of [`Tensor::f_gather`].
    pub fn gather(&self, dim: i64, index: &Tensor, sparse_grad: bool) -> Tensor {
        self.f_gather(dim, index, sparse_grad).unwrap()
    }

    /// Panicking variant of [`Tensor::f_gather_backward`].
    pub fn gather_backward(
        &self,
        grad: &Tensor,
        dim: i64,
        index: &Tensor,
        sparse_grad: bool,
    ) -> Tensor {
        self.f_gather_backward(grad, dim, index, sparse_grad).unwrap()
    }

    /// Panicking variant of [`Tensor::f_gather_out`], writing into `out`.
    pub fn gather_out(&self, out: &Tensor, dim: i64, index: &Tensor, sparse_grad: bool) -> Tensor {
        self.f_gather_out(out, dim, index, sparse_grad).unwrap()
    }
9532
    /// Panicking variant of [`Tensor::f_gcd`].
    pub fn gcd(&self, other: &Tensor) -> Tensor {
        self.f_gcd(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_gcd_`] (takes `&mut self`).
    pub fn gcd_(&mut self, other: &Tensor) -> Tensor {
        self.f_gcd_(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_gcd_out`], writing into `out`.
    pub fn gcd_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_gcd_out(out, other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_ge`].
    pub fn ge<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_ge(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_ge_`] (takes `&mut self`).
    pub fn ge_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_ge_(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_ge_scalar_out`], writing into `out`.
    pub fn ge_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_ge_scalar_out(out, other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_ge_tensor`].
    pub fn ge_tensor(&self, other: &Tensor) -> Tensor {
        self.f_ge_tensor(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_ge_tensor_`] (takes `&mut self`).
    pub fn ge_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_ge_tensor_(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_ge_tensor_out`], writing into `out`.
    pub fn ge_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_ge_tensor_out(out, other).unwrap()
    }
9568
    /// Panicking variant of [`Tensor::f_gelu`].
    pub fn gelu(&self, approximate: &str) -> Tensor {
        self.f_gelu(approximate).unwrap()
    }

    /// Panicking variant of [`Tensor::f_gelu_`] (takes `&mut self`).
    pub fn gelu_(&mut self, approximate: &str) -> Tensor {
        self.f_gelu_(approximate).unwrap()
    }

    /// Panicking variant of [`Tensor::f_gelu_backward`].
    pub fn gelu_backward(&self, grad_output: &Tensor, approximate: &str) -> Tensor {
        self.f_gelu_backward(grad_output, approximate).unwrap()
    }

    /// Panicking variant of [`Tensor::f_gelu_backward_grad_input`].
    pub fn gelu_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        approximate: &str,
    ) -> Tensor {
        self.f_gelu_backward_grad_input(grad_input, grad_output, approximate).unwrap()
    }

    /// Panicking variant of [`Tensor::f_gelu_out`], writing into `out`.
    pub fn gelu_out(&self, out: &Tensor, approximate: &str) -> Tensor {
        self.f_gelu_out(out, approximate).unwrap()
    }

    /// Panicking variant of [`Tensor::f_geometric`].
    pub fn geometric(&self, p: f64) -> Tensor {
        self.f_geometric(p).unwrap()
    }

    /// Panicking variant of [`Tensor::f_geometric_`] (takes `&mut self`).
    pub fn geometric_(&mut self, p: f64) -> Tensor {
        self.f_geometric_(p).unwrap()
    }

    /// Panicking variant of [`Tensor::f_geometric_out`], writing into `out`.
    pub fn geometric_out(&self, out: &Tensor, p: f64) -> Tensor {
        self.f_geometric_out(out, p).unwrap()
    }

    /// Panicking variant of [`Tensor::f_geqrf`]; returns a pair of tensors.
    pub fn geqrf(&self) -> (Tensor, Tensor) {
        self.f_geqrf().unwrap()
    }

    /// Panicking variant of [`Tensor::f_geqrf_a`], writing into `a`/`tau`.
    pub fn geqrf_a(&self, a: &Tensor, tau: &Tensor) -> (Tensor, Tensor) {
        self.f_geqrf_a(a, tau).unwrap()
    }

    /// Panicking variant of [`Tensor::f_ger`].
    pub fn ger(&self, vec2: &Tensor) -> Tensor {
        self.f_ger(vec2).unwrap()
    }

    /// Panicking variant of [`Tensor::f_ger_out`], writing into `out`.
    pub fn ger_out(&self, out: &Tensor, vec2: &Tensor) -> Tensor {
        self.f_ger_out(out, vec2).unwrap()
    }
9621
    /// Panicking variant of [`Tensor::f_glu`].
    pub fn glu(&self, dim: i64) -> Tensor {
        self.f_glu(dim).unwrap()
    }

    /// Panicking variant of [`Tensor::f_glu_backward`].
    pub fn glu_backward(&self, grad_output: &Tensor, dim: i64) -> Tensor {
        self.f_glu_backward(grad_output, dim).unwrap()
    }

    /// Panicking variant of [`Tensor::f_glu_backward_grad_input`].
    pub fn glu_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        dim: i64,
    ) -> Tensor {
        self.f_glu_backward_grad_input(grad_input, grad_output, dim).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_glu_backward_jvp`].
    pub fn glu_backward_jvp(
        grad_x: &Tensor,
        grad_glu: &Tensor,
        x: &Tensor,
        dgrad_glu: &Tensor,
        dx: &Tensor,
        dim: i64,
    ) -> Tensor {
        Tensor::f_glu_backward_jvp(grad_x, grad_glu, x, dgrad_glu, dx, dim).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_glu_backward_jvp_out`].
    pub fn glu_backward_jvp_out(
        out: &Tensor,
        grad_x: &Tensor,
        grad_glu: &Tensor,
        x: &Tensor,
        dgrad_glu: &Tensor,
        dx: &Tensor,
        dim: i64,
    ) -> Tensor {
        Tensor::f_glu_backward_jvp_out(out, grad_x, grad_glu, x, dgrad_glu, dx, dim).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_glu_jvp`].
    pub fn glu_jvp(glu: &Tensor, x: &Tensor, dx: &Tensor, dim: i64) -> Tensor {
        Tensor::f_glu_jvp(glu, x, dx, dim).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_glu_jvp_out`].
    pub fn glu_jvp_out(out: &Tensor, glu: &Tensor, x: &Tensor, dx: &Tensor, dim: i64) -> Tensor {
        Tensor::f_glu_jvp_out(out, glu, x, dx, dim).unwrap()
    }

    /// Panicking variant of [`Tensor::f_glu_out`], writing into `out`.
    pub fn glu_out(&self, out: &Tensor, dim: i64) -> Tensor {
        self.f_glu_out(out, dim).unwrap()
    }

    /// Panicking variant of [`Tensor::f_grad`].
    pub fn grad(&self) -> Tensor {
        self.f_grad().unwrap()
    }
9677
    /// Panicking variant of [`Tensor::f_greater`].
    pub fn greater<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_greater(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_greater_`] (takes `&mut self`).
    pub fn greater_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_greater_(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_greater_equal`].
    pub fn greater_equal<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_greater_equal(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_greater_equal_`] (takes `&mut self`).
    pub fn greater_equal_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_greater_equal_(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_greater_equal_scalar_out`], writing into `out`.
    pub fn greater_equal_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_greater_equal_scalar_out(out, other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_greater_equal_tensor`].
    pub fn greater_equal_tensor(&self, other: &Tensor) -> Tensor {
        self.f_greater_equal_tensor(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_greater_equal_tensor_`] (takes `&mut self`).
    pub fn greater_equal_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_greater_equal_tensor_(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_greater_equal_tensor_out`], writing into `out`.
    pub fn greater_equal_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_greater_equal_tensor_out(out, other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_greater_scalar_out`], writing into `out`.
    pub fn greater_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_greater_scalar_out(out, other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_greater_tensor`].
    pub fn greater_tensor(&self, other: &Tensor) -> Tensor {
        self.f_greater_tensor(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_greater_tensor_`] (takes `&mut self`).
    pub fn greater_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_greater_tensor_(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_greater_tensor_out`], writing into `out`.
    pub fn greater_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_greater_tensor_out(out, other).unwrap()
    }
9725
    /// Panicking variant of [`Tensor::f_grid_sampler`].
    pub fn grid_sampler(
        &self,
        grid: &Tensor,
        interpolation_mode: i64,
        padding_mode: i64,
        align_corners: bool,
    ) -> Tensor {
        self.f_grid_sampler(grid, interpolation_mode, padding_mode, align_corners).unwrap()
    }

    /// Panicking variant of [`Tensor::f_grid_sampler_2d`].
    pub fn grid_sampler_2d(
        &self,
        grid: &Tensor,
        interpolation_mode: i64,
        padding_mode: i64,
        align_corners: bool,
    ) -> Tensor {
        self.f_grid_sampler_2d(grid, interpolation_mode, padding_mode, align_corners).unwrap()
    }

    /// Panicking variant of [`Tensor::f_grid_sampler_2d_out`], writing into `out`.
    pub fn grid_sampler_2d_out(
        &self,
        out: &Tensor,
        grid: &Tensor,
        interpolation_mode: i64,
        padding_mode: i64,
        align_corners: bool,
    ) -> Tensor {
        self.f_grid_sampler_2d_out(out, grid, interpolation_mode, padding_mode, align_corners)
            .unwrap()
    }

    /// Panicking variant of [`Tensor::f_grid_sampler_3d`].
    pub fn grid_sampler_3d(
        &self,
        grid: &Tensor,
        interpolation_mode: i64,
        padding_mode: i64,
        align_corners: bool,
    ) -> Tensor {
        self.f_grid_sampler_3d(grid, interpolation_mode, padding_mode, align_corners).unwrap()
    }

    /// Panicking variant of [`Tensor::f_grid_sampler_3d_out`], writing into `out`.
    pub fn grid_sampler_3d_out(
        &self,
        out: &Tensor,
        grid: &Tensor,
        interpolation_mode: i64,
        padding_mode: i64,
        align_corners: bool,
    ) -> Tensor {
        self.f_grid_sampler_3d_out(out, grid, interpolation_mode, padding_mode, align_corners)
            .unwrap()
    }
9779
    /// Panicking variant of [`Tensor::f_group_norm`]; `weight`/`bias` are optional tensors.
    pub fn group_norm<T: Borrow<Tensor>>(
        &self,
        num_groups: i64,
        weight: Option<T>,
        bias: Option<T>,
        eps: f64,
        cudnn_enabled: bool,
    ) -> Tensor {
        self.f_group_norm(num_groups, weight, bias, eps, cudnn_enabled).unwrap()
    }

    /// Panicking variant of [`Tensor::f_gru`]; returns a pair of tensors.
    pub fn gru<T: Borrow<Tensor>>(
        &self,
        hx: &Tensor,
        params: &[T],
        has_biases: bool,
        num_layers: i64,
        dropout: f64,
        train: bool,
        bidirectional: bool,
        batch_first: bool,
    ) -> (Tensor, Tensor) {
        self.f_gru(hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first)
            .unwrap()
    }

    /// Panicking variant of [`Tensor::f_gru_cell`]; `b_ih`/`b_hh` are optional tensors.
    pub fn gru_cell<T: Borrow<Tensor>>(
        &self,
        hx: &Tensor,
        w_ih: &Tensor,
        w_hh: &Tensor,
        b_ih: Option<T>,
        b_hh: Option<T>,
    ) -> Tensor {
        self.f_gru_cell(hx, w_ih, w_hh, b_ih, b_hh).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_gru_data`]; returns a pair of tensors.
    pub fn gru_data<T: Borrow<Tensor>>(
        data: &Tensor,
        batch_sizes: &Tensor,
        hx: &Tensor,
        params: &[T],
        has_biases: bool,
        num_layers: i64,
        dropout: f64,
        train: bool,
        bidirectional: bool,
    ) -> (Tensor, Tensor) {
        Tensor::f_gru_data(
            data,
            batch_sizes,
            hx,
            params,
            has_biases,
            num_layers,
            dropout,
            train,
            bidirectional,
        )
        .unwrap()
    }
9841
    /// Panicking variant of [`Tensor::f_gt`].
    pub fn gt<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_gt(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_gt_`] (takes `&mut self`).
    pub fn gt_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_gt_(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_gt_scalar_out`], writing into `out`.
    pub fn gt_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_gt_scalar_out(out, other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_gt_tensor`].
    pub fn gt_tensor(&self, other: &Tensor) -> Tensor {
        self.f_gt_tensor(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_gt_tensor_`] (takes `&mut self`).
    pub fn gt_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_gt_tensor_(other).unwrap()
    }

    /// Panicking variant of [`Tensor::f_gt_tensor_out`], writing into `out`.
    pub fn gt_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_gt_tensor_out(out, other).unwrap()
    }
9865
    /// Associated-function panicking variant of [`Tensor::f_hamming_window`].
    pub fn hamming_window(window_length: i64, options: (Kind, Device)) -> Tensor {
        Tensor::f_hamming_window(window_length, options).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_hamming_window_out`].
    pub fn hamming_window_out(out: &Tensor, window_length: i64) -> Tensor {
        Tensor::f_hamming_window_out(out, window_length).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_hamming_window_periodic`].
    pub fn hamming_window_periodic(
        window_length: i64,
        periodic: bool,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_hamming_window_periodic(window_length, periodic, options).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_hamming_window_periodic_alpha`].
    pub fn hamming_window_periodic_alpha(
        window_length: i64,
        periodic: bool,
        alpha: f64,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_hamming_window_periodic_alpha(window_length, periodic, alpha, options).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_hamming_window_periodic_alpha_beta`].
    pub fn hamming_window_periodic_alpha_beta(
        window_length: i64,
        periodic: bool,
        alpha: f64,
        beta: f64,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_hamming_window_periodic_alpha_beta(window_length, periodic, alpha, beta, options)
            .unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_hamming_window_periodic_alpha_beta_out`].
    pub fn hamming_window_periodic_alpha_beta_out(
        out: &Tensor,
        window_length: i64,
        periodic: bool,
        alpha: f64,
        beta: f64,
    ) -> Tensor {
        Tensor::f_hamming_window_periodic_alpha_beta_out(out, window_length, periodic, alpha, beta)
            .unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_hamming_window_periodic_alpha_out`].
    pub fn hamming_window_periodic_alpha_out(
        out: &Tensor,
        window_length: i64,
        periodic: bool,
        alpha: f64,
    ) -> Tensor {
        Tensor::f_hamming_window_periodic_alpha_out(out, window_length, periodic, alpha).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_hamming_window_periodic_out`].
    pub fn hamming_window_periodic_out(out: &Tensor, window_length: i64, periodic: bool) -> Tensor {
        Tensor::f_hamming_window_periodic_out(out, window_length, periodic).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_hann_window`].
    pub fn hann_window(window_length: i64, options: (Kind, Device)) -> Tensor {
        Tensor::f_hann_window(window_length, options).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_hann_window_out`].
    pub fn hann_window_out(out: &Tensor, window_length: i64) -> Tensor {
        Tensor::f_hann_window_out(out, window_length).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_hann_window_periodic`].
    pub fn hann_window_periodic(
        window_length: i64,
        periodic: bool,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_hann_window_periodic(window_length, periodic, options).unwrap()
    }

    /// Associated-function panicking variant of [`Tensor::f_hann_window_periodic_out`].
    pub fn hann_window_periodic_out(out: &Tensor, window_length: i64, periodic: bool) -> Tensor {
        Tensor::f_hann_window_periodic_out(out, window_length, periodic).unwrap()
    }
9945
    /// Panicking variant of [`Tensor::f_hardshrink`].
    pub fn hardshrink(&self) -> Tensor {
        self.f_hardshrink().unwrap()
    }

    /// Panicking variant of [`Tensor::f_hardshrink_backward`].
    pub fn hardshrink_backward<S: Into<Scalar>>(&self, grad_out: &Tensor, lambd: S) -> Tensor {
        self.f_hardshrink_backward(grad_out, lambd).unwrap()
    }

    /// Panicking variant of [`Tensor::f_hardshrink_backward_grad_input`].
    pub fn hardshrink_backward_grad_input<S: Into<Scalar>>(
        &self,
        grad_input: &Tensor,
        grad_out: &Tensor,
        lambd: S,
    ) -> Tensor {
        self.f_hardshrink_backward_grad_input(grad_input, grad_out, lambd).unwrap()
    }

    /// Panicking variant of [`Tensor::f_hardshrink_out`], writing into `out`.
    pub fn hardshrink_out(&self, out: &Tensor) -> Tensor {
        self.f_hardshrink_out(out).unwrap()
    }

    /// Panicking variant of [`Tensor::f_hardsigmoid`].
    pub fn hardsigmoid(&self) -> Tensor {
        self.f_hardsigmoid().unwrap()
    }

    /// Panicking variant of [`Tensor::f_hardsigmoid_`] (takes `&mut self`).
    pub fn hardsigmoid_(&mut self) -> Tensor {
        self.f_hardsigmoid_().unwrap()
    }

    /// Panicking variant of [`Tensor::f_hardsigmoid_backward`].
    pub fn hardsigmoid_backward(&self, grad_output: &Tensor) -> Tensor {
        self.f_hardsigmoid_backward(grad_output).unwrap()
    }

    /// Panicking variant of [`Tensor::f_hardsigmoid_backward_grad_input`].
    pub fn hardsigmoid_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
    ) -> Tensor {
        self.f_hardsigmoid_backward_grad_input(grad_input, grad_output).unwrap()
    }

    /// Panicking variant of [`Tensor::f_hardsigmoid_out`], writing into `out`.
    pub fn hardsigmoid_out(&self, out: &Tensor) -> Tensor {
        self.f_hardsigmoid_out(out).unwrap()
    }
9990
    /// Panicking variant of [`Tensor::f_hardswish`].
    pub fn hardswish(&self) -> Tensor {
        self.f_hardswish().unwrap()
    }

    /// Panicking variant of [`Tensor::f_hardswish_`] (takes `&mut self`).
    pub fn hardswish_(&mut self) -> Tensor {
        self.f_hardswish_().unwrap()
    }

    /// Panicking variant of [`Tensor::f_hardswish_backward`].
    pub fn hardswish_backward(&self, grad_output: &Tensor) -> Tensor {
        self.f_hardswish_backward(grad_output).unwrap()
    }

    /// Panicking variant of [`Tensor::f_hardswish_backward_out`], writing into `out`.
    pub fn hardswish_backward_out(&self, out: &Tensor, grad_output: &Tensor) -> Tensor {
        self.f_hardswish_backward_out(out, grad_output).unwrap()
    }

    /// Panicking variant of [`Tensor::f_hardswish_out`], writing into `out`.
    pub fn hardswish_out(&self, out: &Tensor) -> Tensor {
        self.f_hardswish_out(out).unwrap()
    }

    /// Panicking variant of [`Tensor::f_hardtanh`].
    pub fn hardtanh(&self) -> Tensor {
        self.f_hardtanh().unwrap()
    }

    /// Panicking variant of [`Tensor::f_hardtanh_`] (takes `&mut self`).
    pub fn hardtanh_(&mut self) -> Tensor {
        self.f_hardtanh_().unwrap()
    }

    /// Panicking variant of [`Tensor::f_hardtanh_backward`].
    pub fn hardtanh_backward<S: Into<Scalar>>(
        &self,
        grad_output: &Tensor,
        min_val: S,
        max_val: S,
    ) -> Tensor {
        self.f_hardtanh_backward(grad_output, min_val, max_val).unwrap()
    }

    /// Panicking variant of [`Tensor::f_hardtanh_backward_grad_input`].
    pub fn hardtanh_backward_grad_input<S: Into<Scalar>>(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        min_val: S,
        max_val: S,
    ) -> Tensor {
        self.f_hardtanh_backward_grad_input(grad_input, grad_output, min_val, max_val).unwrap()
    }

    /// Panicking variant of [`Tensor::f_hardtanh_out`], writing into `out`.
    pub fn hardtanh_out(&self, out: &Tensor) -> Tensor {
        self.f_hardtanh_out(out).unwrap()
    }
10041
    /// Panicking variant of [`Tensor::f_heaviside`].
    pub fn heaviside(&self, values: &Tensor) -> Tensor {
        self.f_heaviside(values).unwrap()
    }

    /// Panicking variant of [`Tensor::f_heaviside_`] (takes `&mut self`).
    pub fn heaviside_(&mut self, values: &Tensor) -> Tensor {
        self.f_heaviside_(values).unwrap()
    }

    /// Panicking variant of [`Tensor::f_heaviside_out`], writing into `out`.
    pub fn heaviside_out(&self, out: &Tensor, values: &Tensor) -> Tensor {
        self.f_heaviside_out(out, values).unwrap()
    }

    /// Panicking variant of [`Tensor::f_hinge_embedding_loss`].
    pub fn hinge_embedding_loss(
        &self,
        target: &Tensor,
        margin: f64,
        reduction: crate::Reduction,
    ) -> Tensor {
        self.f_hinge_embedding_loss(target, margin, reduction).unwrap()
    }

    /// Panicking variant of [`Tensor::f_histc`].
    pub fn histc(&self, bins: i64) -> Tensor {
        self.f_histc(bins).unwrap()
    }

    /// Panicking variant of [`Tensor::f_histc_out`], writing into `out`.
    pub fn histc_out(&self, out: &Tensor, bins: i64) -> Tensor {
        self.f_histc_out(out, bins).unwrap()
    }
10070
10071    pub fn histogram<T: Borrow<Tensor>>(
10072        &self,
10073        bins: &Tensor,
10074        weight: Option<T>,
10075        density: bool,
10076    ) -> (Tensor, Tensor) {
10077        self.f_histogram(bins, weight, density).unwrap()
10078    }
10079
10080    pub fn histogram_bin_ct<T: Borrow<Tensor>>(
10081        &self,
10082        bins: i64,
10083        range: impl DoubleList,
10084        weight: Option<T>,
10085        density: bool,
10086    ) -> (Tensor, Tensor) {
10087        self.f_histogram_bin_ct(bins, range, weight, density).unwrap()
10088    }
10089
10090    pub fn histogram_bin_ct_out<T: Borrow<Tensor>>(
10091        &self,
10092        hist: &Tensor,
10093        bin_edges: &Tensor,
10094        bins: i64,
10095        range: impl DoubleList,
10096        weight: Option<T>,
10097        density: bool,
10098    ) -> (Tensor, Tensor) {
10099        self.f_histogram_bin_ct_out(hist, bin_edges, bins, range, weight, density).unwrap()
10100    }
10101
10102    pub fn histogram_bins_tensor_out<T: Borrow<Tensor>>(
10103        &self,
10104        hist: &Tensor,
10105        bin_edges: &Tensor,
10106        bins: &Tensor,
10107        weight: Option<T>,
10108        density: bool,
10109    ) -> (Tensor, Tensor) {
10110        self.f_histogram_bins_tensor_out(hist, bin_edges, bins, weight, density).unwrap()
10111    }
10112
10113    pub fn hsplit(&self, sections: i64) -> Vec<Tensor> {
10114        self.f_hsplit(sections).unwrap()
10115    }
10116
10117    pub fn hsplit_array(&self, indices: impl IntList) -> Vec<Tensor> {
10118        self.f_hsplit_array(indices).unwrap()
10119    }
10120
10121    pub fn hspmm(mat1: &Tensor, mat2: &Tensor) -> Tensor {
10122        Tensor::f_hspmm(mat1, mat2).unwrap()
10123    }
10124
10125    pub fn hspmm_out(out: &Tensor, mat1: &Tensor, mat2: &Tensor) -> Tensor {
10126        Tensor::f_hspmm_out(out, mat1, mat2).unwrap()
10127    }
10128
10129    pub fn hstack<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
10130        Tensor::f_hstack(tensors).unwrap()
10131    }
10132
10133    pub fn hstack_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T]) -> Tensor {
10134        Tensor::f_hstack_out(out, tensors).unwrap()
10135    }
10136
10137    pub fn huber_loss(&self, target: &Tensor, reduction: crate::Reduction, delta: f64) -> Tensor {
10138        self.f_huber_loss(target, reduction, delta).unwrap()
10139    }
10140
10141    pub fn huber_loss_backward(
10142        &self,
10143        grad_output: &Tensor,
10144        target: &Tensor,
10145        reduction: crate::Reduction,
10146        delta: f64,
10147    ) -> Tensor {
10148        self.f_huber_loss_backward(grad_output, target, reduction, delta).unwrap()
10149    }
10150
10151    pub fn huber_loss_backward_out(
10152        &self,
10153        grad_input: &Tensor,
10154        grad_output: &Tensor,
10155        target: &Tensor,
10156        reduction: crate::Reduction,
10157        delta: f64,
10158    ) -> Tensor {
10159        self.f_huber_loss_backward_out(grad_input, grad_output, target, reduction, delta).unwrap()
10160    }
10161
10162    pub fn huber_loss_out(
10163        &self,
10164        out: &Tensor,
10165        target: &Tensor,
10166        reduction: crate::Reduction,
10167        delta: f64,
10168    ) -> Tensor {
10169        self.f_huber_loss_out(out, target, reduction, delta).unwrap()
10170    }
10171
10172    pub fn hypot(&self, other: &Tensor) -> Tensor {
10173        self.f_hypot(other).unwrap()
10174    }
10175
10176    pub fn hypot_(&mut self, other: &Tensor) -> Tensor {
10177        self.f_hypot_(other).unwrap()
10178    }
10179
10180    pub fn hypot_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
10181        self.f_hypot_out(out, other).unwrap()
10182    }
10183
10184    pub fn i0(&self) -> Tensor {
10185        self.f_i0().unwrap()
10186    }
10187
10188    pub fn i0_(&mut self) -> Tensor {
10189        self.f_i0_().unwrap()
10190    }
10191
10192    pub fn i0_out(&self, out: &Tensor) -> Tensor {
10193        self.f_i0_out(out).unwrap()
10194    }
10195
10196    pub fn igamma(&self, other: &Tensor) -> Tensor {
10197        self.f_igamma(other).unwrap()
10198    }
10199
10200    pub fn igamma_(&mut self, other: &Tensor) -> Tensor {
10201        self.f_igamma_(other).unwrap()
10202    }
10203
10204    pub fn igamma_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
10205        self.f_igamma_out(out, other).unwrap()
10206    }
10207
10208    pub fn igammac(&self, other: &Tensor) -> Tensor {
10209        self.f_igammac(other).unwrap()
10210    }
10211
10212    pub fn igammac_(&mut self, other: &Tensor) -> Tensor {
10213        self.f_igammac_(other).unwrap()
10214    }
10215
10216    pub fn igammac_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
10217        self.f_igammac_out(out, other).unwrap()
10218    }
10219
10220    pub fn im2col(
10221        &self,
10222        kernel_size: impl IntList,
10223        dilation: impl IntList,
10224        padding: impl IntList,
10225        stride: impl IntList,
10226    ) -> Tensor {
10227        self.f_im2col(kernel_size, dilation, padding, stride).unwrap()
10228    }
10229
10230    pub fn im2col_out(
10231        &self,
10232        out: &Tensor,
10233        kernel_size: impl IntList,
10234        dilation: impl IntList,
10235        padding: impl IntList,
10236        stride: impl IntList,
10237    ) -> Tensor {
10238        self.f_im2col_out(out, kernel_size, dilation, padding, stride).unwrap()
10239    }
10240
    // Generated panicking wrappers (imag / index_* family / inner). Every
    // method delegates to the same-named fallible `f_*` function and unwraps
    // the result, so any error from the underlying call becomes a panic.
    // `&[Option<T>]` index lists allow `None` entries (per-dimension slots).
    pub fn imag(&self) -> Tensor {
        self.f_imag().unwrap()
    }

    pub fn index<T: Borrow<Tensor>>(&self, indices: &[Option<T>]) -> Tensor {
        self.f_index(indices).unwrap()
    }

    pub fn index_add(&self, dim: i64, index: &Tensor, source: &Tensor) -> Tensor {
        self.f_index_add(dim, index, source).unwrap()
    }

    pub fn index_add_(&mut self, dim: i64, index: &Tensor, source: &Tensor) -> Tensor {
        self.f_index_add_(dim, index, source).unwrap()
    }

    pub fn index_add_out(&self, out: &Tensor, dim: i64, index: &Tensor, source: &Tensor) -> Tensor {
        self.f_index_add_out(out, dim, index, source).unwrap()
    }

    pub fn index_copy(&self, dim: i64, index: &Tensor, source: &Tensor) -> Tensor {
        self.f_index_copy(dim, index, source).unwrap()
    }

    pub fn index_copy_(&mut self, dim: i64, index: &Tensor, source: &Tensor) -> Tensor {
        self.f_index_copy_(dim, index, source).unwrap()
    }

    pub fn index_copy_out(
        &self,
        out: &Tensor,
        dim: i64,
        index: &Tensor,
        source: &Tensor,
    ) -> Tensor {
        self.f_index_copy_out(out, dim, index, source).unwrap()
    }

    pub fn index_fill<S: Into<Scalar>>(&self, dim: i64, index: &Tensor, value: S) -> Tensor {
        self.f_index_fill(dim, index, value).unwrap()
    }

    pub fn index_fill_<S: Into<Scalar>>(&mut self, dim: i64, index: &Tensor, value: S) -> Tensor {
        self.f_index_fill_(dim, index, value).unwrap()
    }

    pub fn index_fill_int_scalar_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        dim: i64,
        index: &Tensor,
        value: S,
    ) -> Tensor {
        self.f_index_fill_int_scalar_out(out, dim, index, value).unwrap()
    }

    pub fn index_fill_int_tensor(&self, dim: i64, index: &Tensor, value: &Tensor) -> Tensor {
        self.f_index_fill_int_tensor(dim, index, value).unwrap()
    }

    pub fn index_fill_int_tensor_(&mut self, dim: i64, index: &Tensor, value: &Tensor) -> Tensor {
        self.f_index_fill_int_tensor_(dim, index, value).unwrap()
    }

    pub fn index_fill_int_tensor_out(
        &self,
        out: &Tensor,
        dim: i64,
        index: &Tensor,
        value: &Tensor,
    ) -> Tensor {
        self.f_index_fill_int_tensor_out(out, dim, index, value).unwrap()
    }

    pub fn index_put<T: Borrow<Tensor>>(
        &self,
        indices: &[Option<T>],
        values: &Tensor,
        accumulate: bool,
    ) -> Tensor {
        self.f_index_put(indices, values, accumulate).unwrap()
    }

    pub fn index_put_<T: Borrow<Tensor>>(
        &mut self,
        indices: &[Option<T>],
        values: &Tensor,
        accumulate: bool,
    ) -> Tensor {
        self.f_index_put_(indices, values, accumulate).unwrap()
    }

    pub fn index_put_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        indices: &[Option<T>],
        values: &Tensor,
        accumulate: bool,
    ) -> Tensor {
        self.f_index_put_out(out, indices, values, accumulate).unwrap()
    }

    // `reduce` is a stringly-typed mode selector passed straight through to
    // the underlying op (valid values are defined by libtorch, not here).
    pub fn index_reduce(
        &self,
        dim: i64,
        index: &Tensor,
        source: &Tensor,
        reduce: &str,
        include_self: bool,
    ) -> Tensor {
        self.f_index_reduce(dim, index, source, reduce, include_self).unwrap()
    }

    pub fn index_reduce_(
        &mut self,
        dim: i64,
        index: &Tensor,
        source: &Tensor,
        reduce: &str,
        include_self: bool,
    ) -> Tensor {
        self.f_index_reduce_(dim, index, source, reduce, include_self).unwrap()
    }

    pub fn index_reduce_out(
        &self,
        out: &Tensor,
        dim: i64,
        index: &Tensor,
        source: &Tensor,
        reduce: &str,
        include_self: bool,
    ) -> Tensor {
        self.f_index_reduce_out(out, dim, index, source, reduce, include_self).unwrap()
    }

    pub fn index_select(&self, dim: i64, index: &Tensor) -> Tensor {
        self.f_index_select(dim, index).unwrap()
    }

    pub fn index_select_backward(
        grad: &Tensor,
        self_sizes: impl IntList,
        dim: i64,
        index: &Tensor,
    ) -> Tensor {
        Tensor::f_index_select_backward(grad, self_sizes, dim, index).unwrap()
    }

    pub fn index_select_out(&self, out: &Tensor, dim: i64, index: &Tensor) -> Tensor {
        self.f_index_select_out(out, dim, index).unwrap()
    }

    pub fn index_tensor_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        indices: &[Option<T>],
    ) -> Tensor {
        self.f_index_tensor_out(out, indices).unwrap()
    }

    pub fn indices(&self) -> Tensor {
        self.f_indices().unwrap()
    }

    pub fn indices_copy(&self) -> Tensor {
        self.f_indices_copy().unwrap()
    }

    pub fn indices_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_indices_copy_out(out).unwrap()
    }

    pub fn infinitely_differentiable_gelu_backward(&self, grad: &Tensor) -> Tensor {
        self.f_infinitely_differentiable_gelu_backward(grad).unwrap()
    }

    pub fn inner(&self, other: &Tensor) -> Tensor {
        self.f_inner(other).unwrap()
    }

    pub fn inner_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_inner_out(out, other).unwrap()
    }
10425
    // Generated panicking wrappers (instance_norm / inverse / is_* / isin /
    // is{inf,nan,neginf,posinf,real}). Each forwards to the fallible `f_*`
    // function of the same name and unwraps, panicking on error.
    pub fn instance_norm<T: Borrow<Tensor>>(
        &self,
        weight: Option<T>,
        bias: Option<T>,
        running_mean: Option<T>,
        running_var: Option<T>,
        use_input_stats: bool,
        momentum: f64,
        eps: f64,
        cudnn_enabled: bool,
    ) -> Tensor {
        self.f_instance_norm(
            weight,
            bias,
            running_mean,
            running_var,
            use_input_stats,
            momentum,
            eps,
            cudnn_enabled,
        )
        .unwrap()
    }

    pub fn int_repr(&self) -> Tensor {
        self.f_int_repr().unwrap()
    }

    pub fn int_repr_out(&self, out: &Tensor) -> Tensor {
        self.f_int_repr_out(out).unwrap()
    }

    pub fn inverse(&self) -> Tensor {
        self.f_inverse().unwrap()
    }

    pub fn inverse_out(&self, out: &Tensor) -> Tensor {
        self.f_inverse_out(out).unwrap()
    }

    // Scalar property queries: these return a plain `bool`, not a tensor.
    pub fn is_coalesced(&self) -> bool {
        self.f_is_coalesced().unwrap()
    }

    pub fn is_complex(&self) -> bool {
        self.f_is_complex().unwrap()
    }

    pub fn is_conj(&self) -> bool {
        self.f_is_conj().unwrap()
    }

    pub fn is_distributed(&self) -> bool {
        self.f_is_distributed().unwrap()
    }

    pub fn is_floating_point(&self) -> bool {
        self.f_is_floating_point().unwrap()
    }

    pub fn is_inference(&self) -> bool {
        self.f_is_inference().unwrap()
    }

    pub fn is_leaf(&self) -> bool {
        self.f_is_leaf().unwrap()
    }

    pub fn is_neg(&self) -> bool {
        self.f_is_neg().unwrap()
    }

    pub fn is_nonzero(&self) -> bool {
        self.f_is_nonzero().unwrap()
    }

    pub fn is_pinned(&self, device: Device) -> bool {
        self.f_is_pinned(device).unwrap()
    }

    pub fn is_same_size(&self, other: &Tensor) -> bool {
        self.f_is_same_size(other).unwrap()
    }

    pub fn is_set_to(&self, tensor: &Tensor) -> bool {
        self.f_is_set_to(tensor).unwrap()
    }

    pub fn is_signed(&self) -> bool {
        self.f_is_signed().unwrap()
    }

    pub fn is_vulkan_available() -> bool {
        Tensor::f_is_vulkan_available().unwrap()
    }

    pub fn isclose(&self, other: &Tensor, rtol: f64, atol: f64, equal_nan: bool) -> Tensor {
        self.f_isclose(other, rtol, atol, equal_nan).unwrap()
    }

    pub fn isfinite(&self) -> Tensor {
        self.f_isfinite().unwrap()
    }

    // The isin overload set is exposed as separate associated functions,
    // one per scalar/tensor argument combination.
    pub fn isin(
        elements: &Tensor,
        test_elements: &Tensor,
        assume_unique: bool,
        invert: bool,
    ) -> Tensor {
        Tensor::f_isin(elements, test_elements, assume_unique, invert).unwrap()
    }

    pub fn isin_scalar_tensor<S: Into<Scalar>>(
        element: S,
        test_elements: &Tensor,
        assume_unique: bool,
        invert: bool,
    ) -> Tensor {
        Tensor::f_isin_scalar_tensor(element, test_elements, assume_unique, invert).unwrap()
    }

    pub fn isin_scalar_tensor_out<S: Into<Scalar>>(
        out: &Tensor,
        element: S,
        test_elements: &Tensor,
        assume_unique: bool,
        invert: bool,
    ) -> Tensor {
        Tensor::f_isin_scalar_tensor_out(out, element, test_elements, assume_unique, invert)
            .unwrap()
    }

    pub fn isin_tensor_scalar<S: Into<Scalar>>(
        elements: &Tensor,
        test_element: S,
        assume_unique: bool,
        invert: bool,
    ) -> Tensor {
        Tensor::f_isin_tensor_scalar(elements, test_element, assume_unique, invert).unwrap()
    }

    pub fn isin_tensor_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        elements: &Tensor,
        test_element: S,
        assume_unique: bool,
        invert: bool,
    ) -> Tensor {
        Tensor::f_isin_tensor_scalar_out(out, elements, test_element, assume_unique, invert)
            .unwrap()
    }

    pub fn isin_tensor_tensor_out(
        out: &Tensor,
        elements: &Tensor,
        test_elements: &Tensor,
        assume_unique: bool,
        invert: bool,
    ) -> Tensor {
        Tensor::f_isin_tensor_tensor_out(out, elements, test_elements, assume_unique, invert)
            .unwrap()
    }

    pub fn isinf(&self) -> Tensor {
        self.f_isinf().unwrap()
    }

    pub fn isinf_out(&self, out: &Tensor) -> Tensor {
        self.f_isinf_out(out).unwrap()
    }

    pub fn isnan(&self) -> Tensor {
        self.f_isnan().unwrap()
    }

    pub fn isnan_out(&self, out: &Tensor) -> Tensor {
        self.f_isnan_out(out).unwrap()
    }

    pub fn isneginf(&self) -> Tensor {
        self.f_isneginf().unwrap()
    }

    pub fn isneginf_out(&self, out: &Tensor) -> Tensor {
        self.f_isneginf_out(out).unwrap()
    }

    pub fn isposinf(&self) -> Tensor {
        self.f_isposinf().unwrap()
    }

    pub fn isposinf_out(&self, out: &Tensor) -> Tensor {
        self.f_isposinf_out(out).unwrap()
    }

    pub fn isreal(&self) -> Tensor {
        self.f_isreal().unwrap()
    }
10625
    // Generated panicking wrappers (istft / kaiser_window / kl_div / kron /
    // kthvalue / l1_loss / layer_norm / lcm / ldexp). Each forwards to the
    // same-named fallible `f_*` function and unwraps, panicking on error.
    // `impl Into<Option<i64>>` parameters accept either a bare i64 or None.
    pub fn istft<T: Borrow<Tensor>>(
        &self,
        n_fft: i64,
        hop_length: impl Into<Option<i64>>,
        win_length: impl Into<Option<i64>>,
        window: Option<T>,
        center: bool,
        normalized: bool,
        onesided: bool,
        length: impl Into<Option<i64>>,
        return_complex: bool,
    ) -> Tensor {
        self.f_istft(
            n_fft,
            hop_length,
            win_length,
            window,
            center,
            normalized,
            onesided,
            length,
            return_complex,
        )
        .unwrap()
    }

    // Factory functions: `options` selects the (Kind, Device) of the new
    // tensor; the `*_out` variants write into `out` and take no options.
    pub fn kaiser_window(window_length: i64, options: (Kind, Device)) -> Tensor {
        Tensor::f_kaiser_window(window_length, options).unwrap()
    }

    pub fn kaiser_window_beta(
        window_length: i64,
        periodic: bool,
        beta: f64,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_kaiser_window_beta(window_length, periodic, beta, options).unwrap()
    }

    pub fn kaiser_window_beta_out(
        out: &Tensor,
        window_length: i64,
        periodic: bool,
        beta: f64,
    ) -> Tensor {
        Tensor::f_kaiser_window_beta_out(out, window_length, periodic, beta).unwrap()
    }

    pub fn kaiser_window_out(out: &Tensor, window_length: i64) -> Tensor {
        Tensor::f_kaiser_window_out(out, window_length).unwrap()
    }

    pub fn kaiser_window_periodic(
        window_length: i64,
        periodic: bool,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_kaiser_window_periodic(window_length, periodic, options).unwrap()
    }

    pub fn kaiser_window_periodic_out(out: &Tensor, window_length: i64, periodic: bool) -> Tensor {
        Tensor::f_kaiser_window_periodic_out(out, window_length, periodic).unwrap()
    }

    pub fn kl_div(&self, target: &Tensor, reduction: crate::Reduction, log_target: bool) -> Tensor {
        self.f_kl_div(target, reduction, log_target).unwrap()
    }

    pub fn kron(&self, other: &Tensor) -> Tensor {
        self.f_kron(other).unwrap()
    }

    pub fn kron_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_kron_out(out, other).unwrap()
    }

    // Returns a `(values, indices)` pair.
    pub fn kthvalue(&self, k: i64, dim: i64, keepdim: bool) -> (Tensor, Tensor) {
        self.f_kthvalue(k, dim, keepdim).unwrap()
    }

    pub fn kthvalue_values(
        &self,
        values: &Tensor,
        indices: &Tensor,
        k: i64,
        dim: i64,
        keepdim: bool,
    ) -> (Tensor, Tensor) {
        self.f_kthvalue_values(values, indices, k, dim, keepdim).unwrap()
    }

    pub fn l1_loss(&self, target: &Tensor, reduction: crate::Reduction) -> Tensor {
        self.f_l1_loss(target, reduction).unwrap()
    }

    pub fn layer_norm<T: Borrow<Tensor>>(
        &self,
        normalized_shape: impl IntList,
        weight: Option<T>,
        bias: Option<T>,
        eps: f64,
        cudnn_enable: bool,
    ) -> Tensor {
        self.f_layer_norm(normalized_shape, weight, bias, eps, cudnn_enable).unwrap()
    }

    pub fn lcm(&self, other: &Tensor) -> Tensor {
        self.f_lcm(other).unwrap()
    }

    pub fn lcm_(&mut self, other: &Tensor) -> Tensor {
        self.f_lcm_(other).unwrap()
    }

    pub fn lcm_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_lcm_out(out, other).unwrap()
    }

    pub fn ldexp(&self, other: &Tensor) -> Tensor {
        self.f_ldexp(other).unwrap()
    }

    pub fn ldexp_(&mut self, other: &Tensor) -> Tensor {
        self.f_ldexp_(other).unwrap()
    }

    pub fn ldexp_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_ldexp_out(out, other).unwrap()
    }
10755
    // Generated panicking wrappers (le / leaky_relu / lerp / less /
    // less_equal / lgamma / lift). Each forwards to the same-named fallible
    // `f_*` function and unwraps, panicking on error. Comparison ops come in
    // scalar (`S: Into<Scalar>`) and `*_tensor` flavours, each with in-place
    // (trailing `_`) and `*_out` destination variants.
    pub fn le<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_le(other).unwrap()
    }

    pub fn le_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_le_(other).unwrap()
    }

    pub fn le_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_le_scalar_out(out, other).unwrap()
    }

    pub fn le_tensor(&self, other: &Tensor) -> Tensor {
        self.f_le_tensor(other).unwrap()
    }

    pub fn le_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_le_tensor_(other).unwrap()
    }

    pub fn le_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_le_tensor_out(out, other).unwrap()
    }

    pub fn leaky_relu(&self) -> Tensor {
        self.f_leaky_relu().unwrap()
    }

    pub fn leaky_relu_(&mut self) -> Tensor {
        self.f_leaky_relu_().unwrap()
    }

    pub fn leaky_relu_backward<S: Into<Scalar>>(
        &self,
        grad_output: &Tensor,
        negative_slope: S,
        self_is_result: bool,
    ) -> Tensor {
        self.f_leaky_relu_backward(grad_output, negative_slope, self_is_result).unwrap()
    }

    pub fn leaky_relu_backward_grad_input<S: Into<Scalar>>(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        negative_slope: S,
        self_is_result: bool,
    ) -> Tensor {
        self.f_leaky_relu_backward_grad_input(
            grad_input,
            grad_output,
            negative_slope,
            self_is_result,
        )
        .unwrap()
    }

    pub fn leaky_relu_out(&self, out: &Tensor) -> Tensor {
        self.f_leaky_relu_out(out).unwrap()
    }

    pub fn lerp<S: Into<Scalar>>(&self, end: &Tensor, weight: S) -> Tensor {
        self.f_lerp(end, weight).unwrap()
    }

    pub fn lerp_<S: Into<Scalar>>(&mut self, end: &Tensor, weight: S) -> Tensor {
        self.f_lerp_(end, weight).unwrap()
    }

    pub fn lerp_scalar_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        end: &Tensor,
        weight: S,
    ) -> Tensor {
        self.f_lerp_scalar_out(out, end, weight).unwrap()
    }

    pub fn lerp_tensor(&self, end: &Tensor, weight: &Tensor) -> Tensor {
        self.f_lerp_tensor(end, weight).unwrap()
    }

    pub fn lerp_tensor_(&mut self, end: &Tensor, weight: &Tensor) -> Tensor {
        self.f_lerp_tensor_(end, weight).unwrap()
    }

    pub fn lerp_tensor_out(&self, out: &Tensor, end: &Tensor, weight: &Tensor) -> Tensor {
        self.f_lerp_tensor_out(out, end, weight).unwrap()
    }

    pub fn less<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_less(other).unwrap()
    }

    pub fn less_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_less_(other).unwrap()
    }

    pub fn less_equal<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_less_equal(other).unwrap()
    }

    pub fn less_equal_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_less_equal_(other).unwrap()
    }

    pub fn less_equal_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_less_equal_scalar_out(out, other).unwrap()
    }

    pub fn less_equal_tensor(&self, other: &Tensor) -> Tensor {
        self.f_less_equal_tensor(other).unwrap()
    }

    pub fn less_equal_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_less_equal_tensor_(other).unwrap()
    }

    pub fn less_equal_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_less_equal_tensor_out(out, other).unwrap()
    }

    pub fn less_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_less_scalar_out(out, other).unwrap()
    }

    pub fn less_tensor(&self, other: &Tensor) -> Tensor {
        self.f_less_tensor(other).unwrap()
    }

    pub fn less_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_less_tensor_(other).unwrap()
    }

    pub fn less_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_less_tensor_out(out, other).unwrap()
    }

    pub fn lgamma(&self) -> Tensor {
        self.f_lgamma().unwrap()
    }

    pub fn lgamma_(&mut self) -> Tensor {
        self.f_lgamma_().unwrap()
    }

    pub fn lgamma_out(&self, out: &Tensor) -> Tensor {
        self.f_lgamma_out(out).unwrap()
    }

    pub fn lift(&self) -> Tensor {
        self.f_lift().unwrap()
    }

    pub fn lift_fresh(&self) -> Tensor {
        self.f_lift_fresh().unwrap()
    }

    pub fn lift_fresh_copy(&self) -> Tensor {
        self.f_lift_fresh_copy().unwrap()
    }

    pub fn lift_fresh_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_lift_fresh_copy_out(out).unwrap()
    }

    pub fn lift_out(&self, out: &Tensor) -> Tensor {
        self.f_lift_out(out).unwrap()
    }
10925
10926    pub fn linalg_cholesky(&self, upper: bool) -> Tensor {
10927        self.f_linalg_cholesky(upper).unwrap()
10928    }
10929
10930    pub fn linalg_cholesky_ex(&self, upper: bool, check_errors: bool) -> (Tensor, Tensor) {
10931        self.f_linalg_cholesky_ex(upper, check_errors).unwrap()
10932    }
10933
10934    pub fn linalg_cholesky_ex_l(
10935        &self,
10936        l: &Tensor,
10937        info: &Tensor,
10938        upper: bool,
10939        check_errors: bool,
10940    ) -> (Tensor, Tensor) {
10941        self.f_linalg_cholesky_ex_l(l, info, upper, check_errors).unwrap()
10942    }
10943
10944    pub fn linalg_cholesky_out(&self, out: &Tensor, upper: bool) -> Tensor {
10945        self.f_linalg_cholesky_out(out, upper).unwrap()
10946    }
10947
10948    pub fn linalg_cond<S: Into<Scalar>>(&self, p: S) -> Tensor {
10949        self.f_linalg_cond(p).unwrap()
10950    }
10951
10952    pub fn linalg_cond_out<S: Into<Scalar>>(&self, out: &Tensor, p: S) -> Tensor {
10953        self.f_linalg_cond_out(out, p).unwrap()
10954    }
10955
10956    pub fn linalg_cond_p_str(&self, p: &str) -> Tensor {
10957        self.f_linalg_cond_p_str(p).unwrap()
10958    }
10959
10960    pub fn linalg_cond_p_str_out(&self, out: &Tensor, p: &str) -> Tensor {
10961        self.f_linalg_cond_p_str_out(out, p).unwrap()
10962    }
10963
10964    pub fn linalg_cross(&self, other: &Tensor, dim: i64) -> Tensor {
10965        self.f_linalg_cross(other, dim).unwrap()
10966    }
10967
10968    pub fn linalg_cross_out(&self, out: &Tensor, other: &Tensor, dim: i64) -> Tensor {
10969        self.f_linalg_cross_out(out, other, dim).unwrap()
10970    }
10971
10972    pub fn linalg_det(a: &Tensor) -> Tensor {
10973        Tensor::f_linalg_det(a).unwrap()
10974    }
10975
    // NOTE(review): this file is auto-generated — these comments should be ported to
    // the generator template, or they will be lost on the next regeneration.
    //
    // Panicking convenience wrappers for `torch.linalg` determinant / eigen /
    // Householder / inversion / LDL-factorization ops. Each method forwards its
    // arguments unchanged to the fallible `f_`-prefixed twin and `unwrap()`s, so a
    // backend error aborts the process with a panic. `*_out` variants additionally
    // take pre-existing result tensor(s) — presumably written in place, mirroring the
    // libtorch `_out` convention (confirm in the `f_*` implementations).
    pub fn linalg_det_out(out: &Tensor, a: &Tensor) -> Tensor {
        Tensor::f_linalg_det_out(out, a).unwrap()
    }

    pub fn linalg_diagonal(a: &Tensor, offset: i64, dim1: i64, dim2: i64) -> Tensor {
        Tensor::f_linalg_diagonal(a, offset, dim1, dim2).unwrap()
    }

    pub fn linalg_eig(&self) -> (Tensor, Tensor) {
        self.f_linalg_eig().unwrap()
    }

    pub fn linalg_eig_out(&self, eigenvalues: &Tensor, eigenvectors: &Tensor) -> (Tensor, Tensor) {
        self.f_linalg_eig_out(eigenvalues, eigenvectors).unwrap()
    }

    pub fn linalg_eigh(&self, uplo: &str) -> (Tensor, Tensor) {
        self.f_linalg_eigh(uplo).unwrap()
    }

    pub fn linalg_eigh_eigvals(
        &self,
        eigvals: &Tensor,
        eigvecs: &Tensor,
        uplo: &str,
    ) -> (Tensor, Tensor) {
        self.f_linalg_eigh_eigvals(eigvals, eigvecs, uplo).unwrap()
    }

    pub fn linalg_eigvals(&self) -> Tensor {
        self.f_linalg_eigvals().unwrap()
    }

    pub fn linalg_eigvals_out(&self, out: &Tensor) -> Tensor {
        self.f_linalg_eigvals_out(out).unwrap()
    }

    pub fn linalg_eigvalsh(&self, uplo: &str) -> Tensor {
        self.f_linalg_eigvalsh(uplo).unwrap()
    }

    pub fn linalg_eigvalsh_out(&self, out: &Tensor, uplo: &str) -> Tensor {
        self.f_linalg_eigvalsh_out(out, uplo).unwrap()
    }

    pub fn linalg_householder_product(&self, tau: &Tensor) -> Tensor {
        self.f_linalg_householder_product(tau).unwrap()
    }

    pub fn linalg_householder_product_out(&self, out: &Tensor, tau: &Tensor) -> Tensor {
        self.f_linalg_householder_product_out(out, tau).unwrap()
    }

    pub fn linalg_inv(a: &Tensor) -> Tensor {
        Tensor::f_linalg_inv(a).unwrap()
    }

    pub fn linalg_inv_ex(a: &Tensor, check_errors: bool) -> (Tensor, Tensor) {
        Tensor::f_linalg_inv_ex(a, check_errors).unwrap()
    }

    pub fn linalg_inv_ex_inverse(
        inverse: &Tensor,
        info: &Tensor,
        a: &Tensor,
        check_errors: bool,
    ) -> (Tensor, Tensor) {
        Tensor::f_linalg_inv_ex_inverse(inverse, info, a, check_errors).unwrap()
    }

    pub fn linalg_inv_out(out: &Tensor, a: &Tensor) -> Tensor {
        Tensor::f_linalg_inv_out(out, a).unwrap()
    }
11049
    // Panicking wrappers for the `torch.linalg` LDL factor/solve family: each
    // forwards to the matching `f_linalg_ldl_*` method and panics (`unwrap`) on
    // error. The `_ex` variants additionally return/accept an `info` tensor and a
    // `check_errors` flag; `*_out` variants take caller-provided result tensors.
    pub fn linalg_ldl_factor(&self, hermitian: bool) -> (Tensor, Tensor) {
        self.f_linalg_ldl_factor(hermitian).unwrap()
    }

    pub fn linalg_ldl_factor_ex(
        &self,
        hermitian: bool,
        check_errors: bool,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_linalg_ldl_factor_ex(hermitian, check_errors).unwrap()
    }

    pub fn linalg_ldl_factor_ex_out(
        &self,
        ld: &Tensor,
        pivots: &Tensor,
        info: &Tensor,
        hermitian: bool,
        check_errors: bool,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_linalg_ldl_factor_ex_out(ld, pivots, info, hermitian, check_errors).unwrap()
    }

    pub fn linalg_ldl_factor_out(
        &self,
        ld: &Tensor,
        pivots: &Tensor,
        hermitian: bool,
    ) -> (Tensor, Tensor) {
        self.f_linalg_ldl_factor_out(ld, pivots, hermitian).unwrap()
    }

    pub fn linalg_ldl_solve(ld: &Tensor, pivots: &Tensor, b: &Tensor, hermitian: bool) -> Tensor {
        Tensor::f_linalg_ldl_solve(ld, pivots, b, hermitian).unwrap()
    }

    pub fn linalg_ldl_solve_out(
        out: &Tensor,
        ld: &Tensor,
        pivots: &Tensor,
        b: &Tensor,
        hermitian: bool,
    ) -> Tensor {
        Tensor::f_linalg_ldl_solve_out(out, ld, pivots, b, hermitian).unwrap()
    }
11095
    // Panicking wrappers for `torch.linalg` least-squares and LU factor/solve ops:
    // thin forwards to the fallible `f_linalg_lstsq*` / `f_linalg_lu*` methods,
    // aborting via `unwrap()` on error. `*_out` variants take caller-provided result
    // tensors; `_ex` variants carry `info`/`check_errors` for error reporting.
    pub fn linalg_lstsq(
        &self,
        b: &Tensor,
        rcond: impl Into<Option<f64>>,
        driver: &str,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        self.f_linalg_lstsq(b, rcond, driver).unwrap()
    }

    pub fn linalg_lstsq_out(
        &self,
        solution: &Tensor,
        residuals: &Tensor,
        rank: &Tensor,
        singular_values: &Tensor,
        b: &Tensor,
        rcond: impl Into<Option<f64>>,
        driver: &str,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        self.f_linalg_lstsq_out(solution, residuals, rank, singular_values, b, rcond, driver)
            .unwrap()
    }

    pub fn linalg_lu(a: &Tensor, pivot: bool) -> (Tensor, Tensor, Tensor) {
        Tensor::f_linalg_lu(a, pivot).unwrap()
    }

    pub fn linalg_lu_factor(a: &Tensor, pivot: bool) -> (Tensor, Tensor) {
        Tensor::f_linalg_lu_factor(a, pivot).unwrap()
    }

    pub fn linalg_lu_factor_ex(
        a: &Tensor,
        pivot: bool,
        check_errors: bool,
    ) -> (Tensor, Tensor, Tensor) {
        Tensor::f_linalg_lu_factor_ex(a, pivot, check_errors).unwrap()
    }

    pub fn linalg_lu_factor_ex_out(
        lu: &Tensor,
        pivots: &Tensor,
        info: &Tensor,
        a: &Tensor,
        pivot: bool,
        check_errors: bool,
    ) -> (Tensor, Tensor, Tensor) {
        Tensor::f_linalg_lu_factor_ex_out(lu, pivots, info, a, pivot, check_errors).unwrap()
    }

    pub fn linalg_lu_factor_out(
        lu: &Tensor,
        pivots: &Tensor,
        a: &Tensor,
        pivot: bool,
    ) -> (Tensor, Tensor) {
        Tensor::f_linalg_lu_factor_out(lu, pivots, a, pivot).unwrap()
    }

    pub fn linalg_lu_out(
        p: &Tensor,
        l: &Tensor,
        u: &Tensor,
        a: &Tensor,
        pivot: bool,
    ) -> (Tensor, Tensor, Tensor) {
        Tensor::f_linalg_lu_out(p, l, u, a, pivot).unwrap()
    }

    pub fn linalg_lu_solve(
        lu: &Tensor,
        pivots: &Tensor,
        b: &Tensor,
        left: bool,
        adjoint: bool,
    ) -> Tensor {
        Tensor::f_linalg_lu_solve(lu, pivots, b, left, adjoint).unwrap()
    }

    pub fn linalg_lu_solve_out(
        out: &Tensor,
        lu: &Tensor,
        pivots: &Tensor,
        b: &Tensor,
        left: bool,
        adjoint: bool,
    ) -> Tensor {
        Tensor::f_linalg_lu_solve_out(out, lu, pivots, b, left, adjoint).unwrap()
    }
11185
    // Panicking wrappers for `torch.linalg` matmul / matrix_exp / matrix_power /
    // matrix_rank / multi_dot / norm ops: each forwards to its `f_`-prefixed
    // fallible twin and `unwrap()`s. The `_atol_rtol_float` vs `_atol_rtol_tensor`
    // and `_ord_str` suffixes are generated overload disambiguations (scalar vs
    // tensor tolerances, scalar vs string norm order).
    pub fn linalg_matmul(&self, other: &Tensor) -> Tensor {
        self.f_linalg_matmul(other).unwrap()
    }

    pub fn linalg_matmul_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_linalg_matmul_out(out, other).unwrap()
    }

    pub fn linalg_matrix_exp(&self) -> Tensor {
        self.f_linalg_matrix_exp().unwrap()
    }

    pub fn linalg_matrix_exp_out(&self, out: &Tensor) -> Tensor {
        self.f_linalg_matrix_exp_out(out).unwrap()
    }

    pub fn linalg_matrix_power(&self, n: i64) -> Tensor {
        self.f_linalg_matrix_power(n).unwrap()
    }

    pub fn linalg_matrix_power_out(&self, out: &Tensor, n: i64) -> Tensor {
        self.f_linalg_matrix_power_out(out, n).unwrap()
    }

    pub fn linalg_matrix_rank(&self, tol: f64, hermitian: bool) -> Tensor {
        self.f_linalg_matrix_rank(tol, hermitian).unwrap()
    }

    pub fn linalg_matrix_rank_atol_rtol_float(
        &self,
        atol: impl Into<Option<f64>>,
        rtol: impl Into<Option<f64>>,
        hermitian: bool,
    ) -> Tensor {
        self.f_linalg_matrix_rank_atol_rtol_float(atol, rtol, hermitian).unwrap()
    }

    pub fn linalg_matrix_rank_atol_rtol_float_out(
        &self,
        out: &Tensor,
        atol: impl Into<Option<f64>>,
        rtol: impl Into<Option<f64>>,
        hermitian: bool,
    ) -> Tensor {
        self.f_linalg_matrix_rank_atol_rtol_float_out(out, atol, rtol, hermitian).unwrap()
    }

    pub fn linalg_matrix_rank_atol_rtol_tensor<T: Borrow<Tensor>>(
        &self,
        atol: Option<T>,
        rtol: Option<T>,
        hermitian: bool,
    ) -> Tensor {
        self.f_linalg_matrix_rank_atol_rtol_tensor(atol, rtol, hermitian).unwrap()
    }

    pub fn linalg_matrix_rank_atol_rtol_tensor_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        atol: Option<T>,
        rtol: Option<T>,
        hermitian: bool,
    ) -> Tensor {
        self.f_linalg_matrix_rank_atol_rtol_tensor_out(out, atol, rtol, hermitian).unwrap()
    }

    pub fn linalg_matrix_rank_out(&self, out: &Tensor, tol: f64, hermitian: bool) -> Tensor {
        self.f_linalg_matrix_rank_out(out, tol, hermitian).unwrap()
    }

    pub fn linalg_matrix_rank_out_tol_tensor(
        &self,
        out: &Tensor,
        tol: &Tensor,
        hermitian: bool,
    ) -> Tensor {
        self.f_linalg_matrix_rank_out_tol_tensor(out, tol, hermitian).unwrap()
    }

    pub fn linalg_matrix_rank_tol_tensor(&self, tol: &Tensor, hermitian: bool) -> Tensor {
        self.f_linalg_matrix_rank_tol_tensor(tol, hermitian).unwrap()
    }

    pub fn linalg_multi_dot<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
        Tensor::f_linalg_multi_dot(tensors).unwrap()
    }

    pub fn linalg_multi_dot_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T]) -> Tensor {
        Tensor::f_linalg_multi_dot_out(out, tensors).unwrap()
    }

    pub fn linalg_norm<S: Into<Scalar>>(
        &self,
        ord: S,
        dim: impl IntListOption,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_linalg_norm(ord, dim, keepdim, dtype).unwrap()
    }

    pub fn linalg_norm_ord_str(
        &self,
        ord: &str,
        dim: impl IntListOption,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_linalg_norm_ord_str(ord, dim, keepdim, dtype).unwrap()
    }

    pub fn linalg_norm_ord_str_out(
        &self,
        out: &Tensor,
        ord: &str,
        dim: impl IntListOption,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_linalg_norm_ord_str_out(out, ord, dim, keepdim, dtype).unwrap()
    }

    pub fn linalg_norm_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        ord: S,
        dim: impl IntListOption,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_linalg_norm_out(out, ord, dim, keepdim, dtype).unwrap()
    }
11318
    // Panicking wrappers for the remaining `torch.linalg` ops (pinv / qr / slogdet /
    // solve / svd / tensorinv / tensorsolve / vander / vecdot): each forwards to the
    // fallible `f_linalg_*` twin and panics on error. Overload-suffix naming
    // (`_atol_rtol_float` vs `_atol_rtol_tensor`, `_rcond_tensor`) distinguishes
    // scalar vs tensor tolerance arguments.
    pub fn linalg_pinv(&self, rcond: f64, hermitian: bool) -> Tensor {
        self.f_linalg_pinv(rcond, hermitian).unwrap()
    }

    pub fn linalg_pinv_atol_rtol_float(
        &self,
        atol: impl Into<Option<f64>>,
        rtol: impl Into<Option<f64>>,
        hermitian: bool,
    ) -> Tensor {
        self.f_linalg_pinv_atol_rtol_float(atol, rtol, hermitian).unwrap()
    }

    pub fn linalg_pinv_atol_rtol_float_out(
        &self,
        out: &Tensor,
        atol: impl Into<Option<f64>>,
        rtol: impl Into<Option<f64>>,
        hermitian: bool,
    ) -> Tensor {
        self.f_linalg_pinv_atol_rtol_float_out(out, atol, rtol, hermitian).unwrap()
    }

    pub fn linalg_pinv_atol_rtol_tensor<T: Borrow<Tensor>>(
        &self,
        atol: Option<T>,
        rtol: Option<T>,
        hermitian: bool,
    ) -> Tensor {
        self.f_linalg_pinv_atol_rtol_tensor(atol, rtol, hermitian).unwrap()
    }

    pub fn linalg_pinv_atol_rtol_tensor_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        atol: Option<T>,
        rtol: Option<T>,
        hermitian: bool,
    ) -> Tensor {
        self.f_linalg_pinv_atol_rtol_tensor_out(out, atol, rtol, hermitian).unwrap()
    }

    pub fn linalg_pinv_out(&self, out: &Tensor, rcond: f64, hermitian: bool) -> Tensor {
        self.f_linalg_pinv_out(out, rcond, hermitian).unwrap()
    }

    pub fn linalg_pinv_out_rcond_tensor(
        &self,
        out: &Tensor,
        rcond: &Tensor,
        hermitian: bool,
    ) -> Tensor {
        self.f_linalg_pinv_out_rcond_tensor(out, rcond, hermitian).unwrap()
    }

    pub fn linalg_pinv_rcond_tensor(&self, rcond: &Tensor, hermitian: bool) -> Tensor {
        self.f_linalg_pinv_rcond_tensor(rcond, hermitian).unwrap()
    }

    pub fn linalg_qr(a: &Tensor, mode: &str) -> (Tensor, Tensor) {
        Tensor::f_linalg_qr(a, mode).unwrap()
    }

    pub fn linalg_qr_out(q: &Tensor, r: &Tensor, a: &Tensor, mode: &str) -> (Tensor, Tensor) {
        Tensor::f_linalg_qr_out(q, r, a, mode).unwrap()
    }

    pub fn linalg_slogdet(a: &Tensor) -> (Tensor, Tensor) {
        Tensor::f_linalg_slogdet(a).unwrap()
    }

    pub fn linalg_slogdet_out(sign: &Tensor, logabsdet: &Tensor, a: &Tensor) -> (Tensor, Tensor) {
        Tensor::f_linalg_slogdet_out(sign, logabsdet, a).unwrap()
    }

    pub fn linalg_solve(a: &Tensor, b: &Tensor, left: bool) -> Tensor {
        Tensor::f_linalg_solve(a, b, left).unwrap()
    }

    pub fn linalg_solve_ex(
        a: &Tensor,
        b: &Tensor,
        left: bool,
        check_errors: bool,
    ) -> (Tensor, Tensor) {
        Tensor::f_linalg_solve_ex(a, b, left, check_errors).unwrap()
    }

    pub fn linalg_solve_ex_out(
        result: &Tensor,
        info: &Tensor,
        a: &Tensor,
        b: &Tensor,
        left: bool,
        check_errors: bool,
    ) -> (Tensor, Tensor) {
        Tensor::f_linalg_solve_ex_out(result, info, a, b, left, check_errors).unwrap()
    }

    pub fn linalg_solve_out(out: &Tensor, a: &Tensor, b: &Tensor, left: bool) -> Tensor {
        Tensor::f_linalg_solve_out(out, a, b, left).unwrap()
    }

    pub fn linalg_solve_triangular(
        &self,
        b: &Tensor,
        upper: bool,
        left: bool,
        unitriangular: bool,
    ) -> Tensor {
        self.f_linalg_solve_triangular(b, upper, left, unitriangular).unwrap()
    }

    pub fn linalg_solve_triangular_out(
        &self,
        out: &Tensor,
        b: &Tensor,
        upper: bool,
        left: bool,
        unitriangular: bool,
    ) -> Tensor {
        self.f_linalg_solve_triangular_out(out, b, upper, left, unitriangular).unwrap()
    }

    pub fn linalg_svd(a: &Tensor, full_matrices: bool, driver: &str) -> (Tensor, Tensor, Tensor) {
        Tensor::f_linalg_svd(a, full_matrices, driver).unwrap()
    }

    pub fn linalg_svd_u(
        u: &Tensor,
        s: &Tensor,
        vh: &Tensor,
        a: &Tensor,
        full_matrices: bool,
        driver: &str,
    ) -> (Tensor, Tensor, Tensor) {
        Tensor::f_linalg_svd_u(u, s, vh, a, full_matrices, driver).unwrap()
    }

    pub fn linalg_svdvals(a: &Tensor, driver: &str) -> Tensor {
        Tensor::f_linalg_svdvals(a, driver).unwrap()
    }

    pub fn linalg_svdvals_out(out: &Tensor, a: &Tensor, driver: &str) -> Tensor {
        Tensor::f_linalg_svdvals_out(out, a, driver).unwrap()
    }

    pub fn linalg_tensorinv(&self, ind: i64) -> Tensor {
        self.f_linalg_tensorinv(ind).unwrap()
    }

    pub fn linalg_tensorinv_out(&self, out: &Tensor, ind: i64) -> Tensor {
        self.f_linalg_tensorinv_out(out, ind).unwrap()
    }

    pub fn linalg_tensorsolve(&self, other: &Tensor, dims: impl IntListOption) -> Tensor {
        self.f_linalg_tensorsolve(other, dims).unwrap()
    }

    pub fn linalg_tensorsolve_out(
        &self,
        out: &Tensor,
        other: &Tensor,
        dims: impl IntListOption,
    ) -> Tensor {
        self.f_linalg_tensorsolve_out(out, other, dims).unwrap()
    }

    pub fn linalg_vander(x: &Tensor, n: impl Into<Option<i64>>) -> Tensor {
        Tensor::f_linalg_vander(x, n).unwrap()
    }

    pub fn linalg_vecdot(x: &Tensor, y: &Tensor, dim: i64) -> Tensor {
        Tensor::f_linalg_vecdot(x, y, dim).unwrap()
    }

    pub fn linalg_vecdot_out(out: &Tensor, x: &Tensor, y: &Tensor, dim: i64) -> Tensor {
        Tensor::f_linalg_vecdot_out(out, x, y, dim).unwrap()
    }
11498
    // Panicking wrappers for `linear` (affine map with optional bias) and the
    // `linspace` factory overloads: forward to `f_linear*` / `f_linspace*` and
    // `unwrap()`. Factory variants take an `options: (Kind, Device)` pair selecting
    // dtype and device; the `scalar_tensor` / `tensor_scalar` / `tensor_tensor`
    // suffixes disambiguate scalar vs tensor endpoints of the range.
    pub fn linear<T: Borrow<Tensor>>(&self, weight: &Tensor, bias: Option<T>) -> Tensor {
        self.f_linear(weight, bias).unwrap()
    }

    pub fn linear_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        bias: Option<T>,
    ) -> Tensor {
        self.f_linear_out(out, weight, bias).unwrap()
    }

    pub fn linspace<S: Into<Scalar>>(
        start: S,
        end: S,
        steps: i64,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_linspace(start, end, steps, options).unwrap()
    }

    pub fn linspace_out<S: Into<Scalar>>(out: &Tensor, start: S, end: S, steps: i64) -> Tensor {
        Tensor::f_linspace_out(out, start, end, steps).unwrap()
    }

    pub fn linspace_scalar_tensor<S: Into<Scalar>>(
        start: S,
        end: &Tensor,
        steps: i64,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_linspace_scalar_tensor(start, end, steps, options).unwrap()
    }

    pub fn linspace_scalar_tensor_out<S: Into<Scalar>>(
        out: &Tensor,
        start: S,
        end: &Tensor,
        steps: i64,
    ) -> Tensor {
        Tensor::f_linspace_scalar_tensor_out(out, start, end, steps).unwrap()
    }

    pub fn linspace_tensor_scalar<S: Into<Scalar>>(
        start: &Tensor,
        end: S,
        steps: i64,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_linspace_tensor_scalar(start, end, steps, options).unwrap()
    }

    pub fn linspace_tensor_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        start: &Tensor,
        end: S,
        steps: i64,
    ) -> Tensor {
        Tensor::f_linspace_tensor_scalar_out(out, start, end, steps).unwrap()
    }

    pub fn linspace_tensor_tensor(
        start: &Tensor,
        end: &Tensor,
        steps: i64,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_linspace_tensor_tensor(start, end, steps, options).unwrap()
    }

    pub fn linspace_tensor_tensor_out(
        out: &Tensor,
        start: &Tensor,
        end: &Tensor,
        steps: i64,
    ) -> Tensor {
        Tensor::f_linspace_tensor_tensor_out(out, start, end, steps).unwrap()
    }
11578
    // Panicking wrappers for the logarithm family (`log`, `log10`, `log1p`, `log2`,
    // `log_normal`, `log_sigmoid`, `log_softmax`, `logaddexp`, `logcumsumexp`,
    // `logdet`), elementwise boolean ops (`logical_*`), and `logit`. Each forwards
    // to its fallible `f_` twin and `unwrap()`s. Trailing-underscore variants take
    // `&mut self` (in-place, following the libtorch naming convention); `*_out`
    // variants take a caller-provided result tensor.
    pub fn log(&self) -> Tensor {
        self.f_log().unwrap()
    }

    pub fn log10(&self) -> Tensor {
        self.f_log10().unwrap()
    }

    pub fn log10_(&mut self) -> Tensor {
        self.f_log10_().unwrap()
    }

    pub fn log10_out(&self, out: &Tensor) -> Tensor {
        self.f_log10_out(out).unwrap()
    }

    pub fn log1p(&self) -> Tensor {
        self.f_log1p().unwrap()
    }

    pub fn log1p_(&mut self) -> Tensor {
        self.f_log1p_().unwrap()
    }

    pub fn log1p_out(&self, out: &Tensor) -> Tensor {
        self.f_log1p_out(out).unwrap()
    }

    pub fn log2(&self) -> Tensor {
        self.f_log2().unwrap()
    }

    pub fn log2_(&mut self) -> Tensor {
        self.f_log2_().unwrap()
    }

    pub fn log2_out(&self, out: &Tensor) -> Tensor {
        self.f_log2_out(out).unwrap()
    }

    pub fn log_(&mut self) -> Tensor {
        self.f_log_().unwrap()
    }

    pub fn log_normal(&self, mean: f64, std: f64) -> Tensor {
        self.f_log_normal(mean, std).unwrap()
    }

    pub fn log_normal_(&mut self, mean: f64, std: f64) -> Tensor {
        self.f_log_normal_(mean, std).unwrap()
    }

    pub fn log_normal_out(&self, out: &Tensor, mean: f64, std: f64) -> Tensor {
        self.f_log_normal_out(out, mean, std).unwrap()
    }

    pub fn log_out(&self, out: &Tensor) -> Tensor {
        self.f_log_out(out).unwrap()
    }

    pub fn log_sigmoid(&self) -> Tensor {
        self.f_log_sigmoid().unwrap()
    }

    pub fn log_sigmoid_backward(&self, grad_output: &Tensor, buffer: &Tensor) -> Tensor {
        self.f_log_sigmoid_backward(grad_output, buffer).unwrap()
    }

    pub fn log_sigmoid_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        buffer: &Tensor,
    ) -> Tensor {
        self.f_log_sigmoid_backward_grad_input(grad_input, grad_output, buffer).unwrap()
    }

    pub fn log_sigmoid_out(&self, out: &Tensor) -> Tensor {
        self.f_log_sigmoid_out(out).unwrap()
    }

    pub fn log_softmax(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_log_softmax(dim, dtype).unwrap()
    }

    pub fn log_softmax_int_out(
        &self,
        out: &Tensor,
        dim: i64,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_log_softmax_int_out(out, dim, dtype).unwrap()
    }

    pub fn logaddexp(&self, other: &Tensor) -> Tensor {
        self.f_logaddexp(other).unwrap()
    }

    pub fn logaddexp2(&self, other: &Tensor) -> Tensor {
        self.f_logaddexp2(other).unwrap()
    }

    pub fn logaddexp2_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_logaddexp2_out(out, other).unwrap()
    }

    pub fn logaddexp_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_logaddexp_out(out, other).unwrap()
    }

    pub fn logcumsumexp(&self, dim: i64) -> Tensor {
        self.f_logcumsumexp(dim).unwrap()
    }

    pub fn logcumsumexp_out(&self, out: &Tensor, dim: i64) -> Tensor {
        self.f_logcumsumexp_out(out, dim).unwrap()
    }

    pub fn logdet(&self) -> Tensor {
        self.f_logdet().unwrap()
    }

    pub fn logical_and(&self, other: &Tensor) -> Tensor {
        self.f_logical_and(other).unwrap()
    }

    pub fn logical_and_(&mut self, other: &Tensor) -> Tensor {
        self.f_logical_and_(other).unwrap()
    }

    pub fn logical_and_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_logical_and_out(out, other).unwrap()
    }

    pub fn logical_not(&self) -> Tensor {
        self.f_logical_not().unwrap()
    }

    pub fn logical_not_(&mut self) -> Tensor {
        self.f_logical_not_().unwrap()
    }

    pub fn logical_not_out(&self, out: &Tensor) -> Tensor {
        self.f_logical_not_out(out).unwrap()
    }

    pub fn logical_or(&self, other: &Tensor) -> Tensor {
        self.f_logical_or(other).unwrap()
    }

    pub fn logical_or_(&mut self, other: &Tensor) -> Tensor {
        self.f_logical_or_(other).unwrap()
    }

    pub fn logical_or_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_logical_or_out(out, other).unwrap()
    }

    pub fn logical_xor(&self, other: &Tensor) -> Tensor {
        self.f_logical_xor(other).unwrap()
    }

    pub fn logical_xor_(&mut self, other: &Tensor) -> Tensor {
        self.f_logical_xor_(other).unwrap()
    }

    pub fn logical_xor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_logical_xor_out(out, other).unwrap()
    }

    pub fn logit(&self, eps: impl Into<Option<f64>>) -> Tensor {
        self.f_logit(eps).unwrap()
    }

    pub fn logit_(&mut self, eps: impl Into<Option<f64>>) -> Tensor {
        self.f_logit_(eps).unwrap()
    }

    pub fn logit_backward(&self, grad_output: &Tensor, eps: impl Into<Option<f64>>) -> Tensor {
        self.f_logit_backward(grad_output, eps).unwrap()
    }

    pub fn logit_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        eps: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_logit_backward_grad_input(grad_input, grad_output, eps).unwrap()
    }

    pub fn logit_out(&self, out: &Tensor, eps: impl Into<Option<f64>>) -> Tensor {
        self.f_logit_out(out, eps).unwrap()
    }
11773
    // Panicking wrappers for the `logspace` factory overloads: same shape as the
    // `linspace` family plus a `base` exponent argument. Each forwards to its
    // `f_logspace*` twin and panics on error; overload suffixes mark scalar vs
    // tensor range endpoints, `*_out` variants take a caller-provided result tensor.
    pub fn logspace<S: Into<Scalar>>(
        start: S,
        end: S,
        steps: i64,
        base: f64,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_logspace(start, end, steps, base, options).unwrap()
    }

    pub fn logspace_out<S: Into<Scalar>>(
        out: &Tensor,
        start: S,
        end: S,
        steps: i64,
        base: f64,
    ) -> Tensor {
        Tensor::f_logspace_out(out, start, end, steps, base).unwrap()
    }

    pub fn logspace_scalar_tensor<S: Into<Scalar>>(
        start: S,
        end: &Tensor,
        steps: i64,
        base: f64,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_logspace_scalar_tensor(start, end, steps, base, options).unwrap()
    }

    pub fn logspace_scalar_tensor_out<S: Into<Scalar>>(
        out: &Tensor,
        start: S,
        end: &Tensor,
        steps: i64,
        base: f64,
    ) -> Tensor {
        Tensor::f_logspace_scalar_tensor_out(out, start, end, steps, base).unwrap()
    }

    pub fn logspace_tensor_scalar<S: Into<Scalar>>(
        start: &Tensor,
        end: S,
        steps: i64,
        base: f64,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_logspace_tensor_scalar(start, end, steps, base, options).unwrap()
    }

    pub fn logspace_tensor_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        start: &Tensor,
        end: S,
        steps: i64,
        base: f64,
    ) -> Tensor {
        Tensor::f_logspace_tensor_scalar_out(out, start, end, steps, base).unwrap()
    }

    pub fn logspace_tensor_tensor(
        start: &Tensor,
        end: &Tensor,
        steps: i64,
        base: f64,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_logspace_tensor_tensor(start, end, steps, base, options).unwrap()
    }

    pub fn logspace_tensor_tensor_out(
        out: &Tensor,
        start: &Tensor,
        end: &Tensor,
        steps: i64,
        base: f64,
    ) -> Tensor {
        Tensor::f_logspace_tensor_tensor_out(out, start, end, steps, base).unwrap()
    }
11853
    // Panicking wrappers for `logsumexp` and the LSTM family: forward to the
    // fallible `f_` twins and `unwrap()`. `lstm_data` is the packed-sequence
    // overload (takes `data`/`batch_sizes` instead of a batched input), and
    // `lstm_mps_backward` returns `()` — it presumably writes its results into the
    // `out0`/`out1`/`out2` arguments (confirm in the `f_*` implementation).
    pub fn logsumexp(&self, dim: impl IntList, keepdim: bool) -> Tensor {
        self.f_logsumexp(dim, keepdim).unwrap()
    }

    pub fn logsumexp_out(&self, out: &Tensor, dim: impl IntList, keepdim: bool) -> Tensor {
        self.f_logsumexp_out(out, dim, keepdim).unwrap()
    }

    pub fn lstm<T: Borrow<Tensor>>(
        &self,
        hx: &[T],
        params: &[T],
        has_biases: bool,
        num_layers: i64,
        dropout: f64,
        train: bool,
        bidirectional: bool,
        batch_first: bool,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_lstm(hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first)
            .unwrap()
    }

    pub fn lstm_cell<T: Borrow<Tensor>>(
        &self,
        hx: &[T],
        w_ih: &Tensor,
        w_hh: &Tensor,
        b_ih: Option<T>,
        b_hh: Option<T>,
    ) -> (Tensor, Tensor) {
        self.f_lstm_cell(hx, w_ih, w_hh, b_ih, b_hh).unwrap()
    }

    pub fn lstm_data<T: Borrow<Tensor>>(
        data: &Tensor,
        batch_sizes: &Tensor,
        hx: &[T],
        params: &[T],
        has_biases: bool,
        num_layers: i64,
        dropout: f64,
        train: bool,
        bidirectional: bool,
    ) -> (Tensor, Tensor, Tensor) {
        Tensor::f_lstm_data(
            data,
            batch_sizes,
            hx,
            params,
            has_biases,
            num_layers,
            dropout,
            train,
            bidirectional,
        )
        .unwrap()
    }

    pub fn lstm_mps_backward<T: Borrow<Tensor>>(
        &self,
        out0: &Tensor,
        out1: &[T],
        out2: &[T],
        grad_y: Option<T>,
        grad_hy: Option<T>,
        grad_cy: Option<T>,
        z_state: &Tensor,
        cell_state_fwd: &Tensor,
        layersoutputs: &Tensor,
        hx: &[T],
        params: &[T],
        has_biases: bool,
        num_layers: i64,
        dropout: f64,
        train: bool,
        bidirectional: bool,
        batch_first: bool,
    ) {
        self.f_lstm_mps_backward(
            out0,
            out1,
            out2,
            grad_y,
            grad_hy,
            grad_cy,
            z_state,
            cell_state_fwd,
            layersoutputs,
            hx,
            params,
            has_biases,
            num_layers,
            dropout,
            train,
            bidirectional,
            batch_first,
        )
        .unwrap()
    }
11954
    // Panicking wrappers for less-than comparison (`lt*`, scalar and tensor
    // overloads; trailing-underscore variants take `&mut self` / in-place) and the
    // LU solve/unpack ops. Each forwards to the matching `f_` method and panics on
    // error.
    pub fn lt<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_lt(other).unwrap()
    }

    pub fn lt_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_lt_(other).unwrap()
    }

    pub fn lt_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_lt_scalar_out(out, other).unwrap()
    }

    pub fn lt_tensor(&self, other: &Tensor) -> Tensor {
        self.f_lt_tensor(other).unwrap()
    }

    pub fn lt_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_lt_tensor_(other).unwrap()
    }

    pub fn lt_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_lt_tensor_out(out, other).unwrap()
    }

    pub fn lu_solve(&self, lu_data: &Tensor, lu_pivots: &Tensor) -> Tensor {
        self.f_lu_solve(lu_data, lu_pivots).unwrap()
    }

    pub fn lu_solve_out(&self, out: &Tensor, lu_data: &Tensor, lu_pivots: &Tensor) -> Tensor {
        self.f_lu_solve_out(out, lu_data, lu_pivots).unwrap()
    }

    pub fn lu_unpack(
        lu_data: &Tensor,
        lu_pivots: &Tensor,
        unpack_data: bool,
        unpack_pivots: bool,
    ) -> (Tensor, Tensor, Tensor) {
        Tensor::f_lu_unpack(lu_data, lu_pivots, unpack_data, unpack_pivots).unwrap()
    }

    pub fn lu_unpack_out(
        p: &Tensor,
        l: &Tensor,
        u: &Tensor,
        lu_data: &Tensor,
        lu_pivots: &Tensor,
        unpack_data: bool,
        unpack_pivots: bool,
    ) -> (Tensor, Tensor, Tensor) {
        Tensor::f_lu_unpack_out(p, l, u, lu_data, lu_pivots, unpack_data, unpack_pivots).unwrap()
    }
12007
    /// Panicking wrapper around [`Tensor::f_margin_ranking_loss`]; associated function (no `self`).
    pub fn margin_ranking_loss(
        input1: &Tensor,
        input2: &Tensor,
        target: &Tensor,
        margin: f64,
        reduction: crate::Reduction,
    ) -> Tensor {
        Tensor::f_margin_ranking_loss(input1, input2, target, margin, reduction).unwrap()
    }
12017
    /// Panicking wrapper around [`Tensor::f_masked_fill`].
    pub fn masked_fill<S: Into<Scalar>>(&self, mask: &Tensor, value: S) -> Tensor {
        self.f_masked_fill(mask, value).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_masked_fill_`].
    pub fn masked_fill_<S: Into<Scalar>>(&mut self, mask: &Tensor, value: S) -> Tensor {
        self.f_masked_fill_(mask, value).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_masked_fill_scalar_out`].
    pub fn masked_fill_scalar_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        mask: &Tensor,
        value: S,
    ) -> Tensor {
        self.f_masked_fill_scalar_out(out, mask, value).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_masked_fill_tensor`].
    pub fn masked_fill_tensor(&self, mask: &Tensor, value: &Tensor) -> Tensor {
        self.f_masked_fill_tensor(mask, value).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_masked_fill_tensor_`].
    pub fn masked_fill_tensor_(&mut self, mask: &Tensor, value: &Tensor) -> Tensor {
        self.f_masked_fill_tensor_(mask, value).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_masked_fill_tensor_out`].
    pub fn masked_fill_tensor_out(&self, out: &Tensor, mask: &Tensor, value: &Tensor) -> Tensor {
        self.f_masked_fill_tensor_out(out, mask, value).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_masked_scatter`].
    pub fn masked_scatter(&self, mask: &Tensor, source: &Tensor) -> Tensor {
        self.f_masked_scatter(mask, source).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_masked_scatter_`].
    pub fn masked_scatter_(&mut self, mask: &Tensor, source: &Tensor) -> Tensor {
        self.f_masked_scatter_(mask, source).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_masked_scatter_backward`]; associated function (no `self`).
    pub fn masked_scatter_backward(
        grad_output: &Tensor,
        mask: &Tensor,
        sizes: impl IntList,
    ) -> Tensor {
        Tensor::f_masked_scatter_backward(grad_output, mask, sizes).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_masked_scatter_out`].
    pub fn masked_scatter_out(&self, out: &Tensor, mask: &Tensor, source: &Tensor) -> Tensor {
        self.f_masked_scatter_out(out, mask, source).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_masked_select`].
    pub fn masked_select(&self, mask: &Tensor) -> Tensor {
        self.f_masked_select(mask).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_masked_select_backward`].
    pub fn masked_select_backward(&self, grad: &Tensor, mask: &Tensor) -> Tensor {
        self.f_masked_select_backward(grad, mask).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_masked_select_out`].
    pub fn masked_select_out(&self, out: &Tensor, mask: &Tensor) -> Tensor {
        self.f_masked_select_out(out, mask).unwrap()
    }
12078
    /// Panicking wrapper around [`Tensor::f_matmul`].
    pub fn matmul(&self, other: &Tensor) -> Tensor {
        self.f_matmul(other).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_matmul_out`].
    pub fn matmul_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_matmul_out(out, other).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_matrix_exp`].
    pub fn matrix_exp(&self) -> Tensor {
        self.f_matrix_exp().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_matrix_exp_backward`].
    pub fn matrix_exp_backward(&self, grad: &Tensor) -> Tensor {
        self.f_matrix_exp_backward(grad).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_matrix_h`].
    pub fn matrix_h(&self) -> Tensor {
        self.f_matrix_h().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_matrix_power`].
    pub fn matrix_power(&self, n: i64) -> Tensor {
        self.f_matrix_power(n).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_matrix_power_out`].
    pub fn matrix_power_out(&self, out: &Tensor, n: i64) -> Tensor {
        self.f_matrix_power_out(out, n).unwrap()
    }
12106
    /// Panicking wrapper around [`Tensor::f_max`].
    pub fn max(&self) -> Tensor {
        self.f_max().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_dim`]; returns a `(values, indices)` pair of tensors.
    pub fn max_dim(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor) {
        self.f_max_dim(dim, keepdim).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_dim_max`].
    pub fn max_dim_max(
        &self,
        max: &Tensor,
        max_values: &Tensor,
        dim: i64,
        keepdim: bool,
    ) -> (Tensor, Tensor) {
        self.f_max_dim_max(max, max_values, dim, keepdim).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_other`].
    pub fn max_other(&self, other: &Tensor) -> Tensor {
        self.f_max_other(other).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_out`].
    pub fn max_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_max_out(out, other).unwrap()
    }
12132
    /// Panicking wrapper around [`Tensor::f_max_pool1d`].
    pub fn max_pool1d(
        &self,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> Tensor {
        self.f_max_pool1d(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_pool1d_with_indices`].
    pub fn max_pool1d_with_indices(
        &self,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> (Tensor, Tensor) {
        self.f_max_pool1d_with_indices(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_pool2d`].
    pub fn max_pool2d(
        &self,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> Tensor {
        self.f_max_pool2d(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_pool2d_backward`].
    pub fn max_pool2d_backward(
        &self,
        grad_output: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> Tensor {
        self.f_max_pool2d_backward(grad_output, kernel_size, stride, padding, dilation, ceil_mode)
            .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_pool2d_backward_out`].
    pub fn max_pool2d_backward_out(
        &self,
        out: &Tensor,
        grad_output: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> Tensor {
        self.f_max_pool2d_backward_out(
            out,
            grad_output,
            kernel_size,
            stride,
            padding,
            dilation,
            ceil_mode,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_pool2d_with_indices`].
    pub fn max_pool2d_with_indices(
        &self,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> (Tensor, Tensor) {
        self.f_max_pool2d_with_indices(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_pool2d_with_indices_backward`].
    pub fn max_pool2d_with_indices_backward(
        &self,
        grad_output: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
        indices: &Tensor,
    ) -> Tensor {
        self.f_max_pool2d_with_indices_backward(
            grad_output,
            kernel_size,
            stride,
            padding,
            dilation,
            ceil_mode,
            indices,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_pool2d_with_indices_backward_grad_input`].
    pub fn max_pool2d_with_indices_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
        indices: &Tensor,
    ) -> Tensor {
        self.f_max_pool2d_with_indices_backward_grad_input(
            grad_input,
            grad_output,
            kernel_size,
            stride,
            padding,
            dilation,
            ceil_mode,
            indices,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_pool2d_with_indices_out`].
    pub fn max_pool2d_with_indices_out(
        &self,
        out: &Tensor,
        indices: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> (Tensor, Tensor) {
        self.f_max_pool2d_with_indices_out(
            out,
            indices,
            kernel_size,
            stride,
            padding,
            dilation,
            ceil_mode,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_pool3d`].
    pub fn max_pool3d(
        &self,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> Tensor {
        self.f_max_pool3d(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_pool3d_with_indices`].
    pub fn max_pool3d_with_indices(
        &self,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> (Tensor, Tensor) {
        self.f_max_pool3d_with_indices(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_pool3d_with_indices_backward`].
    pub fn max_pool3d_with_indices_backward(
        &self,
        grad_output: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
        indices: &Tensor,
    ) -> Tensor {
        self.f_max_pool3d_with_indices_backward(
            grad_output,
            kernel_size,
            stride,
            padding,
            dilation,
            ceil_mode,
            indices,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_pool3d_with_indices_backward_grad_input`].
    pub fn max_pool3d_with_indices_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
        indices: &Tensor,
    ) -> Tensor {
        self.f_max_pool3d_with_indices_backward_grad_input(
            grad_input,
            grad_output,
            kernel_size,
            stride,
            padding,
            dilation,
            ceil_mode,
            indices,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_pool3d_with_indices_out`].
    pub fn max_pool3d_with_indices_out(
        &self,
        out: &Tensor,
        indices: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> (Tensor, Tensor) {
        self.f_max_pool3d_with_indices_out(
            out,
            indices,
            kernel_size,
            stride,
            padding,
            dilation,
            ceil_mode,
        )
        .unwrap()
    }
12369
    /// Panicking wrapper around [`Tensor::f_max_unary_out`].
    pub fn max_unary_out(&self, out: &Tensor) -> Tensor {
        self.f_max_unary_out(out).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_unpool2d`].
    pub fn max_unpool2d(&self, indices: &Tensor, output_size: impl IntList) -> Tensor {
        self.f_max_unpool2d(indices, output_size).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_unpool2d_out`].
    pub fn max_unpool2d_out(
        &self,
        out: &Tensor,
        indices: &Tensor,
        output_size: impl IntList,
    ) -> Tensor {
        self.f_max_unpool2d_out(out, indices, output_size).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_unpool3d`].
    pub fn max_unpool3d(
        &self,
        indices: &Tensor,
        output_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
    ) -> Tensor {
        self.f_max_unpool3d(indices, output_size, stride, padding).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_max_unpool3d_out`].
    pub fn max_unpool3d_out(
        &self,
        out: &Tensor,
        indices: &Tensor,
        output_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
    ) -> Tensor {
        self.f_max_unpool3d_out(out, indices, output_size, stride, padding).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_maximum`].
    pub fn maximum(&self, other: &Tensor) -> Tensor {
        self.f_maximum(other).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_maximum_out`].
    pub fn maximum_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_maximum_out(out, other).unwrap()
    }
12415
    /// Panicking wrapper around [`Tensor::f_mean`]; `dtype` is an optional target [`Kind`].
    pub fn mean(&self, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_mean(dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mean_dim`].
    pub fn mean_dim(
        &self,
        dim: impl IntListOption,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_mean_dim(dim, keepdim, dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mean_dtype_out`].
    pub fn mean_dtype_out(&self, out: &Tensor, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_mean_dtype_out(out, dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mean_out`].
    pub fn mean_out(
        &self,
        out: &Tensor,
        dim: impl IntListOption,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_mean_out(out, dim, keepdim, dtype).unwrap()
    }
12442
    /// Panicking wrapper around [`Tensor::f_median`].
    pub fn median(&self) -> Tensor {
        self.f_median().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_median_dim`]; returns a `(values, indices)` pair of tensors.
    pub fn median_dim(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor) {
        self.f_median_dim(dim, keepdim).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_median_dim_values`].
    pub fn median_dim_values(
        &self,
        values: &Tensor,
        indices: &Tensor,
        dim: i64,
        keepdim: bool,
    ) -> (Tensor, Tensor) {
        self.f_median_dim_values(values, indices, dim, keepdim).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_median_out`].
    pub fn median_out(&self, out: &Tensor) -> Tensor {
        self.f_median_out(out).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_meshgrid`]; associated function over a slice of tensors.
    pub fn meshgrid<T: Borrow<Tensor>>(tensors: &[T]) -> Vec<Tensor> {
        Tensor::f_meshgrid(tensors).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_meshgrid_indexing`].
    pub fn meshgrid_indexing<T: Borrow<Tensor>>(tensors: &[T], indexing: &str) -> Vec<Tensor> {
        Tensor::f_meshgrid_indexing(tensors, indexing).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mh`].
    pub fn mh(&self) -> Tensor {
        self.f_mh().unwrap()
    }
12476
    /// Panicking wrapper around [`Tensor::f_min`].
    pub fn min(&self) -> Tensor {
        self.f_min().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_min_dim`]; returns a `(values, indices)` pair of tensors.
    pub fn min_dim(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor) {
        self.f_min_dim(dim, keepdim).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_min_dim_min`].
    pub fn min_dim_min(
        &self,
        min: &Tensor,
        min_indices: &Tensor,
        dim: i64,
        keepdim: bool,
    ) -> (Tensor, Tensor) {
        self.f_min_dim_min(min, min_indices, dim, keepdim).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_min_other`].
    pub fn min_other(&self, other: &Tensor) -> Tensor {
        self.f_min_other(other).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_min_out`].
    pub fn min_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_min_out(out, other).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_min_unary_out`].
    pub fn min_unary_out(&self, out: &Tensor) -> Tensor {
        self.f_min_unary_out(out).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_minimum`].
    pub fn minimum(&self, other: &Tensor) -> Tensor {
        self.f_minimum(other).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_minimum_out`].
    pub fn minimum_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_minimum_out(out, other).unwrap()
    }
12514
    /// Panicking wrapper around [`Tensor::f_miopen_batch_norm`]; optional tensor args accept `Option<T: Borrow<Tensor>>`.
    pub fn miopen_batch_norm<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        running_mean: Option<T>,
        running_var: Option<T>,
        training: bool,
        exponential_average_factor: f64,
        epsilon: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_miopen_batch_norm(
            weight,
            bias,
            running_mean,
            running_var,
            training,
            exponential_average_factor,
            epsilon,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_miopen_batch_norm_backward`].
    pub fn miopen_batch_norm_backward<T: Borrow<Tensor>>(
        &self,
        grad_output: &Tensor,
        weight: &Tensor,
        running_mean: Option<T>,
        running_var: Option<T>,
        save_mean: Option<T>,
        save_var: Option<T>,
        epsilon: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_miopen_batch_norm_backward(
            grad_output,
            weight,
            running_mean,
            running_var,
            save_mean,
            save_var,
            epsilon,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_miopen_batch_norm_backward_out`].
    pub fn miopen_batch_norm_backward_out<T: Borrow<Tensor>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        grad_output: &Tensor,
        weight: &Tensor,
        running_mean: Option<T>,
        running_var: Option<T>,
        save_mean: Option<T>,
        save_var: Option<T>,
        epsilon: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_miopen_batch_norm_backward_out(
            out0,
            out1,
            out2,
            grad_output,
            weight,
            running_mean,
            running_var,
            save_mean,
            save_var,
            epsilon,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_miopen_batch_norm_out`].
    pub fn miopen_batch_norm_out<T: Borrow<Tensor>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        weight: &Tensor,
        bias: Option<T>,
        running_mean: Option<T>,
        running_var: Option<T>,
        training: bool,
        exponential_average_factor: f64,
        epsilon: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_miopen_batch_norm_out(
            out0,
            out1,
            out2,
            weight,
            bias,
            running_mean,
            running_var,
            training,
            exponential_average_factor,
            epsilon,
        )
        .unwrap()
    }
12614
    /// Panicking wrapper around [`Tensor::f_miopen_convolution`].
    pub fn miopen_convolution<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        padding: impl IntList,
        stride: impl IntList,
        dilation: impl IntList,
        groups: i64,
        benchmark: bool,
        deterministic: bool,
    ) -> Tensor {
        self.f_miopen_convolution(
            weight,
            bias,
            padding,
            stride,
            dilation,
            groups,
            benchmark,
            deterministic,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_miopen_convolution_add_relu`].
    /// NOTE(review): argument order here is `stride` before `padding`, unlike the other
    /// miopen_convolution* wrappers — generated as-is; confirm against the C API if editing the generator.
    pub fn miopen_convolution_add_relu<T: Borrow<Tensor>, S: Into<Scalar>>(
        &self,
        weight: &Tensor,
        z: &Tensor,
        alpha: S,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_miopen_convolution_add_relu(
            weight, z, alpha, bias, stride, padding, dilation, groups,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_miopen_convolution_out`].
    pub fn miopen_convolution_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        bias: Option<T>,
        padding: impl IntList,
        stride: impl IntList,
        dilation: impl IntList,
        groups: i64,
        benchmark: bool,
        deterministic: bool,
    ) -> Tensor {
        self.f_miopen_convolution_out(
            out,
            weight,
            bias,
            padding,
            stride,
            dilation,
            groups,
            benchmark,
            deterministic,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_miopen_convolution_relu`].
    pub fn miopen_convolution_relu<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_miopen_convolution_relu(weight, bias, stride, padding, dilation, groups).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_miopen_convolution_transpose`].
    pub fn miopen_convolution_transpose<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        padding: impl IntList,
        output_padding: impl IntList,
        stride: impl IntList,
        dilation: impl IntList,
        groups: i64,
        benchmark: bool,
        deterministic: bool,
    ) -> Tensor {
        self.f_miopen_convolution_transpose(
            weight,
            bias,
            padding,
            output_padding,
            stride,
            dilation,
            groups,
            benchmark,
            deterministic,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_miopen_convolution_transpose_out`].
    pub fn miopen_convolution_transpose_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        bias: Option<T>,
        padding: impl IntList,
        output_padding: impl IntList,
        stride: impl IntList,
        dilation: impl IntList,
        groups: i64,
        benchmark: bool,
        deterministic: bool,
    ) -> Tensor {
        self.f_miopen_convolution_transpose_out(
            out,
            weight,
            bias,
            padding,
            output_padding,
            stride,
            dilation,
            groups,
            benchmark,
            deterministic,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_miopen_depthwise_convolution`].
    pub fn miopen_depthwise_convolution<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        padding: impl IntList,
        stride: impl IntList,
        dilation: impl IntList,
        groups: i64,
        benchmark: bool,
        deterministic: bool,
    ) -> Tensor {
        self.f_miopen_depthwise_convolution(
            weight,
            bias,
            padding,
            stride,
            dilation,
            groups,
            benchmark,
            deterministic,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_miopen_depthwise_convolution_out`].
    pub fn miopen_depthwise_convolution_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        bias: Option<T>,
        padding: impl IntList,
        stride: impl IntList,
        dilation: impl IntList,
        groups: i64,
        benchmark: bool,
        deterministic: bool,
    ) -> Tensor {
        self.f_miopen_depthwise_convolution_out(
            out,
            weight,
            bias,
            padding,
            stride,
            dilation,
            groups,
            benchmark,
            deterministic,
        )
        .unwrap()
    }
12797
    /// Panicking wrapper around [`Tensor::f_miopen_rnn`]; returns a 5-tuple of tensors.
    pub fn miopen_rnn<T: Borrow<Tensor>>(
        &self,
        weight: &[T],
        weight_stride0: i64,
        hx: &Tensor,
        cx: Option<T>,
        mode: i64,
        hidden_size: i64,
        num_layers: i64,
        batch_first: bool,
        dropout: f64,
        train: bool,
        bidirectional: bool,
        batch_sizes: impl IntList,
        dropout_state: Option<T>,
    ) -> (Tensor, Tensor, Tensor, Tensor, Tensor) {
        self.f_miopen_rnn(
            weight,
            weight_stride0,
            hx,
            cx,
            mode,
            hidden_size,
            num_layers,
            batch_first,
            dropout,
            train,
            bidirectional,
            batch_sizes,
            dropout_state,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_miopen_rnn_out`].
    pub fn miopen_rnn_out<T: Borrow<Tensor>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        out3: &Tensor,
        out4: &Tensor,
        weight: &[T],
        weight_stride0: i64,
        hx: &Tensor,
        cx: Option<T>,
        mode: i64,
        hidden_size: i64,
        num_layers: i64,
        batch_first: bool,
        dropout: f64,
        train: bool,
        bidirectional: bool,
        batch_sizes: impl IntList,
        dropout_state: Option<T>,
    ) -> (Tensor, Tensor, Tensor, Tensor, Tensor) {
        self.f_miopen_rnn_out(
            out0,
            out1,
            out2,
            out3,
            out4,
            weight,
            weight_stride0,
            hx,
            cx,
            mode,
            hidden_size,
            num_layers,
            batch_first,
            dropout,
            train,
            bidirectional,
            batch_sizes,
            dropout_state,
        )
        .unwrap()
    }
12875
    /// Panicking wrapper around [`Tensor::f_mish`].
    pub fn mish(&self) -> Tensor {
        self.f_mish().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mish_`].
    pub fn mish_(&mut self) -> Tensor {
        self.f_mish_().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mish_backward`].
    pub fn mish_backward(&self, grad_output: &Tensor) -> Tensor {
        self.f_mish_backward(grad_output).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mish_out`].
    pub fn mish_out(&self, out: &Tensor) -> Tensor {
        self.f_mish_out(out).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mkldnn_adaptive_avg_pool2d`].
    pub fn mkldnn_adaptive_avg_pool2d(&self, output_size: impl IntList) -> Tensor {
        self.f_mkldnn_adaptive_avg_pool2d(output_size).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mkldnn_adaptive_avg_pool2d_backward`].
    pub fn mkldnn_adaptive_avg_pool2d_backward(&self, grad_output: &Tensor) -> Tensor {
        self.f_mkldnn_adaptive_avg_pool2d_backward(grad_output).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mkldnn_adaptive_avg_pool2d_backward_out`].
    pub fn mkldnn_adaptive_avg_pool2d_backward_out(
        &self,
        out: &Tensor,
        grad_output: &Tensor,
    ) -> Tensor {
        self.f_mkldnn_adaptive_avg_pool2d_backward_out(out, grad_output).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mkldnn_adaptive_avg_pool2d_out`].
    pub fn mkldnn_adaptive_avg_pool2d_out(
        &self,
        out: &Tensor,
        output_size: impl IntList,
    ) -> Tensor {
        self.f_mkldnn_adaptive_avg_pool2d_out(out, output_size).unwrap()
    }
12915
    /// Panicking wrapper around [`Tensor::f_mkldnn_convolution`].
    pub fn mkldnn_convolution<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        bias: Option<T>,
        padding: impl IntList,
        stride: impl IntList,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_mkldnn_convolution(weight, bias, padding, stride, dilation, groups).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mkldnn_convolution_out`].
    pub fn mkldnn_convolution_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        bias: Option<T>,
        padding: impl IntList,
        stride: impl IntList,
        dilation: impl IntList,
        groups: i64,
    ) -> Tensor {
        self.f_mkldnn_convolution_out(out, weight, bias, padding, stride, dilation, groups).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mkldnn_linear`].
    pub fn mkldnn_linear<T: Borrow<Tensor>>(&self, weight: &Tensor, bias: Option<T>) -> Tensor {
        self.f_mkldnn_linear(weight, bias).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mkldnn_linear_backward_input`]; associated function (no `self`).
    pub fn mkldnn_linear_backward_input(
        input_size: impl IntList,
        grad_output: &Tensor,
        weight: &Tensor,
    ) -> Tensor {
        Tensor::f_mkldnn_linear_backward_input(input_size, grad_output, weight).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mkldnn_linear_backward_input_out`]; associated function (no `self`).
    pub fn mkldnn_linear_backward_input_out(
        out: &Tensor,
        input_size: impl IntList,
        grad_output: &Tensor,
        weight: &Tensor,
    ) -> Tensor {
        Tensor::f_mkldnn_linear_backward_input_out(out, input_size, grad_output, weight).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mkldnn_linear_backward_weights`].
    pub fn mkldnn_linear_backward_weights(
        &self,
        grad_output: &Tensor,
        weight: &Tensor,
        bias_defined: bool,
    ) -> (Tensor, Tensor) {
        self.f_mkldnn_linear_backward_weights(grad_output, weight, bias_defined).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mkldnn_linear_backward_weights_out`].
    pub fn mkldnn_linear_backward_weights_out(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        grad_output: &Tensor,
        weight: &Tensor,
        bias_defined: bool,
    ) -> (Tensor, Tensor) {
        self.f_mkldnn_linear_backward_weights_out(out0, out1, grad_output, weight, bias_defined)
            .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mkldnn_linear_out`].
    pub fn mkldnn_linear_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        bias: Option<T>,
    ) -> Tensor {
        self.f_mkldnn_linear_out(out, weight, bias).unwrap()
    }
12991
    /// Panicking wrapper around [`Tensor::f_mkldnn_max_pool2d`].
    pub fn mkldnn_max_pool2d(
        &self,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> Tensor {
        self.f_mkldnn_max_pool2d(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mkldnn_max_pool2d_backward`].
    pub fn mkldnn_max_pool2d_backward(
        &self,
        grad_output: &Tensor,
        output: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> Tensor {
        self.f_mkldnn_max_pool2d_backward(
            grad_output,
            output,
            kernel_size,
            stride,
            padding,
            dilation,
            ceil_mode,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mkldnn_max_pool2d_backward_out`].
    pub fn mkldnn_max_pool2d_backward_out(
        &self,
        out: &Tensor,
        grad_output: &Tensor,
        output: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> Tensor {
        self.f_mkldnn_max_pool2d_backward_out(
            out,
            grad_output,
            output,
            kernel_size,
            stride,
            padding,
            dilation,
            ceil_mode,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mkldnn_max_pool2d_out`].
    pub fn mkldnn_max_pool2d_out(
        &self,
        out: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> Tensor {
        self.f_mkldnn_max_pool2d_out(out, kernel_size, stride, padding, dilation, ceil_mode)
            .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mkldnn_max_pool3d`].
    pub fn mkldnn_max_pool3d(
        &self,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> Tensor {
        self.f_mkldnn_max_pool3d(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_mkldnn_max_pool3d_backward`].
    pub fn mkldnn_max_pool3d_backward(
        &self,
        grad_output: &Tensor,
        output: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> Tensor {
        self.f_mkldnn_max_pool3d_backward(
            grad_output,
            output,
            kernel_size,
            stride,
            padding,
            dilation,
            ceil_mode,
        )
        .unwrap()
    }
13094
13095    pub fn mkldnn_max_pool3d_backward_out(
13096        &self,
13097        out: &Tensor,
13098        grad_output: &Tensor,
13099        output: &Tensor,
13100        kernel_size: impl IntList,
13101        stride: impl IntList,
13102        padding: impl IntList,
13103        dilation: impl IntList,
13104        ceil_mode: bool,
13105    ) -> Tensor {
13106        self.f_mkldnn_max_pool3d_backward_out(
13107            out,
13108            grad_output,
13109            output,
13110            kernel_size,
13111            stride,
13112            padding,
13113            dilation,
13114            ceil_mode,
13115        )
13116        .unwrap()
13117    }
13118
13119    pub fn mkldnn_max_pool3d_out(
13120        &self,
13121        out: &Tensor,
13122        kernel_size: impl IntList,
13123        stride: impl IntList,
13124        padding: impl IntList,
13125        dilation: impl IntList,
13126        ceil_mode: bool,
13127    ) -> Tensor {
13128        self.f_mkldnn_max_pool3d_out(out, kernel_size, stride, padding, dilation, ceil_mode)
13129            .unwrap()
13130    }
13131
13132    pub fn mkldnn_reorder_conv2d_weight(
13133        &self,
13134        padding: impl IntList,
13135        stride: impl IntList,
13136        dilation: impl IntList,
13137        groups: i64,
13138        input_size: impl IntListOption,
13139    ) -> Tensor {
13140        self.f_mkldnn_reorder_conv2d_weight(padding, stride, dilation, groups, input_size).unwrap()
13141    }
13142
13143    pub fn mkldnn_reorder_conv2d_weight_out(
13144        &self,
13145        out: &Tensor,
13146        padding: impl IntList,
13147        stride: impl IntList,
13148        dilation: impl IntList,
13149        groups: i64,
13150        input_size: impl IntListOption,
13151    ) -> Tensor {
13152        self.f_mkldnn_reorder_conv2d_weight_out(out, padding, stride, dilation, groups, input_size)
13153            .unwrap()
13154    }
13155
13156    pub fn mkldnn_reorder_conv3d_weight(
13157        &self,
13158        padding: impl IntList,
13159        stride: impl IntList,
13160        dilation: impl IntList,
13161        groups: i64,
13162        input_size: impl IntListOption,
13163    ) -> Tensor {
13164        self.f_mkldnn_reorder_conv3d_weight(padding, stride, dilation, groups, input_size).unwrap()
13165    }
13166
13167    pub fn mkldnn_reorder_conv3d_weight_out(
13168        &self,
13169        out: &Tensor,
13170        padding: impl IntList,
13171        stride: impl IntList,
13172        dilation: impl IntList,
13173        groups: i64,
13174        input_size: impl IntListOption,
13175    ) -> Tensor {
13176        self.f_mkldnn_reorder_conv3d_weight_out(out, padding, stride, dilation, groups, input_size)
13177            .unwrap()
13178    }
13179
    // Generated panicking wrappers for the MKLDNN RNN-layer forward/backward ops
    // and their `_out` variants; each delegates to the fallible `f_<name>` binding
    // and `.unwrap()`s. Signatures mirror the underlying ATen op argument lists.
    pub fn mkldnn_rnn_layer(
        &self,
        weight0: &Tensor,
        weight1: &Tensor,
        weight2: &Tensor,
        weight3: &Tensor,
        hx_: &Tensor,
        cx_: &Tensor,
        reverse: bool,
        batch_sizes: impl IntList,
        mode: i64,
        hidden_size: i64,
        num_layers: i64,
        has_biases: bool,
        bidirectional: bool,
        batch_first: bool,
        train: bool,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        self.f_mkldnn_rnn_layer(
            weight0,
            weight1,
            weight2,
            weight3,
            hx_,
            cx_,
            reverse,
            batch_sizes,
            mode,
            hidden_size,
            num_layers,
            has_biases,
            bidirectional,
            batch_first,
            train,
        )
        .unwrap()
    }

    pub fn mkldnn_rnn_layer_backward<T: Borrow<Tensor>>(
        &self,
        weight1: &Tensor,
        weight2: &Tensor,
        weight3: &Tensor,
        weight4: &Tensor,
        hx_: &Tensor,
        cx_tmp: &Tensor,
        output: &Tensor,
        hy_: &Tensor,
        cy_: &Tensor,
        grad_output: Option<T>,
        grad_hy: Option<T>,
        grad_cy: Option<T>,
        reverse: bool,
        mode: i64,
        hidden_size: i64,
        num_layers: i64,
        has_biases: bool,
        train: bool,
        bidirectional: bool,
        batch_sizes: impl IntList,
        batch_first: bool,
        workspace: &Tensor,
    ) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) {
        self.f_mkldnn_rnn_layer_backward(
            weight1,
            weight2,
            weight3,
            weight4,
            hx_,
            cx_tmp,
            output,
            hy_,
            cy_,
            grad_output,
            grad_hy,
            grad_cy,
            reverse,
            mode,
            hidden_size,
            num_layers,
            has_biases,
            train,
            bidirectional,
            batch_sizes,
            batch_first,
            workspace,
        )
        .unwrap()
    }

    pub fn mkldnn_rnn_layer_backward_out<T: Borrow<Tensor>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        out3: &Tensor,
        out4: &Tensor,
        out5: &Tensor,
        out6: &Tensor,
        weight1: &Tensor,
        weight2: &Tensor,
        weight3: &Tensor,
        weight4: &Tensor,
        hx_: &Tensor,
        cx_tmp: &Tensor,
        output: &Tensor,
        hy_: &Tensor,
        cy_: &Tensor,
        grad_output: Option<T>,
        grad_hy: Option<T>,
        grad_cy: Option<T>,
        reverse: bool,
        mode: i64,
        hidden_size: i64,
        num_layers: i64,
        has_biases: bool,
        train: bool,
        bidirectional: bool,
        batch_sizes: impl IntList,
        batch_first: bool,
        workspace: &Tensor,
    ) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) {
        self.f_mkldnn_rnn_layer_backward_out(
            out0,
            out1,
            out2,
            out3,
            out4,
            out5,
            out6,
            weight1,
            weight2,
            weight3,
            weight4,
            hx_,
            cx_tmp,
            output,
            hy_,
            cy_,
            grad_output,
            grad_hy,
            grad_cy,
            reverse,
            mode,
            hidden_size,
            num_layers,
            has_biases,
            train,
            bidirectional,
            batch_sizes,
            batch_first,
            workspace,
        )
        .unwrap()
    }

    pub fn mkldnn_rnn_layer_out(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        out3: &Tensor,
        weight0: &Tensor,
        weight1: &Tensor,
        weight2: &Tensor,
        weight3: &Tensor,
        hx_: &Tensor,
        cx_: &Tensor,
        reverse: bool,
        batch_sizes: impl IntList,
        mode: i64,
        hidden_size: i64,
        num_layers: i64,
        has_biases: bool,
        bidirectional: bool,
        batch_first: bool,
        train: bool,
    ) -> (Tensor, Tensor, Tensor, Tensor) {
        self.f_mkldnn_rnn_layer_out(
            out0,
            out1,
            out2,
            out3,
            weight0,
            weight1,
            weight2,
            weight3,
            hx_,
            cx_,
            reverse,
            batch_sizes,
            mode,
            hidden_size,
            num_layers,
            has_biases,
            bidirectional,
            batch_first,
            train,
        )
        .unwrap()
    }
13381
    // Generated panicking wrappers: matmul (`mm`, `mv`), `mode`, axis-moving,
    // MSE / margin losses, multiplication (`g_mul*` carry the `g_` prefix so the
    // generated name does not collide with the `std::ops::Mul` trait method),
    // `multinomial`, and `mvlgamma`. Each delegates to `f_<name>` and `.unwrap()`s.
    pub fn mm(&self, mat2: &Tensor) -> Tensor {
        self.f_mm(mat2).unwrap()
    }

    pub fn mm_out(&self, out: &Tensor, mat2: &Tensor) -> Tensor {
        self.f_mm_out(out, mat2).unwrap()
    }

    pub fn mode(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor) {
        self.f_mode(dim, keepdim).unwrap()
    }

    pub fn mode_values(
        &self,
        values: &Tensor,
        indices: &Tensor,
        dim: i64,
        keepdim: bool,
    ) -> (Tensor, Tensor) {
        self.f_mode_values(values, indices, dim, keepdim).unwrap()
    }

    pub fn moveaxis(&self, source: impl IntList, destination: impl IntList) -> Tensor {
        self.f_moveaxis(source, destination).unwrap()
    }

    pub fn moveaxis_int(&self, source: i64, destination: i64) -> Tensor {
        self.f_moveaxis_int(source, destination).unwrap()
    }

    pub fn movedim(&self, source: impl IntList, destination: impl IntList) -> Tensor {
        self.f_movedim(source, destination).unwrap()
    }

    pub fn movedim_int(&self, source: i64, destination: i64) -> Tensor {
        self.f_movedim_int(source, destination).unwrap()
    }

    pub fn mse_loss(&self, target: &Tensor, reduction: crate::Reduction) -> Tensor {
        self.f_mse_loss(target, reduction).unwrap()
    }

    pub fn mse_loss_backward(
        &self,
        grad_output: &Tensor,
        target: &Tensor,
        reduction: crate::Reduction,
    ) -> Tensor {
        self.f_mse_loss_backward(grad_output, target, reduction).unwrap()
    }

    pub fn mse_loss_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        target: &Tensor,
        reduction: crate::Reduction,
    ) -> Tensor {
        self.f_mse_loss_backward_grad_input(grad_input, grad_output, target, reduction).unwrap()
    }

    pub fn mse_loss_out(
        &self,
        out: &Tensor,
        target: &Tensor,
        reduction: crate::Reduction,
    ) -> Tensor {
        self.f_mse_loss_out(out, target, reduction).unwrap()
    }

    pub fn msort(&self) -> Tensor {
        self.f_msort().unwrap()
    }

    pub fn msort_out(&self, out: &Tensor) -> Tensor {
        self.f_msort_out(out).unwrap()
    }

    pub fn mt(&self) -> Tensor {
        self.f_mt().unwrap()
    }

    pub fn g_mul(&self, other: &Tensor) -> Tensor {
        self.f_mul(other).unwrap()
    }

    pub fn g_mul_(&mut self, other: &Tensor) -> Tensor {
        self.f_mul_(other).unwrap()
    }

    pub fn mul_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_mul_out(out, other).unwrap()
    }

    pub fn g_mul_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_mul_scalar(other).unwrap()
    }

    pub fn g_mul_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_mul_scalar_(other).unwrap()
    }

    pub fn mul_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_mul_scalar_out(out, other).unwrap()
    }

    pub fn multi_margin_loss_backward<T: Borrow<Tensor>, S: Into<Scalar>>(
        &self,
        grad_output: &Tensor,
        target: &Tensor,
        p: S,
        margin: S,
        weight: Option<T>,
        reduction: crate::Reduction,
    ) -> Tensor {
        self.f_multi_margin_loss_backward(grad_output, target, p, margin, weight, reduction)
            .unwrap()
    }

    pub fn multi_margin_loss_backward_grad_input<T: Borrow<Tensor>, S: Into<Scalar>>(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        target: &Tensor,
        p: S,
        margin: S,
        weight: Option<T>,
        reduction: crate::Reduction,
    ) -> Tensor {
        self.f_multi_margin_loss_backward_grad_input(
            grad_input,
            grad_output,
            target,
            p,
            margin,
            weight,
            reduction,
        )
        .unwrap()
    }

    pub fn multilabel_margin_loss(&self, target: &Tensor, reduction: crate::Reduction) -> Tensor {
        self.f_multilabel_margin_loss(target, reduction).unwrap()
    }

    pub fn multilabel_margin_loss_backward(
        &self,
        grad_output: &Tensor,
        target: &Tensor,
        reduction: crate::Reduction,
        is_target: &Tensor,
    ) -> Tensor {
        self.f_multilabel_margin_loss_backward(grad_output, target, reduction, is_target).unwrap()
    }

    pub fn multilabel_margin_loss_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        target: &Tensor,
        reduction: crate::Reduction,
        is_target: &Tensor,
    ) -> Tensor {
        self.f_multilabel_margin_loss_backward_grad_input(
            grad_input,
            grad_output,
            target,
            reduction,
            is_target,
        )
        .unwrap()
    }

    pub fn multilabel_margin_loss_out(
        &self,
        out: &Tensor,
        target: &Tensor,
        reduction: crate::Reduction,
    ) -> Tensor {
        self.f_multilabel_margin_loss_out(out, target, reduction).unwrap()
    }

    pub fn multinomial(&self, num_samples: i64, replacement: bool) -> Tensor {
        self.f_multinomial(num_samples, replacement).unwrap()
    }

    pub fn multinomial_out(&self, out: &Tensor, num_samples: i64, replacement: bool) -> Tensor {
        self.f_multinomial_out(out, num_samples, replacement).unwrap()
    }

    pub fn multiply(&self, other: &Tensor) -> Tensor {
        self.f_multiply(other).unwrap()
    }

    pub fn multiply_(&mut self, other: &Tensor) -> Tensor {
        self.f_multiply_(other).unwrap()
    }

    pub fn multiply_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_multiply_out(out, other).unwrap()
    }

    pub fn multiply_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_multiply_scalar(other).unwrap()
    }

    pub fn multiply_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_multiply_scalar_(other).unwrap()
    }

    pub fn mv(&self, vec: &Tensor) -> Tensor {
        self.f_mv(vec).unwrap()
    }

    pub fn mv_out(&self, out: &Tensor, vec: &Tensor) -> Tensor {
        self.f_mv_out(out, vec).unwrap()
    }

    pub fn mvlgamma(&self, p: i64) -> Tensor {
        self.f_mvlgamma(p).unwrap()
    }

    pub fn mvlgamma_(&mut self, p: i64) -> Tensor {
        self.f_mvlgamma_(p).unwrap()
    }

    pub fn mvlgamma_out(&self, out: &Tensor, p: i64) -> Tensor {
        self.f_mvlgamma_out(out, p).unwrap()
    }
13611
    // Generated panicking wrappers for the NaN-aware ops (`nan_to_num`, nan-skipping
    // reductions/quantiles) and `narrow`. Optional numeric arguments are taken as
    // `impl Into<Option<_>>` so callers can pass either a value or `None`.
    // Each method delegates to the fallible `f_<name>` binding and `.unwrap()`s.
    pub fn nan_to_num(
        &self,
        nan: impl Into<Option<f64>>,
        posinf: impl Into<Option<f64>>,
        neginf: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_nan_to_num(nan, posinf, neginf).unwrap()
    }

    pub fn nan_to_num_(
        &mut self,
        nan: impl Into<Option<f64>>,
        posinf: impl Into<Option<f64>>,
        neginf: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_nan_to_num_(nan, posinf, neginf).unwrap()
    }

    pub fn nan_to_num_out(
        &self,
        out: &Tensor,
        nan: impl Into<Option<f64>>,
        posinf: impl Into<Option<f64>>,
        neginf: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_nan_to_num_out(out, nan, posinf, neginf).unwrap()
    }

    pub fn nanmean(
        &self,
        dim: impl IntListOption,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_nanmean(dim, keepdim, dtype).unwrap()
    }

    pub fn nanmean_out(
        &self,
        out: &Tensor,
        dim: impl IntListOption,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_nanmean_out(out, dim, keepdim, dtype).unwrap()
    }

    pub fn nanmedian(&self) -> Tensor {
        self.f_nanmedian().unwrap()
    }

    pub fn nanmedian_dim(&self, dim: i64, keepdim: bool) -> (Tensor, Tensor) {
        self.f_nanmedian_dim(dim, keepdim).unwrap()
    }

    pub fn nanmedian_dim_values(
        &self,
        values: &Tensor,
        indices: &Tensor,
        dim: i64,
        keepdim: bool,
    ) -> (Tensor, Tensor) {
        self.f_nanmedian_dim_values(values, indices, dim, keepdim).unwrap()
    }

    pub fn nanmedian_out(&self, out: &Tensor) -> Tensor {
        self.f_nanmedian_out(out).unwrap()
    }

    pub fn nanquantile(
        &self,
        q: &Tensor,
        dim: impl Into<Option<i64>>,
        keepdim: bool,
        interpolation: &str,
    ) -> Tensor {
        self.f_nanquantile(q, dim, keepdim, interpolation).unwrap()
    }

    pub fn nanquantile_out(
        &self,
        out: &Tensor,
        q: &Tensor,
        dim: impl Into<Option<i64>>,
        keepdim: bool,
        interpolation: &str,
    ) -> Tensor {
        self.f_nanquantile_out(out, q, dim, keepdim, interpolation).unwrap()
    }

    pub fn nanquantile_scalar(
        &self,
        q: f64,
        dim: impl Into<Option<i64>>,
        keepdim: bool,
        interpolation: &str,
    ) -> Tensor {
        self.f_nanquantile_scalar(q, dim, keepdim, interpolation).unwrap()
    }

    pub fn nanquantile_scalar_out(
        &self,
        out: &Tensor,
        q: f64,
        dim: impl Into<Option<i64>>,
        keepdim: bool,
        interpolation: &str,
    ) -> Tensor {
        self.f_nanquantile_scalar_out(out, q, dim, keepdim, interpolation).unwrap()
    }

    pub fn nansum(
        &self,
        dim: impl IntListOption,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_nansum(dim, keepdim, dtype).unwrap()
    }

    pub fn nansum_out(
        &self,
        out: &Tensor,
        dim: impl IntListOption,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_nansum_out(out, dim, keepdim, dtype).unwrap()
    }

    pub fn narrow(&self, dim: i64, start: i64, length: i64) -> Tensor {
        self.f_narrow(dim, start, length).unwrap()
    }

    pub fn narrow_copy(&self, dim: i64, start: i64, length: i64) -> Tensor {
        self.f_narrow_copy(dim, start, length).unwrap()
    }

    pub fn narrow_copy_out(&self, out: &Tensor, dim: i64, start: i64, length: i64) -> Tensor {
        self.f_narrow_copy_out(out, dim, start, length).unwrap()
    }

    pub fn narrow_tensor(&self, dim: i64, start: &Tensor, length: i64) -> Tensor {
        self.f_narrow_tensor(dim, start, length).unwrap()
    }
13757
    // Generated panicking wrappers for the `native_*` ops (batch/group/layer norm,
    // dropout, channel shuffle, norm). Note: `native_dropout_backward` and
    // `native_dropout_backward_out` are associated functions (no `&self`) and are
    // called as `Tensor::f_<name>`. Each wrapper `.unwrap()`s the fallible binding.
    pub fn native_batch_norm<T: Borrow<Tensor>>(
        &self,
        weight: Option<T>,
        bias: Option<T>,
        running_mean: Option<T>,
        running_var: Option<T>,
        training: bool,
        momentum: f64,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_native_batch_norm(weight, bias, running_mean, running_var, training, momentum, eps)
            .unwrap()
    }

    pub fn native_batch_norm_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        save_mean: &Tensor,
        save_invstd: &Tensor,
        weight: Option<T>,
        bias: Option<T>,
        running_mean: Option<T>,
        running_var: Option<T>,
        training: bool,
        momentum: f64,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_native_batch_norm_out(
            out,
            save_mean,
            save_invstd,
            weight,
            bias,
            running_mean,
            running_var,
            training,
            momentum,
            eps,
        )
        .unwrap()
    }

    pub fn native_channel_shuffle(&self, groups: i64) -> Tensor {
        self.f_native_channel_shuffle(groups).unwrap()
    }

    pub fn native_dropout(&self, p: f64, train: bool) -> (Tensor, Tensor) {
        self.f_native_dropout(p, train).unwrap()
    }

    pub fn native_dropout_backward(grad_output: &Tensor, mask: &Tensor, scale: f64) -> Tensor {
        Tensor::f_native_dropout_backward(grad_output, mask, scale).unwrap()
    }

    pub fn native_dropout_backward_out(
        out: &Tensor,
        grad_output: &Tensor,
        mask: &Tensor,
        scale: f64,
    ) -> Tensor {
        Tensor::f_native_dropout_backward_out(out, grad_output, mask, scale).unwrap()
    }

    pub fn native_dropout_out(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        p: f64,
        train: bool,
    ) -> (Tensor, Tensor) {
        self.f_native_dropout_out(out0, out1, p, train).unwrap()
    }

    pub fn native_group_norm<T: Borrow<Tensor>>(
        &self,
        weight: Option<T>,
        bias: Option<T>,
        n: i64,
        c: i64,
        hxw: i64,
        group: i64,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_native_group_norm(weight, bias, n, c, hxw, group, eps).unwrap()
    }

    pub fn native_group_norm_out<T: Borrow<Tensor>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        weight: Option<T>,
        bias: Option<T>,
        n: i64,
        c: i64,
        hxw: i64,
        group: i64,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_native_group_norm_out(out0, out1, out2, weight, bias, n, c, hxw, group, eps).unwrap()
    }

    pub fn native_layer_norm<T: Borrow<Tensor>>(
        &self,
        normalized_shape: impl IntList,
        weight: Option<T>,
        bias: Option<T>,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_native_layer_norm(normalized_shape, weight, bias, eps).unwrap()
    }

    pub fn native_layer_norm_out<T: Borrow<Tensor>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        out2: &Tensor,
        normalized_shape: impl IntList,
        weight: Option<T>,
        bias: Option<T>,
        eps: f64,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_native_layer_norm_out(out0, out1, out2, normalized_shape, weight, bias, eps).unwrap()
    }

    pub fn native_norm(&self) -> Tensor {
        self.f_native_norm().unwrap()
    }

    pub fn native_norm_out(&self, out: &Tensor) -> Tensor {
        self.f_native_norm_out(out).unwrap()
    }

    pub fn native_norm_scalaropt_dim_dtype<S: Into<Scalar>>(
        &self,
        p: S,
        dim: impl IntList,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_native_norm_scalaropt_dim_dtype(p, dim, keepdim, dtype).unwrap()
    }

    pub fn native_norm_scalaropt_dim_dtype_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        p: S,
        dim: impl IntList,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_native_norm_scalaropt_dim_dtype_out(out, p, dim, keepdim, dtype).unwrap()
    }
13911
    // Generated panicking wrappers: `ne` comparisons, negation, `new_*` factory
    // methods (which take a `(Kind, Device)` options pair), `nextafter`, and the
    // NLL-loss family. `g_nll_loss` carries the `g_` prefix per the generator's
    // collision-avoidance naming. Each delegates to `f_<name>` and `.unwrap()`s.
    pub fn ne<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_ne(other).unwrap()
    }

    pub fn ne_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_ne_(other).unwrap()
    }

    pub fn ne_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_ne_scalar_out(out, other).unwrap()
    }

    pub fn ne_tensor(&self, other: &Tensor) -> Tensor {
        self.f_ne_tensor(other).unwrap()
    }

    pub fn ne_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_ne_tensor_(other).unwrap()
    }

    pub fn ne_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_ne_tensor_out(out, other).unwrap()
    }

    pub fn neg(&self) -> Tensor {
        self.f_neg().unwrap()
    }

    pub fn neg_(&mut self) -> Tensor {
        self.f_neg_().unwrap()
    }

    pub fn neg_out(&self, out: &Tensor) -> Tensor {
        self.f_neg_out(out).unwrap()
    }

    pub fn negative(&self) -> Tensor {
        self.f_negative().unwrap()
    }

    pub fn negative_(&mut self) -> Tensor {
        self.f_negative_().unwrap()
    }

    pub fn negative_out(&self, out: &Tensor) -> Tensor {
        self.f_negative_out(out).unwrap()
    }

    pub fn nested_to_padded_tensor(&self, padding: f64, output_size: impl IntListOption) -> Tensor {
        self.f_nested_to_padded_tensor(padding, output_size).unwrap()
    }

    pub fn new_empty(&self, size: impl IntList, options: (Kind, Device)) -> Tensor {
        self.f_new_empty(size, options).unwrap()
    }

    pub fn new_empty_out(&self, out: &Tensor, size: impl IntList) -> Tensor {
        self.f_new_empty_out(out, size).unwrap()
    }

    pub fn new_empty_strided(
        &self,
        size: impl IntList,
        stride: impl IntList,
        options: (Kind, Device),
    ) -> Tensor {
        self.f_new_empty_strided(size, stride, options).unwrap()
    }

    pub fn new_empty_strided_out(
        &self,
        out: &Tensor,
        size: impl IntList,
        stride: impl IntList,
    ) -> Tensor {
        self.f_new_empty_strided_out(out, size, stride).unwrap()
    }

    pub fn new_full<S: Into<Scalar>>(
        &self,
        size: impl IntList,
        fill_value: S,
        options: (Kind, Device),
    ) -> Tensor {
        self.f_new_full(size, fill_value, options).unwrap()
    }

    pub fn new_full_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        size: impl IntList,
        fill_value: S,
    ) -> Tensor {
        self.f_new_full_out(out, size, fill_value).unwrap()
    }

    pub fn new_ones(&self, size: impl IntList, options: (Kind, Device)) -> Tensor {
        self.f_new_ones(size, options).unwrap()
    }

    pub fn new_ones_out(&self, out: &Tensor, size: impl IntList) -> Tensor {
        self.f_new_ones_out(out, size).unwrap()
    }

    pub fn new_zeros(&self, size: impl IntList, options: (Kind, Device)) -> Tensor {
        self.f_new_zeros(size, options).unwrap()
    }

    pub fn new_zeros_out(&self, out: &Tensor, size: impl IntList) -> Tensor {
        self.f_new_zeros_out(out, size).unwrap()
    }

    pub fn nextafter(&self, other: &Tensor) -> Tensor {
        self.f_nextafter(other).unwrap()
    }

    pub fn nextafter_(&mut self, other: &Tensor) -> Tensor {
        self.f_nextafter_(other).unwrap()
    }

    pub fn nextafter_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_nextafter_out(out, other).unwrap()
    }

    pub fn g_nll_loss<T: Borrow<Tensor>>(
        &self,
        target: &Tensor,
        weight: Option<T>,
        reduction: crate::Reduction,
        ignore_index: i64,
    ) -> Tensor {
        self.f_nll_loss(target, weight, reduction, ignore_index).unwrap()
    }

    pub fn nll_loss2d<T: Borrow<Tensor>>(
        &self,
        target: &Tensor,
        weight: Option<T>,
        reduction: crate::Reduction,
        ignore_index: i64,
    ) -> Tensor {
        self.f_nll_loss2d(target, weight, reduction, ignore_index).unwrap()
    }

    pub fn nll_loss2d_backward<T: Borrow<Tensor>>(
        &self,
        grad_output: &Tensor,
        target: &Tensor,
        weight: Option<T>,
        reduction: crate::Reduction,
        ignore_index: i64,
        total_weight: &Tensor,
    ) -> Tensor {
        self.f_nll_loss2d_backward(
            grad_output,
            target,
            weight,
            reduction,
            ignore_index,
            total_weight,
        )
        .unwrap()
    }

    pub fn nll_loss2d_backward_grad_input<T: Borrow<Tensor>>(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        target: &Tensor,
        weight: Option<T>,
        reduction: crate::Reduction,
        ignore_index: i64,
        total_weight: &Tensor,
    ) -> Tensor {
        self.f_nll_loss2d_backward_grad_input(
            grad_input,
            grad_output,
            target,
            weight,
            reduction,
            ignore_index,
            total_weight,
        )
        .unwrap()
    }

    pub fn nll_loss2d_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        target: &Tensor,
        weight: Option<T>,
        reduction: crate::Reduction,
        ignore_index: i64,
    ) -> Tensor {
        self.f_nll_loss2d_out(out, target, weight, reduction, ignore_index).unwrap()
    }
14108
14109    pub fn nll_loss_backward<T: Borrow<Tensor>>(
14110        &self,
14111        grad_output: &Tensor,
14112        target: &Tensor,
14113        weight: Option<T>,
14114        reduction: crate::Reduction,
14115        ignore_index: i64,
14116        total_weight: &Tensor,
14117    ) -> Tensor {
14118        self.f_nll_loss_backward(grad_output, target, weight, reduction, ignore_index, total_weight)
14119            .unwrap()
14120    }
14121
14122    pub fn nll_loss_backward_grad_input<T: Borrow<Tensor>>(
14123        &self,
14124        grad_input: &Tensor,
14125        grad_output: &Tensor,
14126        target: &Tensor,
14127        weight: Option<T>,
14128        reduction: crate::Reduction,
14129        ignore_index: i64,
14130        total_weight: &Tensor,
14131    ) -> Tensor {
14132        self.f_nll_loss_backward_grad_input(
14133            grad_input,
14134            grad_output,
14135            target,
14136            weight,
14137            reduction,
14138            ignore_index,
14139            total_weight,
14140        )
14141        .unwrap()
14142    }
14143
14144    pub fn nll_loss_nd<T: Borrow<Tensor>>(
14145        &self,
14146        target: &Tensor,
14147        weight: Option<T>,
14148        reduction: crate::Reduction,
14149        ignore_index: i64,
14150    ) -> Tensor {
14151        self.f_nll_loss_nd(target, weight, reduction, ignore_index).unwrap()
14152    }
14153
14154    pub fn nll_loss_out<T: Borrow<Tensor>>(
14155        &self,
14156        out: &Tensor,
14157        target: &Tensor,
14158        weight: Option<T>,
14159        reduction: crate::Reduction,
14160        ignore_index: i64,
14161    ) -> Tensor {
14162        self.f_nll_loss_out(out, target, weight, reduction, ignore_index).unwrap()
14163    }
14164
    /// Panicking wrapper around [`Tensor::f_nonzero`].
    pub fn nonzero(&self) -> Tensor {
        self.f_nonzero().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_nonzero_numpy`]; returns one tensor per dimension.
    pub fn nonzero_numpy(&self) -> Vec<Tensor> {
        self.f_nonzero_numpy().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_nonzero_out`].
    pub fn nonzero_out(&self, out: &Tensor) -> Tensor {
        self.f_nonzero_out(out).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_nonzero_static`].
    pub fn nonzero_static(&self, size: i64, fill_value: i64) -> Tensor {
        self.f_nonzero_static(size, fill_value).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_nonzero_static_out`].
    pub fn nonzero_static_out(&self, out: &Tensor, size: i64, fill_value: i64) -> Tensor {
        self.f_nonzero_static_out(out, size, fill_value).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_norm`].
    pub fn norm(&self) -> Tensor {
        self.f_norm().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_norm_dtype_out`].
    pub fn norm_dtype_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        p: S,
        dim: impl IntList,
        keepdim: bool,
        dtype: Kind,
    ) -> Tensor {
        self.f_norm_dtype_out(out, p, dim, keepdim, dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_norm_except_dim`] (associated function, no receiver).
    pub fn norm_except_dim(v: &Tensor, pow: i64, dim: i64) -> Tensor {
        Tensor::f_norm_except_dim(v, pow, dim).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_norm_out`].
    pub fn norm_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        p: S,
        dim: impl IntList,
        keepdim: bool,
    ) -> Tensor {
        self.f_norm_out(out, p, dim, keepdim).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_norm_scalar_out`].
    pub fn norm_scalar_out(&self, out: &Tensor) -> Tensor {
        self.f_norm_scalar_out(out).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_norm_scalaropt_dim`].
    pub fn norm_scalaropt_dim<S: Into<Scalar>>(
        &self,
        p: S,
        dim: impl IntList,
        keepdim: bool,
    ) -> Tensor {
        self.f_norm_scalaropt_dim(p, dim, keepdim).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_norm_scalaropt_dim_dtype`].
    pub fn norm_scalaropt_dim_dtype<S: Into<Scalar>>(
        &self,
        p: S,
        dim: impl IntList,
        keepdim: bool,
        dtype: Kind,
    ) -> Tensor {
        self.f_norm_scalaropt_dim_dtype(p, dim, keepdim, dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_norm_scalaropt_dtype`].
    pub fn norm_scalaropt_dtype<S: Into<Scalar>>(&self, p: S, dtype: Kind) -> Tensor {
        self.f_norm_scalaropt_dtype(p, dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_norm_scalaropt_dtype_out`].
    pub fn norm_scalaropt_dtype_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        p: S,
        dtype: Kind,
    ) -> Tensor {
        self.f_norm_scalaropt_dtype_out(out, p, dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_normal_`] (note the `&mut self` receiver).
    pub fn normal_(&mut self, mean: f64, std: f64) -> Tensor {
        self.f_normal_(mean, std).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_normal_functional`].
    pub fn normal_functional(&self, mean: f64, std: f64) -> Tensor {
        self.f_normal_functional(mean, std).unwrap()
    }
14257
    /// Panicking wrapper around [`Tensor::f_not_equal`].
    pub fn not_equal<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_not_equal(other).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_not_equal_`] (note the `&mut self` receiver).
    pub fn not_equal_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_not_equal_(other).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_not_equal_scalar_out`].
    pub fn not_equal_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_not_equal_scalar_out(out, other).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_not_equal_tensor`].
    pub fn not_equal_tensor(&self, other: &Tensor) -> Tensor {
        self.f_not_equal_tensor(other).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_not_equal_tensor_`] (note the `&mut self` receiver).
    pub fn not_equal_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_not_equal_tensor_(other).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_not_equal_tensor_out`].
    pub fn not_equal_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_not_equal_tensor_out(out, other).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_nuclear_norm`].
    pub fn nuclear_norm(&self, keepdim: bool) -> Tensor {
        self.f_nuclear_norm(keepdim).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_nuclear_norm_dim`].
    pub fn nuclear_norm_dim(&self, dim: impl IntList, keepdim: bool) -> Tensor {
        self.f_nuclear_norm_dim(dim, keepdim).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_nuclear_norm_dim_out`].
    pub fn nuclear_norm_dim_out(&self, out: &Tensor, dim: impl IntList, keepdim: bool) -> Tensor {
        self.f_nuclear_norm_dim_out(out, dim, keepdim).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_nuclear_norm_out`].
    pub fn nuclear_norm_out(&self, out: &Tensor, keepdim: bool) -> Tensor {
        self.f_nuclear_norm_out(out, keepdim).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_numpy_t`].
    pub fn numpy_t(&self) -> Tensor {
        self.f_numpy_t().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_one_hot`].
    pub fn one_hot(&self, num_classes: i64) -> Tensor {
        self.f_one_hot(num_classes).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_ones`] (associated function, no receiver).
    pub fn ones(size: impl IntList, options: (Kind, Device)) -> Tensor {
        Tensor::f_ones(size, options).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_ones_like`].
    pub fn ones_like(&self) -> Tensor {
        self.f_ones_like().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_ones_like_out`].
    pub fn ones_like_out(&self, out: &Tensor) -> Tensor {
        self.f_ones_like_out(out).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_ones_out`] (associated function, no receiver).
    pub fn ones_out(out: &Tensor, size: impl IntList) -> Tensor {
        Tensor::f_ones_out(out, size).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_orgqr`].
    pub fn orgqr(&self, input2: &Tensor) -> Tensor {
        self.f_orgqr(input2).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_orgqr_out`].
    pub fn orgqr_out(&self, out: &Tensor, input2: &Tensor) -> Tensor {
        self.f_orgqr_out(out, input2).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_ormqr`].
    pub fn ormqr(&self, input2: &Tensor, input3: &Tensor, left: bool, transpose: bool) -> Tensor {
        self.f_ormqr(input2, input3, left, transpose).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_ormqr_out`].
    pub fn ormqr_out(
        &self,
        out: &Tensor,
        input2: &Tensor,
        input3: &Tensor,
        left: bool,
        transpose: bool,
    ) -> Tensor {
        self.f_ormqr_out(out, input2, input3, left, transpose).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_outer`].
    pub fn outer(&self, vec2: &Tensor) -> Tensor {
        self.f_outer(vec2).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_outer_out`].
    pub fn outer_out(&self, out: &Tensor, vec2: &Tensor) -> Tensor {
        self.f_outer_out(out, vec2).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_output_nr`].
    pub fn output_nr(&self) -> i64 {
        self.f_output_nr().unwrap()
    }
14356
    /// Panicking wrapper around [`Tensor::f_pad`].
    pub fn pad(&self, pad: impl IntList, mode: &str, value: impl Into<Option<f64>>) -> Tensor {
        self.f_pad(pad, mode, value).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_pad_sequence`] (associated function, no receiver).
    pub fn pad_sequence<T: Borrow<Tensor>>(
        sequences: &[T],
        batch_first: bool,
        padding_value: f64,
        padding_side: &str,
    ) -> Tensor {
        Tensor::f_pad_sequence(sequences, batch_first, padding_value, padding_side).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_pairwise_distance`] (associated function, no receiver).
    pub fn pairwise_distance(x1: &Tensor, x2: &Tensor, p: f64, eps: f64, keepdim: bool) -> Tensor {
        Tensor::f_pairwise_distance(x1, x2, p, eps, keepdim).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_pdist`].
    pub fn pdist(&self, p: f64) -> Tensor {
        self.f_pdist(p).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_permute`].
    pub fn permute(&self, dims: impl IntList) -> Tensor {
        self.f_permute(dims).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_permute_copy`].
    pub fn permute_copy(&self, dims: impl IntList) -> Tensor {
        self.f_permute_copy(dims).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_permute_copy_out`].
    pub fn permute_copy_out(&self, out: &Tensor, dims: impl IntList) -> Tensor {
        self.f_permute_copy_out(out, dims).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_pin_memory`].
    pub fn pin_memory(&self, device: Device) -> Tensor {
        self.f_pin_memory(device).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_pinverse`].
    pub fn pinverse(&self, rcond: f64) -> Tensor {
        self.f_pinverse(rcond).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_pixel_shuffle`].
    pub fn pixel_shuffle(&self, upscale_factor: i64) -> Tensor {
        self.f_pixel_shuffle(upscale_factor).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_pixel_shuffle_out`].
    pub fn pixel_shuffle_out(&self, out: &Tensor, upscale_factor: i64) -> Tensor {
        self.f_pixel_shuffle_out(out, upscale_factor).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_pixel_unshuffle`].
    pub fn pixel_unshuffle(&self, downscale_factor: i64) -> Tensor {
        self.f_pixel_unshuffle(downscale_factor).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_pixel_unshuffle_out`].
    pub fn pixel_unshuffle_out(&self, out: &Tensor, downscale_factor: i64) -> Tensor {
        self.f_pixel_unshuffle_out(out, downscale_factor).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_poisson`].
    pub fn poisson(&self) -> Tensor {
        self.f_poisson().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_poisson_nll_loss`].
    pub fn poisson_nll_loss(
        &self,
        target: &Tensor,
        log_input: bool,
        full: bool,
        eps: f64,
        reduction: crate::Reduction,
    ) -> Tensor {
        self.f_poisson_nll_loss(target, log_input, full, eps, reduction).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_poisson_out`].
    pub fn poisson_out(&self, out: &Tensor) -> Tensor {
        self.f_poisson_out(out).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_polar`] (associated function, no receiver).
    pub fn polar(abs: &Tensor, angle: &Tensor) -> Tensor {
        Tensor::f_polar(abs, angle).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_polar_out`] (associated function, no receiver).
    pub fn polar_out(out: &Tensor, abs: &Tensor, angle: &Tensor) -> Tensor {
        Tensor::f_polar_out(out, abs, angle).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_polygamma`].
    pub fn polygamma(&self, n: i64) -> Tensor {
        self.f_polygamma(n).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_polygamma_`] (note the `&mut self` receiver).
    pub fn polygamma_(&mut self, n: i64) -> Tensor {
        self.f_polygamma_(n).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_polygamma_out`].
    pub fn polygamma_out(&self, out: &Tensor, n: i64) -> Tensor {
        self.f_polygamma_out(out, n).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_positive`].
    pub fn positive(&self) -> Tensor {
        self.f_positive().unwrap()
    }
14456
    /// Panicking wrapper around [`Tensor::f_pow`].
    pub fn pow(&self, exponent: &Tensor) -> Tensor {
        self.f_pow(exponent).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_pow_`] (note the `&mut self` receiver).
    pub fn pow_<S: Into<Scalar>>(&mut self, exponent: S) -> Tensor {
        self.f_pow_(exponent).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_pow_scalar`] (associated function, no receiver).
    pub fn pow_scalar<S: Into<Scalar>>(self_scalar: S, exponent: &Tensor) -> Tensor {
        Tensor::f_pow_scalar(self_scalar, exponent).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_pow_scalar_out`] (associated function, no receiver).
    pub fn pow_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        self_scalar: S,
        exponent: &Tensor,
    ) -> Tensor {
        Tensor::f_pow_scalar_out(out, self_scalar, exponent).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_pow_tensor_`] (note the `&mut self` receiver).
    pub fn pow_tensor_(&mut self, exponent: &Tensor) -> Tensor {
        self.f_pow_tensor_(exponent).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_pow_tensor_scalar`].
    pub fn pow_tensor_scalar<S: Into<Scalar>>(&self, exponent: S) -> Tensor {
        self.f_pow_tensor_scalar(exponent).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_pow_tensor_scalar_out`].
    pub fn pow_tensor_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, exponent: S) -> Tensor {
        self.f_pow_tensor_scalar_out(out, exponent).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_pow_tensor_tensor_out`].
    pub fn pow_tensor_tensor_out(&self, out: &Tensor, exponent: &Tensor) -> Tensor {
        self.f_pow_tensor_tensor_out(out, exponent).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_prelu`].
    pub fn prelu(&self, weight: &Tensor) -> Tensor {
        self.f_prelu(weight).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_prod`].
    pub fn prod(&self, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_prod(dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_prod_dim_int`].
    pub fn prod_dim_int(&self, dim: i64, keepdim: bool, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_prod_dim_int(dim, keepdim, dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_prod_int_out`].
    pub fn prod_int_out(
        &self,
        out: &Tensor,
        dim: i64,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_prod_int_out(out, dim, keepdim, dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_prod_out`].
    pub fn prod_out(&self, out: &Tensor, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_prod_out(out, dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_put`].
    pub fn put(&self, index: &Tensor, source: &Tensor, accumulate: bool) -> Tensor {
        self.f_put(index, source, accumulate).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_put_`] (note the `&mut self` receiver).
    pub fn put_(&mut self, index: &Tensor, source: &Tensor, accumulate: bool) -> Tensor {
        self.f_put_(index, source, accumulate).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_put_out`].
    pub fn put_out(
        &self,
        out: &Tensor,
        index: &Tensor,
        source: &Tensor,
        accumulate: bool,
    ) -> Tensor {
        self.f_put_out(out, index, source, accumulate).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_q_per_channel_axis`].
    pub fn q_per_channel_axis(&self) -> i64 {
        self.f_q_per_channel_axis().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_q_per_channel_scales`].
    pub fn q_per_channel_scales(&self) -> Tensor {
        self.f_q_per_channel_scales().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_q_per_channel_scales_out`].
    pub fn q_per_channel_scales_out(&self, out: &Tensor) -> Tensor {
        self.f_q_per_channel_scales_out(out).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_q_per_channel_zero_points`].
    pub fn q_per_channel_zero_points(&self) -> Tensor {
        self.f_q_per_channel_zero_points().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_q_per_channel_zero_points_out`].
    pub fn q_per_channel_zero_points_out(&self, out: &Tensor) -> Tensor {
        self.f_q_per_channel_zero_points_out(out).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_q_scale`].
    pub fn q_scale(&self) -> f64 {
        self.f_q_scale().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_q_zero_point`].
    pub fn q_zero_point(&self) -> i64 {
        self.f_q_zero_point().unwrap()
    }
14564
    /// Panicking wrapper around [`Tensor::f_qr`]; returns the two result tensors as a pair.
    pub fn qr(&self, some: bool) -> (Tensor, Tensor) {
        self.f_qr(some).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_qr_q`].
    pub fn qr_q(&self, q: &Tensor, r: &Tensor, some: bool) -> (Tensor, Tensor) {
        self.f_qr_q(q, r, some).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantile`].
    pub fn quantile(
        &self,
        q: &Tensor,
        dim: impl Into<Option<i64>>,
        keepdim: bool,
        interpolation: &str,
    ) -> Tensor {
        self.f_quantile(q, dim, keepdim, interpolation).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantile_out`].
    pub fn quantile_out(
        &self,
        out: &Tensor,
        q: &Tensor,
        dim: impl Into<Option<i64>>,
        keepdim: bool,
        interpolation: &str,
    ) -> Tensor {
        self.f_quantile_out(out, q, dim, keepdim, interpolation).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantile_scalar`].
    pub fn quantile_scalar(
        &self,
        q: f64,
        dim: impl Into<Option<i64>>,
        keepdim: bool,
        interpolation: &str,
    ) -> Tensor {
        self.f_quantile_scalar(q, dim, keepdim, interpolation).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantile_scalar_out`].
    pub fn quantile_scalar_out(
        &self,
        out: &Tensor,
        q: f64,
        dim: impl Into<Option<i64>>,
        keepdim: bool,
        interpolation: &str,
    ) -> Tensor {
        self.f_quantile_scalar_out(out, q, dim, keepdim, interpolation).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantize_per_channel`].
    pub fn quantize_per_channel(
        &self,
        scales: &Tensor,
        zero_points: &Tensor,
        axis: i64,
        dtype: Kind,
    ) -> Tensor {
        self.f_quantize_per_channel(scales, zero_points, axis, dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantize_per_channel_out`].
    pub fn quantize_per_channel_out(
        &self,
        out: &Tensor,
        scales: &Tensor,
        zero_points: &Tensor,
        axis: i64,
        dtype: Kind,
    ) -> Tensor {
        self.f_quantize_per_channel_out(out, scales, zero_points, axis, dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantize_per_tensor`].
    pub fn quantize_per_tensor(&self, scale: f64, zero_point: i64, dtype: Kind) -> Tensor {
        self.f_quantize_per_tensor(scale, zero_point, dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantize_per_tensor_dynamic`].
    pub fn quantize_per_tensor_dynamic(&self, dtype: Kind, reduce_range: bool) -> Tensor {
        self.f_quantize_per_tensor_dynamic(dtype, reduce_range).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantize_per_tensor_dynamic_out`].
    pub fn quantize_per_tensor_dynamic_out(
        &self,
        out: &Tensor,
        dtype: Kind,
        reduce_range: bool,
    ) -> Tensor {
        self.f_quantize_per_tensor_dynamic_out(out, dtype, reduce_range).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantize_per_tensor_out`].
    pub fn quantize_per_tensor_out(
        &self,
        out: &Tensor,
        scale: f64,
        zero_point: i64,
        dtype: Kind,
    ) -> Tensor {
        self.f_quantize_per_tensor_out(out, scale, zero_point, dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantize_per_tensor_tensor_qparams`].
    pub fn quantize_per_tensor_tensor_qparams(
        &self,
        scale: &Tensor,
        zero_point: &Tensor,
        dtype: Kind,
    ) -> Tensor {
        self.f_quantize_per_tensor_tensor_qparams(scale, zero_point, dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantize_per_tensor_tensor_qparams_out`].
    pub fn quantize_per_tensor_tensor_qparams_out(
        &self,
        out: &Tensor,
        scale: &Tensor,
        zero_point: &Tensor,
        dtype: Kind,
    ) -> Tensor {
        self.f_quantize_per_tensor_tensor_qparams_out(out, scale, zero_point, dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantize_per_tensor_tensors`]
    /// (associated function, no receiver).
    pub fn quantize_per_tensor_tensors<T: Borrow<Tensor>>(
        tensors: &[T],
        scales: &Tensor,
        zero_points: &Tensor,
        dtype: Kind,
    ) -> Vec<Tensor> {
        Tensor::f_quantize_per_tensor_tensors(tensors, scales, zero_points, dtype).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantize_per_tensor_tensors_out`]
    /// (associated function, no receiver; writes into `out` and returns nothing).
    pub fn quantize_per_tensor_tensors_out<T: Borrow<Tensor>>(
        out: &[T],
        tensors: &[T],
        scales: &Tensor,
        zero_points: &Tensor,
        dtype: Kind,
    ) {
        Tensor::f_quantize_per_tensor_tensors_out(out, tensors, scales, zero_points, dtype).unwrap()
    }
14700
    /// Panicking wrapper around [`Tensor::f_quantized_batch_norm`].
    pub fn quantized_batch_norm<T: Borrow<Tensor>>(
        &self,
        weight: Option<T>,
        bias: Option<T>,
        mean: &Tensor,
        var: &Tensor,
        eps: f64,
        output_scale: f64,
        output_zero_point: i64,
    ) -> Tensor {
        self.f_quantized_batch_norm(weight, bias, mean, var, eps, output_scale, output_zero_point)
            .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantized_batch_norm_out`].
    pub fn quantized_batch_norm_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: Option<T>,
        bias: Option<T>,
        mean: &Tensor,
        var: &Tensor,
        eps: f64,
        output_scale: f64,
        output_zero_point: i64,
    ) -> Tensor {
        self.f_quantized_batch_norm_out(
            out,
            weight,
            bias,
            mean,
            var,
            eps,
            output_scale,
            output_zero_point,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantized_gru_cell`].
    pub fn quantized_gru_cell<S: Into<Scalar>>(
        &self,
        hx: &Tensor,
        w_ih: &Tensor,
        w_hh: &Tensor,
        b_ih: &Tensor,
        b_hh: &Tensor,
        packed_ih: &Tensor,
        packed_hh: &Tensor,
        col_offsets_ih: &Tensor,
        col_offsets_hh: &Tensor,
        scale_ih: S,
        scale_hh: S,
        zero_point_ih: S,
        zero_point_hh: S,
    ) -> Tensor {
        self.f_quantized_gru_cell(
            hx,
            w_ih,
            w_hh,
            b_ih,
            b_hh,
            packed_ih,
            packed_hh,
            col_offsets_ih,
            col_offsets_hh,
            scale_ih,
            scale_hh,
            zero_point_ih,
            zero_point_hh,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantized_lstm_cell`]; note `hx` is a
    /// slice of tensors here, and the result is a pair.
    pub fn quantized_lstm_cell<T: Borrow<Tensor>, S: Into<Scalar>>(
        &self,
        hx: &[T],
        w_ih: &Tensor,
        w_hh: &Tensor,
        b_ih: &Tensor,
        b_hh: &Tensor,
        packed_ih: &Tensor,
        packed_hh: &Tensor,
        col_offsets_ih: &Tensor,
        col_offsets_hh: &Tensor,
        scale_ih: S,
        scale_hh: S,
        zero_point_ih: S,
        zero_point_hh: S,
    ) -> (Tensor, Tensor) {
        self.f_quantized_lstm_cell(
            hx,
            w_ih,
            w_hh,
            b_ih,
            b_hh,
            packed_ih,
            packed_hh,
            col_offsets_ih,
            col_offsets_hh,
            scale_ih,
            scale_hh,
            zero_point_ih,
            zero_point_hh,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantized_max_pool1d`].
    pub fn quantized_max_pool1d(
        &self,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> Tensor {
        self.f_quantized_max_pool1d(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantized_max_pool1d_out`].
    pub fn quantized_max_pool1d_out(
        &self,
        out: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> Tensor {
        self.f_quantized_max_pool1d_out(out, kernel_size, stride, padding, dilation, ceil_mode)
            .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantized_max_pool2d`].
    pub fn quantized_max_pool2d(
        &self,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> Tensor {
        self.f_quantized_max_pool2d(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantized_max_pool2d_out`].
    pub fn quantized_max_pool2d_out(
        &self,
        out: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> Tensor {
        self.f_quantized_max_pool2d_out(out, kernel_size, stride, padding, dilation, ceil_mode)
            .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantized_max_pool3d`].
    pub fn quantized_max_pool3d(
        &self,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> Tensor {
        self.f_quantized_max_pool3d(kernel_size, stride, padding, dilation, ceil_mode).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantized_max_pool3d_out`].
    pub fn quantized_max_pool3d_out(
        &self,
        out: &Tensor,
        kernel_size: impl IntList,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
        ceil_mode: bool,
    ) -> Tensor {
        self.f_quantized_max_pool3d_out(out, kernel_size, stride, padding, dilation, ceil_mode)
            .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantized_rnn_relu_cell`].
    pub fn quantized_rnn_relu_cell<S: Into<Scalar>>(
        &self,
        hx: &Tensor,
        w_ih: &Tensor,
        w_hh: &Tensor,
        b_ih: &Tensor,
        b_hh: &Tensor,
        packed_ih: &Tensor,
        packed_hh: &Tensor,
        col_offsets_ih: &Tensor,
        col_offsets_hh: &Tensor,
        scale_ih: S,
        scale_hh: S,
        zero_point_ih: S,
        zero_point_hh: S,
    ) -> Tensor {
        self.f_quantized_rnn_relu_cell(
            hx,
            w_ih,
            w_hh,
            b_ih,
            b_hh,
            packed_ih,
            packed_hh,
            col_offsets_ih,
            col_offsets_hh,
            scale_ih,
            scale_hh,
            zero_point_ih,
            zero_point_hh,
        )
        .unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_quantized_rnn_tanh_cell`].
    pub fn quantized_rnn_tanh_cell<S: Into<Scalar>>(
        &self,
        hx: &Tensor,
        w_ih: &Tensor,
        w_hh: &Tensor,
        b_ih: &Tensor,
        b_hh: &Tensor,
        packed_ih: &Tensor,
        packed_hh: &Tensor,
        col_offsets_ih: &Tensor,
        col_offsets_hh: &Tensor,
        scale_ih: S,
        scale_hh: S,
        zero_point_ih: S,
        zero_point_hh: S,
    ) -> Tensor {
        self.f_quantized_rnn_tanh_cell(
            hx,
            w_ih,
            w_hh,
            b_ih,
            b_hh,
            packed_ih,
            packed_hh,
            col_offsets_ih,
            col_offsets_hh,
            scale_ih,
            scale_hh,
            zero_point_ih,
            zero_point_hh,
        )
        .unwrap()
    }
14946
    /// Panicking wrapper around [`Tensor::f_rad2deg`].
    pub fn rad2deg(&self) -> Tensor {
        self.f_rad2deg().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_rad2deg_`] (note the `&mut self` receiver).
    pub fn rad2deg_(&mut self) -> Tensor {
        self.f_rad2deg_().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_rad2deg_out`].
    pub fn rad2deg_out(&self, out: &Tensor) -> Tensor {
        self.f_rad2deg_out(out).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_rand`] (associated function, no receiver).
    pub fn rand(size: impl IntList, options: (Kind, Device)) -> Tensor {
        Tensor::f_rand(size, options).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_rand_like`].
    pub fn rand_like(&self) -> Tensor {
        self.f_rand_like().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_rand_like_out`].
    pub fn rand_like_out(&self, out: &Tensor) -> Tensor {
        self.f_rand_like_out(out).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_rand_out`] (associated function, no receiver).
    pub fn rand_out(out: &Tensor, size: impl IntList) -> Tensor {
        Tensor::f_rand_out(out, size).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_randint`] (associated function, no receiver).
    pub fn randint(high: i64, size: impl IntList, options: (Kind, Device)) -> Tensor {
        Tensor::f_randint(high, size, options).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_randint_like`].
    pub fn randint_like(&self, high: i64) -> Tensor {
        self.f_randint_like(high).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_randint_like_low_dtype`].
    pub fn randint_like_low_dtype(&self, low: i64, high: i64) -> Tensor {
        self.f_randint_like_low_dtype(low, high).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_randint_like_low_dtype_out`].
    pub fn randint_like_low_dtype_out(&self, out: &Tensor, low: i64, high: i64) -> Tensor {
        self.f_randint_like_low_dtype_out(out, low, high).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_randint_like_out`].
    pub fn randint_like_out(&self, out: &Tensor, high: i64) -> Tensor {
        self.f_randint_like_out(out, high).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_randint_low`] (associated function, no receiver).
    pub fn randint_low(low: i64, high: i64, size: impl IntList, options: (Kind, Device)) -> Tensor {
        Tensor::f_randint_low(low, high, size, options).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_randint_low_out`] (associated function, no receiver).
    pub fn randint_low_out(out: &Tensor, low: i64, high: i64, size: impl IntList) -> Tensor {
        Tensor::f_randint_low_out(out, low, high, size).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_randint_out`] (associated function, no receiver).
    pub fn randint_out(out: &Tensor, high: i64, size: impl IntList) -> Tensor {
        Tensor::f_randint_out(out, high, size).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_randn`] (associated function, no receiver).
    pub fn randn(size: impl IntList, options: (Kind, Device)) -> Tensor {
        Tensor::f_randn(size, options).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_randn_like`].
    pub fn randn_like(&self) -> Tensor {
        self.f_randn_like().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_randn_like_out`].
    pub fn randn_like_out(&self, out: &Tensor) -> Tensor {
        self.f_randn_like_out(out).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_randn_out`] (associated function, no receiver).
    pub fn randn_out(out: &Tensor, size: impl IntList) -> Tensor {
        Tensor::f_randn_out(out, size).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_random`].
    pub fn random(&self) -> Tensor {
        self.f_random().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_random_`] (note the `&mut self` receiver).
    pub fn random_(&mut self) -> Tensor {
        self.f_random_().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_random_from`].
    pub fn random_from(&self, from: i64, to: impl Into<Option<i64>>) -> Tensor {
        self.f_random_from(from, to).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_random_from_`] (note the `&mut self` receiver).
    pub fn random_from_(&mut self, from: i64, to: impl Into<Option<i64>>) -> Tensor {
        self.f_random_from_(from, to).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_random_from_out`].
    pub fn random_from_out(&self, out: &Tensor, from: i64, to: impl Into<Option<i64>>) -> Tensor {
        self.f_random_from_out(out, from, to).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_random_out`].
    pub fn random_out(&self, out: &Tensor) -> Tensor {
        self.f_random_out(out).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_random_to`].
    pub fn random_to(&self, to: i64) -> Tensor {
        self.f_random_to(to).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_random_to_`] (note the `&mut self` receiver).
    pub fn random_to_(&mut self, to: i64) -> Tensor {
        self.f_random_to_(to).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_random_to_out`].
    pub fn random_to_out(&self, out: &Tensor, to: i64) -> Tensor {
        self.f_random_to_out(out, to).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_randperm`] (associated function, no receiver).
    pub fn randperm(n: i64, options: (Kind, Device)) -> Tensor {
        Tensor::f_randperm(n, options).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_randperm_out`] (associated function, no receiver).
    pub fn randperm_out(out: &Tensor, n: i64) -> Tensor {
        Tensor::f_randperm_out(out, n).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_range`] (associated function, no receiver).
    pub fn range<S: Into<Scalar>>(start: S, end: S, options: (Kind, Device)) -> Tensor {
        Tensor::f_range(start, end, options).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_range_out`] (associated function, no receiver).
    pub fn range_out<S: Into<Scalar>>(out: &Tensor, start: S, end: S) -> Tensor {
        Tensor::f_range_out(out, start, end).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_range_out_`] (associated function, no receiver).
    pub fn range_out_<S: Into<Scalar>>(out: &Tensor, start: S, end: S) -> Tensor {
        Tensor::f_range_out_(out, start, end).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_range_step`] (associated function, no receiver).
    pub fn range_step<S: Into<Scalar>>(start: S, end: S, options: (Kind, Device)) -> Tensor {
        Tensor::f_range_step(start, end, options).unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_ravel`].
    pub fn ravel(&self) -> Tensor {
        self.f_ravel().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_real`].
    pub fn real(&self) -> Tensor {
        self.f_real().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_reciprocal`].
    pub fn reciprocal(&self) -> Tensor {
        self.f_reciprocal().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_reciprocal_`] (note the `&mut self` receiver).
    pub fn reciprocal_(&mut self) -> Tensor {
        self.f_reciprocal_().unwrap()
    }

    /// Panicking wrapper around [`Tensor::f_reciprocal_out`].
    pub fn reciprocal_out(&self, out: &Tensor) -> Tensor {
        self.f_reciprocal_out(out).unwrap()
    }
15102
    // `reflection_pad{1,2,3}d` wrappers: forward op, its `backward`
    // (taking `grad_output`), a `backward_grad_input` variant writing into a
    // caller-provided `grad_input`, and an `_out` variant. All panic on a
    // Torch error via `unwrap` on the fallible `f_*` form.
    pub fn reflection_pad1d(&self, padding: impl IntList) -> Tensor {
        self.f_reflection_pad1d(padding).unwrap()
    }

    pub fn reflection_pad1d_backward(&self, grad_output: &Tensor, padding: impl IntList) -> Tensor {
        self.f_reflection_pad1d_backward(grad_output, padding).unwrap()
    }

    pub fn reflection_pad1d_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        padding: impl IntList,
    ) -> Tensor {
        self.f_reflection_pad1d_backward_grad_input(grad_input, grad_output, padding).unwrap()
    }

    pub fn reflection_pad1d_out(&self, out: &Tensor, padding: impl IntList) -> Tensor {
        self.f_reflection_pad1d_out(out, padding).unwrap()
    }

    pub fn reflection_pad2d(&self, padding: impl IntList) -> Tensor {
        self.f_reflection_pad2d(padding).unwrap()
    }

    pub fn reflection_pad2d_backward(&self, grad_output: &Tensor, padding: impl IntList) -> Tensor {
        self.f_reflection_pad2d_backward(grad_output, padding).unwrap()
    }

    pub fn reflection_pad2d_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        padding: impl IntList,
    ) -> Tensor {
        self.f_reflection_pad2d_backward_grad_input(grad_input, grad_output, padding).unwrap()
    }

    pub fn reflection_pad2d_out(&self, out: &Tensor, padding: impl IntList) -> Tensor {
        self.f_reflection_pad2d_out(out, padding).unwrap()
    }

    pub fn reflection_pad3d(&self, padding: impl IntList) -> Tensor {
        self.f_reflection_pad3d(padding).unwrap()
    }

    pub fn reflection_pad3d_backward(&self, grad_output: &Tensor, padding: impl IntList) -> Tensor {
        self.f_reflection_pad3d_backward(grad_output, padding).unwrap()
    }

    pub fn reflection_pad3d_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        padding: impl IntList,
    ) -> Tensor {
        self.f_reflection_pad3d_backward_grad_input(grad_input, grad_output, padding).unwrap()
    }

    pub fn reflection_pad3d_out(&self, out: &Tensor, padding: impl IntList) -> Tensor {
        self.f_reflection_pad3d_out(out, padding).unwrap()
    }
15165
    // `relu`/`relu6`, `remainder` (scalar and tensor forms), and `renorm`
    // wrappers; each forwards to the fallible `f_*` variant and panics on
    // error. `remainder_scalar_tensor*` are the flipped forms where the
    // left-hand operand is a scalar rather than `self`.
    pub fn relu(&self) -> Tensor {
        self.f_relu().unwrap()
    }

    pub fn relu6(&self) -> Tensor {
        self.f_relu6().unwrap()
    }

    pub fn relu6_(&mut self) -> Tensor {
        self.f_relu6_().unwrap()
    }

    pub fn relu_(&mut self) -> Tensor {
        self.f_relu_().unwrap()
    }

    pub fn relu_out(&self, out: &Tensor) -> Tensor {
        self.f_relu_out(out).unwrap()
    }

    pub fn remainder<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_remainder(other).unwrap()
    }

    pub fn remainder_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_remainder_(other).unwrap()
    }

    pub fn remainder_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_remainder_scalar_out(out, other).unwrap()
    }

    pub fn remainder_scalar_tensor<S: Into<Scalar>>(self_scalar: S, other: &Tensor) -> Tensor {
        Tensor::f_remainder_scalar_tensor(self_scalar, other).unwrap()
    }

    pub fn remainder_scalar_tensor_out<S: Into<Scalar>>(
        out: &Tensor,
        self_scalar: S,
        other: &Tensor,
    ) -> Tensor {
        Tensor::f_remainder_scalar_tensor_out(out, self_scalar, other).unwrap()
    }

    pub fn remainder_tensor(&self, other: &Tensor) -> Tensor {
        self.f_remainder_tensor(other).unwrap()
    }

    pub fn remainder_tensor_(&mut self, other: &Tensor) -> Tensor {
        self.f_remainder_tensor_(other).unwrap()
    }

    pub fn remainder_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_remainder_tensor_out(out, other).unwrap()
    }

    pub fn renorm<S: Into<Scalar>>(&self, p: S, dim: i64, maxnorm: S) -> Tensor {
        self.f_renorm(p, dim, maxnorm).unwrap()
    }

    pub fn renorm_<S: Into<Scalar>>(&mut self, p: S, dim: i64, maxnorm: S) -> Tensor {
        self.f_renorm_(p, dim, maxnorm).unwrap()
    }

    pub fn renorm_out<S: Into<Scalar>>(&self, out: &Tensor, p: S, dim: i64, maxnorm: S) -> Tensor {
        self.f_renorm_out(out, p, dim, maxnorm).unwrap()
    }
15233
    // `repeat` / `repeat_interleave` wrappers. Optional `dim`/`output_size`
    // parameters are `impl Into<Option<i64>>` so callers may pass an i64 or
    // `None` directly. All panic on Torch error via `unwrap`.
    pub fn repeat(&self, repeats: impl IntList) -> Tensor {
        self.f_repeat(repeats).unwrap()
    }

    pub fn repeat_interleave(repeats: &Tensor, output_size: impl Into<Option<i64>>) -> Tensor {
        Tensor::f_repeat_interleave(repeats, output_size).unwrap()
    }

    pub fn repeat_interleave_self_int(
        &self,
        repeats: i64,
        dim: impl Into<Option<i64>>,
        output_size: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_repeat_interleave_self_int(repeats, dim, output_size).unwrap()
    }

    pub fn repeat_interleave_self_tensor(
        &self,
        repeats: &Tensor,
        dim: impl Into<Option<i64>>,
        output_size: impl Into<Option<i64>>,
    ) -> Tensor {
        self.f_repeat_interleave_self_tensor(repeats, dim, output_size).unwrap()
    }

    pub fn repeat_interleave_tensor_out(
        out: &Tensor,
        repeats: &Tensor,
        output_size: impl Into<Option<i64>>,
    ) -> Tensor {
        Tensor::f_repeat_interleave_tensor_out(out, repeats, output_size).unwrap()
    }

    pub fn repeat_out(&self, out: &Tensor, repeats: impl IntList) -> Tensor {
        self.f_repeat_out(out, repeats).unwrap()
    }
15271
    // `replication_pad{1,2,3}d` wrappers, mirroring the reflection-pad group:
    // forward op, `backward`, `backward_grad_input` (writes into the provided
    // `grad_input`), and `_out`. Each panics on a Torch error via `unwrap`.
    pub fn replication_pad1d(&self, padding: impl IntList) -> Tensor {
        self.f_replication_pad1d(padding).unwrap()
    }

    pub fn replication_pad1d_backward(
        &self,
        grad_output: &Tensor,
        padding: impl IntList,
    ) -> Tensor {
        self.f_replication_pad1d_backward(grad_output, padding).unwrap()
    }

    pub fn replication_pad1d_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        padding: impl IntList,
    ) -> Tensor {
        self.f_replication_pad1d_backward_grad_input(grad_input, grad_output, padding).unwrap()
    }

    pub fn replication_pad1d_out(&self, out: &Tensor, padding: impl IntList) -> Tensor {
        self.f_replication_pad1d_out(out, padding).unwrap()
    }

    pub fn replication_pad2d(&self, padding: impl IntList) -> Tensor {
        self.f_replication_pad2d(padding).unwrap()
    }

    pub fn replication_pad2d_backward(
        &self,
        grad_output: &Tensor,
        padding: impl IntList,
    ) -> Tensor {
        self.f_replication_pad2d_backward(grad_output, padding).unwrap()
    }

    pub fn replication_pad2d_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        padding: impl IntList,
    ) -> Tensor {
        self.f_replication_pad2d_backward_grad_input(grad_input, grad_output, padding).unwrap()
    }

    pub fn replication_pad2d_out(&self, out: &Tensor, padding: impl IntList) -> Tensor {
        self.f_replication_pad2d_out(out, padding).unwrap()
    }

    pub fn replication_pad3d(&self, padding: impl IntList) -> Tensor {
        self.f_replication_pad3d(padding).unwrap()
    }

    pub fn replication_pad3d_backward(
        &self,
        grad_output: &Tensor,
        padding: impl IntList,
    ) -> Tensor {
        self.f_replication_pad3d_backward(grad_output, padding).unwrap()
    }

    pub fn replication_pad3d_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        padding: impl IntList,
    ) -> Tensor {
        self.f_replication_pad3d_backward_grad_input(grad_input, grad_output, padding).unwrap()
    }

    pub fn replication_pad3d_out(&self, out: &Tensor, padding: impl IntList) -> Tensor {
        self.f_replication_pad3d_out(out, padding).unwrap()
    }
15346
    // Autograd-flag, `reshape*`, `resize*`, and `resolve_*` wrappers; all
    // panic on a Torch error via `unwrap` on the fallible `f_*` form.
    pub fn requires_grad_(&mut self, requires_grad: bool) -> Tensor {
        self.f_requires_grad_(requires_grad).unwrap()
    }

    pub fn reshape(&self, shape: impl IntList) -> Tensor {
        self.f_reshape(shape).unwrap()
    }

    pub fn reshape_as(&self, other: &Tensor) -> Tensor {
        self.f_reshape_as(other).unwrap()
    }

    pub fn resize(&self, size: impl IntList) -> Tensor {
        self.f_resize(size).unwrap()
    }

    pub fn resize_(&mut self, size: impl IntList) -> Tensor {
        self.f_resize_(size).unwrap()
    }

    pub fn resize_as(&self, the_template: &Tensor) -> Tensor {
        self.f_resize_as(the_template).unwrap()
    }

    pub fn resize_as_(&mut self, the_template: &Tensor) -> Tensor {
        self.f_resize_as_(the_template).unwrap()
    }

    pub fn resize_as_out(&self, out: &Tensor, the_template: &Tensor) -> Tensor {
        self.f_resize_as_out(out, the_template).unwrap()
    }

    pub fn resize_as_sparse(&self, the_template: &Tensor) -> Tensor {
        self.f_resize_as_sparse(the_template).unwrap()
    }

    pub fn resize_as_sparse_(&mut self, the_template: &Tensor) -> Tensor {
        self.f_resize_as_sparse_(the_template).unwrap()
    }

    pub fn resize_as_sparse_out(&self, out: &Tensor, the_template: &Tensor) -> Tensor {
        self.f_resize_as_sparse_out(out, the_template).unwrap()
    }

    pub fn resize_out(&self, out: &Tensor, size: impl IntList) -> Tensor {
        self.f_resize_out(out, size).unwrap()
    }

    pub fn resolve_conj(&self) -> Tensor {
        self.f_resolve_conj().unwrap()
    }

    pub fn resolve_neg(&self) -> Tensor {
        self.f_resolve_neg().unwrap()
    }

    // Note: returns a plain `bool`, not a `Tensor`, unlike its neighbours.
    pub fn retains_grad(&self) -> bool {
        self.f_retains_grad().unwrap()
    }
15406
    // `rms_norm` and the RNN cell/sequence wrappers (`rnn_relu*`,
    // `rnn_tanh*`). Optional tensor arguments (weights, biases) are
    // `Option<T>` with `T: Borrow<Tensor>`; the multi-output RNN ops return a
    // `(Tensor, Tensor)` pair. The `_data` variants are free functions taking
    // packed `data`/`batch_sizes` instead of `self` with `batch_first`.
    // All panic on a Torch error via `unwrap`.
    pub fn rms_norm<T: Borrow<Tensor>>(
        &self,
        normalized_shape: impl IntList,
        weight: Option<T>,
        eps: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_rms_norm(normalized_shape, weight, eps).unwrap()
    }

    pub fn rnn_relu<T: Borrow<Tensor>>(
        &self,
        hx: &Tensor,
        params: &[T],
        has_biases: bool,
        num_layers: i64,
        dropout: f64,
        train: bool,
        bidirectional: bool,
        batch_first: bool,
    ) -> (Tensor, Tensor) {
        self.f_rnn_relu(
            hx,
            params,
            has_biases,
            num_layers,
            dropout,
            train,
            bidirectional,
            batch_first,
        )
        .unwrap()
    }

    pub fn rnn_relu_cell<T: Borrow<Tensor>>(
        &self,
        hx: &Tensor,
        w_ih: &Tensor,
        w_hh: &Tensor,
        b_ih: Option<T>,
        b_hh: Option<T>,
    ) -> Tensor {
        self.f_rnn_relu_cell(hx, w_ih, w_hh, b_ih, b_hh).unwrap()
    }

    pub fn rnn_relu_data<T: Borrow<Tensor>>(
        data: &Tensor,
        batch_sizes: &Tensor,
        hx: &Tensor,
        params: &[T],
        has_biases: bool,
        num_layers: i64,
        dropout: f64,
        train: bool,
        bidirectional: bool,
    ) -> (Tensor, Tensor) {
        Tensor::f_rnn_relu_data(
            data,
            batch_sizes,
            hx,
            params,
            has_biases,
            num_layers,
            dropout,
            train,
            bidirectional,
        )
        .unwrap()
    }

    pub fn rnn_tanh<T: Borrow<Tensor>>(
        &self,
        hx: &Tensor,
        params: &[T],
        has_biases: bool,
        num_layers: i64,
        dropout: f64,
        train: bool,
        bidirectional: bool,
        batch_first: bool,
    ) -> (Tensor, Tensor) {
        self.f_rnn_tanh(
            hx,
            params,
            has_biases,
            num_layers,
            dropout,
            train,
            bidirectional,
            batch_first,
        )
        .unwrap()
    }

    pub fn rnn_tanh_cell<T: Borrow<Tensor>>(
        &self,
        hx: &Tensor,
        w_ih: &Tensor,
        w_hh: &Tensor,
        b_ih: Option<T>,
        b_hh: Option<T>,
    ) -> Tensor {
        self.f_rnn_tanh_cell(hx, w_ih, w_hh, b_ih, b_hh).unwrap()
    }

    pub fn rnn_tanh_data<T: Borrow<Tensor>>(
        data: &Tensor,
        batch_sizes: &Tensor,
        hx: &Tensor,
        params: &[T],
        has_biases: bool,
        num_layers: i64,
        dropout: f64,
        train: bool,
        bidirectional: bool,
    ) -> (Tensor, Tensor) {
        Tensor::f_rnn_tanh_data(
            data,
            batch_sizes,
            hx,
            params,
            has_biases,
            num_layers,
            dropout,
            train,
            bidirectional,
        )
        .unwrap()
    }
15535
    // `roll`, `rot90`, `round` (plus `decimals` variants), `row_indices*`,
    // and `row_stack` wrappers; each forwards to the fallible `f_*` form and
    // panics on a Torch error.
    pub fn roll(&self, shifts: impl IntList, dims: impl IntList) -> Tensor {
        self.f_roll(shifts, dims).unwrap()
    }

    pub fn roll_out(&self, out: &Tensor, shifts: impl IntList, dims: impl IntList) -> Tensor {
        self.f_roll_out(out, shifts, dims).unwrap()
    }

    pub fn rot90(&self, k: i64, dims: impl IntList) -> Tensor {
        self.f_rot90(k, dims).unwrap()
    }

    pub fn rot90_out(&self, out: &Tensor, k: i64, dims: impl IntList) -> Tensor {
        self.f_rot90_out(out, k, dims).unwrap()
    }

    pub fn round(&self) -> Tensor {
        self.f_round().unwrap()
    }

    pub fn round_(&mut self) -> Tensor {
        self.f_round_().unwrap()
    }

    pub fn round_decimals(&self, decimals: i64) -> Tensor {
        self.f_round_decimals(decimals).unwrap()
    }

    pub fn round_decimals_(&mut self, decimals: i64) -> Tensor {
        self.f_round_decimals_(decimals).unwrap()
    }

    pub fn round_decimals_out(&self, out: &Tensor, decimals: i64) -> Tensor {
        self.f_round_decimals_out(out, decimals).unwrap()
    }

    pub fn round_out(&self, out: &Tensor) -> Tensor {
        self.f_round_out(out).unwrap()
    }

    pub fn row_indices(&self) -> Tensor {
        self.f_row_indices().unwrap()
    }

    pub fn row_indices_copy(&self) -> Tensor {
        self.f_row_indices_copy().unwrap()
    }

    pub fn row_indices_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_row_indices_copy_out(out).unwrap()
    }

    pub fn row_stack<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
        Tensor::f_row_stack(tensors).unwrap()
    }

    pub fn row_stack_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T]) -> Tensor {
        Tensor::f_row_stack_out(out, tensors).unwrap()
    }
15595
    // `rrelu*` (randomized leaky ReLU), `rsqrt`, `rsub` (reversed subtract),
    // `scalar_tensor`, and `scaled_dot_product_attention` wrappers; all panic
    // on a Torch error via `unwrap` on the fallible `f_*` form.
    pub fn rrelu(&self, training: bool) -> Tensor {
        self.f_rrelu(training).unwrap()
    }

    pub fn rrelu_(&mut self, training: bool) -> Tensor {
        self.f_rrelu_(training).unwrap()
    }

    pub fn rrelu_with_noise(&self, noise: &Tensor, training: bool) -> Tensor {
        self.f_rrelu_with_noise(noise, training).unwrap()
    }

    pub fn rrelu_with_noise_(&mut self, noise: &Tensor, training: bool) -> Tensor {
        self.f_rrelu_with_noise_(noise, training).unwrap()
    }

    pub fn rrelu_with_noise_backward<S: Into<Scalar>>(
        &self,
        grad_output: &Tensor,
        noise: &Tensor,
        lower: S,
        upper: S,
        training: bool,
        self_is_result: bool,
    ) -> Tensor {
        self.f_rrelu_with_noise_backward(grad_output, noise, lower, upper, training, self_is_result)
            .unwrap()
    }

    pub fn rrelu_with_noise_backward_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        grad_output: &Tensor,
        noise: &Tensor,
        lower: S,
        upper: S,
        training: bool,
        self_is_result: bool,
    ) -> Tensor {
        self.f_rrelu_with_noise_backward_out(
            out,
            grad_output,
            noise,
            lower,
            upper,
            training,
            self_is_result,
        )
        .unwrap()
    }

    pub fn rrelu_with_noise_out(&self, out: &Tensor, noise: &Tensor, training: bool) -> Tensor {
        self.f_rrelu_with_noise_out(out, noise, training).unwrap()
    }

    pub fn rsqrt(&self) -> Tensor {
        self.f_rsqrt().unwrap()
    }

    pub fn rsqrt_(&mut self) -> Tensor {
        self.f_rsqrt_().unwrap()
    }

    pub fn rsqrt_out(&self, out: &Tensor) -> Tensor {
        self.f_rsqrt_out(out).unwrap()
    }

    pub fn rsub(&self, other: &Tensor) -> Tensor {
        self.f_rsub(other).unwrap()
    }

    pub fn rsub_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_rsub_scalar(other).unwrap()
    }

    pub fn rsub_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_rsub_scalar_out(out, other).unwrap()
    }

    pub fn rsub_tensor_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_rsub_tensor_out(out, other).unwrap()
    }

    pub fn scalar_tensor<S: Into<Scalar>>(s: S, options: (Kind, Device)) -> Tensor {
        Tensor::f_scalar_tensor(s, options).unwrap()
    }

    pub fn scalar_tensor_out<S: Into<Scalar>>(out: &Tensor, s: S) -> Tensor {
        Tensor::f_scalar_tensor_out(out, s).unwrap()
    }

    pub fn scaled_dot_product_attention<T: Borrow<Tensor>>(
        query: &Tensor,
        key: &Tensor,
        value: &Tensor,
        attn_mask: Option<T>,
        dropout_p: f64,
        is_causal: bool,
        scale: impl Into<Option<f64>>,
        enable_gqa: bool,
    ) -> Tensor {
        Tensor::f_scaled_dot_product_attention(
            query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa,
        )
        .unwrap()
    }
15702
    // `scatter` family wrappers: tensor-source (`src: &Tensor`) vs
    // scalar-source (`value: S`) forms, the `_add` accumulating forms, and
    // the `reduce: &str`-selected reduction forms, each with in-place (`_`)
    // and `_out` variants. All panic on a Torch error via `unwrap`.
    pub fn scatter(&self, dim: i64, index: &Tensor, src: &Tensor) -> Tensor {
        self.f_scatter(dim, index, src).unwrap()
    }

    pub fn scatter_(&mut self, dim: i64, index: &Tensor, src: &Tensor) -> Tensor {
        self.f_scatter_(dim, index, src).unwrap()
    }

    pub fn scatter_add(&self, dim: i64, index: &Tensor, src: &Tensor) -> Tensor {
        self.f_scatter_add(dim, index, src).unwrap()
    }

    pub fn scatter_add_(&mut self, dim: i64, index: &Tensor, src: &Tensor) -> Tensor {
        self.f_scatter_add_(dim, index, src).unwrap()
    }

    pub fn scatter_add_out(&self, out: &Tensor, dim: i64, index: &Tensor, src: &Tensor) -> Tensor {
        self.f_scatter_add_out(out, dim, index, src).unwrap()
    }

    pub fn scatter_reduce(&self, dim: i64, index: &Tensor, src: &Tensor, reduce: &str) -> Tensor {
        self.f_scatter_reduce(dim, index, src, reduce).unwrap()
    }

    pub fn scatter_reduce_(
        &mut self,
        dim: i64,
        index: &Tensor,
        src: &Tensor,
        reduce: &str,
    ) -> Tensor {
        self.f_scatter_reduce_(dim, index, src, reduce).unwrap()
    }

    pub fn scatter_reduce_out(
        &self,
        out: &Tensor,
        dim: i64,
        index: &Tensor,
        src: &Tensor,
        reduce: &str,
    ) -> Tensor {
        self.f_scatter_reduce_out(out, dim, index, src, reduce).unwrap()
    }

    pub fn scatter_src_out(&self, out: &Tensor, dim: i64, index: &Tensor, src: &Tensor) -> Tensor {
        self.f_scatter_src_out(out, dim, index, src).unwrap()
    }

    pub fn scatter_value<S: Into<Scalar>>(&self, dim: i64, index: &Tensor, value: S) -> Tensor {
        self.f_scatter_value(dim, index, value).unwrap()
    }

    pub fn scatter_value_<S: Into<Scalar>>(
        &mut self,
        dim: i64,
        index: &Tensor,
        value: S,
    ) -> Tensor {
        self.f_scatter_value_(dim, index, value).unwrap()
    }

    pub fn scatter_value_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        dim: i64,
        index: &Tensor,
        value: S,
    ) -> Tensor {
        self.f_scatter_value_out(out, dim, index, value).unwrap()
    }

    pub fn scatter_value_reduce<S: Into<Scalar>>(
        &self,
        dim: i64,
        index: &Tensor,
        value: S,
        reduce: &str,
    ) -> Tensor {
        self.f_scatter_value_reduce(dim, index, value, reduce).unwrap()
    }

    pub fn scatter_value_reduce_<S: Into<Scalar>>(
        &mut self,
        dim: i64,
        index: &Tensor,
        value: S,
        reduce: &str,
    ) -> Tensor {
        self.f_scatter_value_reduce_(dim, index, value, reduce).unwrap()
    }

    pub fn scatter_value_reduce_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        dim: i64,
        index: &Tensor,
        value: S,
        reduce: &str,
    ) -> Tensor {
        self.f_scatter_value_reduce_out(out, dim, index, value, reduce).unwrap()
    }
15805
    // `searchsorted` (tensor- and scalar-`self` forms) and `segment_reduce`
    // wrappers. `side` and `reduce` are passed through as `&str` mode
    // strings; optional tensors are `Option<T>` with `T: Borrow<Tensor>`.
    // All panic on a Torch error via `unwrap`.
    pub fn searchsorted<T: Borrow<Tensor>>(
        &self,
        sorted_sequence: &Tensor,
        out_int32: bool,
        right: bool,
        side: &str,
        sorter: Option<T>,
    ) -> Tensor {
        self.f_searchsorted(sorted_sequence, out_int32, right, side, sorter).unwrap()
    }

    pub fn searchsorted_scalar<T: Borrow<Tensor>, S: Into<Scalar>>(
        sorted_sequence: &Tensor,
        self_scalar: S,
        out_int32: bool,
        right: bool,
        side: &str,
        sorter: Option<T>,
    ) -> Tensor {
        Tensor::f_searchsorted_scalar(sorted_sequence, self_scalar, out_int32, right, side, sorter)
            .unwrap()
    }

    pub fn searchsorted_scalar_out<T: Borrow<Tensor>, S: Into<Scalar>>(
        out: &Tensor,
        sorted_sequence: &Tensor,
        self_scalar: S,
        out_int32: bool,
        right: bool,
        side: &str,
        sorter: Option<T>,
    ) -> Tensor {
        Tensor::f_searchsorted_scalar_out(
            out,
            sorted_sequence,
            self_scalar,
            out_int32,
            right,
            side,
            sorter,
        )
        .unwrap()
    }

    pub fn searchsorted_tensor_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        sorted_sequence: &Tensor,
        out_int32: bool,
        right: bool,
        side: &str,
        sorter: Option<T>,
    ) -> Tensor {
        self.f_searchsorted_tensor_out(out, sorted_sequence, out_int32, right, side, sorter)
            .unwrap()
    }

    pub fn segment_reduce<T: Borrow<Tensor>, S: Into<Scalar>>(
        data: &Tensor,
        reduce: &str,
        lengths: Option<T>,
        indices: Option<T>,
        offsets: Option<T>,
        axis: i64,
        unsafe_: bool,
        initial: S,
    ) -> Tensor {
        Tensor::f_segment_reduce(data, reduce, lengths, indices, offsets, axis, unsafe_, initial)
            .unwrap()
    }

    pub fn segment_reduce_out<T: Borrow<Tensor>, S: Into<Scalar>>(
        out: &Tensor,
        data: &Tensor,
        reduce: &str,
        lengths: Option<T>,
        indices: Option<T>,
        offsets: Option<T>,
        axis: i64,
        unsafe_: bool,
        initial: S,
    ) -> Tensor {
        Tensor::f_segment_reduce_out(
            out, data, reduce, lengths, indices, offsets, axis, unsafe_, initial,
        )
        .unwrap()
    }
15893
    // `select` family (`select`, backward, copy, scatter variants) and `selu`
    // wrappers; each forwards to the fallible `f_*` form and panics on error.
    pub fn select(&self, dim: i64, index: i64) -> Tensor {
        self.f_select(dim, index).unwrap()
    }

    pub fn select_backward(
        grad_output: &Tensor,
        input_sizes: impl IntList,
        dim: i64,
        index: i64,
    ) -> Tensor {
        Tensor::f_select_backward(grad_output, input_sizes, dim, index).unwrap()
    }

    pub fn select_backward_out(
        out: &Tensor,
        grad_output: &Tensor,
        input_sizes: impl IntList,
        dim: i64,
        index: i64,
    ) -> Tensor {
        Tensor::f_select_backward_out(out, grad_output, input_sizes, dim, index).unwrap()
    }

    pub fn select_copy(&self, dim: i64, index: i64) -> Tensor {
        self.f_select_copy(dim, index).unwrap()
    }

    pub fn select_copy_int_out(&self, out: &Tensor, dim: i64, index: i64) -> Tensor {
        self.f_select_copy_int_out(out, dim, index).unwrap()
    }

    pub fn select_scatter(&self, src: &Tensor, dim: i64, index: i64) -> Tensor {
        self.f_select_scatter(src, dim, index).unwrap()
    }

    pub fn select_scatter_out(&self, out: &Tensor, src: &Tensor, dim: i64, index: i64) -> Tensor {
        self.f_select_scatter_out(out, src, dim, index).unwrap()
    }

    pub fn selu(&self) -> Tensor {
        self.f_selu().unwrap()
    }

    pub fn selu_(&mut self) -> Tensor {
        self.f_selu_().unwrap()
    }
15940
    // `set*` wrappers; each forwards to the fallible `f_*` form and panics on
    // a Torch error.
    pub fn set(&self) -> Tensor {
        self.f_set().unwrap()
    }

    pub fn set_(&mut self) -> Tensor {
        self.f_set_().unwrap()
    }

    // Note: returns `()` rather than a `Tensor`, unlike its neighbours.
    pub fn set_data(&mut self, new_data: &Tensor) {
        self.f_set_data(new_data).unwrap()
    }

    pub fn set_out(&self, out: &Tensor) -> Tensor {
        self.f_set_out(out).unwrap()
    }

    pub fn set_requires_grad(&self, r: bool) -> Tensor {
        self.f_set_requires_grad(r).unwrap()
    }

    pub fn set_source_tensor(&self, source: &Tensor) -> Tensor {
        self.f_set_source_tensor(source).unwrap()
    }

    pub fn set_source_tensor_(&mut self, source: &Tensor) -> Tensor {
        self.f_set_source_tensor_(source).unwrap()
    }

    pub fn set_source_tensor_out(&self, out: &Tensor, source: &Tensor) -> Tensor {
        self.f_set_source_tensor_out(out, source).unwrap()
    }

    pub fn set_source_tensor_storage_offset_(
        &mut self,
        source: &Tensor,
        storage_offset: i64,
        size: impl IntList,
        stride: impl IntList,
    ) -> Tensor {
        self.f_set_source_tensor_storage_offset_(source, storage_offset, size, stride).unwrap()
    }
15982
15983    pub fn sgn(&self) -> Tensor {
15984        self.f_sgn().unwrap()
15985    }
15986
15987    pub fn sgn_(&mut self) -> Tensor {
15988        self.f_sgn_().unwrap()
15989    }
15990
15991    pub fn sgn_out(&self, out: &Tensor) -> Tensor {
15992        self.f_sgn_out(out).unwrap()
15993    }
15994
    /// Panicking wrapper for [`Tensor::f_sigmoid`]; panics if the underlying call fails.
    pub fn sigmoid(&self) -> Tensor {
        self.f_sigmoid().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sigmoid_`]; panics if the underlying call fails.
    pub fn sigmoid_(&mut self) -> Tensor {
        self.f_sigmoid_().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sigmoid_backward`]; panics if the underlying call fails.
    pub fn sigmoid_backward(grad_output: &Tensor, output: &Tensor) -> Tensor {
        Tensor::f_sigmoid_backward(grad_output, output).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sigmoid_backward_grad_input`]; panics if the underlying
    /// call fails.
    pub fn sigmoid_backward_grad_input(
        grad_input: &Tensor,
        grad_output: &Tensor,
        output: &Tensor,
    ) -> Tensor {
        Tensor::f_sigmoid_backward_grad_input(grad_input, grad_output, output).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sigmoid_out`]; panics if the underlying call fails.
    pub fn sigmoid_out(&self, out: &Tensor) -> Tensor {
        self.f_sigmoid_out(out).unwrap()
    }
16018
    /// Panicking wrapper for [`Tensor::f_sign`]; panics if the underlying call fails.
    pub fn sign(&self) -> Tensor {
        self.f_sign().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sign_`]; panics if the underlying call fails.
    pub fn sign_(&mut self) -> Tensor {
        self.f_sign_().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sign_out`]; panics if the underlying call fails.
    pub fn sign_out(&self, out: &Tensor) -> Tensor {
        self.f_sign_out(out).unwrap()
    }
16030
    /// Panicking wrapper for [`Tensor::f_signbit`]; panics if the underlying call fails.
    pub fn signbit(&self) -> Tensor {
        self.f_signbit().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_signbit_out`]; panics if the underlying call fails.
    pub fn signbit_out(&self, out: &Tensor) -> Tensor {
        self.f_signbit_out(out).unwrap()
    }
16038
    /// Panicking wrapper for [`Tensor::f_silu`]; panics if the underlying call fails.
    pub fn silu(&self) -> Tensor {
        self.f_silu().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_silu_`]; panics if the underlying call fails.
    pub fn silu_(&mut self) -> Tensor {
        self.f_silu_().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_silu_backward`]; panics if the underlying call fails.
    pub fn silu_backward(&self, grad_output: &Tensor) -> Tensor {
        self.f_silu_backward(grad_output).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_silu_backward_grad_input`]; panics if the underlying
    /// call fails.
    pub fn silu_backward_grad_input(&self, grad_input: &Tensor, grad_output: &Tensor) -> Tensor {
        self.f_silu_backward_grad_input(grad_input, grad_output).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_silu_out`]; panics if the underlying call fails.
    pub fn silu_out(&self, out: &Tensor) -> Tensor {
        self.f_silu_out(out).unwrap()
    }
16058
    /// Panicking wrapper for [`Tensor::f_sin`]; panics if the underlying call fails.
    pub fn sin(&self) -> Tensor {
        self.f_sin().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sin_`]; panics if the underlying call fails.
    pub fn sin_(&mut self) -> Tensor {
        self.f_sin_().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sin_out`]; panics if the underlying call fails.
    pub fn sin_out(&self, out: &Tensor) -> Tensor {
        self.f_sin_out(out).unwrap()
    }
16070
    /// Panicking wrapper for [`Tensor::f_sinc`]; panics if the underlying call fails.
    pub fn sinc(&self) -> Tensor {
        self.f_sinc().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sinc_`]; panics if the underlying call fails.
    pub fn sinc_(&mut self) -> Tensor {
        self.f_sinc_().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sinc_out`]; panics if the underlying call fails.
    pub fn sinc_out(&self, out: &Tensor) -> Tensor {
        self.f_sinc_out(out).unwrap()
    }
16082
    /// Panicking wrapper for [`Tensor::f_sinh`]; panics if the underlying call fails.
    pub fn sinh(&self) -> Tensor {
        self.f_sinh().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sinh_`]; panics if the underlying call fails.
    pub fn sinh_(&mut self) -> Tensor {
        self.f_sinh_().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sinh_out`]; panics if the underlying call fails.
    pub fn sinh_out(&self, out: &Tensor) -> Tensor {
        self.f_sinh_out(out).unwrap()
    }
16094
    /// Panicking wrapper for [`Tensor::f_slice`]; panics if the underlying call fails.
    pub fn slice(
        &self,
        dim: i64,
        start: impl Into<Option<i64>>,
        end: impl Into<Option<i64>>,
        step: i64,
    ) -> Tensor {
        self.f_slice(dim, start, end, step).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_slice_backward`]; panics if the underlying call fails.
    pub fn slice_backward(
        grad_output: &Tensor,
        input_sizes: impl IntList,
        dim: i64,
        start: i64,
        end: i64,
        step: i64,
    ) -> Tensor {
        Tensor::f_slice_backward(grad_output, input_sizes, dim, start, end, step).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_slice_backward_out`]; panics if the underlying call fails.
    pub fn slice_backward_out(
        out: &Tensor,
        grad_output: &Tensor,
        input_sizes: impl IntList,
        dim: i64,
        start: i64,
        end: i64,
        step: i64,
    ) -> Tensor {
        Tensor::f_slice_backward_out(out, grad_output, input_sizes, dim, start, end, step).unwrap()
    }
16127
    /// Panicking wrapper for [`Tensor::f_slice_copy`]; panics if the underlying call fails.
    pub fn slice_copy(
        &self,
        dim: i64,
        start: impl Into<Option<i64>>,
        end: impl Into<Option<i64>>,
        step: i64,
    ) -> Tensor {
        self.f_slice_copy(dim, start, end, step).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_slice_copy_tensor_out`]; panics if the underlying call
    /// fails.
    pub fn slice_copy_tensor_out(
        &self,
        out: &Tensor,
        dim: i64,
        start: impl Into<Option<i64>>,
        end: impl Into<Option<i64>>,
        step: i64,
    ) -> Tensor {
        self.f_slice_copy_tensor_out(out, dim, start, end, step).unwrap()
    }
16148
    /// Panicking wrapper for [`Tensor::f_slice_inverse`]; panics if the underlying call fails.
    pub fn slice_inverse(
        &self,
        src: &Tensor,
        dim: i64,
        start: impl Into<Option<i64>>,
        end: impl Into<Option<i64>>,
        step: i64,
    ) -> Tensor {
        self.f_slice_inverse(src, dim, start, end, step).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_slice_scatter`]; panics if the underlying call fails.
    pub fn slice_scatter(
        &self,
        src: &Tensor,
        dim: i64,
        start: impl Into<Option<i64>>,
        end: impl Into<Option<i64>>,
        step: i64,
    ) -> Tensor {
        self.f_slice_scatter(src, dim, start, end, step).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_slice_scatter_out`]; panics if the underlying call fails.
    pub fn slice_scatter_out(
        &self,
        out: &Tensor,
        src: &Tensor,
        dim: i64,
        start: impl Into<Option<i64>>,
        end: impl Into<Option<i64>>,
        step: i64,
    ) -> Tensor {
        self.f_slice_scatter_out(out, src, dim, start, end, step).unwrap()
    }
16182
    /// Panicking wrapper for [`Tensor::f_slogdet`]; panics if the underlying call fails.
    pub fn slogdet(&self) -> (Tensor, Tensor) {
        self.f_slogdet().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_slogdet_out`]; panics if the underlying call fails.
    pub fn slogdet_out(&self, sign: &Tensor, logabsdet: &Tensor) -> (Tensor, Tensor) {
        self.f_slogdet_out(sign, logabsdet).unwrap()
    }
16190
    /// Panicking wrapper for [`Tensor::f_slow_conv3d`]; panics if the underlying call fails.
    pub fn slow_conv3d<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        kernel_size: impl IntList,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
    ) -> Tensor {
        self.f_slow_conv3d(weight, kernel_size, bias, stride, padding).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_slow_conv3d_out`]; panics if the underlying call fails.
    pub fn slow_conv3d_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        kernel_size: impl IntList,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
    ) -> Tensor {
        self.f_slow_conv3d_out(out, weight, kernel_size, bias, stride, padding).unwrap()
    }
16213
    /// Panicking wrapper for [`Tensor::f_slow_conv_dilated2d`]; panics if the underlying call
    /// fails.
    pub fn slow_conv_dilated2d<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        kernel_size: impl IntList,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
    ) -> Tensor {
        self.f_slow_conv_dilated2d(weight, kernel_size, bias, stride, padding, dilation).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_slow_conv_dilated2d_out`]; panics if the underlying call
    /// fails.
    pub fn slow_conv_dilated2d_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        kernel_size: impl IntList,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
    ) -> Tensor {
        self.f_slow_conv_dilated2d_out(out, weight, kernel_size, bias, stride, padding, dilation)
            .unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_slow_conv_dilated3d`]; panics if the underlying call
    /// fails.
    pub fn slow_conv_dilated3d<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        kernel_size: impl IntList,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
    ) -> Tensor {
        self.f_slow_conv_dilated3d(weight, kernel_size, bias, stride, padding, dilation).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_slow_conv_dilated3d_out`]; panics if the underlying call
    /// fails.
    pub fn slow_conv_dilated3d_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        kernel_size: impl IntList,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        dilation: impl IntList,
    ) -> Tensor {
        self.f_slow_conv_dilated3d_out(out, weight, kernel_size, bias, stride, padding, dilation)
            .unwrap()
    }
16265
    /// Panicking wrapper for [`Tensor::f_slow_conv_transpose2d`]; panics if the underlying call
    /// fails.
    pub fn slow_conv_transpose2d<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        kernel_size: impl IntList,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        output_padding: impl IntList,
        dilation: impl IntList,
    ) -> Tensor {
        self.f_slow_conv_transpose2d(
            weight,
            kernel_size,
            bias,
            stride,
            padding,
            output_padding,
            dilation,
        )
        .unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_slow_conv_transpose2d_out`]; panics if the underlying
    /// call fails.
    pub fn slow_conv_transpose2d_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        kernel_size: impl IntList,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        output_padding: impl IntList,
        dilation: impl IntList,
    ) -> Tensor {
        self.f_slow_conv_transpose2d_out(
            out,
            weight,
            kernel_size,
            bias,
            stride,
            padding,
            output_padding,
            dilation,
        )
        .unwrap()
    }
16311
    /// Panicking wrapper for [`Tensor::f_slow_conv_transpose3d`]; panics if the underlying call
    /// fails.
    pub fn slow_conv_transpose3d<T: Borrow<Tensor>>(
        &self,
        weight: &Tensor,
        kernel_size: impl IntList,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        output_padding: impl IntList,
        dilation: impl IntList,
    ) -> Tensor {
        self.f_slow_conv_transpose3d(
            weight,
            kernel_size,
            bias,
            stride,
            padding,
            output_padding,
            dilation,
        )
        .unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_slow_conv_transpose3d_out`]; panics if the underlying
    /// call fails.
    pub fn slow_conv_transpose3d_out<T: Borrow<Tensor>>(
        &self,
        out: &Tensor,
        weight: &Tensor,
        kernel_size: impl IntList,
        bias: Option<T>,
        stride: impl IntList,
        padding: impl IntList,
        output_padding: impl IntList,
        dilation: impl IntList,
    ) -> Tensor {
        self.f_slow_conv_transpose3d_out(
            out,
            weight,
            kernel_size,
            bias,
            stride,
            padding,
            output_padding,
            dilation,
        )
        .unwrap()
    }
16357
    /// Panicking wrapper for [`Tensor::f_smm`]; panics if the underlying call fails.
    pub fn smm(&self, mat2: &Tensor) -> Tensor {
        self.f_smm(mat2).unwrap()
    }
16361
    /// Panicking wrapper for [`Tensor::f_smooth_l1_loss`]; panics if the underlying call fails.
    pub fn smooth_l1_loss(
        &self,
        target: &Tensor,
        reduction: crate::Reduction,
        beta: f64,
    ) -> Tensor {
        self.f_smooth_l1_loss(target, reduction, beta).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_smooth_l1_loss_backward`]; panics if the underlying call
    /// fails.
    pub fn smooth_l1_loss_backward(
        &self,
        grad_output: &Tensor,
        target: &Tensor,
        reduction: crate::Reduction,
        beta: f64,
    ) -> Tensor {
        self.f_smooth_l1_loss_backward(grad_output, target, reduction, beta).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_smooth_l1_loss_backward_grad_input`]; panics if the
    /// underlying call fails.
    pub fn smooth_l1_loss_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        target: &Tensor,
        reduction: crate::Reduction,
        beta: f64,
    ) -> Tensor {
        self.f_smooth_l1_loss_backward_grad_input(grad_input, grad_output, target, reduction, beta)
            .unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_smooth_l1_loss_out`]; panics if the underlying call
    /// fails.
    pub fn smooth_l1_loss_out(
        &self,
        out: &Tensor,
        target: &Tensor,
        reduction: crate::Reduction,
        beta: f64,
    ) -> Tensor {
        self.f_smooth_l1_loss_out(out, target, reduction, beta).unwrap()
    }
16402
    /// Panicking wrapper for [`Tensor::f_soft_margin_loss`]; panics if the underlying call fails.
    pub fn soft_margin_loss(&self, target: &Tensor, reduction: crate::Reduction) -> Tensor {
        self.f_soft_margin_loss(target, reduction).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_soft_margin_loss_backward`]; panics if the underlying
    /// call fails.
    pub fn soft_margin_loss_backward(
        &self,
        grad_output: &Tensor,
        target: &Tensor,
        reduction: crate::Reduction,
    ) -> Tensor {
        self.f_soft_margin_loss_backward(grad_output, target, reduction).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_soft_margin_loss_backward_grad_input`]; panics if the
    /// underlying call fails.
    pub fn soft_margin_loss_backward_grad_input(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        target: &Tensor,
        reduction: crate::Reduction,
    ) -> Tensor {
        self.f_soft_margin_loss_backward_grad_input(grad_input, grad_output, target, reduction)
            .unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_soft_margin_loss_out`]; panics if the underlying call
    /// fails.
    pub fn soft_margin_loss_out(
        &self,
        out: &Tensor,
        target: &Tensor,
        reduction: crate::Reduction,
    ) -> Tensor {
        self.f_soft_margin_loss_out(out, target, reduction).unwrap()
    }
16435
    /// Panicking wrapper for [`Tensor::f_softmax`]; panics if the underlying call fails.
    pub fn softmax(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_softmax(dim, dtype).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_softmax_int_out`]; panics if the underlying call fails.
    pub fn softmax_int_out(
        &self,
        out: &Tensor,
        dim: i64,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_softmax_int_out(out, dim, dtype).unwrap()
    }
16448
    /// Panicking wrapper for [`Tensor::f_softplus`]; panics if the underlying call fails.
    pub fn softplus(&self) -> Tensor {
        self.f_softplus().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_softplus_backward`]; panics if the underlying call fails.
    pub fn softplus_backward<S: Into<Scalar>>(
        &self,
        grad_output: &Tensor,
        beta: S,
        threshold: S,
    ) -> Tensor {
        self.f_softplus_backward(grad_output, beta, threshold).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_softplus_backward_grad_input`]; panics if the underlying
    /// call fails.
    pub fn softplus_backward_grad_input<S: Into<Scalar>>(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        beta: S,
        threshold: S,
    ) -> Tensor {
        self.f_softplus_backward_grad_input(grad_input, grad_output, beta, threshold).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_softplus_out`]; panics if the underlying call fails.
    pub fn softplus_out(&self, out: &Tensor) -> Tensor {
        self.f_softplus_out(out).unwrap()
    }
16475
    /// Panicking wrapper for [`Tensor::f_softshrink`]; panics if the underlying call fails.
    pub fn softshrink(&self) -> Tensor {
        self.f_softshrink().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_softshrink_backward`]; panics if the underlying call
    /// fails.
    pub fn softshrink_backward<S: Into<Scalar>>(&self, grad_output: &Tensor, lambd: S) -> Tensor {
        self.f_softshrink_backward(grad_output, lambd).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_softshrink_backward_grad_input`]; panics if the
    /// underlying call fails.
    pub fn softshrink_backward_grad_input<S: Into<Scalar>>(
        &self,
        grad_input: &Tensor,
        grad_output: &Tensor,
        lambd: S,
    ) -> Tensor {
        self.f_softshrink_backward_grad_input(grad_input, grad_output, lambd).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_softshrink_out`]; panics if the underlying call fails.
    pub fn softshrink_out(&self, out: &Tensor) -> Tensor {
        self.f_softshrink_out(out).unwrap()
    }
16496
    /// Panicking wrapper for [`Tensor::f_sort`]; panics if the underlying call fails.
    pub fn sort(&self, dim: i64, descending: bool) -> (Tensor, Tensor) {
        self.f_sort(dim, descending).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sort_stable`]; panics if the underlying call fails.
    pub fn sort_stable(&self, stable: bool, dim: i64, descending: bool) -> (Tensor, Tensor) {
        self.f_sort_stable(stable, dim, descending).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sort_values`]; panics if the underlying call fails.
    pub fn sort_values(
        &self,
        values: &Tensor,
        indices: &Tensor,
        dim: i64,
        descending: bool,
    ) -> (Tensor, Tensor) {
        self.f_sort_values(values, indices, dim, descending).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sort_values_stable`]; panics if the underlying call
    /// fails.
    pub fn sort_values_stable(
        &self,
        values: &Tensor,
        indices: &Tensor,
        stable: bool,
        dim: i64,
        descending: bool,
    ) -> (Tensor, Tensor) {
        self.f_sort_values_stable(values, indices, stable, dim, descending).unwrap()
    }
16525
    /// Panicking wrapper for [`Tensor::f_sparse_bsc_tensor`]; panics if the underlying call fails.
    pub fn sparse_bsc_tensor(
        ccol_indices: &Tensor,
        row_indices: &Tensor,
        values: &Tensor,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_sparse_bsc_tensor(ccol_indices, row_indices, values, options).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sparse_bsc_tensor_ccol_row_value_size`]; panics if the
    /// underlying call fails.
    pub fn sparse_bsc_tensor_ccol_row_value_size(
        ccol_indices: &Tensor,
        row_indices: &Tensor,
        values: &Tensor,
        size: impl IntList,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_sparse_bsc_tensor_ccol_row_value_size(
            ccol_indices,
            row_indices,
            values,
            size,
            options,
        )
        .unwrap()
    }
16551
    /// Panicking wrapper for [`Tensor::f_sparse_bsr_tensor`]; panics if the underlying call fails.
    pub fn sparse_bsr_tensor(
        crow_indices: &Tensor,
        col_indices: &Tensor,
        values: &Tensor,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_sparse_bsr_tensor(crow_indices, col_indices, values, options).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sparse_bsr_tensor_crow_col_value_size`]; panics if the
    /// underlying call fails.
    pub fn sparse_bsr_tensor_crow_col_value_size(
        crow_indices: &Tensor,
        col_indices: &Tensor,
        values: &Tensor,
        size: impl IntList,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_sparse_bsr_tensor_crow_col_value_size(
            crow_indices,
            col_indices,
            values,
            size,
            options,
        )
        .unwrap()
    }
16577
    /// Panicking wrapper for [`Tensor::f_sparse_compressed_tensor`]; panics if the underlying call
    /// fails.
    pub fn sparse_compressed_tensor(
        compressed_indices: &Tensor,
        plain_indices: &Tensor,
        values: &Tensor,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_sparse_compressed_tensor(compressed_indices, plain_indices, values, options)
            .unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sparse_compressed_tensor_comp_plain_value_size`]; panics
    /// if the underlying call fails.
    pub fn sparse_compressed_tensor_comp_plain_value_size(
        compressed_indices: &Tensor,
        plain_indices: &Tensor,
        values: &Tensor,
        size: impl IntList,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_sparse_compressed_tensor_comp_plain_value_size(
            compressed_indices,
            plain_indices,
            values,
            size,
            options,
        )
        .unwrap()
    }
16604
    /// Panicking wrapper for [`Tensor::f_sparse_coo_tensor`]; panics if the underlying call fails.
    pub fn sparse_coo_tensor(size: impl IntList, options: (Kind, Device)) -> Tensor {
        Tensor::f_sparse_coo_tensor(size, options).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sparse_coo_tensor_indices`]; panics if the underlying
    /// call fails.
    pub fn sparse_coo_tensor_indices(
        indices: &Tensor,
        values: &Tensor,
        options: (Kind, Device),
        is_coalesced: bool,
    ) -> Tensor {
        Tensor::f_sparse_coo_tensor_indices(indices, values, options, is_coalesced).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sparse_coo_tensor_indices_size`]; panics if the
    /// underlying call fails.
    pub fn sparse_coo_tensor_indices_size(
        indices: &Tensor,
        values: &Tensor,
        size: impl IntList,
        options: (Kind, Device),
        is_coalesced: bool,
    ) -> Tensor {
        Tensor::f_sparse_coo_tensor_indices_size(indices, values, size, options, is_coalesced)
            .unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sparse_coo_tensor_size_out`]; panics if the underlying
    /// call fails.
    pub fn sparse_coo_tensor_size_out(out: &Tensor, size: impl IntList) -> Tensor {
        Tensor::f_sparse_coo_tensor_size_out(out, size).unwrap()
    }
16632
    /// Panicking wrapper for [`Tensor::f_sparse_csc_tensor`]; panics if the underlying call fails.
    pub fn sparse_csc_tensor(
        ccol_indices: &Tensor,
        row_indices: &Tensor,
        values: &Tensor,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_sparse_csc_tensor(ccol_indices, row_indices, values, options).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sparse_csc_tensor_ccol_row_value_size`]; panics if the
    /// underlying call fails.
    pub fn sparse_csc_tensor_ccol_row_value_size(
        ccol_indices: &Tensor,
        row_indices: &Tensor,
        values: &Tensor,
        size: impl IntList,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_sparse_csc_tensor_ccol_row_value_size(
            ccol_indices,
            row_indices,
            values,
            size,
            options,
        )
        .unwrap()
    }
16658
    /// Panicking wrapper for [`Tensor::f_sparse_csr_tensor`]; panics if the underlying call fails.
    pub fn sparse_csr_tensor(
        crow_indices: &Tensor,
        col_indices: &Tensor,
        values: &Tensor,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_sparse_csr_tensor(crow_indices, col_indices, values, options).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sparse_csr_tensor_crow_col_value_size`]; panics if the
    /// underlying call fails.
    pub fn sparse_csr_tensor_crow_col_value_size(
        crow_indices: &Tensor,
        col_indices: &Tensor,
        values: &Tensor,
        size: impl IntList,
        options: (Kind, Device),
    ) -> Tensor {
        Tensor::f_sparse_csr_tensor_crow_col_value_size(
            crow_indices,
            col_indices,
            values,
            size,
            options,
        )
        .unwrap()
    }
16684
    /// Panicking wrapper for [`Tensor::f_sparse_dim`]; panics if the underlying call fails.
    pub fn sparse_dim(&self) -> i64 {
        self.f_sparse_dim().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sparse_mask`]; panics if the underlying call fails.
    pub fn sparse_mask(&self, mask: &Tensor) -> Tensor {
        self.f_sparse_mask(mask).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sparse_mask_out`]; panics if the underlying call fails.
    pub fn sparse_mask_out(&self, out: &Tensor, mask: &Tensor) -> Tensor {
        self.f_sparse_mask_out(out, mask).unwrap()
    }
16696
    /// Panicking wrapper for [`Tensor::f_sparse_resize`]; panics if the underlying call fails.
    pub fn sparse_resize(&self, size: impl IntList, sparse_dim: i64, dense_dim: i64) -> Tensor {
        self.f_sparse_resize(size, sparse_dim, dense_dim).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sparse_resize_`]; panics if the underlying call fails.
    pub fn sparse_resize_(
        &mut self,
        size: impl IntList,
        sparse_dim: i64,
        dense_dim: i64,
    ) -> Tensor {
        self.f_sparse_resize_(size, sparse_dim, dense_dim).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sparse_resize_and_clear`]; panics if the underlying call
    /// fails.
    pub fn sparse_resize_and_clear(
        &self,
        size: impl IntList,
        sparse_dim: i64,
        dense_dim: i64,
    ) -> Tensor {
        self.f_sparse_resize_and_clear(size, sparse_dim, dense_dim).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sparse_resize_and_clear_`]; panics if the underlying call
    /// fails.
    pub fn sparse_resize_and_clear_(
        &mut self,
        size: impl IntList,
        sparse_dim: i64,
        dense_dim: i64,
    ) -> Tensor {
        self.f_sparse_resize_and_clear_(size, sparse_dim, dense_dim).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sparse_resize_and_clear_out`]; panics if the underlying
    /// call fails.
    pub fn sparse_resize_and_clear_out(
        &self,
        out: &Tensor,
        size: impl IntList,
        sparse_dim: i64,
        dense_dim: i64,
    ) -> Tensor {
        self.f_sparse_resize_and_clear_out(out, size, sparse_dim, dense_dim).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sparse_resize_out`]; panics if the underlying call fails.
    pub fn sparse_resize_out(
        &self,
        out: &Tensor,
        size: impl IntList,
        sparse_dim: i64,
        dense_dim: i64,
    ) -> Tensor {
        self.f_sparse_resize_out(out, size, sparse_dim, dense_dim).unwrap()
    }
16747
    /// Panicking wrapper for [`Tensor::f_sparse_sampled_addmm`]; panics if the underlying call
    /// fails.
    pub fn sparse_sampled_addmm(&self, mat1: &Tensor, mat2: &Tensor) -> Tensor {
        self.f_sparse_sampled_addmm(mat1, mat2).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_sparse_sampled_addmm_out`]; panics if the underlying call
    /// fails.
    pub fn sparse_sampled_addmm_out(&self, out: &Tensor, mat1: &Tensor, mat2: &Tensor) -> Tensor {
        self.f_sparse_sampled_addmm_out(out, mat1, mat2).unwrap()
    }
16755
    /// Panicking wrapper for [`Tensor::f_special_airy_ai`]; panics if the underlying call fails.
    pub fn special_airy_ai(x: &Tensor) -> Tensor {
        Tensor::f_special_airy_ai(x).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_airy_ai_out`]; panics if the underlying call
    /// fails.
    pub fn special_airy_ai_out(out: &Tensor, x: &Tensor) -> Tensor {
        Tensor::f_special_airy_ai_out(out, x).unwrap()
    }
16763
    /// Panicking wrapper for [`Tensor::f_special_bessel_j0`]; panics if the underlying call fails.
    pub fn special_bessel_j0(&self) -> Tensor {
        self.f_special_bessel_j0().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_bessel_j0_out`]; panics if the underlying call
    /// fails.
    pub fn special_bessel_j0_out(&self, out: &Tensor) -> Tensor {
        self.f_special_bessel_j0_out(out).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_bessel_j1`]; panics if the underlying call fails.
    pub fn special_bessel_j1(&self) -> Tensor {
        self.f_special_bessel_j1().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_bessel_j1_out`]; panics if the underlying call
    /// fails.
    pub fn special_bessel_j1_out(&self, out: &Tensor) -> Tensor {
        self.f_special_bessel_j1_out(out).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_bessel_y0`]; panics if the underlying call fails.
    pub fn special_bessel_y0(&self) -> Tensor {
        self.f_special_bessel_y0().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_bessel_y0_out`]; panics if the underlying call
    /// fails.
    pub fn special_bessel_y0_out(&self, out: &Tensor) -> Tensor {
        self.f_special_bessel_y0_out(out).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_bessel_y1`]; panics if the underlying call fails.
    pub fn special_bessel_y1(&self) -> Tensor {
        self.f_special_bessel_y1().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_bessel_y1_out`]; panics if the underlying call
    /// fails.
    pub fn special_bessel_y1_out(&self, out: &Tensor) -> Tensor {
        self.f_special_bessel_y1_out(out).unwrap()
    }
16795
    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_t`]; panics if the
    /// underlying call fails.
    pub fn special_chebyshev_polynomial_t(x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_t(x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_t_n_scalar`]; panics if the
    /// underlying call fails.
    pub fn special_chebyshev_polynomial_t_n_scalar<S: Into<Scalar>>(x: &Tensor, n: S) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_t_n_scalar(x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_t_n_scalar_out`]; panics if
    /// the underlying call fails.
    pub fn special_chebyshev_polynomial_t_n_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: &Tensor,
        n: S,
    ) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_t_n_scalar_out(out, x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_t_out`]; panics if the
    /// underlying call fails.
    pub fn special_chebyshev_polynomial_t_out(out: &Tensor, x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_t_out(out, x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_t_x_scalar`]; panics if the
    /// underlying call fails.
    pub fn special_chebyshev_polynomial_t_x_scalar<S: Into<Scalar>>(x: S, n: &Tensor) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_t_x_scalar(x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_t_x_scalar_out`]; panics if
    /// the underlying call fails.
    pub fn special_chebyshev_polynomial_t_x_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: S,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_t_x_scalar_out(out, x, n).unwrap()
    }
16827
    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_u`]; panics if the
    /// underlying call fails.
    pub fn special_chebyshev_polynomial_u(x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_u(x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_u_n_scalar`]; panics if the
    /// underlying call fails.
    pub fn special_chebyshev_polynomial_u_n_scalar<S: Into<Scalar>>(x: &Tensor, n: S) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_u_n_scalar(x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_u_n_scalar_out`]; panics if
    /// the underlying call fails.
    pub fn special_chebyshev_polynomial_u_n_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: &Tensor,
        n: S,
    ) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_u_n_scalar_out(out, x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_u_out`]; panics if the
    /// underlying call fails.
    pub fn special_chebyshev_polynomial_u_out(out: &Tensor, x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_u_out(out, x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_u_x_scalar`]; panics if the
    /// underlying call fails.
    pub fn special_chebyshev_polynomial_u_x_scalar<S: Into<Scalar>>(x: S, n: &Tensor) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_u_x_scalar(x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_u_x_scalar_out`]; panics if
    /// the underlying call fails.
    pub fn special_chebyshev_polynomial_u_x_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: S,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_u_x_scalar_out(out, x, n).unwrap()
    }
16859
    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_v`]; panics if the
    /// underlying call fails.
    pub fn special_chebyshev_polynomial_v(x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_v(x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_v_n_scalar`]; panics if the
    /// underlying call fails.
    pub fn special_chebyshev_polynomial_v_n_scalar<S: Into<Scalar>>(x: &Tensor, n: S) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_v_n_scalar(x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_v_n_scalar_out`]; panics if
    /// the underlying call fails.
    pub fn special_chebyshev_polynomial_v_n_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: &Tensor,
        n: S,
    ) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_v_n_scalar_out(out, x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_v_out`]; panics if the
    /// underlying call fails.
    pub fn special_chebyshev_polynomial_v_out(out: &Tensor, x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_v_out(out, x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_v_x_scalar`]; panics if the
    /// underlying call fails.
    pub fn special_chebyshev_polynomial_v_x_scalar<S: Into<Scalar>>(x: S, n: &Tensor) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_v_x_scalar(x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_v_x_scalar_out`]; panics if
    /// the underlying call fails.
    pub fn special_chebyshev_polynomial_v_x_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: S,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_v_x_scalar_out(out, x, n).unwrap()
    }
16891
    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_w`]; panics if the
    /// underlying call fails.
    pub fn special_chebyshev_polynomial_w(x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_w(x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_w_n_scalar`]; panics if the
    /// underlying call fails.
    pub fn special_chebyshev_polynomial_w_n_scalar<S: Into<Scalar>>(x: &Tensor, n: S) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_w_n_scalar(x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_w_n_scalar_out`]; panics if
    /// the underlying call fails.
    pub fn special_chebyshev_polynomial_w_n_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: &Tensor,
        n: S,
    ) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_w_n_scalar_out(out, x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_w_out`]; panics if the
    /// underlying call fails.
    pub fn special_chebyshev_polynomial_w_out(out: &Tensor, x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_w_out(out, x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_w_x_scalar`]; panics if the
    /// underlying call fails.
    pub fn special_chebyshev_polynomial_w_x_scalar<S: Into<Scalar>>(x: S, n: &Tensor) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_w_x_scalar(x, n).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_chebyshev_polynomial_w_x_scalar_out`]; panics if
    /// the underlying call fails.
    pub fn special_chebyshev_polynomial_w_x_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: S,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_chebyshev_polynomial_w_x_scalar_out(out, x, n).unwrap()
    }
16923
    /// Panicking wrapper for [`Tensor::f_special_digamma`]; panics if the underlying call fails.
    pub fn special_digamma(&self) -> Tensor {
        self.f_special_digamma().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_digamma_out`]; panics if the underlying call
    /// fails.
    pub fn special_digamma_out(&self, out: &Tensor) -> Tensor {
        self.f_special_digamma_out(out).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_entr`]; panics if the underlying call fails.
    pub fn special_entr(&self) -> Tensor {
        self.f_special_entr().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_entr_out`]; panics if the underlying call fails.
    pub fn special_entr_out(&self, out: &Tensor) -> Tensor {
        self.f_special_entr_out(out).unwrap()
    }
16939
    /// Panicking wrapper for [`Tensor::f_special_erf`]; panics if the underlying call fails.
    pub fn special_erf(&self) -> Tensor {
        self.f_special_erf().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_erf_out`]; panics if the underlying call fails.
    pub fn special_erf_out(&self, out: &Tensor) -> Tensor {
        self.f_special_erf_out(out).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_erfc`]; panics if the underlying call fails.
    pub fn special_erfc(&self) -> Tensor {
        self.f_special_erfc().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_erfc_out`]; panics if the underlying call fails.
    pub fn special_erfc_out(&self, out: &Tensor) -> Tensor {
        self.f_special_erfc_out(out).unwrap()
    }
16955
    /// Panicking wrapper for [`Tensor::f_special_erfcx`]; panics if the underlying call fails.
    pub fn special_erfcx(&self) -> Tensor {
        self.f_special_erfcx().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_erfcx_out`]; panics if the underlying call fails.
    pub fn special_erfcx_out(&self, out: &Tensor) -> Tensor {
        self.f_special_erfcx_out(out).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_erfinv`]; panics if the underlying call fails.
    pub fn special_erfinv(&self) -> Tensor {
        self.f_special_erfinv().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_erfinv_out`]; panics if the underlying call
    /// fails.
    pub fn special_erfinv_out(&self, out: &Tensor) -> Tensor {
        self.f_special_erfinv_out(out).unwrap()
    }
16971
    /// Panicking wrapper for [`Tensor::f_special_exp2`]; panics if the underlying call fails.
    pub fn special_exp2(&self) -> Tensor {
        self.f_special_exp2().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_exp2_out`]; panics if the underlying call fails.
    pub fn special_exp2_out(&self, out: &Tensor) -> Tensor {
        self.f_special_exp2_out(out).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_expit`]; panics if the underlying call fails.
    pub fn special_expit(&self) -> Tensor {
        self.f_special_expit().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_expit_out`]; panics if the underlying call fails.
    pub fn special_expit_out(&self, out: &Tensor) -> Tensor {
        self.f_special_expit_out(out).unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_expm1`]; panics if the underlying call fails.
    pub fn special_expm1(&self) -> Tensor {
        self.f_special_expm1().unwrap()
    }

    /// Panicking wrapper for [`Tensor::f_special_expm1_out`]; panics if the underlying call fails.
    pub fn special_expm1_out(&self, out: &Tensor) -> Tensor {
        self.f_special_expm1_out(out).unwrap()
    }
16995
16996    pub fn special_gammainc(&self, other: &Tensor) -> Tensor {
16997        self.f_special_gammainc(other).unwrap()
16998    }
16999
17000    pub fn special_gammainc_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
17001        self.f_special_gammainc_out(out, other).unwrap()
17002    }
17003
17004    pub fn special_gammaincc(&self, other: &Tensor) -> Tensor {
17005        self.f_special_gammaincc(other).unwrap()
17006    }
17007
17008    pub fn special_gammaincc_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
17009        self.f_special_gammaincc_out(out, other).unwrap()
17010    }
17011
17012    pub fn special_gammaln(&self) -> Tensor {
17013        self.f_special_gammaln().unwrap()
17014    }
17015
17016    pub fn special_gammaln_out(&self, out: &Tensor) -> Tensor {
17017        self.f_special_gammaln_out(out).unwrap()
17018    }
17019
    // Hermite polynomial wrappers (H = physicists', He = probabilists' — names
    // mirror the torch.special API; TODO confirm against the LibTorch docs).
    // These are associated functions (no `self`): `x` is the evaluation point
    // and `n` the order. The `_n_scalar` / `_x_scalar` variants accept a Scalar
    // in place of the corresponding Tensor argument; all of them panic if the
    // underlying `f_*` call fails.
    pub fn special_hermite_polynomial_h(x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_hermite_polynomial_h(x, n).unwrap()
    }

    pub fn special_hermite_polynomial_h_n_scalar<S: Into<Scalar>>(x: &Tensor, n: S) -> Tensor {
        Tensor::f_special_hermite_polynomial_h_n_scalar(x, n).unwrap()
    }

    pub fn special_hermite_polynomial_h_n_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: &Tensor,
        n: S,
    ) -> Tensor {
        Tensor::f_special_hermite_polynomial_h_n_scalar_out(out, x, n).unwrap()
    }

    pub fn special_hermite_polynomial_h_out(out: &Tensor, x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_hermite_polynomial_h_out(out, x, n).unwrap()
    }

    pub fn special_hermite_polynomial_h_x_scalar<S: Into<Scalar>>(x: S, n: &Tensor) -> Tensor {
        Tensor::f_special_hermite_polynomial_h_x_scalar(x, n).unwrap()
    }

    pub fn special_hermite_polynomial_h_x_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: S,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_hermite_polynomial_h_x_scalar_out(out, x, n).unwrap()
    }

    pub fn special_hermite_polynomial_he(x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_hermite_polynomial_he(x, n).unwrap()
    }

    pub fn special_hermite_polynomial_he_n_scalar<S: Into<Scalar>>(x: &Tensor, n: S) -> Tensor {
        Tensor::f_special_hermite_polynomial_he_n_scalar(x, n).unwrap()
    }

    pub fn special_hermite_polynomial_he_n_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: &Tensor,
        n: S,
    ) -> Tensor {
        Tensor::f_special_hermite_polynomial_he_n_scalar_out(out, x, n).unwrap()
    }

    pub fn special_hermite_polynomial_he_out(out: &Tensor, x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_hermite_polynomial_he_out(out, x, n).unwrap()
    }

    pub fn special_hermite_polynomial_he_x_scalar<S: Into<Scalar>>(x: S, n: &Tensor) -> Tensor {
        Tensor::f_special_hermite_polynomial_he_x_scalar(x, n).unwrap()
    }

    pub fn special_hermite_polynomial_he_x_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: S,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_hermite_polynomial_he_x_scalar_out(out, x, n).unwrap()
    }
17083
    // Bessel-function wrappers (i0/i1 and their exponentially-scaled `e`
    // variants, per the torch.special naming). Panicking forwarders to the
    // corresponding fallible `f_*` methods.
    pub fn special_i0(&self) -> Tensor {
        self.f_special_i0().unwrap()
    }

    pub fn special_i0_out(&self, out: &Tensor) -> Tensor {
        self.f_special_i0_out(out).unwrap()
    }

    pub fn special_i0e(&self) -> Tensor {
        self.f_special_i0e().unwrap()
    }

    pub fn special_i0e_out(&self, out: &Tensor) -> Tensor {
        self.f_special_i0e_out(out).unwrap()
    }

    pub fn special_i1(&self) -> Tensor {
        self.f_special_i1().unwrap()
    }

    pub fn special_i1_out(&self, out: &Tensor) -> Tensor {
        self.f_special_i1_out(out).unwrap()
    }

    pub fn special_i1e(&self) -> Tensor {
        self.f_special_i1e().unwrap()
    }

    pub fn special_i1e_out(&self, out: &Tensor) -> Tensor {
        self.f_special_i1e_out(out).unwrap()
    }
17115
    // Laguerre (L) and Legendre (P) polynomial wrappers — same shape as the
    // Hermite family above: associated functions taking point `x` and order `n`,
    // with `_n_scalar` / `_x_scalar` variants swapping one Tensor for a Scalar,
    // and `_out` variants taking a destination tensor. All panic on error.
    pub fn special_laguerre_polynomial_l(x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_laguerre_polynomial_l(x, n).unwrap()
    }

    pub fn special_laguerre_polynomial_l_n_scalar<S: Into<Scalar>>(x: &Tensor, n: S) -> Tensor {
        Tensor::f_special_laguerre_polynomial_l_n_scalar(x, n).unwrap()
    }

    pub fn special_laguerre_polynomial_l_n_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: &Tensor,
        n: S,
    ) -> Tensor {
        Tensor::f_special_laguerre_polynomial_l_n_scalar_out(out, x, n).unwrap()
    }

    pub fn special_laguerre_polynomial_l_out(out: &Tensor, x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_laguerre_polynomial_l_out(out, x, n).unwrap()
    }

    pub fn special_laguerre_polynomial_l_x_scalar<S: Into<Scalar>>(x: S, n: &Tensor) -> Tensor {
        Tensor::f_special_laguerre_polynomial_l_x_scalar(x, n).unwrap()
    }

    pub fn special_laguerre_polynomial_l_x_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: S,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_laguerre_polynomial_l_x_scalar_out(out, x, n).unwrap()
    }

    pub fn special_legendre_polynomial_p(x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_legendre_polynomial_p(x, n).unwrap()
    }

    pub fn special_legendre_polynomial_p_n_scalar<S: Into<Scalar>>(x: &Tensor, n: S) -> Tensor {
        Tensor::f_special_legendre_polynomial_p_n_scalar(x, n).unwrap()
    }

    pub fn special_legendre_polynomial_p_n_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: &Tensor,
        n: S,
    ) -> Tensor {
        Tensor::f_special_legendre_polynomial_p_n_scalar_out(out, x, n).unwrap()
    }

    pub fn special_legendre_polynomial_p_out(out: &Tensor, x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_legendre_polynomial_p_out(out, x, n).unwrap()
    }

    pub fn special_legendre_polynomial_p_x_scalar<S: Into<Scalar>>(x: S, n: &Tensor) -> Tensor {
        Tensor::f_special_legendre_polynomial_p_x_scalar(x, n).unwrap()
    }

    pub fn special_legendre_polynomial_p_x_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: S,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_legendre_polynomial_p_x_scalar_out(out, x, n).unwrap()
    }
17179
    // Logarithm-family wrappers. Note the optional-argument conventions used by
    // this generated API: `impl Into<Option<Kind>>` for an optional dtype,
    // `impl Into<Option<f64>>` for an optional float, and `impl IntList` for a
    // dimension list. All panic if the underlying `f_*` call errors.
    pub fn special_log1p(&self) -> Tensor {
        self.f_special_log1p().unwrap()
    }

    pub fn special_log1p_out(&self, out: &Tensor) -> Tensor {
        self.f_special_log1p_out(out).unwrap()
    }

    pub fn special_log_ndtr(&self) -> Tensor {
        self.f_special_log_ndtr().unwrap()
    }

    pub fn special_log_ndtr_out(&self, out: &Tensor) -> Tensor {
        self.f_special_log_ndtr_out(out).unwrap()
    }

    pub fn special_log_softmax(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_special_log_softmax(dim, dtype).unwrap()
    }

    pub fn special_logit(&self, eps: impl Into<Option<f64>>) -> Tensor {
        self.f_special_logit(eps).unwrap()
    }

    pub fn special_logit_out(&self, out: &Tensor, eps: impl Into<Option<f64>>) -> Tensor {
        self.f_special_logit_out(out, eps).unwrap()
    }

    pub fn special_logsumexp(&self, dim: impl IntList, keepdim: bool) -> Tensor {
        self.f_special_logsumexp(dim, keepdim).unwrap()
    }

    pub fn special_logsumexp_out(&self, out: &Tensor, dim: impl IntList, keepdim: bool) -> Tensor {
        self.f_special_logsumexp_out(out, dim, keepdim).unwrap()
    }
17215
    // Modified Bessel functions, multivariate log-gamma, normal CDF (ndtr) and
    // its inverse (ndtri), polygamma, psi, and rounding — all panicking
    // forwarders to the fallible `f_*` variants.
    pub fn special_modified_bessel_i0(&self) -> Tensor {
        self.f_special_modified_bessel_i0().unwrap()
    }

    pub fn special_modified_bessel_i0_out(&self, out: &Tensor) -> Tensor {
        self.f_special_modified_bessel_i0_out(out).unwrap()
    }

    pub fn special_modified_bessel_i1(&self) -> Tensor {
        self.f_special_modified_bessel_i1().unwrap()
    }

    pub fn special_modified_bessel_i1_out(&self, out: &Tensor) -> Tensor {
        self.f_special_modified_bessel_i1_out(out).unwrap()
    }

    pub fn special_modified_bessel_k0(&self) -> Tensor {
        self.f_special_modified_bessel_k0().unwrap()
    }

    pub fn special_modified_bessel_k0_out(&self, out: &Tensor) -> Tensor {
        self.f_special_modified_bessel_k0_out(out).unwrap()
    }

    pub fn special_modified_bessel_k1(&self) -> Tensor {
        self.f_special_modified_bessel_k1().unwrap()
    }

    pub fn special_modified_bessel_k1_out(&self, out: &Tensor) -> Tensor {
        self.f_special_modified_bessel_k1_out(out).unwrap()
    }

    pub fn special_multigammaln(&self, p: i64) -> Tensor {
        self.f_special_multigammaln(p).unwrap()
    }

    pub fn special_multigammaln_out(&self, out: &Tensor, p: i64) -> Tensor {
        self.f_special_multigammaln_out(out, p).unwrap()
    }

    pub fn special_ndtr(&self) -> Tensor {
        self.f_special_ndtr().unwrap()
    }

    pub fn special_ndtr_out(&self, out: &Tensor) -> Tensor {
        self.f_special_ndtr_out(out).unwrap()
    }

    pub fn special_ndtri(&self) -> Tensor {
        self.f_special_ndtri().unwrap()
    }

    pub fn special_ndtri_out(&self, out: &Tensor) -> Tensor {
        self.f_special_ndtri_out(out).unwrap()
    }

    pub fn special_polygamma(&self, n: i64) -> Tensor {
        self.f_special_polygamma(n).unwrap()
    }

    pub fn special_polygamma_out(&self, out: &Tensor, n: i64) -> Tensor {
        self.f_special_polygamma_out(out, n).unwrap()
    }

    pub fn special_psi(&self) -> Tensor {
        self.f_special_psi().unwrap()
    }

    pub fn special_psi_out(&self, out: &Tensor) -> Tensor {
        self.f_special_psi_out(out).unwrap()
    }

    pub fn special_round(&self, decimals: i64) -> Tensor {
        self.f_special_round(decimals).unwrap()
    }

    pub fn special_round_out(&self, out: &Tensor, decimals: i64) -> Tensor {
        self.f_special_round_out(out, decimals).unwrap()
    }
17295
    // Scaled modified Bessel functions and the four shifted Chebyshev
    // polynomial families (T, U, V, W). Associated functions over (`x`, `n`)
    // with the usual `_n_scalar` / `_x_scalar` / `_out` variants; all panic if
    // the underlying `f_*` call fails.
    pub fn special_scaled_modified_bessel_k0(x: &Tensor) -> Tensor {
        Tensor::f_special_scaled_modified_bessel_k0(x).unwrap()
    }

    pub fn special_scaled_modified_bessel_k0_out(out: &Tensor, x: &Tensor) -> Tensor {
        Tensor::f_special_scaled_modified_bessel_k0_out(out, x).unwrap()
    }

    pub fn special_scaled_modified_bessel_k1(x: &Tensor) -> Tensor {
        Tensor::f_special_scaled_modified_bessel_k1(x).unwrap()
    }

    pub fn special_scaled_modified_bessel_k1_out(out: &Tensor, x: &Tensor) -> Tensor {
        Tensor::f_special_scaled_modified_bessel_k1_out(out, x).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_t(x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_t(x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_t_n_scalar<S: Into<Scalar>>(
        x: &Tensor,
        n: S,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_t_n_scalar(x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_t_n_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: &Tensor,
        n: S,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_t_n_scalar_out(out, x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_t_out(
        out: &Tensor,
        x: &Tensor,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_t_out(out, x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_t_x_scalar<S: Into<Scalar>>(
        x: S,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_t_x_scalar(x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_t_x_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: S,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_t_x_scalar_out(out, x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_u(x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_u(x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_u_n_scalar<S: Into<Scalar>>(
        x: &Tensor,
        n: S,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_u_n_scalar(x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_u_n_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: &Tensor,
        n: S,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_u_n_scalar_out(out, x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_u_out(
        out: &Tensor,
        x: &Tensor,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_u_out(out, x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_u_x_scalar<S: Into<Scalar>>(
        x: S,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_u_x_scalar(x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_u_x_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: S,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_u_x_scalar_out(out, x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_v(x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_v(x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_v_n_scalar<S: Into<Scalar>>(
        x: &Tensor,
        n: S,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_v_n_scalar(x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_v_n_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: &Tensor,
        n: S,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_v_n_scalar_out(out, x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_v_out(
        out: &Tensor,
        x: &Tensor,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_v_out(out, x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_v_x_scalar<S: Into<Scalar>>(
        x: S,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_v_x_scalar(x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_v_x_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: S,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_v_x_scalar_out(out, x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_w(x: &Tensor, n: &Tensor) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_w(x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_w_n_scalar<S: Into<Scalar>>(
        x: &Tensor,
        n: S,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_w_n_scalar(x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_w_n_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: &Tensor,
        n: S,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_w_n_scalar_out(out, x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_w_out(
        out: &Tensor,
        x: &Tensor,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_w_out(out, x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_w_x_scalar<S: Into<Scalar>>(
        x: S,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_w_x_scalar(x, n).unwrap()
    }

    pub fn special_shifted_chebyshev_polynomial_w_x_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        x: S,
        n: &Tensor,
    ) -> Tensor {
        Tensor::f_special_shifted_chebyshev_polynomial_w_x_scalar_out(out, x, n).unwrap()
    }
17479
    // sinc, softmax (with optional dtype), and spherical Bessel j0 —
    // panicking forwarders to the fallible `f_*` variants.
    pub fn special_sinc(&self) -> Tensor {
        self.f_special_sinc().unwrap()
    }

    pub fn special_sinc_out(&self, out: &Tensor) -> Tensor {
        self.f_special_sinc_out(out).unwrap()
    }

    pub fn special_softmax(&self, dim: i64, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_special_softmax(dim, dtype).unwrap()
    }

    pub fn special_spherical_bessel_j0(x: &Tensor) -> Tensor {
        Tensor::f_special_spherical_bessel_j0(x).unwrap()
    }

    pub fn special_spherical_bessel_j0_out(out: &Tensor, x: &Tensor) -> Tensor {
        Tensor::f_special_spherical_bessel_j0_out(out, x).unwrap()
    }
17499
    // Binary special functions (xlog1py, xlogy, zeta). Naming convention in
    // this generated API: `_other_scalar` replaces the second operand with a
    // Scalar, while `_self_scalar` is an associated function where the *first*
    // operand (`self_scalar`) is a Scalar and so no receiver exists. All panic
    // if the underlying `f_*` call fails.
    pub fn special_xlog1py(&self, other: &Tensor) -> Tensor {
        self.f_special_xlog1py(other).unwrap()
    }

    pub fn special_xlog1py_other_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_special_xlog1py_other_scalar(other).unwrap()
    }

    pub fn special_xlog1py_other_scalar_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        other: S,
    ) -> Tensor {
        self.f_special_xlog1py_other_scalar_out(out, other).unwrap()
    }

    pub fn special_xlog1py_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_special_xlog1py_out(out, other).unwrap()
    }

    pub fn special_xlog1py_self_scalar<S: Into<Scalar>>(self_scalar: S, other: &Tensor) -> Tensor {
        Tensor::f_special_xlog1py_self_scalar(self_scalar, other).unwrap()
    }

    pub fn special_xlog1py_self_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        self_scalar: S,
        other: &Tensor,
    ) -> Tensor {
        Tensor::f_special_xlog1py_self_scalar_out(out, self_scalar, other).unwrap()
    }

    pub fn special_xlogy(&self, other: &Tensor) -> Tensor {
        self.f_special_xlogy(other).unwrap()
    }

    pub fn special_xlogy_other_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_special_xlogy_other_scalar(other).unwrap()
    }

    pub fn special_xlogy_other_scalar_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        other: S,
    ) -> Tensor {
        self.f_special_xlogy_other_scalar_out(out, other).unwrap()
    }

    pub fn special_xlogy_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_special_xlogy_out(out, other).unwrap()
    }

    pub fn special_xlogy_self_scalar<S: Into<Scalar>>(self_scalar: S, other: &Tensor) -> Tensor {
        Tensor::f_special_xlogy_self_scalar(self_scalar, other).unwrap()
    }

    pub fn special_xlogy_self_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        self_scalar: S,
        other: &Tensor,
    ) -> Tensor {
        Tensor::f_special_xlogy_self_scalar_out(out, self_scalar, other).unwrap()
    }

    pub fn special_zeta(&self, other: &Tensor) -> Tensor {
        self.f_special_zeta(other).unwrap()
    }

    pub fn special_zeta_other_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_special_zeta_other_scalar(other).unwrap()
    }

    pub fn special_zeta_other_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_special_zeta_other_scalar_out(out, other).unwrap()
    }

    pub fn special_zeta_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_special_zeta_out(out, other).unwrap()
    }

    pub fn special_zeta_self_scalar<S: Into<Scalar>>(self_scalar: S, other: &Tensor) -> Tensor {
        Tensor::f_special_zeta_self_scalar(self_scalar, other).unwrap()
    }

    pub fn special_zeta_self_scalar_out<S: Into<Scalar>>(
        out: &Tensor,
        self_scalar: S,
        other: &Tensor,
    ) -> Tensor {
        Tensor::f_special_zeta_self_scalar_out(out, self_scalar, other).unwrap()
    }
17591
    // Tensor-splitting wrappers, returning a Vec of pieces. The `_out` variants
    // take a slice of pre-existing destination tensors and return `()`. As
    // everywhere in this file, errors from the `f_*` variant cause a panic.
    pub fn split(&self, split_size: i64, dim: i64) -> Vec<Tensor> {
        self.f_split(split_size, dim).unwrap()
    }

    pub fn split_copy(&self, split_size: i64, dim: i64) -> Vec<Tensor> {
        self.f_split_copy(split_size, dim).unwrap()
    }

    pub fn split_copy_tensor_out<T: Borrow<Tensor>>(&self, out: &[T], split_size: i64, dim: i64) {
        self.f_split_copy_tensor_out(out, split_size, dim).unwrap()
    }

    pub fn split_sizes(&self, split_size: impl IntList, dim: i64) -> Vec<Tensor> {
        self.f_split_sizes(split_size, dim).unwrap()
    }

    pub fn split_with_sizes(&self, split_sizes: impl IntList, dim: i64) -> Vec<Tensor> {
        self.f_split_with_sizes(split_sizes, dim).unwrap()
    }

    pub fn split_with_sizes_copy(&self, split_sizes: impl IntList, dim: i64) -> Vec<Tensor> {
        self.f_split_with_sizes_copy(split_sizes, dim).unwrap()
    }

    pub fn split_with_sizes_copy_out<T: Borrow<Tensor>>(
        &self,
        out: &[T],
        split_sizes: impl IntList,
        dim: i64,
    ) {
        self.f_split_with_sizes_copy_out(out, split_sizes, dim).unwrap()
    }
17624
    // sqrt / square / squeeze wrappers. Trailing-underscore methods are the
    // in-place variants (hence `&mut self`); they still return a Tensor, which
    // presumably aliases the modified receiver as in LibTorch — TODO confirm.
    pub fn sqrt(&self) -> Tensor {
        self.f_sqrt().unwrap()
    }

    pub fn sqrt_(&mut self) -> Tensor {
        self.f_sqrt_().unwrap()
    }

    pub fn sqrt_out(&self, out: &Tensor) -> Tensor {
        self.f_sqrt_out(out).unwrap()
    }

    pub fn square(&self) -> Tensor {
        self.f_square().unwrap()
    }

    pub fn square_(&mut self) -> Tensor {
        self.f_square_().unwrap()
    }

    pub fn square_out(&self, out: &Tensor) -> Tensor {
        self.f_square_out(out).unwrap()
    }

    pub fn squeeze(&self) -> Tensor {
        self.f_squeeze().unwrap()
    }

    pub fn squeeze_(&mut self) -> Tensor {
        self.f_squeeze_().unwrap()
    }

    pub fn squeeze_copy(&self) -> Tensor {
        self.f_squeeze_copy().unwrap()
    }

    pub fn squeeze_copy_dim(&self, dim: i64) -> Tensor {
        self.f_squeeze_copy_dim(dim).unwrap()
    }

    pub fn squeeze_copy_dim_out(&self, out: &Tensor, dim: i64) -> Tensor {
        self.f_squeeze_copy_dim_out(out, dim).unwrap()
    }

    pub fn squeeze_copy_dims(&self, dim: impl IntList) -> Tensor {
        self.f_squeeze_copy_dims(dim).unwrap()
    }

    pub fn squeeze_copy_dims_out(&self, out: &Tensor, dim: impl IntList) -> Tensor {
        self.f_squeeze_copy_dims_out(out, dim).unwrap()
    }

    pub fn squeeze_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_squeeze_copy_out(out).unwrap()
    }

    pub fn squeeze_dim(&self, dim: i64) -> Tensor {
        self.f_squeeze_dim(dim).unwrap()
    }

    pub fn squeeze_dim_(&mut self, dim: i64) -> Tensor {
        self.f_squeeze_dim_(dim).unwrap()
    }

    pub fn squeeze_dims(&self, dim: impl IntList) -> Tensor {
        self.f_squeeze_dims(dim).unwrap()
    }

    pub fn squeeze_dims_(&mut self, dim: impl IntList) -> Tensor {
        self.f_squeeze_dims_(dim).unwrap()
    }
17696
    // sspaddmm (sparse addmm, per the ATen name) and stack wrappers. `stack`
    // is an associated function over a slice of tensors concatenated along a
    // new `dim`; panics on error like the rest of this file.
    pub fn sspaddmm(&self, mat1: &Tensor, mat2: &Tensor) -> Tensor {
        self.f_sspaddmm(mat1, mat2).unwrap()
    }

    pub fn sspaddmm_out(&self, out: &Tensor, mat1: &Tensor, mat2: &Tensor) -> Tensor {
        self.f_sspaddmm_out(out, mat1, mat2).unwrap()
    }

    pub fn stack<T: Borrow<Tensor>>(tensors: &[T], dim: i64) -> Tensor {
        Tensor::f_stack(tensors, dim).unwrap()
    }

    pub fn stack_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T], dim: i64) -> Tensor {
        Tensor::f_stack_out(out, tensors, dim).unwrap()
    }
17712
    // Standard-deviation wrappers. `std_mean*` variants return the pair
    // (std, mean); `_correction` variants take a Scalar correction term and
    // `impl IntListOption` for an optional dim list. Panic on error.
    pub fn std(&self, unbiased: bool) -> Tensor {
        self.f_std(unbiased).unwrap()
    }

    pub fn std_correction<S: Into<Scalar>>(
        &self,
        dim: impl IntListOption,
        correction: S,
        keepdim: bool,
    ) -> Tensor {
        self.f_std_correction(dim, correction, keepdim).unwrap()
    }

    pub fn std_correction_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        dim: impl IntListOption,
        correction: S,
        keepdim: bool,
    ) -> Tensor {
        self.f_std_correction_out(out, dim, correction, keepdim).unwrap()
    }

    pub fn std_dim(&self, dim: impl IntListOption, unbiased: bool, keepdim: bool) -> Tensor {
        self.f_std_dim(dim, unbiased, keepdim).unwrap()
    }

    pub fn std_mean(&self, unbiased: bool) -> (Tensor, Tensor) {
        self.f_std_mean(unbiased).unwrap()
    }

    pub fn std_mean_correction<S: Into<Scalar>>(
        &self,
        dim: impl IntListOption,
        correction: S,
        keepdim: bool,
    ) -> (Tensor, Tensor) {
        self.f_std_mean_correction(dim, correction, keepdim).unwrap()
    }

    pub fn std_mean_correction_out<S: Into<Scalar>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        dim: impl IntListOption,
        correction: S,
        keepdim: bool,
    ) -> (Tensor, Tensor) {
        self.f_std_mean_correction_out(out0, out1, dim, correction, keepdim).unwrap()
    }

    pub fn std_mean_dim(
        &self,
        dim: impl IntListOption,
        unbiased: bool,
        keepdim: bool,
    ) -> (Tensor, Tensor) {
        self.f_std_mean_dim(dim, unbiased, keepdim).unwrap()
    }

    pub fn std_out(
        &self,
        out: &Tensor,
        dim: impl IntListOption,
        unbiased: bool,
        keepdim: bool,
    ) -> Tensor {
        self.f_std_out(out, dim, unbiased, keepdim).unwrap()
    }
17782
    // Short-time Fourier transform wrappers. `window` is an optional tensor
    // (any `Borrow<Tensor>`); `stft_center` additionally exposes the `center`
    // and `pad_mode` arguments of the underlying op. Panics on error.
    pub fn stft<T: Borrow<Tensor>>(
        &self,
        n_fft: i64,
        hop_length: impl Into<Option<i64>>,
        win_length: impl Into<Option<i64>>,
        window: Option<T>,
        normalized: bool,
        onesided: bool,
        return_complex: bool,
    ) -> Tensor {
        self.f_stft(n_fft, hop_length, win_length, window, normalized, onesided, return_complex)
            .unwrap()
    }

    pub fn stft_center<T: Borrow<Tensor>>(
        &self,
        n_fft: i64,
        hop_length: impl Into<Option<i64>>,
        win_length: impl Into<Option<i64>>,
        window: Option<T>,
        center: bool,
        pad_mode: &str,
        normalized: bool,
        onesided: bool,
        return_complex: bool,
    ) -> Tensor {
        self.f_stft_center(
            n_fft,
            hop_length,
            win_length,
            window,
            center,
            pad_mode,
            normalized,
            onesided,
            return_complex,
        )
        .unwrap()
    }
17822
    // Subtraction wrappers. NOTE(review): the `g_` prefix on `g_sub*` is
    // presumably the generator's rename to avoid clashing with the
    // `std::ops::Sub` trait method — confirm against the binding generator.
    // The `subtract*` aliases keep their plain names. All panic on error.
    pub fn g_sub(&self, other: &Tensor) -> Tensor {
        self.f_sub(other).unwrap()
    }

    pub fn g_sub_(&mut self, other: &Tensor) -> Tensor {
        self.f_sub_(other).unwrap()
    }

    pub fn sub_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_sub_out(out, other).unwrap()
    }

    pub fn g_sub_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_sub_scalar(other).unwrap()
    }

    pub fn g_sub_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_sub_scalar_(other).unwrap()
    }

    pub fn sub_scalar_out<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_sub_scalar_out(out, other).unwrap()
    }

    pub fn subtract(&self, other: &Tensor) -> Tensor {
        self.f_subtract(other).unwrap()
    }

    pub fn subtract_(&mut self, other: &Tensor) -> Tensor {
        self.f_subtract_(other).unwrap()
    }

    pub fn subtract_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_subtract_out(out, other).unwrap()
    }

    pub fn subtract_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_subtract_scalar(other).unwrap()
    }

    pub fn subtract_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_subtract_scalar_(other).unwrap()
    }
17866
    // Summation wrappers: full reduction, per-dimension reduction
    // (`sum_dim_intlist`), their `_out` forms, and `sum_to_size`. The optional
    // `dtype` selects the accumulation/result kind. Panic on error.
    pub fn sum(&self, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_sum(dtype).unwrap()
    }

    pub fn sum_dim_intlist(
        &self,
        dim: impl IntListOption,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_sum_dim_intlist(dim, keepdim, dtype).unwrap()
    }

    pub fn sum_intlist_out(
        &self,
        out: &Tensor,
        dim: impl IntListOption,
        keepdim: bool,
        dtype: impl Into<Option<Kind>>,
    ) -> Tensor {
        self.f_sum_intlist_out(out, dim, keepdim, dtype).unwrap()
    }

    pub fn sum_out(&self, out: &Tensor, dtype: impl Into<Option<Kind>>) -> Tensor {
        self.f_sum_out(out, dtype).unwrap()
    }

    pub fn sum_to_size(&self, size: impl IntList) -> Tensor {
        self.f_sum_to_size(size).unwrap()
    }
17897
    /// Infallible variant of [`Tensor::f_svd`]. Returns a triple of tensors —
    /// `(u, s, v)` judging by the parameter names of the `svd_u` out-variant
    /// below. Panics if the underlying call reports an error.
    pub fn svd(&self, some: bool, compute_uv: bool) -> (Tensor, Tensor, Tensor) {
        self.f_svd(some, compute_uv).unwrap()
    }
17901
17902    pub fn svd_u(
17903        &self,
17904        u: &Tensor,
17905        s: &Tensor,
17906        v: &Tensor,
17907        some: bool,
17908        compute_uv: bool,
17909    ) -> (Tensor, Tensor, Tensor) {
17910        self.f_svd_u(u, s, v, some, compute_uv).unwrap()
17911    }
17912
17913    pub fn swapaxes(&self, axis0: i64, axis1: i64) -> Tensor {
17914        self.f_swapaxes(axis0, axis1).unwrap()
17915    }
17916
17917    pub fn swapaxes_(&mut self, axis0: i64, axis1: i64) -> Tensor {
17918        self.f_swapaxes_(axis0, axis1).unwrap()
17919    }
17920
17921    pub fn swapdims(&self, dim0: i64, dim1: i64) -> Tensor {
17922        self.f_swapdims(dim0, dim1).unwrap()
17923    }
17924
17925    pub fn swapdims_(&mut self, dim0: i64, dim1: i64) -> Tensor {
17926        self.f_swapdims_(dim0, dim1).unwrap()
17927    }
17928
    /// Infallible variant of [`Tensor::f_tr`].
    ///
    /// NOTE(review): presumably ATen's `t()` (2-D transpose) renamed by the
    /// generator to avoid a one-letter method — confirm against the generator;
    /// the in-place sibling below is still named `t_`. Panics on error.
    pub fn tr(&self) -> Tensor {
        self.f_tr().unwrap()
    }
17932
17933    pub fn t_(&mut self) -> Tensor {
17934        self.f_t_().unwrap()
17935    }
17936
17937    pub fn t_copy(&self) -> Tensor {
17938        self.f_t_copy().unwrap()
17939    }
17940
17941    pub fn t_copy_out(&self, out: &Tensor) -> Tensor {
17942        self.f_t_copy_out(out).unwrap()
17943    }
17944
17945    pub fn take(&self, index: &Tensor) -> Tensor {
17946        self.f_take(index).unwrap()
17947    }
17948
17949    pub fn take_along_dim(&self, indices: &Tensor, dim: impl Into<Option<i64>>) -> Tensor {
17950        self.f_take_along_dim(indices, dim).unwrap()
17951    }
17952
17953    pub fn take_along_dim_out(
17954        &self,
17955        out: &Tensor,
17956        indices: &Tensor,
17957        dim: impl Into<Option<i64>>,
17958    ) -> Tensor {
17959        self.f_take_along_dim_out(out, indices, dim).unwrap()
17960    }
17961
17962    pub fn take_out(&self, out: &Tensor, index: &Tensor) -> Tensor {
17963        self.f_take_out(out, index).unwrap()
17964    }
17965
17966    pub fn tan(&self) -> Tensor {
17967        self.f_tan().unwrap()
17968    }
17969
17970    pub fn tan_(&mut self) -> Tensor {
17971        self.f_tan_().unwrap()
17972    }
17973
17974    pub fn tan_out(&self, out: &Tensor) -> Tensor {
17975        self.f_tan_out(out).unwrap()
17976    }
17977
17978    pub fn tanh(&self) -> Tensor {
17979        self.f_tanh().unwrap()
17980    }
17981
17982    pub fn tanh_(&mut self) -> Tensor {
17983        self.f_tanh_().unwrap()
17984    }
17985
17986    pub fn tanh_backward(grad_output: &Tensor, output: &Tensor) -> Tensor {
17987        Tensor::f_tanh_backward(grad_output, output).unwrap()
17988    }
17989
17990    pub fn tanh_backward_grad_input(
17991        grad_input: &Tensor,
17992        grad_output: &Tensor,
17993        output: &Tensor,
17994    ) -> Tensor {
17995        Tensor::f_tanh_backward_grad_input(grad_input, grad_output, output).unwrap()
17996    }
17997
17998    pub fn tanh_out(&self, out: &Tensor) -> Tensor {
17999        self.f_tanh_out(out).unwrap()
18000    }
18001
18002    pub fn tensor_split(&self, sections: i64, dim: i64) -> Vec<Tensor> {
18003        self.f_tensor_split(sections, dim).unwrap()
18004    }
18005
18006    pub fn tensor_split_indices(&self, indices: impl IntList, dim: i64) -> Vec<Tensor> {
18007        self.f_tensor_split_indices(indices, dim).unwrap()
18008    }
18009
18010    pub fn tensor_split_tensor_indices_or_sections(
18011        &self,
18012        tensor_indices_or_sections: &Tensor,
18013        dim: i64,
18014    ) -> Vec<Tensor> {
18015        self.f_tensor_split_tensor_indices_or_sections(tensor_indices_or_sections, dim).unwrap()
18016    }
18017
18018    pub fn tensordot(
18019        &self,
18020        other: &Tensor,
18021        dims_self: impl IntList,
18022        dims_other: impl IntList,
18023    ) -> Tensor {
18024        self.f_tensordot(other, dims_self, dims_other).unwrap()
18025    }
18026
18027    pub fn tensordot_out(
18028        &self,
18029        out: &Tensor,
18030        other: &Tensor,
18031        dims_self: impl IntList,
18032        dims_other: impl IntList,
18033    ) -> Tensor {
18034        self.f_tensordot_out(out, other, dims_self, dims_other).unwrap()
18035    }
18036
    /// Infallible variant of [`Tensor::f_threshold`].
    ///
    /// Note that `threshold` and `value` share the single generic `S`, so both
    /// arguments must be the same concrete scalar type at the call site.
    /// Panics if the underlying call reports an error.
    pub fn threshold<S: Into<Scalar>>(&self, threshold: S, value: S) -> Tensor {
        self.f_threshold(threshold, value).unwrap()
    }
18040
18041    pub fn threshold_<S: Into<Scalar>>(&mut self, threshold: S, value: S) -> Tensor {
18042        self.f_threshold_(threshold, value).unwrap()
18043    }
18044
18045    pub fn threshold_backward<S: Into<Scalar>>(
18046        &self,
18047        grad_output: &Tensor,
18048        threshold: S,
18049    ) -> Tensor {
18050        self.f_threshold_backward(grad_output, threshold).unwrap()
18051    }
18052
18053    pub fn threshold_backward_grad_input<S: Into<Scalar>>(
18054        &self,
18055        grad_input: &Tensor,
18056        grad_output: &Tensor,
18057        threshold: S,
18058    ) -> Tensor {
18059        self.f_threshold_backward_grad_input(grad_input, grad_output, threshold).unwrap()
18060    }
18061
18062    pub fn threshold_out<S: Into<Scalar>>(&self, out: &Tensor, threshold: S, value: S) -> Tensor {
18063        self.f_threshold_out(out, threshold, value).unwrap()
18064    }
18065
18066    pub fn tile(&self, dims: impl IntList) -> Tensor {
18067        self.f_tile(dims).unwrap()
18068    }
18069
    /// Infallible variant of [`Tensor::f_to`]: returns this tensor on the
    /// given `device`. Panics if the underlying call reports an error.
    pub fn to(&self, device: Device) -> Tensor {
        self.f_to(device).unwrap()
    }
18073
18074    pub fn to_dense(&self, dtype: impl Into<Option<Kind>>, masked_grad: bool) -> Tensor {
18075        self.f_to_dense(dtype, masked_grad).unwrap()
18076    }
18077
18078    pub fn to_dense_backward(&self, grad: &Tensor, masked_grad: bool) -> Tensor {
18079        self.f_to_dense_backward(grad, masked_grad).unwrap()
18080    }
18081
    /// Infallible variant of [`Tensor::f_to_device_`].
    ///
    /// NOTE(review): takes `&self` even though the trailing underscore usually
    /// marks an in-place op in this file — the signature comes straight from
    /// the generator; confirm whether this mutates in place or returns a
    /// converted tensor. Panics if the underlying call reports an error.
    pub fn to_device_(
        &self,
        device: Device,
        dtype: Kind,
        non_blocking: bool,
        copy: bool,
    ) -> Tensor {
        self.f_to_device_(device, dtype, non_blocking, copy).unwrap()
    }
18091
18092    pub fn to_dtype(&self, dtype: Kind, non_blocking: bool, copy: bool) -> Tensor {
18093        self.f_to_dtype(dtype, non_blocking, copy).unwrap()
18094    }
18095
18096    pub fn to_dtype_layout(
18097        &self,
18098        options: (Kind, Device),
18099        non_blocking: bool,
18100        copy: bool,
18101    ) -> Tensor {
18102        self.f_to_dtype_layout(options, non_blocking, copy).unwrap()
18103    }
18104
18105    pub fn g_to_mkldnn(&self, dtype: impl Into<Option<Kind>>) -> Tensor {
18106        self.f_to_mkldnn(dtype).unwrap()
18107    }
18108
18109    pub fn to_mkldnn_backward(&self, grad: &Tensor) -> Tensor {
18110        self.f_to_mkldnn_backward(grad).unwrap()
18111    }
18112
18113    pub fn to_mkldnn_out(&self, out: &Tensor, dtype: impl Into<Option<Kind>>) -> Tensor {
18114        self.f_to_mkldnn_out(out, dtype).unwrap()
18115    }
18116
18117    pub fn to_other(&self, other: &Tensor, non_blocking: bool, copy: bool) -> Tensor {
18118        self.f_to_other(other, non_blocking, copy).unwrap()
18119    }
18120
18121    pub fn to_padded_tensor(&self, padding: f64, output_size: impl IntListOption) -> Tensor {
18122        self.f_to_padded_tensor(padding, output_size).unwrap()
18123    }
18124
18125    pub fn to_padded_tensor_out(
18126        &self,
18127        out: &Tensor,
18128        padding: f64,
18129        output_size: impl IntListOption,
18130    ) -> Tensor {
18131        self.f_to_padded_tensor_out(out, padding, output_size).unwrap()
18132    }
18133
18134    pub fn to_sparse(
18135        &self,
18136        layout: Option<Layout>,
18137        blocksize: impl IntListOption,
18138        dense_dim: impl Into<Option<i64>>,
18139    ) -> Tensor {
18140        self.f_to_sparse(layout, blocksize, dense_dim).unwrap()
18141    }
18142
18143    pub fn to_sparse_bsc(
18144        &self,
18145        blocksize: impl IntList,
18146        dense_dim: impl Into<Option<i64>>,
18147    ) -> Tensor {
18148        self.f_to_sparse_bsc(blocksize, dense_dim).unwrap()
18149    }
18150
18151    pub fn to_sparse_bsr(
18152        &self,
18153        blocksize: impl IntList,
18154        dense_dim: impl Into<Option<i64>>,
18155    ) -> Tensor {
18156        self.f_to_sparse_bsr(blocksize, dense_dim).unwrap()
18157    }
18158
18159    pub fn to_sparse_csc(&self, dense_dim: impl Into<Option<i64>>) -> Tensor {
18160        self.f_to_sparse_csc(dense_dim).unwrap()
18161    }
18162
18163    pub fn to_sparse_csr(&self, dense_dim: impl Into<Option<i64>>) -> Tensor {
18164        self.f_to_sparse_csr(dense_dim).unwrap()
18165    }
18166
18167    pub fn to_sparse_sparse_dim(&self, sparse_dim: i64) -> Tensor {
18168        self.f_to_sparse_sparse_dim(sparse_dim).unwrap()
18169    }
18170
    /// Infallible variant of [`Tensor::f_topk`]. Returns a pair of tensors —
    /// `(values, indices)` judging by the parameter names of the `topk_values`
    /// out-variant below. Panics if the underlying call reports an error.
    pub fn topk(&self, k: i64, dim: i64, largest: bool, sorted: bool) -> (Tensor, Tensor) {
        self.f_topk(k, dim, largest, sorted).unwrap()
    }
18174
18175    pub fn topk_values(
18176        &self,
18177        values: &Tensor,
18178        indices: &Tensor,
18179        k: i64,
18180        dim: i64,
18181        largest: bool,
18182        sorted: bool,
18183    ) -> (Tensor, Tensor) {
18184        self.f_topk_values(values, indices, k, dim, largest, sorted).unwrap()
18185    }
18186
18187    pub fn totype(&self, scalar_type: Kind) -> Tensor {
18188        self.f_totype(scalar_type).unwrap()
18189    }
18190
18191    pub fn trace(&self) -> Tensor {
18192        self.f_trace().unwrap()
18193    }
18194
18195    pub fn trace_backward(grad: &Tensor, sizes: impl IntList) -> Tensor {
18196        Tensor::f_trace_backward(grad, sizes).unwrap()
18197    }
18198
18199    pub fn trace_out(&self, out: &Tensor) -> Tensor {
18200        self.f_trace_out(out).unwrap()
18201    }
18202
18203    pub fn transpose(&self, dim0: i64, dim1: i64) -> Tensor {
18204        self.f_transpose(dim0, dim1).unwrap()
18205    }
18206
18207    pub fn transpose_(&mut self, dim0: i64, dim1: i64) -> Tensor {
18208        self.f_transpose_(dim0, dim1).unwrap()
18209    }
18210
18211    pub fn transpose_copy(&self, dim0: i64, dim1: i64) -> Tensor {
18212        self.f_transpose_copy(dim0, dim1).unwrap()
18213    }
18214
18215    pub fn transpose_copy_int_out(&self, out: &Tensor, dim0: i64, dim1: i64) -> Tensor {
18216        self.f_transpose_copy_int_out(out, dim0, dim1).unwrap()
18217    }
18218
18219    pub fn trapezoid(y: &Tensor, dim: i64) -> Tensor {
18220        Tensor::f_trapezoid(y, dim).unwrap()
18221    }
18222
18223    pub fn trapezoid_x(y: &Tensor, x: &Tensor, dim: i64) -> Tensor {
18224        Tensor::f_trapezoid_x(y, x, dim).unwrap()
18225    }
18226
18227    pub fn trapz(y: &Tensor, x: &Tensor, dim: i64) -> Tensor {
18228        Tensor::f_trapz(y, x, dim).unwrap()
18229    }
18230
18231    pub fn trapz_dx(y: &Tensor, dx: f64, dim: i64) -> Tensor {
18232        Tensor::f_trapz_dx(y, dx, dim).unwrap()
18233    }
18234
18235    pub fn triangular_solve(
18236        &self,
18237        a: &Tensor,
18238        upper: bool,
18239        transpose: bool,
18240        unitriangular: bool,
18241    ) -> (Tensor, Tensor) {
18242        self.f_triangular_solve(a, upper, transpose, unitriangular).unwrap()
18243    }
18244
18245    pub fn triangular_solve_x(
18246        &self,
18247        x: &Tensor,
18248        m: &Tensor,
18249        a: &Tensor,
18250        upper: bool,
18251        transpose: bool,
18252        unitriangular: bool,
18253    ) -> (Tensor, Tensor) {
18254        self.f_triangular_solve_x(x, m, a, upper, transpose, unitriangular).unwrap()
18255    }
18256
18257    pub fn tril(&self, diagonal: i64) -> Tensor {
18258        self.f_tril(diagonal).unwrap()
18259    }
18260
18261    pub fn tril_(&mut self, diagonal: i64) -> Tensor {
18262        self.f_tril_(diagonal).unwrap()
18263    }
18264
18265    pub fn tril_indices(row: i64, col: i64, offset: i64, options: (Kind, Device)) -> Tensor {
18266        Tensor::f_tril_indices(row, col, offset, options).unwrap()
18267    }
18268
18269    pub fn tril_indices_out(out: &Tensor, row: i64, col: i64, offset: i64) -> Tensor {
18270        Tensor::f_tril_indices_out(out, row, col, offset).unwrap()
18271    }
18272
18273    pub fn tril_out(&self, out: &Tensor, diagonal: i64) -> Tensor {
18274        self.f_tril_out(out, diagonal).unwrap()
18275    }
18276
18277    pub fn triplet_margin_loss(
18278        anchor: &Tensor,
18279        positive: &Tensor,
18280        negative: &Tensor,
18281        margin: f64,
18282        p: f64,
18283        eps: f64,
18284        swap: bool,
18285        reduction: crate::Reduction,
18286    ) -> Tensor {
18287        Tensor::f_triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction)
18288            .unwrap()
18289    }
18290
18291    pub fn triu(&self, diagonal: i64) -> Tensor {
18292        self.f_triu(diagonal).unwrap()
18293    }
18294
18295    pub fn triu_(&mut self, diagonal: i64) -> Tensor {
18296        self.f_triu_(diagonal).unwrap()
18297    }
18298
18299    pub fn triu_indices(row: i64, col: i64, offset: i64, options: (Kind, Device)) -> Tensor {
18300        Tensor::f_triu_indices(row, col, offset, options).unwrap()
18301    }
18302
18303    pub fn triu_indices_out(out: &Tensor, row: i64, col: i64, offset: i64) -> Tensor {
18304        Tensor::f_triu_indices_out(out, row, col, offset).unwrap()
18305    }
18306
18307    pub fn triu_out(&self, out: &Tensor, diagonal: i64) -> Tensor {
18308        self.f_triu_out(out, diagonal).unwrap()
18309    }
18310
18311    pub fn true_divide(&self, other: &Tensor) -> Tensor {
18312        self.f_true_divide(other).unwrap()
18313    }
18314
18315    pub fn true_divide_(&mut self, other: &Tensor) -> Tensor {
18316        self.f_true_divide_(other).unwrap()
18317    }
18318
18319    pub fn true_divide_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
18320        self.f_true_divide_out(out, other).unwrap()
18321    }
18322
18323    pub fn true_divide_scalar<S: Into<Scalar>>(&self, other: S) -> Tensor {
18324        self.f_true_divide_scalar(other).unwrap()
18325    }
18326
18327    pub fn true_divide_scalar_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
18328        self.f_true_divide_scalar_(other).unwrap()
18329    }
18330
18331    pub fn trunc(&self) -> Tensor {
18332        self.f_trunc().unwrap()
18333    }
18334
18335    pub fn trunc_(&mut self) -> Tensor {
18336        self.f_trunc_().unwrap()
18337    }
18338
18339    pub fn trunc_out(&self, out: &Tensor) -> Tensor {
18340        self.f_trunc_out(out).unwrap()
18341    }
18342
18343    pub fn type_as(&self, other: &Tensor) -> Tensor {
18344        self.f_type_as(other).unwrap()
18345    }
18346
18347    pub fn unbind(&self, dim: i64) -> Vec<Tensor> {
18348        self.f_unbind(dim).unwrap()
18349    }
18350
18351    pub fn unbind_copy(&self, dim: i64) -> Vec<Tensor> {
18352        self.f_unbind_copy(dim).unwrap()
18353    }
18354
18355    pub fn unbind_copy_int_out<T: Borrow<Tensor>>(&self, out: &[T], dim: i64) {
18356        self.f_unbind_copy_int_out(out, dim).unwrap()
18357    }
18358
18359    pub fn unflatten(&self, dim: i64, sizes: impl IntList) -> Tensor {
18360        self.f_unflatten(dim, sizes).unwrap()
18361    }
18362
18363    pub fn unflatten_dense_tensors<T: Borrow<Tensor>>(flat: &Tensor, tensors: &[T]) -> Vec<Tensor> {
18364        Tensor::f_unflatten_dense_tensors(flat, tensors).unwrap()
18365    }
18366
18367    pub fn unfold(&self, dimension: i64, size: i64, step: i64) -> Tensor {
18368        self.f_unfold(dimension, size, step).unwrap()
18369    }
18370
18371    pub fn unfold_backward(
18372        grad_in: &Tensor,
18373        input_sizes: impl IntList,
18374        dim: i64,
18375        size: i64,
18376        step: i64,
18377    ) -> Tensor {
18378        Tensor::f_unfold_backward(grad_in, input_sizes, dim, size, step).unwrap()
18379    }
18380
18381    pub fn unfold_backward_out(
18382        out: &Tensor,
18383        grad_in: &Tensor,
18384        input_sizes: impl IntList,
18385        dim: i64,
18386        size: i64,
18387        step: i64,
18388    ) -> Tensor {
18389        Tensor::f_unfold_backward_out(out, grad_in, input_sizes, dim, size, step).unwrap()
18390    }
18391
18392    pub fn unfold_copy(&self, dimension: i64, size: i64, step: i64) -> Tensor {
18393        self.f_unfold_copy(dimension, size, step).unwrap()
18394    }
18395
18396    pub fn unfold_copy_out(&self, out: &Tensor, dimension: i64, size: i64, step: i64) -> Tensor {
18397        self.f_unfold_copy_out(out, dimension, size, step).unwrap()
18398    }
18399
    /// Infallible variant of [`Tensor::f_uniform`] with bounds `from`/`to`
    /// (exact interval semantics are defined by libtorch). Panics if the
    /// underlying call reports an error.
    pub fn uniform(&self, from: f64, to: f64) -> Tensor {
        self.f_uniform(from, to).unwrap()
    }
18403
18404    pub fn uniform_(&mut self, from: f64, to: f64) -> Tensor {
18405        self.f_uniform_(from, to).unwrap()
18406    }
18407
18408    pub fn uniform_out(&self, out: &Tensor, from: f64, to: f64) -> Tensor {
18409        self.f_uniform_out(out, from, to).unwrap()
18410    }
18411
    /// Infallible variant of [`Tensor::f_unique_consecutive`].
    ///
    /// `dim` is optional (`None` per `Into<Option<i64>>`); the three returned
    /// tensors correspond to the output plus the inverse/counts tensors gated
    /// by `return_inverse`/`return_counts`. Panics on error.
    pub fn unique_consecutive(
        &self,
        return_inverse: bool,
        return_counts: bool,
        dim: impl Into<Option<i64>>,
    ) -> (Tensor, Tensor, Tensor) {
        self.f_unique_consecutive(return_inverse, return_counts, dim).unwrap()
    }
18420
18421    pub fn unique_consecutive_out(
18422        &self,
18423        out0: &Tensor,
18424        out1: &Tensor,
18425        out2: &Tensor,
18426        return_inverse: bool,
18427        return_counts: bool,
18428        dim: impl Into<Option<i64>>,
18429    ) -> (Tensor, Tensor, Tensor) {
18430        self.f_unique_consecutive_out(out0, out1, out2, return_inverse, return_counts, dim).unwrap()
18431    }
18432
18433    pub fn unique_dim(
18434        &self,
18435        dim: i64,
18436        sorted: bool,
18437        return_inverse: bool,
18438        return_counts: bool,
18439    ) -> (Tensor, Tensor, Tensor) {
18440        self.f_unique_dim(dim, sorted, return_inverse, return_counts).unwrap()
18441    }
18442
18443    pub fn unique_dim_consecutive(
18444        &self,
18445        dim: i64,
18446        return_inverse: bool,
18447        return_counts: bool,
18448    ) -> (Tensor, Tensor, Tensor) {
18449        self.f_unique_dim_consecutive(dim, return_inverse, return_counts).unwrap()
18450    }
18451
18452    pub fn unique_dim_consecutive_out(
18453        &self,
18454        out0: &Tensor,
18455        out1: &Tensor,
18456        out2: &Tensor,
18457        dim: i64,
18458        return_inverse: bool,
18459        return_counts: bool,
18460    ) -> (Tensor, Tensor, Tensor) {
18461        self.f_unique_dim_consecutive_out(out0, out1, out2, dim, return_inverse, return_counts)
18462            .unwrap()
18463    }
18464
18465    pub fn unique_dim_out(
18466        &self,
18467        out0: &Tensor,
18468        out1: &Tensor,
18469        out2: &Tensor,
18470        dim: i64,
18471        sorted: bool,
18472        return_inverse: bool,
18473        return_counts: bool,
18474    ) -> (Tensor, Tensor, Tensor) {
18475        self.f_unique_dim_out(out0, out1, out2, dim, sorted, return_inverse, return_counts).unwrap()
18476    }
18477
18478    pub fn unsafe_chunk(&self, chunks: i64, dim: i64) -> Vec<Tensor> {
18479        self.f_unsafe_chunk(chunks, dim).unwrap()
18480    }
18481
18482    pub fn unsafe_split(&self, split_size: i64, dim: i64) -> Vec<Tensor> {
18483        self.f_unsafe_split(split_size, dim).unwrap()
18484    }
18485
18486    pub fn unsafe_split_tensor_out<T: Borrow<Tensor>>(&self, out: &[T], split_size: i64, dim: i64) {
18487        self.f_unsafe_split_tensor_out(out, split_size, dim).unwrap()
18488    }
18489
18490    pub fn unsafe_split_with_sizes(&self, split_sizes: impl IntList, dim: i64) -> Vec<Tensor> {
18491        self.f_unsafe_split_with_sizes(split_sizes, dim).unwrap()
18492    }
18493
18494    pub fn unsafe_split_with_sizes_out<T: Borrow<Tensor>>(
18495        &self,
18496        out: &[T],
18497        split_sizes: impl IntList,
18498        dim: i64,
18499    ) {
18500        self.f_unsafe_split_with_sizes_out(out, split_sizes, dim).unwrap()
18501    }
18502
18503    pub fn unsqueeze(&self, dim: i64) -> Tensor {
18504        self.f_unsqueeze(dim).unwrap()
18505    }
18506
18507    pub fn unsqueeze_(&mut self, dim: i64) -> Tensor {
18508        self.f_unsqueeze_(dim).unwrap()
18509    }
18510
18511    pub fn unsqueeze_copy(&self, dim: i64) -> Tensor {
18512        self.f_unsqueeze_copy(dim).unwrap()
18513    }
18514
18515    pub fn unsqueeze_copy_out(&self, out: &Tensor, dim: i64) -> Tensor {
18516        self.f_unsqueeze_copy_out(out, dim).unwrap()
18517    }
18518
    /// Infallible variant of [`Tensor::f_upsample_bicubic2d`].
    ///
    /// `scales_h`/`scales_w` are optional (`None` per `Into<Option<f64>>`);
    /// panics if the underlying libtorch call reports an error.
    pub fn upsample_bicubic2d(
        &self,
        output_size: impl IntList,
        align_corners: bool,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_upsample_bicubic2d(output_size, align_corners, scales_h, scales_w).unwrap()
    }
18528
18529    pub fn upsample_bicubic2d_backward(
18530        grad_output: &Tensor,
18531        output_size: impl IntList,
18532        input_size: impl IntList,
18533        align_corners: bool,
18534        scales_h: impl Into<Option<f64>>,
18535        scales_w: impl Into<Option<f64>>,
18536    ) -> Tensor {
18537        Tensor::f_upsample_bicubic2d_backward(
18538            grad_output,
18539            output_size,
18540            input_size,
18541            align_corners,
18542            scales_h,
18543            scales_w,
18544        )
18545        .unwrap()
18546    }
18547
18548    pub fn upsample_bicubic2d_backward_grad_input(
18549        grad_input: &Tensor,
18550        grad_output: &Tensor,
18551        output_size: impl IntList,
18552        input_size: impl IntList,
18553        align_corners: bool,
18554        scales_h: impl Into<Option<f64>>,
18555        scales_w: impl Into<Option<f64>>,
18556    ) -> Tensor {
18557        Tensor::f_upsample_bicubic2d_backward_grad_input(
18558            grad_input,
18559            grad_output,
18560            output_size,
18561            input_size,
18562            align_corners,
18563            scales_h,
18564            scales_w,
18565        )
18566        .unwrap()
18567    }
18568
18569    pub fn upsample_bicubic2d_out(
18570        &self,
18571        out: &Tensor,
18572        output_size: impl IntList,
18573        align_corners: bool,
18574        scales_h: impl Into<Option<f64>>,
18575        scales_w: impl Into<Option<f64>>,
18576    ) -> Tensor {
18577        self.f_upsample_bicubic2d_out(out, output_size, align_corners, scales_h, scales_w).unwrap()
18578    }
18579
18580    pub fn upsample_bicubic2d_vec(
18581        &self,
18582        output_size: impl IntListOption,
18583        align_corners: bool,
18584        scale_factors: impl DoubleList,
18585    ) -> Tensor {
18586        self.f_upsample_bicubic2d_vec(output_size, align_corners, scale_factors).unwrap()
18587    }
18588
18589    pub fn upsample_bilinear2d(
18590        &self,
18591        output_size: impl IntList,
18592        align_corners: bool,
18593        scales_h: impl Into<Option<f64>>,
18594        scales_w: impl Into<Option<f64>>,
18595    ) -> Tensor {
18596        self.f_upsample_bilinear2d(output_size, align_corners, scales_h, scales_w).unwrap()
18597    }
18598
18599    pub fn upsample_bilinear2d_backward(
18600        grad_output: &Tensor,
18601        output_size: impl IntList,
18602        input_size: impl IntList,
18603        align_corners: bool,
18604        scales_h: impl Into<Option<f64>>,
18605        scales_w: impl Into<Option<f64>>,
18606    ) -> Tensor {
18607        Tensor::f_upsample_bilinear2d_backward(
18608            grad_output,
18609            output_size,
18610            input_size,
18611            align_corners,
18612            scales_h,
18613            scales_w,
18614        )
18615        .unwrap()
18616    }
18617
18618    pub fn upsample_bilinear2d_backward_grad_input(
18619        grad_input: &Tensor,
18620        grad_output: &Tensor,
18621        output_size: impl IntList,
18622        input_size: impl IntList,
18623        align_corners: bool,
18624        scales_h: impl Into<Option<f64>>,
18625        scales_w: impl Into<Option<f64>>,
18626    ) -> Tensor {
18627        Tensor::f_upsample_bilinear2d_backward_grad_input(
18628            grad_input,
18629            grad_output,
18630            output_size,
18631            input_size,
18632            align_corners,
18633            scales_h,
18634            scales_w,
18635        )
18636        .unwrap()
18637    }
18638
18639    pub fn upsample_bilinear2d_out(
18640        &self,
18641        out: &Tensor,
18642        output_size: impl IntList,
18643        align_corners: bool,
18644        scales_h: impl Into<Option<f64>>,
18645        scales_w: impl Into<Option<f64>>,
18646    ) -> Tensor {
18647        self.f_upsample_bilinear2d_out(out, output_size, align_corners, scales_h, scales_w).unwrap()
18648    }
18649
18650    pub fn upsample_bilinear2d_vec(
18651        &self,
18652        output_size: impl IntListOption,
18653        align_corners: bool,
18654        scale_factors: impl DoubleList,
18655    ) -> Tensor {
18656        self.f_upsample_bilinear2d_vec(output_size, align_corners, scale_factors).unwrap()
18657    }
18658
18659    pub fn upsample_linear1d(
18660        &self,
18661        output_size: impl IntList,
18662        align_corners: bool,
18663        scales: impl Into<Option<f64>>,
18664    ) -> Tensor {
18665        self.f_upsample_linear1d(output_size, align_corners, scales).unwrap()
18666    }
18667
18668    pub fn upsample_linear1d_backward(
18669        grad_output: &Tensor,
18670        output_size: impl IntList,
18671        input_size: impl IntList,
18672        align_corners: bool,
18673        scales: impl Into<Option<f64>>,
18674    ) -> Tensor {
18675        Tensor::f_upsample_linear1d_backward(
18676            grad_output,
18677            output_size,
18678            input_size,
18679            align_corners,
18680            scales,
18681        )
18682        .unwrap()
18683    }
18684
18685    pub fn upsample_linear1d_backward_grad_input(
18686        grad_input: &Tensor,
18687        grad_output: &Tensor,
18688        output_size: impl IntList,
18689        input_size: impl IntList,
18690        align_corners: bool,
18691        scales: impl Into<Option<f64>>,
18692    ) -> Tensor {
18693        Tensor::f_upsample_linear1d_backward_grad_input(
18694            grad_input,
18695            grad_output,
18696            output_size,
18697            input_size,
18698            align_corners,
18699            scales,
18700        )
18701        .unwrap()
18702    }
18703
18704    pub fn upsample_linear1d_out(
18705        &self,
18706        out: &Tensor,
18707        output_size: impl IntList,
18708        align_corners: bool,
18709        scales: impl Into<Option<f64>>,
18710    ) -> Tensor {
18711        self.f_upsample_linear1d_out(out, output_size, align_corners, scales).unwrap()
18712    }
18713
18714    pub fn upsample_linear1d_vec(
18715        &self,
18716        output_size: impl IntListOption,
18717        align_corners: bool,
18718        scale_factors: impl DoubleList,
18719    ) -> Tensor {
18720        self.f_upsample_linear1d_vec(output_size, align_corners, scale_factors).unwrap()
18721    }
18722
    /// Infallible variant of [`Tensor::f_upsample_nearest1d`].
    ///
    /// `scales` is optional (`None` per `Into<Option<f64>>`); panics if the
    /// underlying libtorch call reports an error.
    pub fn upsample_nearest1d(
        &self,
        output_size: impl IntList,
        scales: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_upsample_nearest1d(output_size, scales).unwrap()
    }
18730
18731    pub fn upsample_nearest1d_backward(
18732        grad_output: &Tensor,
18733        output_size: impl IntList,
18734        input_size: impl IntList,
18735        scales: impl Into<Option<f64>>,
18736    ) -> Tensor {
18737        Tensor::f_upsample_nearest1d_backward(grad_output, output_size, input_size, scales).unwrap()
18738    }
18739
18740    pub fn upsample_nearest1d_backward_grad_input(
18741        grad_input: &Tensor,
18742        grad_output: &Tensor,
18743        output_size: impl IntList,
18744        input_size: impl IntList,
18745        scales: impl Into<Option<f64>>,
18746    ) -> Tensor {
18747        Tensor::f_upsample_nearest1d_backward_grad_input(
18748            grad_input,
18749            grad_output,
18750            output_size,
18751            input_size,
18752            scales,
18753        )
18754        .unwrap()
18755    }
18756
18757    pub fn upsample_nearest1d_out(
18758        &self,
18759        out: &Tensor,
18760        output_size: impl IntList,
18761        scales: impl Into<Option<f64>>,
18762    ) -> Tensor {
18763        self.f_upsample_nearest1d_out(out, output_size, scales).unwrap()
18764    }
18765
18766    pub fn upsample_nearest1d_vec(
18767        &self,
18768        output_size: impl IntListOption,
18769        scale_factors: impl DoubleList,
18770    ) -> Tensor {
18771        self.f_upsample_nearest1d_vec(output_size, scale_factors).unwrap()
18772    }
18773
18774    pub fn upsample_nearest2d(
18775        &self,
18776        output_size: impl IntList,
18777        scales_h: impl Into<Option<f64>>,
18778        scales_w: impl Into<Option<f64>>,
18779    ) -> Tensor {
18780        self.f_upsample_nearest2d(output_size, scales_h, scales_w).unwrap()
18781    }
18782
18783    pub fn upsample_nearest2d_backward(
18784        grad_output: &Tensor,
18785        output_size: impl IntList,
18786        input_size: impl IntList,
18787        scales_h: impl Into<Option<f64>>,
18788        scales_w: impl Into<Option<f64>>,
18789    ) -> Tensor {
18790        Tensor::f_upsample_nearest2d_backward(
18791            grad_output,
18792            output_size,
18793            input_size,
18794            scales_h,
18795            scales_w,
18796        )
18797        .unwrap()
18798    }
18799
18800    pub fn upsample_nearest2d_backward_grad_input(
18801        grad_input: &Tensor,
18802        grad_output: &Tensor,
18803        output_size: impl IntList,
18804        input_size: impl IntList,
18805        scales_h: impl Into<Option<f64>>,
18806        scales_w: impl Into<Option<f64>>,
18807    ) -> Tensor {
18808        Tensor::f_upsample_nearest2d_backward_grad_input(
18809            grad_input,
18810            grad_output,
18811            output_size,
18812            input_size,
18813            scales_h,
18814            scales_w,
18815        )
18816        .unwrap()
18817    }
18818
18819    pub fn upsample_nearest2d_out(
18820        &self,
18821        out: &Tensor,
18822        output_size: impl IntList,
18823        scales_h: impl Into<Option<f64>>,
18824        scales_w: impl Into<Option<f64>>,
18825    ) -> Tensor {
18826        self.f_upsample_nearest2d_out(out, output_size, scales_h, scales_w).unwrap()
18827    }
18828
    /// Infallible form of [`Tensor::f_upsample_nearest2d_vec`]; panics if the call fails.
    pub fn upsample_nearest2d_vec(
        &self,
        output_size: impl IntListOption,
        scale_factors: impl DoubleList,
    ) -> Tensor {
        self.f_upsample_nearest2d_vec(output_size, scale_factors).unwrap()
    }
18836
    /// Infallible form of [`Tensor::f_upsample_nearest3d`]; panics if the call fails.
    pub fn upsample_nearest3d(
        &self,
        output_size: impl IntList,
        scales_d: impl Into<Option<f64>>,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_upsample_nearest3d(output_size, scales_d, scales_h, scales_w).unwrap()
    }
18846
    /// Infallible form of [`Tensor::f_upsample_nearest3d_backward`]; panics if the call fails.
    pub fn upsample_nearest3d_backward(
        grad_output: &Tensor,
        output_size: impl IntList,
        input_size: impl IntList,
        scales_d: impl Into<Option<f64>>,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        Tensor::f_upsample_nearest3d_backward(
            grad_output,
            output_size,
            input_size,
            scales_d,
            scales_h,
            scales_w,
        )
        .unwrap()
    }
18865
    /// Infallible form of [`Tensor::f_upsample_nearest3d_backward_grad_input`]; panics if the call fails.
    pub fn upsample_nearest3d_backward_grad_input(
        grad_input: &Tensor,
        grad_output: &Tensor,
        output_size: impl IntList,
        input_size: impl IntList,
        scales_d: impl Into<Option<f64>>,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        Tensor::f_upsample_nearest3d_backward_grad_input(
            grad_input,
            grad_output,
            output_size,
            input_size,
            scales_d,
            scales_h,
            scales_w,
        )
        .unwrap()
    }
18886
    /// Infallible form of [`Tensor::f_upsample_nearest3d_out`]; panics if the call fails.
    pub fn upsample_nearest3d_out(
        &self,
        out: &Tensor,
        output_size: impl IntList,
        scales_d: impl Into<Option<f64>>,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_upsample_nearest3d_out(out, output_size, scales_d, scales_h, scales_w).unwrap()
    }
18897
    /// Infallible form of [`Tensor::f_upsample_nearest3d_vec`]; panics if the call fails.
    pub fn upsample_nearest3d_vec(
        &self,
        output_size: impl IntListOption,
        scale_factors: impl DoubleList,
    ) -> Tensor {
        self.f_upsample_nearest3d_vec(output_size, scale_factors).unwrap()
    }
18905
    /// Infallible form of [`Tensor::f_upsample_trilinear3d`]; panics if the call fails.
    pub fn upsample_trilinear3d(
        &self,
        output_size: impl IntList,
        align_corners: bool,
        scales_d: impl Into<Option<f64>>,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_upsample_trilinear3d(output_size, align_corners, scales_d, scales_h, scales_w)
            .unwrap()
    }
18917
    /// Infallible form of [`Tensor::f_upsample_trilinear3d_backward`]; panics if the call fails.
    pub fn upsample_trilinear3d_backward(
        grad_output: &Tensor,
        output_size: impl IntList,
        input_size: impl IntList,
        align_corners: bool,
        scales_d: impl Into<Option<f64>>,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        Tensor::f_upsample_trilinear3d_backward(
            grad_output,
            output_size,
            input_size,
            align_corners,
            scales_d,
            scales_h,
            scales_w,
        )
        .unwrap()
    }
18938
    /// Infallible form of [`Tensor::f_upsample_trilinear3d_backward_grad_input`]; panics if the call fails.
    pub fn upsample_trilinear3d_backward_grad_input(
        grad_input: &Tensor,
        grad_output: &Tensor,
        output_size: impl IntList,
        input_size: impl IntList,
        align_corners: bool,
        scales_d: impl Into<Option<f64>>,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        Tensor::f_upsample_trilinear3d_backward_grad_input(
            grad_input,
            grad_output,
            output_size,
            input_size,
            align_corners,
            scales_d,
            scales_h,
            scales_w,
        )
        .unwrap()
    }
18961
    /// Infallible form of [`Tensor::f_upsample_trilinear3d_out`]; panics if the call fails.
    pub fn upsample_trilinear3d_out(
        &self,
        out: &Tensor,
        output_size: impl IntList,
        align_corners: bool,
        scales_d: impl Into<Option<f64>>,
        scales_h: impl Into<Option<f64>>,
        scales_w: impl Into<Option<f64>>,
    ) -> Tensor {
        self.f_upsample_trilinear3d_out(
            out,
            output_size,
            align_corners,
            scales_d,
            scales_h,
            scales_w,
        )
        .unwrap()
    }
18981
    /// Infallible form of [`Tensor::f_upsample_trilinear3d_vec`]; panics if the call fails.
    pub fn upsample_trilinear3d_vec(
        &self,
        output_size: impl IntListOption,
        align_corners: bool,
        scale_factors: impl DoubleList,
    ) -> Tensor {
        self.f_upsample_trilinear3d_vec(output_size, align_corners, scale_factors).unwrap()
    }
18990
    /// Infallible form of [`Tensor::f_value_selecting_reduction_backward`]; panics if the call fails.
    pub fn value_selecting_reduction_backward(
        grad: &Tensor,
        dim: i64,
        indices: &Tensor,
        sizes: impl IntList,
        keepdim: bool,
    ) -> Tensor {
        Tensor::f_value_selecting_reduction_backward(grad, dim, indices, sizes, keepdim).unwrap()
    }
19000
    /// Infallible form of [`Tensor::f_values`]; panics if the call fails.
    pub fn values(&self) -> Tensor {
        self.f_values().unwrap()
    }
19004
    /// Infallible form of [`Tensor::f_values_copy`]; panics if the call fails.
    pub fn values_copy(&self) -> Tensor {
        self.f_values_copy().unwrap()
    }
19008
    /// Infallible form of [`Tensor::f_values_copy_out`]; panics if the call fails.
    pub fn values_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_values_copy_out(out).unwrap()
    }
19012
    /// Infallible form of [`Tensor::f_vander`]; panics if the call fails.
    pub fn vander(x: &Tensor, n: impl Into<Option<i64>>, increasing: bool) -> Tensor {
        Tensor::f_vander(x, n, increasing).unwrap()
    }
19016
    /// Infallible form of [`Tensor::f_var`]; panics if the call fails.
    pub fn var(&self, unbiased: bool) -> Tensor {
        self.f_var(unbiased).unwrap()
    }
19020
    /// Infallible form of [`Tensor::f_var_correction`]; panics if the call fails.
    pub fn var_correction<S: Into<Scalar>>(
        &self,
        dim: impl IntListOption,
        correction: S,
        keepdim: bool,
    ) -> Tensor {
        self.f_var_correction(dim, correction, keepdim).unwrap()
    }
19029
    /// Infallible form of [`Tensor::f_var_correction_out`]; panics if the call fails.
    pub fn var_correction_out<S: Into<Scalar>>(
        &self,
        out: &Tensor,
        dim: impl IntListOption,
        correction: S,
        keepdim: bool,
    ) -> Tensor {
        self.f_var_correction_out(out, dim, correction, keepdim).unwrap()
    }
19039
    /// Infallible form of [`Tensor::f_var_dim`]; panics if the call fails.
    pub fn var_dim(&self, dim: impl IntListOption, unbiased: bool, keepdim: bool) -> Tensor {
        self.f_var_dim(dim, unbiased, keepdim).unwrap()
    }
19043
    /// Infallible form of [`Tensor::f_var_mean`]; panics if the call fails.
    pub fn var_mean(&self, unbiased: bool) -> (Tensor, Tensor) {
        self.f_var_mean(unbiased).unwrap()
    }
19047
    /// Infallible form of [`Tensor::f_var_mean_correction`]; panics if the call fails.
    pub fn var_mean_correction<S: Into<Scalar>>(
        &self,
        dim: impl IntListOption,
        correction: S,
        keepdim: bool,
    ) -> (Tensor, Tensor) {
        self.f_var_mean_correction(dim, correction, keepdim).unwrap()
    }
19056
    /// Infallible form of [`Tensor::f_var_mean_correction_out`]; panics if the call fails.
    pub fn var_mean_correction_out<S: Into<Scalar>>(
        &self,
        out0: &Tensor,
        out1: &Tensor,
        dim: impl IntListOption,
        correction: S,
        keepdim: bool,
    ) -> (Tensor, Tensor) {
        self.f_var_mean_correction_out(out0, out1, dim, correction, keepdim).unwrap()
    }
19067
    /// Infallible form of [`Tensor::f_var_mean_dim`]; panics if the call fails.
    pub fn var_mean_dim(
        &self,
        dim: impl IntListOption,
        unbiased: bool,
        keepdim: bool,
    ) -> (Tensor, Tensor) {
        self.f_var_mean_dim(dim, unbiased, keepdim).unwrap()
    }
19076
    /// Infallible form of [`Tensor::f_var_out`]; panics if the call fails.
    pub fn var_out(
        &self,
        out: &Tensor,
        dim: impl IntListOption,
        unbiased: bool,
        keepdim: bool,
    ) -> Tensor {
        self.f_var_out(out, dim, unbiased, keepdim).unwrap()
    }
19086
    /// Infallible form of [`Tensor::f_vdot`]; panics if the call fails.
    pub fn vdot(&self, other: &Tensor) -> Tensor {
        self.f_vdot(other).unwrap()
    }
19090
    /// Infallible form of [`Tensor::f_vdot_out`]; panics if the call fails.
    pub fn vdot_out(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_vdot_out(out, other).unwrap()
    }
19094
    /// Infallible form of [`Tensor::f_view_`]; panics if the call fails.
    pub fn view_(&self, size: impl IntList) -> Tensor {
        self.f_view_(size).unwrap()
    }
19098
    /// Infallible form of [`Tensor::f_view_as`]; panics if the call fails.
    pub fn view_as(&self, other: &Tensor) -> Tensor {
        self.f_view_as(other).unwrap()
    }
19102
    /// Infallible form of [`Tensor::f_view_as_complex`]; panics if the call fails.
    pub fn view_as_complex(&self) -> Tensor {
        self.f_view_as_complex().unwrap()
    }
19106
    /// Infallible form of [`Tensor::f_view_as_complex_copy`]; panics if the call fails.
    pub fn view_as_complex_copy(&self) -> Tensor {
        self.f_view_as_complex_copy().unwrap()
    }
19110
    /// Infallible form of [`Tensor::f_view_as_complex_copy_out`]; panics if the call fails.
    pub fn view_as_complex_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_view_as_complex_copy_out(out).unwrap()
    }
19114
    /// Infallible form of [`Tensor::f_view_as_real`]; panics if the call fails.
    pub fn view_as_real(&self) -> Tensor {
        self.f_view_as_real().unwrap()
    }
19118
    /// Infallible form of [`Tensor::f_view_as_real_copy`]; panics if the call fails.
    pub fn view_as_real_copy(&self) -> Tensor {
        self.f_view_as_real_copy().unwrap()
    }
19122
    /// Infallible form of [`Tensor::f_view_as_real_copy_out`]; panics if the call fails.
    pub fn view_as_real_copy_out(&self, out: &Tensor) -> Tensor {
        self.f_view_as_real_copy_out(out).unwrap()
    }
19126
    /// Infallible form of [`Tensor::f_view_copy`]; panics if the call fails.
    pub fn view_copy(&self, size: impl IntList) -> Tensor {
        self.f_view_copy(size).unwrap()
    }
19130
    /// Infallible form of [`Tensor::f_view_copy_dtype`]; panics if the call fails.
    pub fn view_copy_dtype(&self, dtype: Kind) -> Tensor {
        self.f_view_copy_dtype(dtype).unwrap()
    }
19134
    /// Infallible form of [`Tensor::f_view_copy_dtype_out`]; panics if the call fails.
    pub fn view_copy_dtype_out(&self, out: &Tensor, dtype: Kind) -> Tensor {
        self.f_view_copy_dtype_out(out, dtype).unwrap()
    }
19138
    /// Infallible form of [`Tensor::f_view_copy_out`]; panics if the call fails.
    pub fn view_copy_out(&self, out: &Tensor, size: impl IntList) -> Tensor {
        self.f_view_copy_out(out, size).unwrap()
    }
19142
    /// Infallible form of [`Tensor::f_view_dtype`]; panics if the call fails.
    pub fn view_dtype(&self, dtype: Kind) -> Tensor {
        self.f_view_dtype(dtype).unwrap()
    }
19146
    /// Infallible form of [`Tensor::f_vsplit`]; panics if the call fails.
    pub fn vsplit(&self, sections: i64) -> Vec<Tensor> {
        self.f_vsplit(sections).unwrap()
    }
19150
    /// Infallible form of [`Tensor::f_vsplit_array`]; panics if the call fails.
    pub fn vsplit_array(&self, indices: impl IntList) -> Vec<Tensor> {
        self.f_vsplit_array(indices).unwrap()
    }
19154
    /// Infallible form of [`Tensor::f_vstack`]; panics if the call fails.
    pub fn vstack<T: Borrow<Tensor>>(tensors: &[T]) -> Tensor {
        Tensor::f_vstack(tensors).unwrap()
    }
19158
    /// Infallible form of [`Tensor::f_vstack_out`]; panics if the call fails.
    pub fn vstack_out<T: Borrow<Tensor>>(out: &Tensor, tensors: &[T]) -> Tensor {
        Tensor::f_vstack_out(out, tensors).unwrap()
    }
19162
    /// Infallible form of [`Tensor::f_where_`]; panics if the call fails.
    pub fn where_(condition: &Tensor) -> Vec<Tensor> {
        Tensor::f_where_(condition).unwrap()
    }
19166
    /// Infallible form of [`Tensor::f_where_scalar`]; panics if the call fails.
    pub fn where_scalar<S: Into<Scalar>>(condition: &Tensor, self_scalar: S, other: S) -> Tensor {
        Tensor::f_where_scalar(condition, self_scalar, other).unwrap()
    }
19170
    /// Infallible form of [`Tensor::f_where_scalarother`]; panics if the call fails.
    pub fn where_scalarother<S: Into<Scalar>>(&self, condition: &Tensor, other: S) -> Tensor {
        self.f_where_scalarother(condition, other).unwrap()
    }
19174
    /// Infallible form of [`Tensor::f_where_scalarself`]; panics if the call fails.
    pub fn where_scalarself<S: Into<Scalar>>(
        condition: &Tensor,
        self_scalar: S,
        other: &Tensor,
    ) -> Tensor {
        Tensor::f_where_scalarself(condition, self_scalar, other).unwrap()
    }
19182
    /// Infallible form of [`Tensor::f_where_self`]; panics if the call fails.
    pub fn where_self(&self, condition: &Tensor, other: &Tensor) -> Tensor {
        self.f_where_self(condition, other).unwrap()
    }
19186
    /// Infallible form of [`Tensor::f_where_self_out`]; panics if the call fails.
    pub fn where_self_out(&self, out: &Tensor, condition: &Tensor, other: &Tensor) -> Tensor {
        self.f_where_self_out(out, condition, other).unwrap()
    }
19190
    /// Infallible form of [`Tensor::f_xlogy`]; panics if the call fails.
    pub fn xlogy(&self, other: &Tensor) -> Tensor {
        self.f_xlogy(other).unwrap()
    }
19194
    /// Infallible form of [`Tensor::f_xlogy_`]; panics if the call fails.
    pub fn xlogy_(&mut self, other: &Tensor) -> Tensor {
        self.f_xlogy_(other).unwrap()
    }
19198
    /// Infallible form of [`Tensor::f_xlogy_outscalar_other`]; panics if the call fails.
    pub fn xlogy_outscalar_other<S: Into<Scalar>>(&self, out: &Tensor, other: S) -> Tensor {
        self.f_xlogy_outscalar_other(out, other).unwrap()
    }
19202
    /// Infallible form of [`Tensor::f_xlogy_outscalar_self`]; panics if the call fails.
    pub fn xlogy_outscalar_self<S: Into<Scalar>>(
        out: &Tensor,
        self_scalar: S,
        other: &Tensor,
    ) -> Tensor {
        Tensor::f_xlogy_outscalar_self(out, self_scalar, other).unwrap()
    }
19210
    /// Infallible form of [`Tensor::f_xlogy_outtensor`]; panics if the call fails.
    pub fn xlogy_outtensor(&self, out: &Tensor, other: &Tensor) -> Tensor {
        self.f_xlogy_outtensor(out, other).unwrap()
    }
19214
    /// Infallible form of [`Tensor::f_xlogy_scalar_other`]; panics if the call fails.
    pub fn xlogy_scalar_other<S: Into<Scalar>>(&self, other: S) -> Tensor {
        self.f_xlogy_scalar_other(other).unwrap()
    }
19218
    /// Infallible form of [`Tensor::f_xlogy_scalar_other_`]; panics if the call fails.
    pub fn xlogy_scalar_other_<S: Into<Scalar>>(&mut self, other: S) -> Tensor {
        self.f_xlogy_scalar_other_(other).unwrap()
    }
19222
    /// Infallible form of [`Tensor::f_xlogy_scalar_self`]; panics if the call fails.
    pub fn xlogy_scalar_self<S: Into<Scalar>>(self_scalar: S, other: &Tensor) -> Tensor {
        Tensor::f_xlogy_scalar_self(self_scalar, other).unwrap()
    }
19226
    /// Infallible form of [`Tensor::f_zero`]; panics if the call fails.
    pub fn zero(&self) -> Tensor {
        self.f_zero().unwrap()
    }
19230
    /// Infallible form of [`Tensor::f_zero_`]; panics if the call fails.
    pub fn zero_(&mut self) -> Tensor {
        self.f_zero_().unwrap()
    }
19234
    /// Infallible form of [`Tensor::f_zero_out`]; panics if the call fails.
    pub fn zero_out(&self, out: &Tensor) -> Tensor {
        self.f_zero_out(out).unwrap()
    }
19238
    /// Infallible form of [`Tensor::f_zeros`]; panics if the call fails.
    pub fn zeros(size: impl IntList, options: (Kind, Device)) -> Tensor {
        Tensor::f_zeros(size, options).unwrap()
    }
19242
    /// Infallible form of [`Tensor::f_zeros_like`]; panics if the call fails.
    pub fn zeros_like(&self) -> Tensor {
        self.f_zeros_like().unwrap()
    }
19246
    /// Infallible form of [`Tensor::f_zeros_like_out`]; panics if the call fails.
    pub fn zeros_like_out(&self, out: &Tensor) -> Tensor {
        self.f_zeros_like_out(out).unwrap()
    }
19250
    /// Infallible form of [`Tensor::f_zeros_out`]; panics if the call fails.
    pub fn zeros_out(out: &Tensor, size: impl IntList) -> Tensor {
        Tensor::f_zeros_out(out, size).unwrap()
    }
19254}