rstsr_core/tensor/manuplication/reshape.rs
1use crate::prelude_dev::*;
2
3/* #region reshape args */
4
/// Reshape arguments.
///
/// Controls how the reshape family of functions ([`reshape_with_args`] and friends) reads the
/// input data and whether they may clone the underlying buffer.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub struct ReshapeArgs {
    /// The indexing order for **reading**. This also affects the order for writing.
    /// By default, the device's default order is used.
    pub order: Option<TensorOrder>,

    /// Whether to clone data when the new shape is not compatible with the original shape.
    ///
    /// - True: the tensor will always be copied, with order specified.
    /// - False: fail if the new shape is not compatible with the original shape (`Err` from the
    ///   fallible `*_f` variants; the infallible variants unwrap and hence panic).
    /// - None: the tensor will be copied only if necessary.
    pub copy: Option<bool>,
}
19
20impl From<TensorOrder> for ReshapeArgs {
21 fn from(order: TensorOrder) -> Self {
22 Self { order: Some(order), copy: None }
23 }
24}
25
26impl From<bool> for ReshapeArgs {
27 fn from(copy: bool) -> Self {
28 Self { order: None, copy: Some(copy) }
29 }
30}
31
32impl From<(TensorOrder, bool)> for ReshapeArgs {
33 fn from(args: (TensorOrder, bool)) -> Self {
34 let (order, copy) = args;
35 Self { order: Some(order), copy: Some(copy) }
36 }
37}
38
39impl From<(TensorOrder, Option<bool>)> for ReshapeArgs {
40 fn from(args: (TensorOrder, Option<bool>)) -> Self {
41 let (order, copy) = args;
42 Self { order: Some(order), copy }
43 }
44}
45
46impl From<Option<bool>> for ReshapeArgs {
47 fn from(copy: Option<bool>) -> Self {
48 Self { order: None, copy }
49 }
50}
51
52/* #endregion */
53
54/* #region reshapeable */
55
56/// Check if this tensor can be reshaped to a new shape without explicitly copying underlying data.
57///
58/// Please note this function returns `Result` instead of boolean.
59///
60/// - If shape not match, this function will raise error.
61/// - If shape match but data need to be copied, return `Ok(None)`.
62/// - If everything is fine, return `Ok(Some(layout_out))`.
63///
64/// For order, row-major and col-major behaves differently.
65///
66/// # See also
67///
68/// - [`reshape`]: the actual function for tensor reshaping.
69/// - [`layout_reshapeable`]: The underlying function for checking layout compatibility for
70/// reshaping, input by shape instead of tensor.
71pub fn reshapeable_without_copy<R, T, B, D>(
72 tensor: &TensorAny<R, T, B, D>,
73 shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
74 order: Option<TensorOrder>,
75) -> Result<Option<Layout<IxD>>>
76where
77 R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw>,
78 D: DimAPI,
79 B: DeviceAPI<T>,
80{
81 let shape = reshape_substitute_negatives(shape.try_into().map_err(Into::into)?.as_ref(), tensor.size())?;
82 let order = order.unwrap_or_else(|| tensor.device().default_order());
83 layout_reshapeable(&tensor.layout().to_dim()?, &shape, order)
84}
85
impl<R, T, B, D> TensorAny<R, T, B, D>
where
    R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw>,
    D: DimAPI,
    B: DeviceAPI<T>,
{
    /// Check if this tensor can be reshaped to a new shape without explicitly copying underlying
    /// data.
    ///
    /// - Error: the shapes do not match.
    /// - `Ok(None)`: the shapes match but data would need to be copied.
    /// - `Ok(Some(layout))`: reshape is possible without any copy.
    ///
    /// See also [`reshapeable_without_copy`].
    pub fn reshapeable_without_copy(
        &self,
        shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
        order: Option<TensorOrder>,
    ) -> Result<Option<Layout<IxD>>> {
        // method form simply delegates to the free function
        reshapeable_without_copy(self, shape, order)
    }
}
104
105/* #endregion */
106
107/* #region reshape_with_args */
108
109/// Reshapes the given tensor to the specified shape, with argument specifying the order and whether
110/// to copy data.
111///
112/// See also [`reshape_with_args`].
113pub fn change_shape_with_args_f<'a, R, T, B, D>(
114 tensor: TensorAny<R, T, B, D>,
115 shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
116 args: impl Into<ReshapeArgs>,
117) -> Result<TensorCow<'a, T, B, IxD>>
118where
119 R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw> + DataIntoCowAPI<'a>,
120 D: DimAPI,
121 B: DeviceAPI<T> + DeviceRawAPI<MaybeUninit<T>> + DeviceCreationAnyAPI<T> + OpAssignArbitaryAPI<T, IxD, D>,
122{
123 // own shape, this is cheap operation
124 let shape_new = reshape_substitute_negatives(shape.try_into().map_err(Into::into)?.as_ref(), tensor.size())?;
125 let ReshapeArgs { order, copy } = args.into();
126 let order = order.unwrap_or(tensor.device().default_order());
127
128 // rust 2021 does not allow chain if let
129 if copy.is_none() || copy == Some(false) {
130 if let Some(layout_new) = layout_reshapeable(&tensor.layout().to_dim()?, &shape_new, order)? {
131 // shape does not need to be changed
132 let (storage, _) = tensor.into_raw_parts();
133 let layout = layout_new.into_dim::<IxD>()?;
134 return unsafe { Ok(TensorBase::new_unchecked(storage, layout).into_cow()) };
135 }
136 }
137
138 // if not allow copy, but layout is not compatible, raise error
139 if copy == Some(false) {
140 rstsr_raise!(
141 InvalidValue,
142 "copy is set to false in reshape, but layout {:?} is not compatible with shape {shape_new:?} and order {order:?}",
143 tensor.layout(),
144 )?
145 }
146
147 // clone underlying data by assign_arbitary
148 // dev note: assign_arbitary_uninit depends on the iteration order of device
149 let (storage, layout) = tensor.into_raw_parts();
150 let device = storage.device();
151 let layout_new = match order {
152 RowMajor => shape_new.new_c_contig(None),
153 ColMajor => shape_new.new_f_contig(None),
154 };
155 let mut storage_new = device.uninit_impl(layout_new.size())?;
156 if device.default_order() == order {
157 device.assign_arbitary_uninit(storage_new.raw_mut(), &layout_new, storage.raw(), &layout)?;
158 } else {
159 let mut device = device.clone();
160 device.set_default_order(order);
161 device.assign_arbitary_uninit(storage_new.raw_mut(), &layout_new, storage.raw(), &layout)?;
162 }
163 let storage_new = unsafe { B::assume_init_impl(storage_new)? };
164 return unsafe { Ok(TensorBase::new_unchecked(storage_new, layout_new).into_cow()) };
165}
166
/// Reshapes the given tensor to the specified shape, with argument specifying the order and whether
/// to copy data.
///
/// Infallible counterpart of [`change_shape_with_args_f`] (the result is unwrapped).
///
/// See also [`reshape_with_args`].
pub fn change_shape_with_args<'a, R, T, B, D>(
    tensor: TensorAny<R, T, B, D>,
    shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
    args: impl Into<ReshapeArgs>,
) -> TensorCow<'a, T, B, IxD>
where
    R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw> + DataIntoCowAPI<'a>,
    D: DimAPI,
    B: DeviceAPI<T> + DeviceRawAPI<MaybeUninit<T>> + DeviceCreationAnyAPI<T> + OpAssignArbitaryAPI<T, IxD, D>,
{
    change_shape_with_args_f(tensor, shape, args).rstsr_unwrap()
}
183
184/// Reshapes the given tensor to the specified shape, with argument specifying the order and whether
185/// to copy data.
186///
187/// See also [`reshape_with_args`].
188pub fn into_shape_with_args_f<'a, R, T, B, D>(
189 tensor: TensorAny<R, T, B, D>,
190 shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
191 args: impl Into<ReshapeArgs>,
192) -> Result<Tensor<T, B, IxD>>
193where
194 R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw> + DataIntoCowAPI<'a>,
195 D: DimAPI,
196 T: Clone,
197 B: DeviceAPI<T>
198 + DeviceRawAPI<MaybeUninit<T>>
199 + DeviceCreationAnyAPI<T>
200 + OpAssignArbitaryAPI<T, IxD, D>
201 + OpAssignAPI<T, IxD>,
202 <B as DeviceRawAPI<T>>::Raw: Clone + 'a,
203{
204 change_shape_with_args_f(tensor, shape, args).map(|v| v.into_owned())
205}
206
/// Reshapes the given tensor to the specified shape, with argument specifying the order and whether
/// to copy data.
///
/// Infallible counterpart of [`into_shape_with_args_f`] (the result is unwrapped).
///
/// See also [`reshape_with_args`].
pub fn into_shape_with_args<'a, R, T, B, D>(
    tensor: TensorAny<R, T, B, D>,
    shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
    args: impl Into<ReshapeArgs>,
) -> Tensor<T, B, IxD>
where
    R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw> + DataIntoCowAPI<'a>,
    D: DimAPI,
    T: Clone,
    B: DeviceAPI<T>
        + DeviceRawAPI<MaybeUninit<T>>
        + DeviceCreationAnyAPI<T>
        + OpAssignArbitaryAPI<T, IxD, D>
        + OpAssignAPI<T, IxD>,
    <B as DeviceRawAPI<T>>::Raw: Clone + 'a,
{
    into_shape_with_args_f(tensor, shape, args).rstsr_unwrap()
}
229
230/// Reshapes the given tensor to the specified shape, with argument specifying the order and whether
231/// to copy data.
232///
233/// See also [`reshape_with_args`].
234pub fn reshape_with_args_f<'a, R, T, B, D>(
235 tensor: &'a TensorAny<R, T, B, D>,
236 shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
237 args: impl Into<ReshapeArgs>,
238) -> Result<TensorCow<'a, T, B, IxD>>
239where
240 R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw> + DataIntoCowAPI<'a>,
241 D: DimAPI,
242 B: DeviceAPI<T> + DeviceRawAPI<MaybeUninit<T>> + DeviceCreationAnyAPI<T> + OpAssignArbitaryAPI<T, IxD, D>,
243{
244 change_shape_with_args_f(tensor.view(), shape, args)
245}
246
247/// Reshapes the given tensor to the specified shape, with argument specifying the order and whether
248/// to copy data.
249///
250/// For usual users, please consider using [`reshape`] (take reference of tensor) or [`into_shape`]
251/// (take ownership of tensor) instead, which are simpler interfaces to reshaping.
252///
253/// <div class="warning">
254///
255/// **Row/Column Major Notice**
256///
257/// This function behaves differently on default orders ([`RowMajor`] and [`ColMajor`]) of device.
258///
259/// </div>
260///
261/// # Parameters
262///
263/// - `tensor`: [`TensorAny<R, T, B, D>`]
264///
265/// - The input tensor to be reshaped.
266///
267/// - `shape`: TryInto [`AxesIndex<isize>`]
268///
269/// - The new shape of the tensor.
270/// - Can be a single integer, or a list/tuple of integers.
271/// - Negative values are supported and indicate counting dimensions from the back.
272/// - Overloads:
273///
274/// - integer: 1-D shape with a single dimension.
275/// - vector/array/tuple of integers: N-D shape with N dimensions. For tuples,
276/// mixed-signed/unsigned integers are supported.
277///
278/// - `args`: Into [`ReshapeArgs`]
279///
280/// - `order`: The indexing order for **reading** (similar to changing the default-order of
281/// device). This also affects the order for writing. [`RowMajor`] and [`ColMajor`] are
282/// supported. By default, the device's default order is used.
283///
284/// - `copy`: Whether to clone data when the new shape is not compatible with the original shape.
285///
286/// - True: The tensor will always be copied. The output tensor will be contiguous with the
287/// specified order.
288/// - False: Panic if the new shape is not compatible with the original shape.
289/// - None (default): The tensor will be copied only if necessary. If copied, the output tensor
290/// will be contiguous with the specified order. Copy will be avoided if the new shape is
291/// compatible with the original layout, even if the tensor is not contiguous.
292///
293/// - Overloads:
294///
295/// - copy: [`bool`]
296/// - copy: [`Option<bool>`] (None means default behavior)
297/// - order: [`TensorOrder`]
298/// - (order: [`TensorOrder`], copy: [`bool`])
299/// - (order: [`TensorOrder`], copy: [`Option<bool>`])
300///
301/// # Examples
302///
303/// You can specify the order for reading the tensor by argument `order`.
304///
305/// Following is an example of row-major reshape. This is independent to the original default-layout
306/// of device.
307///
308/// ```rust
309/// # use rstsr::prelude::*;
310/// # let mut device = DeviceCpu::default();
311/// # device.set_default_order(RowMajor);
312/// let a = rt::tensor_from_nested!([[0, 1, 2], [3, 4, 5]], &device);
313/// println!("{a}");
314/// // [[ 0 1 2]
315/// // [ 3 4 5]]
316/// let a_row = rt::tensor_from_nested!([[0, 1], [2, 3], [4, 5]], &device);
317/// println!("{a_row}");
318/// // [[ 0 1]
319/// // [ 2 3]
320/// // [ 4 5]]
321/// ```
322///
323/// And here is an example of col-major reshape.
324///
325/// ```rust
326/// # use rstsr::prelude::*;
327/// # let mut device = DeviceCpu::default();
328/// # device.set_default_order(RowMajor);
329/// let a = rt::tensor_from_nested!([[0, 1, 2], [3, 4, 5]], &device);
330/// println!("{a}");
331/// // [[ 0 1 2]
332/// // [ 3 4 5]]
333/// let a_col = rt::tensor_from_nested!([[0, 4], [3, 2], [1, 5]], &device);
334/// println!("{a_col}");
335/// // [[ 0 4]
336/// // [ 3 2]
337/// // [ 1 5]]
338/// ```
339///
340/// The following example shows that if `copy = false`, then an error will be raised when the new
341/// shape is not compatible with the original shape. Given a strided tensor:
342///
343/// ```rust
344/// # use rstsr::prelude::*;
345/// # let mut device = DeviceCpu::default();
346/// # device.set_default_order(RowMajor);
347/// // shape: (4, 6, 9), stride: (72, 9, 1), not c-contiguous
348/// // contiguous situation: (4, [6, 9]), or say the last two dimensions are contiguous
349/// let a = rt::arange((288, &device)).into_shape([4, 8, 9]).into_slice((.., 0..6, ..));
350/// assert_eq!(a.shape(), &[4, 6, 9]);
351/// assert_eq!(a.stride(), &[72, 9, 1]);
352/// assert!(!a.c_contig());
353/// ```
354///
355/// The following example shows the reshaping does not explicitly clones data, and `copy = false`
356/// does not raise error.
357///
358/// ```rust
359/// # use rstsr::prelude::*;
360/// # let mut device = DeviceCpu::default();
361/// # device.set_default_order(RowMajor);
362/// let a = rt::arange((288, &device)).into_shape([4, 8, 9]).into_slice((.., 0..6, ..));
363/// // split a single dimension into multiple dimensions
364/// assert!(a.reshape_with_args_f([2, 2, 6, 9], false).is_ok()); // (4, 6, 9) -> ([2, 2], 6, 9)
365/// assert!(a.reshape_with_args_f([4, 3, 2, 9], false).is_ok()); // (4, 6, 9) -> (4, [3, 2], 9)
366/// assert!(a.reshape_with_args_f([4, 2, 3, 3, 3], false).is_ok()); // (4, 6, 9) -> (4, [2, 3], [3, 3])
367///
368/// // merge contiguous dimensions into a single dimension
369/// assert!(a.reshape_with_args_f([4, 54], false).is_ok()); // (4, 6, 9) -> (4, 6 * 9)
370///
371/// // merge contiguous dimensions and then split
372/// assert!(a.reshape_with_args_f([4, 3, 6, 3], false).is_ok()); // (4, [6, 9]) -> (4, [3, 6, 3])
373/// ```
374///
375/// However, the following example will raise error due to shape-incompatible. Using `copy = None`
376/// or `copy = true` will work, but the data will be cloned.
377///
378/// ```rust
379/// # use rstsr::prelude::*;
380/// # let mut device = DeviceCpu::default();
381/// # device.set_default_order(RowMajor);
382/// let a = rt::arange((288, &device)).into_shape([4, 8, 9]).into_slice((.., 0..6, ..));
383/// // merge non-contiguous dimensions
384/// assert!(a.reshape_with_args_f([24, 9], false).is_err()); // (4, 6, 9) -> (4 * 6, 9)
385/// assert!(a.reshape_with_args_f([-1], false).is_err()); // (4, 6, 9) -> (4 * 6 * 9)
386/// assert!(a.reshape_with_args_f([12, 2, 9], false).is_err()); // (4, 6, 9) -> (4 * [3, 2], 9)
387/// ```
388///
389/// # Notes of API accordance
390///
391/// - Array-API: `reshape(x, /, shape, *, copy=None)` ([`reshape`](https://data-apis.org/array-api/latest/API_specification/generated/array_api.reshape.html))
392/// - NumPy: `reshape(a, /, shape, order='C', *, copy=False)` ([`numpy.reshape`](https://numpy.org/doc/stable/reference/generated/numpy.reshape.html)):
393/// - RSTSR: `rt::reshape_with_args(tensor, shape, (order, copy))`
394/// - RSTSR: `rt::reshape(tensor, shape)`
395///
396/// Please note that the `order` argument in RSTSR does not support NumPy's `'A'` (order='A' means
397/// 'F' if the array is Fortran contiguous, 'C' otherwise in NumPy).
398///
399/// # See also
400///
401/// ## Similar function from other crates/libraries
402///
403/// - Python Array API standard: [`reshape`](https://data-apis.org/array-api/2024.12/API_specification/generated/array_api.reshape.html)
404/// - NumPy: [`reshape`](https://numpy.org/doc/stable/reference/generated/numpy.reshape.html)
405/// - ndarray: [`to_shape`](https://docs.rs/ndarray/latest/ndarray/struct.ArrayBase.html#method.to_shape)
406///
407/// ## Related functions in RSTSR
408///
409/// - [`reshape`]: simpler interface for reshaping.
410/// - [`reshapeable_without_copy`]: Check whether the layout is compatible with the new shape.
411/// - [`to_layout`]: Return a tensor with the specified layout.
412/// - [`to_contig`]: Return an owned contiguous tensor.
413///
414/// ## Variants of this function
415///
416/// - [`reshape_with_args`] / [`reshape_with_args_f`]: Taking reference and returning Cow.
417/// - [`into_shape_with_args`] / [`into_shape_with_args_f`]: Taking ownership and returning owned
418/// tensor.
419/// - [`change_shape_with_args`] / [`change_shape_with_args_f`]: Taking ownership and returning Cow.
420/// - [`to_shape_with_args`] / [`to_shape_with_args_f`]: Alias to [`reshape_with_args`] /
421/// [`reshape_with_args_f`].
422/// - Associated methods on [`TensorAny`]:
423///
424/// - [`Tensor::reshape_with_args`] / [`Tensor::reshape_with_args_f`]
425/// - [`Tensor::into_shape_with_args`] / [`Tensor::into_shape_with_args_f`]
426/// - [`Tensor::change_shape_with_args`] / [`Tensor::change_shape_with_args_f`]
427/// - [`Tensor::to_shape_with_args`] / [`Tensor::to_shape_with_args_f`]
pub fn reshape_with_args<'a, R, T, B, D>(
    tensor: &'a TensorAny<R, T, B, D>,
    shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
    args: impl Into<ReshapeArgs>,
) -> TensorCow<'a, T, B, IxD>
where
    R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw> + DataIntoCowAPI<'a>,
    D: DimAPI,
    B: DeviceAPI<T> + DeviceRawAPI<MaybeUninit<T>> + DeviceCreationAnyAPI<T> + OpAssignArbitaryAPI<T, IxD, D>,
{
    // delegate to the fallible variant and unwrap the result
    reshape_with_args_f(tensor, shape, args).rstsr_unwrap()
}
440
441pub use reshape_with_args as to_shape_with_args;
442pub use reshape_with_args_f as to_shape_with_args_f;
443
impl<'a, R, T, B, D> TensorAny<R, T, B, D>
where
    R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw> + DataIntoCowAPI<'a>,
    D: DimAPI,
    B: DeviceAPI<T> + DeviceRawAPI<MaybeUninit<T>> + DeviceCreationAnyAPI<T> + OpAssignArbitaryAPI<T, IxD, D>,
    T: Clone,
{
    /// Reshapes the given tensor to the specified shape, with argument specifying the order and
    /// whether to copy data.
    ///
    /// Consumes `self` and returns a Cow; fallible variant.
    ///
    /// See also [`reshape_with_args`].
    pub fn change_shape_with_args_f(
        self,
        shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
        args: impl Into<ReshapeArgs>,
    ) -> Result<TensorCow<'a, T, B, IxD>> {
        change_shape_with_args_f(self, shape, args)
    }

    /// Reshapes the given tensor to the specified shape, with argument specifying the order and
    /// whether to copy data.
    ///
    /// Consumes `self` and returns a Cow; infallible variant.
    ///
    /// See also [`reshape_with_args`].
    pub fn change_shape_with_args(
        self,
        shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
        args: impl Into<ReshapeArgs>,
    ) -> TensorCow<'a, T, B, IxD> {
        change_shape_with_args(self, shape, args)
    }

    /// Reshapes the given tensor to the specified shape, with argument specifying the order and
    /// whether to copy data.
    ///
    /// Consumes `self` and returns an owned tensor; fallible variant.
    ///
    /// See also [`reshape_with_args`].
    pub fn into_shape_with_args_f(
        self,
        shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
        args: impl Into<ReshapeArgs>,
    ) -> Result<Tensor<T, B, IxD>>
    where
        <B as DeviceRawAPI<T>>::Raw: Clone + 'a,
        B: OpAssignAPI<T, IxD>,
    {
        into_shape_with_args_f(self, shape, args)
    }

    /// Reshapes the given tensor to the specified shape, with argument specifying the order and
    /// whether to copy data.
    ///
    /// Consumes `self` and returns an owned tensor; infallible variant.
    ///
    /// See also [`reshape_with_args`].
    pub fn into_shape_with_args(
        self,
        shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
        args: impl Into<ReshapeArgs>,
    ) -> Tensor<T, B, IxD>
    where
        <B as DeviceRawAPI<T>>::Raw: Clone + 'a,
        B: OpAssignAPI<T, IxD>,
    {
        into_shape_with_args(self, shape, args)
    }

    /// Reshapes the given tensor to the specified shape, with argument specifying the order and
    /// whether to copy data.
    ///
    /// Borrows `self` and returns a Cow; infallible variant.
    ///
    /// See also [`reshape_with_args`].
    pub fn reshape_with_args(
        &'a self,
        shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
        args: impl Into<ReshapeArgs>,
    ) -> TensorCow<'a, T, B, IxD> {
        reshape_with_args(self, shape, args)
    }

    /// Reshapes the given tensor to the specified shape, with argument specifying the order and
    /// whether to copy data.
    ///
    /// Borrows `self` and returns a Cow; fallible variant.
    ///
    /// See also [`reshape_with_args`].
    pub fn reshape_with_args_f(
        &'a self,
        shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
        args: impl Into<ReshapeArgs>,
    ) -> Result<TensorCow<'a, T, B, IxD>> {
        reshape_with_args_f(self, shape, args)
    }

    /// Reshapes the given tensor to the specified shape, with argument specifying the order and
    /// whether to copy data.
    ///
    /// Alias of [`TensorAny::reshape_with_args`].
    ///
    /// See also [`reshape_with_args`].
    pub fn to_shape_with_args(
        &'a self,
        shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
        args: impl Into<ReshapeArgs>,
    ) -> TensorCow<'a, T, B, IxD> {
        to_shape_with_args(self, shape, args)
    }

    /// Reshapes the given tensor to the specified shape, with argument specifying the order and
    /// whether to copy data.
    ///
    /// Alias of [`TensorAny::reshape_with_args_f`].
    ///
    /// See also [`reshape_with_args`].
    pub fn to_shape_with_args_f(
        &'a self,
        shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
        args: impl Into<ReshapeArgs>,
    ) -> Result<TensorCow<'a, T, B, IxD>> {
        to_shape_with_args_f(self, shape, args)
    }
}
555
556/* #endregion */
557
558/* #region reshape */
559
/// Reshapes the given tensor to the specified shape.
///
/// Uses default [`ReshapeArgs`] (device default order; copy only when necessary).
///
/// See also [`reshape`], [`into_shape`], [`change_shape`] and [`reshape_with_args`].
pub fn change_shape_f<'a, R, T, B, D>(
    tensor: TensorAny<R, T, B, D>,
    shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
) -> Result<TensorCow<'a, T, B, IxD>>
where
    R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw> + DataIntoCowAPI<'a>,
    D: DimAPI,
    B: DeviceAPI<T> + DeviceRawAPI<MaybeUninit<T>> + DeviceCreationAnyAPI<T> + OpAssignArbitaryAPI<T, IxD, D>,
{
    // `None` args: device default order, copy only when necessary
    change_shape_with_args_f(tensor, shape, None)
}
574
575/// Reshapes the given tensor to the specified shape.
576///
577/// This function is not intended to be used by usual users. Please consider using
578/// [`reshape`] (take reference of tensor) or [`into_shape`] (take ownership of tensor)
579/// instead.
580///
581/// <div class="warning">
582///
583/// **Row/Column Major Notice**
584///
585/// This function behaves differently on default orders ([`RowMajor`] and [`ColMajor`]) of device.
586///
587/// </div>
588///
589/// # Parameters
590///
591/// - `tensor`: [`TensorAny<R, T, B, D>`]
592///
593/// - The input tensor to be reshaped.
594/// - Ownership of input tensor is taken.
595///
596/// - `shape`: TryInto [`AxesIndex<isize>`]
597///
598/// - Position in the expanded axes where the new axis (or axes) is placed.
599/// - Can be a single integer, or a list/tuple of integers.
600/// - Negative values are supported and indicate counting dimensions from the back.
601///
602/// # Returns
603///
604/// - [`TensorCow<'a, T, B, IxD>`](TensorCow)
605///
606/// - The reshaped tensor.
607/// - This function will try to avoid data cloning if possible.
608///
609/// - If layout-compatible, depending on whether the input tensor is owned or other cases,
610/// either a view or owned tensor will be returned.
611/// - If layout-not-compatible, an owned tensor will be returned, cloning the data.
612/// - Cow (Clone-on-Write) semantics is used for representing either view or owned tensor.
613///
614/// This function is different to [`reshape`], in that it takes ownership of the input
615/// tensor.
616///
617/// This function is also different to [`into_shape`], in that it may return a view, if the input
618/// tensor also have the ownership of tensor view, and the layout is compatible.
619///
620/// # See also
621///
622/// Refer to [`reshape`] for more details and examples.
pub fn change_shape<'a, R, T, B, D>(
    tensor: TensorAny<R, T, B, D>,
    shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
) -> TensorCow<'a, T, B, IxD>
where
    R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw> + DataIntoCowAPI<'a>,
    D: DimAPI,
    B: DeviceAPI<T> + DeviceRawAPI<MaybeUninit<T>> + DeviceCreationAnyAPI<T> + OpAssignArbitaryAPI<T, IxD, D>,
{
    // `None` args: device default order, copy only when necessary
    change_shape_with_args(tensor, shape, None)
}
634
/// Reshapes the given tensor to the specified shape.
///
/// Uses default [`ReshapeArgs`] (device default order; copy only when necessary).
///
/// See also [`reshape`], [`into_shape`], [`change_shape`] and [`reshape_with_args`].
pub fn into_shape_f<'a, R, T, B, D>(
    tensor: TensorAny<R, T, B, D>,
    shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
) -> Result<Tensor<T, B, IxD>>
where
    R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw> + DataIntoCowAPI<'a>,
    D: DimAPI,
    T: Clone,
    B: DeviceAPI<T>
        + DeviceRawAPI<MaybeUninit<T>>
        + DeviceCreationAnyAPI<T>
        + OpAssignArbitaryAPI<T, IxD, D>
        + OpAssignAPI<T, IxD>,
    <B as DeviceRawAPI<T>>::Raw: Clone + 'a,
{
    // `None` args: device default order, copy only when necessary
    into_shape_with_args_f(tensor, shape, None)
}
655
656/// Reshapes the given tensor to the specified shape.
657///
658/// <div class="warning">
659///
660/// **Row/Column Major Notice**
661///
662/// This function behaves differently on default orders ([`RowMajor`] and [`ColMajor`]) of device.
663///
664/// </div>
665///
666/// # Parameters
667///
668/// - `tensor`: [`TensorAny<R, T, B, D>`]
669///
670/// - The input tensor to be reshaped.
671/// - Ownership of input tensor is taken.
672///
673/// - `shape`: TryInto [`AxesIndex<isize>`]
674///
675/// - The new shape of the tensor.
676/// - Can be a single integer, or a list/tuple of integers.
677/// - Negative values are supported and indicate counting dimensions from the back.
678/// - Overloads:
679/// - integer: 1-D shape with a single dimension.
680/// - vector/array/tuple of integers: N-D shape with N dimensions. For tuples,
681/// mixed-signed/unsigned integers are supported.
682///
683/// # Returns
684///
685/// - [`Tensor<T, B, IxD>`]
686///
687/// - The reshaped tensor.
688/// - This function will try to avoid data cloning if possible, but with strict conditions:
689///
690/// - Layout-compatible after reshaping;
691/// - Input tensor owns the underlying data (i.e., not a view);
692/// - The input tensor is compact in memory (i.e., the underlying data does not have redundant
693/// elements; size of tensor exactly matches the length of underlying data).
694///
695/// This function is different to [`change_shape`](change_shape()) and [`reshape`], in
696/// that it takes ownership of the input tensor, and always returns an owned tensor.
697///
698/// # Examples
699///
700/// ```rust
701/// use rstsr::prelude::*;
702/// let a = rt::arange(6).into_shape([2, 3]);
703/// ```
704///
705/// # Elaborated examples
706///
707/// Here is some showcases that demonstrate when data cloning happens or not. All examples are
708/// row-major.
709///
710/// A first case is a tensor that is not fully contiguous (containing negative strides), but the
711/// tensor is compact (size of tensor is the same to the length of underlying data). In this case,
712/// if the new shape is compatible, no data cloning happens:
713///
714/// ```rust
715/// # use rstsr::prelude::*;
716/// # let mut device = DeviceCpu::default();
717/// # device.set_default_order(RowMajor);
718/// // shape: (4, 6, 9), stride: (-54, 9, 1), not c-contiguous
719/// // contiguous situation: (4, [6, 9]); the first dimension is reversed
720/// let a = rt::arange((216, &device)).into_shape([4, 6, 9]).into_flip(0);
721/// let a_ptr = a.raw().as_ptr();
722/// let b = a.into_shape([4, 54]);
723/// let b_ptr = b.raw().as_ptr();
724/// assert_eq!(a_ptr, b_ptr); // contiguous dims merged, no data clone happened
725/// ```
726///
727/// However, if the new shape is not compatible, data cloning will happen:
728///
729/// ```rust
730/// # use rstsr::prelude::*;
731/// # let mut device = DeviceCpu::default();
732/// # device.set_default_order(RowMajor);
733/// // shape: (4, 6, 9), stride: (-54, 9, 1), not c-contiguous
734/// // contiguous situation: (4, [6, 9]); the first dimension is reversed
735/// let a = rt::arange((216, &device)).into_shape([4, 6, 9]).into_flip(0);
736/// let a_ptr = a.raw().as_ptr();
737/// let b = a.into_shape([24, 9]);
738/// let b_ptr = b.raw().as_ptr();
739/// assert_ne!(a_ptr, b_ptr); // layout not compatible, data clone happened
740/// ```
741///
742/// Another case is a tensor that is not compact (size of tensor is less than the length of
743/// underlying data). In this case, even if the new shape is compatible, data cloning will happen:
744///
745/// ```rust
746/// # use rstsr::prelude::*;
747/// # let mut device = DeviceCpu::default();
748/// # device.set_default_order(RowMajor);
749/// // shape: (4, 6, 9), stride: (72, 9, 1), not c-contiguous
750/// // contiguous situation: (4, [6, 9]), or say the last two dimensions are contiguous
751/// let a = rt::arange((288, &device)).into_shape([4, 8, 9]).into_slice((.., 0..6, ..));
752/// let a_ptr = a.raw().as_ptr();
753/// let b = a.into_shape([4, 54]);
754/// let b_ptr = b.raw().as_ptr();
755/// assert_ne!(a_ptr, b_ptr); // layout-compatible, but input tensor is not compact (216 < 288)
756/// ```
757///
758/// # See also
759///
760/// Refer to [`reshape`] for more details and examples.
pub fn into_shape<'a, R, T, B, D>(
    tensor: TensorAny<R, T, B, D>,
    shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
) -> Tensor<T, B, IxD>
where
    R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw> + DataIntoCowAPI<'a>,
    D: DimAPI,
    T: Clone,
    B: DeviceAPI<T>
        + DeviceRawAPI<MaybeUninit<T>>
        + DeviceCreationAnyAPI<T>
        + OpAssignArbitaryAPI<T, IxD, D>
        + OpAssignAPI<T, IxD>,
    <B as DeviceRawAPI<T>>::Raw: Clone + 'a,
{
    // `None` args: device default order, copy only when necessary
    into_shape_with_args(tensor, shape, None)
}
778
/// Reshapes the given tensor to the specified shape.
///
/// Uses default [`ReshapeArgs`] (device default order; copy only when necessary).
///
/// See also [`reshape`], [`into_shape`], [`change_shape`] and [`reshape_with_args`].
pub fn reshape_f<'a, R, T, B, D>(
    tensor: &'a TensorAny<R, T, B, D>,
    shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
) -> Result<TensorCow<'a, T, B, IxD>>
where
    R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw> + DataIntoCowAPI<'a>,
    D: DimAPI,
    B: DeviceAPI<T> + DeviceRawAPI<MaybeUninit<T>> + DeviceCreationAnyAPI<T> + OpAssignArbitaryAPI<T, IxD, D>,
{
    // `None` args: device default order, copy only when necessary
    reshape_with_args_f(tensor, shape, None)
}
793
794/// Reshapes the given tensor to the specified shape.
795///
796/// Advanced arguments can be specified by function [`reshape_with_args`] if you want to control the
797/// order for reading the tensor, and whether to copy data.
798///
799/// <div class="warning">
800///
801/// **Row/Column Major Notice**
802///
803/// This function behaves differently on default orders ([`RowMajor`] and [`ColMajor`]) of device.
804///
805/// </div>
806///
807/// # Parameters
808///
809/// - `tensor`: [`&TensorAny<R, T, B, D>`](TensorAny)
810///
811/// - The input tensor to be reshaped.
812///
813/// - `shape`: TryInto [`AxesIndex<isize>`]
814///
815/// - The new shape of the tensor.
816/// - Can be a single integer, or a list/tuple of integers.
817/// - Negative values are supported and indicate counting dimensions from the back.
818/// - Overloads:
819/// - integer: 1-D shape with a single dimension.
820/// - vector/array/tuple of integers: N-D shape with N dimensions. For tuples,
821/// mixed-signed/unsigned integers are supported.
822///
823/// # Returns
824///
825/// - [`TensorCow<'a, T, B, IxD>`](TensorCow)
826///
827/// - The reshaped tensor.
828/// - This function will try to avoid data cloning if possible.
829///
830/// - If layout-compatible, a view will be returned.
831/// - If shape-not-compatible, an owned tensor will be returned, cloning the data.
832/// - Cow (Clone-on-Write) semantics is used for representing either view or owned tensor.
833///
834/// # Examples
835///
836/// In row-major order, to reshape a vector of (6, ) to a matrix of (2, 3):
837/// ```rust
838/// # use rstsr::prelude::*;
839/// # let mut device = DeviceCpu::default();
840/// # device.set_default_order(RowMajor);
841/// let a = rt::arange((6, &device));
842/// let result = a.reshape([2, 3]);
843/// println!("{result}");
844/// // [[ 0 1 2]
845/// // [ 3 4 5]]
846/// ```
847///
848/// You can also use negative dimension, where -1 means "infer this dimension":
849///
850/// ```rust
851/// # use rstsr::prelude::*;
852/// # let mut device = DeviceCpu::default();
853/// # device.set_default_order(RowMajor);
854/// // in this case, unspecified axes length is inferred as 6 / 3 = 2
855/// let a = rt::arange((6, &device));
856/// let result = a.reshape([3, -1]);
857/// println!("{result}");
858/// // [[ 0 1]
859/// // [ 2 3]
860/// // [ 4 5]]
861/// ```
862///
863/// # Ownership Semantics between [`reshape`], [`into_shape`] and [`change_shape`]
864///
865/// [`into_shape`] and [`change_shape`] take ownership of the input tensor. They are important
866/// variants to this function [`reshape`].
867///
868/// | Function | Input Ownership | Output Ownership | Cloning Condition |
869/// |--|--|--|--|
870/// | [`reshape`] | Borrowed <br> [`&TensorAny`](TensorAny) | View <br> [`TensorCow`] with [`DataCow::Ref`] | not cloned (layout-compatible) |
871/// | | | Owned <br> [`TensorCow`] with [`DataCow::Owned`] | cloned (layout-not-compatible) |
872/// | [`into_shape`] | Owned <br> [`Tensor`] | Owned <br> [`Tensor`] | not cloned (layout-compatible, input tensor owns data, input tensor is compact) |
873/// | | | Owned <br> [`Tensor`] | cloned (otherwise) |
874/// | | Otherwise <br> [`TensorAny`] | Owned <br> [`Tensor`] | cloned (always) |
875/// | [`change_shape`] | Owned <br> [`Tensor`] | Owned <br> [`TensorCow`] with [`DataCow::Owned`] | not cloned (layout-compatible, input tensor owns data, input tensor is compact) |
876/// | | | Owned <br> [`TensorCow`] with [`DataCow::Owned`] | cloned (otherwise) |
877/// | | Otherwise <br> [`TensorAny`] | View <br> [`TensorCow`] with [`DataCow::Ref`] | not cloned (layout-compatible) |
878/// | | | Owned <br> [`TensorCow`] with [`DataCow::Owned`] | cloned (layout-not-compatible) |
879///
880/// # Tips on common compilation errors
881///
882/// You may encounter ownership problem when you try to assign a reshaped tensor like this:
883///
884/// ```compile_fail
885/// # use rstsr::prelude::*;
886/// # let mut device = DeviceCpu::default();
887/// # device.set_default_order(RowMajor);
888/// let a = rt::arange((6, &device)).reshape([2, 3]);
889/// println!("a: {:?}", a);
890/// ```
891///
892/// The compiler may give an error like:
893///
894/// ```text
895/// 704 | let a = rt::arange((6, &device)).reshape([2, 3]);
896/// | ^^^^^^^^^^^^^^^^^^^^^^^^ - temporary value is freed at the end of this statement
897/// | |
898/// | creates a temporary value which is freed while still in use
899/// 705 | println!("a: {:?}", a);
900/// | - borrow later used here
901/// |
902/// help: consider using a `let` binding to create a longer lived value
903/// |
904/// 704 ~ let binding = rt::arange((6, &device));
905/// 705 ~ let a = binding.reshape([2, 3]);
906/// |
907/// ```
908///
/// The compiler's suggestion is correct. However, there is a simpler way to solve this
/// problem: use the [`into_shape`] variant, which takes ownership:
911///
912/// ```rust
913/// # use rstsr::prelude::*;
914/// # let mut device = DeviceCpu::default();
915/// # device.set_default_order(RowMajor);
916/// let a = rt::arange((6, &device)).into_shape([2, 3]);
917/// ```
918///
919/// # Notes of API accordance
920///
921/// - Array-API: `reshape(x, /, shape, *, copy=None)` ([`reshape`](https://data-apis.org/array-api/latest/API_specification/generated/array_api.reshape.html))
/// - NumPy: `reshape(a, /, shape, order='C', *, copy=None)` ([`numpy.reshape`](https://numpy.org/doc/stable/reference/generated/numpy.reshape.html)):
923/// - RSTSR: `rt::reshape_with_args(tensor, shape, (order, copy))`
924/// - RSTSR: `rt::reshape(tensor, shape)`
925///
926/// Please note this function does not support `order` and `copy` arguments in NumPy's `reshape`.
927/// You can use function [`reshape_with_args`] to specify these arguments.
928///
929/// # Elaborated examples
930///
931/// ## Difference between [RowMajor] and [ColMajor]
932///
933/// Tensor can be uniquely iterated (into a 1-dimension vector), for either row-major or
934/// column-major order.
935///
936/// **Reshape operation does not change the iterated sequence of a tensor**, by definition. In other
937/// words, the following code always holds true:
938///
939/// ```rust
940/// # use rstsr::prelude::*;
941/// # let mut device = DeviceCpu::default();
942/// # device.set_default_order(ColMajor);
943/// let a = rt::tensor_from_nested!([[0, 1, 2], [3, 4, 5]], &device);
944/// # let b = a.reshape([3, 2]);
945/// // note iteration order of associated method `iter` depends on `device.default_order()`
946///
947/// // let b = a.reshape(... SOME SHAPE ...);
948/// let a_vec = a.iter().collect::<Vec<_>>();
949/// let b_vec = b.iter().collect::<Vec<_>>();
950/// assert_eq!(a_vec, b_vec); // iterated sequence is the same
951/// ```
952///
953/// For example, in row-major order, reshape a matrix of (2, 3) to (3, 2):
954///
955/// ```rust
956/// # use rstsr::prelude::*;
957/// # let mut device = DeviceCpu::default();
958/// // set to row-major order
959/// device.set_default_order(RowMajor);
960/// // a: [[0, 1, 2], [3, 4, 5]]
961/// // b: [[0, 1], [2, 3], [4, 5]]
962/// // iterated sequence: [0, 1, 2, 3, 4, 5]
963///
964/// let a = rt::tensor_from_nested!([[0, 1, 2], [3, 4, 5]], &device);
965/// println!("{a}");
966/// // [[ 0 1 2]
967/// // [ 3 4 5]]
968/// let b = a.reshape([3, 2]);
969/// println!("{b}");
970/// // [[ 0 1]
971/// // [ 2 3]
972/// // [ 4 5]]
973///
974/// let a_vec = a.iter().cloned().collect::<Vec<_>>();
975/// println!("{a_vec:?}");
976/// // [0, 1, 2, 3, 4, 5]
977/// let b_vec = b.iter().cloned().collect::<Vec<_>>();
978/// println!("{b_vec:?}");
979/// // [0, 1, 2, 3, 4, 5]
980/// ```
981///
982/// In the column-major order, reshape the same matrix of (2, 3) to (3, 2) will yield a different
983/// result:
984///
985/// ```rust
986/// # use rstsr::prelude::*;
987/// # let mut device = DeviceCpu::default();
988/// // set to column-major order
989/// device.set_default_order(ColMajor);
990/// // a: [[0, 1, 2], [3, 4, 5]]
991/// // b: [[0, 4], [3, 2], [1, 5]]
992/// // iterated sequence: [0, 3, 1, 4, 2, 5]
993///
994/// let a = rt::tensor_from_nested!([[0, 1, 2], [3, 4, 5]], &device);
995/// println!("{a}");
996/// // [[ 0 1 2]
997/// // [ 3 4 5]]
998/// let b = a.reshape([3, 2]);
999/// println!("{b}");
1000/// // [[ 0 4]
1001/// // [ 3 2]
1002/// // [ 1 5]]
1003///
1004/// let a_vec = a.iter().cloned().collect::<Vec<_>>();
1005/// println!("{a_vec:?}");
1006/// // [0, 3, 1, 4, 2, 5]
1007/// let b_vec = b.iter().cloned().collect::<Vec<_>>();
1008/// println!("{b_vec:?}");
1009/// // [0, 3, 1, 4, 2, 5]
1010/// ```
1011///
1012/// You can also use function [`reshape_with_args`]`(shape, order)` to specify the order for reading
1013/// the tensor.
1014///
1015/// ## Occasions of data cloning
1016///
1017/// The following discussion assumes the tensor is in row-major order. Similar discussion applies to
1018/// column-major order.
1019///
/// If the tensor to be reshaped is already C-contiguous when the device is row-major, or
/// F-contiguous when the device is column-major, then the reshape operation can be performed
/// without any data cloning.
1023///
/// Otherwise, whether data cloning is necessary depends on the layout. For example, consider a
/// tensor of shape (4, 6, 9) but with non-contiguous strides:
1026///
1027/// ```rust
1028/// # use rstsr::prelude::*;
1029/// # let mut device = DeviceCpu::default();
1030/// # device.set_default_order(RowMajor);
1031/// // contiguous situation: (4, [6, 9]), or say the last two dimensions are contiguous
1032/// let a = rt::arange((288, &device)).into_shape([4, 8, 9]).into_slice((.., 0..6, ..));
1033/// println!("{:?}", a.layout());
1034/// // 3-Dim (dyn), contiguous: c
1035/// // shape: [4, 6, 9], stride: [72, 9, 1], offset: 0
1036/// ```
1037///
1038/// Those cases will not require data cloning (returns a view, or [`DataCow::Ref`] internally):
1039///
1040/// ```rust
1041/// # use rstsr::prelude::*;
1042/// # let mut device = DeviceCpu::default();
1043/// # device.set_default_order(RowMajor);
1044/// let a = rt::arange((288, &device)).into_shape([4, 8, 9]).into_slice((.., 0..6, ..));
1045/// // split a single dimension into multiple dimensions
1046/// assert!(!a.reshape([2, 2, 6, 9]).is_owned()); // (4, 6, 9) -> ([2, 2], 6, 9)
1047/// assert!(!a.reshape([4, 3, 2, 9]).is_owned()); // (4, 6, 9) -> (4, [3, 2], 9)
1048/// assert!(!a.reshape([4, 2, 3, 3, 3]).is_owned()); // (4, 6, 9) -> (4, [2, 3], [3, 3])
1049///
1050/// // merge contiguous dimensions into a single dimension
1051/// assert!(!a.reshape([4, 54]).is_owned()); // (4, 6, 9) -> (4, 6 * 9)
1052///
1053/// // merge contiguous dimensions and then split
1054/// assert!(!a.reshape([4, 3, 6, 3]).is_owned()); // (4, [6, 9]) -> (4, [3, 6, 3])
1055/// ```
1056///
1057/// However, the following cases will require data cloning (returns an owned tensor, or
1058/// [`DataCow::Owned`] internally):
1059///
1060/// ```rust
1061/// # use rstsr::prelude::*;
1062/// # let mut device = DeviceCpu::default();
1063/// # device.set_default_order(RowMajor);
1064/// let a = rt::arange((288, &device)).into_shape([4, 8, 9]).into_slice((.., 0..6, ..));
1065/// assert!(a.reshape([24, 9]).is_owned()); // (4, 6, 9) -> (4 * 6, 9)
1066/// assert!(a.reshape(-1).is_owned()); // (4, 6, 9) -> (4 * 6 * 9)
1067/// assert!(a.reshape([12, 2, 9]).is_owned()); // (4, 6, 9) -> (4 * [3, 2], 9)
1068/// ```
1069///
1070/// Please note that default order of device (row-major or column-major) matters. For the same
1071/// tensor slicing, if the device is column major, then behavior of merging contiguous dimensions
1072/// can be different:
1073///
1074/// ```rust
1075/// # use rstsr::prelude::*;
1076/// # let mut device = DeviceCpu::default();
1077/// # device.set_default_order(ColMajor);
1078/// // contiguous situation: ([4, 6], 9), or say the first two dimensions are contiguous
1079/// // this is different to (4, [6, 9]) in row major case
1080/// let a = rt::arange((288, &device)).into_shape([4, 8, 9]).into_slice((.., 0..6, ..));
1081/// println!("{:?}", a.layout());
1082/// // 3-Dim (dyn), contiguous: f
1083/// // shape: [4, 6, 9], stride: [1, 4, 32], offset: 0
1084///
1085/// // merge dimensions into a single dimension, col-major will be different to row-major case
1086/// assert!(a.reshape([4, 54]).is_owned()); // (4, 6, 9) -> (4, 6 * 9)
1087/// assert!(!a.reshape([24, 9]).is_owned()); // ([4, 6], 9) -> (4 * 6, 9)
1088/// ```
1089///
1090/// You can also use function [`reshape_with_args`]`(shape, copy)` to specify whether to copy data
1091/// when the new shape is not compatible with the original shape.
1092///
1093/// Also, you can use function [`reshapeable_without_copy`] to check whether the tensor can be
1094/// reshaped to the new shape without copying data.
1095///
1096/// # See also
1097///
1098/// ## Similar function from other crates/libraries
1099///
1100/// - Python Array API standard: [`reshape`](https://data-apis.org/array-api/2024.12/API_specification/generated/array_api.reshape.html)
1101/// - NumPy: [`reshape`](https://numpy.org/doc/stable/reference/generated/numpy.reshape.html)
1102/// - ndarray: [`to_shape`](https://docs.rs/ndarray/latest/ndarray/struct.ArrayBase.html#method.to_shape)
1103///
1104/// ## Related functions in RSTSR
1105///
1106/// - [`reshape_with_args`]: Reshape with advanced arguments for controlling the order for reading
1107/// the tensor, and whether to copy data.
1108/// - [`reshapeable_without_copy`]: Check whether the layout is compatible with the new shape.
1109/// - [`to_layout`]: Return a tensor with the specified layout.
1110/// - [`to_contig`]: Return an owned contiguous tensor.
1111///
1112/// ## Variants of this function
1113///
1114/// - [`reshape`] / [`reshape_f`]: Taking reference and returning Cow.
1115/// - [`into_shape`] / [`into_shape_f`]: Taking ownership and returning owned tensor.
1116/// - [`change_shape`] / [`change_shape_f`]: Taking ownership and returning Cow.
1117/// - [`to_shape`] / [`to_shape_f`]: Alias to [`reshape`] / [`reshape_f`].
1118/// - Associated methods on [`TensorAny`]:
1119///
1120/// - [`TensorAny::reshape`] / [`TensorAny::reshape_f`]
1121/// - [`TensorAny::into_shape`] / [`TensorAny::into_shape_f`]
1122/// - [`TensorAny::change_shape`] / [`TensorAny::change_shape_f`]
1123/// - [`TensorAny::to_shape`] / [`TensorAny::to_shape_f`]
1124pub fn reshape<'a, R, T, B, D>(
1125 tensor: &'a TensorAny<R, T, B, D>,
1126 shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
1127) -> TensorCow<'a, T, B, IxD>
1128where
1129 R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw> + DataIntoCowAPI<'a>,
1130 D: DimAPI,
1131 B: DeviceAPI<T> + DeviceRawAPI<MaybeUninit<T>> + DeviceCreationAnyAPI<T> + OpAssignArbitaryAPI<T, IxD, D>,
1132{
1133 reshape_with_args(tensor, shape, None)
1134}
1135
/// Alias of [`reshape`]; see that function for the full documentation.
pub use reshape as to_shape;
/// Alias of [`reshape_f`]; see that function for the full documentation.
pub use reshape_f as to_shape_f;
1138
/// Shape-changing associated methods on [`TensorAny`].
///
/// All methods here are thin wrappers that forward to the free functions of the same
/// names; see [`reshape`], [`into_shape`], [`change_shape`] and [`reshape_with_args`]
/// for the full documentation, including the ownership-semantics table and examples.
impl<'a, R, T, B, D> TensorAny<R, T, B, D>
where
    R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw> + DataIntoCowAPI<'a>,
    D: DimAPI,
    B: DeviceAPI<T> + DeviceRawAPI<MaybeUninit<T>> + DeviceCreationAnyAPI<T> + OpAssignArbitaryAPI<T, IxD, D>,
    T: Clone,
{
    /// Reshapes the tensor to the specified shape, consuming it and returning a
    /// clone-on-write tensor ([`TensorCow`]); fallible variant of [`TensorAny::change_shape`].
    ///
    /// # See also
    ///
    /// [`reshape`], [`into_shape`], [`change_shape`] and [`reshape_with_args`].
    pub fn change_shape_f(
        self,
        shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
    ) -> Result<TensorCow<'a, T, B, IxD>> {
        change_shape_f(self, shape)
    }

    /// Reshapes the tensor to the specified shape, consuming it and returning a
    /// clone-on-write tensor ([`TensorCow`]); like [`TensorAny::change_shape_f`] but
    /// without the `Result` wrapper.
    ///
    /// # See also
    ///
    /// [`reshape`], [`into_shape`], [`change_shape`] and [`reshape_with_args`].
    pub fn change_shape(self, shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>) -> TensorCow<'a, T, B, IxD> {
        change_shape(self, shape)
    }

    /// Reshapes the tensor to the specified shape, consuming it and returning an owned
    /// [`Tensor`]; fallible variant of [`TensorAny::into_shape`].
    ///
    /// # See also
    ///
    /// [`reshape`], [`into_shape`], [`change_shape`] and [`reshape_with_args`].
    pub fn into_shape_f(self, shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>) -> Result<Tensor<T, B, IxD>>
    where
        <B as DeviceRawAPI<T>>::Raw: Clone + 'a,
        B: OpAssignAPI<T, IxD>,
    {
        into_shape_f(self, shape)
    }

    /// Reshapes the tensor to the specified shape, consuming it and returning an owned
    /// [`Tensor`]; like [`TensorAny::into_shape_f`] but without the `Result` wrapper.
    ///
    /// # See also
    ///
    /// [`reshape`], [`into_shape`], [`change_shape`] and [`reshape_with_args`].
    pub fn into_shape(self, shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>) -> Tensor<T, B, IxD>
    where
        <B as DeviceRawAPI<T>>::Raw: Clone + 'a,
        B: OpAssignAPI<T, IxD>,
    {
        into_shape(self, shape)
    }

    /// Reshapes the tensor to the specified shape, borrowing it and returning a
    /// clone-on-write tensor ([`TensorCow`]); alias of [`TensorAny::reshape_f`].
    ///
    /// # See also
    ///
    /// [`reshape`], [`into_shape`], [`change_shape`] and [`reshape_with_args`].
    pub fn to_shape_f(
        &'a self,
        shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
    ) -> Result<TensorCow<'a, T, B, IxD>> {
        to_shape_f(self, shape)
    }

    /// Reshapes the tensor to the specified shape, borrowing it and returning a
    /// clone-on-write tensor ([`TensorCow`]); alias of [`TensorAny::reshape`].
    ///
    /// # See also
    ///
    /// [`reshape`], [`into_shape`], [`change_shape`] and [`reshape_with_args`].
    pub fn to_shape(&'a self, shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>) -> TensorCow<'a, T, B, IxD> {
        to_shape(self, shape)
    }

    /// Reshapes the tensor to the specified shape, borrowing it and returning a
    /// clone-on-write tensor ([`TensorCow`]); fallible variant of [`TensorAny::reshape`].
    ///
    /// # See also
    ///
    /// [`reshape`], [`into_shape`], [`change_shape`] and [`reshape_with_args`].
    pub fn reshape_f(
        &'a self,
        shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>,
    ) -> Result<TensorCow<'a, T, B, IxD>> {
        reshape_f(self, shape)
    }

    /// Reshapes the tensor to the specified shape, borrowing it and returning a
    /// clone-on-write tensor ([`TensorCow`]); like [`TensorAny::reshape_f`] but
    /// without the `Result` wrapper.
    ///
    /// # See also
    ///
    /// [`reshape`], [`into_shape`], [`change_shape`] and [`reshape_with_args`].
    pub fn reshape(&'a self, shape: impl TryInto<AxesIndex<isize>, Error: Into<Error>>) -> TensorCow<'a, T, B, IxD> {
        reshape(self, shape)
    }
}
1222
1223/* #endregion */