rstsr_core/tensor/asarray.rs

//! Implementation of function `asarray`.

use crate::prelude_dev::*;
use core::mem::ManuallyDrop;
use num::complex::{Complex32, Complex64};

/// Trait for the implementation of function [`asarray`]: converting the input to an array.
///
/// This trait can be implemented for different backends. For the usual CPU backends, refer to
/// function [`asarray`] for API documentation details.
pub trait AsArrayAPI<Inp> {
    type Out;

    fn asarray_f(self) -> Result<Self::Out>;

    fn asarray(self) -> Self::Out
    where
        Self: Sized,
    {
        Self::asarray_f(self).rstsr_unwrap()
    }
}

/// Convert the input to an array.
///
/// **This function is overloaded.**
///
/// <div class="warning">
///
/// **Row/Column Major Notice**
///
/// When a shape is passed to this function, the layout of the output tensor depends on the
/// device's default order ([`RowMajor`] or [`ColMajor`]).
///
/// </div>
///
/// Note that this function always returns a dynamic-dimensional ([`IxD`]) tensor. To convert it
/// into a fixed-dimensional tensor, you can use the [`.into_dim::<D>()`](Tensor::into_dim) method
/// on the output tensor without an explicit data copy.
///
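/// For instance, a minimal sketch of such a conversion (assuming the fixed two-dimensional type
/// [`Ix2`] is available from the prelude):
///
/// ```rust
/// # use rstsr::prelude::*;
/// // `asarray` itself returns a dynamic-dimensional (IxD) tensor ...
/// let a = rt::asarray((vec![1, 2, 3, 4, 5, 6], [2, 3]));
/// // ... which can be converted into a fixed two-dimensional tensor without copying data
/// let a = a.into_dim::<Ix2>();
/// println!("{a:?}");
/// ```
///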
/// # Overloads Table
///
/// ## Output owned tensor [`Tensor`]
///
/// Input vector [`Vec<T>`] as raw data:
///
/// - `asarray((input: Vec<T>, layout: Layout<D>, device: &B)) -> Tensor<T, B, IxD>`
/// - `asarray((input: Vec<T>, shape: D, device: &B)) -> Tensor<T, B, IxD>`
/// - `asarray((input: Vec<T>, device: &B)) -> Tensor<T, B, IxD>`
/// - `asarray((input: Vec<T>, layout: Layout<D>)) -> Tensor<T, DeviceCpu, IxD>`
/// - `asarray((input: Vec<T>, shape: D)) -> Tensor<T, DeviceCpu, IxD>`
/// - `asarray(input: Vec<T>) -> Tensor<T, DeviceCpu, IxD>`
///
/// Input scalar `T` as raw data:
///
/// - `asarray((input: T, device: &B)) -> Tensor<T, B, IxD>`
/// - `asarray(input: T) -> Tensor<T, DeviceCpu, IxD>`
///
/// Input tensor as raw data, changing its layout:
///
/// - `asarray((input: &TensorAny<R, T, B, D>, order: TensorIterOrder)) -> Tensor<T, B, D>`
/// - `asarray((input: TensorView<'_, T, B, D>, order: TensorIterOrder)) -> Tensor<T, B, D>`
/// - `asarray((input: Tensor<T, B, D>, order: TensorIterOrder)) -> Tensor<T, B, D>`
/// - `asarray(input: &TensorAny<R, T, B, D>) -> Tensor<T, B, D>`
/// - `asarray(input: TensorView<'_, T, B, D>) -> Tensor<T, B, D>`
/// - `asarray(input: Tensor<T, B, D>) -> Tensor<T, B, D>`
///
/// ## Output tensor view [`TensorView`]
///
/// - `asarray((input: &[T], layout: Layout<D>, device: &B)) -> TensorView<'a, T, B, IxD>`
/// - `asarray((input: &[T], shape: D, device: &B)) -> TensorView<'a, T, B, IxD>`
/// - `asarray((input: &[T], device: &B)) -> TensorView<'a, T, B, IxD>`
/// - `asarray((input: &[T], layout: Layout<D>)) -> TensorView<'a, T, DeviceCpu, IxD>`
/// - `asarray((input: &[T], shape: D)) -> TensorView<'a, T, DeviceCpu, IxD>`
/// - `asarray(input: &[T]) -> TensorView<'a, T, DeviceCpu, IxD>`
///
/// There are also overloads for `&Vec<T>` that behave the same as those for `&[T]`.
///
/// ## Output mutable tensor view [`TensorMut`]
///
/// All overloads for `&[T]` and `&Vec<T>` above also have mutable versions for `&mut [T]` and `&mut
/// Vec<T>`, which output [`TensorMut<'a, T, B, IxD>`] and [`TensorMut<'a, T, DeviceCpu, IxD>`],
/// respectively.
///
/// # Examples
///
/// ## Vector as input
///
/// The most common usage is to convert a vector into a tensor. You can also specify the shape /
/// layout and the device.
///
/// **The following example assumes that the device's default order is row-major**. The input shape
/// `[2, 3]` corresponds to a row-major layout `[2, 3].c()`.
///
/// ```rust
/// use rstsr::prelude::*;
/// let mut device = DeviceCpu::default();
/// device.set_default_order(RowMajor);
///
/// // vector as input, row-major layout by default
/// let input = vec![1, 2, 3, 4, 5, 6];
/// let a = rt::asarray((input, [2, 3], &device));
/// println!("{a:?}");
/// // [[1, 2, 3],
/// //  [4, 5, 6]]
/// // 2-Dim (dyn), contiguous: Cc, shape: [2, 3], stride: [3, 1], offset: 0
/// # let expected = rt::tensor_from_nested!([[1, 2, 3], [4, 5, 6]], &device);
/// # assert!(rt::allclose(&a, &expected, None));
/// ```
///
/// If you want to use column-major layout, you can specify the layout explicitly. But be cautious
/// that **row-major and column-major layouts will lead to different arrangements of data**.
///
/// ```rust
/// # use rstsr::prelude::*;
/// # let mut device = DeviceCpu::default();
/// # device.set_default_order(RowMajor);
/// // vector as input, column-major layout
/// let input = vec![1, 2, 3, 4, 5, 6];
/// let a = rt::asarray((input, [2, 3].f(), &device));
/// println!("{a:?}");
/// // [[ 1 3 5]
/// //  [ 2 4 6]]
/// // 2-Dim (dyn), contiguous: Ff, shape: [2, 3], stride: [1, 2], offset: 0
/// # let expected = rt::tensor_from_nested!([[1, 3, 5], [2, 4, 6]], &device);
/// # assert!(rt::allclose(&a, &expected, None));
/// ```
///
/// Also, **if the device's default order is column-major**, the shape input (`[2, 3]`) will also
/// lead to a column-major layout (`[2, 3].f()`):
///
/// ```rust
/// # use rstsr::prelude::*;
/// # let mut device = DeviceCpu::default();
/// # device.set_default_order(RowMajor);
/// // vector as input, column layout by default
/// device.set_default_order(ColMajor);
/// let input = vec![1, 2, 3, 4, 5, 6];
/// let a = rt::asarray((input, [2, 3], &device));
/// println!("{a:?}");
/// // [[ 1 3 5]
/// //  [ 2 4 6]]
/// // 2-Dim (dyn), contiguous: Ff, shape: [2, 3], stride: [1, 2], offset: 0
/// # let expected = rt::tensor_from_nested!([[1, 3, 5], [2, 4, 6]], &device);
/// # assert!(rt::allclose(&a, &expected, None));
/// ```
///
/// Finally, you can omit the device argument to use the default CPU device, and omit the
/// layout/shape to get a 1-D tensor:
///
/// ```rust
/// # use rstsr::prelude::*;
/// # let mut device = DeviceCpu::default();
/// # device.set_default_order(RowMajor);
/// let input = vec![1, 2, 3, 4, 5, 6];
/// let a = rt::asarray(input);
/// println!("{a:?}");
/// // [ 1 2 3 4 5 6]
/// // 1-Dim (dyn), contiguous: Cc, shape: [6], stride: [1], offset: 0
/// # let expected = rt::tensor_from_nested!([1, 2, 3, 4, 5, 6]);
/// # assert!(rt::allclose(&a, &expected, None));
/// ```
///
/// ## `&[T]` or `&mut [T]` as input
///
/// You can also convert a slice into a tensor view. Please note that `asarray` accepts `&[T]` and
/// `&Vec<T>`, but does not accept other slice-like types such as `&[T; N]`. You may need to
/// convert them with `.as_ref()` first.
///
/// ```rust
/// # use rstsr::prelude::*;
/// # let mut device = DeviceCpu::default();
/// # device.set_default_order(RowMajor);
/// // Slice &[T] as input
/// let input = &[1, 2, 3, 4, 5, 6];
/// let a = rt::asarray((input.as_ref(), [2, 3].c(), &device));
/// println!("{a:?}");
/// // [[ 1 2 3]
/// //  [ 4 5 6]]
/// # let expected = rt::tensor_from_nested!([[1, 2, 3], [4, 5, 6]], &device);
/// # assert!(rt::allclose(&a, &expected, None));
/// ```
///
/// Also, mutable slices `&mut [T]` and `&mut Vec<T>` are supported. You can modify the original
/// data via the output mutable tensor view.
///
/// ```rust
/// # use rstsr::prelude::*;
/// # let mut device = DeviceCpu::default();
/// # device.set_default_order(RowMajor);
/// // Slice &mut [T] as input
/// let mut input = vec![1, 2, 3, 4, 5, 6];
/// let mut a = rt::asarray((&mut input, [2, 3].c(), &device));
/// // change `input` via tensor view `a`
/// a[[0, 0]] = 10;
/// println!("{a:2?}");
/// // [[ 10  2  3]
/// //  [  4  5  6]]
/// # let expected = rt::tensor_from_nested!([[10, 2, 3], [4, 5, 6]], &device);
/// # assert!(rt::allclose(&a, &expected, None));
/// println!("{input:?}");
/// // [10, 2, 3, 4, 5, 6]
/// # assert_eq!(input, vec![10, 2, 3, 4, 5, 6]);
/// ```
///
/// You can also specify a sub-view via layout:
///
/// ```rust
/// # use rstsr::prelude::*;
/// # let mut device = DeviceCpu::default();
/// # device.set_default_order(RowMajor);
/// let input = (0..30).collect::<Vec<i32>>();
/// let layout = Layout::new([3, 2], [2, 7], 5).unwrap();
/// let a = rt::asarray((&input, layout, &device));
/// println!("{a:2?}");
/// // [[  5 12]
/// //  [  7 14]
/// //  [  9 16]]
/// # let expected = rt::tensor_from_nested!([[5, 12], [7, 14], [9, 16]], &device);
/// # assert!(rt::allclose(&a, &expected, None));
/// ```
///
/// Finally, you can also omit the device argument to use the default CPU device.
///
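/// For instance, a minimal sketch (assuming, per the overloads table above, that the result is a
/// [`TensorView`] on the default [`DeviceCpu`]):
///
/// ```rust
/// # use rstsr::prelude::*;
/// let input = vec![1, 2, 3, 4, 5, 6];
/// // slice input without a device argument gives a 1-D view on the default CPU device
/// let a = rt::asarray(input.as_slice());
/// println!("{a:?}");
/// // [ 1 2 3 4 5 6]
/// ```
///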
/// ## Scalar as input
///
/// You can also convert a scalar into a tensor with zero dimensions.
///
/// ```rust
/// # use rstsr::prelude::*;
/// # let mut device = DeviceCpu::default();
/// # device.set_default_order(RowMajor);
/// let a = rt::asarray((42, &device));
/// println!("{a:?}");
/// // 42
/// // 0-Dim (dyn), contiguous: CcFf, shape: [], stride: [], offset: 0
/// ```
///
/// ## Tensor or its view as input
///
/// You can convert a tensor or its view into a new tensor with a specified iteration order (layout).
///
/// This is similar to the optional argument `order` in NumPy's `np.asarray` function. The converted
/// tensor behaves exactly the same as the input tensor, but may have a different memory layout.
///
/// To specify the order, you may pass [`TensorIterOrder`] along with the input tensor:
/// - [`TensorIterOrder::K`] for keeping the original layout as much as possible;
/// - [`TensorIterOrder::C`] for row-major layout;
/// - [`TensorIterOrder::F`] for column-major layout.
///
/// ```rust
/// # use rstsr::prelude::*;
/// # let mut device = DeviceCpu::default();
/// # device.set_default_order(RowMajor);
/// // Generate a strided tensor view that prefers neither row-major nor column-major order
/// let a_raw = rt::arange((96, &device)).into_shape([4, 6, 4]);
/// let a = a_raw.i((..2, slice!(2, 6, 2), 2..)).into_transpose([1, 0, 2]);
/// println!("{a:2?}");
/// // [[[10, 11], [34, 35]], [[18, 19], [42, 43]]]
/// // shape: [2, 2, 2], stride: [8, 24, 1], offset: 10
/// # let expected = rt::tensor_from_nested!([[[10, 11], [34, 35]], [[18, 19], [42, 43]]], &device);
/// # assert!(rt::allclose(&a, &expected, None));
///
/// // shrink useful memory space, with preserved layout
/// let b = rt::asarray((&a, TensorIterOrder::K));
/// println!("{b:2?}");
/// // shape: [2, 2, 2], stride: [2, 4, 1], offset: 0
/// # assert!(rt::allclose(&b, &expected, None));
/// # assert_eq!(b.stride(), &[2, 4, 1]);
///
/// // convert to row-major layout
/// let b = rt::asarray((&a, TensorIterOrder::C));
/// println!("{b:2?}");
/// // shape: [2, 2, 2], stride: [4, 2, 1], offset: 0
/// # assert!(rt::allclose(&b, &expected, None));
/// # assert_eq!(b.stride(), &[4, 2, 1]);
///
/// // convert to column-major layout
/// let b = rt::asarray((&a, TensorIterOrder::F));
/// println!("{b:2?}");
/// // shape: [2, 2, 2], stride: [1, 2, 4], offset: 0
/// # assert!(rt::allclose(&b, &expected, None));
/// # assert_eq!(b.stride(), &[1, 2, 4]);
/// ```
///
/// # See also
///
/// ## Similar function from other crates/libraries
///
/// - [`numpy.asarray`](https://numpy.org/doc/stable/reference/generated/numpy.asarray.html)
///
/// ## Related functions in RSTSR
///
/// - [`tensor_from_nested`]: Create a tensor from nested array-like data (for debug usage only).
/// - [`Tensor::new`] or [`Tensor::new_unchecked`]: Create a tensor from storage and layout.
///
/// ## Variants of this function
///
/// - [`asarray_f`]: Fallible version of this function.
pub fn asarray<Args, Inp>(param: Args) -> Args::Out
where
    Args: AsArrayAPI<Inp>,
{
    return AsArrayAPI::asarray(param);
}

/// Convert the input to an array (fallible version of [`asarray`]).
///
/// # See also
///
/// Refer to [`asarray`] for more details and examples.
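///
/// A minimal sketch of a fallible call (assuming `asarray_f` is exported alongside [`asarray`]
/// in the `rt` facade used in the examples above):
///
/// ```rust
/// # use rstsr::prelude::*;
/// let input = vec![1, 2, 3, 4, 5, 6];
/// // the fallible variant returns a `Result` instead of panicking on invalid input
/// let a = rt::asarray_f((input, [2, 3], &DeviceCpu::default()));
/// assert!(a.is_ok());
/// ```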
pub fn asarray_f<Args, Inp>(param: Args) -> Result<Args::Out>
where
    Args: AsArrayAPI<Inp>,
{
    return AsArrayAPI::asarray_f(param);
}

/* #region tensor input */

impl<R, T, B, D> AsArrayAPI<()> for (&TensorAny<R, T, B, D>, TensorIterOrder)
where
    R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw>,
    T: Clone,
    D: DimAPI,
    B: DeviceAPI<T> + DeviceRawAPI<MaybeUninit<T>> + DeviceCreationAnyAPI<T> + OpAssignAPI<T, D>,
{
    type Out = Tensor<T, B, D>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, order) = self;
        let device = input.device();
        let layout_a = input.layout();
        let layout_c = layout_for_array_copy(layout_a, order)?;
        let mut storage_c = device.uninit_impl(layout_c.size())?;
        device.assign_uninit(storage_c.raw_mut(), &layout_c, input.raw(), layout_a)?;
        let storage_c = unsafe { B::assume_init_impl(storage_c) }?;
        let tensor = Tensor::new_f(storage_c, layout_c)?;
        return Ok(tensor);
    }
}

impl<R, T, B, D> AsArrayAPI<()> for &TensorAny<R, T, B, D>
where
    R: DataAPI<Data = <B as DeviceRawAPI<T>>::Raw>,
    T: Clone,
    D: DimAPI,
    B: DeviceAPI<T> + DeviceRawAPI<MaybeUninit<T>> + DeviceCreationAnyAPI<T> + OpAssignAPI<T, D>,
{
    type Out = Tensor<T, B, D>;

    fn asarray_f(self) -> Result<Self::Out> {
        asarray_f((self, TensorIterOrder::default()))
    }
}

impl<T, B, D> AsArrayAPI<()> for (Tensor<T, B, D>, TensorIterOrder)
where
    T: Clone,
    D: DimAPI,
    B: DeviceAPI<T> + DeviceCreationAnyAPI<T> + OpAssignAPI<T, D>,
{
    type Out = Tensor<T, B, D>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, order) = self;
        let storage_a = input.storage();
        let layout_a = input.layout();
        let device = storage_a.device();
        let layout_c = layout_for_array_copy(layout_a, order)?;
        if layout_c == *layout_a {
            return Ok(input);
        } else {
            let mut storage_c = device.uninit_impl(layout_c.size())?;
            device.assign_uninit(storage_c.raw_mut(), &layout_c, storage_a.raw(), layout_a)?;
            let storage_c = unsafe { B::assume_init_impl(storage_c) }?;
            let tensor = Tensor::new_f(storage_c, layout_c)?;
            return Ok(tensor);
        }
    }
}

impl<T, B, D> AsArrayAPI<()> for Tensor<T, B, D>
where
    T: Clone,
    D: DimAPI,
    B: DeviceAPI<T> + DeviceCreationAnyAPI<T> + OpAssignAPI<T, D>,
{
    type Out = Tensor<T, B, D>;

    fn asarray_f(self) -> Result<Self::Out> {
        asarray_f((self, TensorIterOrder::default()))
    }
}

impl<T, B, D> AsArrayAPI<()> for (TensorView<'_, T, B, D>, TensorIterOrder)
where
    T: Clone,
    D: DimAPI,
    B: DeviceAPI<T> + DeviceCreationAnyAPI<T> + OpAssignAPI<T, D>,
{
    type Out = Tensor<T, B, D>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, order) = self;
        asarray_f((&input, order))
    }
}

impl<T, B, D> AsArrayAPI<()> for TensorView<'_, T, B, D>
where
    T: Clone,
    D: DimAPI,
    B: DeviceAPI<T> + DeviceCreationAnyAPI<T> + OpAssignAPI<T, D>,
{
    type Out = Tensor<T, B, D>;

    fn asarray_f(self) -> Result<Self::Out> {
        asarray_f((self, TensorIterOrder::default()))
    }
}

/* #endregion */

/* #region vec-like input */

impl<T, B> AsArrayAPI<()> for (Vec<T>, &B)
where
    B: DeviceAPI<T> + DeviceCreationAnyAPI<T>,
{
    type Out = Tensor<T, B, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, device) = self;
        let layout = vec![input.len()].c();
        let storage = device.outof_cpu_vec(input)?;
        let tensor = Tensor::new_f(storage, layout)?;
        return Ok(tensor);
    }
}

impl<T, B, D> AsArrayAPI<D> for (Vec<T>, Layout<D>, &B)
where
    D: DimAPI,
    B: DeviceAPI<T> + DeviceCreationAnyAPI<T>,
{
    type Out = Tensor<T, B, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, layout, device) = self;
        rstsr_assert_eq!(
            layout.bounds_index()?,
            (0, layout.size()),
            InvalidLayout,
            "This constructor assumes compact memory layout."
        )?;
        rstsr_assert_eq!(
            layout.size(),
            input.len(),
            InvalidLayout,
            "This constructor assumes that the layout size is equal to the input size."
        )?;
        let storage = device.outof_cpu_vec(input)?;
        let tensor = Tensor::new_f(storage, layout.into_dim()?)?;
        return Ok(tensor);
    }
}

impl<T, B, D> AsArrayAPI<D> for (Vec<T>, D, &B)
where
    D: DimAPI,
    B: DeviceAPI<T> + DeviceCreationAnyAPI<T>,
{
    type Out = Tensor<T, B, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, shape, device) = self;
        let default_order = device.default_order();
        let layout = match default_order {
            RowMajor => shape.c(),
            ColMajor => shape.f(),
        };
        asarray_f((input, layout, device))
    }
}

impl<T> AsArrayAPI<()> for Vec<T>
where
    T: Clone,
{
    type Out = Tensor<T, DeviceCpu, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        asarray_f((self, &DeviceCpu::default()))
    }
}

#[duplicate_item(L; [D]; [Layout<D>])]
impl<T, D> AsArrayAPI<D> for (Vec<T>, L)
where
    T: Clone,
    D: DimAPI,
{
    type Out = Tensor<T, DeviceCpu, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, layout) = self;
        asarray_f((input, layout, &DeviceCpu::default()))
    }
}

impl<T> From<Vec<T>> for Tensor<T, DeviceCpu, IxD>
where
    T: Clone,
{
    fn from(input: Vec<T>) -> Self {
        asarray_f(input).rstsr_unwrap()
    }
}

/* #endregion */

/* #region slice-like input */

impl<'a, T, B, D> AsArrayAPI<D> for (&'a [T], Layout<D>, &B)
where
    T: Clone,
    B: DeviceAPI<T, Raw = Vec<T>>,
    D: DimAPI,
{
    type Out = TensorView<'a, T, B, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, layout, device) = self;
        let ptr = input.as_ptr();
        let len = input.len();
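        // SAFETY: reconstruct a `Vec` over the borrowed slice memory; it is wrapped in
        // `ManuallyDrop` below, so the borrowed buffer is never freed through this view.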
        let raw = unsafe {
            let ptr = ptr as *mut T;
            Vec::from_raw_parts(ptr, len, len)
        };
        let device = device.clone();
        let data = DataRef::from_manually_drop(ManuallyDrop::new(raw));
        let storage = Storage::new(data, device);
        let tensor = TensorView::new_f(storage, layout.into_dim()?)?;
        return Ok(tensor);
    }
}

impl<'a, T, B, D> AsArrayAPI<D> for (&'a [T], D, &B)
where
    T: Clone,
    B: DeviceAPI<T, Raw = Vec<T>>,
    D: DimAPI,
{
    type Out = TensorView<'a, T, B, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, shape, device) = self;
        let default_order = device.default_order();
        let layout = match default_order {
            RowMajor => shape.c(),
            ColMajor => shape.f(),
        };
        asarray_f((input, layout, device))
    }
}

impl<'a, T, B> AsArrayAPI<()> for (&'a [T], &B)
where
    T: Clone,
    B: DeviceAPI<T, Raw = Vec<T>>,
{
    type Out = TensorView<'a, T, B, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, device) = self;
        let layout = vec![input.len()].c();
        let device = device.clone();

        let ptr = input.as_ptr();
        let len = input.len();
        let raw = unsafe {
            let ptr = ptr as *mut T;
            Vec::from_raw_parts(ptr, len, len)
        };
        let data = DataRef::from_manually_drop(ManuallyDrop::new(raw));
        let storage = Storage::new(data, device);
        let tensor = TensorView::new_f(storage, layout)?;
        return Ok(tensor);
    }
}

#[duplicate_item(L; [D]; [Layout<D>])]
impl<'a, T, D> AsArrayAPI<D> for (&'a [T], L)
where
    T: Clone,
    D: DimAPI,
{
    type Out = TensorView<'a, T, DeviceCpu, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, layout) = self;
        asarray_f((input, layout, &DeviceCpu::default()))
    }
}

impl<'a, T> AsArrayAPI<()> for &'a [T]
where
    T: Clone,
{
    type Out = TensorView<'a, T, DeviceCpu, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        asarray_f((self, &DeviceCpu::default()))
    }
}

#[duplicate_item(L; [D]; [Layout<D>])]
impl<'a, T, B, D> AsArrayAPI<D> for (&'a Vec<T>, L, &B)
where
    T: Clone,
    B: DeviceAPI<T, Raw = Vec<T>> + 'a,
    D: DimAPI,
{
    type Out = TensorView<'a, T, B, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, layout, device) = self;
        asarray_f((input.as_slice(), layout, device))
    }
}

impl<'a, T, B> AsArrayAPI<()> for (&'a Vec<T>, &B)
where
    T: Clone,
    B: DeviceAPI<T, Raw = Vec<T>>,
{
    type Out = TensorView<'a, T, B, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, device) = self;
        asarray_f((input.as_slice(), device))
    }
}

#[duplicate_item(L; [D]; [Layout<D>])]
impl<'a, T, D> AsArrayAPI<D> for (&'a Vec<T>, L)
where
    T: Clone,
    D: DimAPI,
{
    type Out = TensorView<'a, T, DeviceCpu, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, layout) = self;
        asarray_f((input.as_slice(), layout, &DeviceCpu::default()))
    }
}

impl<'a, T> AsArrayAPI<()> for &'a Vec<T>
where
    T: Clone,
{
    type Out = TensorView<'a, T, DeviceCpu, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        asarray_f((self.as_slice(), &DeviceCpu::default()))
    }
}

impl<'a, T> From<&'a [T]> for TensorView<'a, T, DeviceCpu, IxD>
where
    T: Clone,
{
    fn from(input: &'a [T]) -> Self {
        asarray(input)
    }
}

impl<'a, T> From<&'a Vec<T>> for TensorView<'a, T, DeviceCpu, IxD>
where
    T: Clone,
{
    fn from(input: &'a Vec<T>) -> Self {
        asarray(input)
    }
}

/* #endregion */

/* #region slice-like mutable input */

impl<'a, T, B, D> AsArrayAPI<D> for (&'a mut [T], Layout<D>, &B)
where
    T: Clone,
    B: DeviceAPI<T, Raw = Vec<T>>,
    D: DimAPI,
{
    type Out = TensorMut<'a, T, B, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, layout, device) = self;
        let ptr = input.as_ptr();
        let len = input.len();
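        // SAFETY: reconstruct a `Vec` over the mutably borrowed slice memory; it is wrapped in
        // `ManuallyDrop` below, so the borrowed buffer is never freed through this mutable view.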
        let raw = unsafe {
            let ptr = ptr as *mut T;
            Vec::from_raw_parts(ptr, len, len)
        };
        let device = device.clone();
        let data = DataMut::from_manually_drop(ManuallyDrop::new(raw));
        let storage = Storage::new(data, device);
        let tensor = TensorMut::new_f(storage, layout.into_dim()?)?;
        return Ok(tensor);
    }
}

impl<'a, T, B, D> AsArrayAPI<D> for (&'a mut [T], D, &B)
where
    T: Clone,
    B: DeviceAPI<T, Raw = Vec<T>>,
    D: DimAPI,
{
    type Out = TensorMut<'a, T, B, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, shape, device) = self;
        let default_order = device.default_order();
        let layout = match default_order {
            RowMajor => shape.c(),
            ColMajor => shape.f(),
        };
        asarray_f((input, layout, device))
    }
}

impl<'a, T, B> AsArrayAPI<()> for (&'a mut [T], &B)
where
    T: Clone,
    B: DeviceAPI<T, Raw = Vec<T>>,
{
    type Out = TensorMut<'a, T, B, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, device) = self;
        let layout = [input.len()].c();
        let device = device.clone();

        let ptr = input.as_ptr();
        let len = input.len();
        let raw = unsafe {
            let ptr = ptr as *mut T;
            Vec::from_raw_parts(ptr, len, len)
        };
        let data = DataMut::from_manually_drop(ManuallyDrop::new(raw));
        let storage = Storage::new(data, device);
        let tensor = TensorMut::new_f(storage, layout.into_dim()?)?;
        return Ok(tensor);
    }
}

#[duplicate_item(L; [D]; [Layout<D>])]
impl<'a, T, D> AsArrayAPI<D> for (&'a mut [T], L)
where
    T: Clone,
    D: DimAPI,
{
    type Out = TensorMut<'a, T, DeviceCpu, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, layout) = self;
        asarray_f((input, layout, &DeviceCpu::default()))
    }
}

impl<'a, T> AsArrayAPI<()> for &'a mut [T]
where
    T: Clone,
{
    type Out = TensorMut<'a, T, DeviceCpu, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        asarray_f((self, &DeviceCpu::default()))
    }
}

#[duplicate_item(L; [D]; [Layout<D>])]
impl<'a, T, B, D> AsArrayAPI<D> for (&'a mut Vec<T>, L, &B)
where
    T: Clone,
    B: DeviceAPI<T, Raw = Vec<T>>,
    D: DimAPI,
{
    type Out = TensorMut<'a, T, B, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, layout, device) = self;
        asarray_f((input.as_mut_slice(), layout, device))
    }
}

impl<'a, T, B> AsArrayAPI<()> for (&'a mut Vec<T>, &B)
where
    T: Clone,
    B: DeviceAPI<T, Raw = Vec<T>>,
{
    type Out = TensorMut<'a, T, B, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, device) = self;
        asarray_f((input.as_mut_slice(), device))
    }
}

#[duplicate_item(L; [D]; [Layout<D>])]
impl<'a, T, D> AsArrayAPI<D> for (&'a mut Vec<T>, L)
where
    T: Clone,
    D: DimAPI,
{
    type Out = TensorMut<'a, T, DeviceCpu, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        let (input, layout) = self;
        asarray_f((input.as_mut_slice(), layout, &DeviceCpu::default()))
    }
}

impl<'a, T> AsArrayAPI<()> for &'a mut Vec<T>
where
    T: Clone,
{
    type Out = TensorMut<'a, T, DeviceCpu, IxD>;

    fn asarray_f(self) -> Result<Self::Out> {
        asarray_f((self.as_mut_slice(), &DeviceCpu::default()))
    }
}

impl<'a, T> From<&'a mut [T]> for TensorMut<'a, T, DeviceCpu, IxD>
where
    T: Clone,
{
    fn from(input: &'a mut [T]) -> Self {
        asarray(input)
    }
}

impl<'a, T> From<&'a mut Vec<T>> for TensorMut<'a, T, DeviceCpu, IxD>
where
    T: Clone,
{
    fn from(input: &'a mut Vec<T>) -> Self {
        asarray(input)
    }
}

/* #endregion */

/* #region scalar input */

macro_rules! impl_asarray_scalar {
    ($($t:ty),*) => {
        $(
            impl<B> AsArrayAPI<()> for ($t, &B)
            where
                B: DeviceAPI<$t> + DeviceCreationAnyAPI<$t>,
            {
                type Out = Tensor<$t, B, IxD>;

                fn asarray_f(self) -> Result<Self::Out> {
                    let (input, device) = self;
                    let layout = Layout::new(vec![], vec![], 0)?;
                    let storage = device.outof_cpu_vec(vec![input])?;
                    let tensor = unsafe { Tensor::new_unchecked(storage, layout) };
                    return Ok(tensor);
                }
            }

            impl AsArrayAPI<()> for $t {
                type Out = Tensor<$t, DeviceCpu, IxD>;

                fn asarray_f(self) -> Result<Self::Out> {
                    asarray_f((self, &DeviceCpu::default()))
                }
            }
        )*
    };
}

impl_asarray_scalar!(i8, i16, i32, i64, i128, isize, u8, u16, u32, u64, u128, usize, f32, f64, Complex32, Complex64);

/* #endregion */

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_asarray() {
        let input = vec![1, 2, 3];
        let tensor = asarray_f(input).unwrap();
        println!("{tensor:?}");
        let input = [1, 2, 3];
        let tensor = asarray_f(input.as_ref()).unwrap();
        println!("{tensor:?}");

        let input = vec![1, 2, 3];
        let tensor = asarray_f(&input).unwrap();
        println!("{:?}", tensor.raw().as_ptr());
        println!("{tensor:?}");

        let tensor = asarray_f((&tensor, TensorIterOrder::K)).unwrap();
        println!("{tensor:?}");

        let tensor = asarray_f((tensor, TensorIterOrder::K)).unwrap();
        println!("{tensor:?}");
    }

    #[test]
    fn test_asarray_scalar() {
        let tensor = asarray_f(1).unwrap();
        println!("{tensor:?}");
        let tensor = asarray_f((Complex64::new(0., 1.), &DeviceCpuSerial::default())).unwrap();
        println!("{tensor:?}");
    }

    #[test]
    fn doc_asarray() {
        use rstsr::prelude::*;
        let mut device = DeviceCpu::default();
        device.set_default_order(RowMajor);

        // vector as input, row-major layout by default
        let input = vec![1, 2, 3, 4, 5, 6];
        let a = rt::asarray((input, [2, 3], &device));
        println!("{a:?}");
        // [[ 1 2 3]
        //  [ 4 5 6]]
        // 2-Dim (dyn), contiguous: Cc, shape: [2, 3], stride: [3, 1], offset: 0
        let expected = rt::tensor_from_nested!([[1, 2, 3], [4, 5, 6]], &device);
        assert!(rt::allclose(&a, &expected, None));

        // vector as input, column-major layout
        let input = vec![1, 2, 3, 4, 5, 6];
        let a = rt::asarray((input, [2, 3].f(), &device));
        println!("{a:?}");
        // [[ 1 3 5]
        //  [ 2 4 6]]
        // 2-Dim (dyn), contiguous: Ff, shape: [2, 3], stride: [1, 2], offset: 0
        let expected = rt::tensor_from_nested!([[1, 3, 5], [2, 4, 6]], &device);
        assert!(rt::allclose(&a, &expected, None));

        // vector as input, column layout by default
        device.set_default_order(ColMajor);
        let input = vec![1, 2, 3, 4, 5, 6];
        let a = rt::asarray((input, [2, 3], &device));
        println!("{a:?}");
        // [[ 1 3 5]
        //  [ 2 4 6]]
        // 2-Dim (dyn), contiguous: Ff, shape: [2, 3], stride: [1, 2], offset: 0
        let expected = rt::tensor_from_nested!([[1, 3, 5], [2, 4, 6]], &device);
        assert!(rt::allclose(&a, &expected, None));

        // 1-D vector, default CPU device
        let input = vec![1, 2, 3, 4, 5, 6];
        let a = rt::asarray(input);
        println!("{a:?}");
        // [ 1 2 3 4 5 6]
        // 1-Dim (dyn), contiguous: Cc, shape: [6], stride: [1], offset: 0
        let expected = rt::tensor_from_nested!([1, 2, 3, 4, 5, 6]);
        assert!(rt::allclose(&a, &expected, None));

        // Slice &[T] as input
        device.set_default_order(RowMajor);
        let input = &[1, 2, 3, 4, 5, 6];
        let a = rt::asarray((input.as_ref(), [2, 3].c(), &device));
        println!("{a:?}");
        // [[ 1 2 3]
        //  [ 4 5 6]]
        let expected = rt::tensor_from_nested!([[1, 2, 3], [4, 5, 6]], &device);
        assert!(rt::allclose(&a, &expected, None));

        // Slice &mut [T] as input
        let mut input = vec![1, 2, 3, 4, 5, 6];
        let mut a = rt::asarray((&mut input, [2, 3].c(), &device));
        // change `input` via tensor view `a`
        a[[0, 0]] = 10;
        println!("{a:2?}");
        // [[10 2 3]
        //  [ 4 5 6]]
        let expected = rt::tensor_from_nested!([[10, 2, 3], [4, 5, 6]], &device);
        assert!(rt::allclose(&a, &expected, None));
        println!("{input:?}");
        // [10, 2, 3, 4, 5, 6]
        assert_eq!(input, vec![10, 2, 3, 4, 5, 6]);

        // Sub-view from &Vec<T>
        let input = (0..30).collect::<Vec<i32>>();
        let layout = Layout::new([3, 2], [2, 7], 5).unwrap();
        let a = rt::asarray((&input, layout, &device));
        println!("{a:2?}");
        // [[  5 12]
        //  [  7 14]
        //  [  9 16]]
        let expected = rt::tensor_from_nested!([[5, 12], [7, 14], [9, 16]], &device);
        assert!(rt::allclose(&a, &expected, None));

        // Scalar as input
        let a = rt::asarray((42, &device));
        println!("{a:?}");
        // 42
        // 0-Dim (dyn), contiguous: CcFf, shape: [], stride: [], offset: 0
    }

    #[test]
    fn doc_asarray_from_tensor() {
        use rstsr::prelude::*;
        let mut device = DeviceCpu::default();
        device.set_default_order(RowMajor);

        // Generate a strided tensor view that prefers neither row-major nor column-major order
        let a_raw = rt::arange((96, &device)).into_shape([4, 6, 4]);
        let a = a_raw.i((..2, slice!(2, 6, 2), 2..)).into_transpose([1, 0, 2]);
        println!("{a:2?}");
        // [[[10, 11], [34, 35]], [[18, 19], [42, 43]]]
        // shape: [2, 2, 2], stride: [8, 24, 1], offset: 10
        let expected = rt::tensor_from_nested!([[[10, 11], [34, 35]], [[18, 19], [42, 43]]], &device);
        assert!(rt::allclose(&a, &expected, None));
        let b = rt::asarray((&a, TensorIterOrder::K));
        println!("{b:2?}");
        // shape: [2, 2, 2], stride: [2, 4, 1], offset: 0
        assert!(rt::allclose(&b, &expected, None));
        assert_eq!(b.stride(), &[2, 4, 1]);
        let b = rt::asarray((&a, TensorIterOrder::C));
        println!("{b:2?}");
        // shape: [2, 2, 2], stride: [4, 2, 1], offset: 0
        assert!(rt::allclose(&b, &expected, None));
        assert_eq!(b.stride(), &[4, 2, 1]);
        let b = rt::asarray((&a, TensorIterOrder::F));
        println!("{b:2?}");
        // shape: [2, 2, 2], stride: [1, 2, 4], offset: 0
        assert!(rt::allclose(&b, &expected, None));
        assert_eq!(b.stride(), &[1, 2, 4]);
    }
}