//! `lumen_core/tensor/construct.rs` — constructors for `Tensor`.

1use std::sync::Arc;
2use crate::{AutogradInfo, Error, FloatDType, Layout, NumDType, Result, Shape, Storage, StorageArc, WithDType};
3use super::{Tensor, TensorId, TensorImpl};
4
5impl<T: WithDType> Tensor<T> {
6    /// Creates a new `Tensor` from any supported Rust array or slice.
7    ///
8    /// ```rust
9    /// use lumen_core::Tensor;
10    ///
11    /// let a = Tensor::new(&[1, 2, 3]).unwrap();
12    /// println!("{}", a.shape());
13    /// ```
14    pub fn new<A: ToTensor<T>>(array: A) -> Result<Self> {
15        Self::new_impl(array, T::AutogradMeta::default())
16    }
17
18    pub(crate) fn new_impl<A: ToTensor<T>>(array: A, meta: T::AutogradMeta) -> Result<Self> {
19        let shape = array.shape()?;
20        let storage = array.to_storage()?;
21        Ok(Self::from_storage(storage, shape, meta))
22    }
23
24    /// Creates an array full with a constant `value`.
25    ///
26    /// ```rust
27    /// use lumen_core::Tensor;
28    ///
29    /// let a = Tensor::full((2, 2), 7).unwrap();
30    /// println!("{}", a);
31    /// ```
32    pub fn full<S: Into<Shape>>(shape: S, value: T) -> Result<Self> {
33        Self::full_impl(shape, value, T::AutogradMeta::default())
34    }
35
36    pub(crate) fn full_impl<S: Into<Shape>>(shape: S, value: T, meta: T::AutogradMeta) -> Result<Self> {
37        let shape: Shape = shape.into();
38        let storage = Storage::new(vec![value; shape.element_count()]);
39        Ok(Self::from_storage(storage, shape, meta))
40    }
41
42    /// Creates a new `Tensor` with un initialze value!
43    pub fn empty<S: Into<Shape>>(shape: S) -> Result<Self> {
44        Self::empty_impl(shape, T::AutogradMeta::default())
45    }
46
47    pub(crate) fn empty_impl<S: Into<Shape>>(shape: S, meta: T::AutogradMeta) -> Result<Self> {
48        let shape: Shape = shape.into();
49        let element_count = shape.element_count();
50        let mut v: Vec<T> = Vec::with_capacity(element_count);
51        unsafe { v.set_len(element_count); }
52        let storage = Storage::new(v);
53        Ok(Self::from_storage(storage, shape, meta))
54    }
55
56    /// Creates a new `Tensor` no storage
57    pub fn meta<S: Into<Shape>>(shape: S) -> Result<Self> {
58        Self::meta_impl(shape, T::AutogradMeta::default())
59    }
60
61    pub(crate) fn meta_impl<S: Into<Shape>>(shape: S, meta: T::AutogradMeta) -> Result<Self> {
62        let shape: Shape = shape.into();
63        let tensor_ = TensorImpl {
64            id: TensorId::new(),
65            storage: None,
66            layout: Layout::contiguous(shape),
67            meta,
68        };
69        Ok(Tensor(Arc::new(tensor_)))
70    }
71
72    /// Creates a new `Tensor` directly from a storage buffer, shape and meta data.
73    ///
74    /// Typically used internally, but can also be used when you already
75    /// have a `Storage<T>` prepared.
76    pub(crate) fn from_storage<L: Into<Layout>>(storage: Storage<T>, layout: L, meta: T::AutogradMeta) -> Self {
77        let tensor_ = TensorImpl {
78            id: TensorId::new(),
79            storage: Some(StorageArc::new(storage)),
80            layout: layout.into(),
81            meta,
82        };
83        Tensor(Arc::new(tensor_))
84    }
85
86    pub(crate) fn share_storage<L: Into<Layout>>(&self, layout: L, meta: T::AutogradMeta) -> Self {
87        let tensor_ = TensorImpl {
88            id: TensorId::new(),
89            storage: self.0.storage.clone(),
90            layout: layout.into(),
91            meta,
92        };
93        Tensor(Arc::new(tensor_))
94    }
95
96}
97
98impl<T: WithDType> Tensor<T> {
99    /// Creates an array of zeros with the given shape.
100    ///
101    /// ```rust
102    /// use lumen_core::Tensor;
103    ///
104    /// let a = Tensor::<f32>::zeros((2, 3)).unwrap();
105    /// println!("{}", a);
106    /// ```
107    pub fn zeros<S: Into<Shape>>(shape: S) -> Result<Self> {
108        Self::zeros_impl(shape, T::AutogradMeta::default())
109    }
110
111    pub(crate) fn zeros_impl<S: Into<Shape>>(shape: S, meta: T::AutogradMeta) -> Result<Self> {
112        let shape = shape.into();
113        let storage = Storage::zeros(&shape);
114        Ok(Self::from_storage(storage, shape, meta))
115    }
116
117    /// Creates a zero-filled array with the same shape as `self`.
118    ///
119    /// ```rust
120    /// use lumen_core::Tensor;
121    ///
122    /// let a = Tensor::<i32>::ones((2, 2)).unwrap();
123    /// let b = a.zeros_like().unwrap();
124    /// println!("{}", b);
125    /// ```
126    pub fn zeros_like(&self) -> Result<Self> {
127        Self::zeros(self.shape())
128    }
129
130    /// Creates an array of ones with the given shape.
131    ///
132    /// ```rust
133    /// use lumen_core::Tensor;
134    ///
135    /// let a = Tensor::<f64>::ones((3, 3)).unwrap();
136    /// println!("{}", a);
137    /// ```
138    pub fn ones<S: Into<Shape>>(shape: S) -> Result<Self> {
139        Self::ones_impl(shape, T::AutogradMeta::default())
140    }
141
142    pub(crate) fn ones_impl<S: Into<Shape>>(shape: S, meta: T::AutogradMeta) -> Result<Self> {
143        let shape = shape.into();
144        let storage = Storage::ones(&shape);
145        Ok(Self::from_storage(storage, shape, meta))
146    }
147
148    /// Creates a one-filled array with the same shape as `self`.
149    pub fn ones_like(&self) -> Result<Self> {
150        Self::ones(self.shape())
151    }
152}
153
154impl<T: NumDType> Tensor<T> {
155    /// Creates a 1-D array with values from `start` up to (but not including) `end`.
156    ///
157    /// ```rust
158    /// use lumen_core::Tensor;
159    ///
160    /// let a = Tensor::arange(0., 5.).unwrap();
161    /// println!("{}", a);
162    /// ```
163    pub fn arange(start: T, end: T) -> Result<Self> {
164        Self::arange_impl(start, end, T::AutogradMeta::default())
165    }
166
167    pub(crate) fn arange_impl(start: T, end: T, meta: T::AutogradMeta) -> Result<Self> {
168        let storage = T::to_range_storage(start, end)?;
169        let shape = storage.len();
170        Ok(Self::from_storage(storage, shape, meta))
171    }
172}
173
174impl<T: WithDType> Tensor<T> {
175
176    /// Creates an array from a flat `Vec<T>` and explicit shape.
177    ///
178    /// ```rust
179    /// use lumen_core::Tensor;
180    ///
181    /// let a = Tensor::from_vec(vec![1, 2, 3, 4], (2, 2)).unwrap();
182    /// println!("{}", a);
183    /// ```
184    pub fn from_vec<V: Into<Vec<T>>, S: Into<Shape>>(vec: V, shape: S) -> Result<Self> {
185        Self::from_vec_impl(vec, shape, T::AutogradMeta::default())
186    }
187
188    pub(crate) fn from_vec_impl<V: Into<Vec<T>>, S: Into<Shape>>(vec: V, shape: S, meta: T::AutogradMeta) -> Result<Self> {
189        let vec = vec.into();
190        let shape: Shape = shape.into();
191        if shape.element_count() != vec.len() {
192            Err(Error::ElementSizeMismatch { expected: vec.len(), got: shape.element_count(), op: "from_vec" })?
193        }
194        let storage = Storage::new(vec);
195        Ok(Self::from_storage(storage, shape, meta))
196    }
197
198    pub fn diag(diag: &[T]) -> Result<Self> {
199        Self::diag_impl(diag, T::AutogradMeta::default())
200    }
201
202    pub(crate) fn diag_impl(diag: &[T], meta: T::AutogradMeta) -> Result<Self> {
203        let size = diag.len();
204        let mut vec = vec![T::ZERO; size * size];
205        for n in 0..size {
206            vec[n * size + n] = diag[n];
207        }
208        let storage = Storage::new(vec);
209        Ok(Self::from_storage(storage, (size, size), meta))
210    }
211}
212
213impl<T: NumDType> Tensor<T> {
214    /// Creates an array with uniformly distributed random values in `[min, max)`.
215    ///
216    /// ```rust
217    /// use lumen_core::Tensor;
218    ///
219    /// let a = Tensor::<f32>::rand(0., 1., (2, 3)).unwrap();
220    /// println!("{}", a);
221    /// ```
222    pub fn rand<S: Into<Shape>>(min: T, max: T, shape: S) -> Result<Self> {
223        Self::rand_impl(min, max, shape, T::AutogradMeta::default())
224    }
225
226    pub(crate) fn rand_impl<S: Into<Shape>>(min: T, max: T, shape: S, meta: T::AutogradMeta) -> Result<Self> {
227        let shape = shape.into();
228        let storage = Storage::rand_uniform(&shape, min, max)?;
229        Ok(Self::from_storage(storage, shape, meta))
230    }
231
232    /// Creates a random array with the same shape as `self`.
233    pub fn rand_like(&self, min: T, max: T) -> Result<Self> {
234        Self::rand(min, max, self.shape())
235    }
236}
237
238impl<F: FloatDType> Tensor<F> {
239    /// Generate a 1-D `Tensor` of `num` evenly spaced values over the interval [start, stop).
240    /// 
241    /// # Example
242    ///
243    /// ```
244    /// # use lumen_core::Tensor;
245    /// let arr = Tensor::linspace(0.0, 1.0, 5).unwrap();
246    /// assert_eq!(arr.to_vec().unwrap(), [0.0, 0.2, 0.4, 0.6000000000000001, 0.8]);
247    /// ```
248    pub fn linspace(start: F, stop: F, num: usize) -> Result<Self> {
249        Self::linspace_impl(start, stop, num, F::AutogradMeta::default())
250    }
251
252    pub(crate) fn linspace_impl(start: F, stop: F, num: usize, meta: F::AutogradMeta) -> Result<Self> {
253        let step = (stop - start) / F::from_usize(num);
254        let vec: Vec<_> = std::iter::successors(Some(start), |&x| {
255            let next = x + step;
256            if next < stop { Some(next) } else { None }
257        })
258        .collect();
259
260        let len = vec.len();
261        let storage = Storage::new(vec);
262        Ok(Self::from_storage(storage, len, meta))
263    }
264}
265
266impl<F: FloatDType> Tensor<F> {
267    /// Creates an array with normally distributed random values
268    /// with given `mean` and `std`.
269    ///
270    /// ```rust
271    /// use lumen_core::Tensor;
272    ///
273    /// let a = Tensor::<f64>::randn(0.0, 1.0, (2, 2)).unwrap();
274    /// println!("{}", a);
275    /// ```
276    pub fn randn<S: Into<Shape>>(mean: F, std: F, shape: S) -> Result<Self> {
277        Self::randn_impl(mean, std, shape, F::AutogradMeta::default())
278    }
279
280    pub(crate) fn randn_impl<S: Into<Shape>>(mean: F, std: F, shape: S, meta: F::AutogradMeta) -> Result<Self> {
281        let shape = shape.into();
282        let storage = Storage::rand_normal(&shape, mean, std)?;
283        Ok(Self::from_storage(storage, shape, meta))
284    }
285
286    /// Creates a normal-distributed random array with the same shape as `self`.
287    pub fn randn_like(&self, mean: F, std: F) -> Result<Self> {
288        Self::randn(mean, std, self.shape())
289    }
290}
291
292impl<T: WithDType> Tensor<T> {
293    pub fn eye(size: usize) -> Result<Self> {
294        Self::eye_impl(size, T::AutogradMeta::default())
295    }
296
297    pub(crate) fn eye_impl(size: usize, meta: T::AutogradMeta) -> Result<Self> {
298        let mut vec = vec![T::ZERO; size * size];
299        for n in 0..size {
300            vec[n * size + n] = T::ONE;
301        }
302        let storage = Storage::new(vec);
303        Ok(Self::from_storage(storage, (size, size), meta))
304    }
305
306    pub fn tril(size: usize, diagonal: bool) -> Result<Self> {
307        Self::tril_impl(size, diagonal, T::AutogradMeta::default())
308    }
309
310    pub fn triu(size: usize, diagonal: bool) -> Result<Self> {
311        Self::triu_impl(size, diagonal, T::AutogradMeta::default())
312    }
313
314    pub(crate) fn tril_impl(size: usize, diagonal: bool, meta: T::AutogradMeta) -> Result<Self> {
315        let mut vec = vec![T::ZERO; size * size];
316        
317        for i in 0..size {
318            let end = if diagonal { i + 1 } else { i };            
319            for j in 0..end { 
320                vec[i * size + j] = T::ONE;
321            }
322        }
323        
324        let storage = Storage::new(vec);
325        Ok(Self::from_storage(storage, (size, size), meta))
326    }
327
328    pub(crate) fn triu_impl(size: usize, diagonal: bool, meta: T::AutogradMeta) -> Result<Self> {
329        let mut vec = vec![T::ZERO; size * size];
330
331        for i in 0..size { 
332            let start = if diagonal { i } else { i + 1 };
333            
334            for j in start..size {
335                vec[i * size + j] = T::ONE;
336            }
337        }
338
339        let storage = Storage::new(vec);
340        Ok(Self::from_storage(storage, (size, size), meta))        
341    }
342}
343
impl<T: FloatDType> Tensor<T> {
    /// Like [`Tensor::new`], but the result is created with `AutogradInfo::var()`.
    #[inline]
    pub fn new_var<A: ToTensor<T>>(array: A) -> Result<Self> {
        Self::new_impl(array, AutogradInfo::var())
    }

    /// Like [`Tensor::empty`], but the result is created with `AutogradInfo::var()`.
    #[inline]
    pub fn empty_var<S: Into<Shape>>(shape: S) -> Result<Self> {
        Self::empty_impl(shape, AutogradInfo::var())
    }

    /// Like [`Tensor::meta`], but the result is created with `AutogradInfo::var()`.
    #[inline]
    pub fn meta_var<S: Into<Shape>>(shape: S) -> Result<Self> {
        Self::meta_impl(shape, AutogradInfo::var())
    }

    /// Like [`Tensor::full`], but the result is created with `AutogradInfo::var()`.
    #[inline]
    pub fn full_var<S: Into<Shape>>(shape: S, value: T) -> Result<Self> {
        Self::full_impl(shape, value, AutogradInfo::var())
    }

    /// Like [`Tensor::zeros`], but the result is created with `AutogradInfo::var()`.
    #[inline]
    pub fn zeros_var<S: Into<Shape>>(shape: S) -> Result<Self> {
        Self::zeros_impl(shape, AutogradInfo::var())
    }

    /// Like [`Tensor::zeros_like`], but the result is created with `AutogradInfo::var()`.
    #[inline]
    pub fn zeros_like_var(&self) -> Result<Self> {
        Self::zeros_var(self.shape())
    }

    /// Like [`Tensor::ones`], but the result is created with `AutogradInfo::var()`.
    #[inline]
    pub fn ones_var<S: Into<Shape>>(shape: S) -> Result<Self> {
        Self::ones_impl(shape, AutogradInfo::var())
    }

    /// Like [`Tensor::ones_like`], but the result is created with `AutogradInfo::var()`.
    #[inline]
    pub fn ones_like_var(&self) -> Result<Self> {
        Self::ones_var(self.shape())
    }

    /// Like [`Tensor::arange`], but the result is created with `AutogradInfo::var()`.
    #[inline]
    pub fn arange_var(start: T, end: T) -> Result<Self> {
        Self::arange_impl(start, end, AutogradInfo::var())
    }

    /// Like [`Tensor::from_vec`], but the result is created with `AutogradInfo::var()`.
    #[inline]
    pub fn from_vec_var<V: Into<Vec<T>>, S: Into<Shape>>(vec: V, shape: S) -> Result<Self> {
        Self::from_vec_impl(vec, shape, AutogradInfo::var())
    }

    /// Like [`Tensor::eye`], but the result is created with `AutogradInfo::var()`.
    #[inline]
    pub fn eye_var(size: usize) -> Result<Self> {
        Self::eye_impl(size, AutogradInfo::var())
    }

    /// Like [`Tensor::tril`], but the result is created with `AutogradInfo::var()`.
    #[inline]
    pub fn tril_var(size: usize, diagonal: bool) -> Result<Self> {
        Self::tril_impl(size, diagonal, AutogradInfo::var())
    }

    /// Like [`Tensor::triu`], but the result is created with `AutogradInfo::var()`.
    #[inline]
    pub fn triu_var(size: usize, diagonal: bool) -> Result<Self> {
        Self::triu_impl(size, diagonal, AutogradInfo::var())
    }

    /// Like [`Tensor::diag`], but the result is created with `AutogradInfo::var()`.
    #[inline]
    pub fn diag_var(diag: &[T]) -> Result<Self> {
        Self::diag_impl(diag, AutogradInfo::var())
    }

    /// Like [`Tensor::linspace`], but the result is created with `AutogradInfo::var()`.
    #[inline]
    pub fn linspace_var(start: T, stop: T, num: usize) -> Result<Self> {
        Self::linspace_impl(start, stop, num, AutogradInfo::var())
    }

    /// Like [`Tensor::randn`], but the result is created with `AutogradInfo::var()`.
    #[inline]
    pub fn randn_var<S: Into<Shape>>(mean: T, std: T, shape: S) -> Result<Self> {
        Self::randn_impl(mean, std, shape, AutogradInfo::var())
    }

    /// Like [`Tensor::rand`], but the result is created with `AutogradInfo::var()`.
    #[inline]
    pub fn rand_var<S: Into<Shape>>(min: T, max: T, shape: S) -> Result<Self> {
        Self::rand_impl(min, max, shape, AutogradInfo::var())
    }
}
430
431impl Tensor<bool> {
432    /// Creates a boolean array filled with `true`.
433    ///
434    /// ```rust
435    /// use lumen_core::Tensor;
436    ///
437    /// let a = Tensor::trues((2, 2)).unwrap();
438    /// println!("{}", a);
439    /// ```
440    pub fn trues<S: Into<Shape>>(shape: S) -> Result<Self> {
441        let shape: Shape = shape.into();
442        let storage = Storage::new(vec![true; shape.element_count()]);
443        Ok(Self::from_storage(storage, shape, Default::default()))
444    }
445
446    /// Creates a boolean array filled with `false`.
447    pub fn falses<S: Into<Shape>>(shape: S) -> Result<Self> {
448        let shape: Shape = shape.into();
449        let storage = Storage::new(vec![false; shape.element_count()]);
450        Ok(Self::from_storage(storage, shape, Default::default()))
451    }
452}
453
/// Conversion trait for values that can be turned into a `Tensor`:
/// scalars, fixed-size arrays (up to 4-D), slices and `Vec`s.
pub trait ToTensor<T> {
    /// Returns the shape the resulting tensor will have.
    fn shape(&self) -> Result<Shape>;
    /// Consumes `self` and produces the flat element storage.
    fn to_storage(self) -> Result<Storage<T>>;
}
458
459impl<D: WithDType> ToTensor<D> for D {
460    fn shape(&self) -> Result<Shape> {
461        Ok(Shape::scalar())
462    }
463
464    fn to_storage(self) -> Result<Storage<D>> {
465        Ok(Storage::new([self].to_vec()))
466    }
467}
468
469impl<S: WithDType, const N: usize> ToTensor<S> for &[S; N] {
470    fn shape(&self) -> Result<Shape> {
471        Ok(Shape::from(self.len()))    
472    }
473
474    fn to_storage(self) -> Result<Storage<S>> {
475        Ok(Storage::new(self.to_vec()))
476    }
477}
478
479impl<S: WithDType, const N: usize> ToTensor<S> for [S; N] {
480    fn shape(&self) -> Result<Shape> {
481        Ok(Shape::from(self.len()))    
482    }
483
484    fn to_storage(self) -> Result<Storage<S>> {
485        Ok(Storage::new(self.to_vec()))
486    }
487}
488
489impl<S: WithDType> ToTensor<S> for &[S] {
490    fn shape(&self) -> Result<Shape> {
491        Ok(Shape::from(self.len()))    
492    }
493
494    fn to_storage(self) -> Result<Storage<S>> {
495        Ok(Storage::new(self.to_vec()))
496    }
497}
498
499impl<S: WithDType, const N1: usize, const N2: usize> ToTensor<S> 
500    for &[[S; N2]; N1] 
501{
502    fn shape(&self) -> Result<Shape> {
503        Ok(Shape::from((N1, N2)))
504    }
505
506    fn to_storage(self) -> Result<Storage<S>> {
507        Ok(Storage::new(self.concat()))
508    }
509}
510
511impl<S: WithDType, const N1: usize, const N2: usize, const N3: usize> ToTensor<S>
512    for &[[[S; N3]; N2]; N1] 
513{
514    fn shape(&self) -> Result<Shape> {
515        Ok(Shape::from((N1, N2, N3)))
516    }
517
518    fn to_storage(self) -> Result<Storage<S>> {
519        let mut vec = Vec::with_capacity(N1 * N2 * N3);
520        for i1 in 0..N1 {
521            for i2 in 0..N2 {
522                vec.extend(self[i1][i2])
523            }
524        }
525        Ok(Storage::new(vec))
526
527    }
528}
529
530impl<S: WithDType, const N1: usize, const N2: usize, const N3: usize, const N4: usize> ToTensor<S>
531    for &[[[[S; N4]; N3]; N2]; N1]
532{
533    fn shape(&self) -> Result<Shape> {
534        Ok(Shape::from((N1, N2, N3, N4)))
535    }
536
537    fn to_storage(self) -> Result<Storage<S>> {
538        let mut vec = Vec::with_capacity(N1 * N2 * N3 * N4);
539        for i1 in 0..N1 {
540            for i2 in 0..N2 {
541                for i3 in 0..N3 {
542                    vec.extend(self[i1][i2][i3])
543                }
544            }
545        }
546        Ok(Storage::new(vec))
547    }
548}
549
/// A `Vec` becomes a rank-1 tensor of its length; the buffer is moved, not copied.
impl<S: WithDType> ToTensor<S> for Vec<S> {
    fn shape(&self) -> Result<Shape> {
        Ok(Shape::from(self.len()))    
    }

    fn to_storage(self) -> Result<Storage<S>> {
        Ok(Storage::new(self))

    }
}
560
#[cfg(test)]
mod test {
    use crate::Tensor;

    // A unit shape `()` should construct a scalar (0-dim) tensor.
    #[test]
    fn test_shape() {
        let t = Tensor::<f64>::ones(()).unwrap();
        println!("{}", t);
    }

    // `meta` tensors carry shape information but no storage.
    #[test]
    fn test_meta() {
        let t = Tensor::<f64>::meta((1, 2, 3)).unwrap();
        assert!(t.is_meta());
        println!("{}", t);
    }

    // Arithmetic on a storage-less (meta) tensor is expected to panic.
    #[should_panic]
    #[test]
    fn test_meta_op() {
        let t = Tensor::<f64>::meta((1, 2, 3)).unwrap();
        assert!(t.is_meta());
        let _ = t + 1.0;
    }
}
585}