1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
//! Cumulative operations trait.
use crate::error::{Error, Result};
use crate::runtime::Runtime;
use crate::tensor::Tensor;
/// Cumulative operations
pub trait CumulativeOps<R: Runtime> {
    /// Cumulative sum along a dimension
    ///
    /// Produces a tensor of the same shape where each element holds the
    /// running total of the input along `dim`.
    /// For input [a, b, c, d], output is [a, a+b, a+b+c, a+b+c+d].
    ///
    /// # Arguments
    ///
    /// * `a` - Input tensor
    /// * `dim` - Dimension to scan along (negative values index from the end)
    ///
    /// # Returns
    ///
    /// Tensor with the same shape as `a` holding the running sums
    ///
    /// # Example
    ///
    /// ```
    /// # use numr::prelude::*;
    /// # let device = CpuDevice::new();
    /// # let client = CpuRuntime::default_client(&device);
    /// let a = Tensor::<CpuRuntime>::from_slice(&[1.0, 2.0, 3.0, 4.0], &[4], &device);
    /// let result = client.cumsum(&a, 0)?; // [1, 3, 6, 10]
    /// # Ok::<(), numr::error::Error>(())
    /// ```
    fn cumsum(&self, a: &Tensor<R>, dim: isize) -> Result<Tensor<R>> {
        // Default stub: backends that support scans override this.
        let _ = a;
        let _ = dim;
        Err(Error::NotImplemented {
            feature: "CumulativeOps::cumsum",
        })
    }

    /// Cumulative product along a dimension
    ///
    /// Produces a tensor of the same shape where each element holds the
    /// running product of the input along `dim`.
    /// For input [a, b, c, d], output is [a, a*b, a*b*c, a*b*c*d].
    ///
    /// # Arguments
    ///
    /// * `a` - Input tensor
    /// * `dim` - Dimension to scan along (negative values index from the end)
    ///
    /// # Returns
    ///
    /// Tensor with the same shape as `a` holding the running products
    ///
    /// # Example
    ///
    /// ```
    /// # use numr::prelude::*;
    /// # let device = CpuDevice::new();
    /// # let client = CpuRuntime::default_client(&device);
    /// let a = Tensor::<CpuRuntime>::from_slice(&[1.0, 2.0, 3.0, 4.0], &[4], &device);
    /// let result = client.cumprod(&a, 0)?; // [1, 2, 6, 24]
    /// # Ok::<(), numr::error::Error>(())
    /// ```
    fn cumprod(&self, a: &Tensor<R>, dim: isize) -> Result<Tensor<R>> {
        // Default stub: backends that support scans override this.
        let _ = a;
        let _ = dim;
        Err(Error::NotImplemented {
            feature: "CumulativeOps::cumprod",
        })
    }

    /// Log-sum-exp along specified dimensions (numerically stable)
    ///
    /// Evaluates log(sum(exp(x))) using the stable shifted form:
    /// logsumexp(x) = max(x) + log(sum(exp(x - max(x))))
    ///
    /// A common building block for softmax and log-probability arithmetic.
    ///
    /// # Arguments
    ///
    /// * `a` - Input tensor
    /// * `dims` - Dimensions to reduce over
    /// * `keepdim` - If true, reduced dimensions remain in the output with size 1
    ///
    /// # Returns
    ///
    /// Tensor containing the log-sum-exp of `a` over `dims`
    ///
    /// # Example
    ///
    /// ```
    /// # use numr::prelude::*;
    /// # let device = CpuDevice::new();
    /// # let client = CpuRuntime::default_client(&device);
    /// let a = Tensor::<CpuRuntime>::from_slice(&[1.0, 2.0, 3.0], &[3], &device);
    /// let result = client.logsumexp(&a, &[0], false)?;
    /// // result ≈ log(exp(1) + exp(2) + exp(3)) ≈ 3.4076
    /// # Ok::<(), numr::error::Error>(())
    /// ```
    fn logsumexp(&self, a: &Tensor<R>, dims: &[usize], keepdim: bool) -> Result<Tensor<R>> {
        // Default stub: backends that support this reduction override it.
        let _ = a;
        let _ = dims;
        let _ = keepdim;
        Err(Error::NotImplemented {
            feature: "CumulativeOps::logsumexp",
        })
    }
}