use runmat_accelerate_api::GpuTensorHandle;
use runmat_builtins::{CharArray, ComplexTensor, Tensor, Value};
use runmat_macros::runtime_builtin;
use crate::builtins::common::spec::{
BroadcastSemantics, BuiltinFusionSpec, BuiltinGpuSpec, ConstantStrategy, FusionError,
FusionExprContext, FusionKernelTemplate, GpuOpKind, ProviderHook, ReductionNaN,
ResidencyPolicy, ScalarType, ShapeRequirements,
};
use crate::builtins::common::{gpu_helpers, tensor};
use crate::builtins::math::type_resolvers::numeric_unary_type;
use crate::{build_runtime_error, BuiltinResult, RuntimeError};
const BUILTIN_NAME: &str = "sinh";
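/// GPU spec for `sinh`: an elementwise op over f32/f64 tensors with MATLAB
/// broadcast semantics, dispatched through the `unary_sinh` provider hook and
/// producing a new device handle.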
#[runmat_macros::register_gpu_spec(builtin_path = "crate::builtins::math::trigonometry::sinh")]
pub const GPU_SPEC: BuiltinGpuSpec = BuiltinGpuSpec {
name: "sinh",
op_kind: GpuOpKind::Elementwise,
supported_precisions: &[ScalarType::F32, ScalarType::F64],
broadcast: BroadcastSemantics::Matlab,
provider_hooks: &[ProviderHook::Unary { name: "unary_sinh" }],
constant_strategy: ConstantStrategy::InlineLiteral,
residency: ResidencyPolicy::NewHandle,
nan_mode: ReductionNaN::Include,
two_pass_threshold: None,
workgroup_size: None,
accepts_nan_mode: false,
notes:
"Providers may execute sinh directly on the device; runtimes gather to the host when unary_sinh is unavailable.",
};
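/// Build a `RuntimeError` tagged with this builtin's name so every failure
/// path reports `sinh` consistently.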
fn runtime_error_for(message: impl Into<String>) -> RuntimeError {
build_runtime_error(message)
.with_builtin(BUILTIN_NAME)
.build()
}
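/// Fusion spec for `sinh`: lets the fusion planner inline a WGSL `sinh(x)`
/// call into generated elementwise kernels instead of launching a separate pass.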
#[runmat_macros::register_fusion_spec(builtin_path = "crate::builtins::math::trigonometry::sinh")]
pub const FUSION_SPEC: BuiltinFusionSpec = BuiltinFusionSpec {
name: "sinh",
shape: ShapeRequirements::BroadcastCompatible,
constant_strategy: ConstantStrategy::InlineLiteral,
elementwise: Some(FusionKernelTemplate {
scalar_precisions: &[ScalarType::F32, ScalarType::F64],
wgsl_body: |ctx: &FusionExprContext| {
let input = ctx.inputs.first().ok_or(FusionError::MissingInput(0))?;
Ok(format!("sinh({input})"))
},
}),
reduction: None,
emits_nan: false,
notes: "Fusion planner emits WGSL `sinh` calls; providers may override via fused elementwise kernels.",
};
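/// Runtime entry point: dispatches on the incoming `Value` variant (GPU tensor,
/// complex scalar/tensor, char array, string, or real numeric input).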
#[runtime_builtin(
name = "sinh",
category = "math/trigonometry",
summary = "Hyperbolic sine of scalars, vectors, matrices, or N-D tensors (element-wise).",
keywords = "sinh,hyperbolic,trigonometry,gpu",
accel = "unary",
type_resolver(numeric_unary_type),
builtin_path = "crate::builtins::math::trigonometry::sinh"
)]
async fn sinh_builtin(value: Value) -> BuiltinResult<Value> {
match value {
Value::GpuTensor(handle) => sinh_gpu(handle).await,
Value::Complex(re, im) => Ok(Value::Complex(
sinh_complex_re(re, im),
sinh_complex_im(re, im),
)),
Value::ComplexTensor(ct) => sinh_complex_tensor(ct),
Value::CharArray(ca) => sinh_char_array(ca),
Value::String(_) | Value::StringArray(_) => {
Err(runtime_error_for("sinh: expected numeric input"))
}
other => sinh_real(other),
}
}
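/// GPU path: prefer the provider's `unary_sinh` hook; if no provider is bound
/// or the hook fails, gather to the host and fall back to the CPU kernel.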
async fn sinh_gpu(handle: GpuTensorHandle) -> BuiltinResult<Value> {
if let Some(provider) = runmat_accelerate_api::provider_for_handle(&handle) {
if let Ok(out) = provider.unary_sinh(&handle).await {
return Ok(Value::GpuTensor(out));
}
}
let tensor = gpu_helpers::gather_tensor_async(&handle).await?;
sinh_tensor(tensor).map(tensor::tensor_into_value)
}
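/// Host path for real inputs: coerce the value into a `Tensor`, apply `sinh`
/// element-wise, and convert back into a `Value` (1x1 results become scalars).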
fn sinh_real(value: Value) -> BuiltinResult<Value> {
let tensor = tensor::value_into_tensor_for("sinh", value).map_err(runtime_error_for)?;
sinh_tensor(tensor).map(tensor::tensor_into_value)
}
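/// Apply `f64::sinh` to every element, preserving the input shape.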
fn sinh_tensor(tensor: Tensor) -> BuiltinResult<Tensor> {
let data = tensor.data.iter().map(|&v| v.sinh()).collect::<Vec<_>>();
Tensor::new(data, tensor.shape.clone()).map_err(|e| runtime_error_for(format!("sinh: {e}")))
}
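/// Element-wise complex sinh over the tensor's (re, im) pairs.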
fn sinh_complex_tensor(ct: ComplexTensor) -> BuiltinResult<Value> {
let mapped = ct
.data
.iter()
.map(|&(re, im)| (sinh_complex_re(re, im), sinh_complex_im(re, im)))
.collect::<Vec<_>>();
let tensor = ComplexTensor::new(mapped, ct.shape.clone())
.map_err(|e| runtime_error_for(format!("sinh: {e}")))?;
Ok(Value::ComplexTensor(tensor))
}
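/// Char arrays are converted to their numeric code points (`ch as u32 as f64`)
/// before applying `sinh`, yielding a real double tensor.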
fn sinh_char_array(ca: CharArray) -> BuiltinResult<Value> {
let data = ca
.data
.iter()
.map(|&ch| (ch as u32 as f64).sinh())
.collect::<Vec<_>>();
let tensor = Tensor::new(data, vec![ca.rows, ca.cols])
.map_err(|e| runtime_error_for(format!("sinh: {e}")))?;
Ok(Value::Tensor(tensor))
}
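/// Real part of the complex identity sinh(a + bi) = sinh(a)cos(b) + i*cosh(a)sin(b).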
#[inline]
fn sinh_complex_re(re: f64, im: f64) -> f64 {
re.sinh() * im.cos()
}
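/// Imaginary part of the complex identity: cosh(a)sin(b).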
#[inline]
fn sinh_complex_im(re: f64, im: f64) -> f64 {
re.cosh() * im.sin()
}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
use futures::executor::block_on;
use runmat_builtins::{IntValue, ResolveContext, Tensor, Type};
use crate::builtins::common::test_support;
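/// Synchronous wrapper so tests can drive the async builtin with `block_on`.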
fn sinh_builtin(value: Value) -> BuiltinResult<Value> {
block_on(super::sinh_builtin(value))
}
#[test]
fn sinh_type_preserves_tensor_shape() {
let out = numeric_unary_type(
&[Type::Tensor {
shape: Some(vec![Some(2), Some(3)]),
}],
&ResolveContext::new(Vec::new()),
);
assert_eq!(
out,
Type::Tensor {
shape: Some(vec![Some(2), Some(3)])
}
);
}
#[test]
fn sinh_type_scalar_tensor_returns_num() {
let out = numeric_unary_type(
&[Type::Tensor {
shape: Some(vec![Some(1), Some(1)]),
}],
&ResolveContext::new(Vec::new()),
);
assert_eq!(out, Type::Num);
}
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
#[test]
fn sinh_scalar() {
let value = Value::Num(1.0);
let result = sinh_builtin(value).expect("sinh");
match result {
Value::Num(v) => assert!((v - 1.0f64.sinh()).abs() < 1e-12),
other => panic!("expected scalar result, got {other:?}"),
}
}
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
#[test]
fn sinh_tensor_elements() {
let tensor = Tensor::new(vec![-1.0, 0.0, 1.0], vec![3, 1]).unwrap();
let result = sinh_builtin(Value::Tensor(tensor)).expect("sinh");
match result {
Value::Tensor(t) => {
assert_eq!(t.shape, vec![3, 1]);
let expected = [(-1.0f64).sinh(), 0.0, 1.0f64.sinh()];
for (got, exp) in t.data.iter().zip(expected.iter()) {
assert!((got - exp).abs() < 1e-12);
}
}
other => panic!("expected tensor result, got {other:?}"),
}
}
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
#[test]
fn sinh_int_value_promotes() {
let value = Value::Int(IntValue::I32(1));
let result = sinh_builtin(value).expect("sinh");
match result {
Value::Num(v) => assert!((v - 1.0f64.sinh()).abs() < 1e-12),
other => panic!("expected scalar result, got {other:?}"),
}
}
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
#[test]
fn sinh_complex_scalar() {
let result = sinh_builtin(Value::Complex(1.0, 2.0)).expect("sinh");
match result {
Value::Complex(re, im) => {
assert!((re - sinh_complex_re(1.0, 2.0)).abs() < 1e-12);
assert!((im - sinh_complex_im(1.0, 2.0)).abs() < 1e-12);
}
other => panic!("expected complex result, got {other:?}"),
}
}
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
#[test]
fn sinh_char_array_roundtrip() {
let chars = CharArray::new("abc".chars().collect(), 1, 3).unwrap();
let result = sinh_builtin(Value::CharArray(chars)).expect("sinh");
match result {
Value::Tensor(t) => {
assert_eq!(t.shape, vec![1, 3]);
for (idx, ch) in ['a', 'b', 'c'].into_iter().enumerate() {
let expected = (ch as u32 as f64).sinh();
assert!((t.data[idx] - expected).abs() < 1e-12);
}
}
other => panic!("expected tensor result, got {other:?}"),
}
}
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
#[test]
fn sinh_gpu_provider_roundtrip() {
test_support::with_test_provider(|provider| {
let tensor = Tensor::new(vec![0.0, 0.5, 1.0, 1.5], vec![4, 1]).unwrap();
let view = runmat_accelerate_api::HostTensorView {
data: &tensor.data,
shape: &tensor.shape,
};
let handle = provider.upload(&view).expect("upload");
let result = sinh_builtin(Value::GpuTensor(handle)).expect("sinh");
let gathered = test_support::gather(result).expect("gather");
let expected: Vec<f64> = tensor.data.iter().map(|&v| v.sinh()).collect();
assert_eq!(gathered.shape, vec![4, 1]);
assert_eq!(gathered.data, expected);
});
}
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
#[test]
#[cfg(feature = "wgpu")]
fn sinh_wgpu_matches_cpu_elementwise() {
let _ = runmat_accelerate::backend::wgpu::provider::register_wgpu_provider(
runmat_accelerate::backend::wgpu::provider::WgpuProviderOptions::default(),
);
let t = Tensor::new(vec![0.0, 0.25, 0.5, 0.75], vec![4, 1]).unwrap();
let cpu = sinh_real(Value::Tensor(t.clone())).unwrap();
let view = runmat_accelerate_api::HostTensorView {
data: &t.data,
shape: &t.shape,
};
let h = runmat_accelerate_api::provider()
.unwrap()
.upload(&view)
.unwrap();
let gpu = block_on(sinh_gpu(h)).unwrap();
let gathered = test_support::gather(gpu).expect("gather");
match (cpu, gathered) {
(Value::Tensor(ct), gt) => {
assert_eq!(gt.shape, ct.shape);
let tol = match runmat_accelerate_api::provider().unwrap().precision() {
runmat_accelerate_api::ProviderPrecision::F64 => 1e-12,
runmat_accelerate_api::ProviderPrecision::F32 => 1e-5,
};
for (a, b) in gt.data.iter().zip(ct.data.iter()) {
assert!((a - b).abs() < tol, "|{} - {}| >= {}", a, b, tol);
}
}
_ => panic!("unexpected shapes"),
}
}
}