Skip to main content

Crate torsh_functional

Crate torsh_functional 

Source
Expand description

Functional API for ToRSh

This module provides functional operations similar to torch.functional, including tensor manipulation, mathematical operations, and utilities.

For comprehensive performance optimization guidance, see the separate Performance Tuning Guide documentation.

Re-exports§

pub use activations::celu;
pub use activations::elu;
pub use activations::gelu;
pub use activations::gumbel_softmax;
pub use activations::hardshrink;
pub use activations::hardsigmoid;
pub use activations::hardsigmoid_v2;
pub use activations::hardswish;
pub use activations::hardtanh;
pub use activations::leaky_relu;
pub use activations::log_sigmoid;
pub use activations::log_softmax;
pub use activations::mish;
pub use activations::prelu;
pub use activations::relu;
pub use activations::relu6;
pub use activations::rrelu;
pub use activations::selu;
pub use activations::sigmoid;
pub use activations::silu;
pub use activations::softmax;
pub use activations::softmin;
pub use activations::softplus;
pub use activations::softshrink;
pub use activations::softsign;
pub use activations::tanh;
pub use activations::tanhshrink;
pub use activations::threshold;
pub use loss::binary_cross_entropy;
pub use loss::binary_cross_entropy_with_logits;
pub use loss::contrastive_loss;
pub use loss::cosine_embedding_loss;
pub use loss::cross_entropy;
pub use loss::cross_entropy_with_label_smoothing;
pub use loss::ctc_loss;
pub use loss::focal_loss;
pub use loss::gaussian_nll_loss;
pub use loss::hinge_embedding_loss;
pub use loss::kl_div;
pub use loss::l1_loss;
pub use loss::margin_ranking_loss;
pub use loss::mse_loss;
pub use loss::multi_margin_loss;
pub use loss::nll_loss;
pub use loss::poisson_nll_loss;
pub use loss::smooth_l1_loss;
pub use loss::triplet_margin_loss;
pub use loss::triplet_margin_with_distance_loss;
pub use loss::ReductionType;
pub use conv::conv1d;
pub use conv::conv2d;
pub use conv::conv3d;
pub use conv::conv_output_size;
pub use conv::conv_transpose1d;
pub use conv::conv_transpose2d;
pub use conv::conv_transpose3d;
pub use conv::conv_transpose_output_size;
pub use conv::depthwise_conv2d;
pub use conv::fold;
pub use conv::separable_conv2d;
pub use conv::unfold;
pub use pooling::adaptive_avg_pool1d;
pub use pooling::adaptive_avg_pool2d;
pub use pooling::adaptive_avg_pool3d;
pub use pooling::adaptive_max_pool1d;
pub use pooling::adaptive_max_pool2d;
pub use pooling::adaptive_max_pool3d;
pub use pooling::avg_pool1d;
pub use pooling::avg_pool2d;
pub use pooling::avg_pool3d;
pub use pooling::fractional_max_pool2d;
pub use pooling::global_avg_pool;
pub use pooling::global_max_pool;
pub use pooling::learnable_pool2d;
pub use pooling::lp_pool1d;
pub use pooling::lp_pool2d;
pub use pooling::max_pool1d;
pub use pooling::max_pool2d;
pub use pooling::max_pool3d;
pub use pooling::max_unpool1d;
pub use pooling::max_unpool2d;
pub use pooling::max_unpool3d;
pub use pooling::spatial_pyramid_pool2d;
pub use pooling::stochastic_pool2d;
pub use normalization::batch_norm;
pub use normalization::group_norm;
pub use normalization::instance_norm;
pub use normalization::layer_norm;
pub use normalization::local_response_norm;
pub use normalization::normalize;
pub use normalization::weight_norm;
pub use dropout::alpha_dropout;
pub use dropout::dropout;
pub use dropout::dropout1d;
pub use dropout::dropout2d;
pub use dropout::dropout3d;
pub use dropout::feature_alpha_dropout;
pub use dropout::gaussian_dropout;
pub use attention::cross_attention;
pub use attention::flash_attention;
pub use attention::multi_head_attention;
pub use attention::scaled_dot_product_attention;
pub use attention::self_attention;
pub use regularization::consistency_penalty;
pub use regularization::gradient_penalty;
pub use regularization::r1_gradient_penalty;
pub use regularization::r2_gradient_penalty;
pub use regularization::spectral_gradient_penalty;
pub use advanced_nn::cutmix;
pub use advanced_nn::darts_operation;
pub use advanced_nn::decode_architecture;
pub use advanced_nn::differentiable_augment;
pub use advanced_nn::encode_architecture;
pub use advanced_nn::knowledge_distillation_loss;
pub use advanced_nn::label_smoothing;
pub use advanced_nn::mixup;
pub use advanced_nn::mutate_architecture;
pub use advanced_nn::predict_architecture_performance;
pub use advanced_nn::spectral_norm;
pub use advanced_nn::temperature_scale;
pub use advanced_nn::weight_standardization;
pub use broadcast::broadcast_shapes;
pub use broadcast::broadcast_tensors;
pub use linalg::baddbmm;
pub use linalg::bmm;
pub use linalg::chain_matmul;
pub use linalg::cholesky;
pub use linalg::cond;
pub use linalg::det;
pub use linalg::eig;
pub use linalg::inv;
pub use linalg::lstsq;
pub use linalg::lu;
pub use linalg::matrix_rank;
pub use linalg::norm;
pub use linalg::pca_lowrank;
pub use linalg::pinv;
pub use linalg::qr;
pub use linalg::solve;
pub use linalg::svd;
pub use linalg::svd_lowrank;
pub use linalg::triangular_solve;
pub use linalg::NormOrd;
pub use manipulation::atleast_1d;
pub use manipulation::atleast_2d;
pub use manipulation::atleast_3d;
pub use manipulation::block_diag;
pub use manipulation::cartesian_prod;
pub use manipulation::chunk;
pub use manipulation::dsplit;
pub use manipulation::hsplit;
pub use manipulation::meshgrid;
pub use manipulation::split;
pub use manipulation::tensor_split;
pub use manipulation::tensordot;
pub use manipulation::unravel_index;
pub use manipulation::vsplit;
pub use manipulation::SplitArg;
pub use manipulation::TensorSplitArg;
pub use math::cdist;
pub use math::einsum;
pub use reduction::unique;
pub use reduction::unique_consecutive;
pub use reduction::UniqueResult;
pub use spectral::cepstrum;
pub use spectral::create_mel_filterbank;
pub use spectral::fftn;
pub use spectral::generate_window;
pub use spectral::hfft;
pub use spectral::hz_to_mel;
pub use spectral::ifftn;
pub use spectral::ihfft;
pub use spectral::irfft;
pub use spectral::istft;
pub use spectral::istft_complete;
pub use spectral::mel_spectrogram;
pub use spectral::mel_to_hz;
pub use spectral::rfft2;
pub use spectral::rfftn;
pub use spectral::spectral_centroid;
pub use spectral::spectral_rolloff;
pub use spectral::spectrogram;
pub use spectral::stft;
pub use spectral::stft_complete;
pub use spectral::SpectrogramType;
pub use spectral::WindowFunction;
pub use tensor_ops::cosine_similarity;
pub use tensor_ops::embedding;
pub use tensor_ops::linear;
pub use tensor_ops::one_hot;
pub use tensor_ops::pairwise_distance;
pub use tensor_ops::pixel_shuffle;
pub use tensor_ops::pixel_unshuffle;
pub use image::affine_transform;
pub use image::closing;
pub use image::dilation;
pub use image::erosion;
pub use image::gaussian_blur;
pub use image::hsv_to_rgb;
pub use image::laplacian_filter;
pub use image::opening;
pub use image::resize;
pub use image::rgb_to_hsv;
pub use image::sobel_filter;
pub use image::InterpolationMode;
pub use image::SobelDirection;
pub use signal::correlate;
pub use signal::filtfilt;
pub use signal::frame;
pub use signal::lfilter;
pub use signal::overlap_add;
pub use signal::periodogram;
pub use signal::welch;
pub use signal::window;
pub use signal::CorrelationMode;
pub use signal::PsdScaling;
pub use signal::WindowType;
pub use data_ops::bincount;
pub use data_ops::histogram;
pub use data_ops::histogram_with_edges;
pub use data_ops::unique as unique_values;
pub use data_ops::value_counts;
pub use random_ops::bernoulli;
pub use random_ops::bernoulli_;
pub use random_ops::exponential_;
pub use random_ops::multinomial;
pub use random_ops::normal_;
pub use random_ops::rand;
pub use random_ops::randint;
pub use random_ops::randint_;
pub use random_ops::randn;
pub use random_ops::randperm;
pub use random_ops::uniform_;
pub use type_promotion::can_cast_safely;
pub use type_promotion::common_dtype_for_operation;
pub use type_promotion::ensure_compatible_types;
pub use type_promotion::get_type_category;
pub use type_promotion::get_type_precision;
pub use type_promotion::promote_multiple_types;
pub use type_promotion::promote_scalar_type;
pub use type_promotion::promote_tensors;
pub use type_promotion::promote_types;
pub use type_promotion::reduction_result_type;
pub use type_promotion::result_type;
pub use type_promotion::TypeCategory;
pub use fusion::analyze_fusion_opportunities;
pub use fusion::detect_fusible_patterns;
pub use fusion::fused_add_mul;
pub use fusion::fused_add_relu_mul;
pub use fusion::fused_batch_norm;
pub use fusion::fused_mul_add;
pub use fusion::fused_relu_add;
pub use fusion::fused_sigmoid_mul;
pub use fusion::fused_silu;
pub use fusion::fused_tanh_scale;
pub use fusion::AdaptiveFusionEngine;
pub use fusion::FusedOp;
pub use fusion::FusionOpportunity;
pub use fusion::FusionPerformance;
pub use fusion::OpFusionEngine;
pub use fusion::OpSequence;
pub use special::airy_ai;
pub use special::bessel_iv;
pub use special::betainc;
pub use special::dawson;
pub use special::erfcinv;
pub use special::expint;
pub use special::hypergeometric_1f1;
pub use special::kelvin_ber;
pub use special::logsumexp;
pub use special::multigammaln;
pub use special::normal_cdf;
pub use special::normal_icdf;
pub use special::spherical_j0;
pub use special::spherical_j1;
pub use special::spherical_jn;
pub use special::spherical_y0;
pub use special::spherical_y1;
pub use special::spherical_yn;
pub use special::voigt_profile;
pub use wavelet::cwt;
pub use wavelet::dwt_1d;
pub use wavelet::dwt_2d;
pub use wavelet::idwt_1d;
pub use wavelet::idwt_2d;
pub use wavelet::wavedec;
pub use wavelet::waverec;
pub use wavelet::WaveletMode;
pub use wavelet::WaveletType;
pub use interpolation::barycentric_interp;
pub use interpolation::grid_sample;
pub use interpolation::interp1d;
pub use interpolation::interp2d;
pub use interpolation::lanczos_interp1d;
pub use interpolation::spline1d;
pub use interpolation::InterpolationMode as InterpMode;
pub use numerical::adaptive_quad;
pub use numerical::bisection;
pub use numerical::cumtrapz;
pub use numerical::gaussian_quad;
pub use numerical::gradient;
pub use numerical::newton_raphson;
pub use numerical::partial_derivative;
pub use numerical::second_derivative;
pub use numerical::simps;
pub use numerical::trapz;
pub use numerical::DifferentiationMethod;
pub use numerical::IntegrationMethod;
pub use optimization::adam_optimizer;
pub use optimization::analyze_optimization_problem;
pub use optimization::auto_configure_optimization;
pub use optimization::gradient_descent;
pub use optimization::lbfgs_optimizer;
pub use optimization::momentum_gradient_descent;
pub use optimization::AdamParams;
pub use optimization::AdaptiveAlgorithmSelector;
pub use optimization::BFGSParams;
pub use optimization::BacktrackingParams;
pub use optimization::GradientDescentParams;
pub use optimization::LineSearchMethod;
pub use optimization::MomentumParams;
pub use optimization::OptimizationAlgorithm;
pub use optimization::TensorCharacteristics;
pub use optimization::WolfeParams;
pub use lazy::lazy_ops::execute;
pub use lazy::lazy_ops::lazy;
pub use lazy::lazy_ops::with_optimization;
pub use lazy::LazyBuilder;
pub use lazy::LazyContext;
pub use lazy::LazyOp;
pub use lazy::LazyTensor;
pub use advanced_manipulation::boolean_index;
pub use advanced_manipulation::cat;
pub use advanced_manipulation::masked_fill;
pub use advanced_manipulation::pad;
pub use advanced_manipulation::reshape;
pub use advanced_manipulation::slice_with_step;
pub use advanced_manipulation::squeeze;
pub use advanced_manipulation::unsqueeze;
pub use advanced_manipulation::where_tensor;
pub use advanced_manipulation::PaddingMode;
pub use quantization::dynamic_quantize;
pub use quantization::fake_quantize;
pub use quantization::gradual_magnitude_prune;
pub use quantization::lottery_ticket_prune;
pub use quantization::magnitude_prune;
pub use quantization::quantization_error_analysis;
pub use quantization::uniform_dequantize;
pub use quantization::uniform_quantize;
pub use quantization::weight_clustering;
pub use quantization::QuantizationScheme;
pub use quantization::QuantizationType;
pub use sparse::sparse_add;
pub use sparse::sparse_conv1d;
pub use sparse::sparse_conv2d;
pub use sparse::sparse_coo_tensor;
pub use sparse::sparse_eye;
pub use sparse::sparse_max;
pub use sparse::sparse_mean;
pub use sparse::sparse_min;
pub use sparse::sparse_mm;
pub use sparse::sparse_mul;
pub use sparse::sparse_sum;
pub use sparse::sparse_to_csr;
pub use sparse::sparse_transpose;
pub use sparse::SparseTensor;
pub use autograd::apply_custom_function;
pub use autograd::apply_custom_function_with_context;
pub use autograd::apply_registered_function;
pub use autograd::get_global_registry;
pub use autograd::register_custom_function;
pub use autograd::AutogradContext;
pub use autograd::AutogradRegistry;
pub use autograd::CustomAutogradFunction;
pub use autograd::CustomAutogradFunctionWithContext;
pub use autograd::ExpFunction;
pub use autograd::ScaledAddFunction;
pub use autograd::SquareFunction;
pub use profiling::benchmark;
pub use profiling::global_profiler;
pub use profiling::profile_operation;
pub use profiling::run_performance_regression_test;
pub use profiling::BaselineSummary;
pub use profiling::BenchmarkConfig;
pub use profiling::BenchmarkResults;
pub use profiling::OperationMetrics;
pub use profiling::OperationSummary;
pub use profiling::PerformanceBaseline;
pub use profiling::PerformanceRegressionTester;
pub use profiling::Profiler;
pub use profiling::RegressionTestConfig;
pub use profiling::RegressionTestResult;
pub use profiling::SystemInfo;
pub use utils::apply_binary_elementwise;
pub use utils::apply_conditional_elementwise;
pub use utils::apply_elementwise_operation;
pub use utils::calculate_pooling_output_size;
pub use utils::calculate_pooling_output_size_2d;
pub use utils::calculate_pooling_output_size_3d;
pub use utils::create_tensor_like;
pub use utils::function_context;
pub use utils::safe_for_log;
pub use utils::safe_log;
pub use utils::safe_log_prob;
pub use utils::validate_broadcastable_shapes;
pub use utils::validate_dimension;
pub use utils::validate_elementwise_shapes;
pub use utils::validate_loss_params;
pub use utils::validate_non_empty;
pub use utils::validate_pooling_params;
pub use utils::validate_positive;
pub use utils::validate_range;
pub use utils::validate_tensor_dims;
pub use transformations::einsum_optimized;
pub use transformations::tensor_contract;
pub use transformations::tensor_fold;
pub use transformations::tensor_map;
pub use transformations::tensor_outer;
pub use transformations::tensor_reduce;
pub use transformations::tensor_scan;
pub use transformations::tensor_zip;
pub use tensor_decomposition::cp_decomposition;
pub use tensor_decomposition::tucker_decomposition;

Modules§

activation_lookup
Lookup table optimizations for activation functions
activations
Activation Functions for Neural Networks
advanced_manipulation
Advanced Tensor Manipulation Utilities
advanced_nn
Advanced Neural Network Operations
api_patterns
Standardized API patterns and conventions for torsh-functional
attention
Attention Mechanisms for Neural Networks
autograd
Custom autograd function creation utilities
broadcast
Broadcasting utilities
conv
Convolution Operations for Neural Networks
data_ops
Data operations including unique, bincount, and histogram
dropout
Dropout and regularization functions for neural networks
fusion
Operation fusion module for optimizing functional operation patterns
gamma
Gamma and related functions
image
Image Processing Operations
interpolation
Interpolation functions for tensor operations
lazy
Lazy evaluation system for chained functional operations
linalg
Linear algebra operations module
loss
Loss functions for neural networks
manipulation
Tensor manipulation operations module
math
Mathematical operations
normalization
Normalization functions for neural networks
numerical
Numerical integration and differentiation operations
optimization
Optimization utilities for tensor operations
parallel
Multi-threaded execution for large tensor operations
pooling
Pooling operations organized by functionality
profiling
Performance profiling and benchmarking framework
quantization
Quantization and Compression Functions
random_ops
Random sampling operations module
reduction
Reduction Operations for Tensors
regularization
Regularization functions for training stability
signal
Signal Processing Operations
sparse
Sparse tensor operations for ToRSh functional API
special
Special mathematical functions for neural networks
spectral
Spectral operations (FFT, STFT, etc.)
spectral_advanced
Advanced spectral operations - Extended FFT variants and spectral analysis
spectral_analysis
Comprehensive spectral analysis functions
spectral_stft
Complete STFT/ISTFT implementation with windowing and overlap-add
tensor_decomposition
Tensor Decomposition Operations
tensor_ops
Core tensor operations such as embedding, linear, one-hot encoding, similarity and distance measures, and pixel shuffling
transformations
Advanced Functional Transformations with SciRS2
type_promotion
Type promotion utilities for functional operations
utils
Utility functions for torsh-functional
wavelet
Wavelet transform operations

Macros§

create_custom_autograd_function
Macro for creating simple custom autograd functions
profile
Macro for easy profiling of operations

Constants§

VERSION
VERSION_MAJOR
VERSION_MINOR
VERSION_PATCH

Functions§

acosh
Inverse hyperbolic cosine
align_tensors
Align tensors to have the same number of dimensions
asinh
Inverse hyperbolic sine
atanh
Inverse hyperbolic tangent
bessel_i0
Wrapper for modified Bessel function I₀ using SciRS2
bessel_i1
Wrapper for modified Bessel function I₁ using SciRS2
bessel_j0
Wrapper for Bessel function J₀ using SciRS2
bessel_j1
Wrapper for Bessel function J₁ using SciRS2
bessel_jn
Wrapper for Bessel function Jₙ using SciRS2
bessel_k0
Wrapper for modified Bessel function K₀ using SciRS2
bessel_k1
Wrapper for modified Bessel function K₁ using SciRS2
bessel_y0
Wrapper for Bessel function Y₀ using SciRS2
bessel_y1
Wrapper for Bessel function Y₁ using SciRS2
bessel_yn
Wrapper for Bessel function Yₙ using SciRS2
beta
Wrapper for the beta function using SciRS2
digamma
Wrapper for the digamma function using SciRS2
erf
Wrapper for the error function using SciRS2
erfc
Wrapper for the complementary error function using SciRS2
erfcx
Wrapper for the scaled complementary error function using SciRS2
erfinv
Wrapper for the inverse error function using SciRS2
expm1
Exponential minus one (exp(x) - 1) for better numerical stability
fresnel
Compute both Fresnel integrals S(x) and C(x) simultaneously
fresnel_c
Wrapper for Fresnel cosine integral using SciRS2
fresnel_s
Wrapper for Fresnel sine integral using SciRS2
gamma
Wrapper for the gamma function using SciRS2
lgamma
Wrapper for the log gamma function using SciRS2
log1p
Natural logarithm of one plus x (log(1 + x)) for better numerical stability
polygamma
Wrapper for the polygamma function using SciRS2
sinc
Wrapper for the sinc function using SciRS2