Expand description
GhostFlow Neural Network Layers
High-level building blocks for neural networks.
Re-exports§
pub use module::Module;pub use linear::Linear;pub use conv::Conv1d;pub use conv::Conv2d;pub use conv::Conv3d;pub use conv::TransposeConv2d;pub use norm::BatchNorm1d;pub use norm::BatchNorm2d;pub use norm::LayerNorm;pub use norm::GroupNorm;pub use norm::InstanceNorm;pub use dropout::Dropout;pub use attention::MultiHeadAttention;pub use attention::scaled_dot_product_attention;pub use transformer::TransformerEncoder;pub use transformer::TransformerEncoderLayer;pub use transformer::TransformerDecoderLayer;pub use transformer::FeedForward;pub use transformer::PositionalEncoding;pub use transformer::RotaryEmbedding;pub use embedding::Embedding;pub use rnn::LSTM;pub use rnn::LSTMCell;pub use rnn::GRU;pub use rnn::GRUCell;pub use quantization::QuantizedTensor;pub use quantization::QuantizationConfig;pub use quantization::QuantizationScheme;pub use quantization::QuantizationAwareTraining;pub use quantization::DynamicQuantization;pub use distributed::DistributedConfig;pub use distributed::DistributedBackend;pub use distributed::DataParallel;pub use distributed::ModelParallel;pub use distributed::GradientAccumulator;pub use distributed::DistributedDataParallel;pub use distributed::PipelineParallel;pub use serialization::ModelCheckpoint;pub use serialization::ModelMetadata;pub use serialization::save_model;pub use serialization::load_model;pub use gnn::Graph;pub use gnn::GCNLayer;pub use gnn::GATLayer;pub use gnn::GraphSAGELayer;pub use gnn::MPNNLayer;pub use gnn::AggregatorType;pub use rl::ReplayBuffer;pub use rl::Experience;pub use rl::DQNAgent;pub use rl::QNetwork;pub use rl::PolicyNetwork;pub use rl::REINFORCEAgent;pub use rl::ActorCriticAgent;pub use rl::ValueNetwork;pub use rl::PPOAgent;pub use federated::FederatedClient;pub use federated::FederatedServer;pub use federated::AggregationStrategy;pub use federated::SecureAggregation;pub use federated::DifferentialPrivacy;pub use onnx::ONNXModel;pub use onnx::ONNXNode;pub use onnx::ONNXTensor;pub use onnx::ONNXDataType;
pub use onnx::ONNXAttribute;pub use onnx::tensor_to_onnx;pub use onnx::onnx_to_tensor;pub use inference::InferenceConfig;pub use inference::InferenceOptimizer;pub use inference::InferenceSession;pub use inference::BatchInference;pub use inference::warmup_model;pub use differential_privacy::DPConfig;pub use differential_privacy::PrivacyAccountant;pub use differential_privacy::DPSGDOptimizer;pub use differential_privacy::PATEEnsemble;pub use differential_privacy::LocalDP;pub use adversarial::AttackConfig;pub use adversarial::AttackType;pub use adversarial::AdversarialAttack;pub use adversarial::AdversarialTrainingConfig;pub use adversarial::AdversarialTrainer;pub use adversarial::RandomizedSmoothing;pub use activation::*;pub use loss::*;pub use pooling::*;
Modules§
- activation
- Activation function modules
- adversarial
- Adversarial Training and Robustness
- attention
- Attention mechanisms
- conv
- Convolutional layers
- differential_privacy
- Differential Privacy for Machine Learning
- distributed
- Distributed Training
- dropout
- Dropout regularization
- embedding
- Embedding layers
- federated
- Federated Learning
- gnn
- Graph Neural Networks (GNN) module
- inference
- Inference optimization utilities
- init
- Weight initialization strategies
- linear
- Linear (fully connected) layer
- loss
- Loss functions
- module
- Base Module trait for neural network layers
- norm
- Normalization layers
- onnx
- ONNX export and import functionality
- pooling
- Pooling layers
- prelude
- Prelude for convenient imports
- quantization
- Model Quantization
- rl
- Reinforcement Learning module
- rnn
- Recurrent Neural Network Layers
- serialization
- Model Serialization
- transformer
- Transformer architecture components