pub struct SequentialModel { /* private fields */ }
Expand description
Ordered stack of layers executed one-by-one.
Implementations§
Source§
impl SequentialModel
impl SequentialModel
Source
pub fn new(graph: &Graph) -> Self
pub fn new(graph: &Graph) -> Self
Creates an empty model and records current graph prefix as persistent base.
pub fn add_linear( &mut self, graph: &mut Graph, in_features: usize, out_features: usize, weight_init: Tensor, bias_init: Tensor, ) -> Result<(), ModelError>
pub fn add_linear_zero( &mut self, graph: &mut Graph, in_features: usize, out_features: usize, ) -> Result<(), ModelError>
pub fn add_relu(&mut self)
pub fn add_leaky_relu(&mut self, negative_slope: f32) -> Result<(), ModelError>
pub fn add_sigmoid(&mut self)
pub fn add_tanh(&mut self)
pub fn add_gelu(&mut self)
pub fn add_silu(&mut self)
pub fn add_mish(&mut self)
pub fn add_prelu(&mut self, alpha: Vec<f32>)
pub fn add_dropout(&mut self, rate: f32) -> Result<(), ModelError>
pub fn add_conv2d( &mut self, in_channels: usize, out_channels: usize, kernel_h: usize, kernel_w: usize, stride_h: usize, stride_w: usize, weight: Tensor, bias: Option<Tensor>, ) -> Result<(), ModelError>
pub fn add_conv2d_zero( &mut self, in_channels: usize, out_channels: usize, kernel_h: usize, kernel_w: usize, stride_h: usize, stride_w: usize, use_bias: bool, ) -> Result<(), ModelError>
pub fn add_deformable_conv2d( &mut self, in_channels: usize, out_channels: usize, kernel_h: usize, kernel_w: usize, stride: usize, padding: usize, weight: Tensor, offset_weight: Tensor, bias: Option<Tensor>, ) -> Result<(), ModelError>
pub fn add_deformable_conv2d_zero( &mut self, in_channels: usize, out_channels: usize, kernel_h: usize, kernel_w: usize, stride: usize, padding: usize, use_bias: bool, ) -> Result<(), ModelError>
pub fn add_depthwise_conv2d( &mut self, channels: usize, kernel_h: usize, kernel_w: usize, stride_h: usize, stride_w: usize, weight: Tensor, bias: Option<Tensor>, ) -> Result<(), ModelError>
pub fn add_depthwise_conv2d_zero( &mut self, channels: usize, kernel_h: usize, kernel_w: usize, stride_h: usize, stride_w: usize, use_bias: bool, ) -> Result<(), ModelError>
pub fn add_separable_conv2d( &mut self, in_channels: usize, out_channels: usize, kernel_h: usize, kernel_w: usize, stride_h: usize, stride_w: usize, depthwise_weight: Tensor, pointwise_weight: Tensor, bias: Option<Tensor>, ) -> Result<(), ModelError>
pub fn add_separable_conv2d_zero( &mut self, in_channels: usize, out_channels: usize, kernel_h: usize, kernel_w: usize, stride_h: usize, stride_w: usize, use_bias: bool, ) -> Result<(), ModelError>
pub fn add_batch_norm2d( &mut self, num_features: usize, epsilon: f32, gamma: Tensor, beta: Tensor, running_mean: Tensor, running_var: Tensor, ) -> Result<(), ModelError>
pub fn add_batch_norm2d_identity( &mut self, num_features: usize, epsilon: f32, ) -> Result<(), ModelError>
pub fn add_max_pool2d( &mut self, kernel_h: usize, kernel_w: usize, stride_h: usize, stride_w: usize, ) -> Result<(), ModelError>
pub fn add_avg_pool2d( &mut self, kernel_h: usize, kernel_w: usize, stride_h: usize, stride_w: usize, ) -> Result<(), ModelError>
pub fn add_flatten(&mut self)
pub fn add_global_avg_pool2d(&mut self)
pub fn add_softmax(&mut self)
pub fn add_embedding( &mut self, graph: &mut Graph, num_embeddings: usize, embedding_dim: usize, weight_init: Tensor, ) -> Result<(), ModelError>
pub fn add_layer_norm( &mut self, graph: &mut Graph, normalized_shape: usize, eps: f32, ) -> Result<(), ModelError>
pub fn add_group_norm( &mut self, graph: &mut Graph, num_groups: usize, num_channels: usize, eps: f32, ) -> Result<(), ModelError>
Source
pub fn apply_lora(
&mut self,
graph: &mut Graph,
config: &LoraConfig,
) -> Result<usize, ModelError>
pub fn apply_lora( &mut self, graph: &mut Graph, config: &LoraConfig, ) -> Result<usize, ModelError>
Replace all Linear layers with LoRA-adapted versions. The original weights are frozen; only the low-rank A and B matrices are trainable. Returns the number of layers converted.
Source
pub fn merge_lora(&mut self, graph: &mut Graph) -> Result<usize, ModelError>
pub fn merge_lora(&mut self, graph: &mut Graph) -> Result<usize, ModelError>
Merge all LoRA layers back into regular Linear layers. Call this after fine-tuning for inference without overhead. Returns the number of layers merged.
Source
pub fn optimize(&mut self, graph: &mut Graph) -> usize
pub fn optimize(&mut self, graph: &mut Graph) -> usize
Optimize the model by fusing Conv+BN layers. This reduces the number of layers and operations for faster inference. Should be called before inference, after training is complete. Returns the number of fusions performed.
Source
pub fn set_training(&mut self, training: bool)
pub fn set_training(&mut self, training: bool)
Set training/eval mode for all dropout layers.
Source
pub fn train_mode(&mut self)
pub fn train_mode(&mut self)
Switch to training mode (enables dropout).
Source
pub fn is_training(&self) -> bool
pub fn is_training(&self) -> bool
Returns whether the model is in training mode.
Source
pub fn summary(&self) -> String
pub fn summary(&self) -> String
Print a human-readable summary of the model architecture.
Shows each layer’s type, output shape info, and parameter count.
Returns a String so callers can print or log it.
Source
pub fn num_parameters(&self) -> usize
pub fn num_parameters(&self) -> usize
Returns the total number of parameters (weights + biases) across all layers.
Source
pub fn freeze_layer(&mut self, index: usize) -> Result<(), ModelError>
pub fn freeze_layer(&mut self, index: usize) -> Result<(), ModelError>
Freeze the layer at index so it is excluded from trainable_parameters.
Source
pub fn unfreeze_layer(&mut self, index: usize) -> Result<(), ModelError>
pub fn unfreeze_layer(&mut self, index: usize) -> Result<(), ModelError>
Unfreeze the layer at index so it is included in trainable_parameters again.
Source
pub fn frozen_mask(&self) -> &[bool]
pub fn frozen_mask(&self) -> &[bool]
Returns a slice of booleans indicating which layers are frozen.
Source
pub fn trainable_parameters(&self) -> usize
pub fn trainable_parameters(&self) -> usize
Returns the total number of parameters in non-frozen layers.
Source
pub fn named_parameters<'a>(
&'a self,
graph: &'a Graph,
) -> Result<Vec<(String, &'a Tensor)>, ModelError>
pub fn named_parameters<'a>( &'a self, graph: &'a Graph, ) -> Result<Vec<(String, &'a Tensor)>, ModelError>
Returns named parameter tensors from all layers.
For layers with graph-registered weights (Linear, Embedding, LayerNorm, GroupNorm, LoraLinear), the tensors are retrieved from the graph. For layers that own their tensors directly (Conv2d, BatchNorm2d, DepthwiseConv2d, SeparableConv2d, Conv1d, ConvTranspose2d), the tensors are accessed via the layer’s accessor methods.
Names follow the pattern "{type}{index}_{param}", e.g. "linear0_weight".
pub fn layers(&self) -> &[ModelLayer]
pub fn layers_mut(&mut self) -> &mut [ModelLayer]
Source
pub fn add_residual_block(&mut self, layers: Vec<ModelLayer>)
pub fn add_residual_block(&mut self, layers: Vec<ModelLayer>)
Adds a residual block wrapping the given layers.
pub fn add_rnn(&mut self, input_size: usize, hidden_size: usize, seed: u64)
pub fn add_lstm(&mut self, input_size: usize, hidden_size: usize, seed: u64)
pub fn add_gru(&mut self, input_size: usize, hidden_size: usize, seed: u64)
pub fn add_multi_head_attention( &mut self, d_model: usize, num_heads: usize, seed: u64, )
pub fn add_transformer_encoder( &mut self, d_model: usize, num_heads: usize, d_ff: usize, seed: u64, )
pub fn add_feed_forward(&mut self, d_model: usize, d_ff: usize, seed: u64)
Source
pub fn push_raw_layer(&mut self, layer: ModelLayer)
pub fn push_raw_layer(&mut self, layer: ModelLayer)
Push a pre-built layer directly (used by fusion for inference-only layers).
pub fn forward( &self, graph: &mut Graph, input: NodeId, ) -> Result<NodeId, ModelError>
Source
pub fn forward_inference(&self, input: &Tensor) -> Result<Tensor, ModelError>
pub fn forward_inference(&self, input: &Tensor) -> Result<Tensor, ModelError>
Pure-tensor inference forward pass (no autograd graph).
Supports Conv2d, BatchNorm2d, MaxPool2d, AvgPool2d, Flatten, Softmax, and simple activation layers (ReLU, Sigmoid, Tanh, LeakyReLU).
Source
pub fn register_cnn_params(&mut self, graph: &mut Graph)
pub fn register_cnn_params(&mut self, graph: &mut Graph)
Registers CNN layer parameters (Conv2d weight/bias, BatchNorm2d gamma/beta, etc.) as graph variables for autograd training.
Layers whose parameters are already registered (i.e. weight_node().is_some())
are skipped, so this method is safe to call multiple times.
Source
pub fn sync_cnn_from_graph(&mut self, graph: &Graph) -> Result<(), ModelError>
pub fn sync_cnn_from_graph(&mut self, graph: &Graph) -> Result<(), ModelError>
Synchronizes CNN layer owned tensors from the graph (e.g. after optimizer step).
pub fn trainable_nodes(&self) -> Vec<NodeId>
pub fn persistent_node_count(&self) -> usize
pub fn checkpoint( &self, graph: &Graph, ) -> Result<SequentialCheckpoint, ModelError>
pub fn from_checkpoint( graph: &mut Graph, checkpoint: &SequentialCheckpoint, ) -> Result<Self, ModelError>
Trait Implementations§
Source§
impl Clone for SequentialModel
impl Clone for SequentialModel
Source§
fn clone(&self) -> SequentialModel
fn clone(&self) -> SequentialModel
1.0.0 · Source§
fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more
Auto Trait Implementations§
impl Freeze for SequentialModel
impl RefUnwindSafe for SequentialModel
impl Send for SequentialModel
impl Sync for SequentialModel
impl Unpin for SequentialModel
impl UnsafeUnpin for SequentialModel
impl UnwindSafe for SequentialModel
Blanket Implementations§
Source§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§
fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§
impl<T> CloneToUninit for T
where
    T: Clone,
impl<T> CloneToUninit for T
where
    T: Clone,
Source§
impl<T> IntoEither for T
impl<T> IntoEither for T
Source§
fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more
Source§
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more