Skip to main content

SequentialModel

Struct SequentialModel 

Source
pub struct SequentialModel { /* private fields */ }
Expand description

Ordered stack of layers executed one-by-one.

Implementations§

Source§

impl SequentialModel

Source

pub fn new(graph: &Graph) -> Self

Creates an empty model and records the current graph prefix as the persistent base.

Source

pub fn add_linear( &mut self, graph: &mut Graph, in_features: usize, out_features: usize, weight_init: Tensor, bias_init: Tensor, ) -> Result<(), ModelError>

Source

pub fn add_linear_zero( &mut self, graph: &mut Graph, in_features: usize, out_features: usize, ) -> Result<(), ModelError>

Source

pub fn add_relu(&mut self)

Source

pub fn add_leaky_relu(&mut self, negative_slope: f32) -> Result<(), ModelError>

Source

pub fn add_sigmoid(&mut self)

Source

pub fn add_tanh(&mut self)

Source

pub fn add_gelu(&mut self)

Source

pub fn add_silu(&mut self)

Source

pub fn add_mish(&mut self)

Source

pub fn add_prelu(&mut self, alpha: Vec<f32>)

Source

pub fn add_dropout(&mut self, rate: f32) -> Result<(), ModelError>

Source

pub fn add_conv2d( &mut self, in_channels: usize, out_channels: usize, kernel_h: usize, kernel_w: usize, stride_h: usize, stride_w: usize, weight: Tensor, bias: Option<Tensor>, ) -> Result<(), ModelError>

Source

pub fn add_conv2d_zero( &mut self, in_channels: usize, out_channels: usize, kernel_h: usize, kernel_w: usize, stride_h: usize, stride_w: usize, use_bias: bool, ) -> Result<(), ModelError>

Source

pub fn add_deformable_conv2d( &mut self, in_channels: usize, out_channels: usize, kernel_h: usize, kernel_w: usize, stride: usize, padding: usize, weight: Tensor, offset_weight: Tensor, bias: Option<Tensor>, ) -> Result<(), ModelError>

Source

pub fn add_deformable_conv2d_zero( &mut self, in_channels: usize, out_channels: usize, kernel_h: usize, kernel_w: usize, stride: usize, padding: usize, use_bias: bool, ) -> Result<(), ModelError>

Source

pub fn add_depthwise_conv2d( &mut self, channels: usize, kernel_h: usize, kernel_w: usize, stride_h: usize, stride_w: usize, weight: Tensor, bias: Option<Tensor>, ) -> Result<(), ModelError>

Source

pub fn add_depthwise_conv2d_zero( &mut self, channels: usize, kernel_h: usize, kernel_w: usize, stride_h: usize, stride_w: usize, use_bias: bool, ) -> Result<(), ModelError>

Source

pub fn add_separable_conv2d( &mut self, in_channels: usize, out_channels: usize, kernel_h: usize, kernel_w: usize, stride_h: usize, stride_w: usize, depthwise_weight: Tensor, pointwise_weight: Tensor, bias: Option<Tensor>, ) -> Result<(), ModelError>

Source

pub fn add_separable_conv2d_zero( &mut self, in_channels: usize, out_channels: usize, kernel_h: usize, kernel_w: usize, stride_h: usize, stride_w: usize, use_bias: bool, ) -> Result<(), ModelError>

Source

pub fn add_batch_norm2d( &mut self, num_features: usize, epsilon: f32, gamma: Tensor, beta: Tensor, running_mean: Tensor, running_var: Tensor, ) -> Result<(), ModelError>

Source

pub fn add_batch_norm2d_identity( &mut self, num_features: usize, epsilon: f32, ) -> Result<(), ModelError>

Source

pub fn add_max_pool2d( &mut self, kernel_h: usize, kernel_w: usize, stride_h: usize, stride_w: usize, ) -> Result<(), ModelError>

Source

pub fn add_avg_pool2d( &mut self, kernel_h: usize, kernel_w: usize, stride_h: usize, stride_w: usize, ) -> Result<(), ModelError>

Source

pub fn add_flatten(&mut self)

Source

pub fn add_global_avg_pool2d(&mut self)

Source

pub fn add_softmax(&mut self)

Source

pub fn add_embedding( &mut self, graph: &mut Graph, num_embeddings: usize, embedding_dim: usize, weight_init: Tensor, ) -> Result<(), ModelError>

Source

pub fn add_layer_norm( &mut self, graph: &mut Graph, normalized_shape: usize, eps: f32, ) -> Result<(), ModelError>

Source

pub fn add_group_norm( &mut self, graph: &mut Graph, num_groups: usize, num_channels: usize, eps: f32, ) -> Result<(), ModelError>

Source

pub fn apply_lora( &mut self, graph: &mut Graph, config: &LoraConfig, ) -> Result<usize, ModelError>

Replaces all Linear layers with LoRA-adapted versions. The original weights are frozen; only the low-rank A and B matrices are trainable. Returns the number of layers converted.

Source

pub fn merge_lora(&mut self, graph: &mut Graph) -> Result<usize, ModelError>

Merges all LoRA layers back into regular Linear layers. Call this after fine-tuning to run inference without the LoRA overhead. Returns the number of layers merged.

Source

pub fn optimize(&mut self, graph: &mut Graph) -> usize

Optimizes the model by fusing Conv+BN layers. This reduces the number of layers and operations for faster inference. Should be called before inference, after training is complete. Returns the number of fusions performed.

Source

pub fn set_training(&mut self, training: bool)

Sets the training/eval mode for all dropout layers.

Source

pub fn eval(&mut self)

Switches to evaluation mode (disables dropout).

Source

pub fn train_mode(&mut self)

Switches to training mode (enables dropout).

Source

pub fn is_training(&self) -> bool

Returns whether the model is in training mode.

Source

pub fn summary(&self) -> String

Builds a human-readable summary of the model architecture.

Shows each layer’s type, output shape info, and parameter count. Returns a String so callers can print or log it.

Source

pub fn num_parameters(&self) -> usize

Returns the total number of parameters (weights + biases) across all layers.

Source

pub fn freeze_layer(&mut self, index: usize) -> Result<(), ModelError>

Freeze the layer at index so it is excluded from trainable_parameters.

Source

pub fn unfreeze_layer(&mut self, index: usize) -> Result<(), ModelError>

Unfreeze the layer at index so it is included in trainable_parameters again.

Source

pub fn frozen_mask(&self) -> &[bool]

Returns a slice of booleans indicating which layers are frozen.

Source

pub fn trainable_parameters(&self) -> usize

Returns the total number of parameters in non-frozen layers.

Source

pub fn named_parameters<'a>( &'a self, graph: &'a Graph, ) -> Result<Vec<(String, &'a Tensor)>, ModelError>

Returns named parameter tensors from all layers.

For layers with graph-registered weights (Linear, Embedding, LayerNorm, GroupNorm, LoraLinear), the tensors are retrieved from the graph. For layers that own their tensors directly (Conv2d, BatchNorm2d, DepthwiseConv2d, SeparableConv2d, Conv1d, ConvTranspose2d), the tensors are accessed via the layer’s accessor methods.

Names follow the pattern "{type}{index}_{param}", e.g. "linear0_weight".

Source

pub fn layers(&self) -> &[ModelLayer]

Source

pub fn layers_mut(&mut self) -> &mut [ModelLayer]

Source

pub fn add_residual_block(&mut self, layers: Vec<ModelLayer>)

Adds a residual block wrapping the given layers.

Source

pub fn add_rnn(&mut self, input_size: usize, hidden_size: usize, seed: u64)

Source

pub fn add_lstm(&mut self, input_size: usize, hidden_size: usize, seed: u64)

Source

pub fn add_gru(&mut self, input_size: usize, hidden_size: usize, seed: u64)

Source

pub fn add_multi_head_attention( &mut self, d_model: usize, num_heads: usize, seed: u64, )

Source

pub fn add_transformer_encoder( &mut self, d_model: usize, num_heads: usize, d_ff: usize, seed: u64, )

Source

pub fn add_feed_forward(&mut self, d_model: usize, d_ff: usize, seed: u64)

Source

pub fn push_raw_layer(&mut self, layer: ModelLayer)

Pushes a pre-built layer directly (used by fusion for inference-only layers).

Source

pub fn forward( &self, graph: &mut Graph, input: NodeId, ) -> Result<NodeId, ModelError>

Source

pub fn forward_inference(&self, input: &Tensor) -> Result<Tensor, ModelError>

Pure-tensor inference forward pass (no autograd graph).

Supports Conv2d, BatchNorm2d, MaxPool2d, AvgPool2d, Flatten, Softmax, and simple activation layers (ReLU, Sigmoid, Tanh, LeakyReLU).

Source

pub fn register_cnn_params(&mut self, graph: &mut Graph)

Registers CNN layer parameters (Conv2d weight/bias, BatchNorm2d gamma/beta, etc.) as graph variables for autograd training.

Layers whose parameters are already registered (i.e. weight_node().is_some()) are skipped, so this method is safe to call multiple times.

Source

pub fn sync_cnn_from_graph(&mut self, graph: &Graph) -> Result<(), ModelError>

Synchronizes CNN layer owned tensors from the graph (e.g. after optimizer step).

Source

pub fn trainable_nodes(&self) -> Vec<NodeId>

Source

pub fn persistent_node_count(&self) -> usize

Source

pub fn checkpoint( &self, graph: &Graph, ) -> Result<SequentialCheckpoint, ModelError>

Source

pub fn from_checkpoint( graph: &mut Graph, checkpoint: &SequentialCheckpoint, ) -> Result<Self, ModelError>

Trait Implementations§

Source§

impl Clone for SequentialModel

Source§

fn clone(&self) -> SequentialModel

Returns a duplicate of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
Source§

impl Debug for SequentialModel

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more

Auto Trait Implementations§

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> IntoEither for T

Source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

impl<T> Pointable for T

Source§

const ALIGN: usize

The alignment of the pointer.
Source§

type Init = T

The type for initializers.
Source§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a pointer with the given initializer. Read more
Source§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
Source§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
Source§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.