pub struct GRULayer { /* private fields */ }
Single-layer GRU (Gated Recurrent Unit) recurrent layer.
Accepts an input sequence and computes the final hidden state for each element in the batch.
- input[0] contains the features of the input sequence; it should have shape [T, N, data_dims], where T is the sequence length, N is the batch size, and data_dims is the input size.
- the output has shape [T, N, D * hidden_size], where D = 2 if the layer is bidirectional and D = 1 otherwise.
Depends on the following attributes:
- hidden_size - Number of neurons in the hidden layer
- direction - whether the RNN is bidirectional or forward-only
The final hidden state $h_t$ is computed by the following formulas:
$$
\begin{aligned}
r_t &= \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
z_t &= \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\
n_t &= \tanh(W_{in} x_t + b_{in} + r_t \odot (W_{hn} h_{(t-1)} + b_{hn})) \\
h_t &= (1 - z_t) \odot n_t + z_t \odot h_{(t-1)}
\end{aligned}
$$

where $x_t$ is the current input and $h_{(t-1)}$ is the previous (or initial) hidden state.
$W_{x?}$, $W_{h?}$ and $b_{?}$ are learned weights represented as matrices: $W_{x?} \in R^{N_h \times N_x}$, $W_{h?} \in R^{N_h \times N_h}$, $b_? \in R^{N_h}$.
$\odot$ denotes element-wise (Hadamard) multiplication.
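For reference, the cell update above can be written out directly. The following is a minimal, dependency-free Rust sketch of a single GRU step; the helper names (`gru_step`, `affine`, `sigmoid`) and the row-major weight layout are assumptions of this sketch, not the layer's actual blob layout or API.

```rust
// Illustrative only: one GRU cell step following the formulas above,
// written with plain Vec<f32>. Weight layout and names belong to this
// sketch, not to the OpenCV layer.

fn sigmoid(x: f32) -> f32 {
    1.0 / (1.0 + (-x).exp())
}

/// y = W * x + b, with W stored row-major as `n_out` rows of `n_in` values.
fn affine(w: &[f32], b: &[f32], x: &[f32], n_out: usize, n_in: usize) -> Vec<f32> {
    (0..n_out)
        .map(|i| b[i] + (0..n_in).map(|j| w[i * n_in + j] * x[j]).sum::<f32>())
        .collect()
}

/// One GRU step: given x_t and h_{t-1}, return h_t.
#[allow(clippy::too_many_arguments)]
fn gru_step(
    x: &[f32], h_prev: &[f32],
    w_ir: &[f32], b_ir: &[f32], w_hr: &[f32], b_hr: &[f32],
    w_iz: &[f32], b_iz: &[f32], w_hz: &[f32], b_hz: &[f32],
    w_in: &[f32], b_in: &[f32], w_hn: &[f32], b_hn: &[f32],
) -> Vec<f32> {
    let (nh, nx) = (h_prev.len(), x.len());
    let ir = affine(w_ir, b_ir, x, nh, nx);
    let hr = affine(w_hr, b_hr, h_prev, nh, nh);
    let iz = affine(w_iz, b_iz, x, nh, nx);
    let hz = affine(w_hz, b_hz, h_prev, nh, nh);
    let in_ = affine(w_in, b_in, x, nh, nx);
    let hn = affine(w_hn, b_hn, h_prev, nh, nh);
    (0..nh)
        .map(|i| {
            let r = sigmoid(ir[i] + hr[i]);       // reset gate r_t
            let z = sigmoid(iz[i] + hz[i]);       // update gate z_t
            let n = (in_[i] + r * hn[i]).tanh();  // candidate state n_t
            (1.0 - z) * n + z * h_prev[i]         // new hidden state h_t
        })
        .collect()
}
```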
Implementations
Trait Implementations
impl AlgorithmTrait for GRULayer
impl AlgorithmTraitConst for GRULayer
fn as_raw_Algorithm(&self) -> *const c_void
fn write(&self, fs: &mut FileStorage) -> Result<()>
fn write_with_name(&self, fs: &Ptr<FileStorage>, name: &str) -> Result<()>
fn empty(&self) -> Result<bool>
fn save(&self, filename: &str) -> Result<()>
fn get_default_name(&self) -> Result<String>
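Since save and empty come from the shared Algorithm interface, they can be used generically. A minimal sketch follows, assuming the crate's prelude re-exports AlgorithmTraitConst; the helper name persist_if_trained is illustrative only.

```rust
use opencv::{prelude::*, Result};

// Illustrative helper: serialize any Algorithm-backed object (such as a
// GRULayer) to a file, skipping objects that report themselves as empty.
fn persist_if_trained<A: AlgorithmTraitConst>(alg: &A, path: &str) -> Result<bool> {
    if alg.empty()? {
        return Ok(false); // nothing to store yet
    }
    alg.save(path)?; // writes the algorithm state to `path`
    Ok(true)
}
```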
impl Boxed for GRULayer
impl GRULayerTrait for GRULayer
fn as_raw_mut_GRULayer(&mut self) -> *mut c_void
impl GRULayerTraitConst for GRULayer
fn as_raw_GRULayer(&self) -> *const c_void
impl LayerTrait for GRULayer
fn as_raw_mut_Layer(&mut self) -> *mut c_void
fn set_blobs(&mut self, val: Vector<Mat>)
fn set_name(&mut self, val: &str)
fn set_type(&mut self, val: &str)
fn set_preferable_target(&mut self, val: i32)
fn finalize(
    &mut self,
    inputs: &dyn ToInputArray,
    outputs: &mut dyn ToOutputArray
) -> Result<()>
fn forward_mat(
    &mut self,
    input: &mut Vector<Mat>,
    output: &mut Vector<Mat>,
    internals: &mut Vector<Mat>
) -> Result<()>
Use Layer::forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) instead
fn forward(
    &mut self,
    inputs: &dyn ToInputArray,
    outputs: &mut dyn ToOutputArray,
    internals: &mut dyn ToOutputArray
) -> Result<()>
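A sketch of the calling convention only, under several stated assumptions: in practice a Layer is driven by dnn::Net, which computes output shapes and allocates the blobs. The sketch assumes the layer instance was obtained elsewhere (for example from a loaded Net), that Vector<Mat> provides the input/output array conversions in the opencv crate, that the layer is forward-only (D = 1), and that Mat::new_nd_with_default is available to pre-allocate the [T, N, hidden_size] output; the helper name forward_once and its parameters are illustrative.

```rust
use opencv::{
    core::{Mat, Scalar, Vector, CV_32F},
    prelude::*,
    Result,
};

// Illustrative only: run one forward pass through a Layer implementation
// whose output shape [t, n, hidden_size] is already known to the caller.
fn forward_once<L: LayerTrait>(
    layer: &mut L,
    input: Mat, // [T, N, data_dims] input blob
    t: i32,
    n: i32,
    hidden_size: i32,
) -> Result<Vector<Mat>> {
    let mut inputs = Vector::<Mat>::new();
    inputs.push(input);

    // Pre-allocate the output blob; dnn::Net normally does this for you.
    let mut outputs = Vector::<Mat>::new();
    outputs.push(Mat::new_nd_with_default(
        &[t, n, hidden_size],
        CV_32F,
        Scalar::all(0.),
    )?);

    let mut internals = Vector::<Mat>::new();

    // Let the layer finish initialization for these shapes, then compute
    // the output blobs in place.
    layer.finalize(&inputs, &mut outputs)?;
    layer.forward(&inputs, &mut outputs, &mut internals)?;
    Ok(outputs)
}
```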
fn try_quantize(
    &mut self,
    scales: &Vector<Vector<f32>>,
    zeropoints: &Vector<Vector<i32>>,
    params: &mut LayerParams
) -> Result<bool>
fn forward_fallback(
    &mut self,
    inputs: &dyn ToInputArray,
    outputs: &mut dyn ToOutputArray,
    internals: &mut dyn ToOutputArray
) -> Result<()>
fn finalize_mat_to(
    &mut self,
    inputs: &Vector<Mat>,
    outputs: &mut Vector<Mat>
) -> Result<()>
Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
fn finalize_mat(&mut self, inputs: &Vector<Mat>) -> Result<Vector<Mat>>
Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
fn run(
    &mut self,
    inputs: &Vector<Mat>,
    outputs: &mut Vector<Mat>,
    internals: &mut Vector<Mat>
) -> Result<()>
This method will be removed in a future release.