Struct wyrm::Variable
[−]
[src]
pub struct Variable<T> where
T: Node, { /* fields omitted */ }
Handle to a node in the computation graph. The underlying nodes are reference counted, so the handles can be freely cloned to use the nodes multiple times in the same graph.
Methods
impl<T> Variable<T> where
T: Node,
[src]
fn value(&self) -> Bor<T::Value>
[src]
Get the value of the node.
fn forward(&self)
[src]
Run the forward pass through the subgraph terminating at this node, recursing through the ancestor nodes.
fn zero_gradient(&self)
[src]
Zero the gradients. Must be called after a backward step or whenever inputs change.
fn needs_gradient(&self) -> bool
[src]
fn parameters(&self) -> Vec<Variable<ParameterNode>>
[src]
Return the parameters of the graph.
impl<T> Variable<T> where
T: Node<Value = Arr, InputGradient = Arr>,
[src]
fn boxed(&self) -> Variable<Rc<Node<Value = Arr, InputGradient = Arr>>>
[src]
Box the variable, erasing its specific type. Use to manage the complexity of variable types in deep computation graphs.
fn backward(&mut self, weight: f32)
[src]
Run the backward pass through the subgraph terminating at this node. The weight parameter scales the gradients.
fn square(&self) -> Variable<SquareNode<T>>
[src]
Square this variable.
fn scalar_sum(&self) -> Variable<SumNode<T>>
[src]
Sum this variable.
fn ln(&self) -> Variable<LogNode<T>>
[src]
Take the natural logarithm of this variable.
fn tanh(&self) -> Variable<TanhNode<T>>
[src]
Take the tanh of this variable.
fn t(&self) -> Variable<TransposeNode<T>>
[src]
Transpose this variable.
fn exp(&self) -> Variable<ExpNode<T>>
[src]
Exponentiate this variable.
fn softmax(&self) -> Variable<SoftmaxNode<T>>
[src]
Compute the softmax of this variable.
fn log_softmax(&self) -> Variable<LogSoftmaxNode<T>>
[src]
Compute the log-softmax of this variable.
fn sigmoid(&self) -> Variable<SigmoidNode<T>>
[src]
Compute the sigmoid of this variable.
fn vector_dot<S>(&self, other: &Variable<S>) -> Variable<VectorDotNode<T, S>> where
S: Node<Value = Arr, InputGradient = Arr>,
[src]
Compute the row-wise vector dot product of LHS and RHS.
fn dot<S>(&self, other: &Variable<S>) -> Variable<DotNode<T, S>> where
S: Node<Value = Arr, InputGradient = Arr>,
[src]
Compute the matrix multiplication of LHS and RHS.
fn stack<S>(
&self,
other: &Variable<S>,
axis: Axis
) -> Variable<ConcatenateNode<T, S>> where
S: Node<Value = Arr, InputGradient = Arr>,
[src]
Stack/concatenate LHS and RHS, either row-wise (ndarray::Axis(0)) or column-wise (ndarray::Axis(1)).
impl Variable<ParameterNode>
[src]
fn dense_gradient(&self) -> Option<Arr>
[src]
Return the (dense) gradient value of this node.
fn index(
&self,
index: &Variable<IndexInputNode>
) -> Variable<IndexNode<ParameterNode>>
[src]
Row-wise indexing of this parameter node. Primarily used to implement embedding layers.
impl<T> Variable<SparseCategoricalCrossentropyNode<T>> where
T: Node<Value = Arr, InputGradient = Arr>,
[src]
fn predictions(&self) -> Bor<Arr>
[src]
Return the log-softmax predictions from a sparse categorical cross-entropy node.
Calling .value() on the node returns the value of the loss; this function allows getting the predictions with low overhead.
Trait Implementations
impl<T: Debug> Debug for Variable<T> where
T: Node,
[src]
impl<T: Node> Clone for Variable<T>
[src]
fn clone(&self) -> Self
[src]
Returns a copy of the value. Read more
fn clone_from(&mut self, source: &Self)
1.0.0[src]
Performs copy-assignment from source. Read more
impl<'value> DataInput<&'value Arr> for Variable<ParameterNode>
[src]
impl<'value> DataInput<&'value Arr> for Variable<InputNode>
[src]
impl DataInput<f32> for Variable<InputNode>
[src]
impl<'value> DataInput<&'value [usize]> for Variable<IndexInputNode>
[src]
impl DataInput<usize> for Variable<IndexInputNode>
[src]
impl<LHS, RHS> Add<Variable<RHS>> for Variable<LHS> where
RHS: Node<Value = Arr, InputGradient = Arr>,
LHS: Node<Value = Arr, InputGradient = Arr>,
[src]
type Output = Variable<AddNode<LHS, RHS>>
The resulting type after applying the + operator.
fn add(self, other: Variable<RHS>) -> Self::Output
[src]
Performs the + operation.
impl<LHS, RHS> Sub<Variable<RHS>> for Variable<LHS> where
RHS: Node<Value = Arr, InputGradient = Arr>,
LHS: Node<Value = Arr, InputGradient = Arr>,
[src]
type Output = Variable<SubNode<LHS, RHS>>
The resulting type after applying the - operator.
fn sub(self, other: Variable<RHS>) -> Self::Output
[src]
Performs the - operation.
impl<LHS, RHS> Mul<Variable<RHS>> for Variable<LHS> where
RHS: Node<Value = Arr, InputGradient = Arr>,
LHS: Node<Value = Arr, InputGradient = Arr>,
[src]
type Output = Variable<MulNode<LHS, RHS>>
The resulting type after applying the * operator.
fn mul(self, other: Variable<RHS>) -> Self::Output
[src]
Performs the * operation.
impl<LHS, RHS> Div<Variable<RHS>> for Variable<LHS> where
RHS: Node<Value = Arr, InputGradient = Arr>,
LHS: Node<Value = Arr, InputGradient = Arr>,
[src]
type Output = Variable<DivNode<LHS, RHS>>
The resulting type after applying the / operator.
fn div(self, other: Variable<RHS>) -> Self::Output
[src]
Performs the / operation.
impl<T> Neg for Variable<T> where
T: Node<Value = Arr, InputGradient = Arr>,
[src]