/*
    Appellation: activate <module>
    Contrib: FL03 <jo3mccain@icloud.com>
*/
//! This module implements various activation functions for neural networks.
//!
//! ## Traits
//!
//! - [Heavyside]
//! - [LinearActivation]
//! - [Sigmoid]
//! - [Softmax]
//! - [ReLU]
//! - [Tanh]
//!
#[doc(inline)]
pub use self::prelude::*;

pub(crate) mod traits;

mod impls {
    mod impl_binary;
    mod impl_linear;
    mod impl_nonlinear;
}

pub(crate) mod prelude {
    pub use super::traits::*;
    pub use super::{Activate, ActivateGradient, BinaryAction};
}

/// The [BinaryAction] trait generalizes [Activate] to activation functions that
/// operate on a pair of arguments.
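///
/// # Example
///
/// A minimal sketch; the `Gate` struct here is hypothetical and not part of this
/// crate, and the import path assumes this module is re-exported as
/// `concision_core::activate`:
///
/// ```ignore
/// use concision_core::activate::BinaryAction;
///
/// /// passes `lhs` through whenever `rhs` is positive
/// struct Gate;
///
/// impl BinaryAction<f64> for Gate {
///     type Output = f64;
///
///     fn activate(&self, lhs: f64, rhs: f64) -> Self::Output {
///         if rhs > 0.0 { lhs } else { 0.0 }
///     }
/// }
///
/// assert_eq!(Gate.activate(3.0, 1.0), 3.0);
/// assert_eq!(Gate.activate(3.0, -1.0), 0.0);
/// ```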
pub trait BinaryAction<A, B = A> {
    type Output;

    fn activate(&self, lhs: A, rhs: B) -> Self::Output;
}

/// The [Activate] trait enables the definition of new activation functions, which
/// are often implemented as _fieldless_ structs.
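///
/// # Example
///
/// A minimal sketch of a custom activation; the `ReLU` struct is local to the
/// example, and the import path assumes this module is re-exported as
/// `concision_core::activate`:
///
/// ```ignore
/// use concision_core::activate::Activate;
///
/// /// A fieldless struct implementing the rectified linear unit.
/// struct ReLU;
///
/// impl Activate<f64> for ReLU {
///     type Output = f64;
///
///     fn activate(&self, rhs: f64) -> Self::Output {
///         rhs.max(0.0)
///     }
/// }
///
/// assert_eq!(ReLU.activate(-1.5), 0.0);
/// assert_eq!(ReLU.activate(2.0), 2.0);
/// ```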
pub trait Activate<Rhs = Self> {
    type Output;

    fn activate(&self, rhs: Rhs) -> Self::Output;
}

/// The [ActivateGradient] trait extends [Activate] with the gradient of the
/// activation function, as required for backpropagation: `Input` is the argument
/// type accepted by the forward pass, while `Delta` is the type of the computed
/// gradient.
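///
/// # Example
///
/// A minimal sketch of a differentiable activation; the `Sigmoid` struct is local
/// to the example, and the import path assumes this module is re-exported as
/// `concision_core::activate`:
///
/// ```ignore
/// use concision_core::activate::{Activate, ActivateGradient};
///
/// struct Sigmoid;
///
/// impl Activate<f64> for Sigmoid {
///     type Output = f64;
///
///     fn activate(&self, rhs: f64) -> Self::Output {
///         1.0 / (1.0 + (-rhs).exp())
///     }
/// }
///
/// impl ActivateGradient<f64> for Sigmoid {
///     type Input = f64;
///     type Delta = f64;
///
///     // d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x))
///     fn activate_gradient(&self, rhs: f64) -> Self::Delta {
///         let y = self.activate(rhs);
///         y * (1.0 - y)
///     }
/// }
///
/// assert_eq!(Sigmoid.activate_gradient(0.0), 0.25);
/// ```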
pub trait ActivateGradient<Rhs = Self>: Activate<Self::Input> {
    type Input;
    type Delta;

    fn activate_gradient(&self, rhs: Rhs) -> Self::Delta;
}

/// Boxed activation functions delegate to the underlying implementation, enabling
/// dynamic dispatch over heterogeneous activations.
impl<X, Y> Activate<X> for Box<dyn Activate<X, Output = Y>> {
    type Output = Y;

    fn activate(&self, rhs: X) -> Self::Output {
        self.as_ref().activate(rhs)
    }
}

/// Any function or closure from `X` to `Y` may be used directly as an activation
/// function.
impl<X, Y, F> Activate<X> for F
where
    F: Fn(X) -> Y,
{
    type Output = Y;

    fn activate(&self, rhs: X) -> Self::Output {
        self(rhs)
    }
}