concision_core/activate/mod.rs

/*
    Appellation: activate <module>
    Contrib: FL03 <jo3mccain@icloud.com>
*/
//! This module implements various activation functions for neural networks.
//!
//! ## Traits
//!
//! - [Heavyside]
//! - [LinearActivation]
//! - [Sigmoid]
//! - [Softmax]
//! - [ReLU]
//! - [Tanh]
//!
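//! ## Usage
//!
//! A minimal sketch of applying an activation generically; the `apply` helper is
//! illustrative, and the import path assumes this module is reachable as
//! `concision_core::activate`. Any compatible closure works thanks to the blanket
//! `Fn` implementation defined below:
//!
//! ```
//! use concision_core::activate::Activate;
//!
//! fn apply<A: Activate<f64, Output = f64>>(f: &A, x: f64) -> f64 {
//!     f.activate(x)
//! }
//!
//! let double = |x: f64| 2.0 * x;
//! assert_eq!(apply(&double, 3.0), 6.0);
//! ```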
#[doc(inline)]
pub use self::prelude::*;

pub(crate) mod traits;

mod impls {
    mod impl_binary;
    mod impl_linear;
    mod impl_nonlinear;
}

pub(crate) mod prelude {
    pub use super::traits::*;
    pub use super::{Activate, ActivateGradient};
}

/// The [Activate] trait defines a common interface for activation functions, which are
/// typically implemented as _fieldless_ structs.
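///
/// # Example
///
/// A minimal sketch of a fieldless activation; the `Identity` struct below is purely
/// illustrative, and the import path assumes the module is reachable as
/// `concision_core::activate`:
///
/// ```
/// use concision_core::activate::Activate;
///
/// struct Identity;
///
/// impl<T> Activate<T> for Identity {
///     type Output = T;
///
///     fn activate(&self, rhs: T) -> Self::Output {
///         rhs
///     }
/// }
///
/// assert_eq!(Identity.activate(2.0_f64), 2.0);
/// ```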
pub trait Activate<Rhs = Self> {
    type Output;

    fn activate(&self, rhs: Rhs) -> Self::Output;
}

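/// The [ActivateGradient] trait extends [Activate] with the derivative of the
/// activation function, as required when backpropagating errors.
///
/// # Example
///
/// A minimal sketch for a sigmoid over `f64`; the `Sigmoid` struct below is purely
/// illustrative (independent of the crate's own [Sigmoid] trait), and the import path
/// assumes the module is reachable as `concision_core::activate`:
///
/// ```
/// use concision_core::activate::{Activate, ActivateGradient};
///
/// struct Sigmoid;
///
/// impl Activate<f64> for Sigmoid {
///     type Output = f64;
///
///     fn activate(&self, rhs: f64) -> Self::Output {
///         1.0 / (1.0 + (-rhs).exp())
///     }
/// }
///
/// impl ActivateGradient<f64> for Sigmoid {
///     type Input = f64;
///     type Delta = f64;
///
///     // sigma'(x) = sigma(x) * (1 - sigma(x))
///     fn activate_gradient(&self, rhs: f64) -> Self::Delta {
///         let y = self.activate(rhs);
///         y * (1.0 - y)
///     }
/// }
///
/// assert_eq!(Sigmoid.activate_gradient(0.0), 0.25);
/// ```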
pub trait ActivateGradient<Rhs = Self>: Activate<Self::Input> {
    type Input;
    type Delta;

    fn activate_gradient(&self, rhs: Rhs) -> Self::Delta;
}

/*
 ************* Implementations *************
*/

// forward [Activate] through a boxed trait object so dynamically dispatched
// activations can be used wherever the trait is expected
impl<X, Y> Activate<X> for Box<dyn Activate<X, Output = Y>> {
    type Output = Y;

    fn activate(&self, rhs: X) -> Self::Output {
        self.as_ref().activate(rhs)
    }
}

// blanket implementation: any compatible closure or function pointer may be used
// directly as an activation function
impl<X, Y, F> Activate<X> for F
where
    F: Fn(X) -> Y,
{
    type Output = Y;

    fn activate(&self, rhs: X) -> Self::Output {
        self(rhs)
    }
}