#ifndef ACTIVATION_LAYER_HPP
#define ACTIVATION_LAYER_HPP

#include "layer.hpp"
#include "math.hpp"

namespace Layer{

  /** Enumeration type for the different implemented activation maps. */
  enum ActivationMap{
    Sigmoid /**< \f$x\mapsto \frac{1}{1+e^{-x}}\f$ */
  };

  /** Activation map. */
  template<ActivationMap A> Real activation_map(Real);

  /** Derivative of the activation map. */
  template<ActivationMap A> Real activation_diff_map(Real);

  /**
   * Class for an activation layer.
   * The output vector is obtained by applying the activation map to each entry of the input vector.
   */
  template<ActivationMap A> class ActivationLayer: public Layer{
  public:
    ActivationLayer(const size_t);
    ~ActivationLayer(){}

    /** \f$y[i]:=\alpha(x[i])\f$ where \f$\alpha\f$ is the activation map. */
    Vector feed_forward(Vector x) override;

    /** Does nothing: an activation layer has no parameters. */
    void init_nabla() override {}

    /** \f$d[i]:=\alpha'(x[i])\times e[i]\f$ where \f$\alpha\f$ is the activation map and \f$e\f$ the difference output vector. */
    Vector back_propagation(Vector e) override;

    /** Does nothing: an activation layer has no parameters to update. */
    void update(Real eta) override {}
  };

  template<ActivationMap A> inline
  ActivationLayer<A>::ActivationLayer(const size_t n): Layer(n,n){
  }

  template<ActivationMap A> inline Vector
  ActivationLayer<A>::feed_forward(Vector x_){
    // x, y and n are inherited from Layer; input and output sizes are equal.
    x=x_;
    for(size_t i=0;i<n;++i){
      y[i]=activation_map<A>(x[i]);
    }
    return y;
  }

  template<ActivationMap A> inline Vector
  ActivationLayer<A>::back_propagation(Vector e){
    // d is the inherited delta vector propagated to the previous layer.
    for(size_t i=0;i<n;++i){
      d[i]=activation_diff_map<A>(x[i])*e[i];
    }
    return d;
  }

  template<> inline Real
  activation_map<Sigmoid>(Real x){
    return 1.0/(1.0+exp(-x));
  }

  template<> inline Real
  activation_diff_map<Sigmoid>(Real x){
    // sigma'(x) = sigma(x)*(1 - sigma(x))
    Real t=activation_map<Sigmoid>(x);
    return t*(1.0-t);
  }
}

#endif
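
// Usage sketch (illustrative, not part of the original header). It assumes
// math.hpp defines Real and a Vector type constructible from a size and
// indexable with operator[]; those names appear above, but the exact Vector
// interface is an assumption.
//
//   #include "activation_layer.hpp"
//
//   int main(){
//     Layer::ActivationLayer<Layer::Sigmoid> act(3);  // layer of size 3
//     Vector x(3);
//     x[0]=-1.0; x[1]=0.0; x[2]=1.0;
//     Vector y=act.feed_forward(x);      // y[i] = 1/(1+exp(-x[i]))
//     Vector e(3);
//     e[0]=e[1]=e[2]=1.0;
//     Vector d=act.back_propagation(e);  // d[i] = y[i]*(1-y[i])*e[i]
//     return 0;
//   }
//
// Because feed_forward stores its input in x, back_propagation must be called
// after feed_forward so that the derivative is evaluated at the same point.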