#ifndef NETWORK_HPP
#define NETWORK_HPP

// The original angle-bracket include names were lost in extraction; these are
// the standard headers the declarations below require.
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <iostream>
#include <vector>

#include "dataset.hpp"
#include "vector.hpp"
#include "matrix.hpp"

using namespace std;

class Trainer;

double sigmoid(double x);
double sigmoid_prime(double x);
double cost_derivative(double a, double y);

class Network {
  friend class Trainer;
protected:
  size_t depth;          // number of layers
  vector<size_t> sizes;  // sizes[l] = number of neurons in layer l
  Matrix* weights;       // weights[l] connects layer l-1 to layer l
  Vector* biais;         // biais[l] = bias vector of layer l
  Vector* a;             // a[l] = activations of layer l
  Vector* z;             // z[l] = weighted inputs of layer l
  Vector* nabla_b;       // accumulated bias gradients
  Matrix* nabla_w;       // accumulated weight gradients
  Vector* delta;         // backpropagated error terms

  void shuffle(size_t* tab, size_t size);
  void compute_z(size_t l);
  void compute_a(size_t l);
  void compute_last_delta(const Vector& y);
  void compute_delta(size_t l);
  void init_nabla_b(size_t l);
  void init_nabla_w(size_t l);
  void update_nabla_b(size_t l);
  void update_nabla_w(size_t l);
  void update_b(size_t l, double eta_batch);
  void update_w(size_t l, double eta_batch);

public:
  template<typename... Sizes> Network(Sizes... _sizes);
  void init_normal_distribution(double m, double d);
  void init_standard();
  double* new_output_vector() const;
  const Vector& feed_forward(const Vector& x);
  double eval(Dataset* dataset);
  void train(Dataset* dataset, size_t nb_epochs, size_t batch_size, double eta);
  void update_batch(Dataset* dataset, size_t* indices, size_t begin, size_t end, double eta);
  void back_propagation(const Vector& x, const Vector& y, double eta);
  Vector hack(const Vector& x, const Vector& y, double eta, size_t nb_steps,
              void (*)(const Vector&));
};

inline double sigmoid(double x) {
  return 1.0 / (1.0 + exp(-x));
}

inline double sigmoid_prime(double x) {
  double t = sigmoid(x);
  return t * (1.0 - t);
}

template<typename... Sizes>
inline Network::Network(Sizes... _sizes) : sizes({(size_t)_sizes...}) {
  depth = sizes.size();
  // Biais vectors
  biais = new Vector[depth];
  for (size_t l = 0; l < depth; ++l) biais[l] = Vector(sizes[l]);
  // NOTE: the source file is truncated here; the allocations below are a
  // reconstruction from the member list above, assuming Vector(size_t) and
  // Matrix(rows, cols) constructors from vector.hpp and matrix.hpp.
  weights = new Matrix[depth];
  a       = new Vector[depth];
  z       = new Vector[depth];
  nabla_b = new Vector[depth];
  nabla_w = new Matrix[depth];
  delta   = new Vector[depth];
  for (size_t l = 0; l < depth; ++l) {
    a[l] = Vector(sizes[l]);
    z[l] = Vector(sizes[l]);
  }
  for (size_t l = 1; l < depth; ++l) {
    weights[l] = Matrix(sizes[l], sizes[l - 1]);
    nabla_w[l] = Matrix(sizes[l], sizes[l - 1]);
    nabla_b[l] = Vector(sizes[l]);
    delta[l]   = Vector(sizes[l]);
  }
}

#endif
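
// A minimal usage sketch (not part of the original header). The layer sizes,
// the Dataset instance and the hyper-parameters below are hypothetical; only
// the Network interface declared above is assumed. Kept as a comment so the
// header still compiles when included.
//
//   #include "network.hpp"
//
//   int main() {
//     Dataset* data = /* some Dataset implementation from dataset.hpp */;
//     Network net(784, 30, 10);            // variadic constructor: 3 layers
//     net.init_normal_distribution(0.0, 1.0);
//     net.train(data, 30, 10, 3.0);        // 30 epochs, batches of 10, eta = 3.0
//     cout << net.eval(data) << endl;      // evaluation score on the dataset
//     return 0;
//   }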