qp.h

//-----------------------------------------------------------------------------
// qp.h
//-----------------------------------------------------------------------------

#ifndef qp_h
#define qp_h

//-----------------------------------------------------------------------------

#include <iostream>               // ostream
#include <algorithm>              // fill max
#include <cmath>                  // sqrt
#include <vector>                 // vector
#include <utils/rnd_generators.h> // uniform_generator
#include <mlp.h>                  // neuron layer net

//-----------------------------------------------------------------------------

namespace qp
{
  //---------------------------------------------------------------------------
  // useful typedefs
  //---------------------------------------------------------------------------

  using mlp::real;
  using mlp::vector;

  using mlp::max_real;
  using mlp::min_real;

  using mlp::set;

  //---------------------------------------------------------------------------
  // useful constants
  //---------------------------------------------------------------------------

  const real eta_default    = 0.5;
  const real eta_floor      = 0.0001;
  const real alpha_default  = 0.9;
  const real lambda_default = 0.5;
  const real lambda0        = 0.1;
  const real backtrack_step = 0.5;
  const real me_floor       = 0.0001;
  const real mw_floor       = 0.0001;

  //---------------------------------------------------------------------------
  // neuron
  //---------------------------------------------------------------------------

  struct neuron
  {
    mlp::neuron* n;
    real out, delta, ndelta, dbias1, dbias2;
    vector dweight1, dweight2, dxo;

    neuron(mlp::neuron& _n):
      n(&_n), out(0), delta(0), ndelta(0), dbias1(0), dbias2(0),
      dweight1(n->weight.size(), 0),
      dweight2(n->weight.size(), 0),
      dxo(n->weight.size(), 0) {}

    void reset()
    {
      // underlying neuron
      n->reset();

      // addons
      out = delta = ndelta = dbias1 = dbias2 = 0;
      std::fill(dweight1.begin(), dweight1.end(), 0);
      std::fill(dweight2.begin(), dweight2.end(), 0);
      std::fill(dxo.begin(), dxo.end(), 0);
    }

    real operator()(const vector& input)
    {
      return out = mlp::sigmoid(n->bias + dbias1 +
                                (n->weight + dweight1) * input);
    }
  };

  inline std::ostream& operator<<(std::ostream& os, const neuron& n)
  {
    return os << *n.n << " " << n.out << " " << n.delta << " "
              << n.ndelta << " " << n.dbias1 << " " << n.dbias2 << " "
              << n.dweight1 << " " << n.dweight2 << " " << n.dxo;
  }


  //---------------------------------------------------------------------------
  // layer
  //---------------------------------------------------------------------------

  class layer: public std::vector<neuron>
  {
  public:
    layer(mlp::layer& l) //: std::vector<neuron>(l.begin(), l.end()) {}
    {
      for (mlp::layer::iterator n = l.begin(); n != l.end(); ++n)
        push_back(neuron(*n));
    }

    void reset()
    {
      for(iterator n = begin(); n != end(); ++n)
        n->reset();
    }

    vector operator()(const vector& input)
    {
      vector output(size());

      for(unsigned i = 0; i < output.size(); ++i)
        output[i] = (*this)[i](input);

      return output;
    }
  };


  //---------------------------------------------------------------------------
  // net
  //---------------------------------------------------------------------------

  class net: public std::vector<layer>
  {
  public:
    net(mlp::net& n) //: std::vector<layer>(n.begin(), n.end()) { reset(); }
    {
      for (mlp::net::iterator l = n.begin(); l != n.end(); ++l)
        push_back(*l);
    }

    virtual ~net() {}

    void reset()
    {
      for(iterator l = begin(); l != end(); ++l)
        l->reset();
    }

    // Batch training with adaptive learning rate and momentum: each epoch
    // calls error() (implemented by a derived class, which is expected to
    // accumulate the per-pattern deltas), then either applies the pending
    // step and re-adapts the coefficients, or backtracks eta when the error
    // grew beyond the tolerance.
    real train(const set& ts,
               unsigned   epochs,
               real       target_error,
               real       tolerance,
               real       eta      = eta_default,
               real       momentum = alpha_default,
               real       lambda   = lambda_default)
    {
      real error_ = max_real;

      while (epochs-- && error_ > target_error)
        {
          real last_error = error_;

          init_delta();

          error_ = error(ts);

          if (error_ < last_error + tolerance)
            {
              coeff_adapt(eta, momentum, lambda);
              weight_update(ts.size(), true, eta, momentum);
            }
          else
            {
              eta *= backtrack_step;
              eta = std::max(eta, eta_floor);
              momentum = eta * lambda;
              weight_update(ts.size(), false, eta, momentum);
              error_ = last_error;
            }
        }

      return error_;
    }

    // Error over the training set; a derived class implements it and is also
    // expected to accumulate ndelta/dxo for the subsequent weight update.
    virtual real error(const set& ts) = 0;

    // protected:
    void forward(vector input)
    {
      // propagate the input through every layer in turn
      for (iterator l = begin(); l != end(); ++l)
        {
          vector tmp = (*l)(input);
          input.swap(tmp);
        }
    }

    // private:
    void init_delta()
    {
      // zero the per-epoch accumulators before a new pass over the set
      for (iterator l = begin(); l != end(); ++l)
        for (layer::iterator n = l->begin(); n != l->end(); ++n)
          std::fill(n->dxo.begin(), n->dxo.end(), n->ndelta = 0.0);
    }

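    // How coeff_adapt (below) tunes the coefficients: with
    //   me = ||accumulated gradient terms (dxo)||,
    //   mw = ||previous weight increments (dweight1)||,
    //   ew = <dxo, dweight1>,
    // the factor ew / (me * mw) is the cosine of the angle between the new
    // gradient and the last step, so eta grows when they point the same way
    // and shrinks when they oppose; lambda is set to lambda0 * me / mw and
    // momentum follows as eta * lambda.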
    void coeff_adapt(real& eta, real& momentum, real& lambda)
    {
      real me = 0, mw = 0, ew = 0;

      for (iterator l = begin(); l != end(); ++l)
        for (layer::iterator n = l->begin(); n != l->end(); ++n)
          {
            me += n->dxo * n->dxo;
            mw += n->dweight1 * n->dweight1;
            ew += n->dxo * n->dweight1;
          }

      me = std::max(static_cast<real>(std::sqrt(me)), me_floor);
      mw = std::max(static_cast<real>(std::sqrt(mw)), mw_floor);
      eta *= (1.0 + 0.5 * ew / (me * mw));
      eta = std::max(eta, eta_floor);
      lambda = lambda0 * me / mw;
      momentum = eta * lambda;
#ifdef DEBUG
      std::cout << me << "  \t" << mw << "  \t" << ew << "  \t"
                << eta << "  \t" << momentum << "  \t" << lambda << std::endl;
#endif // DEBUG
    }

    void weight_update(unsigned size, bool fire, real eta, real momentum)
    {
      for (iterator l = begin(); l != end(); ++l)
        for (layer::iterator n = l->begin(); n != l->end(); ++n)
          {
            // average the accumulated deltas over the training set
            n->ndelta /= size;
            n->dxo /= size;
            if (fire)
              {
                // commit the pending increment and remember it for momentum
                n->n->weight += n->dweight1;
                n->dweight2 = n->dweight1;
                n->n->bias += n->dbias1;
                n->dbias2 = n->dbias1;
              }
            // prepare the next increment from the fresh gradient and momentum
            n->dweight1 = eta * n->dxo + momentum * n->dweight2;
            n->dbias1 = eta * n->ndelta + momentum * n->dbias2;
          }
    }
  };

  //---------------------------------------------------------------------------

} // namespace qp

//-----------------------------------------------------------------------------

#endif // qp_h

// Local Variables:
// mode:C++
// End:
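
A concrete example of how qp.h might be used: qp::net::error() is pure
virtual, so training requires deriving a net that measures the error over the
training set.  The sketch below is an illustration under stated assumptions,
not part of qp.h or mlp.h: the class name sse_net, the pattern members
ts[p].input and ts[p].output, and the way the mlp::net is built are all
hypothetical, and a complete subclass would also have to accumulate ndelta
and dxo during error() so that weight_update() has something to apply.

#include <qp.h>

// Hypothetical mean-squared-error net; the member names of a training
// pattern (input, output) are assumptions about mlp::set, not taken from qp.h.
struct sse_net : qp::net
{
  sse_net(mlp::net& n) : qp::net(n) {}

  qp::real error(const qp::set& ts)
  {
    qp::real e = 0;
    for (unsigned p = 0; p < ts.size(); ++p)
      {
        forward(ts[p].input);            // assumed pattern layout
        qp::layer& out = back();         // output layer of this net
        for (unsigned i = 0; i < out.size(); ++i)
          {
            qp::real d = ts[p].output[i] - out[i].out;
            e += d * d;
            // a full implementation would also back-propagate d here,
            // filling each neuron's ndelta and dxo for weight_update()
          }
      }
    return e / ts.size();
  }
};

// Usage, given an existing mlp::net `base` and a training set `patterns`:
//   sse_net qnet(base);
//   qp::real err = qnet.train(patterns, 1000, 0.01, 0.0001);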
