diff --git a/eo/app/Makefile.am b/eo/app/Makefile.am
new file mode 100644
index 00000000..64ca3c70
--- /dev/null
+++ b/eo/app/Makefile.am
@@ -0,0 +1,17 @@
+###############################################################################
+##
+## Makefile.am for app
+##
+###############################################################################
+
+SUBDIRS = gprop
+
+###############################################################################
+
+all:
+	for i in $(SUBDIRS); do pushd $$i && $(MAKE) all; popd; done
+
+clean:
+	for i in $(SUBDIRS); do pushd $$i && $(MAKE) clean; popd; done
+
+###############################################################################
diff --git a/eo/app/gprop/Makefile.am b/eo/app/gprop/Makefile.am
new file mode 100644
index 00000000..bc4a7b85
--- /dev/null
+++ b/eo/app/gprop/Makefile.am
@@ -0,0 +1,25 @@
+###############################################################################
+#
+# Makefile.am for app/gprop
+#
+###############################################################################
+
+DEPS = $(top_builddir)/src/libeo.a $(top_builddir)/src/utils/libeoutils.a
+
+###############################################################################
+
+INCLUDES = -I$(top_builddir)/src
+LDADDS = $(top_builddir)/src/libeo.a $(top_builddir)/src/utils/libeoutils.a
+
+###############################################################################
+
+bin_PROGRAMS = gprop
+
+###############################################################################
+
+gprop_SOURCES = gprop.cc
+gprop_DEPENDENCIES = $(DEPS)
+gprop_LDFLAGS = -lm
+gprop_LDADD = $(LDADDS)
+
+###############################################################################
diff --git a/eo/app/gprop/gprop.cc b/eo/app/gprop/gprop.cc
new file mode 100644
index 00000000..7890b18a
--- /dev/null
+++ b/eo/app/gprop/gprop.cc
@@ -0,0 +1,146 @@
+//-----------------------------------------------------------------------------
+// gprop
+//-----------------------------------------------------------------------------
+
+#include <cstdlib>              // EXIT_SUCCESS EXIT_FAILURE
+#include <exception>            // exception
+#include <iostream>             // cerr cout
+#include <fstream>              // ifstream
+#include <string>               // string
+#include <utils/eoParser.h>     // eoParser
+#include <eoPop.h>              // eoPop
+#include <eoGenContinue.h>      // eoGenContinue
+#include <eoProportional.h>     // eoProportional
+#include <eoSGA.h>              // eoSGA
+#include "gprop.h"              // Chrom eoChromInit eoChromMutation eoChromXover eoChromEvaluator
+
+//-----------------------------------------------------------------------------
+// global variables
+//-----------------------------------------------------------------------------
+
+
+
+//-----------------------------------------------------------------------------
+// parameters
+//-----------------------------------------------------------------------------
+
+eoValueParam<unsigned> pop_size(10, "pop_size", "default population size", 'p');
+eoValueParam<unsigned> generations(10, "generations", "default generation number", 'g');
+eoValueParam<double> mut_rate(0.1, "mut_rate", "default mutation rate", 'm');
+eoValueParam<double> xover_rate(0.1, "xover_rate", "default crossover rate", 'x');
+eoValueParam<string> file("", "file", "common part of patterns filenames *.trn *.val and *.tst", 'f');
+
+//-----------------------------------------------------------------------------
+// auxiliary functions
+//-----------------------------------------------------------------------------
+
+void arg(int argc, char** argv);
+void load_file(mlp::set& set, const string& ext);
+void ga();
+
+//-----------------------------------------------------------------------------
+// main
+//-----------------------------------------------------------------------------
+
+int main(int argc, char** argv)
+{
+  try
+  {
+    arg(argc, argv);
+    ga();
+  }
+  catch (exception& e)
+  {
+    cerr << argv[0] << ": " << e.what() << endl;
+    exit(EXIT_FAILURE);
+  }
+
+  return 0;
+}
+
+//-----------------------------------------------------------------------------
+// implementation
+//-----------------------------------------------------------------------------
+
+void arg(int argc, char** argv)
+{
+  eoParser parser(argc, argv);
+
+  parser.processParam(pop_size, "genetic operators");
+  parser.processParam(generations, "genetic operators");
+  parser.processParam(mut_rate, "genetic operators");
+  parser.processParam(xover_rate, "genetic operators");
+  parser.processParam(file, "files");
+
+  if (parser.userNeedsHelp())
+  {
+    parser.printHelp(cout);
+    exit(EXIT_SUCCESS);
+  }
+
+  load_file(trn_set, "trn");
+  load_file(val_set, "val");
+  load_file(tst_set, "tst");
+
+  phenotype::trn_max = trn_set.size();
+  phenotype::val_max = val_set.size();
+  phenotype::tst_max = tst_set.size();
+}
+
+//-----------------------------------------------------------------------------
+
+void load_file(mlp::set& set, const string& ext)
+{
+  string filename = file.value(); filename += "." + ext;
+
+  ifstream ifs(filename.c_str());
+  if (!ifs)
+  {
+    cerr << "can't open file \"" << filename << "\"" << endl;
+    exit(EXIT_FAILURE);
+  }
+
+  ifs >> set;
+
+  cout << "set.size() = " << set.size() << endl;
+
+  if (set.size() == 0)
+  {
+    cerr << filename << " data file is empty!";
+    exit(EXIT_FAILURE);
+  }
+}
+
+//-----------------------------------------------------------------------------
+
+void ga()
+{
+  eoGenContinue<Chrom> continuator(generations.value());
+
+  eoProportional<Chrom> select;
+  eoChromMutation mutation(generations);
+  eoChromXover xover;
+  eoEvalFuncPtr<Chrom, phenotype> evaluator(eoChromEvaluator);
+
+  eoSGA<Chrom> sga(select,
+                   xover, xover_rate.value(),
+                   mutation, mut_rate.value(),
+                   evaluator,
+                   continuator);
+
+  eoInitChrom init;
+  eoPop<Chrom> pop(pop_size.value(), init);
+  apply<Chrom>(evaluator, pop);
+
+  cout << pop << endl;
+
+  sga(pop);
+
+  cout << pop << endl;
+}
+
+//-----------------------------------------------------------------------------
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/eo/app/gprop/gprop.h b/eo/app/gprop/gprop.h
new file mode 100644
index 00000000..392a51bb
--- /dev/null
+++ b/eo/app/gprop/gprop.h
@@ -0,0 +1,183 @@
+//-----------------------------------------------------------------------------
+// gprop.h
+//-----------------------------------------------------------------------------
+
+#ifndef gprop_h
+#define gprop_h
+
+//-----------------------------------------------------------------------------
+
+#include <iostream>                  // istream ostream
+#include <string>                    // string
+#include <EO.h>                      // EO
+#include <eoOp.h>                    // eoMonOp eoQuadraticOp
+#include <eoEvalFunc.h>              // eoEvalFunc
+#include <eoInit.h>                  // eoInit
+#include <utils/rnd_generators.h>    // normal_generator
+#include "mlp.h"                     // mlp::net mlp::set
+#include "qp.h"                      // qp::set
+#include "mse.h"                     // mse::error
+
+//-----------------------------------------------------------------------------
+// phenotype
+//-----------------------------------------------------------------------------
+
+struct phenotype
+{
+  unsigned trn_ok, val_ok, tst_ok;
+  double mse_error;
+
+  static unsigned trn_max, val_max, tst_max;
+
+  phenotype(const double& _mse_error = 0): mse_error(_mse_error) {}
+
+  operator double(void) const { return mse_error; }
+
+  friend bool operator<(const phenotype& a, const phenotype& b)
+  {
+    return a.mse_error < b.mse_error;
+  }
+
+  friend ostream& operator<<(ostream& os, const phenotype& p)
+  {
+    return os << p.trn_ok << "/" << p.trn_max << " "
+              << p.val_ok << "/" << p.val_max << " "
+              << p.tst_ok << "/" << p.tst_max << " "
+              << p.mse_error;
+  }
+
+  friend istream& operator>>(istream& is, phenotype& p)
+  {
+    return is;
+  }
+};
+
+unsigned phenotype::trn_max = 0, phenotype::val_max = 0, phenotype::tst_max = 0;
+
+//-----------------------------------------------------------------------------
+// genotype
+//-----------------------------------------------------------------------------
+
+typedef mlp::net genotype;
+
+//-----------------------------------------------------------------------------
+// Chrom
+//-----------------------------------------------------------------------------
+
+class Chrom: public EO<phenotype>, public genotype
+{
+public:
+  Chrom(): genotype(25, 2) {}
+
+  string className() const { return "Chrom"; }
+
+  void printOn(ostream& os) const
+  {
+    // os << static_cast<const genotype&>(*this) << " " << fitness();
+    os << fitness();
+  }
+
+  void readFrom(istream& is)
+  {
+    invalidate();
+  }
+};
+
+//-----------------------------------------------------------------------------
+// eoChromInit
+//-----------------------------------------------------------------------------
+
+class eoInitChrom: public eoInit<Chrom>
+{
+public:
+  void operator()(Chrom& chrom)
+  {
+    chrom.reset();
+    chrom.invalidate();
+  }
+};
+
+//-----------------------------------------------------------------------------
+// global variables
+//-----------------------------------------------------------------------------
+
+mlp::set trn_set, val_set, tst_set;
+
+//-----------------------------------------------------------------------------
+// eoChromMutation
+//-----------------------------------------------------------------------------
+
+class eoChromMutation: public eoMonOp<Chrom>
+{
+public:
+  eoChromMutation(eoValueParam<unsigned>& _generation):
+    generation(_generation) {}
+
+  void operator()(Chrom& chrom)
+  {
+    mse::net tmp(chrom);
+    tmp.train(trn_set, 10, 0, 0.001);
+  }
+
+private:
+  eoValueParam<unsigned>& generation;
+};
+
+//-----------------------------------------------------------------------------
+// eoChromXover
+//-----------------------------------------------------------------------------
+
+class eoChromXover: public eoQuadraticOp<Chrom>
+{
+public:
+  void operator()(Chrom& chrom1, Chrom& chrom2)
+  {
+  }
+};
+
+//-----------------------------------------------------------------------------
+// eoChromEvaluator
+//-----------------------------------------------------------------------------
+
+unsigned correct(const mlp::net& net, const qp::set& set)
+{
+  unsigned sum = 0;
+
+  for (qp::set::const_iterator s = set.begin(); s != set.end(); ++s)
+  {
+    unsigned partial = 0;
+
+    for (unsigned i = 0; i < s->output.size(); ++i)
+      if (s->output[i] < 0.5 && net(s->input)[i] < 0.5 ||
+          s->output[i] > 0.5 && net(s->input)[i] > 0.5)
+        ++partial;
+
+    if (partial == s->output.size())
+      ++sum;
+  }
+
+  return sum;
+}
+
+
+
+phenotype eoChromEvaluator(const Chrom& chrom)
+{
+  // extern mlp::set trn_set, val_set, tst_set;
+
+  phenotype p;
+  p.trn_ok = correct(chrom, trn_set);
+  p.val_ok = correct(chrom, val_set);
+  p.tst_ok = correct(chrom, tst_set);
+  p.mse_error = mse::error(chrom, val_set);
+
+  return p;
+}
+
+//-----------------------------------------------------------------------------
+
+#endif // gprop_h
+
+// Local Variables:
+// mode:C++
+// End:
diff --git a/eo/app/gprop/l2.h b/eo/app/gprop/l2.h
new file mode 100644
index 00000000..011c6610
--- /dev/null
+++ b/eo/app/gprop/l2.h
@@ -0,0 +1,140 @@
+//----------------------------------------------------------------------------- +// l2.h +//----------------------------------------------------------------------------- + +#ifndef l2_h +#define l2_h + +//----------------------------------------------------------------------------- + +#include // log +#include // neuron layer net set + +//----------------------------------------------------------------------------- + +namespace l2 +{ + //--------------------------------------------------------------------------- + // useful typedefs + //--------------------------------------------------------------------------- + + using qp::real; + using qp::vector; + using qp::max_real; + using qp::min_real; + using qp::set; + using qp::neuron; + using qp::layer; + + //--------------------------------------------------------------------------- + // error + //--------------------------------------------------------------------------- + + real error(const mlp::net& net, const set& ts) + { + real error_ = 0.0; + + for (set::const_iterator s = ts.begin(); s != ts.end(); ++s) + { + vector out = net(s->input); + + for (unsigned i = 0; i < out.size(); ++i) + { + real target = s->output[i]; + real value = out[i]; + error_ -= target * log(value + min_real) + + (1.0 - target) * log(1.0 - value + min_real); + } + } + + return error_; + } + + //------------------------------------------------------------------------- + // l2 + //------------------------------------------------------------------------- + + class net: public qp::net + { + public: + net(mlp::net& n): qp::net(n) {} + + real error(const set& ts) + { + real error_ = 0; + + for (set::const_iterator s = ts.begin(); s != ts.end(); ++s) + { + forward(s->input); + error_ -= backward(s->input, s->output); + } + + return error_; + } + + private: + real backward(const vector& input, const vector& output) + { + reverse_iterator current_layer = rbegin(); + reverse_iterator backward_layer = current_layer + 1; + real error_ = 0; + + // output layer + for (unsigned j = 0; j < current_layer->size(); ++j) + { + neuron& n = (*current_layer)[j]; + real out = output[j]; + n.ndelta += n.delta = (out - n.out) / + (n.out * (1.0 - n.out) + min_real) * n.out * (1.0 - n.out); + + if (size() == 1) // monolayer + n.dxo += n.delta * input; + else // multilayer + for (unsigned k = 0; k < n.dxo.size(); ++k) + n.dxo[k] += n.delta * (*backward_layer)[k].out; + + error_ += out * log(n.out + min_real) + + (1.0 - out) * log(1.0 - n.out + min_real); + } + + // hidden layers + while (++current_layer != rend()) + { + reverse_iterator forward_layer = current_layer - 1; + reverse_iterator backward_layer = current_layer + 1; + + for (unsigned j = 0; j < current_layer->size(); ++j) + { + neuron& n = (*current_layer)[j]; + real sum = 0; + for (unsigned k = 0; k < forward_layer->size(); ++k) + { + neuron& nf = (*forward_layer)[k]; + sum += nf.delta * (nf.n->weight[j] + nf.dweight1[j]); + } + n.delta = n.out * (1.0 - n.out) * sum; + n.ndelta += n.delta; + + if (backward_layer == rend()) // first hidden layer + n.dxo += n.delta * input; + else // rest of hidden layers + for (unsigned k = 0; k < n.dxo.size(); ++k) + n.dxo[k] += n.delta * (*backward_layer)[k].out; + } + } + + return error_; + } + }; + + //--------------------------------------------------------------------------- + +} // namespace l2 + +//----------------------------------------------------------------------------- + +#endif // l2_h + +// Local Variables: +// mode:C++ +// End: diff --git a/eo/app/gprop/mlp.h b/eo/app/gprop/mlp.h new 
file mode 100644 index 00000000..68e81a58 --- /dev/null +++ b/eo/app/gprop/mlp.h @@ -0,0 +1,290 @@ +//----------------------------------------------------------------------------- +// mlp.h +//----------------------------------------------------------------------------- + +#ifndef mlp_h +#define mlp_h + +//----------------------------------------------------------------------------- + +#include // MAXFLOAT MINFLOAT +#include // exp +#include // invalid_argument +#include // istream ostream +#include // generate +#include // vector +#include // eoRng +#include // normal_geneurator +#include // * + +//----------------------------------------------------------------------------- + +namespace mlp +{ + //--------------------------------------------------------------------------- + // useful typedefs + //--------------------------------------------------------------------------- + + typedef double real; + typedef std::vector vector; + + const real max_real = MAXFLOAT; + const real min_real = MINFLOAT; + + + //--------------------------------------------------------------------------- + // sigmoid + //--------------------------------------------------------------------------- + + real sigmoid(const real& x) + { + return 1.0 / (1.0 + exp(-x)); + } + + + //--------------------------------------------------------------------------- + // neuron + //--------------------------------------------------------------------------- + + struct neuron + { + real bias; + vector weight; + + neuron(const unsigned& num_inputs = 0): weight(num_inputs) {} + + void reset() + { + normal_generator rnd(1.0); + bias = rnd(); + generate(weight.begin(), weight.end(), rnd); + } + + real operator()(const vector& input) const + { + return sigmoid(bias + weight * input); + } + + unsigned length() { return weight.size() + 1; } + + void normalize() + { + real n = sqrt(bias * bias + weight * weight); + bias /= n; + weight /= n; + } + + void desaturate() + { + bias = -5.0 + 10.0 / (1.0 + exp(bias / -5.0)); + + for (vector::iterator w = weight.begin(); w != weight.end(); ++w) + *w = -5.0 + 10.0 / (1.0 + exp(*w / -5.0)); + } + }; + + ostream& operator<<(ostream& os, const neuron& n) + { + return os << n.bias << " " << n.weight; + } + + + //--------------------------------------------------------------------------- + // layer + //--------------------------------------------------------------------------- + + class layer: public std::vector + { + public: + layer(const unsigned& num_inputs = 0, const unsigned& num_neurons = 0): + std::vector(num_neurons, neuron(num_inputs)) {} + + void reset() + { + normal_generator rnd(1.0); + for(iterator n = begin(); n != end(); ++n) + n->reset(); + } + + vector operator()(const vector& input) const + { + vector output(size()); + + for(unsigned i = 0; i < output.size(); ++i) + output[i] = (*this)[i](input); + + return output; + } + + unsigned length() { return front().length() * size(); } + + void normalize() + { + for(iterator n = begin(); n != end(); ++n) + n->normalize(); + } + + void desaturate() + { + for(iterator n = begin(); n != end(); ++n) + n->desaturate(); + } + }; + + + //--------------------------------------------------------------------------- + // net + //--------------------------------------------------------------------------- + + class net: public std::vector + { + public: + net(const unsigned& num_inputs = 0, + const unsigned& num_outputs = 0, + const std::vector& hidden = std::vector()) + { + switch(hidden.size()) + { + case 0: + push_back(layer(num_inputs, num_outputs)); + 
break; + default: + push_back(layer(num_inputs, hidden.front())); + for (unsigned i = 0; i < hidden.size() - 1; ++i) + push_back(layer(hidden[i], hidden[i + 1])); + push_back(layer(hidden.back(), num_outputs)); + break; + } + } + + void reset() + { + normal_generator rnd(1.0); + for(iterator l = begin(); l != end(); ++l) + l->reset(); + } + + vector operator()(const vector& input) const + { + vector tmp = input; + + for(const_iterator l = begin(); l != end(); ++l) + tmp = (*l)(tmp); + + return tmp; + } + + unsigned winner(const vector& input) const + { + vector tmp = (*this)(input); + return (max_element(tmp.begin(), tmp.end()) - tmp.begin()); + } + + unsigned length() + { + unsigned sum = 0; + + for(iterator l = begin(); l != end(); ++l) + sum += l->length(); + + return sum; + } + + void normalize() + { + for(iterator l = begin(); l != end(); ++l) + l->normalize(); + } + + void desaturate() + { + for(iterator l = begin(); l != end(); ++l) + l->desaturate(); + } + }; + + + //--------------------------------------------------------------------------- + // sample + //--------------------------------------------------------------------------- + + struct sample + { + vector input, output; + + sample(unsigned input_size = 0, unsigned output_size = 0): + input(input_size), output(output_size) {} + }; + + istream& operator>>(istream& is, sample& s) + { + return is >> s.input >> s.output; + } + + ostream& operator<<(ostream& os, const sample& s) + { + return os << s.input << " " << s.output; + } + + + //--------------------------------------------------------------------------- + // set + //--------------------------------------------------------------------------- + + class set: public std::vector + { + public: + set(unsigned input_size = 0, unsigned output_size = 0, + unsigned num_samples = 0): + std::vector(num_samples, sample(input_size, output_size)) {} + + set(istream& is) + { + is >> (*this); + } + }; + + ostream& operator<<(ostream& os, const set& s) + { + os << "<" << endl; + for (unsigned i = 0; i < s.size(); ++i) + os << s[i] << endl; + return os << ">"; + } + + //--------------------------------------------------------------------------- + // euclidean_distance + //--------------------------------------------------------------------------- + + real euclidean_distance(const net& n1, const net& n2) + { + real sum = 0; + + for(net::const_reverse_iterator l1 = n1.rbegin(), l2 = n2.rbegin(); + l1 != n1.rend() && l2 != n2.rend(); ++l1, ++l2) + for(layer::const_iterator n1 = l1->begin(), n2 = l2->begin(); + n1 != l1->end() && n2 != l2->end(); ++n1, ++n2) + { + real b = n1->bias - n2->bias; + vector w = n1->weight - n2->weight; + sum += b * b + w * w; + } + /* + #include + std::ofstream file("dist.stat", ios::app); + file << sqrt(sum) << endl; + */ + return sqrt(sum); + } + + //--------------------------------------------------------------------------- + +} // namespace mlp + +//----------------------------------------------------------------------------- + +#endif // mlp_h + +// Local Variables: +// mode:C++ +// End: diff --git a/eo/app/gprop/mse.h b/eo/app/gprop/mse.h new file mode 100644 index 00000000..841a88e3 --- /dev/null +++ b/eo/app/gprop/mse.h @@ -0,0 +1,140 @@ +//----------------------------------------------------------------------------- +// mse.h +//----------------------------------------------------------------------------- + +#ifndef mse_h +#define mse_h + +//----------------------------------------------------------------------------- + +#include // neuron layer net set + 
+//----------------------------------------------------------------------------- + +namespace mse +{ + //--------------------------------------------------------------------------- + // useful typedefs + //--------------------------------------------------------------------------- + + using qp::real; + using qp::vector; + using qp::max_real; + using qp::min_real; + using qp::set; + using qp::neuron; + using qp::layer; + + //--------------------------------------------------------------------------- + // error + //--------------------------------------------------------------------------- + + real error(const mlp::net& net, const set& ts) + { + real error_ = 0.0; + + for (set::const_iterator s = ts.begin(); s != ts.end(); ++s) + { + vector out = net(s->input); + + for (unsigned i = 0; i < out.size(); ++i) + { + real diff = s->output[i] - out[i]; + error_ += diff * diff; + } + } + + return error_ / ts.size(); + } + //------------------------------------------------------------------------- + // mse + //------------------------------------------------------------------------- + + class net: public qp::net + { + public: + net(mlp::net& n): qp::net(n) {} + + real error(const set& ts) + { + real error_ = 0; + + for (set::const_iterator s = ts.begin(); s != ts.end(); ++s) + { + forward(s->input); + error_ += backward(s->input, s->output); + } + error_ /= ts.size(); + + return error_; + } + + private: + real backward(const vector& input, const vector& output) + { + reverse_iterator current_layer = rbegin(); + reverse_iterator backward_layer = current_layer + 1; + real error_ = 0; + + // output layer + for (unsigned j = 0; j < current_layer->size(); ++j) + { + neuron& n = (*current_layer)[j]; + + real diff = output[j] - n.out; + n.ndelta += n.delta = diff * n.out * (1.0 - n.out); + + if (size() == 1) // monolayer + n.dxo += n.delta * input; + else // multilayer + for (unsigned k = 0; k < n.dxo.size(); ++k) + n.dxo[k] += n.delta * (*backward_layer)[k].out; + + error_ += diff * diff; + } + + // hidden layers + while (++current_layer != rend()) + { + reverse_iterator forward_layer = current_layer - 1; + reverse_iterator backward_layer = current_layer + 1; + + for (unsigned j = 0; j < current_layer->size(); ++j) + { + + neuron& n = (*current_layer)[j]; + real sum = 0; + + for (unsigned k = 0; k < forward_layer->size(); ++k) + { + neuron& nf = (*forward_layer)[k]; + sum += nf.delta * (nf.n->weight[j] + nf.dweight1[j]); + } + + n.delta = n.out * (1.0 - n.out) * sum; + n.ndelta += n.delta; + + + if (backward_layer == rend()) // first hidden layer + n.dxo += n.delta * input; + else // rest of hidden layers + for (unsigned k = 0; k < n.dxo.size(); ++k) + n.dxo[k] += n.delta * (*backward_layer)[k].out; + } + } + + return error_; + } + }; + + //--------------------------------------------------------------------------- + +} // namespace mse + +//----------------------------------------------------------------------------- + +#endif // mse_h + +// Local Variables: +// mode:C++ +// End: diff --git a/eo/app/gprop/qp.h b/eo/app/gprop/qp.h new file mode 100644 index 00000000..e9142562 --- /dev/null +++ b/eo/app/gprop/qp.h @@ -0,0 +1,251 @@ +//----------------------------------------------------------------------------- +// qp.h +//----------------------------------------------------------------------------- + +#ifndef qp_h +#define qp_h + +//----------------------------------------------------------------------------- + +#include // istream ostream +#include // fill +#include // vector +#include // 
uniform_generator +#include // neuron layer net + +//----------------------------------------------------------------------------- + +namespace qp +{ + //--------------------------------------------------------------------------- + // useful typedefs + //--------------------------------------------------------------------------- + + using mlp::real; + using mlp::vector; + + using mlp::max_real; + using mlp::min_real; + + using mlp::set; + + //--------------------------------------------------------------------------- + // useful constants + //--------------------------------------------------------------------------- + + const real eta_default = 0.5; + const real eta_floor = 0.0001; + const real alpha_default = 0.9; + const real lambda_default = 0.5; + const real lambda0 = 0.1; + const real backtrack_step = 0.5; + const real me_floor = 0.0001; + const real mw_floor = 0.0001; + + + //--------------------------------------------------------------------------- + // neuron + //--------------------------------------------------------------------------- + + struct neuron + { + mlp::neuron* n; + real out, delta, ndelta, dbias1, dbias2; + vector dweight1, dweight2, dxo; + + neuron(mlp::neuron& _n): + n(&_n), out(0), delta(0), ndelta(0), dbias1(0), dbias2(0), + dweight1(n->weight.size(), 0), + dweight2(n->weight.size(), 0), + dxo(n->weight.size(), 0) {} + + void reset() + { + // underlaying neuron + n->reset(); + + // addons + out = delta = ndelta = dbias1 = dbias2 = 0; + fill(dweight1.begin(), dweight1.end(), 0); + fill(dweight2.begin(), dweight2.end(), 0); + fill(dxo.begin(), dxo.end(), 0); + } + + real operator()(const vector& input) + { + return out = mlp::sigmoid(n->bias + dbias1 + + (n->weight + dweight1) * input); + } + }; + + ostream& operator<<(ostream& os, const neuron& n) + { + return os << *n.n << " " << n.out << " " << n.delta << " " + << n.ndelta << " " << n.dbias1 << " " << n.dbias2 << " " + << n.dweight1 << " " << n.dweight2 << " " << n.dxo; + } + + + //--------------------------------------------------------------------------- + // layer + //--------------------------------------------------------------------------- + + class layer: public std::vector + { + public: + layer(mlp::layer& l)//: std::vector(l.begin(), l.end()) {} + { + for (mlp::layer::iterator n = l.begin(); n != l.end(); ++n) + push_back(neuron(*n)); + } + + void reset() + { + for(iterator n = begin(); n != end(); ++n) + n->reset(); + } + + vector operator()(const vector& input) + { + vector output(size()); + + for(unsigned i = 0; i < output.size(); ++i) + output[i] = (*this)[i](input); + + return output; + } + }; + + + //--------------------------------------------------------------------------- + // net + //--------------------------------------------------------------------------- + + class net: public std::vector + { + public: + net(mlp::net& n) //: std::vector(n.begin(), n.end()) { reset(); } + { + for (mlp::net::iterator l = n.begin(); l != n.end(); ++l) + push_back(*l); + } + + virtual ~net() {} + + void reset() + { + for(iterator l = begin(); l != end(); ++l) + l->reset(); + } + + real train(const set& ts, + unsigned epochs, + real target_error, + real tolerance, + real eta = eta_default, + real momentum = alpha_default, + real lambda = lambda_default) + { + real error_ = max_real; + + while (epochs-- && error_ > target_error) + { + real last_error = error_; + + init_delta(); + + error_ = error(ts); + + if (error_ < last_error + tolerance) + { + coeff_adapt(eta, momentum, lambda); + weight_update(ts.size(), 
true, eta, momentum); + } + else + { + eta *= backtrack_step; + eta = max(eta, eta_floor); + momentum = eta * lambda; + weight_update(ts.size(), false, eta, momentum); + error_ = last_error; + } + } + + return error_; + } + + virtual real error(const set& ts) = 0; + + // protected: + void forward(vector input) + { + for (iterator l = begin(); l != end(); ++l) + { + vector tmp = (*l)(input); + input.swap(tmp); + } + } + + // private: + void init_delta() + { + for (iterator l = begin(); l != end(); ++l) + for (layer::iterator n = l->begin(); n != l->end(); ++n) + fill(n->dxo.begin(), n->dxo.end(), n->ndelta = 0.0); + } + + void coeff_adapt(real& eta, real& momentum, real& lambda) + { + real me = 0, mw = 0, ew = 0; + + for (iterator l = begin(); l != end(); ++l) + for (layer::iterator n = l->begin(); n != l->end(); ++n) + { + me += n->dxo * n->dxo; + mw += n->dweight1 * n->dweight1; + ew += n->dxo * n->dweight1; + } + + me = max(static_cast(sqrt(me)), me_floor); + mw = max(static_cast(sqrt(mw)), mw_floor); + eta *= (1.0 + 0.5 * ew / ( me * mw)); + eta = max(eta, eta_floor); + lambda = lambda0 * me / mw; + momentum = eta * lambda; +#ifdef DEBUG + cout << me << " \t" << mw << " \t" << ew << " \t" + << eta << " \t" << momentum << " \t" << lambda << endl; +#endif // DEBUG + } + + void weight_update(unsigned size, bool fire, real eta, real momentum) + { + for (iterator l = begin(); l != end(); ++l) + for (layer::iterator n = l->begin(); n != l->end(); ++n) + { + n->ndelta /= size; + n->dxo /= size; + if (fire) + { + n->n->weight += n->dweight1; + n->dweight2 = n->dweight1; + n->n->bias += n->dbias1; + n->dbias2 = n->dbias1; + } + n->dweight1 = eta * n->dxo + momentum * n->dweight2; + n->dbias1 = eta * n->ndelta + momentum * n->dbias2; + } + } + }; + + //--------------------------------------------------------------------------- + +} // namespace qp + +//----------------------------------------------------------------------------- + +#endif // qp_h + +// Local Variables: +// mode:C++ +// End: diff --git a/eo/app/gprop/vecop.h b/eo/app/gprop/vecop.h new file mode 100644 index 00000000..6e45a5d5 --- /dev/null +++ b/eo/app/gprop/vecop.h @@ -0,0 +1,213 @@ +//----------------------------------------------------------------------------- +// vecop.h +//----------------------------------------------------------------------------- + +#ifndef VECOP_H +#define VECOP_H + +//----------------------------------------------------------------------------- + +#include // ostream istream +#include // vector +#include // plus minus multiplies divides +#include // inner_product + +//----------------------------------------------------------------------------- +// vector + vector +//----------------------------------------------------------------------------- + +template vector operator+(const vector& v1, const vector& v2) +{ + vector tmp = v1; + transform(tmp.begin(), tmp.end(), v2.begin(), tmp.begin(), plus()); + return tmp; +} + +template vector operator-(const vector& v1, const vector& v2) +{ + vector tmp = v1; + transform(tmp.begin(), tmp.end(), v2.begin(), tmp.begin(), minus()); + return tmp; +} + +template T operator*(const vector& v1, const vector& v2) +{ + return inner_product(v1.begin(), v1.end(), v2.begin(), static_cast(0)); +} + +template T operator/(const vector& v1, const vector& v2) +{ + return inner_product(v1.begin(), v1.end(), v2.begin(), static_cast(0), + plus(), divides()); +} + +//----------------------------------------------------------------------------- +// vector += vector 
+//----------------------------------------------------------------------------- + +template vector& operator+=(vector& v1, const vector& v2) +{ + transform(v1.begin(), v1.end(), v2.begin(), v1.begin(), plus()); + return v1; +} + +template vector& operator-=(vector& v1, const vector& v2) +{ + transform(v1.begin(), v1.end(), v2.begin(), v1.begin(), minus()); + return v1; +} + +//----------------------------------------------------------------------------- +// vector + number +//----------------------------------------------------------------------------- + +template vector operator+(const vector& a, const B& b) +{ + vector tmp = a; + transform(tmp.begin(), tmp.end(), tmp.begin(), bind2nd(plus(), b)); + return tmp; +} + +template vector operator-(const vector& a, const B& b) +{ + vector tmp = a; + transform(tmp.begin(), tmp.end(), tmp.begin(), bind2nd(minus(), b)); + return tmp; +} + +template vector operator*(const vector& a, const B& b) +{ + vector tmp = a; + transform(tmp.begin(), tmp.end(), tmp.begin(), bind2nd(multiplies(), b)); + return tmp; +} + +template vector operator/(const vector& a, const B& b) +{ + vector tmp = a; + transform(tmp.begin(), tmp.end(), tmp.begin(), bind2nd(divides(), b)); + return tmp; +} + +//----------------------------------------------------------------------------- +// number + vector +//----------------------------------------------------------------------------- + +template vector operator+(const B& b, const vector& a) +{ + vector tmp = a; + transform(tmp.begin(), tmp.end(), tmp.begin(), bind2nd(plus(), b)); + return tmp; +} + +template vector operator-(const B& b, const vector& a) +{ + vector tmp(a.size(), b); + transform(tmp.begin(), tmp.end(), a.begin(), tmp.begin(), minus()); + return tmp; +} + +template vector operator*(const B& b, const vector& a) +{ + vector tmp = a; + transform(tmp.begin(), tmp.end(), tmp.begin(), bind2nd(multiplies(), b)); + return tmp; +} + +template vector operator/(const B& b, const vector& a) +{ + vector tmp(a.size(), b); + transform(tmp.begin(), tmp.end(), a.begin(), tmp.begin(), divides()); + return tmp; +} + +//----------------------------------------------------------------------------- +// vector += number +//----------------------------------------------------------------------------- + +template vector& operator+=(vector& a, const B& b) +{ + transform(a.begin(), a.end(), a.begin(), bind2nd(plus(), b)); + return a; +} + +template vector& operator-=(vector& a, const B& b) +{ + transform(a.begin(), a.end(), a.begin(), bind2nd(minus(), b)); + return a; +} + +template vector& operator*=(vector& a, const B& b) +{ + transform(a.begin(), a.end(), a.begin(), bind2nd(multiplies(), b)); + return a; +} + +template vector& operator/=(vector& a, const B& b) +{ + transform(a.begin(), a.end(), a.begin(), bind2nd(divides(), b)); + return a; +} + +//----------------------------------------------------------------------------- +// I/O +//----------------------------------------------------------------------------- + +template ostream& operator<<(ostream& os, const vector& v) +{ + os << '<'; + if (v.size()) + { + copy(v.begin(), v.end() - 1, ostream_iterator(os, " ")); + os << v.back(); + } + return os << '>'; +} + +template istream& operator>>(istream& is, vector& v) +{ + v.clear(); + + char c; + is >> c; + if (!is || c != '<') + is.setstate(ios::failbit); + else + { + T t; + do { + is >> c; + if (is && c!= '>') + { + is.putback(c); + is >> t; + if (is) + v.push_back(t); + } + } while (is && c != '>'); + } + + return is; +} + 
+//----------------------------------------------------------------------------- +// euclidean_distance +//----------------------------------------------------------------------------- + +template T euclidean_distance(const vector& v1, + const vector& v2) +{ + T sum = 0, tmp; + + for (unsigned i = 0; i < v1.size(); ++i) + { + tmp = v1[i] - v2[i]; + sum += tmp * tmp; + } + + return sqrt(sum); +} + +//----------------------------------------------------------------------------- + +#endif VECOP_H