* New tree configuration of the project:

.../
   ...           + -- EO
   |             |
   |             |
   +-- src ----- + -- EDO
   |             |
   |             |
   +-- test      + -- MO
   |             |
   |             |
   +-- tutorial  + -- MOEO
   |             |
   |             |
   +-- doc       + -- SMP
   |             |
   |             |
   ...           + -- EOMPI
                 |
                 |
                 + -- EOSERIAL

Question for current maintainers: should ./README be updated for a new release?

Also:

* Moving out eompi & eoserial modules (issue #2).

* Corrected the errors that occurred when executing the "make doc" command.

* Adding a solution for the conflicting headers problem (see the two CMake Cache
 Values: PROJECT_TAG & PROJECT_HRS_INSTALL_SUBPATH) (issue #1)

* Header inclusions:
        ** src: changing absolute paths into relative paths ('#include <...>' -> '#include "..."')
        ** test, tutorial: changing relative paths into absolute paths ('#include "..."' -> '#include <...>')

* Moving out some scripts from EDO -> to the root

* Add a new script for compilation and installation (see build_gcc_linux_install)

* Compilation with uBLAS library or EDO module: now ok

* Minor modifications on README & INSTALL files

* Commented out the failing eompi tests that never terminate

*** TODO: CPack (debian (DEB) & RedHat (RPM) packages) (issues #6 & #7) ***
This commit is contained in:
Adèle Harrissart 2014-08-04 13:40:28 +02:00
commit 490e837f7a
2359 changed files with 7688 additions and 16329 deletions

8
tutorial/eo/app/CMakeLists.txt Executable file
View file

@ -0,0 +1,8 @@
######################################################################################
### 1) Subdirectories that cmake should descend into
######################################################################################
# gprop and gpsymreg are currently disabled; only mastermind is built.
#add_subdirectory(gprop)
#add_subdirectory(gpsymreg)
add_subdirectory(mastermind)
######################################################################################

View file

@ -0,0 +1,31 @@
######################################################################################
### 1) Include the sources
######################################################################################
# Both include paths are disabled; the build relies on include directories
# inherited from the parent scope.
#include_directories(${EO_SRC_DIR}/src)
#include_directories(${CMAKE_CURRENT_SOURCE_DIR})
######################################################################################
### 2) specify where cmake can find the libraries (mandatory: before 3) )
######################################################################################
link_directories(${EO_BIN_DIR}/lib)
######################################################################################
### 3) define your target(s): just an executable here
######################################################################################
set (GPROP_SOURCES gprop.cpp)
# especially for Visual Studio: skip gprop on native WIN32, but build it on Cygwin
if(NOT WIN32 OR CYGWIN)
add_executable(gprop ${GPROP_SOURCES})
add_dependencies(gprop eo eoutils)
target_link_libraries(gprop eo eoutils)
set(GPROP_VERSION ${GLOBAL_VERSION})
set_target_properties(gprop PROPERTIES VERSION "${GPROP_VERSION}")
endif(NOT WIN32 OR CYGWIN)
######################################################################################

171
tutorial/eo/app/gprop/gprop.cpp Executable file
View file

@ -0,0 +1,171 @@
//-----------------------------------------------------------------------------
// gprop
//-----------------------------------------------------------------------------
#include <stdlib.h> // EXIT_SUCCESS EXIT_FAILURE
#include <stdexcept> // exception
#include <iostream> // cerr cout
#include <fstream> // ifstream
#include <string> // string
#include <paradiseo/eo.h> // all usefull eo stuff
#include "gprop.h" // Chrom eoChromInit eoChromMutation eoChromXover eoChromEvaluator
using namespace std;
//-----------------------------------------------------------------------------
// global variables
//-----------------------------------------------------------------------------
// Network geometry and data sets; all filled in by arg() below before ga()
// runs, and read by the Chrom constructor via the extern declarations in
// gprop.h.
unsigned in, out, hidden;
mlp::set train, validate, test;
//-----------------------------------------------------------------------------
// parameters
//-----------------------------------------------------------------------------
// Command-line parameters (value, long name, help text, short flag);
// registered with the parser in arg().
eoValueParam<unsigned> pop_size(10, "pop_size", "population size", 'p');
eoValueParam<unsigned> generations(10, "generations", "number of generation", 'g');
eoValueParam<double> mut_rate(0.25, "mut_rate", "mutation rate", 'm');
eoValueParam<double> xover_rate(0.25, "xover_rate", "default crossover rate", 'x');
eoValueParam<string> file("", "file", "common start of patterns filenames *.trn *.val and *.tst", 'f');
eoValueParam<unsigned> hiddenp(0, "hidden", "number of neurons in hidden layer", 'd');
//-----------------------------------------------------------------------------
// auxiliary functions
//-----------------------------------------------------------------------------
void arg(int argc, char** argv);
void load_file(mlp::set& s1, const string& s2);
void ga();
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main(int argc, char** argv)
{
try
{
arg(argc, argv);
ga();
}
catch (exception& e)
{
cerr << argv[0] << ": " << e.what() << endl;
exit(EXIT_FAILURE);
}
return 0;
}
//-----------------------------------------------------------------------------
// implementation
//-----------------------------------------------------------------------------
// Parse the command line, print help on request, then load the three pattern
// files and publish their geometry through the globals above.
// NOTE: the processParam() registration order fixes the option order in the
// --help output, and load_file() must precede the geometry assignments.
void arg(int argc, char** argv)
{
eoParser parser(argc, argv);
parser.processParam(pop_size, "genetic operators");
parser.processParam(generations, "genetic operators");
parser.processParam(mut_rate, "genetic operators");
parser.processParam(xover_rate, "genetic operators");
parser.processParam(file, "files");
parser.processParam(hiddenp, "genetic operators");
if (parser.userNeedsHelp())
{
parser.printHelp(cout);
exit(EXIT_SUCCESS);
}
// load <file>.trn / .val / .tst (exits on failure, see load_file)
load_file(train, "trn");
load_file(validate, "val");
load_file(test, "tst");
// totals shown by phenotype::operator<<
phenotype::trn_max = train.size();
phenotype::val_max = validate.size();
phenotype::tst_max = test.size();
// network geometry deduced from the first training pattern
in = train.front().input.size();
out = train.front().output.size();
// hand the loaded sets to the genetic operators in gprop.h
gprop_use_datasets(&train, &validate, &test);
hidden = hiddenp.value();
}
//-----------------------------------------------------------------------------
// Load the pattern set from "<file>.<ext>" into `set`.
// Exits the program if the file cannot be opened or contains no samples.
void load_file(mlp::set& set, const string& ext)
{
    const string filename = file.value() + "." + ext;
    ifstream ifs(filename.c_str());
    if (!ifs)
    {
        cerr << "can't open file \"" << filename << "\"" << endl;
        exit(EXIT_FAILURE);
    }
    ifs >> set;
    if (set.empty())
    {
        // FIX: terminate the message with endl like the open-failure branch
        cerr << filename << " data file is empty!" << endl;
        exit(EXIT_FAILURE);
    }
}
//-----------------------------------------------------------------------------
// Run the complete genetic algorithm: build and evaluate the initial
// population, evolve it with eoSGA until one of the continuators stops it,
// then print the best individual found.
void ga()
{
// create population of pop_size random networks
eoInitChrom init;
eoPop<Chrom> pop(pop_size.value(), init);
// evaluate population
eoEvalFuncPtr<Chrom> evaluator(eoChromEvaluator);
apply<Chrom>(evaluator, pop);
// selector
eoStochTournamentSelect<Chrom> select;
// genetic operators
eoChromMutation mutation;
eoChromXover xover;
// stop condition: generation budget exhausted ...
eoGenContinue<Chrom> continuator1(generations.value());
// ... or the target fitness is reached: all-but-one validation hits with a
// zero MSE (presumably chosen so any real error compares below it -- see
// phenotype::operator<; confirm against eoFitContinue's semantics)
phenotype p; p.val_ok = validate.size() - 1; p.mse_error = 0;
eoFitContinue<Chrom> continuator2(p);
eoCombinedContinue<Chrom> continuator(continuator1, continuator2);
// checkpoint wraps the continuator so monitors/stats run every generation
eoCheckPoint<Chrom> checkpoint(continuator);
// monitor
eoStdoutMonitor monitor;
checkpoint.add(monitor);
// statistics
eoBestFitnessStat<Chrom> stats;
checkpoint.add(stats);
monitor.add(stats);
// genetic algorithm
eoSGA<Chrom> sga(select,
xover, xover_rate.value(),
mutation, mut_rate.value(),
evaluator,
checkpoint);
sga(pop);
cout << "best: " << *max_element(pop.begin(), pop.end()) << endl;
}
//-----------------------------------------------------------------------------
// Local Variables:
// mode:C++
// End:

239
tutorial/eo/app/gprop/gprop.h Executable file
View file

@ -0,0 +1,239 @@
// -*- mode: c++; c-indent-level: 4; c++-member-init-indent: 8; comment-column: 35; -*-
//-----------------------------------------------------------------------------
// gprop.h
// (c) GeNeura Team 1998
/*
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: todos@geneura.ugr.es, http://geneura.ugr.es
*/
//-----------------------------------------------------------------------------
#ifndef gprop_h
#define gprop_h
//-----------------------------------------------------------------------------
#include <iostream> // istream ostream
#include <iomanip> // setprecision
#include <string> // string
#include <paradiseo/eo.h> // EO
#include <paradiseo/eo/eoOp.h> // eoMonOp eoQuadraticOp
#include <paradiseo/eo/eoInit.h> // eoInit
#include <paradiseo/eo/utils/rnd_generators.h> // normal_generator
#include "mlp.h" // mlp::net mlp::set
#include "qp.h" // qp::set
#include "mse.h" // mse::error
//-----------------------------------------------------------------------------
// phenotype
//-----------------------------------------------------------------------------
struct phenotype
{
    // counts of correctly classified patterns on each data set
    int trn_ok, val_ok, tst_ok;
    // MSE on the validation set (lower is better)
    double mse_error;
    // totals shared by all phenotypes, set once after the data sets load
    static int trn_max, val_max, tst_max;
    // Ordering: primarily more validation hits; on a tie a LOWER error ranks
    // higher, so "a < b" holds when a's mse_error is the larger one.
    friend bool operator<(const phenotype& a, const phenotype& b)
    {
        if (a.val_ok != b.val_ok)
            return a.val_ok < b.val_ok;
        return b.mse_error < a.mse_error;
    }
    friend bool operator==(const phenotype& a, const phenotype& b)
    {
        return a.val_ok == b.val_ok && a.mse_error == b.mse_error;
    }
    friend bool operator>=(const phenotype& a, const phenotype& b)
    {
        return !(a < b);
    }
    friend bool operator>(const phenotype& a, const phenotype& b)
    {
        return !(a == b) && !(a < b);
    }
    // "hits/total" for each set, then the validation MSE
    friend std::ostream& operator<<(std::ostream& os, const phenotype& p)
    {
        os << p.trn_ok << "/" << p.trn_max << " "
           << p.val_ok << "/" << p.val_max << " "
           << p.tst_ok << "/" << p.tst_max << " "
           << p.mse_error;
        return os;
    }
    friend std::istream& operator>>(std::istream& is, phenotype& p)
    {
        return is; // complete me
    }
};
int phenotype::trn_max = 0, phenotype::val_max = 0, phenotype::tst_max = 0;
//-----------------------------------------------------------------------------
// genotype
//-----------------------------------------------------------------------------
#ifndef GPROP_GENOTYPE
#define GPROP_GENOTYPE mlp::net
#endif
typedef GPROP_GENOTYPE genotype;
//-----------------------------------------------------------------------------
// Chrom
//-----------------------------------------------------------------------------
// Defined in gprop.cpp and fixed by arg() before any Chrom is constructed.
extern unsigned in, out, hidden;
// An EO individual: an mlp::net scored with a phenotype fitness.
class Chrom: public EO<phenotype>, public genotype
{
public:
// hidden == 0 -> no hidden layer; otherwise one layer of `hidden` units
Chrom(): genotype(in, out, std::vector<unsigned>(hidden < 1 ? 0 : 1, hidden)) {}
std::string className() const { return "Chrom"; }
// NOTE(review): static_cast<genotype>(*this) copies the whole network
// (slicing to the base) just to print it.
void printOn (std::ostream& os) const
{
os << std::setprecision(3) << static_cast<genotype>(*this) << " \t"
<< fitness();
// os << fitness();
}
// Deserialization is not implemented; just mark the fitness invalid.
void readFrom (std::istream& is)
{
invalidate(); // complete me
}
};
//-----------------------------------------------------------------------------
// eoChromInit
//-----------------------------------------------------------------------------
// EO initializer: re-randomize the network and invalidate its fitness.
class eoInitChrom: public eoInit<Chrom>
{
public:
void operator()(Chrom& chrom)
{
chrom.reset();      // re-draw all weights/biases (mlp::net::reset)
chrom.invalidate(); // the old fitness no longer matches the new weights
}
};
//-----------------------------------------------------------------------------
// global variables
//-----------------------------------------------------------------------------
// Non-owning pointers to the data sets used by the operators below; null
// until gprop_use_datasets() is called (gprop.cpp passes its globals).
// NOTE(review): defining variables and non-inline functions in a header
// breaks under multiple inclusion across translation units (ODR).
mlp::set *trn_set = 0, *val_set = 0, *tst_set = 0;
// Register the training/validation/test sets for the genetic operators.
void gprop_use_datasets(mlp::set *trn, mlp::set *val, mlp::set *tst) {
trn_set = trn;
val_set = val;
tst_set = tst;
}
// Abort loudly if the data sets were never registered (only trn_set checked).
void ensure_datasets_initialized() {
if (!trn_set) {
std::cerr << "trn_set is not initialized. Must call gprop_use_datasets before training\n";
std::cerr.flush();
abort();
}
}
//-----------------------------------------------------------------------------
// eoChromMutation
//-----------------------------------------------------------------------------
// Mutation: a short burst (10 epochs) of quickprop training on the training
// set, applied through the mse::net decorator.
class eoChromMutation: public eoMonOp<Chrom>
{
public:
    bool operator()(Chrom& chrom)
    {
        // FIX: guard *trn_set like eoChromXover and eoChromEvaluator do;
        // previously a null trn_set was dereferenced without a diagnostic.
        ensure_datasets_initialized();
        mse::net tmp(chrom);
        tmp.train(*trn_set, 10, 0, 0.001);
        return true; // conservatively report the chromosome as modified
    }
};
//-----------------------------------------------------------------------------
// eoChromXover
//-----------------------------------------------------------------------------
// "Crossover": despite the name no genes are exchanged -- one parent is
// normalized, the other desaturated, then both are retrained for 100
// quickprop epochs. TODO(review): confirm this asymmetric treatment is
// intentional rather than a placeholder.
class eoChromXover: public eoQuadOp<Chrom>
{
public:
bool operator()(Chrom& chrom1, Chrom& chrom2)
{
chrom1.normalize();  // rescale each neuron's (bias, weights) to unit norm
chrom2.desaturate(); // squash each parameter back into (-5, 5)
mse::net tmp1(chrom1), tmp2(chrom2);
ensure_datasets_initialized();
tmp1.train(*trn_set, 100, 0, 0.001);
tmp2.train(*trn_set, 100, 0, 0.001);
return true;         // always report both as modified
}
};
//-----------------------------------------------------------------------------
// eoChromEvaluator
//-----------------------------------------------------------------------------
// Count the samples in `set` that `net` classifies entirely correctly:
// every output unit must fall on the same side of 0.5 as its target
// (values exactly at 0.5 never match, as in the original comparison).
int correct(const mlp::net& net, const mlp::set& set)
{
    int sum = 0;
    for (mlp::set::const_iterator s = set.begin(); s != set.end(); ++s)
    {
        // FIX: run the forward pass once per sample; it was previously
        // recomputed up to twice per output unit inside the condition.
        const mlp::vector result = net(s->input);
        unsigned partial = 0;
        for (unsigned i = 0; i < s->output.size(); ++i)
            if ((s->output[i] < 0.5 && result[i] < 0.5) ||
                (s->output[i] > 0.5 && result[i] > 0.5))
                ++partial;
        if (partial == s->output.size())
            ++sum;
    }
    return sum;
}
// Fitness: classification hit counts on all three sets, plus the MSE on the
// validation set (phenotype's ordering uses val_ok first, then mse_error).
phenotype eoChromEvaluator(const Chrom& chrom)
{
phenotype p;
ensure_datasets_initialized();
p.trn_ok = correct(chrom, *trn_set);
p.val_ok = correct(chrom, *val_set);
p.tst_ok = correct(chrom, *tst_set);
p.mse_error = mse::error(chrom, *val_set);
return p;
}
//-----------------------------------------------------------------------------
#endif // gprop_h
// Local Variables:
// mode:C++
// End:

140
tutorial/eo/app/gprop/l2.h Executable file
View file

@ -0,0 +1,140 @@
//-----------------------------------------------------------------------------
// l2.h
//-----------------------------------------------------------------------------
#ifndef l2_h
#define l2_h
//-----------------------------------------------------------------------------
#include <math.h> // log
#include "qp.h" // neuron layer net set
//-----------------------------------------------------------------------------
namespace l2
{
//---------------------------------------------------------------------------
// useful typedefs
//---------------------------------------------------------------------------
using qp::real;
using qp::vector;
using qp::max_real;
using qp::min_real;
using qp::set;
using qp::neuron;
using qp::layer;
//---------------------------------------------------------------------------
// error
//---------------------------------------------------------------------------
// Cross-entropy error of `net` accumulated over the whole set `ts`.
// min_real is added inside each log() to avoid log(0) = -infinity.
real error(const mlp::net& net, const set& ts)
{
    real total = 0.0;
    for (set::const_iterator s = ts.begin(); s != ts.end(); ++s)
    {
        const vector out = net(s->input);
        for (unsigned i = 0; i < out.size(); ++i)
        {
            const real t = s->output[i];
            const real y = out[i];
            total -= t * log(y + min_real) +
                     (1.0 - t) * log(1.0 - y + min_real);
        }
    }
    return total;
}
//-------------------------------------------------------------------------
// l2
//-------------------------------------------------------------------------
// Cross-entropy (log-loss) variant of the quickprop net: error() runs one
// forward + backward pass per sample, accumulating gradients in the qp
// working state while returning the total cross-entropy of the set.
class net: public qp::net
{
public:
net(mlp::net& n): qp::net(n) {}
// One epoch's worth of gradient accumulation over `ts`.
real error(const set& ts)
{
real error_ = 0;
for (set::const_iterator s = ts.begin(); s != ts.end(); ++s)
{
forward(s->input);
// backward() returns the sample log-likelihood, hence the minus
error_ -= backward(s->input, s->output);
}
return error_;
}
private:
// Backpropagate one sample through the layers (output first), adding the
// per-weight gradients into dxo/ndelta; returns the sample log-likelihood.
real backward(const vector& input, const vector& output)
{
reverse_iterator current_layer = rbegin();
reverse_iterator backward_layer = current_layer + 1;
real error_ = 0;
// output layer
for (unsigned j = 0; j < current_layer->size(); ++j)
{
neuron& n = (*current_layer)[j];
real out = output[j];
// cross-entropy delta; min_real guards the division when n.out is 0 or 1
n.ndelta += n.delta = (out - n.out) /
(n.out * (1.0 - n.out) + min_real) * n.out * (1.0 - n.out);
if (size() == 1) // monolayer
n.dxo += n.delta * input;
else // multilayer
for (unsigned k = 0; k < n.dxo.size(); ++k)
n.dxo[k] += n.delta * (*backward_layer)[k].out;
error_ += out * log(n.out + min_real) +
(1.0 - out) * log(1.0 - n.out + min_real);
}
// hidden layers
while (++current_layer != rend())
{
reverse_iterator forward_layer = current_layer - 1;
// intentionally shadows the outer backward_layer for this deeper layer
reverse_iterator backward_layer = current_layer + 1;
for (unsigned j = 0; j < current_layer->size(); ++j)
{
neuron& n = (*current_layer)[j];
real sum = 0;
for (unsigned k = 0; k < forward_layer->size(); ++k)
{
neuron& nf = (*forward_layer)[k];
// weights are read through the pending update (weight + dweight1)
sum += nf.delta * (nf.n->weight[j] + nf.dweight1[j]);
}
n.delta = n.out * (1.0 - n.out) * sum;
n.ndelta += n.delta;
if (backward_layer == rend()) // first hidden layer
n.dxo += n.delta * input;
else // rest of hidden layers
for (unsigned k = 0; k < n.dxo.size(); ++k)
n.dxo[k] += n.delta * (*backward_layer)[k].out;
}
}
return error_;
}
};
//---------------------------------------------------------------------------
} // namespace l2
//-----------------------------------------------------------------------------
#endif // l2_h
// Local Variables:
// mode:C++
// End:

514
tutorial/eo/app/gprop/mlp.h Executable file
View file

@ -0,0 +1,514 @@
//-----------------------------------------------------------------------------
// mlp.h
//-----------------------------------------------------------------------------
#ifndef mlp_h
#define mlp_h
#include <algorithm> // generate
#include <cmath> // exp
#include <iostream>
#include <iterator>
#include <numeric>
#include <stdexcept> // invalid_argument
#include <utility>
#include <vector>
#include <paradiseo/eo/utils/eoRNG.h> // eoRng
#include <paradiseo/eo/utils/rnd_generators.h> // normal_generator
#include "vecop.h" // *
#include <assert.h>
#include <limits>
#ifdef HAVE_LIBYAML_CPP
#include <yaml-cpp/serializable.h>
#endif // HAVE_LIBYAML_CPP
namespace mlp
{
  using namespace std;
  typedef double real;
  typedef std::vector<real> vector;
}
// NOTE(review): adding overloads to namespace std is technically undefined
// behavior; namespace mlp (found via ADL) would be the better home. They are
// kept here to preserve the original name lookup exactly.
namespace std {
// Print every element followed by a single space (trailing space included).
ostream& operator<<(ostream& os, const mlp::vector& v)
{
  for (mlp::vector::size_type i = 0; i < v.size(); ++i)
    os << v[i] << " ";
  return os;
}
// Read exactly v.size() values; the vector must be pre-sized by the caller.
istream& operator>>(istream& is, mlp::vector& v)
{
  for (mlp::vector::size_type i = 0; i < v.size(); ++i)
    is >> v[i];
  return is;
}
}
namespace mlp
{
using namespace std;
//---------------------------------------------------------------------------
// useful typedefs
//---------------------------------------------------------------------------
// numeric extrema of the working floating-point type; note that min() is the
// smallest POSITIVE normalized value, not the most negative one
const real max_real = std::numeric_limits<real>::max();
const real min_real = std::numeric_limits<real>::min();
//---------------------------------------------------------------------------
// sigmoid: logistic activation mapping any real into (0, 1)
//---------------------------------------------------------------------------
real sigmoid(const real& x)
{
  const real e = exp(-x);
  return 1.0 / (1.0 + e);
}
//---------------------------------------------------------------------------
// neuron
//---------------------------------------------------------------------------
// A single logistic unit: bias plus one weight per input.
struct neuron
{
real bias;     // additive bias term
vector weight; // one weight per input
// NOTE(review): bias and weights stay uninitialized until reset() is called.
neuron(const unsigned& num_inputs = 0): weight(num_inputs) {}
// Re-draw bias and all weights from a normal distribution (stddev 1).
void reset()
{
normal_generator<real> rnd(1.0);
bias = rnd();
generate(weight.begin(), weight.end(), rnd);
}
// Activation for one input vector (vecop.h supplies the dot product).
real operator()(const vector& input) const
{
return sigmoid(bias + weight * input);
}
// number of free parameters (weights + bias)
unsigned length() const { return weight.size() + 1; }
// Scale (bias, weights) to a unit-length parameter vector.
void normalize()
{
real n = sqrt(bias * bias + weight * weight);
bias /= n;
weight /= n;
}
// Squash each parameter into (-5, 5) through a logistic map.
void desaturate()
{
bias = -5.0 + 10.0 / (1.0 + exp(bias / -5.0));
for (vector::iterator w = weight.begin(); w != weight.end(); ++w)
*w = -5.0 + 10.0 / (1.0 + exp(*w / -5.0));
}
// Add uniform noise scaled by the parameter's magnitude.
// NOTE(review): max(num, 0.05) forces the minimum scale 0.05 for every
// NEGATIVE parameter -- presumably fabs(num) was intended; confirm.
void perturb_num(double &num, double magnitude) {
double scale = max(num, 0.05) * magnitude;
double perturbation = scale * (rng.uniform() - 0.5);
num += perturbation;
}
// Perturb each weight, and the bias, each with the given probability.
void perturb(double magnitude = 0.3, double probability = 1.0)
{
for (vector::iterator w = weight.begin(); w != weight.end(); ++w)
if ( probability >= 1.0 || rng.uniform() < probability)
perturb_num(*w, magnitude);
if ( probability >= 1.0 || rng.uniform() < probability)
perturb_num(bias, magnitude);
}
#ifdef HAVE_LIBYAML_CPP
YAML_SERIALIZABLE_AUTO(neuron)
// YAML serialization: emit/load bias and weight under a "Class" tag.
void emit_yaml(YAML::Emitter&out) const {
out << YAML::BeginMap;
out << YAML::Key << "Class" << YAML::Value << "mlp::neuron";
YAML_EMIT_MEMBER(out,bias);
YAML_EMIT_MEMBER(out,weight);
out << YAML::EndMap;
}
void load_yaml(const YAML::Node& node) {
YAML_LOAD_MEMBER(node, bias);
YAML_LOAD_MEMBER(node, weight);
}
#endif
};
}
namespace std {
// Print the bias, a space, then the weight vector.
ostream& operator<<(ostream& os, const mlp::neuron& n)
{
  os << n.bias << " " << n.weight;
  return os;
}
// Read the bias first, then the (pre-sized) weight vector.
istream& operator>>(istream& is, mlp::neuron& n)
{
  is >> n.bias;
  is >> n.weight;
  return is;
}
}
namespace mlp {
//---------------------------------------------------------------------------
// layer
//---------------------------------------------------------------------------
// One layer: a vector of neurons that all read the same input vector.
class layer: public std::vector<neuron>
{
public:
    layer(const unsigned& num_inputs = 0, const unsigned& num_neurons = 0):
        std::vector<neuron>(num_neurons, neuron(num_inputs)) {}
    // Re-randomize every neuron.
    // NOTE(review): `rnd` is unused -- each neuron::reset() builds its own
    // generator; kept in case the constructor touches shared RNG state.
    void reset()
    {
        normal_generator<real> rnd(1.0);
        for(iterator n = begin(); n != end(); ++n)
            n->reset();
    }
    // Forward pass: output i is neuron i applied to the whole input.
    vector operator()(const vector& input) const
    {
        vector output(size());
        for(unsigned i = 0; i < output.size(); ++i)
            output[i] = (*this)[i](input);
        return output;
    }
    // Free parameters in the layer (assumes a uniform fan-in).
    unsigned length() const { return front().length() * size(); }
    void normalize()
    {
        for(iterator n = begin(); n != end(); ++n)
            n->normalize();
    }
    void desaturate()
    {
        for(iterator n = begin(); n != end(); ++n)
            n->desaturate();
    }
    // Randomly perturb every neuron.
    // FIX: forward magnitude/probability -- the arguments were previously
    // ignored and the neuron defaults were always used.
    void perturb(double magnitude = 0.3, double probability = 1.0)
    {
        for(iterator n = begin(); n != end(); ++n)
            n->perturb(magnitude, probability);
    }
#ifdef HAVE_LIBYAML_CPP
    // FIX: was declared to return ostream& but returned nothing (undefined
    // behavior); emit through the YAML emitter and return it.
    friend YAML::Emitter& operator<<(YAML::Emitter& e, const layer &l) {
        e << ((std::vector<neuron>)l);
        return e;
    }
    friend void operator>>(const YAML::Node& n, layer &l) {
        // Read through the std::vector<neuron> base: a plain `n >> l` is
        // ambiguous for the compiler's overload resolution.
        std::vector<mlp::neuron> *obviously_a_vector = &l;
        n >> *obviously_a_vector;
    }
#endif
};
}
namespace std {
ostream& operator<<(ostream& os, const mlp::layer& l)
{
ostream_iterator<mlp::neuron> oi(os, " ");
copy(l.begin(), l.end(), oi);
return os;
}
istream& operator>>(istream& is, mlp::layer& l)
{
for (mlp::layer::iterator li = l.begin() ; li != l.end() ; li++) {
is >> *li;
}
return is;
}
}
namespace mlp {
//---------------------------------------------------------------------------
// net
//---------------------------------------------------------------------------
// A feed-forward multilayer perceptron: a stack of layers applied in order.
class net: public std::vector<layer>
#ifdef HAVE_LIBYAML_CPP
    , public YAML::Serializable
#endif
{
public:
    // Build num_inputs -> hidden[0] -> ... -> num_outputs.
    // An empty `hidden` vector yields a single input->output layer.
    net(const unsigned& num_inputs = 0,
        const unsigned& num_outputs = 0,
        const std::vector<unsigned>& hidden = std::vector<unsigned>())
    {
        init(num_inputs,num_outputs,hidden);
    }
    // Deserialize from the text format written by save().
    net(istream &is) {
        load(is);
    }
#ifdef HAVE_LIBYAML_CPP
    YAML_SERIALIZABLE_AUTO(net)
    void emit_members(YAML::Emitter&out) const {
        const std::vector<layer>* me_as_layer_vector = this;
        out << YAML::Key << "layers" << YAML::Value << *me_as_layer_vector;
    }
    void load_members(const YAML::Node& node) {
        std::vector<layer>* me_as_layer_vector = this;
        node["layers"] >> *me_as_layer_vector;
    }
#endif // HAVE_LIBYAML_CPP
    /** Virtual destructor */
    virtual ~net() {}
    // Parse the header (inputs, outputs, hidden-layer count and sizes, then
    // the repeated output count), rebuild the geometry, and read the
    // '<'-delimited layer list produced by save().
    void load(istream &is) {
        unsigned num_inputs;
        unsigned num_outputs;
        unsigned num_hidden_layers;
        is >> num_inputs >> num_outputs >> num_hidden_layers;
        std::vector<unsigned> layer_sizes;
        for (unsigned i=0; i<num_hidden_layers;i++) {
            unsigned layer_size;
            is >> layer_size;
            layer_sizes.push_back(layer_size);
        }
        unsigned check_outputs;
        is >> check_outputs;
        assert (check_outputs == num_outputs);
        init (num_inputs,num_outputs,layer_sizes);
        // skip forward past the opening '<' delimiter
        char c=' ';
        while (c!='<' && !is.eof()) { is >> c;}
        for (iterator l =begin() ; l != end(); l++) {
            is >> *l;
        }
        do { is >> c; } while (c == ' ' && !is.eof());
        assert(c == '>');
    }
    // (Re)build the layer stack for the requested geometry.
    void init( unsigned num_inputs,
               unsigned num_outputs,
               const std::vector<unsigned>& hidden ) {
        clear();
        switch(hidden.size())
        {
        case 0:
            push_back(layer(num_inputs, num_outputs));
            break;
        default:
            push_back(layer(num_inputs, hidden.front()));
            for (unsigned i = 0; i < hidden.size() - 1; ++i)
                push_back(layer(hidden[i], hidden[i + 1]));
            push_back(layer(hidden.back(), num_outputs));
            break;
        }
    }
    // Re-randomize every layer.
    // NOTE(review): `rnd` is unused here (each neuron builds its own
    // generator); kept in case the constructor touches shared RNG state.
    void reset()
    {
        normal_generator<real> rnd(1.0);
        for(iterator l = begin(); l != end(); ++l)
            l->reset();
    }
    virtual vector operator()(const vector& input) const ;
    // Index of the strongest output unit for this input.
    unsigned winner(const vector& input) const
    {
        vector tmp = (*this)(input);
        return (max_element(tmp.begin(), tmp.end()) - tmp.begin());
    }
    void save(ostream &os) const {
        // Save the number of inputs, number of outputs, and number of hidden layers
        os << num_inputs() << "\n" << num_outputs() << "\n" << num_hidden_layers() << "\n";
        for(const_iterator l = begin(); l != end(); ++l)
            os << l->size() << " ";
        os << "\n";
        os << "< ";
        for(const_iterator l = begin(); l != end(); ++l)
            os << *l << " ";
        os << ">\n";
    }
    unsigned num_inputs() const { return front().front().length() - 1; }
    unsigned num_outputs() const { return back().size(); }
    // Layers stored = hidden layers + the output layer, hence size() - 1,
    // clamped at zero for an empty net.
    unsigned num_hidden_layers() const {
        signed s = (signed) size() -1;
        return (s<0) ? 0 : s ;
    }
    // Total number of free parameters.
    unsigned length()
    {
        unsigned sum = 0;
        for(iterator l = begin(); l != end(); ++l)
            sum += l->length();
        return sum;
    }
    void normalize()
    {
        for(iterator l = begin(); l != end(); ++l)
            l->normalize();
    }
    void desaturate()
    {
        for(iterator l = begin(); l != end(); ++l)
            l->desaturate();
    }
    // Randomly perturb every layer.
    // FIX: forward magnitude/probability -- the arguments were previously
    // ignored and the defaults were always used.
    void perturb(double magnitude = 0.3, double probability = 1.0)
    {
        for(iterator l = begin(); l != end(); ++l)
            l->perturb(magnitude, probability);
    }
};
#ifndef NO_MLP_VIRTUALS
// Forward pass: feed `input` through each layer in order.
// Defined out of class; NO_MLP_VIRTUALS lets another TU supply the body.
// NOTE(review): defined in a header without `inline` -- including this file
// from more than one translation unit would violate the ODR.
vector net::operator()(const vector& input) const
{
vector tmp = input;
for(const_iterator l = begin(); l != end(); ++l)
tmp = (*l)(tmp);
return tmp;
}
#endif
//---------------------------------------------------------------------------
// sample
//---------------------------------------------------------------------------
// One training pattern: a stimulus vector and its target vector.
struct sample
{
    vector input, output;
    sample(unsigned input_size = 0, unsigned output_size = 0):
        input(input_size), output(output_size) {}
};
// Read input then output (both must be pre-sized).
istream& operator>>(istream& is, sample& s)
{
    is >> s.input;
    is >> s.output;
    return is;
}
// Print input and output separated by a single space.
ostream& operator<<(ostream& os, const sample& s)
{
    os << s.input << " " << s.output;
    return os;
}
//---------------------------------------------------------------------------
// set
//---------------------------------------------------------------------------
class set: public std::vector<sample>
{
public:
set(unsigned input_size = 0, unsigned output_size = 0,
unsigned num_samples = 0):
std::vector<sample>(num_samples, sample(input_size, output_size)) {}
set(istream& is) : std::vector<sample>(0, sample(0, 0)) {
clear();
load(is);
}
void load(istream &is) {
unsigned input_size, output_size;
is >> input_size >> output_size;
sample samp(input_size, output_size);;
while (is >> samp) { push_back(samp); }
}
void save(ostream &os) const {
os << front().input.size() << " " << front().output.size() << endl;
copy(begin(), end(), ostream_iterator<sample>(os,"\n"));
}
};
// "<", one sample per line, then ">" (no newline after the closing '>').
ostream& operator<<(ostream& os, const set& s)
{
    os << "<" << endl;
    for (set::const_iterator it = s.begin(); it != s.end(); ++it)
        os << *it << endl;
    os << ">";
    return os;
}
//---------------------------------------------------------------------------
// euclidean_distance
//---------------------------------------------------------------------------
// Euclidean distance between the parameter vectors of two nets, walking the
// layers back-to-front and stopping at the end of the shorter net.
real euclidean_distance(const net& n1, const net& n2)
{
    real sum = 0;
    for(net::const_reverse_iterator l1 = n1.rbegin(), l2 = n2.rbegin();
        l1 != n1.rend() && l2 != n2.rend(); ++l1, ++l2)
        // iterators renamed u1/u2: the originals shadowed the parameters
        for(layer::const_iterator u1 = l1->begin(), u2 = l2->begin();
            u1 != l1->end() && u2 != l2->end(); ++u1, ++u2)
        {
            const real db = u1->bias - u2->bias;
            const vector dw = u1->weight - u2->weight;
            sum += db * db + dw * dw;
        }
    return sqrt(sum);
}
//---------------------------------------------------------------------------
} // namespace mlp
#endif // mlp_h
// Local Variables:
// mode:C++
// c-file-style: "Stroustrup"
// End:

140
tutorial/eo/app/gprop/mse.h Executable file
View file

@ -0,0 +1,140 @@
//-----------------------------------------------------------------------------
// mse.h
//-----------------------------------------------------------------------------
#ifndef mse_h
#define mse_h
//-----------------------------------------------------------------------------
#include "qp.h" // neuron layer net set
//-----------------------------------------------------------------------------
namespace mse
{
//---------------------------------------------------------------------------
// useful typedefs
//---------------------------------------------------------------------------
using qp::real;
using qp::vector;
using qp::max_real;
using qp::min_real;
using qp::set;
using qp::neuron;
using qp::layer;
//---------------------------------------------------------------------------
// error
//---------------------------------------------------------------------------
// Squared error of `net` over the set `ts`, averaged per sample and summed
// over output units. NOTE(review): divides by ts.size() -- an empty set
// divides by zero, as in the original.
real error(const mlp::net& net, const set& ts)
{
    real total = 0.0;
    for (set::const_iterator s = ts.begin(); s != ts.end(); ++s)
    {
        const vector out = net(s->input);
        for (unsigned i = 0; i < out.size(); ++i)
        {
            const real d = s->output[i] - out[i];
            total += d * d;
        }
    }
    return total / ts.size();
}
//-------------------------------------------------------------------------
// mse
//-------------------------------------------------------------------------
// Squared-error variant of the quickprop net: error() runs one forward +
// backward pass per sample, accumulating gradients in the qp working state
// while returning the mean squared error of the set.
class net: public qp::net
{
public:
net(mlp::net& n): qp::net(n) {}
// One epoch's worth of gradient accumulation over `ts`; returns the MSE.
real error(const set& ts)
{
real error_ = 0;
for (set::const_iterator s = ts.begin(); s != ts.end(); ++s)
{
forward(s->input);
error_ += backward(s->input, s->output);
}
error_ /= ts.size();
return error_;
}
private:
// Backpropagate one sample through the layers (output first), adding the
// per-weight gradients into dxo/ndelta; returns the sample squared error.
real backward(const vector& input, const vector& output)
{
reverse_iterator current_layer = rbegin();
reverse_iterator backward_layer = current_layer + 1;
real error_ = 0;
// output layer
for (unsigned j = 0; j < current_layer->size(); ++j)
{
neuron& n = (*current_layer)[j];
real diff = output[j] - n.out;
// delta = (target - output) * sigmoid'(activation)
n.ndelta += n.delta = diff * n.out * (1.0 - n.out);
if (size() == 1) // monolayer
n.dxo += n.delta * input;
else // multilayer
for (unsigned k = 0; k < n.dxo.size(); ++k)
n.dxo[k] += n.delta * (*backward_layer)[k].out;
error_ += diff * diff;
}
// hidden layers
while (++current_layer != rend())
{
reverse_iterator forward_layer = current_layer - 1;
// intentionally shadows the outer backward_layer for this deeper layer
reverse_iterator backward_layer = current_layer + 1;
for (unsigned j = 0; j < current_layer->size(); ++j)
{
neuron& n = (*current_layer)[j];
real sum = 0;
for (unsigned k = 0; k < forward_layer->size(); ++k)
{
neuron& nf = (*forward_layer)[k];
// weights are read through the pending update (weight + dweight1)
sum += nf.delta * (nf.n->weight[j] + nf.dweight1[j]);
}
n.delta = n.out * (1.0 - n.out) * sum;
n.ndelta += n.delta;
if (backward_layer == rend()) // first hidden layer
n.dxo += n.delta * input;
else // rest of hidden layers
for (unsigned k = 0; k < n.dxo.size(); ++k)
n.dxo[k] += n.delta * (*backward_layer)[k].out;
}
}
return error_;
}
};
//---------------------------------------------------------------------------
} // namespace mse
//-----------------------------------------------------------------------------
#endif // mse_h
// Local Variables:
// mode:C++
// End:

251
tutorial/eo/app/gprop/qp.h Executable file
View file

@ -0,0 +1,251 @@
//-----------------------------------------------------------------------------
// qp.h
//-----------------------------------------------------------------------------
#ifndef qp_h
#define qp_h
//-----------------------------------------------------------------------------
#include <iostream> // istream ostream
#include <algorithm> // fill
#include <vector> // vector
#include <paradiseo/eo/utils/rnd_generators.h> // uniform_generator
#include "mlp.h" // neuron layer net
//-----------------------------------------------------------------------------
namespace qp
{
//---------------------------------------------------------------------------
// useful typedefs
//---------------------------------------------------------------------------
using mlp::real;
using mlp::vector;
using mlp::max_real;
using mlp::min_real;
using mlp::set;
//---------------------------------------------------------------------------
// useful constants
//---------------------------------------------------------------------------
// Default learning rate (eta) and the floor it may never shrink below.
const real eta_default = 0.5;
const real eta_floor = 0.0001;
// Default momentum coefficient (alpha).
const real alpha_default = 0.9;
// Proportionality factor between momentum and eta (momentum = eta * lambda,
// see net::train / net::coeff_adapt); lambda0 rescales it in coeff_adapt.
const real lambda_default = 0.5;
const real lambda0 = 0.1;
// Factor by which eta is reduced when an epoch makes the error worse
// (see the backtracking branch of net::train).
const real backtrack_step = 0.5;
// Lower bounds for the gradient norm (me) and weight-change norm (mw)
// used in net::coeff_adapt to avoid division by ~zero.
const real me_floor = 0.0001;
const real mw_floor = 0.0001;
//---------------------------------------------------------------------------
// neuron
//---------------------------------------------------------------------------
// Training-state wrapper around an mlp::neuron. The weights and bias live in
// the wrapped neuron; this struct adds the transient quantities needed while
// training:
//   out              last activation produced by operator()
//   delta            local error term for the current pattern
//   ndelta           delta accumulated over the training set (bias gradient)
//   dbias1/dbias2    current and previous tentative bias changes
//   dweight1/dweight2 current and previous tentative weight changes
//   dxo              weight gradient accumulated over the training set
struct neuron
{
    mlp::neuron* n;
    real out, delta, ndelta, dbias1, dbias2;
    vector dweight1, dweight2, dxo;
    neuron(mlp::neuron& _n):
        n(&_n), out(0), delta(0), ndelta(0), dbias1(0), dbias2(0),
        dweight1(n->weight.size(), 0),
        dweight2(n->weight.size(), 0),
        dxo(n->weight.size(), 0) {}
    // Resets both the wrapped neuron and all training state to zero.
    void reset()
    {
        // underlaying neuron
        n->reset();
        // addons
        out = delta = ndelta = dbias1 = dbias2 = 0;
        fill(dweight1.begin(), dweight1.end(), 0);
        fill(dweight2.begin(), dweight2.end(), 0);
        fill(dxo.begin(), dxo.end(), 0);
    }
    // Activation: sigmoid of bias and weights WITH their tentative (not yet
    // committed) changes applied — see net::weight_update, which either
    // commits or recomputes dbias1/dweight1.
    real operator()(const vector& input)
    {
        return out = mlp::sigmoid(n->bias + dbias1 +
                                  (n->weight + dweight1) * input);
    }
};
// Streams the wrapped mlp neuron followed by every piece of training state,
// space-separated.
// Fix: marked inline — this free function is defined in a header, so without
// inline every translation unit that includes qp.h would emit its own
// definition and the link would fail with duplicate symbols (ODR).
inline std::ostream& operator<<(std::ostream& os, const neuron& n)
{
    return os << *n.n << " " << n.out << " " << n.delta << " "
              << n.ndelta << " " << n.dbias1 << " " << n.dbias2 << " "
              << n.dweight1 << " " << n.dweight2 << " " << n.dxo;
}
//---------------------------------------------------------------------------
// layer
//---------------------------------------------------------------------------
class layer: public std::vector<neuron>
{
public:
layer(mlp::layer& l)//: std::vector<neuron>(l.begin(), l.end()) {}
{
for (mlp::layer::iterator n = l.begin(); n != l.end(); ++n)
push_back(neuron(*n));
}
void reset()
{
for(iterator n = begin(); n != end(); ++n)
n->reset();
}
vector operator()(const vector& input)
{
vector output(size());
for(unsigned i = 0; i < output.size(); ++i)
output[i] = (*this)[i](input);
return output;
}
};
//---------------------------------------------------------------------------
// net
//---------------------------------------------------------------------------
// The trainable network: a stack of layers wrapping an mlp::net.
// Training uses tentative weight changes with error backtracking and an
// adaptive learning rate (see train / coeff_adapt / weight_update).
class net: public std::vector<layer>
{
public:
    // Wraps every layer of the mlp network with a training-state layer.
    net(mlp::net& n) //: std::vector<layer>(n.begin(), n.end()) { reset(); }
    {
        for (mlp::net::iterator l = n.begin(); l != n.end(); ++l)
            push_back(*l);
    }
    virtual ~net() {}
    // Clears all per-neuron training state (and the underlying neurons).
    void reset()
    {
        for(iterator l = begin(); l != end(); ++l)
            l->reset();
    }
    // Trains for at most `epochs` epochs or until the error reaches
    // `target_error`. Each epoch:
    //  - init_delta() zeroes the gradient accumulators,
    //  - error(ts) measures the training-set error (with the tentative
    //    changes applied, see neuron::operator()),
    //  - if the error did not worsen by more than `tolerance`, the step
    //    coefficients are adapted and the tentative changes are committed,
    //  - otherwise eta is backtracked (shrunk, floored) and the previous
    //    error kept; the tentative step is recomputed without committing.
    // Returns the last accepted error.
    real train(const set& ts,
               unsigned epochs,
               real target_error,
               real tolerance,
               real eta = eta_default,
               real momentum = alpha_default,
               real lambda = lambda_default)
    {
        real error_ = max_real;
        while (epochs-- && error_ > target_error)
        {
            real last_error = error_;
            init_delta();
            error_ = error(ts);
            if (error_ < last_error + tolerance)
            {
                coeff_adapt(eta, momentum, lambda);
                weight_update(ts.size(), true, eta, momentum);
            }
            else
            {
                eta *= backtrack_step;
                eta = std::max(eta, eta_floor);
                momentum = eta * lambda;
                weight_update(ts.size(), false, eta, momentum);
                error_ = last_error;
            }
        }
        return error_;
    }
    // Measures the training-set error; implemented by a subclass, which is
    // also expected to accumulate the per-neuron gradients while doing so.
    virtual real error(const set& ts) = 0;
    // protected:
    // Feeds `input` through all layers in order; each neuron keeps its
    // activation in `out`.
    void forward(vector input)
    {
        for (iterator l = begin(); l != end(); ++l)
        {
            vector tmp = (*l)(input);
            input.swap(tmp);
        }
    }
    // private:
    // Zeroes the gradient accumulators (dxo and ndelta) before an epoch.
    void init_delta()
    {
        for (iterator l = begin(); l != end(); ++l)
            for (layer::iterator n = l->begin(); n != l->end(); ++n)
                fill(n->dxo.begin(), n->dxo.end(), n->ndelta = 0.0);
    }
    // Adapts eta/momentum/lambda from the geometry of the step:
    //   me = |gradient|, mw = |previous weight change|, ew = their dot
    // product, so ew/(me*mw) is the cosine of the angle between them —
    // eta grows when the new gradient points along the previous step and
    // shrinks when it points against it. All norms are floored to avoid
    // dividing by ~zero.
    void coeff_adapt(real& eta, real& momentum, real& lambda)
    {
        real me = 0, mw = 0, ew = 0;
        for (iterator l = begin(); l != end(); ++l)
            for (layer::iterator n = l->begin(); n != l->end(); ++n)
            {
                me += n->dxo * n->dxo;
                mw += n->dweight1 * n->dweight1;
                ew += n->dxo * n->dweight1;
            }
        me = std::max(static_cast<real>(sqrt(me)), me_floor);
        mw = std::max(static_cast<real>(sqrt(mw)), mw_floor);
        eta *= (1.0 + 0.5 * ew / ( me * mw));
        eta = std::max(eta, eta_floor);
        lambda = lambda0 * me / mw;
        momentum = eta * lambda;
#ifdef DEBUG
        std::cout << me << " \t" << mw << " \t" << ew << " \t"
                  << eta << " \t" << momentum << " \t" << lambda << std::endl;
#endif // DEBUG
    }
    // Averages the accumulated gradients over the training-set size; when
    // `fire` is true, commits the current tentative changes into the real
    // weights/bias and remembers them as the "previous" step. The next
    // tentative change is then eta * gradient + momentum * previous change.
    void weight_update(unsigned size, bool fire, real eta, real momentum)
    {
        for (iterator l = begin(); l != end(); ++l)
            for (layer::iterator n = l->begin(); n != l->end(); ++n)
            {
                n->ndelta /= size;
                n->dxo /= size;
                if (fire)
                {
                    n->n->weight += n->dweight1;
                    n->dweight2 = n->dweight1;
                    n->n->bias += n->dbias1;
                    n->dbias2 = n->dbias1;
                }
                n->dweight1 = eta * n->dxo + momentum * n->dweight2;
                n->dbias1 = eta * n->ndelta + momentum * n->dbias2;
            }
    }
};
//---------------------------------------------------------------------------
} // namespace qp
//-----------------------------------------------------------------------------
#endif // qp_h
// Local Variables:
// mode:C++
// End:

213
tutorial/eo/app/gprop/vecop.h Executable file
View file

@ -0,0 +1,213 @@
//-----------------------------------------------------------------------------
// vecop.h
//-----------------------------------------------------------------------------
#ifndef VECOP_H
#define VECOP_H
//-----------------------------------------------------------------------------
#include <algorithm>  // transform copy
#include <cmath>      // sqrt
#include <functional> // plus minus multiplies divides
#include <iostream>   // ostream istream
#include <iterator>   // ostream_iterator
#include <numeric>    // inner_product
#include <vector>     // vector
//-----------------------------------------------------------------------------
// std::vector + std::vector
//-----------------------------------------------------------------------------
// Element-wise sum of two vectors (v2 must have at least v1.size() elements).
template<class T> std::vector<T> operator+(const std::vector<T>& v1, const std::vector<T>& v2)
{
    std::vector<T> tmp = v1;
    std::transform(tmp.begin(), tmp.end(), v2.begin(), tmp.begin(), std::plus<T>());
    return tmp;
}
// Element-wise difference (same size assumption as operator+).
template<class T> std::vector<T> operator-(const std::vector<T>& v1, const std::vector<T>& v2)
{
    std::vector<T> tmp = v1;
    std::transform(tmp.begin(), tmp.end(), v2.begin(), tmp.begin(), std::minus<T>());
    return tmp;
}
// Inner (dot) product of two vectors.
// Fix: inner_product is now explicitly std::-qualified instead of relying on
// argument-dependent lookup through the vector iterators.
template<class T> T operator*(const std::vector<T>& v1, const std::vector<T>& v2)
{
    return std::inner_product(v1.begin(), v1.end(), v2.begin(), static_cast<T>(0));
}
// Sum of element-wise quotients: v1[0]/v2[0] + v1[1]/v2[1] + ...
template<class T> T operator/(const std::vector<T>& v1, const std::vector<T>& v2)
{
    return std::inner_product(v1.begin(), v1.end(), v2.begin(), static_cast<T>(0),
                              std::plus<T>(), std::divides<T>());
}
//-----------------------------------------------------------------------------
// std::vector += std::vector
//-----------------------------------------------------------------------------
// In-place element-wise sum; v2 must have at least v1.size() elements.
template<class T> std::vector<T>& operator+=(std::vector<T>& v1, const std::vector<T>& v2)
{
    typename std::vector<T>::const_iterator rhs = v2.begin();
    for (typename std::vector<T>::iterator lhs = v1.begin(); lhs != v1.end(); ++lhs, ++rhs)
        *lhs = *lhs + *rhs;
    return v1;
}
// In-place element-wise difference; same size assumption as operator+=.
template<class T> std::vector<T>& operator-=(std::vector<T>& v1, const std::vector<T>& v2)
{
    typename std::vector<T>::const_iterator rhs = v2.begin();
    for (typename std::vector<T>::iterator lhs = v1.begin(); lhs != v1.end(); ++lhs, ++rhs)
        *lhs = *lhs - *rhs;
    return v1;
}
//-----------------------------------------------------------------------------
// std::vector + number
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// std::vector + number
//-----------------------------------------------------------------------------
// Fix: these used std::bind2nd, which was deprecated in C++11 and REMOVED in
// C++17, so the header no longer compiles under a modern standard. Plain
// loops are valid under every standard. The scalar is converted to A up
// front, exactly as binder2nd stored it.

// Returns a copy of a with b (converted to A) added to every element.
template<class A, class B> std::vector<A> operator+(const std::vector<A>& a, const B& b)
{
    std::vector<A> tmp = a;
    const A rhs = static_cast<A>(b);
    for (typename std::vector<A>::size_type i = 0; i < tmp.size(); ++i)
        tmp[i] += rhs;
    return tmp;
}
// Returns a copy of a with b (converted to A) subtracted from every element.
template<class A, class B> std::vector<A> operator-(const std::vector<A>& a, const B& b)
{
    std::vector<A> tmp = a;
    const A rhs = static_cast<A>(b);
    for (typename std::vector<A>::size_type i = 0; i < tmp.size(); ++i)
        tmp[i] -= rhs;
    return tmp;
}
// Returns a copy of a with every element multiplied by b (converted to A).
template<class A, class B> std::vector<A> operator*(const std::vector<A>& a, const B& b)
{
    std::vector<A> tmp = a;
    const A rhs = static_cast<A>(b);
    for (typename std::vector<A>::size_type i = 0; i < tmp.size(); ++i)
        tmp[i] *= rhs;
    return tmp;
}
// Returns a copy of a with every element divided by b (converted to A).
template<class A, class B> std::vector<A> operator/(const std::vector<A>& a, const B& b)
{
    std::vector<A> tmp = a;
    const A rhs = static_cast<A>(b);
    for (typename std::vector<A>::size_type i = 0; i < tmp.size(); ++i)
        tmp[i] /= rhs;
    return tmp;
}
//-----------------------------------------------------------------------------
// number + std::vector
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// number + std::vector
//-----------------------------------------------------------------------------
// Fix: std::bind2nd (deprecated in C++11, removed in C++17 — and one call was
// not even std::-qualified) is replaced by plain loops that are valid under
// every standard, with the scalar converted to A up front as binder2nd did.

// Returns a copy of a with b (converted to A) added to every element.
template<class A, class B> std::vector<A> operator+(const B& b, const std::vector<A>& a)
{
    std::vector<A> tmp = a;
    const A lhs = static_cast<A>(b);
    for (typename std::vector<A>::size_type i = 0; i < tmp.size(); ++i)
        tmp[i] += lhs;
    return tmp;
}
// Element-wise b - a[i].
template<class A, class B> std::vector<A> operator-(const B& b, const std::vector<A>& a)
{
    std::vector<A> tmp(a.size(), b);
    for (typename std::vector<A>::size_type i = 0; i < a.size(); ++i)
        tmp[i] -= a[i];
    return tmp;
}
// Returns a copy of a with every element multiplied by b (converted to A).
template<class A, class B> std::vector<A> operator*(const B& b, const std::vector<A>& a)
{
    std::vector<A> tmp = a;
    const A lhs = static_cast<A>(b);
    for (typename std::vector<A>::size_type i = 0; i < tmp.size(); ++i)
        tmp[i] *= lhs;
    return tmp;
}
// Element-wise b / a[i].
template<class A, class B> std::vector<A> operator/(const B& b, const std::vector<A>& a)
{
    std::vector<A> tmp(a.size(), b);
    for (typename std::vector<A>::size_type i = 0; i < a.size(); ++i)
        tmp[i] /= a[i];
    return tmp;
}
//-----------------------------------------------------------------------------
// std::vector += number
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// std::vector += number
//-----------------------------------------------------------------------------
// Fix: std::bind2nd was removed in C++17; plain loops keep the header valid
// under every standard. The scalar is converted to A once, as binder2nd did.

// Adds b (converted to A) to every element of a, in place.
template<class A, class B> std::vector<A>& operator+=(std::vector<A>& a, const B& b)
{
    const A rhs = static_cast<A>(b);
    for (typename std::vector<A>::size_type i = 0; i < a.size(); ++i)
        a[i] += rhs;
    return a;
}
// Subtracts b (converted to A) from every element of a, in place.
template<class A, class B> std::vector<A>& operator-=(std::vector<A>& a, const B& b)
{
    const A rhs = static_cast<A>(b);
    for (typename std::vector<A>::size_type i = 0; i < a.size(); ++i)
        a[i] -= rhs;
    return a;
}
// Multiplies every element of a by b (converted to A), in place.
template<class A, class B> std::vector<A>& operator*=(std::vector<A>& a, const B& b)
{
    const A rhs = static_cast<A>(b);
    for (typename std::vector<A>::size_type i = 0; i < a.size(); ++i)
        a[i] *= rhs;
    return a;
}
// Divides every element of a by b (converted to A), in place.
template<class A, class B> std::vector<A>& operator/=(std::vector<A>& a, const B& b)
{
    const A rhs = static_cast<A>(b);
    for (typename std::vector<A>::size_type i = 0; i < a.size(); ++i)
        a[i] /= rhs;
    return a;
}
//-----------------------------------------------------------------------------
// I/O
//-----------------------------------------------------------------------------
// Writes v as "<e0 e1 ... en>"; an empty vector prints as "<>".
template<class T> std::ostream& operator<<(std::ostream& os, const std::vector<T>& v)
{
    os << '<';
    if (!v.empty())
    {
        for (typename std::vector<T>::size_type i = 0; i + 1 < v.size(); ++i)
            os << v[i] << " ";
        os << v.back();
    }
    os << '>';
    return os;
}
// Reads a vector in the "<e0 e1 ... en>" format produced by operator<<.
// Sets failbit when the leading '<' is missing; stops at '>' or on a
// stream failure.
template<class T> std::istream& operator>>(std::istream& is, std::vector<T>& v)
{
    v.clear();
    char c;
    is >> c;
    if (!is || c != '<')
    {
        is.setstate(std::ios::failbit);
        return is;
    }
    T t;
    do {
        is >> c;
        if (is && c != '>')
        {
            is.putback(c);
            is >> t;
            if (is)
                v.push_back(t);
        }
    } while (is && c != '>');
    return is;
}
//-----------------------------------------------------------------------------
// euclidean_distance
//-----------------------------------------------------------------------------
// Euclidean (L2) distance between v1 and v2.
// Iterates over v1's length, so v2 must have at least v1.size() elements.
// Fixes: sqrt is now std::-qualified (the header did not even include
// <cmath>, relying on transitive includes), and the temporary difference is
// scoped inside the loop.
template<class T> T euclidean_distance(const std::vector<T>& v1,
                                       const std::vector<T>& v2)
{
    T sum = 0;
    for (typename std::vector<T>::size_type i = 0; i < v1.size(); ++i)
    {
        const T diff = v1[i] - v2[i];
        sum += diff * diff;
    }
    return static_cast<T>(std::sqrt(sum));
}
//-----------------------------------------------------------------------------
#endif

View file

@ -0,0 +1,38 @@
######################################################################################
### 1) Include the sources
######################################################################################
#include_directories(${EO_SRC_DIR}/src)
#include_directories(${CMAKE_CURRENT_SOURCE_DIR})
######################################################################################
### 2) Specify where CMake can find the libraries (mandatory: before 3) )
######################################################################################
# NOTE(review): the sibling mastermind CMakeLists links against ${EO_BIN_DIR}/${LIB};
# confirm whether the literal "lib" or the ${LIB} variable is correct here.
link_directories(${EO_BIN_DIR}/lib)
######################################################################################
### 3) Define your target(s): just an executable here
######################################################################################
set (GPSYMREG_SOURCES main.cpp)
# no matter what the OS is, hopefully
add_executable(gpsymreg ${GPSYMREG_SOURCES})
add_dependencies(gpsymreg eo eoutils)
######################################################################################
### 4) Optional: define your target(s)'s version: no effect for windows
######################################################################################
set(GPSYMREG_VERSION ${GLOBAL_VERSION})
set_target_properties(gpsymreg PROPERTIES VERSION "${GPSYMREG_VERSION}")
######################################################################################
### 5) Link the libraries for your target(s)
######################################################################################
target_link_libraries(gpsymreg eo eoutils)
######################################################################################

View file

@ -0,0 +1,227 @@
/*
This library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU General Public License
along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: todos@geneura.ugr.es, http://geneura.ugr.es
jeggermo@liacs.nl
*/
#ifndef _FITNESS_FUNCTION_H
#define _FITNESS_FUNCTION_H
#include <paradiseo/eo/gp/eoParseTree.h>
#include <paradiseo/eo.h>
#include <cmath>
#include "parameters.h"
#include "node.h"
using namespace gp_parse_tree;
using namespace std;
// the first fitness is the normal goal fitness
// the second fitness is the tree size (we prefer smaller trees)
// lets use names to define the different fitnesses
#define NORMAL 0 // Stepwise Adaptation of Weights Fitness
#define SMALLESTSIZE 1 // The size of the tree, we want to minimize this one -- statistics will tell us the smallest tree size
// Look: overloading the maximization without overhead (thing can be inlined)
// Fitness traits telling EO's Pareto machinery that BOTH objectives
// (NORMAL = regression error, SMALLESTSIZE = tree size) are minimized.
class MinimizingFitnessTraits : public eoParetoFitnessTraits
{
  public :
    // Every objective is minimized, whichever index is asked for.
    static bool maximizing(int which) { return false;} // we want to minimize both fitnesses
    static unsigned nObjectives() { return 2;} // the number of fitnesses }
};
// Lets define our MultiObjective FitnessType
typedef eoParetoFitness<MinimizingFitnessTraits> FitnessType;
// John Koza's sextic polynomial (our example problem)
// John Koza's sextic polynomial (the example target function):
//   f(x) = x^6 - 2*x^4 + x^2
// Fix: computed with plain multiplications instead of three std::pow calls —
// pow is a general transcendental routine and is far slower for small
// integer exponents, with no accuracy benefit here.
double sextic_polynomial(double x)
{
    const double x2 = x * x;   // x^2
    const double x4 = x2 * x2; // x^4
    return x4 * x2 - 2 * x4 + x2;
}
// we use the following functions for the basic math functions
// Basic arithmetic wrappers with the plain-function signature expected by
// the tree nodes' BinaryFunction/UnaryFunction pointers.

double _plus(double arg1, double arg2) { return arg1 + arg2; }

double _minus(double arg1, double arg2) { return arg1 - arg2; }

double _multiplies(double arg1, double arg2) { return arg1 * arg2; }

// Protected division: dividing by zero yields 0 instead of inf/NaN, so the
// evolved trees never produce non-finite values through division.
double _divides(double arg1, double arg2)
{
    return arg2 == 0 ? 0 : arg1 / arg2;
}

double _negate(double arg1) { return -arg1; }
// now let's define our tree nodes
// Populates initSequence with every node the tree initializer may draw from:
// the variable X, the constants 2/4/6 (each followed by another copy of X to
// raise the variable's frequency), unary functions, binary functions and
// binary operators. See node.h for the Operation constructor conventions
// (argument order decides function vs. operator rendering).
void init(vector<Node> &initSequence)
{
    // we have only one variable (X)
    Operation varX( (unsigned int) 0, string("X") );
    // the main binary operators
    Operation OpPLUS ( _plus, string("+"));
    Operation OpMINUS( _minus,string("-"));
    Operation OpMULTIPLIES(_multiplies,string("*"));
    // We can use a protected divide function.
    Operation OpDIVIDE( _divides, string("/") );
    // Now the functions as binary functions
    Operation PLUS( string("plus"), _plus);
    Operation MINUS( string("minus"), _minus);
    Operation MULTIPLIES( string("multiply"), _multiplies);
    Operation DIVIDE( string("divide"), _divides);
    // and some unary functions
    Operation NEGATE( _negate,string("-"));
    Operation SIN ( sin, string("sin"));
    Operation COS ( cos, string("cos"));
    // Now we are ready to add the possible nodes to our initSequence (which is used by the eoDepthInitializer)
    // so lets start with our variable
    initSequence.push_back(varX);
    // followed by the constants 2, 4, 6
    for(unsigned int i=2; i <= 6; i+=2)
    {
        char text[255];
        sprintf(text, "%i", i); // renders the constant's printable label, e.g. "2"
        Operation op(i*1.0, text);
        initSequence.push_back( op );
        // and we add the variable again (so we have get lots of variables);
        initSequence.push_back( varX );
    }
    // next we add the unary functions
    initSequence.push_back( NEGATE );
    initSequence.push_back( SIN );
    initSequence.push_back( COS );
    // and the binary functions
    initSequence.push_back( PLUS);
    initSequence.push_back( MINUS );
    initSequence.push_back( MULTIPLIES );
    initSequence.push_back( DIVIDE );
    // and the binary operators
    initSequence.push_back( OpPLUS);
    initSequence.push_back( OpMINUS );
    initSequence.push_back( OpMULTIPLIES );
    initSequence.push_back( OpDIVIDE );
}
// Evaluation functor: scores a parse tree against the sextic polynomial on
// sample points stepped through [-1, 1] (objective NORMAL = sum of squared
// errors) and penalizes tree size (objective SMALLESTSIZE = size / MaxSize).
// Also remembers the best NORMAL fitness seen and the printed form of its
// tree, reported from the destructor.
class RegFitness: public eoEvalFunc< eoParseTree<FitnessType, Node> >
{
    public:
        typedef eoParseTree<FitnessType, Node> EoType;
        // Evaluates one individual and stores the two-objective fitness in it.
        void operator()(EoType &_eo)
        {
            vector< double > input(1); // the input variable(s)
            double output(0.);
            double target;
            FitnessType fitness;
            float x=0;
            double fit=0;
            // NOTE(review): accumulating a float in 0.1 steps is inexact, so
            // the final sample may fall slightly short of (or past) x == 1 —
            // confirm the intended number of sample points.
            for(x=-1; x <= 1; x+=0.1)
            {
                input[0] = x;
                target = sextic_polynomial(x);
                _eo.apply(output,input); // evaluate the tree at x
                fit += pow(target - output, 2);
            }
            fitness[NORMAL] = fit;
            fitness[SMALLESTSIZE] = _eo.size() / (1.0*parameter.MaxSize);
            _eo.fitness(fitness);
            // track the best (lowest) error and keep its printed form
            if (fitness[NORMAL] < best[NORMAL])
            {
                best[NORMAL] = fitness[NORMAL];
                tree="";
                _eo.apply(tree);
            }
        }
        // Builds the node set (see init) and initializes the best-so-far record.
        RegFitness(eoValueParam<unsigned> &_generationCounter, vector< Node > &initSequence, Parameters &_parameter) : eoEvalFunc<EoType>(), generationCounter(_generationCounter), parameter(_parameter)
        {
            init(initSequence);
            best[NORMAL] = 1000; // sentinel: any real evaluation should beat this
            tree= "not found";
        };
        // Reports the best individual found when the evaluator is destroyed.
        ~RegFitness()
        {
            cerr << "Best Fitness= " << best[NORMAL] << endl;
            cerr << tree << endl;
        };
    private:
        eoValueParam<unsigned> &generationCounter; // so we know the current generation
        Parameters &parameter; // the parameters
        FitnessType best; // the best found fitness
        string tree; // printed form of the best tree found so far
};
#endif

337
tutorial/eo/app/gpsymreg/main.cpp Executable file
View file

@ -0,0 +1,337 @@
/*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU General Public License
along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: todos@geneura.ugr.es, http://geneura.ugr.es
jeggermo@liacs.nl
*/
#ifdef _MSC_VER
#pragma warning(disable:4786)
#endif
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <iostream>
#include <paradiseo/eo/gp/eoParseTree.h>
#include <paradiseo/eo.h>
using namespace gp_parse_tree;
using namespace std;
//-----------------------------------------------------------------------------
#include "node.h"
#include "parameters.h"
#include "fitness.h"
// TYPE DECLARATIONS FOR GP
typedef eoParseTree<FitnessType, Node > EoType;
typedef eoPop<EoType> Pop;
//-----------------------------------------------------------------------------
int main(int argc, char *argv[])
{
// the vector containing the possible nodes
vector<Node> initSequence;
// initialise parameters
Parameters parameter(argc, argv);
// set the randomseed
rng.reseed(parameter.randomseed);
// Create a generation counter
eoValueParam<unsigned> generationCounter(0, "Gen.");
// Create an incrementor (sub-class of eoUpdater). Note that the
// parameter's value is passed by reference,
// so every time the incrementer is updated (every generation),
// the data in generationCounter will change.
eoIncrementor<unsigned> increment(generationCounter.value());
// create an instantiation of the fitness/evaluation function
// it initializes the initSequence vector
// the parameters are passed on as well
RegFitness eval(generationCounter, initSequence, parameter);
// Depth Initializor, set for Ramped Half and Half Initialization
eoParseTreeDepthInit<FitnessType, Node> initializer(parameter.InitMaxDepth, initSequence, true, true);
// create the initial population
Pop pop(parameter.population_size, initializer);
// and evaluate the individuals
apply<EoType>(eval, pop);
generationCounter.value()++; // set the generationCounter to 1
// define X-OVER
eoSubtreeXOver<FitnessType, Node> xover(parameter.MaxSize);
// define MUTATION
eoBranchMutation<FitnessType, Node> mutation(initializer, parameter.MaxSize);
// eoExpansionMutation<FitnessType, Node> mutation(initializer, parameter.MaxSize);
// eoCollapseSubtreeMutation<FitnessType, Node> mutation(initializer, parameter.MaxSize);
// eoPointMutation<FitnessType, Node> mutation(initSequence);
// eoHoistMutation<FitnessType, Node> mutation;
// The operators are encapsulated into an eoTRansform object,
// that performs sequentially crossover and mutation
eoSGATransform<EoType> transform(xover, parameter.xover_rate, mutation, parameter.mutation_rate);
// The robust tournament selection
// in our case 5-tournament selection
eoDetTournamentSelect<EoType> selectOne(parameter.tournamentsize);
// is now encapsulated in a eoSelectMany
eoSelectMany<EoType> select(selectOne, parameter.offspring_size, eo_is_an_integer);
// and the generational replacement
//eoGenerationalReplacement<EoType> replace;
// or the SteadtState replacment
//eoSSGAWorseReplacement<EoType> replace;
// or comma selection
eoCommaReplacement<EoType> replace;
// Terminators
eoGenContinue<EoType> term(parameter.nGenerations);
eoCheckPoint<EoType> checkPoint(term);
// STATISTICS
eoAverageStat<EoType> avg;
eoBestFitnessStat<EoType> best;
// Add it to the checkpoint,
// so the counter is updated (here, incremented) every generation
checkPoint.add(increment);
checkPoint.add(avg);
checkPoint.add(best);
#ifdef HAVE_GNUPLOT
eoGnuplot1DMonitor gnuplotmonitor("gnuplotBestStats");
gnuplotmonitor.add(generationCounter);
gnuplotmonitor.add(best);
// we need to add a empty string variable if we want to seed the second fitness value
eoValueParam<string> dummy1("", "Smallest Tree Size");
gnuplotmonitor.add(dummy1);
eoGnuplot1DMonitor gnuplotAvgmonitor("gnuplotAvgStats");
gnuplotAvgmonitor.add(generationCounter);
gnuplotAvgmonitor.add(avg);
// we need to add a empty string variable if we want to seed the second fitness value
eoValueParam<string> dummy2("", "Average Tree Size");
gnuplotAvgmonitor.add(dummy2);
checkPoint.add(gnuplotmonitor);
checkPoint.add(gnuplotAvgmonitor);
#endif
// GP Generation
eoEasyEA<EoType> gp(checkPoint, eval, select, transform, replace);
cout << "Initialization done" << endl;
try
{
gp(pop);
}
catch (exception& e)
{
cout << "exception: " << e.what() << endl;;
exit(EXIT_FAILURE);
}
return 1;
}

248
tutorial/eo/app/gpsymreg/node.h Executable file
View file

@ -0,0 +1,248 @@
/*
This library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU General Public License
along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: todos@geneura.ugr.es, http://geneura.ugr.es
jeggermo@liacs.nl
*/
#ifndef _NODE_H
#define _NODE_H
#include <iostream>
#include <string>
#include <cmath> // for finite(double) function
using namespace gp_parse_tree;
using namespace std;
/* A new Operation and Node class for even more flexibility.
Improvements over the t-eoSymreg code are:
* No hardcoded functions or operators. The Operation and Node class below
allow you to specify your own unary and binary functions as well as
binary operators (like +,-,*,/). Moreover you can detemine if you want
to allow primitve subroutines with either one or two arguments.
If a Node has a subroutine Operation it will take evaluate the first
(and possible second) child branch and use them as input variables for
the remaining second (or third) child branch.
*/
typedef enum {Variable, UFunction, BFunction, BOperator, Const} Type;
typedef double (*BinaryFunction)(const double,const double);
typedef double (*UnaryFunction)(const double);
// A discriminated node payload: exactly one of {uFunction, bFunction, id,
// constant} is meaningful, selected by `type`; `label` is the printable name.
// The constructors' ARGUMENT ORDER selects the node kind (see the comments
// on each one below).
struct Operation
{
    public:
        typedef unsigned int VariableID;
        typedef string Label;
        // if your compiler allows you to have nameless unions you can make this a
        // union by removing the //'s below
        //union
        //{
        UnaryFunction uFunction;  // meaningful when type == UFunction
        BinaryFunction bFunction; // meaningful when type == BFunction or BOperator
        VariableID id;            // meaningful when type == Variable
        double constant;          // meaningful when type == Const
        //};
        Label label;
        Type type;
        // the default constructor results in a constant with value 0
        Operation() : constant(0), label("0"), type(Const){};
        // two possible constructors for Unary Functions
        Operation(UnaryFunction _uf, Label _label): uFunction(_uf), label(_label), type(UFunction) {};
        Operation(Label _label, UnaryFunction _uf): uFunction(_uf), label(_label), type(UFunction) {};
        // Watch out there are two constructors using pointers two binary functions:
        // Binary Function (printed as label(subtree0,subtree1) (e.g. pow(x,y))
        // Binary Operator (printed as (subtree0 label subtree1) (e.g. x^y)
        // The difference is purely cosmetic.
        // If you specify the label before the function pointer -> Binary Function
        Operation(Label _label, BinaryFunction _bf): bFunction(_bf), label(_label), type(BFunction) {};
        // If you specify the function pointer before the label -> Binary Operator
        Operation(BinaryFunction _bf, Label _label): bFunction(_bf), label(_label), type(BOperator) {};
        // A constructor for variables
        Operation(VariableID _id, Label _label): id(_id), label(_label), type(Variable) {};
        // A constructor for constants
        Operation(double _constant, Label _label): constant(_constant), label(_label), type(Const) {};
        // Copy constructor: copies only the member selected by the source's
        // type; the other payload members stay uninitialized and are never
        // read through this object's type tag.
        Operation(const Operation &_op)
        {
            switch(_op.type)
            {
                case Variable: id = _op.id; break;
                case UFunction: uFunction = _op.uFunction; break;
                case BFunction: bFunction = _op.bFunction; break;
                case BOperator: bFunction = _op.bFunction; break;
                case Const: constant = _op.constant; break;
            }
            type = _op.type;
            label = _op.label;
        };
        virtual ~Operation(){};
};
// A parse-tree node for eoParseTree: evaluation and pretty-printing dispatch
// on the stored Operation's type. The template `Children` parameter gives
// access to the node's subtrees via args[i].apply(...).
class Node
{
    private:
        Operation op;
    public:
        // Defaults to the constant 0 (Operation's default constructor).
        Node(void): op(Operation()){};
        Node(Operation &_op) : op(_op){};
        virtual ~Node(void) {}
        // Number of subtrees this node expects, derived from the operation
        // type (0 for variables/constants, 1 for unary, 2 for binary).
        int arity(void) const
        {
            switch(op.type)
            {
                case Variable: return 0;
                case UFunction: return 1;
                case BFunction: return 2;
                case BOperator: return 2;
                case Const: return 0;
            }
            return 0;
        }
        // Required by the parse-tree node interface; nothing to randomize here.
        void randomize(void) {}
        // Numeric evaluation: recursively evaluates the subtrees in `args`
        // and combines them according to the operation; `var` holds the
        // current values of the input variables.
        template<class Children>
        void operator()(double& result, Children args, vector<double> &var) const
        {
            double result0;
            double result1;
            switch(op.type)
            {
                case Variable: result = var[op.id%var.size()]; //%var.size() used in the case of Subroutines and as a security measure
                    break;
                case UFunction: args[0].apply(result0, var);
                    result = op.uFunction(result0);
                    break;
                case BFunction:
                case BOperator: args[0].apply(result0, var);
                    args[1].apply(result1, var);
                    result = op.bFunction(result0,result1);
                    break;
                case Const: result = op.constant;
                    break;
            }
        }
        // Pretty-printing: appends this subtree's textual form to `result`.
        // Functions render as label(child,...), operators as (child label child).
        template<class Children>
        void operator()(string& result, Children args) const
        {
            string subtree0;
            string subtree1;
            string subtree2;
            switch(op.type)
            {
                case Variable:
                case Const: result += op.label;
                    break;
                case UFunction: result += op.label;
                    result += "(";
                    args[0].apply(subtree0);
                    result += subtree0;
                    result += ")";
                    break;
                case BFunction: result += op.label;
                    result += "(";
                    args[0].apply(subtree0);
                    result += subtree0;
                    result += ",";
                    args[1].apply(subtree1);
                    result += subtree1;
                    result += ")";
                    break;
                case BOperator: result += "(";
                    args[0].apply(subtree0);
                    result += subtree0;
                    result += op.label;
                    args[1].apply(subtree1);
                    result += subtree1;
                    result += ")";
                    break;
                default: result += "ERROR in Node::operator(string,...) \n"; break;
            }
        }
        // Returns a copy of this node's operation.
        Operation getOp(void) const {return op;}
};
//-----------------------------------------------------------
// saving, loading LETS LEAVE IT OUT FOR NOW
// Serialization: a Node is written as just its operation's label.
// Fixes: the original constructed an unused local Operation copy AND called
// getOp() (which copies) a second time — one call is enough. Also marked
// inline, since a non-inline free function defined in a header produces
// duplicate-symbol link errors when included from several translation units.
inline std::ostream& operator<<(std::ostream& os, const Node& eot)
{
    os << eot.getOp().label;
    return os;
}
// we can't load because we are using function pointers. Instead we prevent a compiler warning by calling the arity() function.
// Deserialization is intentionally unsupported: Operation stores raw
// function pointers, which cannot be reconstructed from text. This stub
// only calls arity() to avoid an unused-parameter warning and leaves the
// stream untouched.
std::istream& operator>>(std::istream& is, Node& eot)
{
    eot.arity();
    return is;
}
#endif

View file

@ -0,0 +1,112 @@
/*
This library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU General Public License
along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: todos@geneura.ugr.es, http://geneura.ugr.es
jeggermo@liacs.nl
*/
#ifndef _PARAMETERS_FUNCTION_H
#define _PARAMETERS_FUNCTION_H
#include <paradiseo/eo/gp/eoParseTree.h>
#include <paradiseo/eo.h>
using namespace gp_parse_tree;
using namespace std;
// Command-line / parameter-file configuration for the GP run. Every value is
// read through eoParser (flag shown next to each member) with a default, and
// echoed to cerr so a run's settings are always logged.
struct Parameters{
    unsigned int nGenerations; // -G
    unsigned population_size; // -P
    unsigned offspring_size; // -O (defaults to population_size)
    unsigned int MaxSize; // -S  maximum tree size
    unsigned int InitMaxDepth; // -D  maximum tree depth at initialization
    unsigned int randomseed; // -R
    double xover_rate; // -x
    double mutation_rate; // -m (the parser registers 'm'; an older comment said -y)
    unsigned int tournamentsize; // -t
    // Parses all parameters, echoes them to cerr, and prints usage then
    // exits(1) if the user asked for help.
    Parameters(int argc, char **argv)
    {
        eoParser parser(argc,argv);
        // generations
        eoValueParam<unsigned int> paramGenerations(1, "generations", "Generations", 'G', false);
        parser.processParam( paramGenerations );
        nGenerations = paramGenerations.value();
        cerr << "nGenerations= " << nGenerations << endl;
        // populationsize
        eoValueParam<unsigned int> paramPopulationSize(10, "populationsize", "PopulationSize", 'P', false);
        parser.processParam( paramPopulationSize );
        population_size = paramPopulationSize.value();
        cerr << "population_size= " << population_size << endl;
        // offspringsize
        eoValueParam<unsigned int> paramOffspringSize(population_size, "offspringsize", "OffspringSize", 'O', false);
        parser.processParam( paramOffspringSize );
        offspring_size = paramOffspringSize.value();
        cerr << "offspring_size= " << offspring_size << endl;
        // maxsize
        eoValueParam<unsigned int> paramMaxSize(15, "maxsize", "MaxSize", 'S', false);
        parser.processParam( paramMaxSize );
        MaxSize = paramMaxSize.value();
        cerr << "MaxSize= " << MaxSize << endl;
        // initialmaxdepth
        eoValueParam<unsigned int> paramInitialMaxDepth(4, "initialmaxdepth", "InitialMaxDepth", 'D', false);
        parser.processParam( paramInitialMaxDepth );
        InitMaxDepth = paramInitialMaxDepth.value();
        cerr << "InitMaxDepth= " << InitMaxDepth << endl;
        // randomseed
        eoValueParam<unsigned int> paramRandomSeed(1, "randomseed", "Random Seed", 'R', false);
        parser.processParam( paramRandomSeed );
        randomseed = paramRandomSeed.value();
        cerr << "randomseed= " << randomseed << endl;
        // crossover-rate
        eoValueParam<double> paramXover(0.75, "crossoverrate", "crossover rate", 'x', false);
        parser.processParam(paramXover );
        xover_rate = paramXover.value();
        cerr << "xover_rate= " << xover_rate << endl;
        //mutation-rate
        eoValueParam<double> paramMutation(0.25, "mutationrate", "mutation rate", 'm', false);
        parser.processParam(paramMutation );
        mutation_rate = paramMutation.value();
        cerr << "mutation_rate= " << mutation_rate << endl;
        //tournament size
        eoValueParam<unsigned int > paramTournamentSize(5, "tournamentsize", "tournament size", 't', false);
        parser.processParam(paramTournamentSize );
        tournamentsize = paramTournamentSize.value();
        cerr << "Tournament Size= " << tournamentsize << endl;
        if (parser.userNeedsHelp())
        {
            parser.printHelp(cout);
            exit(1);
        }
    };
    ~Parameters(){};
};
#endif

View file

@ -0,0 +1,38 @@
######################################################################################
### 1) Include the sources
######################################################################################
#INCLUDE_DIRECTORIES(${EO_SRC_DIR}/src)
#INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
######################################################################################
### 2) Specify where CMake can find the libraries (mandatory: before 3) )
######################################################################################
LINK_DIRECTORIES(${EO_BIN_DIR}/${LIB})
######################################################################################
### 3) Define your target(s): just an executable here
######################################################################################
SET (MASTERMIND_SOURCES mastermind.cpp)
# no matter what the OS is, hopefully
ADD_EXECUTABLE(mastermind ${MASTERMIND_SOURCES})
ADD_DEPENDENCIES(mastermind eo eoutils)
######################################################################################
### 4) Optional: define your target(s)'s version: no effect for windows
######################################################################################
SET(MASTERMIND_VERSION ${GLOBAL_VERSION})
SET_TARGET_PROPERTIES(mastermind PROPERTIES VERSION "${MASTERMIND_VERSION}")
######################################################################################
### 5) Link the libraries for your target(s)
######################################################################################
TARGET_LINK_LIBRARIES(mastermind eo eoutils)
######################################################################################

View file

@ -0,0 +1,139 @@
//-----------------------------------------------------------------------------
// mastermind
//-----------------------------------------------------------------------------
#include <stdlib.h> // EXIT_SUCCESS EXIT_FAILURE
#include <stdexcept> // exception
#include <iostream> // cerr cout
#include <fstream> // ifstream
#include <string> // string
#include <paradiseo/eo.h> // all usefull eo stuff
#include "mastermind.h" // Chrom eoChromInit eoChromMutation eoChromXover eoChromEvaluator
using namespace std;
//-----------------------------------------------------------------------------
// global variables
//-----------------------------------------------------------------------------
unsigned in, out, hidden; // NOTE(review): not referenced in this file -- confirm before removing
//-----------------------------------------------------------------------------
// parameters
//-----------------------------------------------------------------------------
// Command-line parameters; registered with the parser in arg() below.
eoValueParam<unsigned> pop_size(16, "pop_size", "population size", 'p');
eoValueParam<unsigned> generations(100, "generations", "number of generation", 'g');
eoValueParam<double> mut_rate(0.1, "mut_rate", "mutation rate", 'm');
eoValueParam<double> xover_rate(0.5, "xover_rate", "default crossover rate", 'x');
eoValueParam<unsigned> col_p(default_colors, "colors", "number of colors", 'c');
// fix: long option name and description were misspelled "legth"
eoValueParam<unsigned> len_p(default_length, "length", "solution length", 'l');
eoValueParam<string> sol_p(default_solution, "solution", "problem solution", 's');
//-----------------------------------------------------------------------------
// auxiliar functions
//-----------------------------------------------------------------------------
void arg(int argc, char** argv);
void ga();
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main(int argc, char** argv)
{
try
{
arg(argc, argv);
ga();
}
catch (exception& e)
{
cerr << argv[0] << ": " << e.what() << endl;
exit(EXIT_FAILURE);
}
return 0;
}
//-----------------------------------------------------------------------------
// implementation
//-----------------------------------------------------------------------------
void arg(int argc, char** argv)
{
eoParser parser(argc, argv);
parser.processParam(pop_size, "genetic operators");
parser.processParam(generations, "genetic operators");
parser.processParam(mut_rate, "genetic operators");
parser.processParam(xover_rate, "genetic operators");
parser.processParam(col_p, "problem");
parser.processParam(len_p, "problem");
parser.processParam(sol_p, "problem");
if (parser.userNeedsHelp())
{
parser.printHelp(cout);
exit(EXIT_SUCCESS);
}
init_eoChromEvaluator(col_p.value(), len_p.value(), sol_p.value());
}
//-----------------------------------------------------------------------------
// Assemble the EA machinery around the globals parsed in arg() and run a
// simple GA (eoSGA) until either the generation budget is exhausted or an
// individual reaches the fitness of the hidden `solution`.
void ga()
{
// create population: pop_size random individuals
eoInitChrom init;
eoPop<Chrom> pop(pop_size.value(), init);
// evaluate population once before the loop starts
eoEvalFuncPtr<Chrom> evaluator(eoChromEvaluator);
apply<Chrom>(evaluator, pop);
// selector: fitness-proportional selection over the initial population
eoProportionalSelect<Chrom> select(pop);
// genetic operators
eoChromMutation mutation;
eoChromXover xover;
// stop condition: generation budget OR target fitness, whichever comes first
eoGenContinue<Chrom> continuator1(generations.value());
eoFitContinue<Chrom> continuator2(solution.fitness());
eoCombinedContinue<Chrom> continuator(continuator1);
continuator.add(continuator2);
// checkpoint wraps the continuator so side tasks run every generation
eoCheckPoint<Chrom> checkpoint(continuator);
// monitor prints whatever statistics are attached to it
eoStdoutMonitor monitor;
checkpoint.add(monitor);
// statistics: best fitness, updated by the checkpoint, shown by the monitor
// (registration order matters: stats must be wired before the run)
eoBestFitnessStat<Chrom> stats;
checkpoint.add(stats);
monitor.add(stats);
// genetic algorithm
eoSGA<Chrom> sga(select,
xover, xover_rate.value(),
mutation, mut_rate.value(),
evaluator,
checkpoint);
sga(pop);
cout << "solution = " << solution << endl
<< "best = " << *max_element(pop.begin(), pop.end()) << endl;
}
//-----------------------------------------------------------------------------
// Local Variables:
// mode:C++
// End:

View file

@ -0,0 +1,199 @@
//-----------------------------------------------------------------------------
// mastermind.h
//-----------------------------------------------------------------------------
#ifndef mastermind_h
#define mastermind_h
//-----------------------------------------------------------------------------
#include <stdlib.h> // exit EXIT_FAILURE
#include <paradiseo/eo/eoVector.h> // eoVectorLength
#include <paradiseo/eo/eoOp.h> // eoMonOp eoQuadraticOp
#include <paradiseo/eo/eoInit.h> // eoInit
#include <paradiseo/eo/utils/rnd_generators.h> // uniform_generator
//-----------------------------------------------------------------------------
// phenotype
//-----------------------------------------------------------------------------
typedef float phenotype; // fitness value computed by eoChromEvaluator
//-----------------------------------------------------------------------------
// genotype
//-----------------------------------------------------------------------------
typedef std::vector<int> genotype; // a guess: one color index per peg
//-----------------------------------------------------------------------------
// Chrom
//-----------------------------------------------------------------------------
typedef eoVector<phenotype, int> Chrom; // EO individual: int vector + fitness
//-----------------------------------------------------------------------------
// eoChromEvaluator
//-----------------------------------------------------------------------------
// const unsigned points_per_black = 3, points_per_white = 1;
// The hidden code every guess is scored against; filled in by
// init_eoChromEvaluator().
Chrom solution;
// Score a guess against the global `solution`, mastermind-style:
//  - black: right color in the right position,
//  - white: right color in a wrong position (each solution peg used once).
// Returns black * length + white, so one black always outweighs any number
// of whites.
phenotype eoChromEvaluator(const Chrom& chrom)
{
Chrom tmp = solution;
unsigned black = 0, white = 0;
// look for blacks; consume the matched solution pegs
for (unsigned i = 0; i < chrom.size(); ++i)
if (chrom[i] == tmp[i])
{
++black;
tmp[i] = -1;
}
// look for whites.  Fix: skip guess pegs already counted as black --
// previously they could match another solution peg of the same color and
// be counted a second time (e.g. solution "00" vs guess "01" scored
// 1 black + 1 white instead of 1 black + 0 white).
for (unsigned i = 0; i < chrom.size(); ++i)
{
if (chrom[i] == solution[i])
continue; // this peg already scored a black
for (unsigned j = 0; j < tmp.size(); ++j)
if (chrom[i] == tmp[j])
{
++white;
tmp[j] = -1;
break;
}
}
// return black * points_per_black + white * points_per_white;
return black * chrom.size() + white;
}
// Problem defaults: 8 pegs, 8 colors, fixed default code.
const unsigned default_length = 8;
const unsigned default_colors = 8;
const std::string default_solution = "01234567";
// Number of colors in play; read by the init/mutation operators below.
unsigned num_colors;
// Configure the global `solution` chromosome from the command-line values:
// c = number of colors, l = solution length, s = explicit solution string.
// Exits with EXIT_FAILURE on inconsistent parameters.
void init_eoChromEvaluator(const unsigned& c, const unsigned& l, std::string s)
{
num_colors = c;
// check consistency between parameters
if (s != default_solution)
{
// an explicit solution fixes the length
if (l != default_length && s.size() != l)
{
std::cerr << "solution length != length" << std::endl;
exit(EXIT_FAILURE);
}
// valid colors are 0..c-1, so every digit must be strictly below c.
// Fix: the test was `c < maxdigit`, which silently accepted a solution
// using color c itself (e.g. digit 8 with c == 8).
if ((c != default_colors) && (c <= unsigned(*max_element(s.begin(), s.end()) - '0')))
{
std::cerr << "too high color number found!" << std::endl;
exit(EXIT_FAILURE);
}
}
else
if (l != default_length || c != default_colors )
// generate a random solution string (digits only work up to 10 colors)
if(num_colors <= 10)
{
// NOTE(review): assumes uniform_generator draws in the half-open
// range ['0', '0' + c) -- confirm against rnd_generators.h
uniform_generator<char> color('0', static_cast<char>('0' + c));
s.resize(l);
generate(s.begin(), s.end(), color);
}
// decode the digit string into the solution chromosome
if (num_colors <= 10)
{
solution.resize(s.size());
for (unsigned i = 0; i < solution.size(); ++i)
solution[i] = s[i] - '0';
}
else
{
// more than 10 colors cannot be written as single digits:
// draw the solution directly as integers in [0, num_colors)
solution.resize(l);
uniform_generator<int> color(0, num_colors);
generate(solution.begin(), solution.end(), color);
}
// cache the perfect score: used by eoFitContinue as the stopping fitness
solution.fitness(eoChromEvaluator(solution));
}
//-----------------------------------------------------------------------------
// eoChromInit
//-----------------------------------------------------------------------------
// Random initializer: fills a chromosome (same length as the solution) with
// colors drawn uniformly from [0, num_colors) and marks its fitness invalid.
class eoInitChrom: public eoInit<Chrom>
{
public:
  void operator()(Chrom& chrom)
  {
    uniform_generator<int> draw(0, num_colors);
    chrom.resize(solution.size());
    for (unsigned i = 0; i < chrom.size(); ++i)
      chrom[i] = draw();
    chrom.invalidate();
  }
};
//-----------------------------------------------------------------------------
// eoChromMutation
//-----------------------------------------------------------------------------
// Mutation operator: with equal probability either repaints one random peg
// with a random color, or exchanges two independently drawn positions
// (transposition).  Always reports a modification.
class eoChromMutation: public eoMonOp<Chrom>
{
  // many operators in one :(
  bool operator()(Chrom& chrom)
  {
    uniform_generator<unsigned> which(0, 2);
    uniform_generator<unsigned> pos(0, chrom.size());

    const unsigned op = which();
    if (op == 0)
      {
        // point mutation: repaint one random peg
        uniform_generator<int> color(0, num_colors);
        chrom[pos()] = color();
      }
    else if (op == 1)
      {
        // transposition: swap two independently drawn positions
        std::swap(chrom[pos()], chrom[pos()]);
      }
    else
      {
        // unreachable while `which` draws from {0, 1}
        std::cerr << "unknown operator!" << std::endl;
        exit(EXIT_FAILURE);
      }
    return true;
  }
};
//-----------------------------------------------------------------------------
// eoChromXover
//-----------------------------------------------------------------------------
// One-point crossover: exchanges the leading segments of the two parents up
// to a randomly chosen cut point.  Always reports a modification.
class eoChromXover: public eoQuadOp<Chrom>
{
public:
  bool operator()(Chrom& chrom1, Chrom& chrom2)
  {
    uniform_generator<unsigned> cut(0, chrom1.size());
    const unsigned point = cut();
    swap_ranges(chrom1.begin(), chrom1.begin() + point, chrom2.begin());
    return true;
  }
};
//-----------------------------------------------------------------------------
#endif // mastermind_h
// Local Variables:
// mode:C++
// End: