manual merge on eoPop.h

This commit is contained in:
Johann Dreo 2012-07-26 16:29:34 +02:00
commit 36f30db313
29 changed files with 1946 additions and 332 deletions

View file

@ -14,5 +14,4 @@ SET(WITH_OMP FALSE CACHE BOOL "Use OpenMP ?" FORCE)
SET(WITH_MPI FALSE CACHE BOOL "Use mpi ?" FORCE)
SET(MPI_DIR "/mpi/directory" CACHE PATH "OpenMPI directory" FORCE)
SET(BOOST_DIR "/boost/directory" CACHE PATH "Boost directory" FORCE)

View file

@ -9,23 +9,19 @@ INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
######################################################################################
IF(WITH_MPI)
MESSAGE("[EO] Compilation with MPI and BoostMPI.")
MESSAGE("[EO] Compilation with MPI.")
SET(CMAKE_CXX_COMPILER "${MPI_DIR}/bin/mpicxx")
# headers location
INCLUDE_DIRECTORIES(${MPI_DIR}/include)
INCLUDE_DIRECTORIES(${BOOST_DIR}/include)
# lib location
LINK_DIRECTORIES(${MPI_DIR}/lib)
LINK_DIRECTORIES(${BOOST_DIR}/lib)
# for conditional compilation in code
ADD_DEFINITIONS(-DWITH_MPI)
LINK_LIBRARIES(boost_mpi boost_serialization)
ADD_SUBDIRECTORY(mpi)
ENDIF()

View file

@ -67,4 +67,43 @@ public:
}
};
/**
 * A continuator that owns a generation counter (totalGenerations count),
 * which can either live inside the object or alias an external variable.
 *
 * @ingroup Continuators
 * @ingroup Core
 */
template< class EOT >
class eoCountContinue : public eoContinue< EOT >
{
public:

    /// Default ctor: the counter is stored internally and starts at zero.
    eoCountContinue( )
        : thisGenerationPlaceholder( 0 ), thisGeneration( thisGenerationPlaceholder )
    {}

    /// Ctor binding the counter to an external variable, so the caller can
    /// save/restore the number of generations counted.
    eoCountContinue( unsigned long& currentGen )
        : thisGenerationPlaceholder( 0 ), thisGeneration( currentGen )
    {}

    virtual std::string className( void ) const { return "eoCountContinue"; }

    /// Restarts the count from zero.
    virtual void reset( )
    {
        thisGeneration = 0;
    }

protected:
    unsigned long thisGenerationPlaceholder;  // fallback storage for the default ctor
    unsigned long& thisGeneration;            // the counter actually used (may alias external state)
};
#endif

View file

@ -35,24 +35,24 @@
@ingroup Continuators
*/
template< class EOT>
class eoGenContinue: public eoContinue<EOT>, public eoValueParam<unsigned>
class eoGenContinue: public eoCountContinue<EOT>, public eoValueParam<unsigned>
{
public:
using eoCountContinue<EOT>::thisGeneration;
using eoCountContinue<EOT>::thisGenerationPlaceholder;
/// Ctor for setting a
eoGenContinue( unsigned long _totalGens)
: eoValueParam<unsigned>(0, "Generations", "Generations"),
repTotalGenerations( _totalGens ),
thisGenerationPlaceHolder(0),
thisGeneration(thisGenerationPlaceHolder)
: eoCountContinue<EOT>( ),
eoValueParam<unsigned>(0, "Generations", "Generations"),
repTotalGenerations( _totalGens )
{};
/// Ctor for enabling the save/load the no. of generations counted
eoGenContinue( unsigned long _totalGens, unsigned long& _currentGen)
: eoValueParam<unsigned>(0, "Generations", "Generations"),
repTotalGenerations( _totalGens ),
thisGenerationPlaceHolder(0),
thisGeneration(_currentGen)
: eoCountContinue<EOT>( _currentGen ), eoValueParam<unsigned>(0, "Generations", "Generations"),
repTotalGenerations( _totalGens )
{};
/** Returns false when a certain number of generations is
@ -77,7 +77,7 @@ public:
*/
virtual void totalGenerations( unsigned long _tg ) {
repTotalGenerations = _tg;
thisGeneration = 0;
eoCountContinue<EOT>::reset();
};
/** Returns the number of generations to reach*/
@ -86,7 +86,6 @@ public:
return repTotalGenerations;
};
virtual std::string className(void) const { return "eoGenContinue"; }
/** Read from a stream
@ -107,8 +106,6 @@ public:
private:
unsigned long repTotalGenerations;
unsigned long thisGenerationPlaceHolder;
unsigned long& thisGeneration;
};
#endif

View file

@ -221,10 +221,11 @@ class eoPop: public std::vector<EOT>, public eoObject, public eoPersistent
#else
typename eoPop<EOT>::const_iterator it = std::max_element(begin(), end());
#endif
if( it == end() )
throw std::runtime_error("eoPop<EOT>: Empty population, when calling best_element().");
return (*it);
}
/** returns a const reference to the worse individual DOES NOT MOVE ANYBODY */
const EOT & worse_element() const
{

View file

@ -35,23 +35,26 @@
@ingroup Continuators
*/
template< class EOT>
class eoSteadyFitContinue: public eoContinue<EOT>
class eoSteadyFitContinue: public eoCountContinue<EOT>
{
public:
typedef typename EOT::Fitness Fitness;
using eoCountContinue<EOT>::thisGenerationPlaceholder;
using eoCountContinue<EOT>::thisGeneration;
/// Ctor for setting a
eoSteadyFitContinue( unsigned long _minGens, unsigned long _steadyGens)
: repMinGenerations( _minGens ), repSteadyGenerations( _steadyGens),
steadyState(false), thisGenerationPlaceHolder(0),
thisGeneration(thisGenerationPlaceHolder){};
: eoCountContinue<EOT>( ), repMinGenerations( _minGens ), repSteadyGenerations( _steadyGens),
steadyState(false)
{};
/// Ctor for enabling the save/load the no. of generations counted
eoSteadyFitContinue( unsigned long _minGens, unsigned long _steadyGen,
unsigned long& _currentGen)
: repMinGenerations( _minGens ), repSteadyGenerations( _steadyGen),
steadyState(_currentGen>_minGens), thisGenerationPlaceHolder(0),
thisGeneration(_currentGen){};
: eoCountContinue<EOT>( _currentGen ), repMinGenerations( _minGens ), repSteadyGenerations( _steadyGen),
steadyState(_currentGen>_minGens)
{};
/** Returns false when a certain number of generations is
* reached withtout improvement */
@ -96,7 +99,7 @@ public:
/// Resets the state after it's been reached
virtual void reset () {
steadyState=false;
thisGeneration = 0;
eoCountContinue<EOT>::reset();
}
/** accessors*/
@ -110,8 +113,6 @@ private:
unsigned long repMinGenerations;
unsigned long repSteadyGenerations;
bool steadyState;
unsigned long thisGenerationPlaceHolder;
unsigned long& thisGeneration;
unsigned int lastImprovement;
Fitness bestSoFar;
};

View file

@ -14,6 +14,9 @@ SET(LIBRARY_OUTPUT_PATH ${EOMPI_LIB_OUTPUT_PATH})
SET(EOMPI_SOURCES
eoMpi.cpp
eoMpiAssignmentAlgorithm.cpp
eoMpiNode.cpp
implMpi.cpp
)
ADD_LIBRARY(eompi STATIC ${EOMPI_SOURCES})

View file

@ -1,11 +1,48 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# include "eoMpi.h"
namespace eo
{
namespace mpi
{
bmpi::communicator Node::_comm;
/**********************************************
* *********** GLOBALS ************************
* *******************************************/
eoTimerStat timerStat;
namespace Channel
{
const int Commands = 0;
const int Messages = 1;
}
namespace Message
{
const int Continue = 0;
const int Finish = 1;
const int Kill = 2;
}
const int DEFAULT_MASTER = 0;
}
}

View file

@ -143,8 +143,8 @@ namespace eo
*/
namespace Channel
{
const int Commands = 0;
const int Messages = 1;
extern const int Commands;
extern const int Messages;
}
/**
@ -157,9 +157,9 @@ namespace eo
*/
namespace Message
{
const int Continue = 0;
const int Finish = 1;
const int Kill = 2;
extern const int Continue;
extern const int Finish;
extern const int Kill;
}
/**
@ -167,7 +167,7 @@ namespace eo
*
* @ingroup MPI
*/
const int DEFAULT_MASTER = 0;
extern const int DEFAULT_MASTER;
/**
* @brief Base class for the 4 algorithm functors.

View file

@ -0,0 +1,224 @@
# include "eoMpiAssignmentAlgorithm.h"
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# include "eoMpiNode.h"
namespace eo
{
namespace mpi
{
const int REST_OF_THE_WORLD = -1;
/********************************************************
* DYNAMIC ASSIGNMENT ALGORITHM *************************
*******************************************************/
// Use every host whose rank is >= 1 as a worker.
DynamicAssignmentAlgorithm::DynamicAssignmentAlgorithm( )
{
    const int worldSize = Node::comm().size();
    for( int rank = 1; rank < worldSize; ++rank )
    {
        availableWrk.push_back( rank );
    }
}

// Use a single host, identified by its MPI rank, as the only worker.
DynamicAssignmentAlgorithm::DynamicAssignmentAlgorithm( int unique )
{
    availableWrk.push_back( unique );
}

// Use an explicit list of worker ranks.
DynamicAssignmentAlgorithm::DynamicAssignmentAlgorithm( const std::vector<int> & workers )
{
    availableWrk = workers;
}

// Use the contiguous rank range [first, last]; REST_OF_THE_WORLD as last
// means "every rank from first up to the end of the communicator".
DynamicAssignmentAlgorithm::DynamicAssignmentAlgorithm( int first, int last )
{
    const int lastRank = ( last == REST_OF_THE_WORLD ) ? Node::comm().size() - 1 : last;
    for( int rank = first; rank <= lastRank; ++rank )
    {
        availableWrk.push_back( rank );
    }
}
// Pops an idle worker's rank from the pool; returns -1 when no worker is free.
int DynamicAssignmentAlgorithm::get( )
{
    if( availableWrk.empty() )
    {
        return -1;
    }
    const int assignee = availableWrk.back();
    availableWrk.pop_back();
    return assignee;
}
// Number of currently idle workers. The cast makes the size_t -> int
// narrowing explicit (worker counts are bounded by the communicator size).
int DynamicAssignmentAlgorithm::availableWorkers()
{
    return static_cast<int>( availableWrk.size() );
}

// A worker has finished its task: put it back into the idle pool.
void DynamicAssignmentAlgorithm::confirm( int rank )
{
    availableWrk.push_back( rank );
}

// Snapshot of the ranks of the idle workers.
std::vector<int> DynamicAssignmentAlgorithm::idles( )
{
    return availableWrk;
}

// The dynamic algorithm keeps no per-job state, so there is nothing to reset.
void DynamicAssignmentAlgorithm::reinit( int _ )
{
    (void) _; // explicitly mark the parameter as unused (replaces the odd "++_;")
}
/********************************************************
* STATIC ASSIGNMENT ALGORITHM **************************
*******************************************************/
// Use an explicit list of worker ranks for a fixed number of runs.
StaticAssignmentAlgorithm::StaticAssignmentAlgorithm( std::vector<int>& workers, int runs )
{
    init( workers, runs );
}

// Use the contiguous rank range [first, last]; REST_OF_THE_WORLD as last
// means "every rank from first up to the end of the communicator".
StaticAssignmentAlgorithm::StaticAssignmentAlgorithm( int first, int last, int runs )
{
    const int lastRank = ( last == REST_OF_THE_WORLD ) ? Node::comm().size() - 1 : last;
    std::vector<int> workers;
    for( int rank = first; rank <= lastRank; ++rank )
    {
        workers.push_back( rank );
    }
    init( workers, runs );
}

// Use every host whose rank is >= 1 as a worker.
StaticAssignmentAlgorithm::StaticAssignmentAlgorithm( int runs )
{
    std::vector<int> workers;
    const int worldSize = Node::comm().size();
    for( int rank = 1; rank < worldSize; ++rank )
    {
        workers.push_back( rank );
    }
    init( workers, runs );
}

// Use a single host as the only worker.
StaticAssignmentAlgorithm::StaticAssignmentAlgorithm( int unique, int runs )
{
    std::vector<int> workers( 1, unique );
    init( workers, runs );
}
/*
 * Distributes a fixed number of runs over the given workers.
 *
 * With runs == q * nbWorkers + r (euclidean division), every worker gets q
 * runs and the first r workers get one extra run.
 *
 * Fix: the original called attributions.reserve() and then wrote through
 * operator[], indexing past the vector's size — undefined behaviour.
 * assign() both sizes and fills the vector. Also guards against an empty
 * worker set, which previously divided by zero.
 *
 * @param workers Ranks of the workers to use.
 * @param runs Number of runs; if <= 0, attribution is postponed (call reinit() later).
 */
void StaticAssignmentAlgorithm::init( std::vector<int> & workers, int runs )
{
    const unsigned int nbWorkers = workers.size();
    freeWorkers = nbWorkers;
    busy.assign( nbWorkers, false );
    realRank = workers;

    if( runs <= 0 || nbWorkers == 0 )
    {
        return;
    }

    // q runs for every worker...
    attributions.assign( nbWorkers, runs / nbWorkers );
    // ...and one extra run for each of the first r workers.
    const unsigned int remainder = runs % nbWorkers;
    for( unsigned int i = 0; i < remainder; ++i )
    {
        ++attributions[i];
    }
}
// Returns the rank of an idle worker that still has runs attributed,
// marks it busy, or returns -1 when no such worker exists.
int StaticAssignmentAlgorithm::get( )
{
    for( unsigned i = 0; i < busy.size(); ++i )
    {
        if( busy[i] || attributions[i] <= 0 )
        {
            continue;
        }
        busy[i] = true;
        --freeWorkers;
        return realRank[ i ];
    }
    return -1;
}
// Number of workers currently marked as free.
int StaticAssignmentAlgorithm::availableWorkers( )
{
    return freeWorkers;
}

// Ranks of all workers not currently busy.
std::vector<int> StaticAssignmentAlgorithm::idles()
{
    std::vector<int> idleRanks;
    const unsigned int count = busy.size();
    for( unsigned int i = 0; i < count; ++i )
    {
        if( !busy[i] )
        {
            idleRanks.push_back( realRank[i] );
        }
    }
    return idleRanks;
}
/*
 * A worker has finished one run: decrement its attribution and mark it free.
 *
 * Fix: if the given rank was not one of the registered workers, the original
 * kept i == -1 and indexed attributions[-1] / busy[-1] — undefined behaviour.
 * An unknown rank is now ignored.
 *
 * @param rank MPI rank of the worker confirming completion.
 */
void StaticAssignmentAlgorithm::confirm( int rank )
{
    // Map the MPI rank back to its index in the internal tables.
    int i = -1;
    for( unsigned int j = 0; j < realRank.size(); ++j )
    {
        if( realRank[j] == rank )
        {
            i = j;
            break;
        }
    }

    if( i == -1 )
    {
        return; // unknown rank: nothing to confirm
    }

    --attributions[ i ];
    busy[ i ] = false;
    ++freeWorkers;
}
// Restarts a static job: same worker set, fresh number of runs.
void StaticAssignmentAlgorithm::reinit( int runs )
{
    init( realRank, runs );
}
}
}

View file

@ -23,7 +23,6 @@ Authors:
# define __MPI_ASSIGNMENT_ALGORITHM_H__
# include <vector> // std::vector
# include "eoMpiNode.h"
namespace eo
{
@ -35,7 +34,7 @@ namespace eo
*
* @ingroup MPI
*/
const int REST_OF_THE_WORLD = -1;
extern const int REST_OF_THE_WORLD;
/**
* @brief Contains informations on the available workers and allows to find assignees for jobs.
@ -115,33 +114,21 @@ namespace eo
/**
* @brief Uses all the hosts whose rank is higher to 1, inclusive, as workers.
*/
DynamicAssignmentAlgorithm( )
{
for(int i = 1; i < Node::comm().size(); ++i)
{
availableWrk.push_back( i );
}
}
DynamicAssignmentAlgorithm( );
/**
* @brief Uses the unique host with given rank as a worker.
*
* @param unique MPI rank of the unique worker.
*/
DynamicAssignmentAlgorithm( int unique )
{
availableWrk.push_back( unique );
}
DynamicAssignmentAlgorithm( int unique );
/**
* @brief Uses the workers whose ranks are present in the argument as workers.
*
* @param workers std::vector containing MPI ranks of workers.
*/
DynamicAssignmentAlgorithm( const std::vector<int> & workers )
{
availableWrk = workers;
}
DynamicAssignmentAlgorithm( const std::vector<int> & workers );
/**
* @brief Uses a range of ranks as workers.
@ -150,50 +137,17 @@ namespace eo
* @param last The last worker to be included (inclusive). If last == eo::mpi::REST_OF_THE_WORLD, all
* hosts whose rank is higher than first are taken.
*/
DynamicAssignmentAlgorithm( int first, int last )
{
if( last == REST_OF_THE_WORLD )
{
last = Node::comm().size() - 1;
}
DynamicAssignmentAlgorithm( int first, int last );
for( int i = first; i <= last; ++i)
{
availableWrk.push_back( i );
}
}
virtual int get( );
virtual int get( )
{
int assignee = -1;
if (! availableWrk.empty() )
{
assignee = availableWrk.back();
availableWrk.pop_back();
}
return assignee;
}
int availableWorkers();
int availableWorkers()
{
return availableWrk.size();
}
void confirm( int rank );
void confirm( int rank )
{
availableWrk.push_back( rank );
}
std::vector<int> idles( );
std::vector<int> idles( )
{
return availableWrk;
}
void reinit( int _ )
{
++_;
// nothing to do
}
void reinit( int _ );
protected:
std::vector< int > availableWrk;
@ -223,10 +177,7 @@ namespace eo
* @param workers std::vector of MPI ranks of workers which will be used.
* @param runs Fixed amount of runs, strictly positive.
*/
StaticAssignmentAlgorithm( std::vector<int>& workers, int runs )
{
init( workers, runs );
}
StaticAssignmentAlgorithm( std::vector<int>& workers, int runs );
/**
* @brief Uses a range of workers.
@ -236,21 +187,7 @@ namespace eo
* workers from the first one are taken as workers.
* @param runs Fixed amount of runs, strictly positive.
*/
StaticAssignmentAlgorithm( int first, int last, int runs )
{
std::vector<int> workers;
if( last == REST_OF_THE_WORLD )
{
last = Node::comm().size() - 1;
}
for(int i = first; i <= last; ++i)
{
workers.push_back( i );
}
init( workers, runs );
}
StaticAssignmentAlgorithm( int first, int last, int runs );
/**
* @brief Uses all the hosts whose rank is higher than 1 (inclusive) as workers.
@ -258,16 +195,7 @@ namespace eo
* @param runs Fixed amount of runs, strictly positive. If it's not set, you'll have to call reinit()
* later.
*/
StaticAssignmentAlgorithm( int runs = 0 )
{
std::vector<int> workers;
for(int i = 1; i < Node::comm().size(); ++i)
{
workers.push_back( i );
}
init( workers, runs );
}
StaticAssignmentAlgorithm( int runs = 0 );
/**
* @brief Uses an unique host as worker.
@ -275,12 +203,7 @@ namespace eo
* @param unique The MPI rank of the host which will be the worker.
* @param runs Fixed amount of runs, strictly positive.
*/
StaticAssignmentAlgorithm( int unique, int runs )
{
std::vector<int> workers;
workers.push_back( unique );
init( workers, runs );
}
StaticAssignmentAlgorithm( int unique, int runs );
private:
/**
@ -292,89 +215,18 @@ namespace eo
* @param workers Vector of hosts' ranks
* @param runs Fixed amount of runs, strictly positive.
*/
void init( std::vector<int> & workers, int runs )
{
unsigned int nbWorkers = workers.size();
freeWorkers = nbWorkers;
busy.clear();
busy.resize( nbWorkers, false );
realRank = workers;
if( runs <= 0 )
{
return;
}
attributions.clear();
attributions.reserve( nbWorkers );
// Let be the euclidean division of runs by nbWorkers :
// runs == q * nbWorkers + r, 0 <= r < nbWorkers
// This one liner affects q requests to each worker
for (unsigned int i = 0; i < nbWorkers; attributions[i++] = runs / nbWorkers) ;
// The first line computes r and the one liner affects the remaining
// r requests to workers, in ascending order
unsigned int diff = runs - (runs / nbWorkers) * nbWorkers;
for (unsigned int i = 0; i < diff; ++attributions[i++]);
}
void init( std::vector<int> & workers, int runs );
public:
int get( )
{
int assignee = -1;
for( unsigned i = 0; i < busy.size(); ++i )
{
if( !busy[i] && attributions[i] > 0 )
{
busy[i] = true;
--freeWorkers;
assignee = realRank[ i ];
break;
}
}
return assignee;
}
int get( );
int availableWorkers( )
{
return freeWorkers;
}
int availableWorkers( );
std::vector<int> idles()
{
std::vector<int> ret;
for(unsigned int i = 0; i < busy.size(); ++i)
{
if( !busy[i] )
{
ret.push_back( realRank[i] );
}
}
return ret;
}
std::vector<int> idles();
void confirm( int rank )
{
int i = -1; // i is the real index in table
for( unsigned int j = 0; j < realRank.size(); ++j )
{
if( realRank[j] == rank )
{
i = j;
break;
}
}
void confirm( int rank );
--attributions[ i ];
busy[ i ] = false;
++freeWorkers;
}
void reinit( int runs )
{
init( realRank, runs );
}
void reinit( int runs );
private:
std::vector<int> attributions;

40
eo/src/mpi/eoMpiNode.cpp Normal file
View file

@ -0,0 +1,40 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# include "eoMpiNode.h"
namespace eo
{
namespace mpi
{
// Initializes the MPI layer. The environment object is a function-local
// static, so MPI_Init runs only on the first call and MPI_Finalize runs at
// program exit, when the static is destroyed (see environment's ctor/dtor
// in implMpi.cpp).
// NOTE(review): only the first call's argc/argv are used — call once, early in main.
void Node::init( int argc, char** argv )
{
static bmpi::environment env( argc, argv );
}
// Accessor for the process-wide communicator singleton.
bmpi::communicator& Node::comm()
{
return _comm;
}
// Out-of-class definition of the static communicator member.
bmpi::communicator Node::_comm;
}
}

View file

@ -22,17 +22,17 @@ Authors:
# ifndef __MPI_NODE_H__
# define __MPI_NODE_H__
# include <boost/mpi.hpp>
namespace bmpi = boost::mpi;
# include "implMpi.h"
namespace bmpi = mpi;
namespace eo
{
namespace mpi
{
/**
* @brief Global object used to reach boost::mpi::communicator everywhere.
* @brief Global object used to reach mpi::communicator everywhere.
*
* boost::mpi::communicator is the main object used to send and receive messages between the different hosts of
* mpi::communicator is the main object used to send and receive messages between the different hosts of
* a MPI algorithm.
*
* @ingroup MPI
@ -49,18 +49,12 @@ namespace eo
* @param argc Main's argc
* @param argv Main's argv
*/
static void init( int argc, char** argv )
{
static bmpi::environment env( argc, argv );
}
static void init( int argc, char** argv );
/**
* @brief Returns the global boost::mpi::communicator
* @brief Returns the global mpi::communicator
*/
static bmpi::communicator& comm()
{
return _comm;
}
static bmpi::communicator& comm();
protected:
static bmpi::communicator _comm;

491
eo/src/mpi/eoMultiStart.h Normal file
View file

@ -0,0 +1,491 @@
# ifndef __EO_MULTISTART_H__
# define __EO_MULTISTART_H__
# include <eo>
# include "eoMpi.h"
/**
* @ingroup MPI
* @{
*/
/**
* @file eoMultiStart.h
*
* Contains implementation of a MPI job which consists in a multi start, which basically consists in the following:
* the same eoAlgo is launched on computers of a clusters, with different seeds for each. As the eoAlgo are most of
* the time stochastics, the results won't be the same. It is fully equivalent to launch the same program but with
* different seeds.
*
* It follows the structure of a MPI job, as described in eoMpi.h. The basic algorithm is trivial:
* - Loop while we have a run to perform.
* - Worker performs runs and send their best solution (individual with best fitness) to the master.
* - Master retrieves the best solution and adds it to a eoPop of best solutions (the user can chooses what he does
* with this population, for instance: retrieve the best element, etc.)
*
* The principal concerns about this algorithm are:
* - How do we reinitialize the algorithm? An eoAlgo can have several forms, and initializations have to be performed
* before each "start". We can hence decide whether we reinits the population or keep the same population obtained
* after the previous start, we have to reinitialize continuator, etc. This is customizable in the store.
*
* - Which seeds should be chosen? If we want the run to be re-runnable with the same results, we need to be sure that
* the seeds are the same. But user can not care about this, and just want random seeds. This is customizable in the
* store.
*
* These concerns are handled by functors, inheriting from MultiStartStore<EOT>::ResetAlgo (for the first concern), and
* MultiStartStore<EOT>::GetSeeds (for the second one). There are default implementations, but there is no problem about
* specializing them or coding your own, by directly inheriting from them.
*
* @ingroup MPI
*/
namespace eo
{
namespace mpi
{
/**
 * @brief Data shared between the Multi Start job functors.
 *
 * Holds the dynamic state of the job (remaining runs, populations) and the
 * static collaborators (communicator, algorithm, reset functor, master rank).
 */
template< class EOT >
struct MultiStartData
{
    /// Functor type used to reset the algorithm before each run.
    typedef eoUF< eoPop<EOT>&, void> ResetAlgo;

    /**
     * @param _comm Communicator used to exchange messages.
     * @param _algo Algorithm each worker runs.
     * @param _masterRank MPI rank of the master.
     * @param _resetAlgo Functor resetting the algorithm between runs.
     *
     * The initializer list follows the members' declaration order
     * (runs, bests, pop, comm, algo, resetAlgo, masterRank) — the original
     * list was out of order, which triggers -Wreorder and misleads readers
     * about the actual initialization sequence.
     */
    MultiStartData( bmpi::communicator& _comm, eoAlgo<EOT>& _algo, int _masterRank, ResetAlgo & _resetAlgo )
        : runs( 0 ), bests(), pop(),
        comm( _comm ), algo( _algo ), resetAlgo( _resetAlgo ), masterRank( _masterRank )
    {
        // empty
    }

    // dynamic parameters

    /// Total remaining number of runs; decremented as runs are performed.
    int runs;

    /// Best individuals, as sent back by the workers.
    eoPop< EOT > bests;

    /// Population on which the worker operates.
    eoPop< EOT > pop;

    // static parameters

    /// Communicator used to send and retrieve messages.
    bmpi::communicator& comm;

    /// Algorithm performed by the worker.
    eoAlgo<EOT>& algo;

    /// Functor defining how to reset the algorithm before re-running it.
    ResetAlgo& resetAlgo;

    /// MPI rank of the master.
    int masterRank;
};
/**
 * @brief Send task (master side) in the Multi Start job.
 *
 * Only decrements the number of remaining runs: the workers already hold
 * the population and every parameter needed to run the eoAlgo, so there is
 * nothing to transmit.
 */
template< class EOT >
class SendTaskMultiStart : public SendTaskFunction< MultiStartData< EOT > >
{
public:
    using SendTaskFunction< MultiStartData< EOT > >::_data;

    /// @param wrkRank Rank of the assigned worker — unused here.
    void operator()( int wrkRank )
    {
        (void) wrkRank; // idiomatic unused-parameter marker (was "wrkRank++;")
        --(_data->runs);
    }
};
/**
 * @brief Handle Response (master side) in the Multi Start job.
 *
 * Receives the best solution found by a worker and appends it to the
 * population of best solutions.
 */
template< class EOT >
class HandleResponseMultiStart : public HandleResponseFunction< MultiStartData< EOT > >
{
public:
    using HandleResponseFunction< MultiStartData< EOT > >::_data;

    /// @param wrkRank Rank of the worker whose answer is being handled.
    void operator()( int wrkRank )
    {
        MultiStartData< EOT >& data = *_data;
        EOT best;
        data.comm.recv( wrkRank, 1, best );
        data.bests.push_back( best );
    }
};
/**
 * @brief Process Task (worker side) in the Multi Start job.
 *
 * Resets the algorithm, runs it on the local population, then sends the
 * individual with the best fitness back to the master.
 */
template< class EOT >
class ProcessTaskMultiStart : public ProcessTaskFunction< MultiStartData< EOT > >
{
public:
    using ProcessTaskFunction< MultiStartData<EOT > >::_data;

    void operator()()
    {
        MultiStartData< EOT >& data = *_data;
        data.resetAlgo( data.pop );
        data.algo( data.pop );
        data.comm.send( data.masterRank, 1, data.pop.best_element() );
    }
};
/**
 * @brief Is Finished (master side) in the Multi Start job.
 *
 * The job ends exactly when no runs remain to be performed.
 */
template< class EOT >
class IsFinishedMultiStart : public IsFinishedFunction< MultiStartData< EOT > >
{
public:
    using IsFinishedFunction< MultiStartData< EOT > >::_data;

    bool operator()()
    {
        const int remaining = _data->runs;
        return remaining <= 0;
    }
};
/**
 * @brief Store for the Multi Start job.
 *
 * Contains the data used by the workers (algo, ...) and the functor used to
 * generate the seeds sent to the workers.
 */
template< class EOT >
class MultiStartStore : public JobStore< MultiStartData< EOT > >
{
public:

    /**
     * @brief Generic functor resetting an algorithm before the worker
     * launches it (reinitialize population if necessary, continuator, etc.).
     */
    typedef typename MultiStartData<EOT>::ResetAlgo ResetAlgo;

    /**
     * @brief Generic functor returning a vector of seeds for the workers.
     *
     * If the vector does not contain enough seeds, random ones are generated
     * and sent to the remaining workers.
     */
    typedef eoUF< int, std::vector<int> > GetSeeds;

    /**
     * @brief Default ctor for MultiStartStore.
     *
     * @param algo The algorithm to launch in parallel
     * @param masterRank The MPI rank of the master
     * @param resetAlgo The ResetAlgo functor
     * @param getSeeds The GetSeeds functor
     */
    MultiStartStore(
            eoAlgo<EOT> & algo,
            int masterRank,
            ResetAlgo & resetAlgo,
            GetSeeds & getSeeds
            )
        : _data( eo::mpi::Node::comm(), algo, masterRank, resetAlgo ),
        _getSeeds( getSeeds ),
        _masterRank( masterRank )
    {
        // Default job functors for this job; flagged so they get deleted.
        this->_iff = new IsFinishedMultiStart< EOT >;
        this->_iff->needDelete(true);
        this->_stf = new SendTaskMultiStart< EOT >;
        this->_stf->needDelete(true);
        this->_hrf = new HandleResponseMultiStart< EOT >;
        this->_hrf->needDelete(true);
        this->_ptf = new ProcessTaskMultiStart< EOT >;
        this->_ptf->needDelete(true);
    }

    /**
     * @brief Sends new seeds to the workers before a job.
     *
     * Master side: completes the seed vector with random seeds if the
     * GetSeeds functor did not provide enough, then sends one seed per
     * worker. Worker side: receives its seed and reseeds its RNG.
     *
     * @param workers Vector of MPI ranks of the workers
     * @param runs The number of runs to perform
     */
    void init( const std::vector<int>& workers, int runs )
    {
        _data.runs = runs;

        int nbWorkers = workers.size();
        std::vector< int > seeds = _getSeeds( nbWorkers );
        if( eo::mpi::Node::comm().rank() == _masterRank )
        {
            // Explicit cast fixes the signed/unsigned comparison of the
            // original (seeds.size() is unsigned, nbWorkers is int).
            if( seeds.size() < static_cast< std::vector<int>::size_type >( nbWorkers ) )
            {
                // Complete with random seeds.
                for( int i = static_cast<int>( seeds.size() ); i < nbWorkers; ++i )
                {
                    seeds.push_back( eo::rng.rand() );
                }
            }

            for( int i = 0 ; i < nbWorkers ; ++i )
            {
                int wrkRank = workers[i];
                eo::mpi::Node::comm().send( wrkRank, 1, seeds[ i ] );
            }
        } else
        {
            int seed;
            eo::mpi::Node::comm().recv( _masterRank, 1, seed );
            eo::log << eo::debug << eo::mpi::Node::comm().rank() << "- Seed: " << seed << std::endl;
            eo::rng.reseed( seed );
        }
    }

    /// Access to the shared job data.
    MultiStartData<EOT>* data()
    {
        return &_data;
    }

private:
    MultiStartData< EOT > _data;
    GetSeeds & _getSeeds;
    int _masterRank;
};
/**
 * @brief MultiStart job, created for convenience.
 *
 * This is a OneShotJob, which means workers leave it along with the master.
 */
template< class EOT >
class MultiStart : public OneShotJob< MultiStartData< EOT > >
{
public:
// @param algo Assignment algorithm describing the available workers.
// @param masterRank MPI rank of the master.
// @param store Store holding the job data and functors; its init() is
//        called here with the idle workers and the run count.
// @param runs Number of runs to distribute.
// @param seeds NOTE(review): this parameter is never used in this ctor —
//        seeding is done by the store's GetSeeds functor; confirm whether
//        it should be forwarded or removed.
MultiStart( AssignmentAlgorithm & algo,
int masterRank,
MultiStartStore< EOT > & store,
// dynamic parameters
int runs,
const std::vector<int>& seeds = std::vector<int>() ) :
OneShotJob< MultiStartData< EOT > >( algo, masterRank, store )
{
store.init( algo.idles(), runs );
}
/**
 * @brief Returns the best solutions, at the end of the job.
 *
 * Warning: if you call this function from a worker, or from the master
 * before the launch of the job, you will only get an empty population!
 *
 * @return Population of best individuals retrieved by the master.
 */
eoPop<EOT>& best_individuals()
{
return this->store.data()->bests;
}
};
/*************************************
* DEFAULT GET SEEDS IMPLEMENTATIONS *
************************************/
/**
 * @brief GetSeeds implementation that provides no explicit seed at all:
 * the store then falls back on randomly generated seeds for every worker.
 */
template<class EOT>
struct DummyGetSeeds : public MultiStartStore<EOT>::GetSeeds
{
    std::vector<int> operator()( int /* unused */ )
    {
        // Empty vector: the caller generates random seeds for all workers.
        return std::vector<int>();
    }
};
/**
 * @brief Sends seeds which are successive multiples of a base number given
 * by the master. If no base (or 0) is given, a random non-zero one is drawn.
 *
 * Ensures that even if the same store is reused across different jobs, the
 * generated seeds keep changing (_i keeps growing).
 */
template<class EOT>
struct MultiplesOfNumber : public MultiStartStore<EOT>::GetSeeds
{
    /// @param n Base number; 0 (the default) means "pick a random non-zero base".
    MultiplesOfNumber ( int n = 0 )
    {
        while( n == 0 )
        {
            n = eo::rng.rand();
        }
        _seed = n;
        _i = 0;
    }

    /// @param n Number of seeds to produce; non-positive values yield an empty vector.
    std::vector<int> operator()( int n )
    {
        std::vector<int> ret;
        // Guard fixes the original "i < n" comparison between unsigned i and
        // int n: a negative n was converted to a huge unsigned bound.
        if( n <= 0 )
        {
            return ret;
        }
        const unsigned int count = static_cast<unsigned int>( n );
        for( unsigned int i = 0; i < count; ++i )
        {
            ret.push_back( (++_i) * _seed );
        }
        return ret;
    }

    private:
    unsigned int _seed;
    unsigned int _i;
};
/**
 * @brief Returns random seeds for the workers. The generated sequence is
 * reproducible, since the master RNG is reseeded in the constructor.
 */
template<class EOT>
struct GetRandomSeeds : public MultiStartStore<EOT>::GetSeeds
{
    /// @param seed Seed for the master RNG, controlling the whole sequence.
    GetRandomSeeds( int seed )
    {
        eo::rng.reseed( seed );
    }

    /// @param n Number of random seeds to generate.
    std::vector<int> operator()( int n )
    {
        std::vector<int> generated;
        for( int i = 0; i < n; ++i )
        {
            generated.push_back( eo::rng.rand() );
        }
        return generated;
    }
};
/**************************************
 * DEFAULT RESET ALGO IMPLEMENTATIONS *
 *************************************/
/**
 * @brief For a Genetic Algorithm: before every run, restores the original
 * population given at construction and resets the continuator.
 *
 * The evaluator is also required, since the restored population has to be
 * evaluated before each run.
 */
template<class EOT>
struct ReuseOriginalPopEA: public MultiStartStore<EOT>::ResetAlgo
{
    ReuseOriginalPopEA(
        eoCountContinue<EOT> & continuator,
        const eoPop<EOT>& originalPop,
        eoEvalFunc<EOT>& eval) :
        _continuator( continuator ),
        _originalPop( originalPop ),
        _eval( eval )
    {
        // empty
    }

    void operator()( eoPop<EOT>& pop )
    {
        // Restore the pristine population, re-evaluate every individual,
        // then rewind the continuator's generation count.
        pop = _originalPop;
        const unsigned popSize = pop.size();
        for( unsigned i = 0; i < popSize; ++i )
        {
            _eval( pop[i] );
        }
        _continuator.reset();
    }

    private:
    eoCountContinue<EOT> & _continuator;
    const eoPop<EOT>& _originalPop;
    eoEvalFunc<EOT>& _eval;
};
/**
 * @brief For a Genetic Algorithm: keeps evolving the same population across
 * runs, without restoring it.
 *
 * Launching a run after another one makes the *same* population evolve
 * further. The evaluator is needed once, to evaluate the stored copy of the
 * original population at construction time.
 */
template< class EOT >
struct ReuseSamePopEA : public MultiStartStore<EOT>::ResetAlgo
{
    ReuseSamePopEA(
        eoCountContinue<EOT>& continuator,
        const eoPop<EOT>& originalPop,
        eoEvalFunc<EOT>& eval
        ) :
        _continuator( continuator ),
        _originalPop( originalPop ),
        _firstTime( true )
    {
        // Evaluate the stored copy once, up front.
        const unsigned popSize = _originalPop.size();
        for( unsigned i = 0; i < popSize; ++i )
        {
            eval(_originalPop[i]);
        }
    }

    void operator()( eoPop<EOT>& pop )
    {
        // Only the very first run starts from the stored population;
        // subsequent runs keep whatever the previous run produced.
        if( _firstTime )
        {
            pop = _originalPop;
            _firstTime = false;
        }
        _continuator.reset();
    }

    protected:
    eoCountContinue<EOT>& _continuator;
    eoPop<EOT> _originalPop;
    bool _firstTime;
};
} // namespace mpi
} // namespace eo
/**
* @}
*/
# endif // __EO_MULTISTART_H__

166
eo/src/mpi/implMpi.cpp Normal file
View file

@ -0,0 +1,166 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# include "implMpi.h"
namespace mpi
{
const int any_source = MPI_ANY_SOURCE;
const int any_tag = MPI_ANY_TAG;
// RAII wrapper around the MPI runtime: constructing an environment
// initializes MPI, destroying it finalizes MPI.
environment::environment(int argc, char**argv)
{
    MPI_Init(&argc, &argv);
}

environment::~environment()
{
    MPI_Finalize();
}
// Extracts the interesting fields of a raw MPI_Status into the wrapper.
status::status( const MPI_Status & s ) :
    _source( s.MPI_SOURCE ),
    _tag( s.MPI_TAG ),
    _error( s.MPI_ERROR )
{
    // all members initialized above
}
// Rank, size and the string scratch buffer are all initialized lazily:
// -1 / null mark "not queried / not allocated yet".
communicator::communicator( ) :
    _rank( -1 ),
    _size( -1 ),
    _buf( 0 ),
    _bufsize( -1 )
{
    // all members initialized above
}
communicator::~communicator()
{
    // _buf is allocated with new char[] in recv( ..., std::string& ), so it
    // must be released with delete[] — the previous scalar `delete _buf`
    // was undefined behaviour. delete[] on a null pointer is a no-op, so no
    // guard is needed.
    delete [] _buf;
    _buf = 0;
}
// Returns the MPI rank of this process, querying MPI only on first use
// and caching the result afterwards.
int communicator::rank()
{
    if( _rank == -1 )
    {
        MPI_Comm_rank( MPI_COMM_WORLD, &_rank );
    }

    return _rank;
}
// Returns the size of the MPI world, querying MPI only on first use
// and caching the result afterwards.
int communicator::size()
{
    if( _size == -1 )
    {
        MPI_Comm_size( MPI_COMM_WORLD, &_size );
    }

    return _size;
}
/*
* SEND / RECV INT
*/
// Sends a single integer to process `dest` on channel `tag` (blocking).
void communicator::send( int dest, int tag, int n )
{
    MPI_Send( &n, 1, MPI_INT, dest, tag, MPI_COMM_WORLD );
}

// Receives a single integer from process `src` on channel `tag` (blocking).
// The MPI_Status is discarded: source/tag filtering is done by MPI itself.
void communicator::recv( int src, int tag, int& n )
{
    MPI_Status stat;
    MPI_Recv( &n, 1, MPI_INT, src, tag, MPI_COMM_WORLD , &stat );
}
/*
* SEND / RECV STRING
*/
// Sends a string in two messages: first its size (including the trailing
// '\0', hence the +1), then the raw characters. The receiver relies on the
// terminator to rebuild the std::string.
void communicator::send( int dest, int tag, const std::string& str )
{
    int size = str.size() + 1;
    send( dest, tag, size );
    MPI_Send( (char*)str.c_str(), size, MPI_CHAR, dest, tag, MPI_COMM_WORLD);
}
// Receives a string sent by send( dest, tag, std::string ): first the size
// (which includes the trailing '\0'), then the characters, into a reusable
// scratch buffer so repeated receives avoid reallocation.
void communicator::recv( int src, int tag, std::string& str )
{
    int size = -1;
    recv( src, tag, size );

    // Grow the scratch buffer whenever it cannot hold the incoming message.
    // delete[] on a null _buf is a harmless no-op.
    if( _buf == 0 || _bufsize < size )
    {
        delete [] _buf;
        _buf = new char[ size ];
        _bufsize = size;
    }

    MPI_Status stat;
    MPI_Recv( _buf, size, MPI_CHAR, src, tag, MPI_COMM_WORLD, &stat );
    str.assign( _buf ); // stops at the transmitted '\0'
}
/*
* SEND / RECV Objects
*/
// Serializes a Persistent object to its JSON-like text form and sends the
// resulting string. The packed object is owned (and freed) here.
void communicator::send( int dest, int tag, const eoserial::Persistent & persistent )
{
    std::stringstream ss;
    eoserial::Object* obj = persistent.pack();
    obj->print( ss );
    delete obj;

    send( dest, tag, ss.str() );
}
// Receives the textual form of a serialized object and unpacks it into
// `persistent`. Counterpart of send( dest, tag, const Persistent& ).
// NOTE(review): if unpack() throws, `obj` leaks — presumably unpack is
// non-throwing in practice; confirm against eoserial.
void communicator::recv( int src, int tag, eoserial::Persistent & persistent )
{
    std::string asText;
    recv( src, tag, asText );
    eoserial::Object* obj = eoserial::Parser::parse( asText );
    persistent.unpack( obj );
    delete obj;
}
/*
* Other methods
*/
// Blocks until a message from `src` on channel `tag` is available, without
// consuming it; returns the wrapped MPI status describing that message.
status communicator::probe( int src, int tag )
{
    MPI_Status stat;
    MPI_Probe( src, tag, MPI_COMM_WORLD , &stat );
    return status( stat );
}

// Blocks until every process of the world has reached this point.
void communicator::barrier()
{
    MPI_Barrier( MPI_COMM_WORLD );
}
// Broadcasts `value` from `root` to the whole world.
// NOTE(review): `value` is passed BY VALUE, so on non-root ranks the value
// written by MPI_Bcast is discarded when the function returns — callers
// cannot observe the broadcast result. The parameter should probably be
// `int&`; changing it requires updating the declaration in implMpi.h and
// auditing callers. `comm` is also unused (see @todo in the header).
void broadcast( communicator & comm, int value, int root )
{
    MPI_Bcast( &value, 1, MPI_INT, root, MPI_COMM_WORLD );
}
}

322
eo/src/mpi/implMpi.h Normal file
View file

@ -0,0 +1,322 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# ifndef __EO_IMPL_MPI_HPP__
# define __EO_IMPL_MPI_HPP__
# include <mpi.h>
# include <serial/eoSerial.h>
/**
* This namespace contains reimplementations of some parts of the Boost::MPI API in C++, so as to be used in
* EO without any dependence on Boost. Historically, EO's parallelization module used the
* boost library to add a layer over MPI. After having noticed that just some functions
* were really used, we decided to reimplement our own C++-like implementation of MPI.
*
* Because the Boost::MPI API is really clean, we reused it in this module. However, all
* the functions of Boost::MPI were not used, hence a subset of the API is reused. For
* instance, users can just send integer, std::string or eoserial::Persistent objects;
* furthermore, only eoserial::Persistent objects can be sent in a table.
*
* The documentation of the functions is exactly the same as the official Boost::MPI
* documentation. You can find it on www.boost.org/doc/libs/1_49_0/doc/html/mpi/
* The entities are here shortly described, if you need further details, don't hesitate
* to visit the boost URL.
*/
namespace mpi
{
/**
* @ingroup Parallel
* @{
*/
/**
* @brief Constant indicating that a message can come from any process.
*/
extern const int any_source;
/**
* @brief Constant indicating that a message can come from any tag (channel).
*/
extern const int any_tag;
/**
* @brief Wrapper class to have a MPI environment.
*
* Instead of calling MPI_Init and MPI_Finalize, it is only necessary to instantiate
* this class once, in the global context.
*/
// RAII wrapper: instantiate exactly once, in main's scope, instead of
// calling MPI_Init / MPI_Finalize by hand.
// NOTE(review): the class is copyable as declared; copying it would
// finalize MPI twice — consider making it non-copyable.
class environment
{
    public:
        /**
         * @brief Inits MPI context.
         *
         * @param argc Number of params in command line (same as one in main)
         * @param argv Strings containing params (same as one in main)
         */
        environment(int argc, char**argv);

        /**
         * @brief Closes MPI context.
         */
        ~environment();
};
/**
* @brief Wrapper class for MPI_Status
*
* Consists only in a C++ wrapper class, giving getters on status attributes.
*/
// Read-only view over the fields of an MPI_Status, captured at
// construction time.
class status
{
    public:
        /**
         * @brief Converts a MPI_Status into a status.
         */
        status( const MPI_Status & s );

        /**
         * @brief Returns the tag of the associated communication.
         */
        int tag() { return _tag; }

        /**
         * @brief Indicates which error number we obtained in the associated communication.
         */
        int error() { return _error; }

        /**
         * @brief Returns the MPI rank of the source of the associated communication.
         */
        int source() { return _source; }

    private:
        int _source;
        int _tag;
        int _error;
};
/**
* @brief Main object, used to send / receive messages, get informations about the rank and the size of the world,
* etc.
*/
class communicator
{
    public:
        /**
         * Creates the communicator, using the whole world as a MPI_Comm.
         *
         * @todo Allow the user to precise which MPI_Comm to use
         */
        communicator( );

        ~communicator();

        /**
         * @brief Returns the MPI rank of the current process.
         */
        int rank();

        /**
         * @brief Returns the size of the MPI cluster.
         */
        int size();

        /*
         * SEND / RECV INT
         */

        /**
         * @brief Sends an integer to dest on channel "tag".
         *
         * @param dest MPI rank of the receiver
         * @param tag MPI tag of message
         * @param n The integer to send
         */
        void send( int dest, int tag, int n );

        /**
         * @brief Receives an integer from src on channel "tag".
         *
         * @param src MPI rank of the sender
         * @param tag MPI tag of message
         * @param n Where to save the received integer
         */
        void recv( int src, int tag, int& n );

        /*
         * SEND / RECV STRING
         */

        /**
         * @brief Sends a string to dest on channel "tag".
         *
         * @param dest MPI rank of the receiver
         * @param tag MPI tag of message
         * @param str The std::string to send
         */
        void send( int dest, int tag, const std::string& str );

        /**
         * @brief Receives a string from src on channel "tag".
         *
         * @param src MPI rank of the sender
         * @param tag MPI tag of message
         * @param str Where to save the received string
         */
        void recv( int src, int tag, std::string& str );

        /*
         * SEND / RECV Objects
         */

        /**
         * @brief Sends an eoserial::Persistent to dest on channel "tag".
         *
         * @param dest MPI rank of the receiver
         * @param tag MPI tag of message
         * @param persistent The object to send (it must absolutely implement eoserial::Persistent)
         */
        void send( int dest, int tag, const eoserial::Persistent & persistent );

        /**
         * @brief Sends an array of eoserial::Persistent to dest on channel "tag".
         *
         * The whole table is serialized into one JSON-like string and sent as
         * a single message.
         *
         * @param dest MPI rank of the receiver
         * @param tag MPI tag of message
         * @param table The array of eoserial::Persistent objects
         * @param size The number of elements to send (no check is done, the user has to be sure that the size won't
         * overflow!)
         */
        template< class T >
        void send( int dest, int tag, T* table, int size )
        {
            // Puts all the values into an array
            eoserial::Array* array = new eoserial::Array;
            for( int i = 0; i < size; ++i )
            {
                array->push_back( table[i].pack() );
            }

            // Encapsulates the array into an object
            eoserial::Object* obj = new eoserial::Object;
            obj->add( "array", array );
            std::stringstream ss;
            obj->print( ss );
            delete obj;

            // Sends the object as a string
            send( dest, tag, ss.str() );
        }

        /**
         * @brief Receives an eoserial::Persistent object from src on channel "tag".
         *
         * @param src MPI rank of the sender
         * @param tag MPI tag of message
         * @param persistent Where to unpack the serialized object?
         */
        void recv( int src, int tag, eoserial::Persistent & persistent );

        /**
         * @brief Receives an array of eoserial::Persistent from src on channel "tag".
         *
         * @param src MPI rank of the sender
         * @param tag MPI tag of message
         * @param table The table in which we're saving the received objects. It must have been allocated by the user,
         * as no allocation is performed here.
         * @param size The number of elements to receive (no check is done, the user has to be sure that the size won't
         * overflow!)
         */
        template< class T >
        void recv( int src, int tag, T* table, int size )
        {
            // Receives the string which contains the object
            std::string asText;
            recv( src, tag, asText );

            // Parses the object and retrieves the table
            eoserial::Object* obj = eoserial::Parser::parse( asText );
            eoserial::Array* array = static_cast<eoserial::Array*>( (*obj)["array"] );

            // Retrieves all the values from the array
            for( int i = 0; i < size; ++i )
            {
                eoserial::unpackObject( *array, i, table[i] );
            }
            delete obj;
        }

        /*
         * Other methods
         */

        /**
         * @brief Wrapper for MPI_Probe
         *
         * Waits for a message to come from process having rank src, on the channel
         * tag.
         *
         * @param src MPI rank of the sender (any_source if it can be any sender)
         * @param tag MPI tag of the expected message (any_tag if it can be any tag)
         */
        status probe( int src = any_source, int tag = any_tag );

        /**
         * @brief Wrapper for MPI_Barrier
         *
         * Blocks until every process of the world has reached this call.
         */
        void barrier();

    private:

        int _rank;
        int _size;

        char* _buf; // temporary buffer for sending and receiving strings. Avoids reallocations
        int _bufsize; // size of the above temporary buffer
};
/**
 * @brief Wrapper for MPI_Bcast
 *
 * Broadcasts an integer value on the communicator comm, from the process having the MPI rank root.
 *
 * @param comm The communicator on which to broadcast
 * @param value The integer value to send
 * @param root The MPI rank of the broadcaster
 *
 * NOTE(review): `value` is taken by value, so non-root ranks cannot observe
 * the broadcast result — the signature should probably take `int&`.
 *
 * @todo Actually comm isn't used and broadcast is performed on the whole MPI_COMM_WORLD. TODO: Use comm instead
 */
void broadcast( communicator & comm, int value, int root );
/**
* @}
*/
} // namespace mpi
# endif //__EO_IMPL_MPI_HPP__

View file

@ -30,12 +30,7 @@ Authors:
# include "utils/eoParallel.h" // eo::parallel
# ifdef WITH_MPI
// For serialization purposes
# include <boost/serialization/access.hpp>
# include <boost/serialization/vector.hpp>
# include <boost/serialization/map.hpp>
# endif
# include "serial/eoSerial.h" // eo::Persistent
/**
* @brief Timer allowing to measure time between a start point and a stop point.
@ -202,6 +197,9 @@ class eoTimer
* @ingroup Utilities
*/
class eoTimerStat
# ifdef WITH_MPI
: public eoserial::Persistent
# endif
{
public:
@ -215,41 +213,63 @@ class eoTimerStat
* It can readily be serialized with boost when compiling with mpi.
*/
struct Stat
# ifdef WITH_MPI
: public eoserial::Persistent
# endif
{
std::vector<long int> utime;
std::vector<long int> stime;
std::vector<double> wtime;
#ifdef WITH_MPI
// Gives access to boost serialization
friend class boost::serialization::access;
void unpack( const eoserial::Object* obj )
{
utime.clear();
static_cast< eoserial::Array* >(obj->find("utime")->second)
->deserialize< std::vector<long int>, eoserial::Array::UnpackAlgorithm >( utime );
/**
* Serializes the single statistic in a boost archive (useful for boost::mpi).
* Just serializes the 3 vectors.
*/
template <class Archive>
void serialize( Archive & ar, const unsigned int version )
{
ar & utime & stime & wtime;
(void) version; // avoid compilation warning
}
stime.clear();
static_cast< eoserial::Array* >(obj->find("stime")->second)
->deserialize< std::vector<long int>, eoserial::Array::UnpackAlgorithm >( stime );
wtime.clear();
static_cast< eoserial::Array* >(obj->find("wtime")->second)
->deserialize< std::vector<double>, eoserial::Array::UnpackAlgorithm >( wtime );
}
eoserial::Object* pack( void ) const
{
eoserial::Object* obj = new eoserial::Object;
obj->add("utime", eoserial::makeArray< std::vector<long int>, eoserial::MakeAlgorithm >( utime ) );
obj->add("stime", eoserial::makeArray< std::vector<long int>, eoserial::MakeAlgorithm >( stime ) );
obj->add("wtime", eoserial::makeArray< std::vector<double>, eoserial::MakeAlgorithm >( wtime ) );
return obj;
}
# endif
};
#ifdef WITH_MPI
// Gives access to boost serialization
friend class boost::serialization::access;
/**
* Serializes the timerStat object in a boost archive (useful for boost::mpi).
* Just serializes the map.
*/
template <class Archive>
void serialize( Archive & ar, const unsigned int version )
void unpack( const eoserial::Object* obj )
{
_stats.clear();
for( eoserial::Object::const_iterator it = obj->begin(), final = obj->end();
it != final;
++it)
{
ar & _stats;
(void) version; // avoid compilation warning
eoserial::unpackObject( *obj, it->first, _stats[ it->first ] );
}
}
eoserial::Object* pack( void ) const
{
eoserial::Object* obj = new eoserial::Object;
for( std::map<std::string, Stat >::const_iterator it = _stats.begin(), final = _stats.end();
it != final;
++it)
{
obj->add( it->first, it->second.pack() );
}
return obj;
}
# endif
/**

View file

@ -15,7 +15,6 @@ INCLUDE_DIRECTORIES(${EO_SOURCE_DIR}/contrib/MGE)
INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
IF(WITH_MPI)
INCLUDE_DIRECTORIES(${BOOST_DIR}/include)
INCLUDE_DIRECTORIES(${MPI_DIR}/include)
ENDIF()
@ -26,7 +25,6 @@ ENDIF()
LINK_DIRECTORIES(${EO_BINARY_DIR}/lib)
IF(WITH_MPI)
LINK_DIRECTORIES(${BOOST_DIR}/lib)
LINK_DIRECTORIES(${MPI_DIR}/lib)
SET(CMAKE_CXX_COMPILER "${MPI_DIR}/bin/mpicxx")
ENDIF()

View file

@ -10,10 +10,8 @@
MESSAGE("EO SOURCE DIR: ${EO_SOURCE_DIR}")
MESSAGE("OMPI: ${MPI_DIR}")
MESSAGE("BOOST: ${BOOST_DIR}")
INCLUDE_DIRECTORIES(${MPI_DIR}/include)
INCLUDE_DIRECTORIES(${BOOST_DIR}/include)
INCLUDE_DIRECTORIES(${EO_SOURCE_DIR}/src)
INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
@ -23,7 +21,6 @@ INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
LINK_DIRECTORIES(${EO_BINARY_DIR}/lib)
LINK_DIRECTORIES(${MPI_DIR}/lib)
LINK_DIRECTORIES(${BOOST_DIR}/lib)
######################################################################################
### 3) Define your targets and link the librairies
@ -34,6 +31,7 @@ SET (TEST_LIST
t-mpi-wrapper
t-mpi-multipleRoles
t-mpi-eval
t-mpi-multistart
)
FOREACH (test ${TEST_LIST})
@ -47,7 +45,7 @@ IF(ENABLE_CMAKE_TESTING)
FOREACH (test ${TEST_LIST})
ADD_EXECUTABLE(${test} ${T_${test}_SOURCES})
ADD_TEST(${test} ${test})
TARGET_LINK_LIBRARIES(${test} boost_mpi boost_serialization eoutils eompi eoserial eo)
TARGET_LINK_LIBRARIES(${test} eoutils eompi eoserial eo)
INSTALL(TARGETS ${test} RUNTIME DESTINATION share/eo/test COMPONENT test)
ENDFOREACH (test)
ENDIF()

View file

@ -0,0 +1,51 @@
# ifndef __T_MPI_COMMON_H__
# define __T_MPI_COMMON_H__
#include <serial/eoSerial.h>
/**
* @file t-mpi-common.h
*
* This file shows an example of serialization of a primitive type, so as to be used in a parallel algorithm.
* It is fully compatible with the basic type, by implementing conversion operator and constructor based on type.
* It can contain any simple type which can be written in a std::ostream.
*/
/**
 * @brief Serializable wrapper around a primitive value of type T.
 *
 * Behaves transparently as a T thanks to the conversion operators, while
 * also implementing eoserial::Persistent so it can travel through the
 * parallel layer. T must be default-constructible, copyable and writable
 * to a std::ostream (via eoserial::make).
 */
template< class T >
struct SerializableBase : public eoserial::Persistent
{
    public:

        /** @brief Mutable access to the wrapped value, as the value itself. */
        operator T&()
        {
            return _value;
        }

        /**
         * @brief Read-only access for const contexts (new, backward
         * compatible): without it a const SerializableBase<T> could not be
         * read as a T at all.
         */
        operator const T&() const
        {
            return _value;
        }

        /** @brief Default ctor: value-initializes the wrapped value. */
        SerializableBase() : _value()
        {
            // empty
        }

        /** @brief Wraps a copy of the given value. */
        SerializableBase( T base ) : _value( base )
        {
            // empty
        }

        /** @brief Restores the wrapped value from the "value" field. */
        void unpack( const eoserial::Object* obj )
        {
            eoserial::unpack( *obj, "value", _value );
        }

        /** @brief Serializes the wrapped value under the "value" key. */
        eoserial::Object* pack(void) const
        {
            eoserial::Object* obj = new eoserial::Object;
            obj->add("value", eoserial::make( _value ) );
            return obj;
        }

    private:
        T _value;
};
# endif // __T_MPI_COMMON_H__

View file

@ -34,8 +34,6 @@ Authors:
#include <mpi/eoMpi.h>
#include <boost/mpi.hpp>
#include <vector>
using namespace std;
@ -55,50 +53,33 @@ class eoRealSerializable : public eoReal< eoMinimizingFitness >, public eoserial
eoserial::makeArray< vector<double>, eoserial::MakeAlgorithm >
( *this )
);
bool invalidFitness = invalid();
obj->add("invalid", eoserial::make( invalidFitness ) );
if( !invalidFitness )
{
double fitnessVal = fitness();
obj->add("fitness", eoserial::make( fitnessVal ) );
}
return obj;
}
void unpack( const eoserial::Object* obj )
{
this->clear();
eoserial::unpackArray< vector<double>, eoserial::Array::UnpackAlgorithm >
( *obj, "array", *this );
bool invalidFitness;
eoserial::unpack( *obj, "invalid", invalidFitness );
if( invalidFitness ) {
invalidate();
} else {
double fitnessVal;
eoserial::unpack<double>( *obj, "fitness", fitnessVal );
fitness( fitnessVal );
}
}
// Gives access to boost serialization
friend class boost::serialization::access;
/**
* Serializes the decomposition in a boost archive (useful for boost::mpi)
*/
template <class Archive>
void save( Archive & ar, const unsigned int version ) const
{
std::stringstream ss;
printOn( ss );
std::string asStr = ss.str();
ar & asStr;
(void) version; // avoid compilation warning
}
/**
* Deserializes the decomposition from a boost archive (useful for boost:mpi)
*/
template <class Archive>
void load( Archive & ar, const unsigned int version )
{
std::string asStr;
ar & asStr;
std::stringstream ss;
ss << asStr;
readFrom( ss );
(void) version; // avoid compilation warning
}
// Indicates that boost save and load operations are not the same.
BOOST_SERIALIZATION_SPLIT_MEMBER()
};
typedef eoRealSerializable EOT;
@ -196,14 +177,12 @@ int main(int ac, char** av)
eo::log << "Size of population : " << popSize << std::endl;
/*
eo::mpi::ParallelApplyStore< EOT > store( eval, eo::mpi::DEFAULT_MASTER );
store.wrapHandleResponse( new CatBestAnswers );
eoParallelPopLoopEval< EOT > popEval( assign, eo::mpi::DEFAULT_MASTER, &store );
*/
eoParallelPopLoopEval< EOT > popEval( assign, eo::mpi::DEFAULT_MASTER, eval, 5 );
//eoParallelPopLoopEval< EOT > popEval( assign, eo::mpi::DEFAULT_MASTER, eval, 5 );
eo::log << eo::quiet << "Before first evaluation." << std::endl;
popEval( pop, pop );

View file

@ -41,7 +41,7 @@ Authors:
# include <mpi/eoParallelApply.h>
# include <mpi/eoTerminateJob.h>
# include <boost/serialization/vector.hpp>
# include "t-mpi-common.h"
# include <iostream>
@ -50,10 +50,37 @@ using namespace std;
using namespace eo::mpi;
// The real job to execute, for the subworkers: add one to each element of a table.
struct SubWork: public eoUF< int&, void >
/*
* This class allows the user to easily serialize a vector of elements which implement eoserial::Persistent too.
*
* T is the type of the contained element, which must implement eoserial::Persistent too.
*
* Here, it contains SerializableBase<int>, which is a serializable integer that can be used as an integer.
*/
template< class T >
struct SerializableVector : public std::vector<T>, public eoserial::Persistent
{
void operator() ( int & x )
public:
void unpack( const eoserial::Object* obj )
{
this->clear();
eoserial::Array* vector = static_cast<eoserial::Array*>( obj->find("vector")->second );
vector->deserialize< std::vector<T>, eoserial::Array::UnpackObjectAlgorithm >( *this );
}
eoserial::Object* pack( void ) const
{
eoserial::Object* obj = new eoserial::Object;
obj->add("vector", eoserial::makeArray< std::vector<T>, eoserial::SerializablePushAlgorithm >( *this ) );
return obj;
}
};
// The real job to execute, for the subworkers: add one to each element of a table.
struct SubWork: public eoUF< SerializableBase<int>&, void >
{
void operator() ( SerializableBase<int> & x )
{
cout << "Subwork phase." << endl;
++x;
@ -62,7 +89,7 @@ struct SubWork: public eoUF< int&, void >
// Function called by both subworkers and delegates.
// v is the vector to process, rank is the MPI rank of the sub master
void subtask( vector<int>& v, int rank )
void subtask( vector< SerializableBase<int> >& v, int rank )
{
// Attach workers according to nodes.
// Submaster with rank 1 will have ranks 3 and 5 as subworkers.
@ -74,9 +101,9 @@ void subtask( vector<int>& v, int rank )
SubWork sw;
// Launch the job!
ParallelApplyStore<int> store( sw, rank );
ParallelApplyStore< SerializableBase<int> > store( sw, rank );
store.data( v );
ParallelApply<int> job( algo, rank, store );
ParallelApply< SerializableBase<int> > job( algo, rank, store );
job.run();
EmptyJob stop( algo, rank );
}
@ -85,9 +112,9 @@ void subtask( vector<int>& v, int rank )
// each result by two).
// Note that this work receives a vector of integers as an entry, while subworkers task's operator receives a simple
// integer.
struct Work: public eoUF< vector<int>&, void >
struct Work: public eoUF< SerializableVector< SerializableBase<int> >&, void >
{
void operator() ( vector<int>& v )
void operator() ( SerializableVector< SerializableBase<int> >& v )
{
cout << "Work phase..." << endl;
subtask( v, Node::comm().rank() );
@ -106,7 +133,7 @@ int main(int argc, char** argv)
throw std::runtime_error("World size should be 7.");
}
vector<int> v;
SerializableVector< SerializableBase<int> > v;
v.push_back(1);
v.push_back(3);
@ -116,7 +143,7 @@ int main(int argc, char** argv)
// As submasters' operator receives a vector<int> as an input, and ParallelApply takes a vector of
// operator's input as an input, we have to deal with a vector of vector of integers for the master task.
vector< vector<int> > metaV;
vector< SerializableVector< SerializableBase<int> > > metaV;
// Here, we send twice the same vector. We could also have splitted the first vector into two vectors, one
// containing the beginning and another one containing the end.
metaV.push_back( v );
@ -132,9 +159,9 @@ int main(int argc, char** argv)
{
Work w;
DynamicAssignmentAlgorithm algo( 1, 2 );
ParallelApplyStore< vector<int> > store( w, 0 );
ParallelApplyStore< SerializableVector< SerializableBase<int> > > store( w, 0 );
store.data( metaV );
ParallelApply< vector<int> > job( algo, 0, store );
ParallelApply< SerializableVector< SerializableBase<int> > > job( algo, 0, store );
job.run();
if( job.isMaster() )
{

View file

@ -0,0 +1,169 @@
# include <mpi/eoMultiStart.h>
using namespace eo::mpi;
#include <stdexcept>
#include <iostream>
#include <sstream>
#include <eo>
#include <es.h>
/*
* This file is based on the tutorial lesson 1. We'll consider that you know all the EO
* related parts of the algorithm and we'll focus our attention on parallelization.
*
* This file shows an example of multistart applied to a eoSGA (simple genetic
* algorithm). As individuals need to be serialized, we implement a class inheriting
* from eoReal (which is the base individual), so as to manipulate individuals as they
* were eoReal AND serialize them.
*
* The main function shows how to launch a multistart job, with default functors. If you
* don't know which functors to use, these ones should fit the most of your purposes.
*/
using namespace std;
/*
* eoReal is a vector of double: we just have to serializes the value and the fitness.
*/
// Serializable flavour of eoReal<double>: the genotype vector is stored
// under "vector", and the fitness under "fitness" — but only when it is
// valid, which the "invalid_fitness" flag records. unpack() mirrors pack()
// exactly, so the field names must stay in sync between the two methods.
class SerializableEOReal: public eoReal<double>, public eoserial::Persistent
{
    public:

        // Forwards to eoReal: an individual of `size` genes, all set to `value`.
        SerializableEOReal(unsigned size = 0, double value = 0.0) :
            eoReal<double>(size, value)
        {
            // empty
        }

        // Rebuilds the individual from its serialized form; clears any
        // previous genes first, then restores (or invalidates) the fitness.
        void unpack( const eoserial::Object* obj )
        {
            this->clear();
            eoserial::unpackArray
                < std::vector<double>, eoserial::Array::UnpackAlgorithm >
                ( *obj, "vector", *this );

            bool invalidFitness;
            eoserial::unpack( *obj, "invalid_fitness", invalidFitness );
            if( invalidFitness )
            {
                this->invalidate();
            } else
            {
                double f;
                eoserial::unpack( *obj, "fitness", f );
                this->fitness( f );
            }
        }

        // Serializes genes + fitness state; "fitness" is omitted entirely
        // when the fitness is invalid.
        eoserial::Object* pack( void ) const
        {
            eoserial::Object* obj = new eoserial::Object;
            obj->add( "vector", eoserial::makeArray< std::vector<double>, eoserial::MakeAlgorithm >( *this ) );

            bool invalidFitness = this->invalid();
            obj->add( "invalid_fitness", eoserial::make( invalidFitness ) );
            if( !invalidFitness )
            {
                obj->add( "fitness", eoserial::make( this->fitness() ) );
            }
            return obj;
        }
};
// REPRESENTATION
//-----------------------------------------------------------------------------
// define your individuals
typedef SerializableEOReal Indi;
// EVAL
//-----------------------------------------------------------------------------
// a simple fitness function that computes the euclidian norm of a real vector
// @param _indi A real-valued individual
// Fitness function: negated squared Euclidean norm of the genotype.
// The GA maximizes, so the optimum (0) is reached at the null vector.
double real_value(const Indi & _indi)
{
    double acc = 0.0;
    const unsigned n = _indi.size();
    for (unsigned j = 0; j < n; ++j)
    {
        acc += _indi[j] * _indi[j];
    }
    return -acc;
}
/************************** PARALLELIZATION JOB *******************************/
// Multistart demo: builds a plain eoSGA, then runs it as a parallel
// multistart job (5 starts, then 10 more) through eo::mpi.
int main(int argc, char **argv)
{
    // Initializes the MPI layer (must happen before any eo::mpi object).
    Node::init( argc, argv );

    // PARAMETRES
    // all parameters are hard-coded!
    const unsigned int SEED = 133742; // seed for random number generator
    const unsigned int VEC_SIZE = 8; // Number of object variables in genotypes
    const unsigned int POP_SIZE = 100; // Size of population
    const unsigned int T_SIZE = 3; // size for tournament selection
    const unsigned int MAX_GEN = 100; // Maximum number of generation before STOP
    const float CROSS_RATE = 0.8; // Crossover rate
    const double EPSILON = 0.01; // range for real uniform mutation
    const float MUT_RATE = 0.5; // mutation rate

    // Fitness and initial population (uniform random real vectors).
    eoEvalFuncPtr<Indi> eval( real_value );
    eoPop<Indi> pop;
    eoUniformGenerator< double > generator;
    eoInitFixedLength< Indi > init( VEC_SIZE, generator );
    pop = eoPop<Indi>( POP_SIZE, init );

    // Variation operators and stopping criterion of one single start.
    eoDetTournamentSelect<Indi> select(T_SIZE);
    eoSegmentCrossover<Indi> xover;
    eoUniformMutation<Indi> mutation(EPSILON);
    eoGenContinue<Indi> continuator(MAX_GEN);

    /* Does work too with a steady fit continuator. */
    // eoSteadyFitContinue< Indi > continuator( 10, 50 );

    eoSGA<Indi> gga(select, xover, CROSS_RATE, mutation, MUT_RATE,
            eval, continuator);

    /* How to assign tasks, which are starts? */
    DynamicAssignmentAlgorithm assignmentAlgo;

    /* Before a worker starts its algorithm, how does it reinits the population?
     * There are a few default usable functors, defined in eoMultiStart.h.
     *
     * This one (ReuseSamePopEA) doesn't modify the population after a start, so
     * the same population is reevaluated on each multistart: the solution tends
     * to get better and better.
     */
    ReuseSamePopEA< Indi > resetAlgo( continuator, pop, eval );

    /**
     * How to send seeds to the workers, at the beginning of the parallel job?
     * This functors indicates that seeds should be random values.
     */
    GetRandomSeeds< Indi > getSeeds( SEED );

    // Builds the store
    MultiStartStore< Indi > store(
            gga,
            DEFAULT_MASTER,
            resetAlgo,
            getSeeds);

    // Creates the multistart job and runs it.
    // The last argument indicates that we want to launch 5 runs.
    MultiStart< Indi > msjob( assignmentAlgo, DEFAULT_MASTER, store, 5 );
    msjob.run();

    if( msjob.isMaster() )
    {
        msjob.best_individuals().sort();
        std::cout << "Global best individual has fitness " << msjob.best_individuals().best_element().fitness() << std::endl;
    }

    // Second job on the SAME store: with ReuseSamePopEA the workers keep
    // their evolved populations, so these 10 extra starts continue from
    // where the first 5 left off.
    MultiStart< Indi > msjob10( assignmentAlgo, DEFAULT_MASTER, store, 10 );
    msjob10.run();

    if( msjob10.isMaster() )
    {
        msjob10.best_individuals().sort();
        std::cout << "Global best individual has fitness " << msjob10.best_individuals().best_element().fitness() << std::endl;
    }

    return 0;
}

View file

@ -25,6 +25,9 @@ Authors:
* incremented... in a parallel fashion. While this operation is very easy to perform even on a single host, it's just
* an example for parallel apply use.
*
* The table of integers has to be serialized before it's sent. The wrapper object SerializableBase allows to serialize
* any type and manipulate it like this type: SerializableBase<int> can be exactly be used as an integer.
*
* Besides, this is also a test for assignment (scheduling) algorithms, in different cases. The test succeeds if and
* only if the program terminates without any segfault ; otherwise, there could be a deadlock which prevents the end or
* a segfault at any time.
@ -40,7 +43,10 @@ Authors:
# include <mpi/eoParallelApply.h>
# include <mpi/eoTerminateJob.h>
# include "t-mpi-common.h"
# include <iostream>
# include <cstdlib>
# include <vector>
using namespace std;
@ -50,11 +56,11 @@ using namespace eo::mpi;
/*
* The function to be called on each element of the table: just increment the value.
*/
struct plusOne : public eoUF< int&, void >
struct plusOne : public eoUF< SerializableBase<int>&, void >
{
void operator() ( int & x )
void operator() ( SerializableBase<int> & x )
{
++x;
++x; // implicit conversion of SerializableBase<int> in the integer it contains
}
};
@ -79,7 +85,7 @@ int main(int argc, char** argv)
// Initializes a vector with random values.
srand( time(0) );
vector<int> v;
vector< SerializableBase<int> > v;
for( int i = 0; i < 1000; ++i )
{
v.push_back( rand() );
@ -90,7 +96,7 @@ int main(int argc, char** argv)
// incremented and we can compare the returned value of each element to the value of each element in originalV +
// offset. If the two values are different, there has been a problem.
int offset = 0;
vector<int> originalV = v;
vector< SerializableBase<int> > originalV = v;
// Instanciates the functor to apply on each element
plusOne plusOneInstance;
@ -166,11 +172,11 @@ int main(int argc, char** argv)
for( unsigned int i = 0; i < tests.size(); ++i )
{
// Instanciates a store with the functor, the master rank and size of packet (see ParallelApplyStore doc).
ParallelApplyStore< int > store( plusOneInstance, eo::mpi::DEFAULT_MASTER, 3 );
ParallelApplyStore< SerializableBase<int> > store( plusOneInstance, eo::mpi::DEFAULT_MASTER, 3 );
// Updates the contained data
store.data( v );
// Creates the job with the assignment algorithm, the master rank and the store
ParallelApply< int > job( *(tests[i].assign), eo::mpi::DEFAULT_MASTER, store );
ParallelApply< SerializableBase<int> > job( *(tests[i].assign), eo::mpi::DEFAULT_MASTER, store );
// Only master writes information
if( job.isMaster() )

View file

@ -33,7 +33,10 @@ Authors:
# include <mpi/eoParallelApply.h>
# include <mpi/eoTerminateJob.h>
# include "t-mpi-common.h"
# include <iostream>
# include <cstdlib>
# include <vector>
using namespace std;
@ -41,9 +44,9 @@ using namespace std;
using namespace eo::mpi;
// Job functor.
struct plusOne : public eoUF< int&, void >
struct plusOne : public eoUF< SerializableBase<int>&, void >
{
void operator() ( int & x )
void operator() ( SerializableBase<int>& x )
{
++x;
}
@ -83,28 +86,28 @@ int main(int argc, char** argv)
Node::init( argc, argv );
srand( time(0) );
vector<int> v;
vector< SerializableBase<int> > v;
for( int i = 0; i < 1000; ++i )
{
v.push_back( rand() );
}
int offset = 0;
vector<int> originalV = v;
vector< SerializableBase<int> > originalV = v;
plusOne plusOneInstance;
StaticAssignmentAlgorithm assign( v.size() );
ParallelApplyStore< int > store( plusOneInstance, eo::mpi::DEFAULT_MASTER, 1 );
ParallelApplyStore< SerializableBase<int> > store( plusOneInstance, eo::mpi::DEFAULT_MASTER, 1 );
store.data( v );
// This is the only thing which changes: we wrap the IsFinished function.
// According to RAII, we'll delete the invokated wrapper at the end of the main ; the store won't delete it
// automatically.
IsFinishedParallelApply<int>* wrapper = new ShowWrappedResult<int>;
store.wrapIsFinished( wrapper );
ShowWrappedResult< SerializableBase<int> > wrapper;
store.wrapIsFinished( &wrapper );
ParallelApply<int> job( assign, eo::mpi::DEFAULT_MASTER, store );
ParallelApply< SerializableBase<int> > job( assign, eo::mpi::DEFAULT_MASTER, store );
// Equivalent to:
// Job< ParallelApplyData<int> > job( assign, 0, store );
job.run();
@ -125,8 +128,6 @@ int main(int argc, char** argv)
cout << endl;
}
delete wrapper;
return 0;
}

View file

@ -0,0 +1,104 @@
# include <mpi/eoMpi.h>
using namespace eo::mpi;
/*
* This file is a template for a new eo::mpi::Job. You have everything that should be necessary to implement a new
* parallelized algorithm.
*
* Replace __TEMPLATE__ by the name of your algorithm (for instance: MultiStart, ParallelApply, etc.).
*/
/**
 * Shared state for the __TEMPLATE__ parallelized job.
 *
 * Instances of this struct are what the job's functors see through their
 * inherited _data member. Put here everything the master and the workers
 * need to exchange: inputs, intermediate tasks, collected results, ...
 *
 * NOTE(review): the exact contract of the data object is defined by the
 * JobStore/Job framework in eoMpi.h — confirm required members there.
 */
template< class EOT >
struct __TEMPLATE__Data
{
};
/**
 * Task-sending functor for the __TEMPLATE__ job.
 *
 * Judging from the SendTaskFunction base, this is invoked on the master
 * side to ship one unit of work to the worker of rank wrkRank — confirm
 * against eoMpi.h. The shared job state is available as _data.
 *
 * @param wrkRank MPI rank of the worker that should receive the task.
 */
template< class EOT >
class SendTask__TEMPLATE__ : public SendTaskFunction< __TEMPLATE__Data< EOT > >
{
    public:
        using SendTaskFunction< __TEMPLATE__Data< EOT > >::_data;

        void operator()( int wrkRank )
        {
            // TODO implement me: serialize a task from _data and send it to wrkRank.
        }
};
/**
 * Response-handling functor for the __TEMPLATE__ job.
 *
 * Judging from the HandleResponseFunction base, this runs on the master
 * when the worker of rank wrkRank answers — confirm against eoMpi.h.
 * Typical use: receive the result and merge it into _data.
 *
 * @param wrkRank MPI rank of the worker whose response is being handled.
 */
template< class EOT >
class HandleResponse__TEMPLATE__ : public HandleResponseFunction< __TEMPLATE__Data< EOT > >
{
    public:
        using HandleResponseFunction< __TEMPLATE__Data< EOT > >::_data;

        void operator()( int wrkRank )
        {
            // TODO implement me: receive the worker's result and store it in _data.
        }
};
/**
 * Task-processing functor for the __TEMPLATE__ job.
 *
 * Judging from the ProcessTaskFunction base, this runs on the worker
 * side — confirm against eoMpi.h. Typical use: receive a task from the
 * master, compute, and send the result back; _data holds the shared state.
 */
template< class EOT >
class ProcessTask__TEMPLATE__ : public ProcessTaskFunction< __TEMPLATE__Data< EOT > >
{
    public:
        using ProcessTaskFunction< __TEMPLATE__Data<EOT> >::_data;

        void operator()()
        {
            // TODO implement me: receive a task, do the work, send the result back.
        }
};
/**
 * Termination predicate for the __TEMPLATE__ job.
 *
 * Judging from the IsFinishedFunction base, the master calls this to decide
 * whether the whole job is done — confirm against eoMpi.h. The shared job
 * state is available as _data.
 *
 * @return true when no more tasks remain to be distributed.
 */
template< class EOT >
class IsFinished__TEMPLATE__ : public IsFinishedFunction< __TEMPLATE__Data< EOT > >
{
    public:
        using IsFinishedFunction< __TEMPLATE__Data< EOT > >::_data;

        bool operator()()
        {
            // TODO implement me: return true when the job is complete.
            // The original stub had no return statement at all; flowing off
            // the end of a value-returning function is undefined behaviour in
            // C++. Returning true makes an unmodified skeleton terminate
            // immediately instead of misbehaving.
            return true;
        }
};
/**
 * JobStore for the __TEMPLATE__ job: bundles the four functors above with
 * the shared data object handed to them.
 */
template< class EOT >
class __TEMPLATE__Store : public JobStore< __TEMPLATE__Data< EOT > >
{
    public:
        /**
         * Accessor to the job's shared data.
         *
         * @return Pointer to the data object — the stub returns a null
         *         pointer; replace it with the real instance owned by the
         *         store before using this skeleton.
         */
        __TEMPLATE__Data<EOT>* data()
        {
            // TODO implement me
            return 0;
        }
};
/**
 * The __TEMPLATE__ parallelized job itself.
 *
 * Wires an assignment algorithm, a master rank and a store together by
 * forwarding them to the MultiJob base; add any algorithm-specific setup
 * in the constructor body.
 *
 * @param algo       Assignment (scheduling) algorithm distributing tasks.
 * @param masterRank MPI rank acting as the master.
 * @param store      Store holding the functors and the shared data.
 */
template< class EOT >
class __TEMPLATE__ : public MultiJob< __TEMPLATE__Data< EOT > >
{
    public:

        __TEMPLATE__( AssignmentAlgorithm & algo,
                int masterRank,
                __TEMPLATE__Store< EOT > & store ) :
            MultiJob< __TEMPLATE__Data< EOT > >( algo, masterRank, store )
        {
            // TODO implement me: job-specific initialization, if any.
        }
};
/*
int main(int argc, char **argv)
{
Node::init( argc, argv );
DynamicAssignmentAlgorithm assignmentAlgo;
__TEMPLATE__Store<int> store;
__TEMPLATE__<int> job( assignmentAlgo, DEFAULT_MASTER, store );
}
*/

View file

@ -0,0 +1,63 @@
How to install EoMPI
====================
Install OpenMpi
---------------
1) Download OpenMPI on their website or with the following command:
wget http://www.open-mpi.org/software/ompi/v1.6/downloads/openmpi-1.6.tar.bz2
2) Untar the downloaded archive into a directory and change your working directory to it:
tar -xvjf openmpi*.tar.bz2
cd openmpi-X.Y
3) Use the configuration script to indicate the directory in which OpenMPI should be installed, along with other options:
Simplest configuration:
./configure --prefix=/home/`whoami`/openmpi/
Only static libraries:
./configure --enable-static --disable-shared
Only static libraries, with prefix, disable build for Fortran77 and Fortran90, add support for SGE:
./configure --enable-static --disable-shared --prefix=/home/`whoami`/openmpi/ --disable-mpi-f77 --disable-mpi-f90 --with-sge
Other options are available in the README file.
4) Make it and install:
In sequential:
make all install
Or in parallel:
make -j 2 all
make install
5) Try to compile and run the sample program:
~/openmpi/bin/mpicxx -o sample mpi.c
~/openmpi/bin/mpirun -np 3 ./sample
Configure EO to use MPI
-----------------------
You only need to configure eo-conf.cmake so as to use MPI :
1) Put the WITH_MPI boolean to true:
SET(WITH_MPI TRUE CACHE BOOL "Use mpi ?" FORCE)
2) Indicate the directory in which you have installed OpenMPI:
SET(MPI_DIR "/where/did/you/install/openmpi" CACHE PATH "OpenMPI directory" FORCE)
3) Recompile eo:
./distclean
./build_gcc_linux_release.sh
4) If you encounter any issues, don't hesitate to contact the EO mailing list:
eodev-main@lists.sourceforge.net

View file

@ -0,0 +1,35 @@
# include <mpi.h>
# include <stdio.h>
# include <string.h>
/*
 * Minimal MPI sanity-check program: the root process (rank 0) sends an
 * integer and a C string to every other process; each worker prints what
 * it received. Used to verify that the OpenMPI installation works, e.g.:
 *
 *   mpicxx -o sample mpi.c
 *   mpirun -np 3 ./sample
 */
int main(int argc, char **argv)
{
    int rank, size;
    char someString[] = "Can haz cheezburgerz?";

    MPI_Init(&argc, &argv);
    MPI_Comm_rank( MPI_COMM_WORLD, & rank );
    MPI_Comm_size( MPI_COMM_WORLD, & size );

    if ( rank == 0 )
    {
        /* Master: broadcast the same number and string to every worker. */
        int n = 42;
        int i;
        for( i = 1; i < size; ++i)
        {
            MPI_Send( &n, 1, MPI_INT, i, 0, MPI_COMM_WORLD );
            /* Pass the array itself (decays to char*), not &someString,
             * which is a pointer-to-array: same address, wrong type and
             * misleading intent. strlen()+1 includes the terminating NUL
             * so the receiver gets a valid C string. */
            MPI_Send( someString, strlen( someString )+1, MPI_CHAR, i, 0, MPI_COMM_WORLD );
        }
    } else {
        /* Worker: receive the number then the string, and print both. */
        char buffer[ 128 ];
        int received;
        MPI_Status stat;

        MPI_Recv( &received, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &stat );
        printf( "[Worker] Number : %d\n", received );
        MPI_Recv( buffer, 128, MPI_CHAR, 0, 0, MPI_COMM_WORLD, &stat );
        printf( "[Worker] String : %s\n", buffer );
    }

    MPI_Finalize();
    return 0;
}