manual merge on apply.h

This commit is contained in:
Johann Dreo 2012-07-20 15:05:08 +02:00
commit 20d06df7d1
65 changed files with 7559 additions and 65 deletions

View file

@ -5,7 +5,32 @@
INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
######################################################################################
### 2) Define the eo target
### 2) Optional: add MPI and Boost MPI dependencies.
######################################################################################
IF(WITH_MPI)
MESSAGE("[EO] Compilation with MPI and BoostMPI.")
SET(CMAKE_CXX_COMPILER "${MPI_DIR}/bin/mpicxx")
# headers location
INCLUDE_DIRECTORIES(${MPI_DIR}/include)
INCLUDE_DIRECTORIES(${BOOST_DIR}/include)
# lib location
LINK_DIRECTORIES(${MPI_DIR}/lib)
LINK_DIRECTORIES(${BOOST_DIR}/lib)
# for conditional compilation in code
ADD_DEFINITIONS(-DWITH_MPI)
LINK_LIBRARIES(boost_mpi boost_serialization)
ADD_SUBDIRECTORY(mpi)
ENDIF()
######################################################################################
### 3) Define the eo target
######################################################################################
SET(EO_LIB_OUTPUT_PATH ${EO_BINARY_DIR}/lib)
@ -27,14 +52,14 @@ FILE(GLOB HDRS *.h eo)
INSTALL(FILES ${HDRS} DESTINATION include/eo COMPONENT headers)
######################################################################################
### 3) Optionnal: define your target(s)'s version: no effect for windows
### 4) Optionnal: define your target(s)'s version: no effect for windows
######################################################################################
SET(EO_VERSION ${GLOBAL_VERSION})
SET_TARGET_PROPERTIES(eo PROPERTIES VERSION "${EO_VERSION}")
######################################################################################
### 4) Where must cmake go now ?
### 5) Where must cmake go now ?
######################################################################################
ADD_SUBDIRECTORY(do)
@ -43,6 +68,7 @@ ADD_SUBDIRECTORY(ga)
ADD_SUBDIRECTORY(gp)
ADD_SUBDIRECTORY(other)
ADD_SUBDIRECTORY(utils)
ADD_SUBDIRECTORY(serial)
IF(ENABLE_PYEO)
ADD_SUBDIRECTORY(pyeo)

View file

@ -19,7 +19,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: todos@geneura.ugr.es, http://geneura.ugr.es
mak@dhi.dk
mak@dhi.dk
*/
//-----------------------------------------------------------------------------
@ -51,41 +51,41 @@ void apply(eoUF<EOT&, void>& _proc, std::vector<EOT>& _pop)
double t1 = 0;
if ( eo::parallel.enableResults() )
{
t1 = omp_get_wtime();
}
{
t1 = omp_get_wtime();
}
if (!eo::parallel.isDynamic())
{
{
#pragma omp parallel for if(eo::parallel.isEnabled()) //default(none) shared(_proc, _pop, size)
#ifdef _MSC_VER
//Visual Studio supports only OpenMP version 2.0 in which
//an index variable must be of a signed integral type
for (long long i = 0; i < size; ++i) { _proc(_pop[i]); }
//Visual Studio supports only OpenMP version 2.0 in which
//an index variable must be of a signed integral type
for (long long i = 0; i < size; ++i) { _proc(_pop[i]); }
#else // _MSC_VER
for (size_t i = 0; i < size; ++i) { _proc(_pop[i]); }
for (size_t i = 0; i < size; ++i) { _proc(_pop[i]); }
#endif
}
}
else
{
{
#pragma omp parallel for schedule(dynamic) if(eo::parallel.isEnabled())
#ifdef _MSC_VER
//Visual Studio supports only OpenMP version 2.0 in which
//an index variable must be of a signed integral type
for (long long i = 0; i < size; ++i) { _proc(_pop[i]); }
//Visual Studio supports only OpenMP version 2.0 in which
//an index variable must be of a signed integral type
for (long long i = 0; i < size; ++i) { _proc(_pop[i]); }
#else // _MSC_VER
//doesnot work with gcc 4.1.2
//default(none) shared(_proc, _pop, size)
for (size_t i = 0; i < size; ++i) { _proc(_pop[i]); }
//doesnot work with gcc 4.1.2
//default(none) shared(_proc, _pop, size)
for (size_t i = 0; i < size; ++i) { _proc(_pop[i]); }
#endif
}
}
if ( eo::parallel.enableResults() )
{
double t2 = omp_get_wtime();
eoLogger log;
log << eo::file(eo::parallel.prefix()) << t2 - t1 << ' ';
}
{
double t2 = omp_get_wtime();
eoLogger log;
log << eo::file(eo::parallel.prefix()) << t2 - t1 << ' ';
}
#else // _OPENMP
@ -109,7 +109,7 @@ void apply(eoUF<EOT&, void>& _proc, std::vector<EOT>& _pop)
// //default(none) shared(_proc, _pop, size)
// for (size_t i = 0; i < size; ++i)
// {
// _proc(_pop[i]);
// _proc(_pop[i]);
// }
// }
@ -127,7 +127,7 @@ void apply(eoUF<EOT&, void>& _proc, std::vector<EOT>& _pop)
// //default(none) shared(_proc, _pop, size)
// for (size_t i = 0; i < size; ++i)
// {
// _proc(_pop[i]);
// _proc(_pop[i]);
// }
// }

View file

@ -142,6 +142,9 @@
#include <utils/eoRealVectorBounds.h> // includes eoRealBounds.h
#include <utils/eoIntBounds.h> // no eoIntVectorBounds
// Serialization stuff
#include <serial/eoSerial.h>
// aliens
#include <other/external_eo>
#include <eoCounter.h>

View file

@ -37,8 +37,6 @@
#include <eoMergeReduce.h>
#include <eoReplacement.h>
template <class EOT> class eoIslandsEasyEA ;
template <class EOT> class eoDistEvalEasyEA ;
@ -102,6 +100,33 @@ template<class EOT> class eoEasyEA: public eoAlgo<EOT>
offspring.reserve(_offspringSize); // This line avoids an incremental resize of offsprings.
}
/**
 * @brief Ctor allowing to specify which pop eval function we're going to use.
 *
 * Overload of the breed/replace ctor which additionally takes the population
 * evaluation functor and an offspring size. Passing the pop eval function makes
 * it possible to choose, for instance, a parallel evaluation.
 *
 * @param _continuator Continuation criterion tested after each generation.
 * @param _eval Evaluation functor for a single individual (also feeds loopEval).
 * @param _pop_eval Evaluation functor applied to whole populations (parents + offspring).
 * @param _breed Breeding operator producing the offspring.
 * @param _replace Replacement strategy merging parents and offspring.
 * @param _offspringSize Expected number of offspring, used to pre-allocate memory.
 */
eoEasyEA(
    eoContinue<EOT>& _continuator,
    eoEvalFunc<EOT>& _eval,
    eoPopEvalFunc<EOT>& _pop_eval,
    eoBreed<EOT>& _breed,
    eoReplacement<EOT>& _replace,
    unsigned _offspringSize
) : continuator(_continuator),
    eval (_eval),
    loopEval(_eval),
    popEval(_pop_eval),
    selectTransform(dummySelect, dummyTransform), // select/transform unused: breeding is delegated to _breed
    breed(_breed),
    mergeReduce(dummyMerge, dummyReduce), // merge/reduce unused: replacement is delegated to _replace
    replace(_replace),
    isFirstCall(true)
{
    offspring.reserve(_offspringSize); // This line avoids an incremental resize of offsprings.
}
/*
eoEasyEA(eoContinue <EOT> & _continuator,
eoPopEvalFunc <EOT> & _pop_eval,
@ -219,45 +244,44 @@ template<class EOT> class eoEasyEA: public eoAlgo<EOT>
/// Apply a few generation of evolution to the population.
virtual void operator()(eoPop<EOT>& _pop)
{
if (isFirstCall)
{
size_t total_capacity = _pop.capacity() + offspring.capacity();
_pop.reserve(total_capacity);
offspring.reserve(total_capacity);
isFirstCall = false;
}
eoPop<EOT> empty_pop;
popEval(empty_pop, _pop); // A first eval of pop.
do
if (isFirstCall)
{
try
size_t total_capacity = _pop.capacity() + offspring.capacity();
_pop.reserve(total_capacity);
offspring.reserve(total_capacity);
isFirstCall = false;
}
eoPop<EOT> empty_pop;
do
{
try
{
unsigned pSize = _pop.size();
offspring.clear(); // new offspring
unsigned pSize = _pop.size();
breed(_pop, offspring);
offspring.clear(); // new offspring
popEval(_pop, offspring); // eval of parents + offspring if necessary
breed(_pop, offspring);
replace(_pop, offspring); // after replace, the new pop. is in _pop
popEval(_pop, offspring); // eval of parents + offspring if necessary
if (pSize > _pop.size())
throw std::runtime_error("Population shrinking!");
else if (pSize < _pop.size())
throw std::runtime_error("Population growing!");
replace(_pop, offspring); // after replace, the new pop. is in _pop
if (pSize > _pop.size())
throw std::runtime_error("Population shrinking!");
else if (pSize < _pop.size())
throw std::runtime_error("Population growing!");
}
catch (std::exception& e)
catch (std::exception& e)
{
std::string s = e.what();
s.append( " in eoEasyEA");
throw std::runtime_error( s );
std::string s = e.what();
s.append( " in eoEasyEA");
throw std::runtime_error( s );
}
}
while ( continuator( _pop ) );
while ( continuator( _pop ) );
}
protected :

View file

@ -30,6 +30,16 @@
#include <eoEvalFunc.h>
#include <apply.h>
# ifdef WITH_MPI
#include <mpi/eoMpi.h>
#include <mpi/eoTerminateJob.h>
#include <mpi/eoMpiAssignmentAlgorithm.h>
#include <mpi/eoParallelApply.h>
#include <utils/eoParallel.h>
#include <cmath> // ceil
# endif // WITH_MPI
/** eoPopEvalFunc: This abstract class is for GLOBAL evaluators
* of a population after variation.
* It takes 2 populations (typically the parents and the offspring)
@ -77,6 +87,142 @@ private:
eoEvalFunc<EOT> & eval;
};
#ifdef WITH_MPI
/**
* @brief Evaluator of a population of EOT which uses parallelization to evaluate individuals.
*
* This class implements an instance of eoPopEvalFunc that applies a private eoEvalFunc to
* all offspring, but in a parallel way. The original process becomes the central host from a network ("master"), and
other machines available in the MPI network ("slaves") are used as evaluators. The population to evaluate is split into
small packets of individuals, which are sent to the evaluators, which perform the effective call to eval. Once all
* the individuals have been evaluated, they are returned to the master. The whole process is entirely invisible to the
* eyes of the user, who just has to launch a certain number of processes in MPI so as to have a result.
*
The eoEvalFunc is no longer directly given, but is stored in the eo::mpi::ParallelApplyStore, which can be
instantiated automatically if none is given at construction.
*
* The use of this class requires the user to have called the eo::mpi::Node::init function, at the beginning of its
* program.
*
* @ingroup Evaluation Parallel
*
* @author Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
template<class EOT>
class eoParallelPopLoopEval : public eoPopEvalFunc<EOT>
{
    // NOTE(review): the implicit copy ctor/assignment would share `store` and, when
    // needToDeleteStore is true, cause a double delete. Presumably instances are
    // never copied — confirm, or disable copying.
public:
    /**
     * @brief Constructor which creates the job store for the user.
     *
     * This constructor is the simplest to use, as it creates the store used by the parallel job, for the user.
     * The user just precises the scheduling algorithm, the rank of the master and then gives its eval function and
     * the size of a packet (how many individuals should be in a single message to evaluator).
     *
     * @param _assignAlgo The scheduling algorithm used to give orders to evaluators.
     * @param _masterRank The MPI rank of the master.
     * @param _eval The evaluation functor used to evaluate each individual in the population.
     * @param _packetSize The number of individuals to send in one message to evaluator, and which are evaluated at
     * a time.
     */
    eoParallelPopLoopEval(
        // Job parameters
        eo::mpi::AssignmentAlgorithm& _assignAlgo,
        int _masterRank,
        // Default parameters for store
        eoEvalFunc<EOT> & _eval,
        int _packetSize = 1
    ) :
        assignAlgo( _assignAlgo ),
        masterRank( _masterRank ),
        needToDeleteStore( true ) // we used new, we'll have to use delete (RAII)
    {
        store = new eo::mpi::ParallelApplyStore<EOT>( _eval, _masterRank, _packetSize );
    }

    /**
     * @brief Constructor which allows the user to customize its job store.
     *
     * This constructor allows the user to customize the store, for instance by adding wrappers and other
     * functionalities, before using it for the parallelized evaluation.
     *
     * @param _assignAlgo The scheduling algorithm used to give orders to evaluators.
     * @param _masterRank The MPI rank of the master.
     * @param _store Pointer to a parallel eval store given by the user, who keeps ownership of it.
     */
    eoParallelPopLoopEval(
        // Job parameters
        eo::mpi::AssignmentAlgorithm& _assignAlgo,
        int _masterRank,
        eo::mpi::ParallelApplyStore<EOT>* _store
    ) :
        assignAlgo( _assignAlgo ),
        masterRank( _masterRank ),
        store( _store ),
        needToDeleteStore( false ) // we haven't used new for creating store, so we won't delete it (RAII)
    {
        // empty
    }

    /**
     * @brief Destructor. Sends a message to all evaluators indicating that the global loop (eoEasyEA, for
     * instance) is over, then releases the store if this instance owns it.
     */
    ~eoParallelPopLoopEval()
    {
        // Only the master has to send the termination message
        if( eo::mpi::Node::comm().rank() == masterRank )
        {
            eo::mpi::EmptyJob job( assignAlgo, masterRank );
            job.run();
        }
        // RAII
        if( needToDeleteStore )
        {
            delete store;
        }
    }

    /**
     * @brief Parallel implementation of the operator().
     *
     * @param _parents Population of parents (ignored).
     * @param _offspring Population of children, which will be evaluated.
     */
    void operator()( eoPop<EOT> & _parents, eoPop<EOT> & _offspring )
    {
        (void)_parents;
        // Reinits the store and the scheduling algorithm
        store->data( _offspring );
        // For static scheduling, it's mandatory to reinit attributions
        int nbWorkers = assignAlgo.availableWorkers();
        assignAlgo.reinit( nbWorkers );
        if( ! eo::parallel.isDynamic() ) {
            // NOTE(review): assumes nbWorkers > 0 — the division below is undefined
            // for a run with no available worker; confirm the scheduling algorithm
            // guarantees at least one.
            store->data()->packetSize = ceil( static_cast<double>( _offspring.size() ) / nbWorkers );
        }
        // Effectively launches the job.
        eo::mpi::ParallelApply<EOT> job( assignAlgo, masterRank, *store );
        job.run();
    }

private:
    // Scheduling algorithm
    eo::mpi::AssignmentAlgorithm & assignAlgo;
    // Master MPI rank
    int masterRank;
    // Store (owned only when needToDeleteStore is true)
    eo::mpi::ParallelApplyStore<EOT>* store;
    // Do we have to delete the store by ourselves ?
    bool needToDeleteStore;
};
/**
* @example t-mpi-eval.cpp
*/
#endif
/////////////////////////////////////////////////////////////
// eoTimeVaryingLoopEval
/////////////////////////////////////////////////////////////

32
eo/src/mpi/CMakeLists.txt Normal file
View file

@ -0,0 +1,32 @@
######################################################################################
### 1) Include the sources
######################################################################################
INCLUDE_DIRECTORIES(${EO_SOURCE_DIR}/src)
INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
######################################################################################
### 2) Define the eompi target
######################################################################################
# Static library holding the MPI parallelization helpers (eoMpi.h and friends).
SET(EOMPI_LIB_OUTPUT_PATH ${EO_BINARY_DIR}/lib)
SET(LIBRARY_OUTPUT_PATH ${EOMPI_LIB_OUTPUT_PATH})
SET(EOMPI_SOURCES
eoMpi.cpp
)
ADD_LIBRARY(eompi STATIC ${EOMPI_SOURCES})
INSTALL(TARGETS eompi ARCHIVE DESTINATION lib COMPONENT libraries)
# Install every public header of this directory.
FILE(GLOB HDRS *.h)
INSTALL(FILES ${HDRS} DESTINATION include/eo/mpi COMPONENT headers)
######################################################################################
### 3) Optional: define the target's version (no effect on Windows)
######################################################################################
SET(EOMPI_VERSION ${GLOBAL_VERSION})
SET_TARGET_PROPERTIES(eompi PROPERTIES VERSION "${EOMPI_VERSION}")
######################################################################################

11
eo/src/mpi/eoMpi.cpp Normal file
View file

@ -0,0 +1,11 @@
# include "eoMpi.h"

// Out-of-line definitions for the globals declared in eoMpi.h.
namespace eo
{
namespace mpi
{
// The MPI communicator shared by all nodes (declared in eoMpiNode.h, included by eoMpi.h).
bmpi::communicator Node::_comm;
// Global timer allowing the user to generate statistics about computation times.
eoTimerStat timerStat;
}
}

836
eo/src/mpi/eoMpi.h Normal file
View file

@ -0,0 +1,836 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# ifndef __EO_MPI_H__
# define __EO_MPI_H__
# include <vector> // std::vector
# include <utils/eoLogger.h>
# include <utils/eoTimer.h>
# include <eoFunctor.h>
# include <eoExceptions.h>
# include "eoMpiNode.h"
# include "eoMpiAssignmentAlgorithm.h"
namespace eo
{
/**
* @ingroup Parallel
* @defgroup MPI Message Passing Interface
* @brief See namespace eo::mpi to have all explanations about this module.
* @{
*/
/**
* @brief MPI parallelization helpers for EO.
*
* This namespace contains parallelization functions which help to parallelize computations in EO. It is based on a
* generic algorithm, which is then customized with functors, corresponding to the algorithm main steps. These
* computations are centralized, i.e there is one central host whose role is to handle the steps of the algorithm ;
* we call it the "master". The other hosts just have to perform a "dummy" computation, which may be any kind of
* processing ; we call them, the "slaves", or less pejoratively, the "workers". Workers can communicate to each
* other, but they receive their orders from the Master and send him back some results. A worker can also be the
* master of a different parallelization process, as soon as it is a part of its work. Machines of the network, also
* called hosts, are identified by an unique number: their rank. At any time during the execution of the program,
* all the hosts know the total number of hosts.
*
* A parallelized Job is a set of tasks which are independent (i.e. they can be executed in any order without
* modifying the result) and which take a data input and compute a data output to be sent to the Master. The data can be
* of any type, however they have to be serialized to be sent over a network. It is sufficient that they can be
* serialized through boost.
*
* @todo For serialization purposes, don't depend upon boost. It would be easy to use only eoserial and send strings
* via mpi.
*
* The main steps of the algorithm are the following:
* - For the master:
* - Have we done with the treatment we are doing ?
* - If this is the case, we can quit.
* - Otherwise, send an input data to some available worker.
* - If there's no available worker, wait for a worker to be free.
* - When receiving the response, handle it (eventually compute something on the output data, store it...).
* - Go back to the first step.
* - For the worker, it is even easier:
* - Wait for an order.
* - If there's nothing to do, just quit.
* - Otherwise, eventually retrieve data and do the work.
* - Go back to the first step.
*
* There is of course some network adjustements to do and precisions to give there, but the main ideas are present. As the
* job is fully centralized, this is the master who tells the workers when to quit and when to work.
*
* The idea behind these MPI helpers is to be the most generic possible. If we look back at the steps of the
* algorithm, we find that the steps can be split into 2 parts: the first consists of the steps common to any
* parallelization algorithm and the other consists in the specific parts of the algorithm. Ideally, the user should
* just have to implement the specific parts of the algorithm. We identified these parts to be:
* - For the master:
* - What does mean to have terminated ? There are only two alternatives, in our binary world: either it is
* terminated, or it is not. Hence we only need a function returning a boolean to know if we're done with the
* computation : we'll call it IsFinished.
* - What do we have to do when we send a task ? We don't have any a priori on the form of the sent data, or
* the number of sent data. Moreover, as the tasks are all independant, we don't care of who will do the
* computation, as soon as it's done. Knowing the rank of the worker will be sufficient to send him data. We
* have identified another function, taking a single argument which is the rank of the worker: we'll call it
* SendTask.
* - What do we have to do when we receive a response from a worker ? One more time, we don't know which form
* or structure can have the receive data, only the user can know. Also we let the user the charge to retrieve
* the data ; he just has to know from who the master will retrieve the data. Here is another function, taking
* a rank (the sender's one) as a function argument : this will be HandleResponse.
* - For the worker:
* - What is the processing ? It can have any nature. We just need to be sure that a data is sent back to the
* master, but it seems difficult to check that: it will be the role of the user to assert that data is sent by
* the worker at the end of an execution. We've got identified our last function: ProcessTask.
*
* In term of implementation, it would be annoying to have only abstract classes with these 4 methods to implement. It
* would mean that if you want to alter just one of these 4 functions, you have to implement a new sub class, with a
* new constructor which could have the same signature. Besides, this fashion doesn't allow you to add dynamic
* functionalities, using the design pattern Decorator for instance, without implement a class for each type of
* decoration you want to add. For these reasons, we decided to transform function into functors ; the user can then
* wrap the existing, basic comportments into more sophisticated computations, whenever he wants, and without the
* notion of order. We retrieve here the power of extension given by the design pattern Decorator.
*
* Our 4 functors could have a big amount of data in common (see eoParallelApply to have an idea).
* So as to make it easy for the user to implement these 4 functors, we consider that these functors
* have to share a common data structure. This data structure is referenced (as a pointer) in the 4 functors, so the
* user doesn't need to pass a lot of parameters to each functor constructor.
*
* There are two kinds of jobs:
* - The job which are launched a fixed and well known amount of times, i.e both master and workers know how many
* times they will be launched. They are "one shot jobs".
* - The job which are launched an unknown amount of times, for instance embedded in a while loop for which we don't
* know the amount of repetitions (typically, eoEasyEA loop is a good example, as we don't know the continuator
* condition). They are called "multi job".
* As the master tells the workers to quit, we have to differentiate these two kinds of jobs. When the job is of the
* kind "multi job", the workers would have to perform a while(true) loop so as to receive the orders ; but even if
* the master tells them to quit, they would begin another job and wait for another order, while the master would
* have quit: this would cause a deadlock and workers processes would be blocked, waiting for an order.
*/
namespace mpi
{
/**
 * @brief A timer which allows the user to generate statistics about computation times.
 */
extern eoTimerStat timerStat;

/**
 * @brief Tags used in MPI messages for framework communication.
 *
 * These tags act as "channels", so as to differentiate between sending an order to a
 * worker (Commands) and sending data (Messages). They are not reserved by the
 * framework: the user may reuse them, but is not bound to.
 *
 * @ingroup MPI
 */
namespace Channel
{
    const int Commands = 0;
    const int Messages = 1;
}

/**
 * @brief Simple orders used by the framework.
 *
 * These orders are sent by the master to the workers, to indicate whether they should
 * receive another task to do (Continue), whether a one-shot job is done (Finish) or
 * whether a multi job is done (Kill).
 *
 * @ingroup MPI
 */
namespace Message
{
    const int Continue = 0;
    const int Finish = 1;
    const int Kill = 2;
}

/**
 * @brief If the job only has one master, the user can use this constant, so as not to worry with integer ids.
 *
 * @ingroup MPI
 */
const int DEFAULT_MASTER = 0;
/**
* @brief Base class for the 4 algorithm functors.
*
* This class can embed a data (JobData) and a wrapper, so as to make all the 4 functors wrappable.
* We can add a wrapper at initialization or at any time when executing the program.
*
* According to RAII, the boolean needDelete helps to know if we have to use the operator delete on the wrapper
* or not. Hence, if any functor is wrapped, user has just to put this boolean to true, to indicate to wrapper
* that it should call delete. This allows to mix wrapper initialized in the heap (with new) or in the stack.
*
* @param JobData a Data type, which can have any form. It can a struct, a single int, anything.
*
* @param Wrapped the type of the functor, which will be stored as a pointer under the name _wrapped.
* This allows to wrap directly the functor in functors of the same type
* here, instead of dealing with SharedDataFunction* that we would have to cast all the time.
* Doing also allows to handle the wrapped functor as the functor we're writing, when coding the wrappers,
* instead of doing some static_cast. For instance, if there are 2 functors subclasses, fA and fB, fA
* implementing doFa() and fB implementing doFb(), we could have the following code:
* @code
* struct fA_wrapper
* {
* // some code
* void doFa()
* {
* _wrapped->doFa();
* std::cout << "I'm a fA wrapper!" << std::endl;
* // if we didn't have the second template parameter, but a SharedDataFunction, we would have to do this:
* static_cast<fA*>(_wrapped)->doFa();
* // do other things (it's a wrapper)
* }
* };
*
* struct fB_wrapper
* {
* // some code
* void doFb()
* {
* _wrapped->doFb(); // and not: static_cast<fB*>(_wrapped)->doFb();
* }
* };
* @endcode
* This makes the code easier to write for the user.
*
* @ingroup MPI
*/
template< typename JobData, typename Wrapped >
struct SharedDataFunction
{
    /**
     * @brief Default constructor.
     *
     * The user is not bound to give a wrapped functor; by default there is none.
     */
    SharedDataFunction( Wrapped * w = 0 ) : _data( 0 ), _wrapped( w ), _needDelete( false )
    {
        // empty
    }

    /**
     * @brief Destructor.
     *
     * Calls delete on the wrapped functor, only if the latter asked for it
     * through needDelete( true ); the chain then unwinds recursively.
     */
    virtual ~SharedDataFunction()
    {
        if( _wrapped && _wrapped->needDelete() )
        {
            delete _wrapped;
        }
    }

    /**
     * @brief Setter for the wrapped functor.
     *
     * It doesn't do anything on the current wrapped functor, like deleting it.
     */
    void wrapped( Wrapped * w )
    {
        _wrapped = w;
    }

    /**
     * @brief Setter for the shared data.
     *
     * Propagates the pointer to the whole chain of wrapped functors, in a
     * Composite pattern fashion. The data itself is owned elsewhere.
     */
    void data( JobData* d )
    {
        _data = d;
        if( _wrapped )
        {
            _wrapped->data( d );
        }
    }

    /**
     * @brief Returns true if the owner has to call operator delete on this functor.
     *
     * Allows the user to delegate delete responsibility to the framework, by setting this value to true.
     */
    bool needDelete() { return _needDelete; }
    void needDelete( bool b ) { _needDelete = b; }

    protected:

    JobData* _data;    // shared job data, set through data(); not owned here
    Wrapped* _wrapped; // pointer and not a reference, so it can be (re)set at any time
    bool _needDelete;  // should our owner call delete on us?
};
/**
* @brief Functor (master side) used to send a task to the worker.
*
* The user doesn't have to know which worker will receive a task, so we just indicate to master the rank of the
* worker. The data used for computation have to be explicitly sent by the master to the worker, with indicated
* rank. Once this functor has been called, the worker is considered busy until it sends a return message to the
* master.
*
* This is a functor implementing void operator()(int), and also a shared data function, containing wrapper on its
* own type.
*
* @ingroup MPI
*/
template< typename JobData >
struct SendTaskFunction : public eoUF<int, void>, public SharedDataFunction< JobData, SendTaskFunction<JobData> >
{
public:
    // Master-side functor: operator()(int) receives the rank of the worker to send a task to.
    SendTaskFunction( SendTaskFunction<JobData>* w = 0 ) : SharedDataFunction<JobData, SendTaskFunction<JobData> >( w )
    {
        // empty
    }

    virtual ~SendTaskFunction() {} // virtual: meant to be subclassed and wrapped
};
/**
* @brief Functor (master side) used to indicate what to do when receiving a response.
*
* The master calls this function as soon as it receives some data, in some channel. Thanks to MPI, we retrieve
* the rank of the data's sender. This functor is then called with this rank. There is no memoization of a link
* between sent data and rank, so the user has to implement it, if he needs it.
*
* This is a functor implementing void operator()(int), and also a shared data function, containing wrapper on
* its own type.
*
* @ingroup MPI
*/
template< typename JobData >
struct HandleResponseFunction : public eoUF<int, void>, public SharedDataFunction< JobData, HandleResponseFunction<JobData> >
{
public:
    // Master-side functor: operator()(int) receives the rank of the worker whose response arrived.
    HandleResponseFunction( HandleResponseFunction<JobData>* w = 0 ) : SharedDataFunction<JobData, HandleResponseFunction<JobData> >( w )
    {
        // empty
    }

    virtual ~HandleResponseFunction() {} // virtual: meant to be subclassed and wrapped
};
/**
* @brief Functor (worker side) implementing the processing to do.
*
* This is where the real computation happen.
* Whenever the master sends the command "Continue" to workers, which indicates the worker will receive a task,
* the worker calls this functor. The user has to explicitly retrieve the data, handle it and transmit it,
* processed, back to the master. If the worker does not send any data back to the master, the latter will
* consider the worker isn't done and a deadlock could occur.
*
* This is a functor implementing void operator()(), and also a shared data function, containing wrapper on its
* own type.
*
* @ingroup MPI
*/
template< typename JobData >
struct ProcessTaskFunction : public eoF<void>, public SharedDataFunction< JobData, ProcessTaskFunction<JobData> >
{
public:
    // Worker-side functor: operator()() performs the actual computation and must send the result back.
    ProcessTaskFunction( ProcessTaskFunction<JobData>* w = 0 ) : SharedDataFunction<JobData, ProcessTaskFunction<JobData> >( w )
    {
        // empty
    }

    virtual ~ProcessTaskFunction() {} // virtual: meant to be subclassed and wrapped
};
/**
* @brief Functor (master side) indicating whether the job is done or not.
*
* The master loops on this functor to know when to stop. When this functor returns true, the master will wait
* for the last responses and properly stops the job. Whenever this functor returns false, the master will send
* tasks, until this functor returns true.
*
* This is a functor implementing bool operator()(), and also a shared function, containing wrapper on its own
* type.
*
* @ingroup MPI
*/
template< typename JobData >
struct IsFinishedFunction : public eoF<bool>, public SharedDataFunction< JobData, IsFinishedFunction<JobData> >
{
public:
    // Master-side functor: operator()() returns true when no more tasks have to be sent.
    IsFinishedFunction( IsFinishedFunction<JobData>* w = 0 ) : SharedDataFunction<JobData, IsFinishedFunction<JobData> >( w )
    {
        // empty
    }

    virtual ~IsFinishedFunction() {} // virtual: meant to be subclassed and wrapped
};
/**
* @brief Contains all the required data and the functors to launch a job.
*
* Splitting the functors and data from the job in itself allows to use the same functors and data for multiples
* instances of the same job. You define your store once and can use it a lot of times during your program. If
* the store was included in the job, you'd have to give again all the functors and all the datas to each
* invokation of the job.
*
* Job store contains the 4 functors (pointers, so as to be able to wrap them ; references couldn't have
* permitted that) described above and the JobData used by all these functors. It contains
* also helpers to easily wrap the functors, getters and setters on all of them.
*
* The user has to implement data(), which is the getter for retrieving JobData. We don't have any idea of who
* owns the data, moreover it is impossible to initialize it in this generic JobStore, as we don't know its
* form. As a matter of fact, the user has to define this in the JobStore subclasses.
*
* @ingroup MPI
*/
template< typename JobData >
struct JobStore
{
/**
* @brief Default ctor with the 4 functors.
*/
JobStore(
SendTaskFunction<JobData>* stf,
HandleResponseFunction<JobData>* hrf,
ProcessTaskFunction<JobData>* ptf,
IsFinishedFunction<JobData>* iff
) :
_stf( stf ), _hrf( hrf ), _ptf( ptf ), _iff( iff )
{
// empty
}
/**
* @brief Empty ctor, useful for not forcing users to call the other constructor.
*
* When using this constructor, the user have to care about the 4 functors pointers, otherwise null pointer
* segfaults have to be expected.
*/
JobStore()
{
// empty
}
/**
* @brief Default destructor.
*
* JobStore is the highest layer which calls needDelete on its functors.
*/
~JobStore()
{
if( _stf->needDelete() ) delete _stf;
if( _hrf->needDelete() ) delete _hrf;
if( _ptf->needDelete() ) delete _ptf;
if( _iff->needDelete() ) delete _iff;
}
// Getters
SendTaskFunction<JobData> & sendTask() { return *_stf; }
HandleResponseFunction<JobData> & handleResponse() { return *_hrf; }
ProcessTaskFunction<JobData> & processTask() { return *_ptf; }
IsFinishedFunction<JobData> & isFinished() { return *_iff; }
// Setters
void sendTask( SendTaskFunction<JobData>* stf ) { _stf = stf; }
void handleResponse( HandleResponseFunction<JobData>* hrf ) { _hrf = hrf; }
void processTask( ProcessTaskFunction<JobData>* ptf ) { _ptf = ptf; }
void isFinished( IsFinishedFunction<JobData>* iff ) { _iff = iff; }
/**
* @brief Helpers for wrapping send task functor.
*/
void wrapSendTask( SendTaskFunction<JobData>* stf )
{
if( stf )
{
stf->wrapped( _stf );
_stf = stf;
}
}
/**
* @brief Helpers for wrapping handle response functor.
*/
void wrapHandleResponse( HandleResponseFunction<JobData>* hrf )
{
if( hrf )
{
hrf->wrapped( _hrf );
_hrf = hrf;
}
}
/**
* @brief Helpers for wrapping process task functor.
*/
void wrapProcessTask( ProcessTaskFunction<JobData>* ptf )
{
if( ptf )
{
ptf->wrapped( _ptf );
_ptf = ptf;
}
}
/**
* @brief Helpers for wrapping is finished functor.
*/
void wrapIsFinished( IsFinishedFunction<JobData>* iff )
{
if( iff )
{
iff->wrapped( _iff );
_iff = iff;
}
}
virtual JobData* data() = 0;
protected:
SendTaskFunction< JobData >* _stf;
HandleResponseFunction< JobData >* _hrf;
ProcessTaskFunction< JobData >* _ptf;
IsFinishedFunction< JobData >* _iff;
};
/**
* @example t-mpi-wrapper.cpp
*/
/**
* @brief Class implementing the centralized job algorithm.
*
* This class handles all the job algorithm. With its store and its assignment (scheduling) algorithm, it
* executes the general algorithm described above, adding some networking, so as to make the global process
* work. It initializes all the functors with the data, then launches the main loop, indicating to workers when
* they will have to work and when they will finish, by sending them a termination message (integer that can be
* customized). As the algorithm is centralized, it is also mandatory to indicate what is the MPI rank of the
* master process, hence the workers will know from who they should receive their commands.
*
* Any of the 3 master functors can throw an exception; it will be caught and rethrown as a std::runtime_error
* to the higher layers.
*
* @ingroup MPI
*/
template< class JobData >
class Job
{
public:
/**
* @brief Main constructor for Job.
*
* @param _algo The used assignment (scheduling) algorithm. It has to be initialized, with its maximum
* possible number of workers (some workers referenced in this algorithm shouldn't be busy). See
* AssignmentAlgorithm for more details.
*
* @param _masterRank The MPI rank of the master.
*
* @param _workerStopCondition Number of the message which will cause the workers to terminate. It could
* be one of the constants defined in eo::mpi::Commands, or any other integer. The user has to be sure
* that a message containing this integer will be sent to each worker on the Commands channel, otherwise
* deadlock will happen. Master sends Finish messages at the end of a simple job, but as a job can
* happen multiple times (multi job), workers don't have to really finish on these messages but on
* another message. This is where you can configure it. See also OneShotJob and MultiJob.
*
* @param _store The JobStore containing functors and data for this job.
*/
Job( AssignmentAlgorithm& _algo,
int _masterRank,
int _workerStopCondition,
JobStore<JobData> & _store
) :
assignmentAlgo( _algo ),
masterRank( _masterRank ),
workerStopCondition( _workerStopCondition ),
comm( Node::comm() ),
// Functors
store( _store ),
sendTask( _store.sendTask() ),
handleResponse( _store.handleResponse() ),
processTask( _store.processTask() ),
isFinished( _store.isFinished() )
{
_isMaster = Node::comm().rank() == _masterRank;
// Every functor works on the same JobData instance, owned by the store.
sendTask.data( _store.data() );
handleResponse.data( _store.data() );
processTask.data( _store.data() );
isFinished.data( _store.data() );
}
protected:
/**
* @brief Finally block of the main algorithm
*
* Herb Sutter's trick for having a finally block, in a try/catch section: invoke a class at the
* beginning of the try, its destructor will be called in every case.
*
* This implements the end of the master algorithm:
* - sends to all available workers that they are free,
* - waits for last responses, handles them and sends termination messages to last workers.
*
* NOTE(review): this destructor performs MPI sends and handles responses; if one of these
* operations throws while the stack is already unwinding from another exception, the program
* terminates — confirm this is the intended trade-off.
*/
struct FinallyBlock
{
FinallyBlock(
int _totalWorkers,
AssignmentAlgorithm& _algo,
Job< JobData > & _that
) :
totalWorkers( _totalWorkers ),
assignmentAlgo( _algo ),
that( _that ),
// global field
comm( Node::comm() )
{
// empty
}
~FinallyBlock()
{
# ifndef NDEBUG
eo::log << eo::debug;
eo::log << "[M" << comm.rank() << "] Frees all the idle." << std::endl;
# endif
// frees all the idle workers
timerStat.start("master_wait_for_idles");
std::vector<int> idles = assignmentAlgo.idles();
for(unsigned int i = 0; i < idles.size(); ++i)
{
comm.send( idles[i], Channel::Commands, Message::Finish );
}
timerStat.stop("master_wait_for_idles");
# ifndef NDEBUG
eo::log << "[M" << comm.rank() << "] Waits for all responses." << std::endl;
# endif
// wait for all responses of the workers which are still busy
timerStat.start("master_wait_for_all_responses");
while( assignmentAlgo.availableWorkers() != totalWorkers )
{
bmpi::status status = comm.probe( bmpi::any_source, bmpi::any_tag );
int wrkRank = status.source();
that.handleResponse( wrkRank );
comm.send( wrkRank, Channel::Commands, Message::Finish );
assignmentAlgo.confirm( wrkRank );
}
timerStat.stop("master_wait_for_all_responses");
# ifndef NDEBUG
eo::log << "[M" << comm.rank() << "] Leaving master task." << std::endl;
# endif
}
protected:
int totalWorkers; // total number of workers handled by the assignment algorithm
AssignmentAlgorithm& assignmentAlgo;
Job< JobData > & that; // enclosing job, used to call its handleResponse functor
bmpi::communicator & comm;
};
/**
* @brief Master part of the job.
*
* Launches the parallelized job algorithm : while there is something to do (! IsFinished ), get a
* worker who will be the assignee ; if no worker is available, wait for a response, handle it and reask
* for an assignee. Then send the command and the task.
* Once there is no more to do (IsFinished), indicate to all available workers that they're free, wait
* for all the responses and send termination messages (see also FinallyBlock).
*/
void master( )
{
int totalWorkers = assignmentAlgo.availableWorkers();
# ifndef NDEBUG
eo::log << eo::debug;
eo::log << "[M" << comm.rank() << "] Have " << totalWorkers << " workers." << std::endl;
# endif
try {
// FinallyBlock's destructor performs the termination protocol, even on exception.
FinallyBlock finally( totalWorkers, assignmentAlgo, *this );
while( ! isFinished() )
{
timerStat.start("master_wait_for_assignee");
int assignee = assignmentAlgo.get( );
// No worker available (get() returned -1): wait for a response to free one.
while( assignee <= 0 )
{
# ifndef NDEBUG
eo::log << "[M" << comm.rank() << "] Waitin' for node..." << std::endl;
# endif
bmpi::status status = comm.probe( bmpi::any_source, bmpi::any_tag );
int wrkRank = status.source();
# ifndef NDEBUG
eo::log << "[M" << comm.rank() << "] Node " << wrkRank << " just terminated." << std::endl;
# endif
handleResponse( wrkRank );
assignmentAlgo.confirm( wrkRank );
assignee = assignmentAlgo.get( );
}
timerStat.stop("master_wait_for_assignee");
# ifndef NDEBUG
eo::log << "[M" << comm.rank() << "] Assignee : " << assignee << std::endl;
# endif
// Order the worker to continue, then send it its task.
timerStat.start("master_wait_for_send");
comm.send( assignee, Channel::Commands, Message::Continue );
sendTask( assignee );
timerStat.stop("master_wait_for_send");
}
} catch( const std::exception & e )
{
// Rethrow with context added, as a std::runtime_error.
std::string s = e.what();
s.append( " in eoMpi loop");
throw std::runtime_error( s );
}
}
/**
* @brief Worker part of the algorithm.
*
* The algorithm is much simpler: wait for an order; if it's the termination message, leave.
* Otherwise, prepare to work.
*/
void worker( )
{
int order;
# ifndef NDEBUG
eo::log << eo::debug;
# endif
timerStat.start("worker_wait_for_order");
comm.recv( masterRank, Channel::Commands, order );
timerStat.stop("worker_wait_for_order");
while( true )
{
# ifndef NDEBUG
eo::log << "[W" << comm.rank() << "] Waiting for an order..." << std::endl;
# endif
if ( order == workerStopCondition )
{
# ifndef NDEBUG
eo::log << "[W" << comm.rank() << "] Leaving worker task." << std::endl;
# endif
return;
} else if( order == Message::Continue )
{
# ifndef NDEBUG
eo::log << "[W" << comm.rank() << "] Processing task..." << std::endl;
# endif
processTask( );
}
timerStat.start("worker_wait_for_order");
comm.recv( masterRank, Channel::Commands, order );
timerStat.stop("worker_wait_for_order");
}
}
public:
/**
* @brief Launches the job algorithm, according to the role of the host (roles are deduced from the
* master rank indicated in the constructor).
*/
void run( )
{
( _isMaster ) ? master( ) : worker( );
}
/**
* @brief Returns true if the current host is the master, false otherwise.
*/
bool isMaster( )
{
return _isMaster;
}
protected:
AssignmentAlgorithm& assignmentAlgo; // scheduling algorithm used to pick assignees
int masterRank; // MPI rank of the master process
const int workerStopCondition; // message value on which workers terminate
bmpi::communicator& comm; // global communicator
JobStore<JobData>& store; // store owning the functors and the data
// References to the store's functors, cached at construction.
SendTaskFunction<JobData> & sendTask;
HandleResponseFunction<JobData> & handleResponse;
ProcessTaskFunction<JobData> & processTask;
IsFinishedFunction<JobData> & isFinished;
bool _isMaster; // true if the current host is the master
};
/**
* @brief Job that will be launched only once.
*
* As explained in eo::mpi documentation, jobs can happen either a well known amount of times or an unknown
* amount of times. This class implements the general case when the job is launched a well known amount of
* times. The job will be terminated on both sides (master and worker) once the master would have said it.
*
* It uses the message Message::Finish as the termination message.
*
* @ingroup MPI
*/
template< class JobData >
class OneShotJob : public Job< JobData >
{
    public:
        /**
         * @brief Builds a job which terminates, on both sides, on the first
         * Message::Finish sent by the master at the end of the job.
         *
         * @param algo Assignment (scheduling) algorithm to use.
         * @param masterRank MPI rank of the master process.
         * @param store Store containing the functors and the data of this job.
         */
        OneShotJob( AssignmentAlgorithm& algo,
                    int masterRank,
                    JobStore<JobData> & store )
            : Job<JobData>( algo, masterRank, Message::Finish, store )
        {
            // nothing else to do
        }
};
/**
* @brief Job that will be launched an unknown amount of times, in worker side.
*
* As explained in eo::mpi documentation, jobs can happen either a well known amount of times or an unknown
* amount of times. This class implements the general case when the job is launched an unknown amount of times, for
* instance in a while loop. The master will run many jobs (or the same job many times), but the workers will
* launch it only once.
*
* It uses the message Message::Kill as the termination message. This message can be launched with an EmptyJob,
* launched only by the master. If no Message::Kill is sent on the Channels::Commands, the worker will wait
* forever, which will cause a deadlock.
*
* @ingroup MPI
*/
template< class JobData >
class MultiJob : public Job< JobData >
{
    public:
        /**
         * @brief Builds a job whose workers only terminate on Message::Kill,
         * so that the master can run the job an unknown number of times.
         *
         * @param algo Assignment (scheduling) algorithm to use.
         * @param masterRank MPI rank of the master process.
         * @param store Store containing the functors and the data of this job.
         */
        MultiJob ( AssignmentAlgorithm& algo,
                   int masterRank,
                   JobStore<JobData> & store )
            : Job<JobData>( algo, masterRank, Message::Kill, store )
        {
            // nothing else to do
        }
};
}
/**
* @}
*/
}
# endif // __EO_MPI_H__

View file

@ -0,0 +1,387 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# ifndef __MPI_ASSIGNMENT_ALGORITHM_H__
# define __MPI_ASSIGNMENT_ALGORITHM_H__
# include <vector> // std::vector
# include "eoMpiNode.h"
namespace eo
{
namespace mpi
{
/**
* @brief Constant indicating to use all the remaining available workers, in assignment algorithm
* constructors taking an interval (first, last).
*
* @ingroup MPI
*/
const int REST_OF_THE_WORLD = -1; // sentinel value: "up to the last MPI rank"
/**
* @brief Contains information on the available workers and allows to find assignees for jobs.
*
* Available workers are workers who aren't processing anything. When they've received an order, workers switch
* from the state "available" to the state "busy", and the master has to wait for their response for considering
* them available again.
*
* @ingroup MPI
*/
struct AssignmentAlgorithm
{
    /**
     * @brief Virtual destructor, as this is a polymorphic base class.
     *
     * Concrete assignment algorithms are manipulated through references or
     * pointers to this interface; without a virtual destructor, deleting a
     * concrete algorithm through a base pointer would be undefined behaviour.
     */
    virtual ~AssignmentAlgorithm() {}

    /**
     * @brief Gets the rank of an available worker, so as to send it a task.
     *
     * @return The MPI rank of an available worker, or -1 if there is no available worker.
     */
    virtual int get( ) = 0;

    /**
     * @brief Gets the number of total available workers.
     *
     * Before the first call, it is equal to the total number of present workers, as specified in the
     * specific assignment algorithm constructor. It allows the Job class to know when all the responses have
     * been received, by comparing this number to the total number of workers.
     *
     * @return Integer indicating how many workers are available.
     */
    virtual int availableWorkers( ) = 0;

    /**
     * @brief Reinjects the worker of indicated rank in the available state.
     *
     * @param wrkRank The MPI rank of the worker who has finished its job.
     */
    virtual void confirm( int wrkRank ) = 0;

    /**
     * @brief Indicates which workers are doing nothing.
     *
     * At the end of the algorithm, the master has to warn all the workers that it's done. All the workers
     * means the workers which are currently processing data, and the other ones who could be waiting: the
     * idles. This function indicates to the master which workers aren't doing anything.
     *
     * @return A std::vector containing all the MPI ranks of the idle workers.
     */
    virtual std::vector<int> idles( ) = 0;

    /**
     * @brief Reinitializes the assignment algorithm with the right number of runs.
     *
     * In fact, this is only useful for the static assignment algorithm, which has to be reinitialized every
     * time it's used, in the case of a Multi Job. It's the user's responsibility to call this function.
     *
     * @todo Not really clean. Find a better way to do it.
     */
    virtual void reinit( int runs ) = 0;
};
/**
* @brief Assignment (scheduling) algorithm which handles workers in a queue.
*
* With this assignment algorithm, workers are put in a queue and may be called an unlimited number of times.
* Whenever a worker returns, it is added to the queue, and it becomes available for the next call to get().
* The available workers are all located in the queue at any time, so the number of available workers is
* directly equal to the size of the queue.
*
* This kind of assignment is adapted for tasks whose execution time is stochastic or unknown, but without any
* warranty to be faster than other assignments.
*
* @ingroup MPI
*/
struct DynamicAssignmentAlgorithm : public AssignmentAlgorithm
{
    public:

        /**
         * @brief Uses all the hosts whose rank is higher than 1, inclusive, as workers.
         */
        DynamicAssignmentAlgorithm( )
        {
            for(int i = 1; i < Node::comm().size(); ++i)
            {
                availableWrk.push_back( i );
            }
        }

        /**
         * @brief Uses the unique host with given rank as a worker.
         *
         * @param unique MPI rank of the unique worker.
         */
        DynamicAssignmentAlgorithm( int unique )
        {
            availableWrk.push_back( unique );
        }

        /**
         * @brief Uses the hosts whose ranks are present in the argument as workers.
         *
         * @param workers std::vector containing MPI ranks of workers.
         */
        DynamicAssignmentAlgorithm( const std::vector<int> & workers )
        {
            availableWrk = workers;
        }

        /**
         * @brief Uses a range of ranks as workers.
         *
         * @param first The first worker to be included (inclusive)
         * @param last The last worker to be included (inclusive). If last == eo::mpi::REST_OF_THE_WORLD, all
         * hosts whose rank is higher than first are taken.
         */
        DynamicAssignmentAlgorithm( int first, int last )
        {
            if( last == REST_OF_THE_WORLD )
            {
                last = Node::comm().size() - 1;
            }

            for( int i = first; i <= last; ++i)
            {
                availableWrk.push_back( i );
            }
        }

        virtual int get( )
        {
            int assignee = -1;
            if (! availableWrk.empty() )
            {
                assignee = availableWrk.back();
                availableWrk.pop_back();
            }
            return assignee;
        }

        virtual int availableWorkers()
        {
            // Explicit cast: size() is unsigned, the interface returns int.
            return static_cast<int>( availableWrk.size() );
        }

        virtual void confirm( int rank )
        {
            availableWrk.push_back( rank );
        }

        virtual std::vector<int> idles( )
        {
            return availableWrk;
        }

        // The parameter is unnamed: there is nothing to reinitialize for a
        // dynamic assignment, as workers can be reused an unlimited number of
        // times. This replaces the former "++_" unused-parameter hack.
        virtual void reinit( int /* runs */ )
        {
            // nothing to do
        }

    protected:
        std::vector< int > availableWrk; // ranks of the workers currently not busy
};
/**
* @brief Assignment algorithm which gives to each worker a precise number of tasks to do, in a round robin
* fashion.
*
* This scheduling algorithm attributes, at initialization or when calling reinit(), a fixed amount of runs to
* distribute to the workers. The amount of runs is then equally distributed between all workers ; if total
* number of runs is not a direct multiple of the number of workers, then the remaining unassigned runs are
* distributed to the workers from the first to the last, in a round-robin fashion.
*
* This scheduling should be used when the amount of runs can be computed or is fixed and when we guess that the
* duration of processing task will be the same for each run. There is no warranty that this algorithm is more
* or less efficient that another one. When having a doubt, use DynamicAssignmentAlgorithm.
*
* @ingroup MPI
*/
struct StaticAssignmentAlgorithm : public AssignmentAlgorithm
{
    public:

        /**
         * @brief Uses a given precise set of workers.
         *
         * @param workers std::vector of MPI ranks of workers which will be used.
         * @param runs Fixed amount of runs, strictly positive.
         */
        StaticAssignmentAlgorithm( std::vector<int>& workers, int runs )
        {
            init( workers, runs );
        }

        /**
         * @brief Uses a range of workers.
         *
         * @param first The first MPI rank of worker to use
         * @param last The last MPI rank of worker to use. If it's equal to REST_OF_THE_WORLD, then all the
         * workers from the first one are taken as workers.
         * @param runs Fixed amount of runs, strictly positive.
         */
        StaticAssignmentAlgorithm( int first, int last, int runs )
        {
            std::vector<int> workers;
            if( last == REST_OF_THE_WORLD )
            {
                last = Node::comm().size() - 1;
            }
            for(int i = first; i <= last; ++i)
            {
                workers.push_back( i );
            }
            init( workers, runs );
        }

        /**
         * @brief Uses all the hosts whose rank is higher than 1 (inclusive) as workers.
         *
         * @param runs Fixed amount of runs, strictly positive. If it's not set, you'll have to call reinit()
         * later.
         */
        StaticAssignmentAlgorithm( int runs = 0 )
        {
            std::vector<int> workers;
            for(int i = 1; i < Node::comm().size(); ++i)
            {
                workers.push_back( i );
            }
            init( workers, runs );
        }

        /**
         * @brief Uses an unique host as worker.
         *
         * @param unique The MPI rank of the host which will be the worker.
         * @param runs Fixed amount of runs, strictly positive.
         */
        StaticAssignmentAlgorithm( int unique, int runs )
        {
            std::vector<int> workers;
            workers.push_back( unique );
            init( workers, runs );
        }

    private:
        /**
         * @brief Initializes the static scheduling.
         *
         * Gives to each worker an equal attribution number, equal to runs / workers.size(), eventually plus
         * one if the number of workers is not a divisor of runs.
         *
         * @param workers Vector of hosts' ranks
         * @param runs Fixed amount of runs. If it is not strictly positive, all attributions stay at zero
         * and reinit() will have to be called before the algorithm can hand out any work.
         */
        void init( std::vector<int> & workers, int runs )
        {
            unsigned int nbWorkers = workers.size();
            freeWorkers = nbWorkers;

            busy.clear();
            busy.resize( nbWorkers, false );
            realRank = workers;

            // BUGFIX: the previous implementation called reserve() and then
            // wrote through attributions[i], which is undefined behaviour as
            // reserve() doesn't change the vector's size. resize() really
            // creates the elements. Sizing is also done before the early
            // return below, so that get() never indexes an empty vector when
            // the algorithm was built with runs == 0.
            attributions.clear();
            attributions.resize( nbWorkers, 0 );

            if( runs <= 0 )
            {
                return;
            }

            // Let q and r be the quotient and remainder of the euclidean
            // division of runs by nbWorkers:
            // runs == q * nbWorkers + r, 0 <= r < nbWorkers
            // Each worker receives q runs...
            unsigned int quotient = runs / nbWorkers;
            for (unsigned int i = 0; i < nbWorkers; ++i)
            {
                attributions[ i ] = quotient;
            }
            // ...and the r remaining runs are given to the first workers, in
            // ascending order.
            unsigned int remainder = runs - quotient * nbWorkers;
            for (unsigned int i = 0; i < remainder; ++i)
            {
                ++attributions[ i ];
            }
        }

    public:

        /**
         * @brief Returns the rank of a non-busy worker which still has runs to
         * perform, or -1 if there is none.
         */
        int get( )
        {
            int assignee = -1;
            for( unsigned i = 0; i < busy.size(); ++i )
            {
                if( !busy[i] && attributions[i] > 0 )
                {
                    busy[i] = true;
                    --freeWorkers;
                    assignee = realRank[ i ];
                    break;
                }
            }
            return assignee;
        }

        int availableWorkers( )
        {
            return freeWorkers;
        }

        std::vector<int> idles()
        {
            std::vector<int> ret;
            for(unsigned int i = 0; i < busy.size(); ++i)
            {
                if( !busy[i] )
                {
                    ret.push_back( realRank[i] );
                }
            }
            return ret;
        }

        void confirm( int rank )
        {
            int i = -1; // i is the real index in table
            for( unsigned int j = 0; j < realRank.size(); ++j )
            {
                if( realRank[j] == rank )
                {
                    i = j;
                    break;
                }
            }
            if( i < 0 )
            {
                // Unknown worker rank: ignoring it is safer than indexing the
                // tables with -1, which was undefined behaviour.
                return;
            }
            --attributions[ i ];
            busy[ i ] = false;
            ++freeWorkers;
        }

        void reinit( int runs )
        {
            init( realRank, runs );
        }

    private:
        std::vector<int> attributions; // number of runs each worker still has to perform
        std::vector<int> realRank;     // MPI rank of each worker
        std::vector<bool> busy;        // whether each worker is currently processing a task
        unsigned int freeWorkers;      // number of workers currently not busy
};
}
}
# endif // __MPI_ASSIGNMENT_ALGORITHM_H__

71
eo/src/mpi/eoMpiNode.h Normal file
View file

@ -0,0 +1,71 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# ifndef __MPI_NODE_H__
# define __MPI_NODE_H__
# include <boost/mpi.hpp>
namespace bmpi = boost::mpi;
namespace eo
{
namespace mpi
{
/**
* @brief Global object used to reach boost::mpi::communicator everywhere.
*
* boost::mpi::communicator is the main object used to send and receive messages between the different hosts of
* a MPI algorithm.
*
* @ingroup MPI
*/
class Node
{
    public:

        /**
         * @brief Initializes the MPI environment with main's argc and argv.
         *
         * Has to be called at the very beginning of any parallel program. The
         * environment is a function-local static, hence it is constructed on
         * the first call only and torn down automatically when the program
         * exits.
         *
         * @param argc Main's argc
         * @param argv Main's argv
         */
        static void init( int argc, char** argv )
        {
            static bmpi::environment env( argc, argv );
        }

        /**
         * @brief Gives access to the single, global boost::mpi::communicator.
         */
        static bmpi::communicator& comm()
        {
            return _comm;
        }

    protected:

        // The communicator shared by the whole program.
        static bmpi::communicator _comm;
};
}
}
# endif // __MPI_NODE_H__

View file

@ -0,0 +1,387 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# ifndef __EO_PARALLEL_APPLY_H__
# define __EO_PARALLEL_APPLY_H__
# include "eoMpi.h"
# include <eoFunctor.h> // eoUF
# include <vector> // std::vector population
/**
* @file eoParallelApply.h
*
* @brief Applies a functor with single parameter to elements of a table, in a parallel fashion.
*
* This file contains all the required classes to do a parallel apply of a table, in a parallel fashion. This can be
* very useful when applying the function can be made without any dependances within the data. In EO, it occurs in
* particular during the evaluation: the number of individuals to evaluate can be really high, and the evaluation of one
* individual is independant from the evaluation of other individuals.
*
* Elements in the table are directly replaced, as the table is given by reference. No new table is made during the
* process.
*
* User can tune this job, indicating how many elements of the table should be sent and evaluated by a worker, at a
* time: this is called the "packet size", as individuals are groupped into a packet of individuals which are sent to
* the worker before evaluation. The problem of choosing the optimal packet size is beyond the purpose of this
* documentation and deserves a theoretical study.
*
* This job is the parallel equivalent to the function apply<EOT>, defined in apply.h. It just applies the function to
* every element of a table. In Python or Javascript, it's the equivalent of the function Map.
*/
namespace eo
{
namespace mpi
{
/**
* @brief Structure used to save assignment to a worker, i.e which slice of the table it has to process.
*
* This slice is defined by the index of the first evaluated argument and the number of processed elements.
*/
struct ParallelApplyAssignment
{
int index; // index, in the table, of the first element of the slice
int size; // number of contiguous elements in the slice
};
/**
* @brief Data useful for a parallel apply (map).
*
* A parallel apply needs at least the functor to apply to every element of the table, and the table itself,
* whereas it can be set later with the function init(). Master rank is also needed, to send it informations and
* receive informations from it, inside the functors (the job knows these values, but the functors don't). The
* size of a packet can be tuned here.
*
* Internal attributes contain:
* - (useful for master) the index of the next element to be evaluated.
* - (useful for master) a map containing links between MPI ranks and slices of the table which the worker with
* this rank has evaluated. Without this map, when receiving results from a worker, the master couldn't be
* able to replace the right elements in the table.
*
* @ingroup MPI
*/
template<class EOT>
struct ParallelApplyData
{
    /**
     * @brief Ctor for Parallel apply (map) data.
     *
     * @param _proc The functor to apply on each element in the table
     * @param _masterRank The MPI rank of the master
     * @param _packetSize The number of elements on which the function will be applied by the worker, at a time.
     * @param table The table to apply. If this value is NULL, user will have to call init() before launching
     * the job.
     *
     * @throw std::runtime_error if the packet size is not strictly positive.
     */
    ParallelApplyData(
        eoUF<EOT&, void> & _proc,
        int _masterRank,
        int _packetSize,
        std::vector<EOT> * table = 0
    ) :
        // size is zero-initialized so that it always holds a defined value,
        // even when no table is given at construction time (init() must then
        // be called before the job is run). Initializers follow the members'
        // declaration order.
        _table( table ), func( _proc ), index( 0 ), size( 0 ),
        packetSize( _packetSize ), masterRank( _masterRank ), comm( Node::comm() )
    {
        if ( _packetSize <= 0 )
        {
            // Message matches the tested condition: zero is rejected too.
            throw std::runtime_error("Packet size should be strictly positive.");
        }

        if( table )
        {
            size = table->size();
        }
    }

    /**
     * @brief Reinitializes the data for a new table to evaluate.
     */
    void init( std::vector<EOT>& table )
    {
        index = 0;
        size = table.size();
        _table = &table;
        assignedTasks.clear();
    }

    /**
     * @brief Access to the table being processed.
     */
    std::vector<EOT>& table()
    {
        return *_table;
    }

    // All elements are public since functors will often use them.
    std::vector<EOT> * _table;  // table whose elements are evaluated; owned by the caller
    eoUF<EOT&, void> & func;    // functor applied to every element
    int index;                  // master: index of the next element to be sent
    int size;                   // number of elements in the table
    std::map< int /* worker rank */, ParallelApplyAssignment /* last assignment */> assignedTasks;
    int packetSize;             // number of elements sent to a worker at a time
    std::vector<EOT> tempArray; // worker: buffer receiving the elements to process
    int masterRank;             // MPI rank of the master process
    bmpi::communicator& comm;   // global communicator
};
/**
* @brief Send task functor implementation for the parallel apply (map) job.
*
* Master side: Sends a slice of the table to evaluate to the worker.
*
* Implementation details:
* Finds the next slice of data to send to the worker, sends first the size and then the data, and memorizes
* that this slice has been distributed to the worker, then updates the next position of element to evaluate.
*/
template< class EOT >
class SendTaskParallelApply : public SendTaskFunction< ParallelApplyData<EOT> >
{
    public:

        using SendTaskFunction< ParallelApplyData<EOT> >::_data;

        /// @brief Ctor. An optional functor to wrap can be given.
        SendTaskParallelApply( SendTaskParallelApply<EOT> * w = 0 ) : SendTaskFunction< ParallelApplyData<EOT> >( w )
        {
            // empty
        }

        /**
         * @brief Master side: sends the next slice of the table to a worker.
         *
         * The number of elements is sent first, then the elements themselves,
         * both on tag 1. The slice boundaries are memorized in the data's
         * assignment map, so that the response handler knows which part of
         * the table the worker's answer corresponds to.
         *
         * @param wrkRank MPI rank of the worker receiving the slice.
         */
        void operator()(int wrkRank)
        {
            const int begin = _data->index;
            const int remaining = _data->size - begin;
            // The last packet may be smaller than the configured packet size.
            const int sentSize = ( remaining < _data->packetSize ) ? remaining : _data->packetSize;

            _data->comm.send( wrkRank, 1, sentSize );
            eo::log << eo::progress << "Evaluating individual " << begin << std::endl;

            // Remember which slice this worker is in charge of.
            _data->assignedTasks[ wrkRank ].index = begin;
            _data->assignedTasks[ wrkRank ].size = sentSize;

            _data->comm.send( wrkRank, 1, & ( (_data->table())[ begin ] ) , sentSize );
            _data->index = begin + sentSize;
        }
};
/**
* @brief Handle response functor implementation for the parallel apply (map) job.
*
* Master side: Replaces the slice of data attributed to the worker in the table.
*/
template< class EOT >
class HandleResponseParallelApply : public HandleResponseFunction< ParallelApplyData<EOT> >
{
    public:

        using HandleResponseFunction< ParallelApplyData<EOT> >::_data;

        /// @brief Ctor. An optional functor to wrap can be given.
        HandleResponseParallelApply( HandleResponseParallelApply<EOT> * w = 0 ) : HandleResponseFunction< ParallelApplyData<EOT> >( w )
        {
            // empty
        }

        /**
         * @brief Master side: puts the processed slice back into the table.
         *
         * The slice boundaries were memorized by the send-task functor; the
         * worker's answer directly overwrites the corresponding elements.
         *
         * @param wrkRank MPI rank of the worker which sent the response.
         */
        void operator()(int wrkRank)
        {
            ParallelApplyAssignment & assignment = _data->assignedTasks[ wrkRank ];
            EOT * sliceBegin = & ( _data->table()[ assignment.index ] );
            _data->comm.recv( wrkRank, 1, sliceBegin, assignment.size );
        }
};
/**
* @brief Process task functor implementation for the parallel apply (map) job.
*
* Worker side: apply the function to the given slice of data.
*
* Implementation details: retrieves the number of elements to evaluate, retrieves them, applies the function
* and then returns the results.
*/
template< class EOT >
class ProcessTaskParallelApply : public ProcessTaskFunction< ParallelApplyData<EOT> >
{
public:
using ProcessTaskFunction< ParallelApplyData<EOT> >::_data;
/**
* @brief Ctor. An optional functor to wrap can be given.
*/
ProcessTaskParallelApply( ProcessTaskParallelApply<EOT> * w = 0 ) : ProcessTaskFunction< ParallelApplyData<EOT> >( w )
{
// empty
}
/**
* @brief Worker side: receives a slice, applies the functor to each element and sends the results back.
*
* The message order mirrors SendTaskParallelApply exactly: first the number of elements, then the
* elements themselves, all on tag 1.
*/
void operator()()
{
int recvSize;
// Number of elements in the incoming packet.
_data->comm.recv( _data->masterRank, 1, recvSize );
_data->tempArray.resize( recvSize );
// NOTE(review): &tempArray[0] assumes recvSize > 0. The master only sends a packet while
// isFinished() is false, so a packet should never be empty — confirm for custom SendTask wrappers.
_data->comm.recv( _data->masterRank, 1, & _data->tempArray[0] , recvSize );
timerStat.start("worker_processes");
for( int i = 0; i < recvSize ; ++i )
{
_data->func( _data->tempArray[ i ] );
}
timerStat.stop("worker_processes");
// Send the processed elements back to the master.
_data->comm.send( _data->masterRank, 1, & _data->tempArray[0], recvSize );
}
};
/**
* @brief Is finished functor implementation for the parallel apply (map) job.
*
* Master side: returns true if and only if the whole table has been evaluated. The job is also terminated only
* when the whole table has been evaluated.
*/
template< class EOT >
class IsFinishedParallelApply : public IsFinishedFunction< ParallelApplyData<EOT> >
{
    public:

        using IsFinishedFunction< ParallelApplyData<EOT> >::_data;

        /// @brief Ctor. An optional functor to wrap can be given.
        IsFinishedParallelApply( IsFinishedParallelApply<EOT> * w = 0 ) : IsFinishedFunction< ParallelApplyData<EOT> >( w )
        {
            // empty
        }

        /**
         * @brief Master side: tells whether the whole table has been handed out.
         *
         * @return true when the sending cursor has reached the end of the table.
         */
        bool operator()()
        {
            const int cursor = _data->index;
            const int tableSize = _data->size;
            return cursor == tableSize;
        }
};
/**
* @brief Store containing all the datas and the functors for the parallel apply (map) job.
*
* User can tune functors when constructing the object. For each functor which is not given, a default one is
* generated.
*
* @ingroup MPI
*/
template< class EOT >
struct ParallelApplyStore : public JobStore< ParallelApplyData<EOT> >
{
    using JobStore< ParallelApplyData<EOT> >::_stf;
    using JobStore< ParallelApplyData<EOT> >::_hrf;
    using JobStore< ParallelApplyData<EOT> >::_ptf;
    using JobStore< ParallelApplyData<EOT> >::_iff;

    /**
     * @brief Main constructor for the parallel apply (map) job.
     *
     * Any functor which the caller does not provide is replaced by a default
     * instance, allocated here and flagged for deletion by the store's
     * destructor.
     *
     * @param _proc The procedure to apply to each element of the table.
     * @param _masterRank The rank of the master process.
     * @param _packetSize The number of elements of the table evaluated by a worker at a time.
     * @param stpa Pointer to a Send Task parallel apply functor. If null, a default one is used.
     * @param hrpa Pointer to a Handle Response parallel apply functor. If null, a default one is used.
     * @param ptpa Pointer to a Process Task parallel apply functor. If null, a default one is used.
     * @param ifpa Pointer to an Is Finished parallel apply functor. If null, a default one is used.
     */
    ParallelApplyStore(
        eoUF<EOT&, void> & _proc,
        int _masterRank,
        int _packetSize = 1,
        // JobStore functors
        SendTaskParallelApply<EOT> * stpa = 0,
        HandleResponseParallelApply<EOT>* hrpa = 0,
        ProcessTaskParallelApply<EOT>* ptpa = 0,
        IsFinishedParallelApply<EOT>* ifpa = 0
    ) :
        _data( _proc, _masterRank, _packetSize )
    {
        if( ! stpa )
        {
            stpa = new SendTaskParallelApply<EOT>;
            stpa->needDelete( true );
        }
        if( ! hrpa )
        {
            hrpa = new HandleResponseParallelApply<EOT>;
            hrpa->needDelete( true );
        }
        if( ! ptpa )
        {
            ptpa = new ProcessTaskParallelApply<EOT>;
            ptpa->needDelete( true );
        }
        if( ! ifpa )
        {
            ifpa = new IsFinishedParallelApply<EOT>;
            ifpa->needDelete( true );
        }

        _stf = stpa;
        _hrf = hrpa;
        _ptf = ptpa;
        _iff = ifpa;
    }

    /// @brief Gives access to the job's data.
    ParallelApplyData<EOT>* data() { return &_data; }

    /**
     * @brief Reinits the store with a new table to evaluate.
     *
     * @param _pop The table of elements to be evaluated.
     */
    void data( std::vector<EOT>& _pop )
    {
        _data.init( _pop );
    }

    virtual ~ParallelApplyStore() // for inheritance purposes only
    {
    }

    protected:
    ParallelApplyData<EOT> _data;
};
/**
* @brief Parallel apply job. Present for convenience only.
*
* A typedef wouldn't have been working, as typedef on templates don't work in C++. Traits would be a
* disgraceful overload for the user.
*
* @ingroup MPI
* @see eoParallelApply.h
*/
template< typename EOT >
class ParallelApply : public MultiJob< ParallelApplyData<EOT> >
{
    public:

        /**
         * @brief Ctor: simply forwards its arguments to the MultiJob base.
         *
         * @param algo Assignment (scheduling) algorithm used to map tasks on workers.
         * @param _masterRank The rank of the master process.
         * @param store Store holding the parallel-apply functors and data.
         */
        ParallelApply(
                AssignmentAlgorithm & algo,
                int _masterRank,
                ParallelApplyStore<EOT> & store
                ) :
            MultiJob< ParallelApplyData<EOT> >( algo, _masterRank, store )
        {
            // empty
        }
};
/**
* @example t-mpi-parallelApply.cpp
* @example t-mpi-multipleRoles.cpp
*/
}
}
# endif // __EO_PARALLEL_APPLY_H__

140
eo/src/mpi/eoTerminateJob.h Normal file
View file

@ -0,0 +1,140 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# ifndef __EO_TERMINATE_H__
# define __EO_TERMINATE_H__
# include "eoMpi.h"
namespace eo
{
namespace mpi
{
/**
* @ingroup MPI
* @{
*/
/**
* @brief Send task functor which does nothing.
*/
struct DummySendTaskFunction : public SendTaskFunction<void>
{
    // There is nothing to send for this job. The parameter (worker rank) is
    // intentionally unnamed to mark it as unused: the former "++_" only
    // incremented a by-value copy, which was a misleading no-op.
    void operator()( int )
    {
        // nothing to send
    }
};
/**
* @brief Handle response functor which does nothing.
*/
struct DummyHandleResponseFunction : public HandleResponseFunction<void>
{
    // No response to handle for this job. The parameter (worker rank) is
    // intentionally unnamed to mark it as unused: the former "++_" only
    // incremented a by-value copy, which was a misleading no-op.
    void operator()( int )
    {
        // nothing to handle
    }
};
/**
* @brief Process task functor which does nothing.
*/
struct DummyProcessTaskFunction : public ProcessTaskFunction<void>
{
    // Workers have no real computation to perform for this job.
    void operator()()
    {
        // nothing!
    }
};
/**
* @brief Is finished functor which returns true everytime.
*/
struct DummyIsFinishedFunction : public IsFinishedFunction<void>
{
    // Always finished: the job ends right after its first scheduling round.
    bool operator()()
    {
        return true;
    }
};
/**
* @brief Job store containing all dummy functors and containing no data.
*/
struct DummyJobStore : public JobStore<void>
{
    using JobStore<void>::_stf;
    using JobStore<void>::_hrf;
    using JobStore<void>::_ptf;
    using JobStore<void>::_iff;

    // Fills the store with the four do-nothing functors. Each one is flagged
    // with needDelete(true), so the base JobStore frees it on destruction.
    DummyJobStore()
    {
        _stf = new DummySendTaskFunction;
        _stf->needDelete( true );
        _hrf = new DummyHandleResponseFunction;
        _hrf->needDelete( true );
        _ptf = new DummyProcessTaskFunction;
        _ptf->needDelete( true );
        _iff = new DummyIsFinishedFunction;
        _iff->needDelete( true );
    }

    // This job carries no payload at all.
    void* data() { return 0; }
};
/**
* @brief Job to run after a Multi Job, so as to indicate that every workers should terminate.
*/
struct EmptyJob : public OneShotJob<void>
{
    /**
     * @brief Main EmptyJob ctor
     *
     * @param algo Assignment (scheduling) algorithm used.
     * @param masterRank The rank of the master process.
     */
    EmptyJob( AssignmentAlgorithm& algo, int masterRank ) :
        OneShotJob<void>( algo, masterRank, *(new DummyJobStore) )
        // the job store is deleted on destructor
    {
        // empty
    }

    ~EmptyJob()
    {
        // Tell every still-idle worker that the whole run is over...
        std::vector< int > idles = assignmentAlgo.idles();
        for(unsigned i = 0, size = idles.size(); i < size; ++i)
        {
            comm.send( idles[i], Channel::Commands, Message::Kill );
        }
        // ...then reclaim the DummyJobStore allocated in the ctor
        // initializer list (the base class only keeps a reference to it).
        delete & this->store;
    }
};
/**
* @}
*/
}
}
# endif // __EO_TERMINATE_H__

58
eo/src/serial/Array.cpp Normal file
View file

@ -0,0 +1,58 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# include "Array.h"
namespace eoserial
{
std::ostream& Array::print( std::ostream& out ) const
{
out << "[";
bool first = true;
for (ArrayChildren::const_iterator it = begin(),
end = this->end();
it != end;
++it)
{
if ( first )
{
first = false;
} else {
out << ", ";
}
(*it)->print( out );
}
out << "]\n";
return out;
}
Array::~Array()
{
    // The array owns its children: release every wrapped entity.
    for (unsigned int i = 0, nb = size(); i < nb; ++i)
    {
        delete (*this)[ i ];
    }
}
} // namespace eoserial

170
eo/src/serial/Array.h Normal file
View file

@ -0,0 +1,170 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# ifndef __EOSERIAL_ARRAY_H__
# define __EOSERIAL_ARRAY_H__
# include <vector>
# include "Entity.h"
# include "Serializable.h"
# include "Object.h"
namespace eoserial
{
// Forward declaration for below declarations.
class Array;
/*
* Declarations of functions present in Utils.h
* These are put here instead of including the file Utils.h, which would
* cause a circular inclusion.
*/
template< class T >
void unpack( const Array & array, unsigned int index, T & value );
void unpackObject( const Array & array, unsigned int index, Persistent & value );
template< class Container, template<class> class UnpackAlgorithm >
void unpackArray( const Array & array, unsigned int index, Container & container );
/**
* @brief Represents a JSON array.
*
* Wrapper for an array, so as to be used as a JSON object.
*
* @ingroup Serialization
*/
class Array : public eoserial::Entity, public std::vector< eoserial::Entity* >
{
    protected:
        // Underlying container type. The Array owns every stored Entity*
        // (they are deleted in the destructor).
        typedef std::vector< eoserial::Entity* > ArrayChildren;

    public:
        /**
         * @brief Adds the serializable object as a JSON object.
         * @param obj Object which implements Printable; it is packed
         * (serialized) on insertion and the Array owns the result.
         */
        void push_back( const eoserial::Printable* obj )
        {
            ArrayChildren::push_back( obj->pack() );
        }

        /**
         * @brief Proxy for vector::push_back.
         * @param json Entity to append; the Array takes ownership of the pointer.
         */
        void push_back( eoserial::Entity* json )
        {
            ArrayChildren::push_back( json );
        }

        /**
         * @brief Prints the JSON array into the given stream.
         * @param out The stream
         */
        virtual std::ostream& print( std::ostream& out ) const;

        /**
         * @brief Dtor: deletes every owned child entity.
         */
        ~Array();

        /*
         * The following parts allow the user to automatically deserialize an eoserial::Array into a
         * standard container, by giving the algorithm which will be used to deserialize contained entities.
         */

        /**
         * @brief Functor which determines how to retrieve the real value contained in a eoserial::Entity at
         * a given place.
         *
         * It will be applied for each contained variable in the array.
         */
        template<class Container>
        struct BaseAlgorithm
        {
            /**
             * @brief Main operator.
             *
             * @param array The eoserial::Array from which we're reading.
             * @param i The index of the contained value.
             * @param container The standard (STL) container in which we'll push back the read value.
             */
            virtual void operator()( const eoserial::Array& array, unsigned int i, Container & container ) const = 0;
        };

        /**
         * @brief BaseAlgorithm for retrieving primitive variables.
         *
         * This one should be used to retrieve primitive (and types which implement operator>>) variables, for instance
         * int, double, std::string, etc...
         */
        template<typename C>
        struct UnpackAlgorithm : public BaseAlgorithm<C>
        {
            // Reads the i-th entity as a String, converts it into the
            // container's value type, then appends it.
            void operator()( const eoserial::Array& array, unsigned int i, C & container ) const
            {
                typename C::value_type t;
                unpack( array, i, t );
                container.push_back( t );
            }
        };

        /**
         * @brief BaseAlgorithm for retrieving eoserial::Persistent objects.
         *
         * This one should be used to retrieve objects which implement eoserial::Persistent.
         */
        template<typename C>
        struct UnpackObjectAlgorithm : public BaseAlgorithm<C>
        {
            // Rebuilds the i-th entity as a Persistent object, then appends it.
            void operator()( const eoserial::Array& array, unsigned int i, C & container ) const
            {
                typename C::value_type t;
                unpackObject( array, i, t );
                container.push_back( t );
            }
        };

        /**
         * @brief General algorithm for array deserialization.
         *
         * Applies the given UnpackAlgorithm to each contained variable in
         * the eoserial::Array, appending the results to the container.
         */
        template<class Container, template<class T> class UnpackAlgorithm>
        inline void deserialize( Container & array )
        {
            UnpackAlgorithm< Container > algo;
            for( unsigned int i = 0, size = this->size();
                    i < size;
                    ++i)
            {
                algo( *this, i, array );
            }
        }
};
} // namespace eoserial
# endif // __EOSERIAL_ARRAY_H__

View file

@ -0,0 +1,35 @@
######################################################################################
### 1) Include the sources
######################################################################################
INCLUDE_DIRECTORIES(${EO_SOURCE_DIR}/src)
INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
######################################################################################
### 2) Define the eoserial target
######################################################################################
SET(EOSERIAL_LIB_OUTPUT_PATH ${EO_BINARY_DIR}/lib)
SET(LIBRARY_OUTPUT_PATH ${EOSERIAL_LIB_OUTPUT_PATH})
SET(EOSERIAL_SOURCES
Array.cpp
Object.cpp
Parser.cpp
String.cpp
)
ADD_LIBRARY(eoserial STATIC ${EOSERIAL_SOURCES})
INSTALL(TARGETS eoserial ARCHIVE DESTINATION lib COMPONENT libraries)
FILE(GLOB HDRS *.h)
INSTALL(FILES ${HDRS} DESTINATION include/eo/serial COMPONENT headers)
######################################################################################
### 3) Optional
######################################################################################
SET(EOSERIAL_VERSION ${GLOBAL_VERSION})
SET_TARGET_PROPERTIES(eoserial PROPERTIES VERSION "${EOSERIAL_VERSION}")
######################################################################################

70
eo/src/serial/Entity.h Normal file
View file

@ -0,0 +1,70 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# ifndef __EOSERIAL_ENTITY_H__
# define __EOSERIAL_ENTITY_H__
# include <iostream> // ostream
/**
* @brief Contains all the necessary entities to serialize eo objects into JSON objects.
*
* Allows serialization from user objects into JSON objects, if they implement the interface
* eoserial::Serializable or eoserial::Persistent. The following user objects can be serialized:
* - primitive types (int, std::string, ...), in particular every type that can be written into a
* std::stringstream.
* - objects which implement eoserial::Serializable.
* - array of serializable things (primitive or serializable objects).
*
* @ingroup Utilities
* @defgroup Serialization Serialization helpers
**/
namespace eoserial
{
/**
* @brief JSON entity
*
* This class represents a JSON entity, which can be JSON objects,
* strings or arrays. It is the base class for the JSON hierarchy.
*
* @ingroup Serialization
*/
class Entity
{
    public:
        /**
         * Virtual dtor (base class): entities are deleted through Entity*
         * by Array and Object.
         */
        virtual ~Entity() { /* empty */ }

        /**
         * @brief Prints the content of a JSON object into a stream.
         * @param out The stream in which we're printing.
         * @return The same stream, to allow chaining.
         */
        virtual std::ostream& print( std::ostream& out ) const = 0;
};
} // namespace eoserial
# endif // __EOSERIAL_ENTITY_H__

60
eo/src/serial/Object.cpp Normal file
View file

@ -0,0 +1,60 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# include "Object.h"
using namespace eoserial;
namespace eoserial
{
std::ostream& Object::print( std::ostream& out ) const
{
out << '{';
bool first = true;
for(JsonValues::const_iterator it = begin(), end = this->end();
it != end;
++it)
{
if ( first )
{
first = false;
} else {
out << ", ";
}
out << '"' << it->first << "\":"; // key
it->second->print( out ); // value
}
out << "}\n";
return out;
}
Object::~Object()
{
    // The object owns its values: release every mapped entity.
    for(JsonValues::iterator entry = begin(); entry != this->end(); ++entry)
    {
        delete entry->second;
    }
}
} // namespace eoserial

88
eo/src/serial/Object.h Normal file
View file

@ -0,0 +1,88 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# ifndef __EOSERIAL_OBJECT_H__
# define __EOSERIAL_OBJECT_H__
# include <map>
# include <string>
# include "Entity.h"
# include "Serializable.h"
namespace eoserial
{
/**
* @brief JSON Object
*
* This class represents a JSON object, which is basically a dictionary
* of keys (strings) and values (JSON entities).
*
* @ingroup Serialization
*/
class Object : public eoserial::Entity, public std::map< std::string, eoserial::Entity* >
{
    public:
        // Alias of the underlying map type. The Object owns every Entity*
        // value (they are deleted in the destructor).
        typedef std::map<std::string, eoserial::Entity*> JsonValues;

        /**
         * @brief Adds a pair into the JSON object.
         * @param key The key associated with the eoserial object
         * @param json The JSON object as created with framework.
         *
         * NOTE(review): if the key is already present, the previous Entity*
         * is overwritten without being deleted — confirm callers never
         * reuse keys.
         */
        void add( const std::string& key, eoserial::Entity* json )
        {
            (*this)[ key ] = json;
        }

        /**
         * @brief Adds a pair into the JSON object.
         * @param key The key associated with the eoserial object
         * @param obj A JSON-serializable object, packed on insertion.
         */
        void add( const std::string& key, const eoserial::Printable* obj )
        {
            (*this)[ key ] = obj->pack();
        }

        /**
         * @brief Deserializes a Serializable class instance from this JSON object.
         * @param obj The object we want to rebuild.
         */
        void deserialize( eoserial::Persistent & obj )
        {
            obj.unpack( this );
        }

        /**
         * @brief Dtor: deletes every owned Entity value.
         */
        ~Object();

        /**
         * @brief Prints the content of a JSON object into a stream.
         * @param out The stream in which we're printing.
         */
        virtual std::ostream& print( std::ostream& out ) const;
};
} // namespace eoserial
# endif // __EOSERIAL_OBJECT_H__

171
eo/src/serial/Parser.cpp Normal file
View file

@ -0,0 +1,171 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# include <string>
# include "Parser.h"
# include "Array.h"
# include "Object.h"
# include "String.h"
// in debug mode only
// # define DEBUG(x) std::cout << x << std::endl;
# define DEBUG(x)
using namespace eoserial;
namespace eoserial
{
/**
* @brief Parses a string contained between double quotes.
*
* Strings can contain escaped double quotes.
* @param str The string we're parsing.
* @param pos The index of current position in parsed string.
* This index will be updated so as to allow the parser to
* continue.
*/
/**
 * @brief Parses a string contained between double quotes.
 *
 * Strings can contain escaped double quotes: a quote is considered escaped
 * when it is preceded by an odd number of backslashes (the backslash itself
 * can be escaped, so "text\\" ends the string while "text\\\" does not).
 *
 * @param str The string we're parsing.
 * @param pos The index of current position in parsed string; updated so
 * that it points right after the closing quote on return.
 */
static std::string parseString(const std::string& str, size_t & pos)
{
    // Locate the opening quote, then search for the first closing quote
    // that is NOT escaped.
    const size_t openQuote = str.find( '"', pos );
    size_t closeQuote;
    int nbBackslashes;
    do {
        ++pos;
        closeQuote = str.find( '"', pos );
        // Count the consecutive backslashes right before the candidate
        // closing quote: an odd count means it is escaped.
        nbBackslashes = 0;
        for( size_t j = closeQuote - 1; str[ j ] == '\\'; --j )
        {
            ++nbBackslashes;
        }
        pos = closeQuote;
    } while( nbBackslashes % 2 == 1 );
    pos = closeQuote + 1;
    // The content lies strictly between the two quotes.
    return str.substr( openQuote + 1, closeQuote - openQuote - 1 );
}
/**
* @brief Moves the given index pos to the next character which is
* neither a coma, a space nor a new line.
*
* @param str The string in which we want to ignores those characters.
* @param pos The index of current position in parsed string.
*/
/**
 * @brief Moves the given index pos to the next character which is
 * neither a coma, a space nor a new line.
 *
 * @param str The string in which we want to ignore those characters.
 * @param pos The index of current position in parsed string.
 */
static void ignoreChars(const std::string& str, size_t & pos)
{
    // Skip separators. Reading const std::string::operator[] at size()
    // yields charT(), i.e. '\0', which stops the loop at end of input.
    while( str[ pos ] == ',' || str[ pos ] == ' ' || str[ pos ] == '\n' )
    {
        ++pos;
    }
}
String* Parser::parseJsonString(const std::string & str, size_t & pos)
{
    // Read the raw text, then wrap it into an owned String entity.
    const std::string raw = parseString( str, pos );
    return new String( raw );
}
Object* Parser::parse(const std::string & str)
{
    // Parsing starts at the very beginning of the input; the top-level
    // entity of a JSON document is expected to be an object.
    size_t pos( 0 );
    Entity* root = parseRight( str, pos );
    return static_cast<Object*>( root );
}
Entity* Parser::parseRight(const std::string & str, size_t & pos)
{
    Entity* value = 0;
    if ( str[ pos ] == '{' )
    {
        // next one is an object
        DEBUG("We read an object.")
        Object* obj = new Object;
        pos += 1; // skip the '{'
        // Read "key":value pairs until the matching closing brace.
        while( pos < str.size() && str[ pos ] != '}' )
        {
            parseLeft( str, pos, obj );
            ignoreChars( str, pos );
        }
        DEBUG("We just finished to read an object ! ")
        pos += 1; // we're on the }, go to the next char
        value = obj;
    }
    else if ( str[ pos ] == '"' )
    {
        // next one is a string
        DEBUG("We read a string")
        value = parseJsonString( str, pos );
    }
    else if ( str[ pos ] == '[' )
    {
        // next one is an array
        DEBUG("We read an array")
        Array* array = new Array;
        pos += 1; // skip the '['
        // Parse each element recursively until the closing bracket.
        while( pos < str.size() && str[ pos ] != ']' )
        {
            Entity* child = parseRight( str, pos );
            if ( child )
                array->push_back( child );
        }
        DEBUG("We've finished to read our array.")
        pos += 1; // we're on the ], go to the next char
        value = array;
    }
    // NOTE(review): any other leading character yields a null Entity* —
    // the input is trusted to be well-formed JSON (no syntax check here).
    ignoreChars( str, pos );
    return value;
}
void Parser::parseLeft(const std::string & str, size_t & pos, Object* eoserial)
{
    // A pair is "key":value — read the key, skip the colon separator,
    // then parse the right-hand side recursively.
    const std::string key = parseString(str, pos);
    ++pos; // the colon
    DEBUG("We've read the key ")
    (*eoserial)[ key ] = parseRight( str, pos );
}
} // namespace eoserial

103
eo/src/serial/Parser.h Normal file
View file

@ -0,0 +1,103 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# ifndef __EOSERIAL_PARSER_H__
# define __EOSERIAL_PARSER_H__
# include "Entity.h"
# include "String.h"
# include "Object.h"
/**
* @file Parser.h
*
* This file contains a tiny JSON parser used in DAE. This parser just handles
* a subset of JSON grammar, with the following restrictions :
* - all strings must be surrounded by double quotes.
* - everything which is not an object or an array is considered to be a string
* (even integers, booleans,...).
* - no syntax check is done. We trust the programmer and he has to ensure that
* every JSON string he produces is valid.
*
* @author Benjamin BOUVIER
*/
namespace eoserial
{
/**
* @brief Parser from a JSON source.
*
* This parser does just retrieve values and does NOT check the structure of
* the input. This implies that if the input is not correct, the result is undefined
* and can result to a failure on execution.
*
* @ingroup Serialization
*/
class Parser
{
    public:
        /**
         * @brief Parses the given string and returns the JSON object read.
         * @param str Full JSON text to parse.
         * @return A new eoserial::Object; the caller owns it and must
         * delete it.
         */
        static eoserial::Object* parse(const std::string & str);

    protected:
        /**
         * @brief Parses the right part of a JSON object as a string.
         *
         * The right part of an object can be a string (for instance :
         * "key":"value"), a JSON array (for instance: "key":["1"]) or
         * another JSON object (for instance: "key":{"another_key":"value"}).
         *
         * The right parts are found after keys (which are parsed by parseLeft)
         * and in arrays.
         *
         * @param str The string we're parsing.
         * @param pos The index of the current position in the string.
         * @return The JSON object matching the right part.
         */
        static eoserial::Entity* parseRight(const std::string & str, size_t & pos);

        /**
         * @brief Parses the left value of a key-value pair, which is the key.
         *
         * @param str The string we're parsing.
         * @param pos The index of the current position in the string.
         * @param json The current JSON object for which we're adding a key-value pair.
         */
        static void parseLeft(const std::string & str, size_t & pos, eoserial::Object* json);

        /**
         * @brief Retrieves a string in a JSON content.
         *
         * @param str The string we're parsing.
         * @param pos The index of the current position of parsing,
         * which will be updated.
         */
        static eoserial::String* parseJsonString(const std::string & str, size_t & pos);
};
} // namespace eoserial
# endif // __EOSERIAL_PARSER_H__

View file

@ -0,0 +1,66 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# ifndef __EOSERIAL_SERIALIZABLE_H__
# define __EOSERIAL_SERIALIZABLE_H__
namespace eoserial
{
class Object; // to avoid recursive inclusion with JsonObject
/**
* @brief Interface showing that object can be written to a eoserial type
* (currently JSON).
*
* @ingroup Serialization
*/
class Printable
{
    public:
        /**
         * @brief Virtual dtor: Printable is a polymorphic interface, so a
         * delete through a Printable* must reach the derived destructor.
         */
        virtual ~Printable() { /* empty */ }

        /**
         * @brief Serializes the object to JSON format.
         * @return A JSON object created with new; the caller owns it.
         */
        virtual eoserial::Object* pack() const = 0;
};
/**
* @brief Interface showing that object can be eoserialized (written and read
* from an input).
*
* Note : Persistent objects should have a default no-argument constructor.
*
* @ingroup Serialization
*/
class Persistent : public Printable
{
    public:
        /**
         * @brief Virtual dtor: Persistent is a polymorphic interface, so a
         * delete through a Persistent* must reach the derived destructor.
         */
        virtual ~Persistent() { /* empty */ }

        /**
         * @brief Loads class fields from a JSON object.
         * @param json A JSON object. Programmer doesn't have to delete it, it
         * is automatically done.
         */
        virtual void unpack(const eoserial::Object* json) = 0;
};
} // namespace eoserial
# endif // __EOSERIAL_SERIALIZABLE_H__

32
eo/src/serial/String.cpp Normal file
View file

@ -0,0 +1,32 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# include "String.h"
namespace eoserial
{
std::ostream& String::print( std::ostream& out ) const
{
    // Quote the value: every primitive is serialized as a JSON string.
    return out << '"' << *this << '"';
}
} // namespace eoserial

103
eo/src/serial/String.h Normal file
View file

@ -0,0 +1,103 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# ifndef __EOSERIAL_STRING_H__
# define __EOSERIAL_STRING_H__
# include <string>
# include <sstream>
# include <limits>
# include "Entity.h"
namespace eoserial
{
/**
* @brief JSON String
*
* Wrapper for string, so as to be used as a JSON object.
*
* @ingroup Serialization
*/
class String : public eoserial::Entity, public std::string
{
    public:
        /**
         * @brief Default ctor.
         * @param str The string we want to wrap.
         */
        String( const std::string& str ) : std::string( str ) {}

        /**
         * @brief Ctor used only when parsing (content filled in later).
         */
        String( ) {}

        /**
         * @brief Prints out the string, surrounded by double quotes.
         */
        virtual std::ostream& print( std::ostream& out ) const;

        /**
         * @brief Deserializes the current String into a given primitive type value.
         * @param value The value in which we're writing.
         */
        template<class T>
        inline void deserialize( T & value );

    protected:
        // Copy and reaffectation are forbidden: declared but never defined.
        explicit String( const String& _ );
        String& operator=( const String& _ );
};
/**
* @brief Casts a eoserial::String into a primitive value, or in a type which at
* least overload operator>>.
*
* @param value A reference to the variable we're writing into.
*
* It's not necessary to specify the variable type, which can be infered by compiler when
* invoking.
*/
template<class T>
inline void String::deserialize( T & value )
{
    // Round-trip through a stream so any type with operator>> works.
    std::stringstream ss;
    // Enough digits so that a serialized double parses back unchanged.
    ss.precision(std::numeric_limits<double>::digits10 + 1);
    ss << *this;
    ss >> value;
}
/**
* @brief Specialization for strings, which don't need to be converted through
* a stringstream.
*/
template<>
inline void String::deserialize( std::string & value )
{
    // No stream round-trip needed: a String already is a std::string.
    value = *this;
}
} // namespace eoserial
# endif // __EOSERIAL_STRING_H__

189
eo/src/serial/Utils.h Normal file
View file

@ -0,0 +1,189 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# ifndef __EOSERIAL_UTILS_H__
# define __EOSERIAL_UTILS_H__
# include "Array.h"
# include "Object.h"
# include "String.h"
namespace eoserial
{
/* ***************************
* DESERIALIZATION FUNCTIONS *
*****************************
These functions are useful for casting eoserial::objects into simple, primitive
variables or into class instance which implement eoserial::Persistent.
The model is always quite the same :
- the first argument is the containing object (which is a eoserial::Entity,
an object or an array)
- the second argument is the key or index,
- the last argument is the value in which we're writing.
*/
/*
 * Each helper below downcasts with static_cast and performs NO runtime
 * type check: the caller must know the actual entity kind stored under the
 * key/index (String for primitives, Object for Persistent, Array for arrays).
 */

// Reads the primitive (operator>>-compatible) value stored under key in obj.
template< class T >
inline void unpack( const Object & obj, const std::string & key, T & value )
{
    static_cast<String*>( obj.find( key )->second )->deserialize( value );
}

// Rebuilds the Persistent object stored under key in obj.
inline void unpackObject( const Object & obj, const std::string & key, Persistent & value )
{
    static_cast<Object*>( obj.find( key )->second )->deserialize( value );
}

// Fills a standard container from the sub-array stored under key in obj,
// using UnpackAlgorithm to convert each element.
template< class Container, template<class> class UnpackAlgorithm >
inline void unpackArray( const Object & obj, const std::string & key, Container & array )
{
    static_cast<Array*>( obj.find( key )->second )->deserialize< Container, UnpackAlgorithm >( array );
}

// Reads the primitive value stored at index in array.
template< class T >
inline void unpack( const Array & array, unsigned int index, T & value )
{
    static_cast<String*>( array[ index ] )->deserialize( value );
}

// Rebuilds the Persistent object stored at index in array.
inline void unpackObject( const Array & array, unsigned int index, Persistent & value )
{
    static_cast<Object*>( array[ index ] )->deserialize( value );
}

// Fills a standard container from the sub-array stored at index in array.
template< class Container, template<class> class UnpackAlgorithm >
inline void unpackArray( const Array & array, unsigned int index, Container & container )
{
    static_cast<Array*>( array[ index ] )->deserialize< Container, UnpackAlgorithm >( container );
}
/* *****************************
*** SERIALIZATION FUNCTIONS ***
*******************************
These functions are useful for casting classic objects and
eoserial::Persistent objects into eoserial entities which
can be manipulated by the framework.
*/
/**
* @brief Casts a value of a stream-serializable type (i.e, which implements
* operator <<) into a JsonString.
*
* This is used when serializing the objects : all primitives types should be
* converted into strings to get more easily manipulated.
*
* @param value The value we're converting.
* @return JsonString wrapper for the value.
*/
template <typename T>
String* make( const T & value )
{
std::stringstream ss;
ss.precision(std::numeric_limits<double>::digits10 + 1);
ss << value;
return new String( ss.str() );
}
/**
* @brief Specialization for strings : no need to convert as they're still
* usable as strings.
*/
template<>
inline String* make( const std::string & value )
{
return new String( value );
}
/*
* These functions are useful for automatically serializing STL containers into
* eoserial arrays which could be used by the framework.
**/
/**
* @brief Functor which explains how to push the value into the eoserial::Array.
*/
template< class T >
struct PushAlgorithm
{
/**
* @brief Main operator.
*
* @param array The eoserial::array in which we're writing.
* @param value The variable we are writing.
*/
virtual void operator()( Array & array, const T & value ) = 0;
};
/**
* @brief Push algorithm for primitive variables.
*
* This one should be used when inserting primitive (and types which implement
* operator<<) variables.
*/
template< class T >
struct MakeAlgorithm : public PushAlgorithm<T>
{
void operator()( Array & array, const T & value )
{
array.push_back( make( value ) );
}
};
/**
* @brief Push algorithm for eoserial::Persistent variables.
*/
template< class T >
struct SerializablePushAlgorithm : public PushAlgorithm<T>
{
void operator()( Array & array, const T & obj )
{
// obj address is not saved into array.push_back.
array.push_back( &obj );
}
};
/**
* @brief Casts a STL container (vector<int> or list<std::string>, for instance)
* into a eoserial::Array.
*
* @þaram PushAlgorithm The algorithm used for inserting new element in the eoserial::Array.
* This algorithm is directly called, so it is its own charge to invoke push_back on the
* eoserial::Array.
*/
template< class Container, template<class> class PushAlgorithm >
Array* makeArray( const Container & array )
{
Array* returned_array = new Array;
typedef typename Container::const_iterator iterator;
typedef typename Container::value_type Type;
PushAlgorithm< Type > algo;
for (
iterator it = array.begin(), end = array.end();
it != end;
++it)
{
algo( *returned_array, *it );
}
return returned_array;
}
} // namespace eoserial
# endif //__EOSERIAL_UTILS_H__

33
eo/src/serial/eoSerial.h Normal file
View file

@ -0,0 +1,33 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# ifndef __EOSERIAL_HEADERS__
# define __EOSERIAL_HEADERS__
# include "Object.h"
# include "Serializable.h"
# include "Array.h"
# include "Object.h"
# include "String.h"
# include "Parser.h"
# include "Utils.h"
# endif // __EOSERIAL_HEADERS__

View file

@ -0,0 +1,8 @@
{"a":"b",
"obj":
{"obj_a":"obj_}b","subobj_a":
{"subk":"subv"}
},
"c":"d",
"array":["1","2",{"\"array\"_obj\"":"array_ov]"}, ["3"], "4"]
}

View file

@ -21,7 +21,7 @@
Contact: http://eodev.sourceforge.net
Authors:
Caner Candan <caner.candan@thalesgroup.com>
Caner Candan <caner.candan@thalesgroup.com>
*/
@ -34,11 +34,12 @@ Caner Candan <caner.candan@thalesgroup.com>
eoParallel::eoParallel() :
_isEnabled( false, "parallelize-loop", "Enable memory shared parallelization into evaluation's loops", '\0' ),
_isDynamic( false, "parallelize-dynamic", "Enable dynamic memory shared parallelization", '\0' ),
_isDynamic( true, "parallelize-dynamic", "Enable dynamic memory shared parallelization", '\0' ),
_prefix( "results", "parallelize-prefix", "Here's the prefix filename where the results are going to be stored", '\0' ),
_nthreads( 0, "parallelize-nthreads", "Define the number of threads you want to use, nthreads = 0 means you want to use all threads available", '\0' ),
_enableResults( false, "parallelize-enable-results", "Enable the generation of results", '\0' ),
_doMeasure( false, "parallelize-do-measure", "Do some measures during execution", '\0' ),
_packetSize( 1U, "parallelize-packet-size", "Number of elements which should be sent in a single message during a parallel evaluation based on message passing.", '\0'),
_t_start(0)
{
}
@ -92,6 +93,7 @@ void eoParallel::_createParameters( eoParser& parser )
parser.processParam( _nthreads, section );
parser.processParam( _enableResults, section );
parser.processParam( _doMeasure, section );
parser.processParam( _packetSize, section );
}
void make_parallel(eoParser& parser)

View file

@ -20,8 +20,7 @@
Contact: http://eodev.sourceforge.net
Authors:
Caner Candan <caner.candan@thalesgroup.com>
Caner Candan <caner.candan@thalesgroup.com>
*/
/** @defgroup Parallel Parallel
@ -54,6 +53,7 @@ public:
std::string prefix() const;
inline unsigned int nthreads() const { return _nthreads.value(); }
inline unsigned int packetSize() const { return _packetSize.value(); }
inline bool enableResults() const { return _enableResults.value(); }
inline bool doMeasure() const { return _doMeasure.value(); }
@ -68,6 +68,7 @@ private:
eoValueParam<bool> _isDynamic;
eoValueParam<std::string> _prefix;
eoValueParam<unsigned int> _nthreads;
eoValueParam<unsigned int> _packetSize;
eoValueParam<bool> _enableResults;
eoValueParam<bool> _doMeasure;
double _t_start;

309
eo/src/utils/eoTimer.h Normal file
View file

@ -0,0 +1,309 @@
/*
(c) Thales group, 2012
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2 of the License.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Contact: http://eodev.sourceforge.net
Authors:
Benjamin Bouvier <benjamin.bouvier@gmail.com>
*/
# ifndef __EO_TIMER_H__
# define __EO_TIMER_H__
# include <sys/time.h> // time()
# include <sys/resource.h> // rusage()
# include <vector> // std::vector
# include <map> // std::map
# include "utils/eoParallel.h" // eo::parallel
# ifdef WITH_MPI
// For serialization purposes
# include <boost/serialization/access.hpp>
# include <boost/serialization/vector.hpp>
# include <boost/serialization/map.hpp>
# endif
/**
* @brief Timer allowing to measure time between a start point and a stop point.
*
* This timer allows the user to measure user time, system time and wallclock time
* between two points. Basically, user time is time spent in developer code ; system
* time is time spent during the IO wait and system calls ; wallclock is the difference
* of time we could observe by measuring time with a watch.
*
* @ingroup Utilities
*/
class eoTimer
{
public:
/**
* @brief Default ctor. Begins all the timers.
*/
eoTimer()
{
uuremainder = 0;
usremainder = 0;
restart();
}
/**
* @brief Restarts all the timers and launch the measure.
*/
void restart()
{
wc_start = time(NULL);
getrusage( RUSAGE_SELF, &_start );
}
/**
* @brief Measures the user time spent since the last restart().
*
* If the number of elapsed seconds is zero, the spent milliseconds are
* added to a remainder. If the remainder exceeds one second, it is
* added to the number of elapsed seconds.
*
* @return Number of seconds elapsed in user space.
*/
long int usertime()
{
struct rusage _now;
getrusage( RUSAGE_SELF, &_now );
long int result = _now.ru_utime.tv_sec - _start.ru_utime.tv_sec;
long int remainder = _now.ru_utime.tv_usec - _start.ru_utime.tv_usec;
if( remainder >= 0 )
{
uuremainder += remainder;
} else
{
uuremainder += ( 1000000 - remainder );
--result;
}
if( uuremainder >= 1000000 )
{
uuremainder -= 1000000;
++result;
}
return result;
}
/**
* @brief Measures the system time spent since the last restart().
*
* If the number of elapsed seconds is zero, the spent milliseconds are
* added to a remainder. If the remainder exceeds one second, it is
* added to the number of elapsed seconds.
*
* @return Number of seconds elapsed in system (kernel) space.
*/
long int systime()
{
struct rusage _now;
getrusage( RUSAGE_SELF, &_now );
long int result = _now.ru_stime.tv_sec - _start.ru_stime.tv_sec;
long int remainder = _now.ru_stime.tv_usec - _start.ru_stime.tv_usec;
if( remainder >= 0 )
{
usremainder += remainder;
} else
{
usremainder += ( 1000000 - remainder );
--result;
}
if( usremainder >= 1000000 )
{
usremainder -= 1000000;
++result;
}
return result;
}
/**
* @brief Measures the wallclock time spent since the last restart().
*
* @return Number of seconds elapsed, as a double.
*/
double wallclock()
{
return std::difftime( std::time(NULL) , wc_start );
}
protected:
// Structure used to measure user and system time.
struct rusage _start;
// Remainder (in milliseconds) for user time.
long int uuremainder;
// Remainder (in milliseconds) for system time.
long int usremainder;
// Structure used to measure wallclock time.
time_t wc_start;
};
/**
* @brief Registers a group of statistics, each statistic corresponding to user, system and wallclock times distribution.
*
* This class helps the user to measure time in different parts of an application. A name is associated to a statistic,
* on each call to start() and stop() for this name, a new number is added to the statistic, for each of the three
* measured times.
*
* The statistics are only registered if the option "--parallelized-do-measure" is set to true, which can be checked
* thanks to global object eo::parallel.
*
* This shows how the eoTimerStat can be used :
* @code
* eoTimerStat timerStat;
* timerStat.start("first_point");
* for( int i = 0; i < 1000; ++i )
* {
* timerStat.start("single_computation");
* single_computation( i );
* timerStat.stop("single_computation");
* }
* // After this loop, timerStat contains a statistic of key "single_computation" which contains 1000 measures for
* // each type of time.
* timerStat.stop("first_point");
* // After this line, timerStat contains another statistic of key "first_point" which counted the duration of the
* // whole loop.
*
* int singleComputationUsertimeMean = 0;
* for( int i = 0; i < 1000; ++i )
* {
* singleComputationUsertimeMean += timerStat.stats()["single_computation"].utime[i];
* }
* std::cout << "Mean of user time spent in single computation: " << singleComputationUsertimeMean / 1000. << std::endl;
* @endcode
*
* When using MPI, these statistics can be readily be serialized, so as to be sent over a network, for instance.
*
* Implementation details: this eoTimerStat is in fact a map of strings (key) / Stat (value). Stat is an internal
* structure directly defined in the class, which contains three vectors modeling the distributions of the different
* types of elapsed times. Another map of strings (key) / eoTimer (value) allows to effectively retrieve the different
* times. The struct Stat will be exposed to client, which will use its members ; however,
* the client doesn't have anything to do directly with the timer, that's why the two maps are splitted.
*
* @ingroup Utilities
*/
class eoTimerStat
{
public:
/**
* @brief Statistic related to a key (name).
*
* This structure is the value in the map saved in the eoTimerStat. It contains the statistic bound to a key,
* which are the user time distribution, the system time distribution and the wallclock time distribution, as
* std::vector s.
*
* It can readily be serialized with boost when compiling with mpi.
*/
struct Stat
{
std::vector<long int> utime;
std::vector<long int> stime;
std::vector<double> wtime;
#ifdef WITH_MPI
// Gives access to boost serialization
friend class boost::serialization::access;
/**
* Serializes the single statistic in a boost archive (useful for boost::mpi).
* Just serializes the 3 vectors.
*/
template <class Archive>
void serialize( Archive & ar, const unsigned int version )
{
ar & utime & stime & wtime;
(void) version; // avoid compilation warning
}
# endif
};
#ifdef WITH_MPI
// Gives access to boost serialization
friend class boost::serialization::access;
/**
* Serializes the timerStat object in a boost archive (useful for boost::mpi).
* Just serializes the map.
*/
template <class Archive>
void serialize( Archive & ar, const unsigned int version )
{
ar & _stats;
(void) version; // avoid compilation warning
}
# endif
/**
* @brief Starts a new measure for the given key.
*
* This is only performed if parallel.doMeasure() is true, which is equivalent to the fact that
* parser found "--parallel-do-measure=1" in command line args.
*
* @param key The key of the statistic.
*/
void start( const std::string & key )
{
if( eo::parallel.doMeasure() )
{
_timers[ key ].restart();
}
}
/**
* @brief Stops the mesure for the given key and saves the elapsed times.
*
* Must follow a call of start with the same key.
*
* This is only performed if parallel.doMeasure() is true, which is equivalent to the fact that
* parser found "--parallel-do-measure=1" in command line args.
*
* @param key The key of the statistic.
*/
void stop( const std::string& key )
{
if( eo::parallel.doMeasure() )
{
Stat & s = _stats[ key ];
eoTimer & t = _timers[ key ];
s.utime.push_back( t.usertime() );
s.stime.push_back( t.systime() );
s.wtime.push_back( t.wallclock() );
}
}
/**
* @brief Getter for the statistics map.
*/
std::map< std::string, Stat >& stats()
{
return _stats;
}
protected:
// Statistics map: links a key (string) to a statistic.
std::map< std::string, Stat > _stats;
// Timers map: links a key to its timer.
std::map< std::string, eoTimer > _timers;
};
# endif // __EO_TIMER_H__