From eebeaa810ed86b79c0fb2d3517e22c40a1d91804 Mon Sep 17 00:00:00 2001 From: Benjamin Bouvier Date: Wed, 18 Jul 2012 17:37:50 +0200 Subject: [PATCH] Updating names: SharedDataFunction::d => SharedDataFunction::_data, ParallelApplyData::_data => ParallelApplyData::_table --- eo/src/mpi/eoMpi.h | 12 ++++--- eo/src/mpi/eoParallelApply.h | 64 +++++++++++++++++------------------ eo/test/mpi/t-mpi-eval.cpp | 7 ++-- eo/test/mpi/t-mpi-wrapper.cpp | 2 +- 4 files changed, 44 insertions(+), 41 deletions(-) diff --git a/eo/src/mpi/eoMpi.h b/eo/src/mpi/eoMpi.h index abb5fa6f5..ee92e0878 100644 --- a/eo/src/mpi/eoMpi.h +++ b/eo/src/mpi/eoMpi.h @@ -222,7 +222,7 @@ namespace eo * * The user is not bound to give a wrapped functor. */ - SharedDataFunction( Wrapped * w = 0 ) : _wrapped( w ), _needDelete( false ) + SharedDataFunction( Wrapped * w = 0 ) : _data( 0 ), _wrapped( w ), _needDelete( false ) { // empty } @@ -255,23 +255,25 @@ namespace eo * * Calls the setter on the functor and on the wrapped functors, in a Composite pattern fashion. */ - void data( JobData* _d ) + void data( JobData* d ) { - d = _d; + _data = d; if( _wrapped ) { - _wrapped->data( _d ); + _wrapped->data( d ); } } /** * @brief Returns true if we need to use operator delete on this wrapper, false otherwise. + + * Allows the user to delegate delete responsibility to the framework, by setting this value to true. 
**/ bool needDelete() { return _needDelete; } void needDelete( bool b ) { _needDelete = b; } protected: - JobData* d; + JobData* _data; Wrapped* _wrapped; // Pointer and not a reference so as to be set at any time and to avoid affectation bool _needDelete; }; diff --git a/eo/src/mpi/eoParallelApply.h b/eo/src/mpi/eoParallelApply.h index 3cfd729c8..6b58aaf1b 100644 --- a/eo/src/mpi/eoParallelApply.h +++ b/eo/src/mpi/eoParallelApply.h @@ -89,46 +89,46 @@ namespace eo * @param _proc The functor to apply on each element in the table * @param _masterRank The MPI rank of the master * @param _packetSize The number of elements on which the function will be applied by the worker, at a time. - * @param _pop The table to apply. If this value is NULL, user will have to call init() before launching the + * @param table The table to apply. If this value is NULL, user will have to call init() before launching the * job. */ ParallelApplyData( eoUF & _proc, int _masterRank, int _packetSize, - std::vector * _pop = 0 + std::vector * table = 0 ) : - _data( _pop ), func( _proc ), index( 0 ), packetSize( _packetSize ), masterRank( _masterRank ), comm( Node::comm() ) + _table( table ), func( _proc ), index( 0 ), packetSize( _packetSize ), masterRank( _masterRank ), comm( Node::comm() ) { if ( _packetSize <= 0 ) { throw std::runtime_error("Packet size should not be negative."); } - if( _pop ) + if( table ) { - size = _pop->size(); + size = table->size(); } } /** * @brief Reinitializes the data for a new table to evaluate. */ - void init( std::vector& _pop ) + void init( std::vector& table ) { index = 0; - size = _pop.size(); - _data = &_pop; + size = table.size(); + _table = &table; assignedTasks.clear(); } - std::vector& data() + std::vector& table() { - return *_data; + return *_table; } // All elements are public since functors will often use them. 
- std::vector * _data; + std::vector * _table; eoUF & func; int index; int size; @@ -153,7 +153,7 @@ namespace eo class SendTaskParallelApply : public SendTaskFunction< ParallelApplyData > { public: - using SendTaskFunction< ParallelApplyData >::d; + using SendTaskFunction< ParallelApplyData >::_data; SendTaskParallelApply( SendTaskParallelApply * w = 0 ) : SendTaskFunction< ParallelApplyData >( w ) { @@ -164,24 +164,24 @@ namespace eo { int futureIndex; - if( d->index + d->packetSize < d->size ) + if( _data->index + _data->packetSize < _data->size ) { - futureIndex = d->index + d->packetSize; + futureIndex = _data->index + _data->packetSize; } else { - futureIndex = d->size; + futureIndex = _data->size; } - int sentSize = futureIndex - d->index ; + int sentSize = futureIndex - _data->index ; - d->comm.send( wrkRank, 1, sentSize ); + _data->comm.send( wrkRank, 1, sentSize ); - eo::log << eo::progress << "Evaluating individual " << d->index << std::endl; + eo::log << eo::progress << "Evaluating individual " << _data->index << std::endl; - d->assignedTasks[ wrkRank ].index = d->index; - d->assignedTasks[ wrkRank ].size = sentSize; + _data->assignedTasks[ wrkRank ].index = _data->index; + _data->assignedTasks[ wrkRank ].size = sentSize; - d->comm.send( wrkRank, 1, & ( (d->data())[ d->index ] ) , sentSize ); - d->index = futureIndex; + _data->comm.send( wrkRank, 1, & ( (_data->table())[ _data->index ] ) , sentSize ); + _data->index = futureIndex; } }; @@ -194,7 +194,7 @@ namespace eo class HandleResponseParallelApply : public HandleResponseFunction< ParallelApplyData > { public: - using HandleResponseFunction< ParallelApplyData >::d; + using HandleResponseFunction< ParallelApplyData >::_data; HandleResponseParallelApply( HandleResponseParallelApply * w = 0 ) : HandleResponseFunction< ParallelApplyData >( w ) { @@ -203,7 +203,7 @@ namespace eo void operator()(int wrkRank) { - d->comm.recv( wrkRank, 1, & (d->data()[ d->assignedTasks[wrkRank].index ] ), 
d->assignedTasks[wrkRank].size ); + _data->comm.recv( wrkRank, 1, & (_data->table()[ _data->assignedTasks[wrkRank].index ] ), _data->assignedTasks[wrkRank].size ); } }; @@ -219,7 +219,7 @@ namespace eo class ProcessTaskParallelApply : public ProcessTaskFunction< ParallelApplyData > { public: - using ProcessTaskFunction< ParallelApplyData >::d; + using ProcessTaskFunction< ParallelApplyData >::_data; ProcessTaskParallelApply( ProcessTaskParallelApply * w = 0 ) : ProcessTaskFunction< ParallelApplyData >( w ) { @@ -230,16 +230,16 @@ namespace eo { int recvSize; - d->comm.recv( d->masterRank, 1, recvSize ); - d->tempArray.resize( recvSize ); - d->comm.recv( d->masterRank, 1, & d->tempArray[0] , recvSize ); + _data->comm.recv( _data->masterRank, 1, recvSize ); + _data->tempArray.resize( recvSize ); + _data->comm.recv( _data->masterRank, 1, & _data->tempArray[0] , recvSize ); timerStat.start("worker_processes"); for( int i = 0; i < recvSize ; ++i ) { - d->func( d->tempArray[ i ] ); + _data->func( _data->tempArray[ i ] ); } timerStat.stop("worker_processes"); - d->comm.send( d->masterRank, 1, & d->tempArray[0], recvSize ); + _data->comm.send( _data->masterRank, 1, & _data->tempArray[0], recvSize ); } }; @@ -253,7 +253,7 @@ namespace eo class IsFinishedParallelApply : public IsFinishedFunction< ParallelApplyData > { public: - using IsFinishedFunction< ParallelApplyData >::d; + using IsFinishedFunction< ParallelApplyData >::_data; IsFinishedParallelApply( IsFinishedParallelApply * w = 0 ) : IsFinishedFunction< ParallelApplyData >( w ) { @@ -262,7 +262,7 @@ namespace eo bool operator()() { - return d->index == d->size; + return _data->index == _data->size; } }; diff --git a/eo/test/mpi/t-mpi-eval.cpp b/eo/test/mpi/t-mpi-eval.cpp index be3c2d96a..5216123ac 100644 --- a/eo/test/mpi/t-mpi-eval.cpp +++ b/eo/test/mpi/t-mpi-eval.cpp @@ -138,6 +138,7 @@ struct CatBestAnswers : public eo::mpi::HandleResponseParallelApply void operator()(int wrkRank) { + eo::mpi::ParallelApplyData * 
d = _data; // Retrieve informations about the slice processed by the worker int index = d->assignedTasks[wrkRank].index; int size = d->assignedTasks[wrkRank].size; @@ -146,10 +147,10 @@ struct CatBestAnswers : public eo::mpi::HandleResponseParallelApply // Compare fitnesses of evaluated individuals with the best saved for(int i = index; i < index+size; ++i) { - if( best.fitness() < d->data()[ i ].fitness() ) + if( best.fitness() < d->table()[ i ].fitness() ) { - eo::log << eo::quiet << "Better solution found:" << d->data()[i].fitness() << std::endl; - best = d->data()[ i ]; + eo::log << eo::quiet << "Better solution found:" << d->table()[i].fitness() << std::endl; + best = d->table()[ i ]; } } } diff --git a/eo/test/mpi/t-mpi-wrapper.cpp b/eo/test/mpi/t-mpi-wrapper.cpp index dbe70261d..97d4b04a1 100644 --- a/eo/test/mpi/t-mpi-wrapper.cpp +++ b/eo/test/mpi/t-mpi-wrapper.cpp @@ -101,7 +101,7 @@ int main(int argc, char** argv) // This is the only thing which changes: we wrap the IsFinished function. // According to RAII, we'll delete the invokated wrapper at the end of the main ; the store won't delete it // automatically. - IsFinishedParallelApply* wrapper = new ShowWrappedResult; + IsFinishedParallelApply* wrapper = new ShowWrappedResult; store.wrapIsFinished( wrapper ); ParallelApply job( assign, eo::mpi::DEFAULT_MASTER, store );