From f963a15fbe23ad8e3cb4c279ac0f557ddbe70e83 Mon Sep 17 00:00:00 2001 From: Benjamin Bouvier Date: Thu, 12 Jul 2012 19:15:07 +0200 Subject: [PATCH] Fixing assignment algorithm, which gave too many assignments per worker. Now, when using static assignment, the optimal packet size (so that each worker has to process only one packet of data) is sent. --- eo/src/eoPopEvalFunc.h | 10 +++++++++- eo/src/mpi/eoMpiAssignmentAlgorithm.h | 3 +++ eo/src/mpi/eoParallelApply.h | 14 ++++---------- 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/eo/src/eoPopEvalFunc.h b/eo/src/eoPopEvalFunc.h index 2cc86807..b96e8cac 100644 --- a/eo/src/eoPopEvalFunc.h +++ b/eo/src/eoPopEvalFunc.h @@ -35,6 +35,9 @@ #include #include #include +#include + +#include <cmath> // ceil # endif // WITH_MPI /** eoPopEvalFunc: This abstract class is for GLOBAL evaluators @@ -192,7 +195,12 @@ class eoParallelPopLoopEval : public eoPopEvalFunc<EOT> (void)_parents; // Reinits the store and the scheduling algorithm store->data( _offspring ); - assignAlgo.reinit( _offspring.size() ); + // For static scheduling, it's mandatory to reinit attributions + int nbWorkers = assignAlgo.availableWorkers(); + assignAlgo.reinit( nbWorkers ); + if( ! eo::parallel.isDynamic() ) { + store->data()->packetSize = ceil( static_cast<double>( _offspring.size() ) / nbWorkers ); + } // Effectively launches the job. 
eo::mpi::ParallelApply<EOT> job( assignAlgo, masterRank, *store ); job.run(); diff --git a/eo/src/mpi/eoMpiAssignmentAlgorithm.h b/eo/src/mpi/eoMpiAssignmentAlgorithm.h index 0630a6f7..85177ea0 100644 --- a/eo/src/mpi/eoMpiAssignmentAlgorithm.h +++ b/eo/src/mpi/eoMpiAssignmentAlgorithm.h @@ -137,7 +137,10 @@ namespace eo { unsigned int nbWorkers = workers.size(); freeWorkers = nbWorkers; + + attributions.clear(); attributions.reserve( nbWorkers ); + busy.clear(); busy.resize( nbWorkers, false ); // Let be the euclidean division of runs by nbWorkers : diff --git a/eo/src/mpi/eoParallelApply.h b/eo/src/mpi/eoParallelApply.h index 9598b952..d0435cea 100644 --- a/eo/src/mpi/eoParallelApply.h +++ b/eo/src/mpi/eoParallelApply.h @@ -37,8 +37,6 @@ namespace eo { size = _pop->size(); } - - tempArray = new EOT[ _packetSize ]; } void init( std::vector<EOT>& _pop ) @@ -49,11 +47,6 @@ namespace eo assignedTasks.clear(); } - ~ParallelApplyData() - { - delete [] tempArray; - } - std::vector<EOT>& data() { return *_data; @@ -65,7 +58,7 @@ namespace eo int size; std::map< int /* worker rank */, ParallelApplyAssignment /* min indexes in vector */> assignedTasks; int packetSize; - EOT* tempArray; + std::vector<EOT> tempArray; int masterRank; bmpi::communicator& comm; @@ -140,14 +133,15 @@ namespace eo int recvSize; d->comm.recv( d->masterRank, 1, recvSize ); - d->comm.recv( d->masterRank, 1, d->tempArray, recvSize ); + d->tempArray.resize( recvSize ); + d->comm.recv( d->masterRank, 1, & d->tempArray[0] , recvSize ); timerStat.start("worker_processes"); for( int i = 0; i < recvSize ; ++i ) { d->func( d->tempArray[ i ] ); } timerStat.stop("worker_processes"); - d->comm.send( d->masterRank, 1, d->tempArray, recvSize ); + d->comm.send( d->masterRank, 1, & d->tempArray[0], recvSize ); }