Updating names: SharedDataFunction::d => SharedDataFunction::_data, ParallelApplyData::_data => ParallelApplyData::_table

This commit is contained in:
Benjamin Bouvier 2012-07-18 17:37:50 +02:00
commit eebeaa810e
4 changed files with 44 additions and 41 deletions

View file

@ -222,7 +222,7 @@ namespace eo
* *
* The user is not bound to give a wrapped functor. * The user is not bound to give a wrapped functor.
*/ */
SharedDataFunction( Wrapped * w = 0 ) : _wrapped( w ), _needDelete( false ) SharedDataFunction( Wrapped * w = 0 ) : _data( 0 ), _wrapped( w ), _needDelete( false )
{ {
// empty // empty
} }
@ -255,23 +255,25 @@ namespace eo
* *
* Calls the setter on the functor and on the wrapped functors, in a Composite pattern fashion. * Calls the setter on the functor and on the wrapped functors, in a Composite pattern fashion.
*/ */
void data( JobData* _d ) void data( JobData* d )
{ {
d = _d; _data = d;
if( _wrapped ) if( _wrapped )
{ {
_wrapped->data( _d ); _wrapped->data( d );
} }
} }
/** /**
* @brief Returns true if we need to use operator delete on this wrapper, false otherwise. * @brief Returns true if we need to use operator delete on this wrapper, false otherwise.
*
     * Allows the user to reject delete responsibility to the framework, by setting this value to true.
**/ **/
bool needDelete() { return _needDelete; } bool needDelete() { return _needDelete; }
void needDelete( bool b ) { _needDelete = b; } void needDelete( bool b ) { _needDelete = b; }
protected: protected:
JobData* d; JobData* _data;
Wrapped* _wrapped; // Pointer and not a reference so as to be set at any time and to avoid affectation Wrapped* _wrapped; // Pointer and not a reference so as to be set at any time and to avoid affectation
bool _needDelete; bool _needDelete;
}; };

View file

@ -89,46 +89,46 @@ namespace eo
* @param _proc The functor to apply on each element in the table * @param _proc The functor to apply on each element in the table
* @param _masterRank The MPI rank of the master * @param _masterRank The MPI rank of the master
* @param _packetSize The number of elements on which the function will be applied by the worker, at a time. * @param _packetSize The number of elements on which the function will be applied by the worker, at a time.
* @param _pop The table to apply. If this value is NULL, user will have to call init() before launching the * @param table The table to apply. If this value is NULL, user will have to call init() before launching the
* job. * job.
*/ */
ParallelApplyData( ParallelApplyData(
eoUF<EOT&, void> & _proc, eoUF<EOT&, void> & _proc,
int _masterRank, int _masterRank,
int _packetSize, int _packetSize,
std::vector<EOT> * _pop = 0 std::vector<EOT> * table = 0
) : ) :
_data( _pop ), func( _proc ), index( 0 ), packetSize( _packetSize ), masterRank( _masterRank ), comm( Node::comm() ) _table( table ), func( _proc ), index( 0 ), packetSize( _packetSize ), masterRank( _masterRank ), comm( Node::comm() )
{ {
if ( _packetSize <= 0 ) if ( _packetSize <= 0 )
{ {
throw std::runtime_error("Packet size should not be negative."); throw std::runtime_error("Packet size should not be negative.");
} }
if( _pop ) if( table )
{ {
size = _pop->size(); size = table->size();
} }
} }
/** /**
* @brief Reinitializes the data for a new table to evaluate. * @brief Reinitializes the data for a new table to evaluate.
*/ */
void init( std::vector<EOT>& _pop ) void init( std::vector<EOT>& table )
{ {
index = 0; index = 0;
size = _pop.size(); size = table.size();
_data = &_pop; _table = &table;
assignedTasks.clear(); assignedTasks.clear();
} }
std::vector<EOT>& data() std::vector<EOT>& table()
{ {
return *_data; return *_table;
} }
// All elements are public since functors will often use them. // All elements are public since functors will often use them.
std::vector<EOT> * _data; std::vector<EOT> * _table;
eoUF<EOT&, void> & func; eoUF<EOT&, void> & func;
int index; int index;
int size; int size;
@ -153,7 +153,7 @@ namespace eo
class SendTaskParallelApply : public SendTaskFunction< ParallelApplyData<EOT> > class SendTaskParallelApply : public SendTaskFunction< ParallelApplyData<EOT> >
{ {
public: public:
using SendTaskFunction< ParallelApplyData<EOT> >::d; using SendTaskFunction< ParallelApplyData<EOT> >::_data;
SendTaskParallelApply( SendTaskParallelApply<EOT> * w = 0 ) : SendTaskFunction< ParallelApplyData<EOT> >( w ) SendTaskParallelApply( SendTaskParallelApply<EOT> * w = 0 ) : SendTaskFunction< ParallelApplyData<EOT> >( w )
{ {
@ -164,24 +164,24 @@ namespace eo
{ {
int futureIndex; int futureIndex;
if( d->index + d->packetSize < d->size ) if( _data->index + _data->packetSize < _data->size )
{ {
futureIndex = d->index + d->packetSize; futureIndex = _data->index + _data->packetSize;
} else { } else {
futureIndex = d->size; futureIndex = _data->size;
} }
int sentSize = futureIndex - d->index ; int sentSize = futureIndex - _data->index ;
d->comm.send( wrkRank, 1, sentSize ); _data->comm.send( wrkRank, 1, sentSize );
eo::log << eo::progress << "Evaluating individual " << d->index << std::endl; eo::log << eo::progress << "Evaluating individual " << _data->index << std::endl;
d->assignedTasks[ wrkRank ].index = d->index; _data->assignedTasks[ wrkRank ].index = _data->index;
d->assignedTasks[ wrkRank ].size = sentSize; _data->assignedTasks[ wrkRank ].size = sentSize;
d->comm.send( wrkRank, 1, & ( (d->data())[ d->index ] ) , sentSize ); _data->comm.send( wrkRank, 1, & ( (_data->table())[ _data->index ] ) , sentSize );
d->index = futureIndex; _data->index = futureIndex;
} }
}; };
@ -194,7 +194,7 @@ namespace eo
class HandleResponseParallelApply : public HandleResponseFunction< ParallelApplyData<EOT> > class HandleResponseParallelApply : public HandleResponseFunction< ParallelApplyData<EOT> >
{ {
public: public:
using HandleResponseFunction< ParallelApplyData<EOT> >::d; using HandleResponseFunction< ParallelApplyData<EOT> >::_data;
HandleResponseParallelApply( HandleResponseParallelApply<EOT> * w = 0 ) : HandleResponseFunction< ParallelApplyData<EOT> >( w ) HandleResponseParallelApply( HandleResponseParallelApply<EOT> * w = 0 ) : HandleResponseFunction< ParallelApplyData<EOT> >( w )
{ {
@ -203,7 +203,7 @@ namespace eo
void operator()(int wrkRank) void operator()(int wrkRank)
{ {
d->comm.recv( wrkRank, 1, & (d->data()[ d->assignedTasks[wrkRank].index ] ), d->assignedTasks[wrkRank].size ); _data->comm.recv( wrkRank, 1, & (_data->table()[ _data->assignedTasks[wrkRank].index ] ), _data->assignedTasks[wrkRank].size );
} }
}; };
@ -219,7 +219,7 @@ namespace eo
class ProcessTaskParallelApply : public ProcessTaskFunction< ParallelApplyData<EOT> > class ProcessTaskParallelApply : public ProcessTaskFunction< ParallelApplyData<EOT> >
{ {
public: public:
using ProcessTaskFunction< ParallelApplyData<EOT> >::d; using ProcessTaskFunction< ParallelApplyData<EOT> >::_data;
ProcessTaskParallelApply( ProcessTaskParallelApply<EOT> * w = 0 ) : ProcessTaskFunction< ParallelApplyData<EOT> >( w ) ProcessTaskParallelApply( ProcessTaskParallelApply<EOT> * w = 0 ) : ProcessTaskFunction< ParallelApplyData<EOT> >( w )
{ {
@ -230,16 +230,16 @@ namespace eo
{ {
int recvSize; int recvSize;
d->comm.recv( d->masterRank, 1, recvSize ); _data->comm.recv( _data->masterRank, 1, recvSize );
d->tempArray.resize( recvSize ); _data->tempArray.resize( recvSize );
d->comm.recv( d->masterRank, 1, & d->tempArray[0] , recvSize ); _data->comm.recv( _data->masterRank, 1, & _data->tempArray[0] , recvSize );
timerStat.start("worker_processes"); timerStat.start("worker_processes");
for( int i = 0; i < recvSize ; ++i ) for( int i = 0; i < recvSize ; ++i )
{ {
d->func( d->tempArray[ i ] ); _data->func( _data->tempArray[ i ] );
} }
timerStat.stop("worker_processes"); timerStat.stop("worker_processes");
d->comm.send( d->masterRank, 1, & d->tempArray[0], recvSize ); _data->comm.send( _data->masterRank, 1, & _data->tempArray[0], recvSize );
} }
}; };
@ -253,7 +253,7 @@ namespace eo
class IsFinishedParallelApply : public IsFinishedFunction< ParallelApplyData<EOT> > class IsFinishedParallelApply : public IsFinishedFunction< ParallelApplyData<EOT> >
{ {
public: public:
using IsFinishedFunction< ParallelApplyData<EOT> >::d; using IsFinishedFunction< ParallelApplyData<EOT> >::_data;
IsFinishedParallelApply( IsFinishedParallelApply<EOT> * w = 0 ) : IsFinishedFunction< ParallelApplyData<EOT> >( w ) IsFinishedParallelApply( IsFinishedParallelApply<EOT> * w = 0 ) : IsFinishedFunction< ParallelApplyData<EOT> >( w )
{ {
@ -262,7 +262,7 @@ namespace eo
bool operator()() bool operator()()
{ {
return d->index == d->size; return _data->index == _data->size;
} }
}; };

View file

@ -138,6 +138,7 @@ struct CatBestAnswers : public eo::mpi::HandleResponseParallelApply<EOT>
void operator()(int wrkRank) void operator()(int wrkRank)
{ {
eo::mpi::ParallelApplyData<EOT> * d = _data;
// Retrieve informations about the slice processed by the worker // Retrieve informations about the slice processed by the worker
int index = d->assignedTasks[wrkRank].index; int index = d->assignedTasks[wrkRank].index;
int size = d->assignedTasks[wrkRank].size; int size = d->assignedTasks[wrkRank].size;
@ -146,10 +147,10 @@ struct CatBestAnswers : public eo::mpi::HandleResponseParallelApply<EOT>
// Compare fitnesses of evaluated individuals with the best saved // Compare fitnesses of evaluated individuals with the best saved
for(int i = index; i < index+size; ++i) for(int i = index; i < index+size; ++i)
{ {
if( best.fitness() < d->data()[ i ].fitness() ) if( best.fitness() < d->table()[ i ].fitness() )
{ {
eo::log << eo::quiet << "Better solution found:" << d->data()[i].fitness() << std::endl; eo::log << eo::quiet << "Better solution found:" << d->table()[i].fitness() << std::endl;
best = d->data()[ i ]; best = d->table()[ i ];
} }
} }
} }

View file

@ -101,7 +101,7 @@ int main(int argc, char** argv)
// This is the only thing which changes: we wrap the IsFinished function. // This is the only thing which changes: we wrap the IsFinished function.
// According to RAII, we'll delete the invoked wrapper at the end of main; the store won't delete it // According to RAII, we'll delete the invoked wrapper at the end of main; the store won't delete it
// automatically. // automatically.
IsFinishedParallelApply* wrapper = new ShowWrappedResult<int>; IsFinishedParallelApply<int>* wrapper = new ShowWrappedResult<int>;
store.wrapIsFinished( wrapper ); store.wrapIsFinished( wrapper );
ParallelApply<int> job( assign, eo::mpi::DEFAULT_MASTER, store ); ParallelApply<int> job( assign, eo::mpi::DEFAULT_MASTER, store );