Code organisation: split header files into implementation files as much as possible (impossible for template functions, thank you C++)

This commit is contained in:
Benjamin Bouvier 2012-07-24 16:16:56 +02:00
commit 191b280371
10 changed files with 559 additions and 487 deletions

View file

@ -14,6 +14,9 @@ SET(LIBRARY_OUTPUT_PATH ${EOMPI_LIB_OUTPUT_PATH})
SET(EOMPI_SOURCES
eoMpi.cpp
eoMpiAssignmentAlgorithm.cpp
eoMpiNode.cpp
implMpi.cpp
)
ADD_LIBRARY(eompi STATIC ${EOMPI_SOURCES})

View file

@ -4,16 +4,24 @@ namespace eo
{
namespace mpi
{
bmpi::communicator Node::_comm;
/**********************************************
* *********** GLOBALS ************************
* *******************************************/
eoTimerStat timerStat;
}
}
namespace mpi
{
void broadcast( communicator & comm, int value, int root )
{
comm; // unused
MPI_Bcast( &value, 1, MPI_INT, root, MPI_COMM_WORLD );
namespace Channel
{
const int Commands = 0;
const int Messages = 1;
}
namespace Message
{
const int Continue = 0;
const int Finish = 1;
const int Kill = 2;
}
const int DEFAULT_MASTER = 0;
}
}

View file

@ -143,8 +143,8 @@ namespace eo
*/
namespace Channel
{
const int Commands = 0;
const int Messages = 1;
extern const int Commands;
extern const int Messages;
}
/**
@ -157,9 +157,9 @@ namespace eo
*/
namespace Message
{
const int Continue = 0;
const int Finish = 1;
const int Kill = 2;
extern const int Continue;
extern const int Finish;
extern const int Kill;
}
/**
@ -167,7 +167,7 @@ namespace eo
*
* @ingroup MPI
*/
const int DEFAULT_MASTER = 0;
extern const int DEFAULT_MASTER;
/**
* @brief Base class for the 4 algorithm functors.

View file

@ -0,0 +1,204 @@
# include "eoMpiAssignmentAlgorithm.h"
# include "eoMpiNode.h"
namespace eo
{
namespace mpi
{
const int REST_OF_THE_WORLD = -1;
/********************************************************
* DYNAMIC ASSIGNMENT ALGORITHM *************************
*******************************************************/
// Registers every host of the world but the master (rank 0) as an
// available worker.
DynamicAssignmentAlgorithm::DynamicAssignmentAlgorithm( )
{
    const int worldSize = Node::comm().size();
    for( int rank = 1; rank < worldSize; ++rank )
    {
        availableWrk.push_back( rank );
    }
}
// Uses a single host, identified by its MPI rank, as the only worker.
DynamicAssignmentAlgorithm::DynamicAssignmentAlgorithm( int unique )
{
    availableWrk.assign( 1, unique );
}
// Takes the given list of MPI ranks as the initial pool of idle workers.
DynamicAssignmentAlgorithm::DynamicAssignmentAlgorithm( const std::vector<int> & workers )
    : availableWrk( workers )
{
}
// Uses the contiguous range of ranks [first, last] as workers. Passing
// REST_OF_THE_WORLD as last means "up to the highest existing rank".
DynamicAssignmentAlgorithm::DynamicAssignmentAlgorithm( int first, int last )
{
    const int upper =
        ( last == REST_OF_THE_WORLD ) ? Node::comm().size() - 1 : last;
    for( int rank = first; rank <= upper; ++rank )
    {
        availableWrk.push_back( rank );
    }
}
// Pops an idle worker's rank from the pool; returns -1 when none is idle.
int DynamicAssignmentAlgorithm::get( )
{
    if( availableWrk.empty() )
    {
        return -1;
    }
    const int assignee = availableWrk.back();
    availableWrk.pop_back();
    return assignee;
}
// Number of workers currently idle.
int DynamicAssignmentAlgorithm::availableWorkers()
{
    return static_cast<int>( availableWrk.size() );
}
// A worker has finished its task: its rank goes back into the idle pool.
void DynamicAssignmentAlgorithm::confirm( int rank )
{
    availableWrk.insert( availableWrk.end(), rank );
}
// Returns a copy of the idle-worker pool.
std::vector<int> DynamicAssignmentAlgorithm::idles( )
{
    std::vector<int> snapshot( availableWrk );
    return snapshot;
}
/**
 * @brief No-op: a dynamic assignment does not depend on the number of runs.
 *
 * @param _ Ignored.
 */
void DynamicAssignmentAlgorithm::reinit( int _ )
{
    // FIX: the previous "++_;" mutated the parameter just to silence an
    // unused-variable warning; a void cast states the intent directly.
    (void) _;
    // nothing to do
}
/********************************************************
* STATIC ASSIGNMENT ALGORITHM **************************
*******************************************************/
// Delegates to init() with an explicitly provided worker list.
StaticAssignmentAlgorithm::StaticAssignmentAlgorithm( std::vector<int>& workers, int runs )
{
    init( workers, runs );
}
// Workers are the ranks of the range [first, last]; REST_OF_THE_WORLD as
// last selects every rank up to the highest existing one.
StaticAssignmentAlgorithm::StaticAssignmentAlgorithm( int first, int last, int runs )
{
    const int upper =
        ( last == REST_OF_THE_WORLD ) ? Node::comm().size() - 1 : last;
    std::vector<int> workers;
    for( int rank = first; rank <= upper; ++rank )
    {
        workers.push_back( rank );
    }
    init( workers, runs );
}
// Every host except the master (rank 0) becomes a worker.
StaticAssignmentAlgorithm::StaticAssignmentAlgorithm( int runs )
{
    const int worldSize = Node::comm().size();
    std::vector<int> workers;
    for( int rank = 1; rank < worldSize; ++rank )
    {
        workers.push_back( rank );
    }
    init( workers, runs );
}
// A single host, identified by its MPI rank, performs all the runs.
StaticAssignmentAlgorithm::StaticAssignmentAlgorithm( int unique, int runs )
{
    std::vector<int> workers( 1, unique );
    init( workers, runs );
}
/**
 * @brief Distributes a fixed number of runs over the given workers.
 *
 * Each worker receives floor(runs / nbWorkers) runs, and the first
 * (runs % nbWorkers) workers receive one extra run, so the total is
 * exactly `runs`.
 *
 * @param workers Vector of the workers' MPI ranks.
 * @param runs Total amount of runs; if <= 0, attributions are left
 *             untouched and must be set later through reinit().
 */
void StaticAssignmentAlgorithm::init( std::vector<int> & workers, int runs )
{
    unsigned int nbWorkers = workers.size();
    freeWorkers = nbWorkers;
    busy.clear();
    busy.resize( nbWorkers, false );
    realRank = workers;

    // Guard nbWorkers == 0 too: the divisions below would otherwise
    // divide by zero.
    if( runs <= 0 || nbWorkers == 0 )
    {
        return;
    }

    // Euclidean division: runs == q * nbWorkers + r, with 0 <= r < nbWorkers.
    // FIX: the previous version only reserve()d the vector and then wrote
    // through operator[], which is out-of-bounds (undefined behaviour);
    // assign() both sizes the vector and gives q runs to every worker.
    unsigned int q = runs / nbWorkers;
    attributions.assign( nbWorkers, q );
    // The r first workers each take one of the remaining runs.
    unsigned int r = runs - q * nbWorkers;
    for( unsigned int i = 0; i < r; ++i )
    {
        ++attributions[ i ];
    }
}
// Returns the rank of the first idle worker that still has runs to
// perform, marking it busy; -1 when every worker is busy or exhausted.
int StaticAssignmentAlgorithm::get( )
{
    for( unsigned i = 0; i < busy.size(); ++i )
    {
        if( busy[i] || attributions[i] <= 0 )
        {
            continue;
        }
        busy[i] = true;
        --freeWorkers;
        return realRank[ i ];
    }
    return -1;
}
// Number of workers which are not currently busy.
int StaticAssignmentAlgorithm::availableWorkers( )
{
    return freeWorkers;
}
// Collects the real MPI ranks of every worker that is not busy.
std::vector<int> StaticAssignmentAlgorithm::idles()
{
    std::vector<int> notBusy;
    const unsigned int count = busy.size();
    for( unsigned int i = 0; i < count; ++i )
    {
        if( !busy[i] )
        {
            notBusy.push_back( realRank[i] );
        }
    }
    return notBusy;
}
/**
 * @brief Marks a worker as having completed one run.
 *
 * Decrements the worker's remaining attributions and returns it to the
 * idle pool.
 *
 * @param rank Real MPI rank of the worker which has just answered.
 */
void StaticAssignmentAlgorithm::confirm( int rank )
{
    int i = -1; // i is the real index in table
    for( unsigned int j = 0; j < realRank.size(); ++j )
    {
        if( realRank[j] == rank )
        {
            i = j;
            break;
        }
    }
    // FIX: guard against an unknown rank, which previously left i == -1
    // and caused an out-of-bounds write on attributions[-1].
    if( i == -1 )
    {
        return;
    }
    --attributions[ i ];
    busy[ i ] = false;
    ++freeWorkers;
}
// Redistributes a new amount of runs over the same set of workers.
void StaticAssignmentAlgorithm::reinit( int runs )
{
    init( realRank, runs );
}
}
}

View file

@ -23,7 +23,6 @@ Authors:
# define __MPI_ASSIGNMENT_ALGORITHM_H__
# include <vector> // std::vector
# include "eoMpiNode.h"
namespace eo
{
@ -35,7 +34,7 @@ namespace eo
*
* @ingroup MPI
*/
const int REST_OF_THE_WORLD = -1;
extern const int REST_OF_THE_WORLD;
/**
* @brief Contains informations on the available workers and allows to find assignees for jobs.
@ -115,33 +114,21 @@ namespace eo
/**
* @brief Uses all the hosts whose rank is higher to 1, inclusive, as workers.
*/
DynamicAssignmentAlgorithm( )
{
for(int i = 1; i < Node::comm().size(); ++i)
{
availableWrk.push_back( i );
}
}
DynamicAssignmentAlgorithm( );
/**
* @brief Uses the unique host with given rank as a worker.
*
* @param unique MPI rank of the unique worker.
*/
DynamicAssignmentAlgorithm( int unique )
{
availableWrk.push_back( unique );
}
DynamicAssignmentAlgorithm( int unique );
/**
* @brief Uses the workers whose ranks are present in the argument as workers.
*
* @param workers std::vector containing MPI ranks of workers.
*/
DynamicAssignmentAlgorithm( const std::vector<int> & workers )
{
availableWrk = workers;
}
DynamicAssignmentAlgorithm( const std::vector<int> & workers );
/**
* @brief Uses a range of ranks as workers.
@ -150,50 +137,17 @@ namespace eo
* @param last The last worker to be included (inclusive). If last == eo::mpi::REST_OF_THE_WORLD, all
* hosts whose rank is higher than first are taken.
*/
DynamicAssignmentAlgorithm( int first, int last )
{
if( last == REST_OF_THE_WORLD )
{
last = Node::comm().size() - 1;
}
DynamicAssignmentAlgorithm( int first, int last );
for( int i = first; i <= last; ++i)
{
availableWrk.push_back( i );
}
}
virtual int get( );
virtual int get( )
{
int assignee = -1;
if (! availableWrk.empty() )
{
assignee = availableWrk.back();
availableWrk.pop_back();
}
return assignee;
}
int availableWorkers();
int availableWorkers()
{
return availableWrk.size();
}
void confirm( int rank );
void confirm( int rank )
{
availableWrk.push_back( rank );
}
std::vector<int> idles( );
std::vector<int> idles( )
{
return availableWrk;
}
void reinit( int _ )
{
++_;
// nothing to do
}
void reinit( int _ );
protected:
std::vector< int > availableWrk;
@ -223,10 +177,7 @@ namespace eo
* @param workers std::vector of MPI ranks of workers which will be used.
* @param runs Fixed amount of runs, strictly positive.
*/
StaticAssignmentAlgorithm( std::vector<int>& workers, int runs )
{
init( workers, runs );
}
StaticAssignmentAlgorithm( std::vector<int>& workers, int runs );
/**
* @brief Uses a range of workers.
@ -236,21 +187,7 @@ namespace eo
* workers from the first one are taken as workers.
* @param runs Fixed amount of runs, strictly positive.
*/
StaticAssignmentAlgorithm( int first, int last, int runs )
{
std::vector<int> workers;
if( last == REST_OF_THE_WORLD )
{
last = Node::comm().size() - 1;
}
for(int i = first; i <= last; ++i)
{
workers.push_back( i );
}
init( workers, runs );
}
StaticAssignmentAlgorithm( int first, int last, int runs );
/**
* @brief Uses all the hosts whose rank is higher than 1 (inclusive) as workers.
@ -258,16 +195,7 @@ namespace eo
* @param runs Fixed amount of runs, strictly positive. If it's not set, you'll have to call reinit()
* later.
*/
StaticAssignmentAlgorithm( int runs = 0 )
{
std::vector<int> workers;
for(int i = 1; i < Node::comm().size(); ++i)
{
workers.push_back( i );
}
init( workers, runs );
}
StaticAssignmentAlgorithm( int runs = 0 );
/**
* @brief Uses an unique host as worker.
@ -275,12 +203,7 @@ namespace eo
* @param unique The MPI rank of the host which will be the worker.
* @param runs Fixed amount of runs, strictly positive.
*/
StaticAssignmentAlgorithm( int unique, int runs )
{
std::vector<int> workers;
workers.push_back( unique );
init( workers, runs );
}
StaticAssignmentAlgorithm( int unique, int runs );
private:
/**
@ -292,89 +215,18 @@ namespace eo
* @param workers Vector of hosts' ranks
* @param runs Fixed amount of runs, strictly positive.
*/
void init( std::vector<int> & workers, int runs )
{
unsigned int nbWorkers = workers.size();
freeWorkers = nbWorkers;
busy.clear();
busy.resize( nbWorkers, false );
realRank = workers;
if( runs <= 0 )
{
return;
}
attributions.clear();
attributions.reserve( nbWorkers );
// Let be the euclidean division of runs by nbWorkers :
// runs == q * nbWorkers + r, 0 <= r < nbWorkers
// This one liner affects q requests to each worker
for (unsigned int i = 0; i < nbWorkers; attributions[i++] = runs / nbWorkers) ;
// The first line computes r and the one liner affects the remaining
// r requests to workers, in ascending order
unsigned int diff = runs - (runs / nbWorkers) * nbWorkers;
for (unsigned int i = 0; i < diff; ++attributions[i++]);
}
void init( std::vector<int> & workers, int runs );
public:
int get( )
{
int assignee = -1;
for( unsigned i = 0; i < busy.size(); ++i )
{
if( !busy[i] && attributions[i] > 0 )
{
busy[i] = true;
--freeWorkers;
assignee = realRank[ i ];
break;
}
}
return assignee;
}
int get( );
int availableWorkers( )
{
return freeWorkers;
}
int availableWorkers( );
std::vector<int> idles()
{
std::vector<int> ret;
for(unsigned int i = 0; i < busy.size(); ++i)
{
if( !busy[i] )
{
ret.push_back( realRank[i] );
}
}
return ret;
}
std::vector<int> idles();
void confirm( int rank )
{
int i = -1; // i is the real index in table
for( unsigned int j = 0; j < realRank.size(); ++j )
{
if( realRank[j] == rank )
{
i = j;
break;
}
}
void confirm( int rank );
--attributions[ i ];
busy[ i ] = false;
++freeWorkers;
}
void reinit( int runs )
{
init( realRank, runs );
}
void reinit( int runs );
private:
std::vector<int> attributions;

19
eo/src/mpi/eoMpiNode.cpp Normal file
View file

@ -0,0 +1,19 @@
# include "eoMpiNode.h"
namespace eo
{
namespace mpi
{
// Initializes the MPI runtime for this process.
// The static local guarantees the environment is constructed only on the
// first call, and destroyed (finalizing MPI) automatically at program exit.
void Node::init( int argc, char** argv )
{
    static bmpi::environment env( argc, argv );
}
// Accessor for the single, process-wide communicator instance.
bmpi::communicator& Node::comm()
{
    return _comm;
}
bmpi::communicator Node::_comm;
}
}

View file

@ -22,7 +22,7 @@ Authors:
# ifndef __MPI_NODE_H__
# define __MPI_NODE_H__
# include "implMpi.hpp"
# include "implMpi.h"
namespace bmpi = mpi;
namespace eo
@ -30,9 +30,9 @@ namespace eo
namespace mpi
{
/**
* @brief Global object used to reach boost::mpi::communicator everywhere.
* @brief Global object used to reach mpi::communicator everywhere.
*
* boost::mpi::communicator is the main object used to send and receive messages between the different hosts of
* mpi::communicator is the main object used to send and receive messages between the different hosts of
* a MPI algorithm.
*
* @ingroup MPI
@ -49,18 +49,12 @@ namespace eo
* @param argc Main's argc
* @param argv Main's argv
*/
static void init( int argc, char** argv )
{
static bmpi::environment env( argc, argv );
}
static void init( int argc, char** argv );
/**
* @brief Returns the global boost::mpi::communicator
* @brief Returns the global mpi::communicator
*/
static bmpi::communicator& comm()
{
return _comm;
}
static bmpi::communicator& comm();
protected:
static bmpi::communicator _comm;

146
eo/src/mpi/implMpi.cpp Normal file
View file

@ -0,0 +1,146 @@
# include "implMpi.h"
namespace mpi
{
const int any_source = MPI_ANY_SOURCE;
const int any_tag = MPI_ANY_TAG;
// Starts the MPI runtime; must run before any other MPI call.
environment::environment(int argc, char**argv)
{
    MPI_Init(&argc, &argv);
}
// Shuts the MPI runtime down; no MPI call is allowed afterwards.
environment::~environment()
{
    MPI_Finalize();
}
// Copies the relevant fields out of a raw MPI_Status.
status::status( const MPI_Status & s ) :
    _source( s.MPI_SOURCE ),
    _tag( s.MPI_TAG ),
    _error( s.MPI_ERROR )
{
}
// Rank and size start at -1 (meaning "not fetched yet"); the reception
// buffer starts empty.
communicator::communicator( ) :
    _rank( -1 ),
    _size( -1 ),
    _buf( 0 ),
    _bufsize( -1 )
{
}
/**
 * @brief Releases the reusable reception buffer, if any.
 */
communicator::~communicator()
{
    // FIX: _buf is allocated with new char[...] (see recv for strings),
    // so it must be released with delete[] — the previous plain delete
    // was undefined behaviour.
    if( _buf )
    {
        delete [] _buf;
        _buf = 0;
    }
}
// Rank of this host in MPI_COMM_WORLD; fetched lazily and cached
// (-1 means "not fetched yet").
int communicator::rank()
{
    if ( _rank == -1 )
    {
        MPI_Comm_rank( MPI_COMM_WORLD, &_rank );
    }
    return _rank;
}
// Number of hosts in MPI_COMM_WORLD; fetched lazily and cached
// (-1 means "not fetched yet").
int communicator::size()
{
    if ( _size == -1 )
    {
        MPI_Comm_size( MPI_COMM_WORLD, &_size );
    }
    return _size;
}
/*
* SEND / RECV INT
*/
// Sends a single int to dest with the given tag (blocking).
void communicator::send( int dest, int tag, int n )
{
    MPI_Send( &n, 1, MPI_INT, dest, tag, MPI_COMM_WORLD );
}
// Receives a single int from src with the given tag (blocking).
// The MPI_Status is discarded.
void communicator::recv( int src, int tag, int& n )
{
    MPI_Status stat;
    MPI_Recv( &n, 1, MPI_INT, src, tag, MPI_COMM_WORLD , &stat );
}
/*
* SEND / RECV STRING
*/
/**
 * @brief Sends a C++ string: first its size (including the terminating
 * '\0'), then its characters.
 */
void communicator::send( int dest, int tag, const std::string& str )
{
    int size = str.size() + 1; // + 1 for the terminating '\0'
    send( dest, tag, size );
    // FIX: named const_cast instead of a C-style cast — the legacy MPI C
    // API is not const-correct, but MPI_Send does not modify the buffer.
    MPI_Send( const_cast<char*>( str.c_str() ), size, MPI_CHAR, dest, tag, MPI_COMM_WORLD);
}
// Receives a string sent by the matching send(): first its size, then the
// characters, through a reusable reception buffer.
void communicator::recv( int src, int tag, std::string& str )
{
    int size = -1;
    MPI_Status stat;
    // The sender transmits the buffer size first.
    recv( src, tag, size );

    // (Re)allocate the reusable buffer when it is missing or too small.
    if( _buf == 0 || _bufsize < size )
    {
        delete [] _buf; // deleting a null pointer is a no-op
        _buf = new char[ size ];
        _bufsize = size;
    }

    MPI_Recv( _buf, size, MPI_CHAR, src, tag, MPI_COMM_WORLD, &stat );
    str.assign( _buf );
}
/*
* SEND / RECV Objects
*/
// Serializes a Persistent object into its textual form and sends that
// string to dest.
void communicator::send( int dest, int tag, const eoserial::Persistent & persistent )
{
    eoserial::Object* obj = persistent.pack();
    std::stringstream ss;
    obj->print( ss );
    delete obj;
    send( dest, tag, ss.str() );
}
// Receives the textual form of a serialized object and unpacks it into
// the given Persistent instance.
void communicator::recv( int src, int tag, eoserial::Persistent & persistent )
{
    std::string asText;
    recv( src, tag, asText );
    eoserial::Object* obj = eoserial::Parser::parse( asText );
    persistent.unpack( obj );
    delete obj;
}
/*
* Other methods
*/
// Blocks until a message matching (src, tag) is available, without
// receiving it; returns a copy of its status.
status communicator::probe( int src, int tag )
{
    MPI_Status stat;
    MPI_Probe( src, tag, MPI_COMM_WORLD , &stat );
    return status( stat );
}
// Blocks until every host of MPI_COMM_WORLD has reached this point.
void communicator::barrier()
{
    MPI_Barrier( MPI_COMM_WORLD );
}
/**
 * @brief Broadcasts an integer from root to every host of MPI_COMM_WORLD.
 *
 * @warning `value` is taken by copy, so on non-root hosts the broadcasted
 * result is lost when the function returns — callers cannot retrieve it.
 * Kept as-is to preserve the declared interface; consider an int&
 * overload if receivers need the value.
 */
void broadcast( communicator & comm, int value, int root )
{
    // FIX: the bare "comm;" expression statement triggers warnings; a
    // void cast is the idiomatic way to mark the parameter as unused
    // (MPI_COMM_WORLD is hard-coded below).
    (void) comm;
    MPI_Bcast( &value, 1, MPI_INT, root, MPI_COMM_WORLD );
}
}

129
eo/src/mpi/implMpi.h Normal file
View file

@ -0,0 +1,129 @@
# ifndef __EO_MPI_HPP__
# define __EO_MPI_HPP__
# include <mpi.h>
# include <serial/eoSerial.h>
namespace mpi
{
extern const int any_source;
extern const int any_tag;
/**
 * @brief RAII wrapper around the MPI runtime: construction initializes
 * MPI, destruction finalizes it (see implMpi.cpp).
 */
class environment
{
    public:
        // @param argc Main's argc, @param argv Main's argv.
        environment(int argc, char**argv);
        ~environment();
};
/**
 * @brief Lightweight copy of an MPI_Status: message source, tag and
 * error code.
 */
class status
{
    public:
        status( const MPI_Status & s );

        int tag() { return _tag; }
        int error() { return _error; }
        int source() { return _source; }

    private:
        int _source;
        int _tag;
        int _error;
};
/**
 * @brief Minimal stand-in for boost::mpi's communicator, backed by the raw
 * MPI C API and hard-wired to MPI_COMM_WORLD (see implMpi.cpp).
 *
 * Strings are received through an internal reusable buffer, so the class
 * is not thread-safe. Copying is not disabled here — TODO confirm callers
 * never copy it (a copy would double-delete _buf).
 */
class communicator
{
    public:
        communicator( );
        ~communicator();

        /// @brief Rank of this host in MPI_COMM_WORLD (cached after first call).
        int rank();
        /// @brief Number of hosts in MPI_COMM_WORLD (cached after first call).
        int size();

        /*
         * SEND / RECV INT
         */
        void send( int dest, int tag, int n );
        void recv( int src, int tag, int& n );

        /*
         * SEND / RECV STRING
         */
        void send( int dest, int tag, const std::string& str );
        void recv( int src, int tag, std::string& str );

        /*
         * SEND / RECV Objects
         */
        /// @brief Sends a serializable object as its textual form.
        void send( int dest, int tag, const eoserial::Persistent & persistent );

        /**
         * @brief Sends an array of serializable values as one message.
         *
         * T must provide pack(); kept in the header because it is a
         * template (cannot move to the .cpp).
         *
         * @param table Pointer to the first element.
         * @param size Number of elements to send.
         */
        template< class T >
        void send( int dest, int tag, T* table, int size )
        {
            // Puts all the values into an array
            eoserial::Array* array = new eoserial::Array;
            for( int i = 0; i < size; ++i )
            {
                array->push_back( table[i].pack() );
            }
            // Encapsulates the array into an object
            eoserial::Object* obj = new eoserial::Object;
            obj->add( "array", array );
            std::stringstream ss;
            obj->print( ss );
            delete obj;
            // Sends the object as a string
            send( dest, tag, ss.str() );
        }

        /// @brief Receives a serializable object sent by the matching send().
        void recv( int src, int tag, eoserial::Persistent & persistent );

        /**
         * @brief Receives an array of serializable values sent by the
         * matching templated send().
         *
         * @param table Destination buffer; must already hold size elements.
         * @param size Number of elements to read.
         */
        template< class T >
        void recv( int src, int tag, T* table, int size )
        {
            // Receives the string which contains the object
            std::string asText;
            recv( src, tag, asText );
            // Parses the object and retrieves the table
            eoserial::Object* obj = eoserial::Parser::parse( asText );
            eoserial::Array* array = static_cast<eoserial::Array*>( (*obj)["array"] );
            // Retrieves all the values from the array
            for( int i = 0; i < size; ++i )
            {
                eoserial::unpackObject( *array, i, table[i] );
            }
            delete obj;
        }

        /*
         * Other methods
         */
        /// @brief Blocking probe; defaults accept any source and any tag.
        status probe( int src = any_source, int tag = any_tag );
        /// @brief Synchronization barrier over MPI_COMM_WORLD.
        void barrier();

    private:
        int _rank;      // cached rank, -1 until first rank() call
        int _size;      // cached world size, -1 until first size() call
        char* _buf;     // reusable reception buffer for strings
        int _bufsize;   // current capacity of _buf
};
void broadcast( communicator & comm, int value, int root );
} // namespace mpi
# endif //__EO_MPI_HPP__

View file

@ -1,283 +0,0 @@
# ifndef __EO_MPI_HPP__
# define __EO_MPI_HPP__
# include <mpi.h>
# include <serial/eoSerial.h>
# include <fstream>
namespace mpi
{
const int any_source = MPI_ANY_SOURCE;
const int any_tag = MPI_ANY_TAG;
class environment
{
public:
environment(int argc, char**argv)
{
MPI_Init(&argc, &argv);
}
~environment()
{
MPI_Finalize();
}
};
class status
{
public:
status( const MPI_Status & s )
{
_source = s.MPI_SOURCE;
_tag = s.MPI_TAG;
_error = s.MPI_ERROR;
}
int tag() { return _tag; }
int error() { return _error; }
int source() { return _source; }
private:
int _source;
int _tag;
int _error;
};
class communicator
{
public:
communicator( )
{
_rank = -1;
_size = -1;
_buf = 0;
_bufsize = -1;
}
/**
 * @brief Releases the reusable reception buffer, if any.
 */
~communicator()
{
    // FIX: _buf is allocated with new char[...] in recv(), so the
    // matching delete[] must be used — plain delete was undefined
    // behaviour.
    if( _buf )
    {
        delete [] _buf;
        _buf = 0;
    }
}
int rank()
{
if ( _rank == -1 )
{
MPI_Comm_rank( MPI_COMM_WORLD, &_rank );
}
return _rank;
}
int size()
{
if ( _size == -1 )
{
MPI_Comm_size( MPI_COMM_WORLD, &_size );
}
return _size;
}
/*
* SEND / RECV INT
*/
void send( int dest, int tag, int n )
{
//send( dest, tag, &n, 1 );
MPI_Send( &n, 1, MPI_INT, dest, tag, MPI_COMM_WORLD );
}
void recv( int src, int tag, int& n )
{
MPI_Status stat;
MPI_Recv( &n, 1, MPI_INT, src, tag, MPI_COMM_WORLD , &stat );
//recv( src, tag, &n, 1 );
}
/*
void send( int dest, int tag, int* table, int size )
{
MPI_Send( table, size, MPI_INT, dest, tag, MPI_COMM_WORLD );
}
void recv( int src, int tag, int* table, int size )
{
MPI_Status stat;
MPI_Recv( table, size, MPI_INT, src, tag, MPI_COMM_WORLD , &stat );
}
*/
/*
* SEND / RECV STRING
*/
void send( int dest, int tag, const std::string& str )
{
int size = str.size() + 1;
send( dest, tag, size );
MPI_Send( (char*)str.c_str(), size, MPI_CHAR, dest, tag, MPI_COMM_WORLD);
}
void recv( int src, int tag, std::string& str )
{
int size = -1;
MPI_Status stat;
recv( src, tag, size );
if( _buf == 0 )
{
_buf = new char[ size ];
_bufsize = size;
} else if( _bufsize < size )
{
delete [] _buf;
_buf = new char[ size ];
_bufsize = size;
}
MPI_Recv( _buf, size, MPI_CHAR, src, tag, MPI_COMM_WORLD, &stat );
str.assign( _buf );
}
/*
* SEND / RECV Objects
*/
void send( int dest, int tag, const eoserial::Persistent & persistent )
{
eoserial::Object* obj = persistent.pack();
std::stringstream ss;
obj->print( ss );
delete obj;
send( dest, tag, ss.str() );
}
/*
void send( int dest, int tag, eoserial::Persistent* table, int size )
{
// Puts all the values into an array
eoserial::Array* array = new eoserial::Array;
std::cout << "DEBUG EO: @ premier: " << table << " / @ second: " << table+1 << std::endl;
for( int i = 0; i < size; ++i )
{
array->push_back( table[i].pack() );
}
// Encapsulates the array into an object
eoserial::Object* obj = new eoserial::Object;
obj->add( "array", array );
std::stringstream ss;
obj->print( ss );
delete obj;
// Sends the object as a string
send( dest, tag, ss.str() );
}
*/
template< class T >
void send( int dest, int tag, T* table, int size )
{
// Puts all the values into an array
eoserial::Array* array = new eoserial::Array;
for( int i = 0; i < size; ++i )
{
array->push_back( table[i].pack() );
}
// Encapsulates the array into an object
eoserial::Object* obj = new eoserial::Object;
obj->add( "array", array );
std::stringstream ss;
obj->print( ss );
delete obj;
// Sends the object as a string
send( dest, tag, ss.str() );
}
void recv( int src, int tag, eoserial::Persistent & persistent )
{
std::string asText;
recv( src, tag, asText );
eoserial::Object* obj = eoserial::Parser::parse( asText );
persistent.unpack( obj );
delete obj;
}
/*
void recv( int src, int tag, eoserial::Persistent* table, int size )
{
// Receives the string which contains the object
std::string asText;
recv( src, tag, asText );
// Parses the object and retrieves the table
eoserial::Object* obj = eoserial::Parser::parse( asText );
eoserial::Array* array = static_cast<eoserial::Array*>( (*obj)["array"] );
// Retrieves all the values from the array
for( int i = 0; i < size; ++i )
{
eoserial::unpackObject( *array, i, table[i] );
}
delete obj;
}
*/
template< class T >
void recv( int src, int tag, T* table, int size )
{
// Receives the string which contains the object
std::string asText;
recv( src, tag, asText );
// Parses the object and retrieves the table
eoserial::Object* obj = eoserial::Parser::parse( asText );
eoserial::Array* array = static_cast<eoserial::Array*>( (*obj)["array"] );
// Retrieves all the values from the array
for( int i = 0; i < size; ++i )
{
eoserial::unpackObject( *array, i, table[i] );
}
delete obj;
}
/*
* Other methods
*/
status probe( int src = any_source, int tag = any_tag )
{
MPI_Status stat;
MPI_Probe( src, tag, MPI_COMM_WORLD , &stat );
return status( stat );
}
void barrier()
{
MPI_Barrier( MPI_COMM_WORLD );
}
private:
int _rank;
int _size;
char* _buf;
int _bufsize;
};
void broadcast( communicator & comm, int value, int root );
} // namespace mpi
# endif //__EO_MPI_HPP__