#ifndef MPICONTAINER_H_
#define MPICONTAINER_H_
#ifdef MERCURYDPM_USE_MPI
#include <mpi.h>
#endif
// MERCURYDPM_ASSERTS: force-enabled, force-disabled, or otherwise derived from NDEBUG.
#ifdef MERCURYDPM_FORCE_ASSERTS
#define MERCURYDPM_ASSERTS true
#else
#ifdef MERCURYDPM_NO_ASSERTS
#define MERCURYDPM_ASSERTS false
#else
#ifdef NDEBUG
#define MERCURYDPM_ASSERTS false
#else
#define MERCURYDPM_ASSERTS true
#endif
#endif
#endif
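// Note (assumption, not from this header): the macro cascade above means the checks can be
// toggled from the compiler command line, independently of the build type, e.g.
//
//   c++ -DMERCURYDPM_USE_MPI -DMERCURYDPM_FORCE_ASSERTS ...   // MERCURYDPM_ASSERTS == true
//   c++ -DMERCURYDPM_USE_MPI -DNDEBUG ...                     // MERCURYDPM_ASSERTS == false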
#ifdef MERCURYDPM_USE_MPI
// Detail::toMPIType: map a C++ scalar type T onto the matching MPI_Datatype.
// Integral overload:
MPI_Type_match_size(MPI_TYPECLASS_INTEGER, sizeof(T), &type);
// Floating-point overload:
MPI_Type_match_size(MPI_TYPECLASS_REAL, sizeof(T), &type);
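// Usage sketch (illustrative, not part of the header): Detail::toMPIType deduces the
// MPI_Datatype from the argument's C++ type, so call sites never spell out MPI_INT or
// MPI_DOUBLE themselves. The function name below is hypothetical.
#ifdef MERCURYDPM_USE_MPI
inline void exampleToMPIType()
{
    unsigned long n = 10;
    double r = 0.5;
    MPI_Datatype nType = Detail::toMPIType(n); // an integer type of sizeof(unsigned long) bytes
    MPI_Datatype rType = Detail::toMPIType(r); // a real type of sizeof(double) bytes
    (void) nType; (void) rType;
}
#endif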
#ifdef MERCURYDPM_USE_MPI
MPI_Waitall(pending_.size(), pending_.data(), MPI_STATUSES_IGNORE);
MPI_Barrier(communicator_);
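// Usage sketch (illustrative, not part of the header): each asynchronous call below pushes
// its MPI_Request onto pending_, so the typical pattern is a batch of send()/receive() calls
// followed by a single sync() that completes the whole batch. All names except the
// MPIContainer interface are hypothetical.
#ifdef MERCURYDPM_USE_MPI
inline void exampleBatchedExchange(MPIContainer& communicator, int partner, int tag)
{
    double outgoing = 1.0;
    double incoming = 0.0;
    communicator.send(outgoing, partner, tag);    // non-blocking, request queued
    communicator.receive(incoming, partner, tag); // non-blocking, request queued
    communicator.sync(); // MPI_Waitall on all pending requests, then MPI_Barrier
}
#endif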
#if MERCURYDPM_ASSERTS
if (to == processorID_)
    logger(FATAL, "[MPI FATAL]: Sending data to self!");
#endif
#ifdef MERCURYDPM_USE_MPI
MPI_Request request;
MPI_Isend(&t, 1, Detail::toMPIType(t), to, tag, communicator_, &request);
pending_.push_back(request);
#if MERCURYDPM_ASSERTS
if (to == processorID_)
    logger(FATAL, "[MPI FATAL]: Sending data to self!");
if (count == 0)
    logger(WARN, "[MPI ERROR]: Sending zero data");
#endif
#ifdef MERCURYDPM_USE_MPI
MPI_Request request;
MPI_Isend(t, count, Detail::toMPIType(*t), to, tag, communicator_, &request);
pending_.push_back(request);
#if MERCURYDPM_ASSERTS
if (from == processorID_)
    logger(FATAL, "[MPI FATAL]: Receiving data from self!");
#endif
#ifdef MERCURYDPM_USE_MPI
MPI_Request request;
MPI_Irecv(&t, 1, Detail::toMPIType(t), from, tag, communicator_, &request);
pending_.push_back(request);
#if MERCURYDPM_ASSERTS
if (from == processorID_)
    logger(FATAL, "[MPI FATAL]: Receiving data from self!");
if (count == 0)
    logger(WARN, "[MPI ERROR]: Receiving zero data");
#endif
#ifdef MERCURYDPM_USE_MPI
MPI_Request request;
MPI_Irecv(t, count, Detail::toMPIType(*t), from, tag, communicator_, &request);
pending_.push_back(request);
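// Usage sketch (illustrative, not part of the header): the pointer overloads move 'count'
// scalars at once; both buffers must stay untouched until sync(). Names are hypothetical;
// in the kernel the tag is usually built from the MercuryMPITag enum.
#ifdef MERCURYDPM_USE_MPI
inline void exampleArrayExchange(MPIContainer& communicator, int partner, int tag)
{
    std::vector<double> outgoing(100, 1.0);
    std::vector<double> incoming(100, 0.0);
    communicator.send(outgoing.data(), static_cast<int>(outgoing.size()), partner, tag);
    communicator.receive(incoming.data(), static_cast<int>(incoming.size()), partner, tag);
    communicator.sync(); // only now may incoming be read and outgoing be reused
}
#endif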
#if MERCURYDPM_ASSERTS
if (to == processorID_)
    logger(FATAL, "[MPI FATAL]: Sending data to self!");
if (count == 0)
    logger(WARN, "[MPI ERROR]: Sending zero data");
#endif
#ifdef MERCURYDPM_USE_MPI
MPI_Request request;
MPI_Isend(t, count, dataTypes_[type], to, tag, communicator_, &request);
pending_.push_back(request);
#if MERCURYDPM_ASSERTS
if (from == processorID_)
    logger(FATAL, "[MPI FATAL]: Receiving data from self!");
if (count == 0)
    logger(WARN, "[MPI ERROR]: Receiving zero data");
#endif
#ifdef MERCURYDPM_USE_MPI
MPI_Request request;
MPI_Irecv(t, count, dataTypes_[type], from, tag, communicator_, &request);
pending_.push_back(request);
#if MERCURYDPM_ASSERTS
if (to == processorID_)
    logger(FATAL, "[MPI FATAL]: Sending data to self!");
if (count == 0)
    logger(WARN, "[MPI ERROR]: Sending zero data");
#endif
#ifdef MERCURYDPM_USE_MPI
MPI_Ssend(&t, count, Detail::toMPIType(t), to, tag, communicator_);
#if MERCURYDPM_ASSERTS
if (to == processorID_)
    logger(FATAL, "[MPI FATAL]: Sending data to self!");
if (count == 0)
    logger(WARN, "[MPI ERROR]: Sending zero data");
#endif
#ifdef MERCURYDPM_USE_MPI
MPI_Ssend(t, count, dataTypes_[type], to, tag, communicator_);
#if MERCURYDPM_ASSERTS
if (from == processorID_)
    logger(FATAL, "[MPI FATAL]: Receiving data from self!");
if (count == 0)
    logger(WARN, "[MPI ERROR]: Receiving zero data");
#endif
#ifdef MERCURYDPM_USE_MPI
MPI_Recv(&t, count, Detail::toMPIType(t), from, tag, communicator_, MPI_STATUS_IGNORE);
#if MERCURYDPM_ASSERTS
if (from == processorID_)
    logger(FATAL, "[MPI FATAL]: Receiving data from self!");
if (count == 0)
    logger(WARN, "[MPI ERROR]: Receiving zero data");
#endif
#ifdef MERCURYDPM_USE_MPI
MPI_Recv(t, count, dataTypes_[type], from, tag, communicator_, MPI_STATUS_IGNORE);
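// Usage sketch (illustrative, not part of the header): the direct variants use blocking
// MPI_Ssend/MPI_Recv, so the two sides must call them in matching pairs; nothing is queued
// in pending_ and no sync() is needed. Names are hypothetical.
#ifdef MERCURYDPM_USE_MPI
inline void exampleDirectTransfer(MPIContainer& communicator, int sender, int target, int tag)
{
    double payload = 3.14;
    if (static_cast<int>(communicator.getProcessorID()) == sender)
        communicator.directSend(payload, 1, target, tag);    // blocks until the receive has started
    else if (static_cast<int>(communicator.getProcessorID()) == target)
        communicator.directReceive(payload, 1, sender, tag); // blocks until the data has arrived
}
#endif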
#ifdef MERCURYDPM_USE_MPI
MPI_Gather(&send_t, 1, Detail::toMPIType(send_t), receive_t, 1, Detail::toMPIType(send_t), 0, communicator_);
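// Usage sketch (illustrative, not part of the header): gather() collects one scalar per
// processor into an array that only needs to be valid on the root (rank 0). Names are
// hypothetical.
#ifdef MERCURYDPM_USE_MPI
inline void exampleGatherCounts(MPIContainer& communicator, unsigned localCount)
{
    std::vector<unsigned> counts(communicator.getNumberOfProcessors());
    communicator.gather(localCount, counts.data()); // counts is filled on processor 0 only
}
#endif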
#ifdef MERCURYDPM_USE_MPI
MPI_Bcast(&t, 1, Detail::toMPIType(t), fromProcessor, communicator_);
#ifdef MERCURYDPM_USE_MPI
MPI_Bcast((void*) t, size, Detail::toMPIType(t[0]), fromProcessor, communicator_);
#ifdef MERCURYDPM_USE_MPI
MPI_Bcast((void*) t, 1, dataTypes_[type], fromProcessor, communicator_);
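// Usage sketch (illustrative, not part of the header): broadcast() sends a value that is
// only known on one processor (the root, by default rank 0) to every other processor.
// Names are hypothetical.
#ifdef MERCURYDPM_USE_MPI
inline void exampleBroadcast(MPIContainer& communicator)
{
    double timeStep = 0.0;
    if (communicator.getProcessorID() == 0)
        timeStep = 1e-5;                  // value only known on the root
    communicator.broadcast(timeStep);     // afterwards every processor holds 1e-5

    double bounds[6] = {0, 1, 0, 1, 0, 1};
    communicator.broadcast(bounds, 6, 0); // array overload: 6 scalars from processor 0
}
#endif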
#ifdef MERCURYDPM_USE_MPI
reduce(T& t, MPI_Op operation, int id = 0)
// On the target processor (rank id) the reduction is done in place:
MPI_Reduce(MPI_IN_PLACE, &t, 1, Detail::toMPIType(t), operation, id, communicator_);
// All other processors only contribute their value and receive nothing back:
MPI_Reduce(&t, nullptr, 1, Detail::toMPIType(t), operation, id, communicator_);
#ifdef MERCURYDPM_USE_MPI
allReduce(T& send_t, T& receive_t, MPI_Op operation)
MPI_Allreduce(&send_t, &receive_t, 1, Detail::toMPIType(send_t), operation, communicator_);
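// Usage sketch (illustrative, not part of the header): reduce() leaves the combined value
// only on the target processor, allReduce() leaves it on every processor. Names are
// hypothetical.
#ifdef MERCURYDPM_USE_MPI
inline void exampleReductions(MPIContainer& communicator, double localKineticEnergy)
{
    // Sum over all processors; the total ends up on processor 0 only.
    communicator.reduce(localKineticEnergy, MPI_SUM);

    // Maximum over all processors; every processor receives the result.
    double localOverlap = 0.01, maxOverlap = 0.0;
    communicator.allReduce(localOverlap, maxOverlap, MPI_MAX);
}
#endif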
#ifdef MERCURYDPM_USE_MPI
allGather(T& send_t, int send_count, std::vector<T>& receive_t, int receive_count)
MPI_Allgather(&send_t, send_count, Detail::toMPIType(send_t),
              receive_t.data(), receive_count, Detail::toMPIType(receive_t[0]), communicator_);
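// Usage sketch (illustrative, not part of the header): allGather() gives every processor
// one entry per rank, e.g. to learn how many particles each domain currently holds before
// exchanging them. Names are hypothetical.
#ifdef MERCURYDPM_USE_MPI
inline std::vector<int> exampleAllGather(MPIContainer& communicator, int numberOfLocalParticles)
{
    std::vector<int> particlesPerProcessor(communicator.getNumberOfProcessors());
    communicator.allGather(numberOfLocalParticles, 1, particlesPerProcessor, 1);
    return particlesPerProcessor; // index p holds the count reported by processor p
}
#endif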
#ifdef MERCURYDPM_USE_MPI
#ifdef MERCURYDPM_USE_MPI
MPI_Datatype MPIType;
MPI_Type_contiguous(sizeof(T), MPI_BYTE, &MPIType);
MPI_Type_commit(&MPIType);
dataTypes_.push_back(MPIType);
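// Usage sketch (illustrative, not part of the header): createMercuryMPIType() treats T as a
// contiguous block of bytes, so it is intended for trivially copyable structs. The struct and
// the call below are hypothetical; in the kernel this registration is normally performed by
// initialiseMercuryMPITypes().
#ifdef MERCURYDPM_USE_MPI
struct ExamplePositionMessage // hypothetical plain-old-data message
{
    double x, y, z;
    unsigned id;
};

inline void exampleRegisterType(MPIContainer& communicator)
{
    // Commits a contiguous MPI type of sizeof(ExamplePositionMessage) bytes and stores it in
    // dataTypes_, after which the MercuryMPIType overloads of send()/receive()/broadcast()
    // can transfer objects of this type.
    communicator.createMercuryMPIType(ExamplePositionMessage(), MercuryMPIType::POSITION);
}
#endif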
#ifdef MERCURYDPM_USE_MPI
for (MPI_Datatype type : dataTypes_)
{
    MPI_Type_free(&type);
}
#ifdef MERCURYDPM_USE_MPI
std::vector<MPI_Request> pending_;    // requests of asynchronous communications that sync() still has to complete
MPI_Comm communicator_;               // the MPI communicator used by all communication calls in this class
std::vector<MPI_Datatype> dataTypes_; // committed MPI data types, indexed by MercuryMPIType
Symbols referenced in this file:

Logger<MERCURYDPM_LOGLEVEL> logger("MercuryKernel")
    Definition of the different loggers for the kernel modules; a user can define their own custom logger here.

MercuryMPITag
    An enum that facilitates the creation of unique communication tags in the parallel code.
    Values: PARTICLE_COUNT, PARTICLE_DATA, POSITION_DATA, PERIODIC_POSITION_DATA, VELOCITY_DATA,
    INTERACTION_COUNT, INTERACTION_DATA, PERIODIC_COMPLEXITY, PARTICLE_INDEX, SUPERQUADRIC_DATA

void initialiseMPI()
    Initialises the MPI library.

MercuryMPIType
    An enum that indicates what type of data is being sent over MPI.
    Values: PARTICLE, POSITION, VELOCITY, FORCE, INTERACTION, SUPERQUADRIC
MPIContainer
    This class contains all information and functions required for communication between processors.

    static MPIContainer& Instance()
        Fetch the singleton instance to be used for communication.
    MPIContainer()
        Constructor.
    MPIContainer(const MPIContainer& orig) = delete
        Copy constructor is disabled, to enforce a singleton pattern.
    std::size_t getNumberOfProcessors() const
        Get the total number of processors participating in this simulation.
    std::size_t getProcessorID()
        Get the ID of the processor this class is running on.
    void initialiseMercuryMPITypes(const SpeciesHandler& speciesHandler)
        Creates the MPI types required for communication of Mercury data through the MPI interface.
    void createMercuryMPIType(T t, MercuryMPIType type)
        Creates and commits a contiguous MPI data type for T and stores it in dataTypes_.
    void deleteMercuryMPITypes()
        Deletes the MercuryMPITypes.
    void sync()
        Process all pending asynchronous communication requests before continuing.
    std::enable_if<std::is_scalar<T>::value, void>::type send(T& t, int to, int tag)
        Asynchronously send a scalar to some other processor.
    std::enable_if<std::is_scalar<T>::value, void>::type send(T* t, int count, int to, int tag)
        Asynchronously send a list of scalars to some other processor.
    void send(T* t, MercuryMPIType type, int count, int to, int tag)
        Asynchronously send a list of MercuryMPIType objects to some other processor.
    std::enable_if<std::is_scalar<T>::value, void>::type receive(T& t, int from, int tag)
        Asynchronously receive a scalar from some other processor.
    std::enable_if<std::is_scalar<T>::value, void>::type receive(T* t, int count, int from, int tag)
        Asynchronously receive a list of scalars from some other processor.
    void receive(T* t, MercuryMPIType type, int count, int from, int tag)
        Asynchronously receive a list of MercuryMPIType objects from some other processor.
    std::enable_if<std::is_scalar<T>::value, void>::type directSend(T& t, int count, int to, int tag)
        Synchronously send a list of scalars to another processor; the data should be received directly, since MPI_Ssend blocks until the matching receive has started.
    void directSend(T* t, MercuryMPIType type, int count, int to, int tag)
        Synchronously send a list of MercuryMPIType objects to another processor.
    std::enable_if<std::is_scalar<T>::value, void>::type directReceive(T& t, int count, int from, int tag)
        Synchronously receive a list of scalars from another processor; blocks until the matching send has been issued.
    void directReceive(T* t, MercuryMPIType type, int count, int from, int tag)
        Synchronously receive a list of MercuryMPIType objects from another processor.
    void gather(T& send_t, T* receive_t)
        Gathers a scalar from all processors to a vector of scalars on the root.
    std::enable_if<std::is_scalar<T>::value, void>::type broadcast(T& t, int fromProcessor = 0)
        Broadcasts a scalar from the root to all other processors.
    std::enable_if<std::is_scalar<T>::value, void>::type broadcast(T* t, int size, int fromProcessor)
        Broadcasts an array of scalars from the root to all other processors.
    void broadcast(T* t, MercuryMPIType type, int fromProcessor = 0)
        Broadcasts a MercuryMPIType object from the root to all other processors.
    reduce(T& t, MPI_Op operation, int id = 0)
        Reduces a scalar on all processors to one scalar on a target processor.
    allReduce(T& send_t, T& receive_t, MPI_Op operation)
        Reduces a scalar on all processors and makes the result available on every processor.
    allGather(T& send_t, int send_count, std::vector<T>& receive_t, int receive_count)
        Gathers scalars from all processors and makes the combined list available on every processor.
    int processorID_
        The ID of the processor this class is running on.
    int numberOfProcessors_
        The total number of processors in the communicator.
SpeciesHandler
    Container to store all ParticleSpecies.
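A minimal end-to-end sketch of how the pieces above are typically combined; it assumes MERCURYDPM_USE_MPI is defined, and every name outside the MPIContainer interface is hypothetical:

#include "MpiContainer.h"
#include <iostream>

int main(int argc, char* argv[])
{
    initialiseMPI();                                        // set up the MPI library
    MPIContainer& communicator = MPIContainer::Instance();  // singleton access

    std::size_t rank = communicator.getProcessorID();
    std::size_t size = communicator.getNumberOfProcessors();

    // Toy reduction: sum the ranks over all processors; the result lands on processor 0.
    double value = static_cast<double>(rank);
    communicator.reduce(value, MPI_SUM);

    if (rank == 0)
        std::cout << size << " processors, sum of ranks = " << value << std::endl;
    return 0; // MPI shutdown is assumed to be handled elsewhere by the kernel
}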