MpiContainer.h
1 // This file is part of the MercuryDPM project (https://www.mercurydpm.org).
2 // Copyright (c), The MercuryDPM Developers Team. All rights reserved.
3 // License: BSD 3-Clause License; see the LICENSE file in the root directory.
4 
10 #ifndef MPICONTAINER_H_
11 #define MPICONTAINER_H_
12 
13 #include <cstddef>
14 #include <functional>
15 #include <vector>
16 #include "Math/Vector.h"
17 
18 #ifdef MERCURYDPM_USE_MPI
19 #include <mpi.h>
20 #endif
21 
22 #ifdef MERCURYDPM_FORCE_ASSERTS
23 #define MERCURYDPM_ASSERTS true
24 #else
25 #ifdef MERCURYDPM_NO_ASSERTS
26 #define MERCURYDPM_ASSERTS false
27 #else
28 #ifdef NDEBUG
29 #define MERCURYDPM_ASSERTS false
30 #else
31 #define MERCURYDPM_ASSERTS true
32 #endif
33 #endif
34 #endif
35 
36 class SpeciesHandler;
37 
44 enum MercuryMPIType
45 {
46  PARTICLE = 0, POSITION = 1, VELOCITY = 2, FORCE = 3, INTERACTION = 4, SUPERQUADRIC = 5
47 };
48 
55 enum MercuryMPITag
56 {
57  PARTICLE_COUNT,
58  PARTICLE_DATA,
59  POSITION_DATA,
60  PERIODIC_POSITION_DATA,
61  VELOCITY_DATA,
62  INTERACTION_COUNT,
63  INTERACTION_DATA,
64  PERIODIC_COMPLEXITY,
65  PARTICLE_INDEX,
66  SUPERQUADRIC_DATA
67 };
68 
69 namespace Detail
70 {
71 #ifdef MERCURYDPM_USE_MPI
72 //convert integral data to the corresponding MPI type
73 template<typename T>
74 typename std::enable_if<std::is_integral<T>::value, MPI_Datatype>::type
75 toMPIType(T t)
76 {
77  MPI_Datatype type;
78  MPI_Type_match_size(MPI_TYPECLASS_INTEGER, sizeof(T), &type);
79  return type;
80 }
81 
82 //convert floating point data to the corresponding MPI type
83 template<typename T>
84 typename std::enable_if<std::is_floating_point<T>::value, MPI_Datatype>::type
85 toMPIType(T t)
86 {
87  MPI_Datatype type;
88  MPI_Type_match_size(MPI_TYPECLASS_REAL,sizeof(T),&type);
89  return type;
90 }
91 #endif
92 } /* Detail */
93 
97 void initialiseMPI();
98 
108 class MPIContainer final
109 {
110 public:
113  static MPIContainer& Instance()
114  {
115  static MPIContainer theInstance;
116  return theInstance;
117  }
118 
122  void initialiseMercuryMPITypes(const SpeciesHandler& speciesHandler);
123 
131  void sync()
132  {
133 #ifdef MERCURYDPM_USE_MPI
134  MPI_Waitall(pending_.size(),pending_.data(),MPI_STATUSES_IGNORE);
135  pending_.clear();
136  MPI_Barrier(communicator_);
137 #endif
138  }
139 
148  template<typename T>
149  typename std::enable_if<std::is_scalar<T>::value, void>::type
150  send(T& t, int to, int tag)
151  {
152 #if MERCURYDPM_ASSERTS
153  if (to == processorID_)
154  {
155  logger(FATAL, "[MPI FATAL]: Sending data to self!");
156  }
157 #endif
158 #ifdef MERCURYDPM_USE_MPI
159  MPI_Request request;
160  MPI_Isend(&t, 1, Detail::toMPIType(t), to, tag, communicator_, &request);
161  pending_.push_back(request);
162 
163 #endif
164  }
165 
167  template<typename T>
168  typename std::enable_if<std::is_scalar<T>::value, void>::type
169  send(T* t, int count, int to, int tag)
170  {
171 #if MERCURYDPM_ASSERTS
172  if (to == processorID_)
173  {
174  logger(FATAL, "[MPI FATAL]: Sending data to self!");
175  }
176 
177  if (count == 0)
178  {
179  logger(WARN, "[MPI ERROR]: Sending zero data");
180  }
181 #endif
182 #ifdef MERCURYDPM_USE_MPI
183  MPI_Request request;
184  MPI_Isend(t, count, Detail::toMPIType(*t), to, tag, communicator_, &request);
185  pending_.push_back(request);
186 
187 #endif
188  }
189 
198  template<typename T>
199  typename std::enable_if<std::is_scalar<T>::value, void>::type
200  receive(T& t, int from, int tag)
201  {
202 #if MERCURYDPM_ASSERTS
203  if (from == processorID_)
204  {
205  logger(FATAL, "[MPI FATAL]: Receiving data from self!");
206  }
207 #endif
208 #ifdef MERCURYDPM_USE_MPI
209  MPI_Request request;
210  MPI_Irecv(&t, 1, Detail::toMPIType(t), from, tag, communicator_, &request);
211  pending_.push_back(request);
212 
213 #endif
214  }
215 
217  template<typename T>
218  typename std::enable_if<std::is_scalar<T>::value, void>::type
219  receive(T* t, int count, int from, int tag)
220  {
221 #if MERCURYDPM_ASSERTS
222  if (from == processorID_)
223  {
224  logger(FATAL, "[MPI FATAL]: Receiving data from self!");
225  }
226 
227  if (count == 0)
228  {
229  logger(WARN, "[MPI ERROR]: Receiving zero data");
230  }
231 #endif
232 #ifdef MERCURYDPM_USE_MPI
233  MPI_Request request;
234  MPI_Irecv(t, count, Detail::toMPIType(*t), from, tag, communicator_, &request);
235  pending_.push_back(request);
236 
237 #endif
238  }
239 
240 
249  template<typename T>
250  void send(T* t, MercuryMPIType type, int count, int to, int tag)
251  {
252  //std::cout << "[Process: " << processorID_ << "]: QUEUING SEND REQUEST with tag: " << tag << std::endl;
253 #if MERCURYDPM_ASSERTS
254  if (to == processorID_)
255  {
256  logger(FATAL, "[MPI FATAL]: Sending data to self!");
257  }
258 
259  if (count == 0)
260  {
261  logger(WARN, "[MPI ERROR]: Sending zero data");
262  }
263 #endif
264 #ifdef MERCURYDPM_USE_MPI
265  MPI_Request request;
266  MPI_Isend(t, count, dataTypes_[type], to, tag, communicator_, &request);
267  pending_.push_back(request);
268 
269 
270 #endif
271  }
272 
281  template<typename T>
282  void receive(T* t, MercuryMPIType type, int count, int from, int tag)
283  {
284  //std::cout << "[Process: " << processorID_ << "]: QUEUING RECEIVE REQUEST with tag: " << tag << std::endl;
285 #if MERCURYDPM_ASSERTS
286  if (from == processorID_)
287  {
288  logger(FATAL, "[MPI FATAL]: Receiving data from self!");
289  }
290 
291  if (count == 0)
292  {
293  logger(WARN, "[MPI ERROR]: Receiving zero data");
294  }
295 #endif
296 #ifdef MERCURYDPM_USE_MPI
297  MPI_Request request;
298  MPI_Irecv(t, count, dataTypes_[type], from, tag, communicator_, &request);
299  pending_.push_back(request);
300 
301 #endif
302  }
303 
312  template<typename T>
313  typename std::enable_if<std::is_scalar<T>::value, void>::type
314  directSend(T& t, int count, int to, int tag)
315  {
316 #if MERCURYDPM_ASSERTS
317  if (to == processorID_)
318  {
319  logger(FATAL, "[MPI FATAL]: Sending data to self!");
320  }
321 
322  if (count == 0)
323  {
324  logger(WARN, "[MPI ERROR]: Sending zero data");
325  }
326 #endif
327 #ifdef MERCURYDPM_USE_MPI
328  MPI_Ssend(&t, count, Detail::toMPIType(t), to, tag, communicator_);
329 #endif
330  }
331 
332 
333  template<typename T>
334  void directSend(T* t, MercuryMPIType type, int count, int to, int tag)
335  {
336 #if MERCURYDPM_ASSERTS
337  if (to == processorID_)
338  {
339  logger(FATAL, "[MPI FATAL]: Sending data to self!");
340  }
341 
342  if (count == 0)
343  {
344  logger(WARN, "[MPI ERROR]: Sending zero data");
345  }
346 #endif
347 #ifdef MERCURYDPM_USE_MPI
348  MPI_Ssend(t,count,dataTypes_[type], to, tag, communicator_);
349 #endif
350  }
351 
360  template<typename T>
361  typename std::enable_if<std::is_scalar<T>::value, void>::type
362  directReceive(T& t, int count, int from, int tag)
363  {
364 #if MERCURYDPM_ASSERTS
365  if (from == processorID_)
366  {
367  logger(FATAL, "[MPI FATAL]: Receiving data from self!");
368  }
369 
370  if (count == 0)
371  {
372  logger(WARN, "[MPI ERROR]: Receiving zero data");
373  }
374 #endif
375 #ifdef MERCURYDPM_USE_MPI
376  MPI_Recv(&t, count, Detail::toMPIType(t), from, tag,communicator_, MPI_STATUS_IGNORE);
377 #endif
378  }
379 
380  template<typename T>
381  void directReceive(T* t, MercuryMPIType type, int count, int from, int tag)
382  {
383 #if MERCURYDPM_ASSERTS
384  if (from == processorID_)
385  {
386  logger(FATAL, "[MPI FATAL]: Receiving data from self!");
387  }
388 
389  if (count == 0)
390  {
391  logger(WARN, "[MPI ERROR]: Receiving zero data");
392  }
393 #endif
394 #ifdef MERCURYDPM_USE_MPI
395  MPI_Recv(t, count, dataTypes_[type], from, tag, communicator_, MPI_STATUS_IGNORE);
396 #endif
397  }
398 
406  template<typename T>
407  void gather(T& send_t, T* receive_t)
408  {
409 #ifdef MERCURYDPM_USE_MPI
410  MPI_Gather(&send_t, 1, Detail::toMPIType(send_t), receive_t, 1, Detail::toMPIType(send_t), 0, communicator_);
411 #endif
412  }
413 
418  template<typename T>
419  typename std::enable_if<std::is_scalar<T>::value, void>::type
420  broadcast(T& t, int fromProcessor = 0)
421  {
422 #ifdef MERCURYDPM_USE_MPI
423  MPI_Bcast(&t,1,Detail::toMPIType(t),fromProcessor,communicator_);
424 #endif
425  }
426 
431  template<typename T>
432  typename std::enable_if<std::is_scalar<T>::value, void>::type
433  broadcast(T* t, int size, int fromProcessor)
434  {
435 #ifdef MERCURYDPM_USE_MPI
436  MPI_Bcast((void *)t,size,Detail::toMPIType(t[0]),fromProcessor,communicator_);
437 
438 #endif
439  }
440 
445  template<typename T>
446  void broadcast(T* t, MercuryMPIType type, int fromProcessor = 0)
447  {
448 #ifdef MERCURYDPM_USE_MPI
449  MPI_Bcast((void *)t,1,dataTypes_[type],fromProcessor,communicator_);
450 #endif
451  }
452 
463 #ifdef MERCURYDPM_USE_MPI
464  template<typename T>
465  typename std::enable_if<std::is_scalar<T>::value, void>::type
466  reduce(T& t, MPI_Op operation, int id = 0)
467  {
468 
469  if(id == getProcessorID())
470  {
471  MPI_Reduce(MPI_IN_PLACE, &t, 1, Detail::toMPIType(t), operation, id, communicator_);
472  }
473  else
474  {
475  MPI_Reduce(&t, nullptr, 1, Detail::toMPIType(t), operation, id, communicator_);
476  }
477  }
478 #endif
479 
488 #ifdef MERCURYDPM_USE_MPI
489  template<typename T>
490  typename std::enable_if<std::is_scalar<T>::value, void>::type
491  allReduce(T& send_t, T& receive_t, MPI_Op operation)
492  {
493  MPI_Allreduce(&send_t, &receive_t, 1, Detail::toMPIType(send_t), operation,communicator_);
494  }
495 #endif
496 
506 #ifdef MERCURYDPM_USE_MPI
507  template<typename T>
508  typename std::enable_if<std::is_scalar<T>::value, void>::type
509  allGather(T& send_t, int send_count, std::vector<T>& receive_t, int receive_count)
510  {
511  MPI_Allgather(&send_t, send_count, Detail::toMPIType(send_t),
512  receive_t.data(), receive_count, Detail::toMPIType(receive_t[0]),communicator_);
513  }
514 #endif
515 
519  std::size_t getProcessorID();
520 
524  std::size_t getNumberOfProcessors() const;
525 
529 #ifdef MERCURYDPM_USE_MPI
530  MPI_Comm& getComm();
531 #endif
532 
542  template<typename T>
543  void createMercuryMPIType(T t, MercuryMPIType type)
544  {
545 #ifdef MERCURYDPM_USE_MPI
546  MPI_Datatype MPIType;
547  MPI_Type_contiguous(sizeof(T), MPI_BYTE, &MPIType);
548  MPI_Type_commit(&MPIType);
549  dataTypes_.push_back(MPIType);
550 #endif
551  }
552 
553 
557  void deleteMercuryMPITypes()
558  {
559 #ifdef MERCURYDPM_USE_MPI
560  for(MPI_Datatype type : dataTypes_)
561  {
562  MPI_Type_free(&type);
563  }
564 #endif
565  }
566 
570  MPIContainer(const MPIContainer& orig) = delete;
571 
572 private:
573 
577  MPIContainer();
578 
582  //std::size_t processorID_;
583  int processorID_;
584 
588  int numberOfProcessors_;
589 
590 #ifdef MERCURYDPM_USE_MPI
594  std::vector<MPI_Request> pending_;
595 
599  MPI_Comm communicator_;
603  std::vector<MPI_Datatype> dataTypes_;
604 
605 #endif
606 
607 };
608 
609 
610 #endif /* MPICONTAINER_H_ */
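
A minimal usage sketch (not part of MpiContainer.h) showing how the singleton interface above might be combined in practice: a ring exchange with the asynchronous send/receive pair completed by sync(), followed by a broadcast from the root. It assumes an MPI-enabled build (MERCURYDPM_USE_MPI); the tag value and variable names are chosen purely for illustration, and MPI finalisation is not shown.

// Illustrative sketch only; not part of the MercuryDPM sources.
#include "MpiContainer.h"

int main()
{
    initialiseMPI();                                        // start MPI before any communication
    MPIContainer& communicator = MPIContainer::Instance();  // fetch the singleton

    const std::size_t processorID = communicator.getProcessorID();
    const std::size_t numberOfProcessors = communicator.getNumberOfProcessors();

    // Ring exchange: send this processor's rank to the next processor and
    // receive the rank of the previous one; sync() completes the pending
    // asynchronous requests queued by send() and receive().
    double sendValue = static_cast<double>(processorID);
    double receiveValue = 0.0;
    const int tag = 1;                                      // illustrative tag value
    if (numberOfProcessors > 1)
    {
        const int next = static_cast<int>((processorID + 1) % numberOfProcessors);
        const int previous = static_cast<int>((processorID + numberOfProcessors - 1) % numberOfProcessors);
        communicator.send(sendValue, next, tag);
        communicator.receive(receiveValue, previous, tag);
        communicator.sync();
    }

    // The root processor broadcasts a value to all other processors.
    double timeStep = (processorID == 0) ? 1e-5 : 0.0;
    communicator.broadcast(timeStep);

    return 0;
}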