distribution_concatenation.cc File Reference
#include "generic.h"

Functions

int main (int argc, char *argv[])
 

Function Documentation

◆ main()

int main ( int argc,
char * argv[]
)

Driver code: Testing LinearAlgebraDistributionHelpers::concatenate(...)

We concatenate three uniformly distributed distributions with nrow = 7, 5, and 3 respectively.

On one core:
dist_0: nrow = 7, nrow_local: rank0 = 7
dist_1: nrow = 5, nrow_local: rank0 = 5
dist_2: nrow = 3, nrow_local: rank0 = 3

The result distribution should have:
dist_r: nrow = 15, nrow_local: rank0 = 15


On two cores:
dist_0: nrow = 7, nrow_local: rank0 = 3, rank1 = 4
dist_1: nrow = 5, nrow_local: rank0 = 2, rank1 = 3
dist_2: nrow = 3, nrow_local: rank0 = 1, rank1 = 2

The result distribution should have:
dist_r: nrow = 15, nrow_local: rank0 = 6, rank1 = 9


On three cores:
dist_0: nrow = 7, nrow_local: rank0 = 2, rank1 = 2, rank2 = 3
dist_1: nrow = 5, nrow_local: rank0 = 1, rank1 = 2, rank2 = 2
dist_2: nrow = 3, nrow_local: rank0 = 1, rank1 = 1, rank2 = 1

The result distribution should have:
dist_r: nrow = 15, nrow_local: rank0 = 4, rank1 = 5, rank2 = 6


On four cores:
dist_0: nrow = 7, nrow_local: rank0 = 1, rank1 = 2, rank2 = 2, rank3 = 2
dist_1: nrow = 5, nrow_local: rank0 = 1, rank1 = 1, rank2 = 1, rank3 = 2
dist_2: nrow = 3, nrow_local: rank0 = 0, rank1 = 1, rank2 = 1, rank3 = 1

The result distribution should have:
dist_r: nrow = 15, nrow_local: rank0 = 2, rank1 = 4, rank2 = 4, rank3 = 5
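These counts follow from two simple rules: a uniform distribution gives every rank nrow/nproc rows, with the remainder spread over the highest-numbered ranks, and concatenation keeps each constituent distribution's local rows on the same rank, so the concatenated local count is the per-rank sum. The following minimal standalone sketch reproduces the tables above; the remainder rule is an assumption inferred from those tables, and uniform_nrow_local is an illustrative helper, not the oomph-lib API:

#include <iostream>

// Illustrative stand-in for a uniform distribution's partitioning
// (assumption: base rows = nrow/nproc, remainder assigned to the
// highest-numbered ranks, consistent with the tables above).
unsigned uniform_nrow_local(unsigned nrow, unsigned nproc, unsigned p)
{
  unsigned base = nrow / nproc;
  unsigned remainder = nrow % nproc;
  return base + ((p >= nproc - remainder) ? 1u : 0u);
}

int main()
{
  const unsigned dist_nrow[] = {7, 5, 3};
  for (unsigned nproc = 1; nproc <= 4; nproc++)
  {
    std::cout << "nproc = " << nproc << ": dist_r nrow_local =";
    for (unsigned p = 0; p < nproc; p++)
    {
      // Concatenation keeps each constituent's local rows on the
      // same rank, so rank p's local count is the sum over the
      // three distributions.
      unsigned sum = 0;
      for (unsigned i = 0; i < 3; i++)
      {
        sum += uniform_nrow_local(dist_nrow[i], nproc, p);
      }
      std::cout << " " << sum;
    }
    std::cout << "\n";
  }
  return 0;
}

For nproc = 4 this prints "dist_r nrow_local = 2 4 4 5", matching the four-core case above.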

The script validate.sh should run this test on 1, 2, 3 and 4 cores.
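To run a single case by hand with a standard MPI launcher (assuming the executable is named after the source file; validate.sh may invoke it differently):

mpirun -np 2 ./distribution_concatenation

This should produce one output file per rank, here out_NP2R0 and out_NP2R1.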

{
#ifdef OOMPH_HAS_MPI
 // Initialise MPI
 MPI_Helpers::init(argc,argv);
#endif

 // Get the global oomph-lib communicator
 const OomphCommunicator* const comm_pt = MPI_Helpers::communicator_pt();

 // How many distributions do we want to generate?
 unsigned ndistributions = 3;

 // What are the lengths of the above distributions?
 unsigned distlengtharray[] = {7,5,3};

 // The distributions to concatenate.
 Vector<LinearAlgebraDistribution> dist_to_cat;

 // Distribute the rows over the processors (as opposed to
 // replicating every row on every processor).
 bool distributed = true;

 // Create the distributions and store them in a container.
 // This may be a bit long-winded, but we do not want to create the
 // distributions with "new", since we would then have to remember to
 // call delete and null the pointers ourselves. Ideally memory
 // management would be done automatically with smart pointers, but
 // those require C++11 (or boost, which we do not have).
 for (unsigned dist_i = 0; dist_i < ndistributions; dist_i++)
 {
   // Create the distribution.
   dist_to_cat.push_back(LinearAlgebraDistribution(
     comm_pt,
     distlengtharray[dist_i],
     distributed));
 }

 // The pointers to the distributions to concatenate.
 Vector<LinearAlgebraDistribution*> dist_to_cat_pt;
 for (unsigned dist_i = 0; dist_i < ndistributions; dist_i++)
 {
   dist_to_cat_pt.push_back(&dist_to_cat[dist_i]);
 }

 // The result distribution.
 LinearAlgebraDistribution result_distribution;

 // Call the concatenate function.
 LinearAlgebraDistributionHelpers::concatenate(dist_to_cat_pt,
                                               result_distribution);

 // Output data about the result distribution:
 // nrow()
 // first_row()
 // nrow_local()
 // distributed()
 unsigned my_rank = comm_pt->my_rank();
 unsigned nproc = comm_pt->nproc();
 std::ostringstream result_stream;
 result_stream << "out_NP" << nproc << "R" << my_rank;

 std::ofstream result_file;
 result_file.open(result_stream.str().c_str());
 result_file << result_distribution.nrow() << "\n";
 result_file << result_distribution.first_row() << "\n";
 result_file << result_distribution.nrow_local() << "\n";
 result_file << result_distribution.distributed() << "\n";
 result_file.close();

#ifdef OOMPH_HAS_MPI
 // Finalise MPI
 MPI_Helpers::finalize();
#endif
 return(EXIT_SUCCESS);
} // end_of_main
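For instance, on two cores rank 1 should write the file out_NP2R1 containing (values taken from the two-core case above; distributed() prints as 1):

15
6
9
1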

References oomph::MPI_Helpers::communicator_pt(), oomph::LinearAlgebraDistributionHelpers::concatenate(), oomph::LinearAlgebraDistribution::distributed(), oomph::MPI_Helpers::finalize(), oomph::LinearAlgebraDistribution::first_row(), oomph::MPI_Helpers::init(), oomph::OomphCommunicator::my_rank(), oomph::OomphCommunicator::nproc(), oomph::LinearAlgebraDistribution::nrow(), and oomph::LinearAlgebraDistribution::nrow_local().