vector_split.cc File Reference
#include "generic.h"

Functions

template<typename myType >
void construct_vector (myType given_array[], unsigned given_arraysize, Vector< myType > &result_vector)
 
template<typename myType >
void output_vector (Vector< myType > &given_vector)
 
void create_vector_ascend_row (unsigned const nrow, const OomphCommunicator *const comm_pt, bool const distributed, DoubleVector &my_vec)
 
void create_vectors_to_split (unsigned nrowarray[], const OomphCommunicator *const comm_pt, const bool distributed, Vector< DoubleVector > &out_vector)
 
int main (int argc, char *argv[])
 

Function Documentation

◆ construct_vector()

template<typename myType >
void construct_vector ( myType  given_array[],
unsigned  given_arraysize,
Vector< myType > &  result_vector 
)
37 {
38  // Clear and reserve the required memory.
39  result_vector.clear();
40  result_vector.reserve(given_arraysize);
41 
42  for (unsigned i = 0; i < given_arraysize; i++)
43  {
44  result_vector.push_back(given_array[i]);
45  }
46 }
int i
Definition: BiCGSTAB_step_by_step.cpp:9

References i.

◆ create_vector_ascend_row()

void create_vector_ascend_row ( unsigned const  nrow,
const OomphCommunicator *const  comm_pt,
bool const  distributed,
DoubleVector &  my_vec 
)
68 {
69  // Clear the block
70  my_vec.clear();
71 
72  // Create the distribution.
73  LinearAlgebraDistribution distri(comm_pt,nrow,distributed);
74 
75  // Build the vector
76  my_vec.build(distri,0.0);
77 
78  // The number of rows this processor is responsible for.
79  unsigned nrow_local = distri.nrow_local();
80 
81  // The first_row will be used as an offset for the values to insert.
82  unsigned first_row = distri.first_row();
83 
84  // Fill in values...
85  for (unsigned row_i = 0; row_i < nrow_local; row_i++)
86  {
87  my_vec[row_i] = first_row + row_i + 1; // Use natural numbers
88  }
89 }
void build(const DoubleVector &old_vector)
Just copies the argument DoubleVector.
Definition: double_vector.cc:35
void clear()
wipes the DoubleVector
Definition: double_vector.h:142
Definition: linear_algebra_distribution.h:64

References oomph::DoubleVector::build(), oomph::DoubleVector::clear(), oomph::LinearAlgebraDistribution::first_row(), and oomph::LinearAlgebraDistribution::nrow_local().

Referenced by main().

◆ create_vectors_to_split()

void create_vectors_to_split ( unsigned  nrowarray[],
const OomphCommunicator *const  comm_pt,
const bool  distributed,
Vector< DoubleVector > &  out_vector 
)
95 {
96  // The number of out vectors.
97  unsigned nvectors = out_vector.size();
98 
99  // Build each sub DoubleVector with just a uniform distribution.
100  // By default, each entry is initialised to 0.
101  for (unsigned vec_i = 0; vec_i < nvectors; vec_i++)
102  {
103  // Create the distribution
104  LinearAlgebraDistribution dist(comm_pt,nrowarray[vec_i],distributed);
105  out_vector[vec_i].build(dist);
106  }
107 }

Referenced by main().

◆ main()

int main ( int argc  ,
char * argv[] 
)

Driver code: Testing DoubleVectorHelpers::split(...) This is the reverse of DoubleVectorHelpers::concatenate(...), which is demonstrated in self_test/mpi/vector_concatenation/ Given a vector with length = 7+5+3 = 15 [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15],

we split it into vectors v1, v2 and v3 of lengths 7, 5 and 3 respectively. The script validate.sh should run this on 1, 2, 3 and 4 cores.

Communication is required and the order of the entries is preserved across the vectors. We demonstrate this on two cores, p0 and p1: v1, p0: [1 2 3] v1, p1: [4 5 6 7]

v2, p0: [8 9] v2, p1: [10 11 12]

v3, p0: [13] v3, p1: [14 15]

154 {
155 #ifdef OOMPH_HAS_MPI
156  // Initialise MPI
157  MPI_Helpers::init(argc,argv);
158 #endif
159 
160  // Get the global oomph-lib communicator
161  const OomphCommunicator* const comm_pt = MPI_Helpers::communicator_pt();
162 
163  // The number of sub vectors
164  unsigned nvectors = 3;
165 
166  // Supply the nrow for the sub distributions. In C++11, we can initialise
167  // Vectors like this, but for now, we use arrays.
168  unsigned nrowarray[] = {7,5,3};
169 
170  // The vectors to split the in vector in to.
171  Vector<DoubleVector> out_vector(nvectors);
172  bool distributed = true;
173 
174  // Create the vectors and get the pointers to them.
175  // This may be a bit long winded but we do not want to create pointers
176  // to objects using "new" since then we will have to remember to call
177  // delete and null the pointers. This is considered bad practice, memory
178  // management should be done automatically with smart pointers.
179  // We could use smart pointers but it is only available in C++11,
180  // or use boost smart pointers, but we do not have boost...
181  // /rant.
182  create_vectors_to_split(nrowarray,comm_pt,distributed,out_vector);
183 
184  // Global row for the in vector (must match the sum of the global rows for
185  // the sub vectors).
186  unsigned in_nrow = 0;
187  for (unsigned vec_i = 0; vec_i < nvectors; vec_i++)
188  {
189  in_nrow += out_vector[vec_i].nrow();
190  }
191 
192  // The in vector
193  DoubleVector in_vector;
194  create_vector_ascend_row(in_nrow, comm_pt, distributed, in_vector);
195 
196  // Call the split function.
197  DoubleVectorHelpers::split(in_vector,out_vector);
198 
199  // The split is done, now we output the results.
200  // We do not use the output function from DoubleVector
201  // because it outputs the whole DoubleVector per processor
202  // (requiring communication). We want to check that the values per processor
203  // are correct.
204 
205  // My rank
206  unsigned my_rank = comm_pt->my_rank();
207  unsigned nproc = comm_pt->nproc();
208 
209  // The output file name.
210  std::ostringstream outfile_stream;
211  outfile_stream << "out_NP"<<nproc<<"R"<< my_rank;
212 
213  // The output file.
214  std::ofstream out_file;
215  out_file.open(outfile_stream.str().c_str());
216 
217  // Output data from all out vectors:
218  // nrow()
219  // first_row()
220  // nrow_local()
221  // distributed()
222  // values
223  for (unsigned vec_i = 0; vec_i < nvectors; vec_i++)
224  {
225  // The values and nrow local.
226  double* out_values = out_vector[vec_i].values_pt();
227  unsigned out_nrow_local = out_vector[vec_i].nrow_local();
228 
229  out_file << out_vector[vec_i].nrow() << "\n";
230  out_file << out_vector[vec_i].first_row() << "\n";
231  out_file << out_nrow_local << "\n";
232  out_file << out_vector[vec_i].distributed() << "\n";
233 
234  for (unsigned val_i = 0; val_i < out_nrow_local; val_i++)
235  {
236  out_file << out_values[val_i] << "\n";
237  }
238  }
239 
240  out_file.close();
241 
242 #ifdef OOMPH_HAS_MPI
243  // finalize MPI
244  MPI_Helpers::finalize();
245 #endif
246  return(EXIT_SUCCESS);
247 } // end_of_main
Definition: double_vector.h:58
Definition: communicator.h:54
int my_rank() const
my rank
Definition: communicator.h:176
int nproc() const
number of processors
Definition: communicator.h:157
Definition: oomph-lib/src/generic/Vector.h:58
void split(const DoubleVector &in_vector, Vector< DoubleVector * > &out_vector_pt)
Definition: double_vector.cc:1413
void create_vector_ascend_row(unsigned const nrow, const OomphCommunicator *const comm_pt, bool const distributed, DoubleVector &my_vec)
Definition: vector_split.cc:64
void create_vectors_to_split(unsigned nrowarray[], const OomphCommunicator *const comm_pt, const bool distributed, Vector< DoubleVector > &out_vector)
Definition: vector_split.cc:93

References oomph::MPI_Helpers::communicator_pt(), create_vector_ascend_row(), create_vectors_to_split(), oomph::MPI_Helpers::finalize(), oomph::MPI_Helpers::init(), oomph::OomphCommunicator::my_rank(), oomph::OomphCommunicator::nproc(), and oomph::DoubleVectorHelpers::split().

◆ output_vector()

template<typename myType >
void output_vector ( Vector< myType > &  given_vector)
51 {
52  typename Vector<myType>::iterator it;
53 
54  for(it = given_vector.begin(); it != given_vector.end(); ++it)
55  {
56  oomph_info << *it << std::endl;
57  }
58 }
OomphInfo oomph_info
Definition: oomph_definitions.cc:319

References oomph::oomph_info.