sparse_basic.cpp File Reference
#include "sparse.h"

Macros

#define EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN   g_realloc_count++;
 
#define EIGEN_SPARSE_ASSIGNMENT_FROM_DENSE_OP_SPARSE_PLUGIN   g_dense_op_sparse_count++;
 
#define EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_ADD_DENSE_PLUGIN   g_dense_op_sparse_count += 10;
 
#define EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_SUB_DENSE_PLUGIN   g_dense_op_sparse_count += 20;
 

Functions

template<typename SparseMatrixType >
void sparse_basic (const SparseMatrixType &ref)
 
template<typename SparseMatrixType >
void big_sparse_triplet (Index rows, Index cols, double density)
 
template<int >
void bug1105 ()
 
 EIGEN_DECLARE_TEST (sparse_basic)
 

Variables

static long g_realloc_count = 0
 
static long g_dense_op_sparse_count = 0
 

Macro Definition Documentation

◆ EIGEN_SPARSE_ASSIGNMENT_FROM_DENSE_OP_SPARSE_PLUGIN

#define EIGEN_SPARSE_ASSIGNMENT_FROM_DENSE_OP_SPARSE_PLUGIN   g_dense_op_sparse_count++;

◆ EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_ADD_DENSE_PLUGIN

#define EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_ADD_DENSE_PLUGIN   g_dense_op_sparse_count += 10;

◆ EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_SUB_DENSE_PLUGIN

#define EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_SUB_DENSE_PLUGIN   g_dense_op_sparse_count += 20;

◆ EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN

#define EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN   g_realloc_count++;

Function Documentation

◆ big_sparse_triplet()

template<typename SparseMatrixType >
void big_sparse_triplet ( Index  rows,
Index  cols,
double  density 
)
975  {
976  typedef typename SparseMatrixType::StorageIndex StorageIndex;
977  typedef typename SparseMatrixType::Scalar Scalar;
978  typedef Triplet<Scalar, Index> TripletType;
979  std::vector<TripletType> triplets;
980  double nelements = density * static_cast<double>(rows * cols);
981  VERIFY(nelements >= 0 && nelements < static_cast<double>(NumTraits<StorageIndex>::highest()));
982  Index ntriplets = Index(nelements);
983  triplets.reserve(ntriplets);
984  Scalar sum = Scalar(0);
985  for (Index i = 0; i < ntriplets; ++i) {
986  Index r = internal::random<Index>(0, rows - 1);
987  Index c = internal::random<Index>(0, cols - 1);
988  // use positive values to prevent numerical cancellation errors in sum
989  Scalar v = numext::abs(internal::random<Scalar>());
990  triplets.push_back(TripletType(r, c, v));
991  sum += v;
992  }
993  SparseMatrixType m(rows, cols);
994  m.setFromTriplets(triplets.begin(), triplets.end());
995  VERIFY(m.nonZeros() <= ntriplets);
996  VERIFY_IS_APPROX(sum, m.sum());
997 }
AnnoyingScalar abs(const AnnoyingScalar &x)
Definition: AnnoyingScalar.h:135
Array< int, Dynamic, 1 > v
Definition: Array_initializer_list_vector_cxx11.cpp:1
int i
Definition: BiCGSTAB_step_by_step.cpp:9
int rows
Definition: Tutorial_commainit_02.cpp:1
int cols
Definition: Tutorial_commainit_02.cpp:1
SCALAR Scalar
Definition: bench_gemm.cpp:45
A small structure to hold a nonzero entry as a triplet (i,j,value).
Definition: SparseUtil.h:187
#define VERIFY_IS_APPROX(a, b)
Definition: integer_types.cpp:13
int * m
Definition: level2_cplx_impl.h:294
#define VERIFY(a)
Definition: main.h:362
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition: Meta.h:83
density
Definition: UniformPSDSelfTest.py:19
r
Definition: UniformPSDSelfTest.py:20
int c
Definition: calibrate.py:100
Holds information about the various numeric (i.e. scalar) types allowed by Eigen.
Definition: NumTraits.h:217

References abs(), calibrate::c, cols, UniformPSDSelfTest::density, i, m, UniformPSDSelfTest::r, rows, v, VERIFY, and VERIFY_IS_APPROX.

Referenced by EIGEN_DECLARE_TEST().

◆ bug1105()

template<int >
void bug1105 ( )
1000  {
1001  // Regression test for bug 1105
1002  int n = Eigen::internal::random<int>(200, 600);
1004  std::complex<double> val;
1005 
1006  for (int i = 0; i < n; ++i) {
1007  mat.coeffRef(i, i % (n / 10)) = val;
1008  VERIFY(mat.data().allocatedSize() < 20 * n);
1009  }
1010 }
const unsigned n
Definition: CG3DPackingUnitTest.cpp:11
Eigen::SparseMatrix< double > mat
Definition: EigenUnitTest.cpp:10
A versatile sparse matrix representation.
Definition: SparseMatrix.h:121
Scalar & coeffRef(Index row, Index col)
Definition: SparseMatrix.h:275
constexpr Storage & data()
Definition: SparseMatrix.h:205
Index allocatedSize() const
Definition: CompressedStorage.h:95
val
Definition: calibrate.py:119

References Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::allocatedSize(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::coeffRef(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::data(), i, n, calibrate::val, and VERIFY.

◆ EIGEN_DECLARE_TEST()

EIGEN_DECLARE_TEST ( sparse_basic  )
1014  {
1015  g_dense_op_sparse_count = 0; // Suppresses compiler warning.
1016  for (int i = 0; i < g_repeat; i++) {
1017  int r = Eigen::internal::random<int>(1, 200), c = Eigen::internal::random<int>(1, 200);
1018  if (Eigen::internal::random<int>(0, 3) == 0) {
1019  r = c; // check square matrices in 25% of tries
1020  }
1024  CALL_SUBTEST_2((sparse_basic(SparseMatrix<std::complex<double>, ColMajor>(r, c))));
1025  CALL_SUBTEST_2((sparse_basic(SparseMatrix<std::complex<double>, RowMajor>(r, c))));
1032 
1033  r = Eigen::internal::random<int>(1, 100);
1034  c = Eigen::internal::random<int>(1, 100);
1035  if (Eigen::internal::random<int>(0, 3) == 0) {
1036  r = c; // check square matrices in 25% of tries
1037  }
1038 
1041  }
1042 
1043  // Regression test for bug 900: (manually insert higher values here, if you have enough RAM):
1046 
1047  CALL_SUBTEST_5(bug1105<0>());
1048 }
#define EIGEN_UNUSED_VARIABLE(var)
Definition: Macros.h:966
@ ColMajor
Definition: Constants.h:318
@ RowMajor
Definition: Constants.h:320
static int g_repeat
Definition: main.h:191
void sparse_basic(const SparseMatrixType &ref)
Definition: sparse_basic.cpp:25
static long g_dense_op_sparse_count
Definition: sparse_basic.cpp:16
void big_sparse_triplet(Index rows, Index cols, double density)
Definition: sparse_basic.cpp:975
#define CALL_SUBTEST_3(FUNC)
Definition: split_test_helper.h:16
#define CALL_SUBTEST_1(FUNC)
Definition: split_test_helper.h:4
#define CALL_SUBTEST_5(FUNC)
Definition: split_test_helper.h:28
#define CALL_SUBTEST_2(FUNC)
Definition: split_test_helper.h:10
#define CALL_SUBTEST_4(FUNC)
Definition: split_test_helper.h:22

References big_sparse_triplet(), calibrate::c, CALL_SUBTEST_1, CALL_SUBTEST_2, CALL_SUBTEST_3, CALL_SUBTEST_4, CALL_SUBTEST_5, Eigen::ColMajor, EIGEN_UNUSED_VARIABLE, g_dense_op_sparse_count, Eigen::g_repeat, i, UniformPSDSelfTest::r, Eigen::RowMajor, and sparse_basic().

◆ sparse_basic()

template<typename SparseMatrixType >
void sparse_basic ( const SparseMatrixType &  ref)
25  {
26  typedef typename SparseMatrixType::StorageIndex StorageIndex;
27  typedef Matrix<StorageIndex, 2, 1> Vector2;
28 
29  const Index rows = ref.rows();
30  const Index cols = ref.cols();
31  const Index inner = ref.innerSize();
32  const Index outer = ref.outerSize();
33 
34  typedef typename SparseMatrixType::Scalar Scalar;
36  enum { Flags = SparseMatrixType::Flags };
37 
38  double density = (std::max)(8. / (rows * cols), 0.01);
42  Scalar eps = Scalar(1e-6);
43 
44  Scalar s1 = internal::random<Scalar>();
45  {
46  SparseMatrixType m(rows, cols);
48  DenseVector vec1 = DenseVector::Random(rows);
49 
50  std::vector<Vector2> zeroCoords;
51  std::vector<Vector2> nonzeroCoords;
52  initSparse<Scalar>(density, refMat, m, 0, &zeroCoords, &nonzeroCoords);
53 
54  // test coeff and coeffRef
55  for (std::size_t i = 0; i < zeroCoords.size(); ++i) {
56  VERIFY_IS_MUCH_SMALLER_THAN(m.coeff(zeroCoords[i].x(), zeroCoords[i].y()), eps);
57  if (internal::is_same<SparseMatrixType, SparseMatrix<Scalar, Flags>>::value)
58  VERIFY_RAISES_ASSERT(m.coeffRef(zeroCoords[i].x(), zeroCoords[i].y()) = 5);
59  }
60  VERIFY_IS_APPROX(m, refMat);
61 
62  if (!nonzeroCoords.empty()) {
63  m.coeffRef(nonzeroCoords[0].x(), nonzeroCoords[0].y()) = Scalar(5);
64  refMat.coeffRef(nonzeroCoords[0].x(), nonzeroCoords[0].y()) = Scalar(5);
65  }
66 
67  VERIFY_IS_APPROX(m, refMat);
68 
69  // test assertion
70  VERIFY_RAISES_ASSERT(m.coeffRef(-1, 1) = 0);
71  VERIFY_RAISES_ASSERT(m.coeffRef(0, m.cols()) = 0);
72  }
73 
74  // test insert (inner random)
75  {
77  m1.setZero();
78  SparseMatrixType m2(rows, cols);
79  bool call_reserve = internal::random<int>() % 2;
80  Index nnz = internal::random<int>(1, int(rows) / 2);
81  if (call_reserve) {
82  if (internal::random<int>() % 2)
83  m2.reserve(VectorXi::Constant(m2.outerSize(), int(nnz)));
84  else
85  m2.reserve(m2.outerSize() * nnz);
86  }
87  g_realloc_count = 0;
88  for (Index j = 0; j < cols; ++j) {
89  for (Index k = 0; k < nnz; ++k) {
90  Index i = internal::random<Index>(0, rows - 1);
91  if (m1.coeff(i, j) == Scalar(0)) {
92  Scalar v = internal::random<Scalar>();
93  if (v == Scalar(0)) v = Scalar(1);
94  m1(i, j) = v;
95  m2.insert(i, j) = v;
96  }
97  }
98  }
99 
100  if (call_reserve && !SparseMatrixType::IsRowMajor) {
101  VERIFY(g_realloc_count == 0);
102  }
103 
105  }
106 
107  // test insert (fully random)
108  {
110  m1.setZero();
111  SparseMatrixType m2(rows, cols);
112  if (internal::random<int>() % 2) m2.reserve(VectorXi::Constant(m2.outerSize(), 2));
113  for (int k = 0; k < rows * cols; ++k) {
114  Index i = internal::random<Index>(0, rows - 1);
115  Index j = internal::random<Index>(0, cols - 1);
116  if ((m1.coeff(i, j) == Scalar(0)) && (internal::random<int>() % 2)) {
117  Scalar v = internal::random<Scalar>();
118  if (v == Scalar(0)) v = Scalar(1);
119  m1(i, j) = v;
120  m2.insert(i, j) = v;
121  } else {
122  Scalar v = internal::random<Scalar>();
123  if (v == Scalar(0)) v = Scalar(1);
124  m1(i, j) = v;
125  m2.coeffRef(i, j) = v;
126  }
127  }
129  }
130 
131  // test insert (un-compressed)
132  for (int mode = 0; mode < 4; ++mode) {
134  m1.setZero();
135  SparseMatrixType m2(rows, cols);
136  VectorXi r(VectorXi::Constant(m2.outerSize(),
137  ((mode % 2) == 0) ? int(m2.innerSize()) : std::max<int>(1, int(m2.innerSize()) / 8)));
138  m2.reserve(r);
139  for (Index k = 0; k < rows * cols; ++k) {
140  Index i = internal::random<Index>(0, rows - 1);
141  Index j = internal::random<Index>(0, cols - 1);
142  if (m1.coeff(i, j) == Scalar(0)) {
143  Scalar v = internal::random<Scalar>();
144  if (v == Scalar(0)) v = Scalar(1);
145  m1(i, j) = v;
146  m2.insert(i, j) = v;
147  }
148  if (mode == 3) m2.reserve(r);
149  }
150  if (internal::random<int>() % 2) m2.makeCompressed();
152  }
153 
154  // test removeOuterVectors / insertEmptyOuterVectors
155  {
156  for (int mode = 0; mode < 4; mode++) {
157  CompatibleDenseMatrix m1(rows, cols);
158  m1.setZero();
159  SparseMatrixType m2(rows, cols);
160  Vector<Index, Dynamic> reserveSizes(outer);
161  for (Index j = 0; j < outer; j++) reserveSizes(j) = internal::random<Index>(1, inner - 1);
162  m2.reserve(reserveSizes);
163  for (Index j = 0; j < outer; j++) {
164  Index i = internal::random<Index>(0, inner - 1);
165  Scalar val = internal::random<Scalar>();
166  m1.coeffRefByOuterInner(j, i) = val;
167  m2.insertByOuterInner(j, i) = val;
168  }
169  if (mode % 2 == 0) m2.makeCompressed();
170 
171  if (mode < 2) {
172  Index num = internal::random<Index>(0, outer - 1);
173  Index start = internal::random<Index>(0, outer - num);
174 
175  Index newRows = SparseMatrixType::IsRowMajor ? rows - num : rows;
176  Index newCols = SparseMatrixType::IsRowMajor ? cols : cols - num;
177 
178  CompatibleDenseMatrix m3(newRows, newCols);
179  m3.setConstant(Scalar(NumTraits<RealScalar>::quiet_NaN()));
180 
181  if (SparseMatrixType::IsRowMajor) {
182  m3.topRows(start) = m1.topRows(start);
183  m3.bottomRows(newRows - start) = m1.bottomRows(newRows - start);
184  } else {
185  m3.leftCols(start) = m1.leftCols(start);
186  m3.rightCols(newCols - start) = m1.rightCols(newCols - start);
187  }
188 
189  SparseMatrixType m4 = m2;
190  m4.removeOuterVectors(start, num);
191 
192  VERIFY_IS_CWISE_EQUAL(m3, m4.toDense());
193  } else {
194  Index num = internal::random<Index>(0, outer - 1);
195  Index start = internal::random<Index>(0, outer - 1);
196 
197  Index newRows = SparseMatrixType::IsRowMajor ? rows + num : rows;
198  Index newCols = SparseMatrixType::IsRowMajor ? cols : cols + num;
199 
200  CompatibleDenseMatrix m3(newRows, newCols);
201  m3.setConstant(Scalar(NumTraits<RealScalar>::quiet_NaN()));
202 
203  if (SparseMatrixType::IsRowMajor) {
204  m3.topRows(start) = m1.topRows(start);
205  m3.middleRows(start, num).setZero();
206  m3.bottomRows(rows - start) = m1.bottomRows(rows - start);
207  } else {
208  m3.leftCols(start) = m1.leftCols(start);
209  m3.middleCols(start, num).setZero();
210  m3.rightCols(cols - start) = m1.rightCols(cols - start);
211  }
212 
213  SparseMatrixType m4 = m2;
214  m4.insertEmptyOuterVectors(start, num);
215 
216  VERIFY_IS_CWISE_EQUAL(m3, m4.toDense());
217  }
218  }
219  }
220 
221  // test sort
222  if (inner > 1) {
223  bool StorageOrdersMatch = int(DenseMatrix::IsRowMajor) == int(SparseMatrixType::IsRowMajor);
225  m1.setZero();
226  SparseMatrixType m2(rows, cols);
227  // generate random inner indices with no repeats
228  Vector<Index, Dynamic> innerIndices(inner);
229  innerIndices.setLinSpaced(inner, 0, inner - 1);
230  std::random_device rd;
231  std::mt19937 g(rd());
232  for (Index j = 0; j < outer; j++) {
233  std::shuffle(innerIndices.begin(), innerIndices.end(), g);
234  Index nzj = internal::random<Index>(2, inner / 2);
235  for (Index k = 0; k < nzj; k++) {
236  Index i = innerIndices[k];
237  Scalar val = internal::random<Scalar>();
238  m1.coeffRefByOuterInner(StorageOrdersMatch ? j : i, StorageOrdersMatch ? i : j) = val;
239  m2.insertByOuterInner(j, i) = val;
240  }
241  }
242 
244  // sort wrt greater
245  m2.template sortInnerIndices<std::greater<>>();
246  // verify that all inner vectors are not sorted wrt less
247  VERIFY_IS_EQUAL(m2.template innerIndicesAreSorted<std::less<>>(), 0);
248  // verify that all inner vectors are sorted wrt greater
249  VERIFY_IS_EQUAL(m2.template innerIndicesAreSorted<std::greater<>>(), m2.outerSize());
250  // verify that sort does not change evaluation
252  // sort wrt less
253  m2.template sortInnerIndices<std::less<>>();
254  // verify that all inner vectors are sorted wrt less
255  VERIFY_IS_EQUAL(m2.template innerIndicesAreSorted<std::less<>>(), m2.outerSize());
256  // verify that all inner vectors are not sorted wrt greater
257  VERIFY_IS_EQUAL(m2.template innerIndicesAreSorted<std::greater<>>(), 0);
258  // verify that sort does not change evaluation
260 
261  m2.makeCompressed();
262  // sort wrt greater
263  m2.template sortInnerIndices<std::greater<>>();
264  // verify that all inner vectors are not sorted wrt less
265  VERIFY_IS_EQUAL(m2.template innerIndicesAreSorted<std::less<>>(), 0);
266  // verify that all inner vectors are sorted wrt greater
267  VERIFY_IS_EQUAL(m2.template innerIndicesAreSorted<std::greater<>>(), m2.outerSize());
268  // verify that sort does not change evaluation
270  // sort wrt less
271  m2.template sortInnerIndices<std::less<>>();
272  // verify that all inner vectors are sorted wrt less
273  VERIFY_IS_EQUAL(m2.template innerIndicesAreSorted<std::less<>>(), m2.outerSize());
274  // verify that all inner vectors are not sorted wrt greater
275  VERIFY_IS_EQUAL(m2.template innerIndicesAreSorted<std::greater<>>(), 0);
276  // verify that sort does not change evaluation
278  }
279 
280  // test basic computations
281  {
286  SparseMatrixType m1(rows, cols);
287  SparseMatrixType m2(rows, cols);
288  SparseMatrixType m3(rows, cols);
289  SparseMatrixType m4(rows, cols);
290  initSparse<Scalar>(density, refM1, m1);
291  initSparse<Scalar>(density, refM2, m2);
292  initSparse<Scalar>(density, refM3, m3);
293  initSparse<Scalar>(density, refM4, m4);
294 
295  if (internal::random<bool>()) m1.makeCompressed();
296 
297  Index m1_nnz = m1.nonZeros();
298 
299  VERIFY_IS_APPROX(m1 * s1, refM1 * s1);
300  VERIFY_IS_APPROX(m1 + m2, refM1 + refM2);
301  VERIFY_IS_APPROX(m1 + m2 + m3, refM1 + refM2 + refM3);
302  VERIFY_IS_APPROX(m3.cwiseProduct(m1 + m2), refM3.cwiseProduct(refM1 + refM2));
303  VERIFY_IS_APPROX(m1 * s1 - m2, refM1 * s1 - refM2);
304  VERIFY_IS_APPROX(m4 = m1 / s1, refM1 / s1);
305  VERIFY_IS_EQUAL(m4.nonZeros(), m1_nnz);
306 
307  if (SparseMatrixType::IsRowMajor)
308  VERIFY_IS_APPROX(m1.innerVector(0).dot(refM2.row(0)), refM1.row(0).dot(refM2.row(0)));
309  else
310  VERIFY_IS_APPROX(m1.innerVector(0).dot(refM2.col(0)), refM1.col(0).dot(refM2.col(0)));
311 
312  DenseVector rv = DenseVector::Random(m1.cols());
313  DenseVector cv = DenseVector::Random(m1.rows());
314  Index r = internal::random<Index>(0, m1.rows() - 2);
315  Index c = internal::random<Index>(0, m1.cols() - 1);
316  VERIFY_IS_APPROX((m1.template block<1, Dynamic>(r, 0, 1, m1.cols()).dot(rv)), refM1.row(r).dot(rv));
317  VERIFY_IS_APPROX(m1.row(r).dot(rv), refM1.row(r).dot(rv));
318  VERIFY_IS_APPROX(m1.col(c).dot(cv), refM1.col(c).dot(cv));
319 
320  VERIFY_IS_APPROX(m1.conjugate(), refM1.conjugate());
321  VERIFY_IS_APPROX(m1.real(), refM1.real());
322 
323  refM4.setRandom();
324  // sparse cwise* dense
325  VERIFY_IS_APPROX(m3.cwiseProduct(refM4), refM3.cwiseProduct(refM4));
326  // dense cwise* sparse
327  VERIFY_IS_APPROX(refM4.cwiseProduct(m3), refM4.cwiseProduct(refM3));
328  // VERIFY_IS_APPROX(m3.cwise()/refM4, refM3.cwise()/refM4);
329 
330  // mixed sparse-dense
331  VERIFY_IS_APPROX(refM4 + m3, refM4 + refM3);
332  VERIFY_IS_APPROX(m3 + refM4, refM3 + refM4);
333  VERIFY_IS_APPROX(refM4 - m3, refM4 - refM3);
334  VERIFY_IS_APPROX(m3 - refM4, refM3 - refM4);
335  VERIFY_IS_APPROX((RealScalar(0.5) * refM4 + RealScalar(0.5) * m3).eval(),
336  RealScalar(0.5) * refM4 + RealScalar(0.5) * refM3);
337  VERIFY_IS_APPROX((RealScalar(0.5) * refM4 + m3 * RealScalar(0.5)).eval(),
338  RealScalar(0.5) * refM4 + RealScalar(0.5) * refM3);
339  VERIFY_IS_APPROX((RealScalar(0.5) * refM4 + m3.cwiseProduct(m3)).eval(),
340  RealScalar(0.5) * refM4 + refM3.cwiseProduct(refM3));
341 
342  VERIFY_IS_APPROX((RealScalar(0.5) * refM4 + RealScalar(0.5) * m3).eval(),
343  RealScalar(0.5) * refM4 + RealScalar(0.5) * refM3);
344  VERIFY_IS_APPROX((RealScalar(0.5) * refM4 + m3 * RealScalar(0.5)).eval(),
345  RealScalar(0.5) * refM4 + RealScalar(0.5) * refM3);
346  VERIFY_IS_APPROX((RealScalar(0.5) * refM4 + (m3 + m3)).eval(), RealScalar(0.5) * refM4 + (refM3 + refM3));
347  VERIFY_IS_APPROX(((refM3 + m3) + RealScalar(0.5) * m3).eval(), RealScalar(0.5) * refM3 + (refM3 + refM3));
348  VERIFY_IS_APPROX((RealScalar(0.5) * refM4 + (refM3 + m3)).eval(), RealScalar(0.5) * refM4 + (refM3 + refM3));
349  VERIFY_IS_APPROX((RealScalar(0.5) * refM4 + (m3 + refM3)).eval(), RealScalar(0.5) * refM4 + (refM3 + refM3));
350 
351  VERIFY_IS_APPROX(m1.sum(), refM1.sum());
352 
353  m4 = m1;
354  refM4 = m4;
355 
356  VERIFY_IS_APPROX(m1 *= s1, refM1 *= s1);
357  VERIFY_IS_EQUAL(m1.nonZeros(), m1_nnz);
358  VERIFY_IS_APPROX(m1 /= s1, refM1 /= s1);
359  VERIFY_IS_EQUAL(m1.nonZeros(), m1_nnz);
360 
361  VERIFY_IS_APPROX(m1 += m2, refM1 += refM2);
362  VERIFY_IS_APPROX(m1 -= m2, refM1 -= refM2);
363 
364  refM3 = refM1;
365 
366  VERIFY_IS_APPROX(refM1 += m2, refM3 += refM2);
367  VERIFY_IS_APPROX(refM1 -= m2, refM3 -= refM2);
368 
370  VERIFY_IS_APPROX(refM1 = m2 + refM4, refM3 = refM2 + refM4);
373  VERIFY_IS_APPROX(refM1 += m2 + refM4, refM3 += refM2 + refM4);
376  VERIFY_IS_APPROX(refM1 -= m2 + refM4, refM3 -= refM2 + refM4);
379  VERIFY_IS_APPROX(refM1 = refM4 + m2, refM3 = refM2 + refM4);
382  VERIFY_IS_APPROX(refM1 += refM4 + m2, refM3 += refM2 + refM4);
385  VERIFY_IS_APPROX(refM1 -= refM4 + m2, refM3 -= refM2 + refM4);
387 
389  VERIFY_IS_APPROX(refM1 = m2 - refM4, refM3 = refM2 - refM4);
392  VERIFY_IS_APPROX(refM1 += m2 - refM4, refM3 += refM2 - refM4);
395  VERIFY_IS_APPROX(refM1 -= m2 - refM4, refM3 -= refM2 - refM4);
398  VERIFY_IS_APPROX(refM1 = refM4 - m2, refM3 = refM4 - refM2);
401  VERIFY_IS_APPROX(refM1 += refM4 - m2, refM3 += refM4 - refM2);
404  VERIFY_IS_APPROX(refM1 -= refM4 - m2, refM3 -= refM4 - refM2);
406  refM3 = m3;
407 
408  if (rows >= 2 && cols >= 2) {
409  VERIFY_RAISES_ASSERT(m1 += m1.innerVector(0));
410  VERIFY_RAISES_ASSERT(m1 -= m1.innerVector(0));
411  VERIFY_RAISES_ASSERT(refM1 -= m1.innerVector(0));
412  VERIFY_RAISES_ASSERT(refM1 += m1.innerVector(0));
413  }
414  m1 = m4;
415  refM1 = refM4;
416 
417  // test aliasing
418  VERIFY_IS_APPROX((m1 = -m1), (refM1 = -refM1));
419  VERIFY_IS_EQUAL(m1.nonZeros(), m1_nnz);
420  m1 = m4;
421  refM1 = refM4;
422  VERIFY_IS_APPROX((m1 = m1.transpose()), (refM1 = refM1.transpose().eval()));
423  VERIFY_IS_EQUAL(m1.nonZeros(), m1_nnz);
424  m1 = m4;
425  refM1 = refM4;
426  VERIFY_IS_APPROX((m1 = -m1.transpose()), (refM1 = -refM1.transpose().eval()));
427  VERIFY_IS_EQUAL(m1.nonZeros(), m1_nnz);
428  m1 = m4;
429  refM1 = refM4;
430  VERIFY_IS_APPROX((m1 += -m1), (refM1 += -refM1));
431  VERIFY_IS_EQUAL(m1.nonZeros(), m1_nnz);
432  m1 = m4;
433  refM1 = refM4;
434 
435  if (m1.isCompressed()) {
436  VERIFY_IS_APPROX(m1.coeffs().sum(), m1.sum());
437  m1.coeffs() += s1;
438  for (Index j = 0; j < m1.outerSize(); ++j)
439  for (typename SparseMatrixType::InnerIterator it(m1, j); it; ++it) refM1(it.row(), it.col()) += s1;
440  VERIFY_IS_APPROX(m1, refM1);
441  }
442 
443  // and/or
444  {
446  SpBool mb1 = m1.real().template cast<bool>();
447  SpBool mb2 = m2.real().template cast<bool>();
448  VERIFY_IS_EQUAL(mb1.template cast<int>().sum(), refM1.real().template cast<bool>().count());
449  VERIFY_IS_EQUAL((mb1 && mb2).template cast<int>().sum(),
450  (refM1.real().template cast<bool>() && refM2.real().template cast<bool>()).count());
451  VERIFY_IS_EQUAL((mb1 || mb2).template cast<int>().sum(),
452  (refM1.real().template cast<bool>() || refM2.real().template cast<bool>()).count());
453  SpBool mb3 = mb1 && mb2;
454  if (mb1.coeffs().all() && mb2.coeffs().all()) {
455  VERIFY_IS_EQUAL(mb3.nonZeros(),
456  (refM1.real().template cast<bool>() && refM2.real().template cast<bool>()).count());
457  }
458  }
459  }
460 
461  // test reverse iterators
462  {
464  SparseMatrixType m2(rows, cols);
465  initSparse<Scalar>(density, refMat2, m2);
466  std::vector<Scalar> ref_value(m2.innerSize());
467  std::vector<Index> ref_index(m2.innerSize());
468  if (internal::random<bool>()) m2.makeCompressed();
469  for (Index j = 0; j < m2.outerSize(); ++j) {
470  Index count_forward = 0;
471 
472  for (typename SparseMatrixType::InnerIterator it(m2, j); it; ++it) {
473  ref_value[ref_value.size() - 1 - count_forward] = it.value();
474  ref_index[ref_index.size() - 1 - count_forward] = it.index();
475  count_forward++;
476  }
477  Index count_reverse = 0;
478  for (typename SparseMatrixType::ReverseInnerIterator it(m2, j); it; --it) {
479  VERIFY_IS_APPROX(std::abs(ref_value[ref_value.size() - count_forward + count_reverse]) + 1,
480  std::abs(it.value()) + 1);
481  VERIFY_IS_EQUAL(ref_index[ref_index.size() - count_forward + count_reverse], it.index());
482  count_reverse++;
483  }
484  VERIFY_IS_EQUAL(count_forward, count_reverse);
485  }
486  }
487 
488  // test transpose
489  {
491  SparseMatrixType m2(rows, cols);
492  initSparse<Scalar>(density, refMat2, m2);
493  VERIFY_IS_APPROX(m2.transpose().eval(), refMat2.transpose().eval());
494  VERIFY_IS_APPROX(m2.transpose(), refMat2.transpose());
495 
496  VERIFY_IS_APPROX(SparseMatrixType(m2.adjoint()), refMat2.adjoint());
497 
498  // check isApprox handles opposite storage order
500  VERIFY(m2.isApprox(m3));
501  }
502 
503  // test prune
504  {
505  SparseMatrixType m2(rows, cols);
506  DenseMatrix refM2(rows, cols);
507  refM2.setZero();
508  int countFalseNonZero = 0;
509  int countTrueNonZero = 0;
510  m2.reserve(VectorXi::Constant(m2.outerSize(), int(m2.innerSize())));
511  for (Index j = 0; j < m2.cols(); ++j) {
512  for (Index i = 0; i < m2.rows(); ++i) {
513  float x = internal::random<float>(0, 1);
514  if (x < 0.1f) {
515  // do nothing
516  } else if (x < 0.5f) {
517  countFalseNonZero++;
518  m2.insert(i, j) = Scalar(0);
519  } else {
520  countTrueNonZero++;
521  m2.insert(i, j) = Scalar(1);
522  refM2(i, j) = Scalar(1);
523  }
524  }
525  }
526  if (internal::random<bool>()) m2.makeCompressed();
527  VERIFY(countFalseNonZero + countTrueNonZero == m2.nonZeros());
528  if (countTrueNonZero > 0) VERIFY_IS_APPROX(m2, refM2);
529  m2.prune(Scalar(1));
530  VERIFY(countTrueNonZero == m2.nonZeros());
531  VERIFY_IS_APPROX(m2, refM2);
532  }
533 
534  // test setFromTriplets / insertFromTriplets
535  {
536  typedef Triplet<Scalar, StorageIndex> TripletType;
537  Index ntriplets = rows * cols;
538 
539  std::vector<TripletType> triplets;
540 
541  triplets.reserve(ntriplets);
542  DenseMatrix refMat_sum = DenseMatrix::Zero(rows, cols);
543  DenseMatrix refMat_prod = DenseMatrix::Zero(rows, cols);
544  DenseMatrix refMat_last = DenseMatrix::Zero(rows, cols);
545 
546  for (Index i = 0; i < ntriplets; ++i) {
547  StorageIndex r = internal::random<StorageIndex>(0, StorageIndex(rows - 1));
548  StorageIndex c = internal::random<StorageIndex>(0, StorageIndex(cols - 1));
549  Scalar v = internal::random<Scalar>();
550  triplets.push_back(TripletType(r, c, v));
551  refMat_sum(r, c) += v;
552  if (std::abs(refMat_prod(r, c)) == 0)
553  refMat_prod(r, c) = v;
554  else
555  refMat_prod(r, c) *= v;
556  refMat_last(r, c) = v;
557  }
558 
559  std::vector<TripletType> moreTriplets;
560  moreTriplets.reserve(ntriplets);
561  DenseMatrix refMat_sum_more = refMat_sum;
562  DenseMatrix refMat_prod_more = refMat_prod;
563  DenseMatrix refMat_last_more = refMat_last;
564 
565  for (Index i = 0; i < ntriplets; ++i) {
566  StorageIndex r = internal::random<StorageIndex>(0, StorageIndex(rows - 1));
567  StorageIndex c = internal::random<StorageIndex>(0, StorageIndex(cols - 1));
568  Scalar v = internal::random<Scalar>();
569  moreTriplets.push_back(TripletType(r, c, v));
570  refMat_sum_more(r, c) += v;
571  if (std::abs(refMat_prod_more(r, c)) == 0)
572  refMat_prod_more(r, c) = v;
573  else
574  refMat_prod_more(r, c) *= v;
575  refMat_last_more(r, c) = v;
576  }
577 
578  SparseMatrixType m(rows, cols);
579 
580  // test setFromTriplets / insertFromTriplets
581 
582  m.setFromTriplets(triplets.begin(), triplets.end());
583  VERIFY_IS_APPROX(m, refMat_sum);
584  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
585  VERIFY(m.isCompressed());
586  m.insertFromTriplets(moreTriplets.begin(), moreTriplets.end());
587  VERIFY_IS_APPROX(m, refMat_sum_more);
588  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
589 
590  m.setFromTriplets(triplets.begin(), triplets.end(), std::multiplies<Scalar>());
591  VERIFY_IS_APPROX(m, refMat_prod);
592  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
593  VERIFY(m.isCompressed());
594  m.insertFromTriplets(moreTriplets.begin(), moreTriplets.end(), std::multiplies<Scalar>());
595  VERIFY_IS_APPROX(m, refMat_prod_more);
596  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
597 
598  m.setFromTriplets(triplets.begin(), triplets.end(), [](Scalar, Scalar b) { return b; });
599  VERIFY_IS_APPROX(m, refMat_last);
600  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
601  m.setFromTriplets(triplets.begin(), triplets.end(), [](Scalar, Scalar b) { return b; });
602  VERIFY(m.isCompressed());
603  m.insertFromTriplets(moreTriplets.begin(), moreTriplets.end(), [](Scalar, Scalar b) { return b; });
604  VERIFY_IS_APPROX(m, refMat_last_more);
605  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
606 
607  // insert into an uncompressed matrix
608 
609  VectorXi reserveSizes(m.outerSize());
610  for (Index i = 0; i < m.outerSize(); i++) reserveSizes[i] = internal::random<int>(1, 7);
611 
612  m.setFromTriplets(triplets.begin(), triplets.end());
613  m.reserve(reserveSizes);
614  VERIFY(!m.isCompressed());
615  m.insertFromTriplets(moreTriplets.begin(), moreTriplets.end());
616  VERIFY_IS_APPROX(m, refMat_sum_more);
617  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
618 
619  m.setFromTriplets(triplets.begin(), triplets.end(), std::multiplies<Scalar>());
620  m.reserve(reserveSizes);
621  VERIFY(!m.isCompressed());
622  m.insertFromTriplets(moreTriplets.begin(), moreTriplets.end(), std::multiplies<Scalar>());
623  VERIFY_IS_APPROX(m, refMat_prod_more);
624  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
625 
626  m.setFromTriplets(triplets.begin(), triplets.end(), [](Scalar, Scalar b) { return b; });
627  m.reserve(reserveSizes);
628  VERIFY(!m.isCompressed());
629  m.insertFromTriplets(moreTriplets.begin(), moreTriplets.end(), [](Scalar, Scalar b) { return b; });
630  VERIFY_IS_APPROX(m, refMat_last_more);
631  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
632 
633  // test setFromSortedTriplets / insertFromSortedTriplets
634 
635  struct triplet_comp {
636  inline bool operator()(const TripletType& a, const TripletType& b) {
637  return SparseMatrixType::IsRowMajor ? ((a.row() != b.row()) ? (a.row() < b.row()) : (a.col() < b.col()))
638  : ((a.col() != b.col()) ? (a.col() < b.col()) : (a.row() < b.row()));
639  }
640  };
641 
642  // stable_sort is only necessary when the reduction functor is dependent on the order of the triplets
643  // this is the case with refMat_last
644  // for most cases, std::sort is sufficient and preferred
645 
646  std::stable_sort(triplets.begin(), triplets.end(), triplet_comp());
647  std::stable_sort(moreTriplets.begin(), moreTriplets.end(), triplet_comp());
648 
649  m.setFromSortedTriplets(triplets.begin(), triplets.end());
650  VERIFY_IS_APPROX(m, refMat_sum);
651  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
652  VERIFY(m.isCompressed());
653  m.insertFromSortedTriplets(moreTriplets.begin(), moreTriplets.end());
654  VERIFY_IS_APPROX(m, refMat_sum_more);
655  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
656 
657  m.setFromSortedTriplets(triplets.begin(), triplets.end(), std::multiplies<Scalar>());
658  VERIFY_IS_APPROX(m, refMat_prod);
659  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
660  VERIFY(m.isCompressed());
661  m.insertFromSortedTriplets(moreTriplets.begin(), moreTriplets.end(), std::multiplies<Scalar>());
662  VERIFY_IS_APPROX(m, refMat_prod_more);
663  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
664 
665  m.setFromSortedTriplets(triplets.begin(), triplets.end(), [](Scalar, Scalar b) { return b; });
666  VERIFY_IS_APPROX(m, refMat_last);
667  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
668  VERIFY(m.isCompressed());
669  m.insertFromSortedTriplets(moreTriplets.begin(), moreTriplets.end(), [](Scalar, Scalar b) { return b; });
670  VERIFY_IS_APPROX(m, refMat_last_more);
671  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
672 
673  // insert into an uncompressed matrix
674 
675  m.setFromSortedTriplets(triplets.begin(), triplets.end());
676  m.reserve(reserveSizes);
677  VERIFY(!m.isCompressed());
678  m.insertFromSortedTriplets(moreTriplets.begin(), moreTriplets.end());
679  VERIFY_IS_APPROX(m, refMat_sum_more);
680  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
681 
682  m.setFromSortedTriplets(triplets.begin(), triplets.end(), std::multiplies<Scalar>());
683  m.reserve(reserveSizes);
684  VERIFY(!m.isCompressed());
685  m.insertFromSortedTriplets(moreTriplets.begin(), moreTriplets.end(), std::multiplies<Scalar>());
686  VERIFY_IS_APPROX(m, refMat_prod_more);
687  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
688 
689  m.setFromSortedTriplets(triplets.begin(), triplets.end(), [](Scalar, Scalar b) { return b; });
690  m.reserve(reserveSizes);
691  VERIFY(!m.isCompressed());
692  m.insertFromSortedTriplets(moreTriplets.begin(), moreTriplets.end(), [](Scalar, Scalar b) { return b; });
693  VERIFY_IS_APPROX(m, refMat_last_more);
694  VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
695  }
696 
697  // test Map
698  {
699  DenseMatrix refMat2(rows, cols), refMat3(rows, cols);
700  SparseMatrixType m2(rows, cols), m3(rows, cols);
701  initSparse<Scalar>(density, refMat2, m2);
702  initSparse<Scalar>(density, refMat3, m3);
703  {
704  Map<SparseMatrixType> mapMat2(m2.rows(), m2.cols(), m2.nonZeros(), m2.outerIndexPtr(), m2.innerIndexPtr(),
705  m2.valuePtr(), m2.innerNonZeroPtr());
706  Map<SparseMatrixType> mapMat3(m3.rows(), m3.cols(), m3.nonZeros(), m3.outerIndexPtr(), m3.innerIndexPtr(),
707  m3.valuePtr(), m3.innerNonZeroPtr());
708  VERIFY_IS_APPROX(mapMat2 + mapMat3, refMat2 + refMat3);
709  VERIFY_IS_APPROX(mapMat2 + mapMat3, refMat2 + refMat3);
710  }
711 
712  Index i = internal::random<Index>(0, rows - 1);
713  Index j = internal::random<Index>(0, cols - 1);
714  m2.coeffRef(i, j) = 123;
715  if (internal::random<bool>()) m2.makeCompressed();
716  Map<SparseMatrixType> mapMat2(rows, cols, m2.nonZeros(), m2.outerIndexPtr(), m2.innerIndexPtr(), m2.valuePtr(),
717  m2.innerNonZeroPtr());
718  VERIFY_IS_EQUAL(m2.coeff(i, j), Scalar(123));
719  VERIFY_IS_EQUAL(mapMat2.coeff(i, j), Scalar(123));
720  mapMat2.coeffRef(i, j) = -123;
721  VERIFY_IS_EQUAL(m2.coeff(i, j), Scalar(-123));
722  }
723 
724  // test triangularView
725  {
726  DenseMatrix refMat2(rows, cols), refMat3(rows, cols);
727  SparseMatrixType m2(rows, cols), m3(rows, cols);
728  initSparse<Scalar>(density, refMat2, m2);
729  refMat3 = refMat2.template triangularView<Lower>();
730  m3 = m2.template triangularView<Lower>();
731  VERIFY_IS_APPROX(m3, refMat3);
732 
733  refMat3 = refMat2.template triangularView<Upper>();
734  m3 = m2.template triangularView<Upper>();
735  VERIFY_IS_APPROX(m3, refMat3);
736 
737  {
738  refMat3 = refMat2.template triangularView<UnitUpper>();
739  m3 = m2.template triangularView<UnitUpper>();
740  VERIFY_IS_APPROX(m3, refMat3);
741 
742  refMat3 = refMat2.template triangularView<UnitLower>();
743  m3 = m2.template triangularView<UnitLower>();
744  VERIFY_IS_APPROX(m3, refMat3);
745  }
746 
747  refMat3 = refMat2.template triangularView<StrictlyUpper>();
748  m3 = m2.template triangularView<StrictlyUpper>();
749  VERIFY_IS_APPROX(m3, refMat3);
750 
751  refMat3 = refMat2.template triangularView<StrictlyLower>();
752  m3 = m2.template triangularView<StrictlyLower>();
753  VERIFY_IS_APPROX(m3, refMat3);
754 
755  // check sparse-triangular to dense
756  refMat3 = m2.template triangularView<StrictlyUpper>();
757  VERIFY_IS_APPROX(refMat3, DenseMatrix(refMat2.template triangularView<StrictlyUpper>()));
758 
759  // check sparse triangular view iteration-based evaluation
760  m2.setZero();
761  VERIFY_IS_CWISE_EQUAL(m2.template triangularView<UnitLower>().toDense(), DenseMatrix::Identity(rows, cols));
762  VERIFY_IS_CWISE_EQUAL(m2.template triangularView<UnitUpper>().toDense(), DenseMatrix::Identity(rows, cols));
763  }
764 
765  // test selfadjointView
766  if (!SparseMatrixType::IsRowMajor) {
767  DenseMatrix refMat2(rows, rows), refMat3(rows, rows);
768  SparseMatrixType m2(rows, rows), m3(rows, rows);
769  initSparse<Scalar>(density, refMat2, m2);
770  refMat3 = refMat2.template selfadjointView<Lower>();
771  m3 = m2.template selfadjointView<Lower>();
772  VERIFY_IS_APPROX(m3, refMat3);
773 
774  refMat3 += refMat2.template selfadjointView<Lower>();
775  m3 += m2.template selfadjointView<Lower>();
776  VERIFY_IS_APPROX(m3, refMat3);
777 
778  refMat3 -= refMat2.template selfadjointView<Lower>();
779  m3 -= m2.template selfadjointView<Lower>();
780  VERIFY_IS_APPROX(m3, refMat3);
781 
782  // selfadjointView only works for square matrices:
783  SparseMatrixType m4(rows, rows + 1);
784  VERIFY_RAISES_ASSERT(m4.template selfadjointView<Lower>());
785  VERIFY_RAISES_ASSERT(m4.template selfadjointView<Upper>());
786  }
787 
788  // test sparseView
789  {
791  SparseMatrixType m2(rows, rows);
792  initSparse<Scalar>(density, refMat2, m2);
793  VERIFY_IS_APPROX(m2.eval(), refMat2.sparseView().eval());
794 
795  // sparse view on expressions:
796  VERIFY_IS_APPROX((s1 * m2).eval(), (s1 * refMat2).sparseView().eval());
797  VERIFY_IS_APPROX((m2 + m2).eval(), (refMat2 + refMat2).sparseView().eval());
798  VERIFY_IS_APPROX((m2 * m2).eval(), (refMat2.lazyProduct(refMat2)).sparseView().eval());
799  VERIFY_IS_APPROX((m2 * m2).eval(), (refMat2 * refMat2).sparseView().eval());
800  }
801 
802  // test diagonal
803  {
805  SparseMatrixType m2(rows, cols);
806  initSparse<Scalar>(density, refMat2, m2);
807  VERIFY_IS_APPROX(m2.diagonal(), refMat2.diagonal().eval());
808  DenseVector d = m2.diagonal();
809  VERIFY_IS_APPROX(d, refMat2.diagonal().eval());
810  d = m2.diagonal().array();
811  VERIFY_IS_APPROX(d, refMat2.diagonal().eval());
812  VERIFY_IS_APPROX(const_cast<const SparseMatrixType&>(m2).diagonal(), refMat2.diagonal().eval());
813 
814  initSparse<Scalar>(density, refMat2, m2, ForceNonZeroDiag);
815  m2.diagonal() += refMat2.diagonal();
816  refMat2.diagonal() += refMat2.diagonal();
817  VERIFY_IS_APPROX(m2, refMat2);
818  }
819 
820  // test diagonal to sparse
821  {
822  DenseVector d = DenseVector::Random(rows);
823  DenseMatrix refMat2 = d.asDiagonal();
824  SparseMatrixType m2;
825  m2 = d.asDiagonal();
826  VERIFY_IS_APPROX(m2, refMat2);
827  SparseMatrixType m3(d.asDiagonal());
828  VERIFY_IS_APPROX(m3, refMat2);
829  refMat2 += d.asDiagonal();
830  m2 += d.asDiagonal();
831  VERIFY_IS_APPROX(m2, refMat2);
832  m2.setZero();
833  m2 += d.asDiagonal();
834  refMat2.setZero();
835  refMat2 += d.asDiagonal();
836  VERIFY_IS_APPROX(m2, refMat2);
837  m2.setZero();
838  m2 -= d.asDiagonal();
839  refMat2.setZero();
840  refMat2 -= d.asDiagonal();
841  VERIFY_IS_APPROX(m2, refMat2);
842 
843  initSparse<Scalar>(density, refMat2, m2);
844  m2.makeCompressed();
845  m2 += d.asDiagonal();
846  refMat2 += d.asDiagonal();
847  VERIFY_IS_APPROX(m2, refMat2);
848 
849  initSparse<Scalar>(density, refMat2, m2);
850  m2.makeCompressed();
851  VectorXi res(rows);
852  for (Index i = 0; i < rows; ++i) res(i) = internal::random<int>(0, 3);
853  m2.reserve(res);
854  m2 -= d.asDiagonal();
855  refMat2 -= d.asDiagonal();
856  VERIFY_IS_APPROX(m2, refMat2);
857  }
858 
859  // test conservative resize
860  {
861  std::vector<std::pair<StorageIndex, StorageIndex>> inc;
862  if (rows > 3 && cols > 2) inc.push_back(std::pair<StorageIndex, StorageIndex>(-3, -2));
863  inc.push_back(std::pair<StorageIndex, StorageIndex>(0, 0));
864  inc.push_back(std::pair<StorageIndex, StorageIndex>(3, 2));
865  inc.push_back(std::pair<StorageIndex, StorageIndex>(3, 0));
866  inc.push_back(std::pair<StorageIndex, StorageIndex>(0, 3));
867  inc.push_back(std::pair<StorageIndex, StorageIndex>(0, -1));
868  inc.push_back(std::pair<StorageIndex, StorageIndex>(-1, 0));
869  inc.push_back(std::pair<StorageIndex, StorageIndex>(-1, -1));
870 
871  for (size_t i = 0; i < inc.size(); i++) {
872  StorageIndex incRows = inc[i].first;
873  StorageIndex incCols = inc[i].second;
874  SparseMatrixType m1(rows, cols);
876  initSparse<Scalar>(density, refMat1, m1);
877 
878  SparseMatrixType m2 = m1;
879  m2.makeCompressed();
880 
881  m1.conservativeResize(rows + incRows, cols + incCols);
882  m2.conservativeResize(rows + incRows, cols + incCols);
883  refMat1.conservativeResize(rows + incRows, cols + incCols);
884  if (incRows > 0) refMat1.bottomRows(incRows).setZero();
885  if (incCols > 0) refMat1.rightCols(incCols).setZero();
886 
887  VERIFY_IS_APPROX(m1, refMat1);
888  VERIFY_IS_APPROX(m2, refMat1);
889 
890  // Insert new values
891  if (incRows > 0) m1.insert(m1.rows() - 1, 0) = refMat1(refMat1.rows() - 1, 0) = 1;
892  if (incCols > 0) m1.insert(0, m1.cols() - 1) = refMat1(0, refMat1.cols() - 1) = 1;
893 
894  VERIFY_IS_APPROX(m1, refMat1);
895  }
896  }
897 
898  // test Identity matrix
899  {
900  DenseMatrix refMat1 = DenseMatrix::Identity(rows, rows);
901  SparseMatrixType m1(rows, rows);
902  m1.setIdentity();
903  VERIFY_IS_APPROX(m1, refMat1);
904  for (int k = 0; k < rows * rows / 4; ++k) {
905  Index i = internal::random<Index>(0, rows - 1);
906  Index j = internal::random<Index>(0, rows - 1);
907  Scalar v = internal::random<Scalar>();
908  m1.coeffRef(i, j) = v;
909  refMat1.coeffRef(i, j) = v;
910  VERIFY_IS_APPROX(m1, refMat1);
911  if (internal::random<Index>(0, 10) < 2) m1.makeCompressed();
912  }
913  m1.setIdentity();
914  refMat1.setIdentity();
915  VERIFY_IS_APPROX(m1, refMat1);
916  }
917 
918  // test array/vector of InnerIterator
919  {
920  typedef typename SparseMatrixType::InnerIterator IteratorType;
921 
923  SparseMatrixType m2(rows, cols);
924  initSparse<Scalar>(density, refMat2, m2);
925  IteratorType static_array[2];
926  static_array[0] = IteratorType(m2, 0);
927  static_array[1] = IteratorType(m2, m2.outerSize() - 1);
928  VERIFY(static_array[0] || m2.innerVector(static_array[0].outer()).nonZeros() == 0);
929  VERIFY(static_array[1] || m2.innerVector(static_array[1].outer()).nonZeros() == 0);
930  if (static_array[0] && static_array[1]) {
931  ++(static_array[1]);
932  static_array[1] = IteratorType(m2, 0);
933  VERIFY(static_array[1]);
934  VERIFY(static_array[1].index() == static_array[0].index());
935  VERIFY(static_array[1].outer() == static_array[0].outer());
936  VERIFY(static_array[1].value() == static_array[0].value());
937  }
938 
939  std::vector<IteratorType> iters(2);
940  iters[0] = IteratorType(m2, 0);
941  iters[1] = IteratorType(m2, m2.outerSize() - 1);
942  }
943 
944  // test reserve with empty rows/columns
945  {
946  SparseMatrixType m1(0, cols);
947  m1.reserve(ArrayXi::Constant(m1.outerSize(), 1));
948  SparseMatrixType m2(rows, 0);
949  m2.reserve(ArrayXi::Constant(m2.outerSize(), 1));
950  }
951 
952  // test move
953  {
954  using TransposedType = SparseMatrix<Scalar, SparseMatrixType::IsRowMajor ? ColMajor : RowMajor,
955  typename SparseMatrixType::StorageIndex>;
956  DenseMatrix refMat1 = DenseMatrix::Random(rows, cols);
957  SparseMatrixType m1(rows, cols);
958  initSparse<Scalar>(density, refMat1, m1);
959  // test move ctor
960  SparseMatrixType m2(std::move(m1));
961  VERIFY_IS_APPROX(m2, refMat1);
962  // test move assignment
963  m1 = std::move(m2);
964  VERIFY_IS_APPROX(m1, refMat1);
965  // test move ctor (SparseMatrixBase)
966  TransposedType m3(std::move(m1.transpose()));
967  VERIFY_IS_APPROX(m3, refMat1.transpose());
968  // test move assignment (SparseMatrixBase)
969  m2 = std::move(m3.transpose());
970  VERIFY_IS_APPROX(m2, refMat1);
971  }
972 }
Matrix< Scalar, Dynamic, Dynamic > DenseMatrix
Definition: BenchSparseUtil.h:23
Matrix< Scalar, Dynamic, 1 > DenseVector
Definition: BenchSparseUtil.h:24
Array< double, 1, 3 > e(1./3., 0.5, 2.)
Matrix3d m1
Definition: IOFormat.cpp:2
m col(1)
m row(1)
cout << "Here is the matrix m:" << endl << m << endl;
Matrix< ptrdiff_t, 3, 1 > res
Definition: PartialRedux_count.cpp:3
RowVectorXd vec1(3)
MatrixType m2(n_dims)
Scalar * b
Definition: benchVecAdd.cpp:17
NumTraits< Scalar >::Real RealScalar
Definition: bench_gemm.cpp:46
A matrix or vector expression mapping an existing array of data.
Definition: Map.h:96
The matrix class, also used for vectors and row-vectors.
Definition: Eigen/Eigen/src/Core/Matrix.h:186
EIGEN_DEVICE_FUNC constexpr EIGEN_STRONG_INLINE Scalar & coeffRef(Index rowId, Index colId)
Definition: PlainObjectBase.h:217
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResize(Index rows, Index cols)
Definition: PlainObjectBase.h:398
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT
Definition: PlainObjectBase.h:192
EIGEN_DEVICE_FUNC Derived & setZero(Index size)
Definition: CwiseNullaryOp.h:569
Derived & setRandom(Index size)
Definition: Random.h:147
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT
Definition: PlainObjectBase.h:191
Expression of the transpose of a matrix.
Definition: Transpose.h:56
#define max(a, b)
Definition: datatypes.h:23
void diagonal(const MatrixType &m)
Definition: diagonal.cpp:13
Scalar * y
Definition: level1_cplx_impl.h:128
return int(ret)+1
const Scalar * a
Definition: level2_cplx_impl.h:32
char char char int int * k
Definition: level2_impl.h:374
#define VERIFY_IS_CWISE_EQUAL(a, b)
Definition: main.h:375
#define VERIFY_IS_EQUAL(a, b)
Definition: main.h:367
#define VERIFY_IS_MUCH_SMALLER_THAN(a, b)
Definition: main.h:371
#define VERIFY_RAISES_ASSERT(a)
Definition: main.h:329
double eps
Definition: crbond_bessel.cc:24
EIGEN_STRONG_INLINE Packet2d shuffle(const Packet2d &m, const Packet2d &n, int mask)
Definition: LSX/PacketMath.h:150
squared absolute value
Definition: GlobalFunctions.h:87
Extend namespace for flags.
Definition: fsi_chan_precond_driver.cc:56
void start(const unsigned &i)
(Re-)start i-th timer
Definition: oomph_utilities.cc:243
double Zero
Definition: pseudosolid_node_update_elements.cc:35
list x
Definition: plotDoE.py:28
@ ForceNonZeroDiag
Definition: sparse.h:32
static long g_realloc_count
Definition: sparse_basic.cpp:13
internal::nested_eval< T, 1 >::type eval(const T &xpr)
Definition: sparse_permutations.cpp:47
std::ptrdiff_t j
Definition: tut_arithmetic_redux_minmax.cpp:2

References a, abs(), b, calibrate::c, Eigen::Matrix< Scalar_, Rows_, Cols_, Options_, MaxRows_, MaxCols_ >::coeffRef(), Eigen::ColMajor, cols, Eigen::PlainObjectBase< Derived >::cols(), Eigen::PlainObjectBase< Derived >::conservativeResize(), UniformPSDSelfTest::density, diagonal(), e(), CRBond_Bessel::eps, eval(), ForceNonZeroDiag, g_dense_op_sparse_count, g_realloc_count, i, int(), j, k, m, m1, m2(), max, UniformPSDSelfTest::r, res, Eigen::RowMajor, rows, Eigen::PlainObjectBase< Derived >::rows(), Eigen::PlainObjectBase< Derived >::setRandom(), Eigen::PlainObjectBase< Derived >::setZero(), Eigen::internal::shuffle(), oomph::CumulativeTimings::start(), v, calibrate::val, Eigen::value, vec1(), VERIFY, VERIFY_IS_APPROX, VERIFY_IS_CWISE_EQUAL, VERIFY_IS_EQUAL, VERIFY_IS_MUCH_SMALLER_THAN, VERIFY_RAISES_ASSERT, plotDoE::x, y, and oomph::PseudoSolidHelper::Zero.

Referenced by EIGEN_DECLARE_TEST().

Variable Documentation

◆ g_dense_op_sparse_count

long g_dense_op_sparse_count = 0
static

Referenced by EIGEN_DECLARE_TEST(), and sparse_basic().

◆ g_realloc_count

long g_realloc_count = 0
static

Referenced by sparse_basic().