SparseMatrix.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_SPARSEMATRIX_H
11 #define EIGEN_SPARSEMATRIX_H
12 
13 // IWYU pragma: private
14 #include "./InternalHeaderCheck.h"
15 
16 namespace Eigen {
17 
49 namespace internal {
50 template <typename Scalar_, int Options_, typename StorageIndex_>
51 struct traits<SparseMatrix<Scalar_, Options_, StorageIndex_>> {
52  typedef Scalar_ Scalar;
53  typedef StorageIndex_ StorageIndex;
54  typedef Sparse StorageKind;
55  typedef MatrixXpr XprKind;
56  enum {
57  RowsAtCompileTime = Dynamic,
58  ColsAtCompileTime = Dynamic,
59  MaxRowsAtCompileTime = Dynamic,
60  MaxColsAtCompileTime = Dynamic,
61  Options = Options_,
63  SupportedAccessPatterns = InnerRandomAccessPattern
64  };
65 };
66 
67 template <typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
68 struct traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex>> {
71  typedef std::remove_reference_t<MatrixTypeNested> MatrixTypeNested_;
72 
73  typedef Scalar_ Scalar;
74  typedef Dense StorageKind;
75  typedef StorageIndex_ StorageIndex;
76  typedef MatrixXpr XprKind;
77 
78  enum {
79  RowsAtCompileTime = Dynamic,
80  ColsAtCompileTime = 1,
81  MaxRowsAtCompileTime = Dynamic,
82  MaxColsAtCompileTime = 1,
84  };
85 };
86 
87 template <typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
88 struct traits<Diagonal<const SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex>>
89  : public traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex>> {
90  enum { Flags = 0 };
91 };
92 
93 template <typename StorageIndex>
94 struct sparse_reserve_op {
95  EIGEN_DEVICE_FUNC sparse_reserve_op(Index begin, Index end, Index size) {
96  Index range = numext::mini(end - begin, size);
97  m_begin = begin;
98  m_end = begin + range;
99  m_val = StorageIndex(size / range);
100  m_remainder = StorageIndex(size % range);
101  }
102  template <typename IndexType>
103  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE StorageIndex operator()(IndexType i) const {
104  if ((i >= m_begin) && (i < m_end))
105  return m_val + ((i - m_begin) < m_remainder ? 1 : 0);
106  else
107  return 0;
108  }
109  StorageIndex m_val, m_remainder;
110  Index m_begin, m_end;
111 };
112 
113 template <typename Scalar>
114 struct functor_traits<sparse_reserve_op<Scalar>> {
115  enum { Cost = 1, PacketAccess = false, IsRepeatable = true };
116 };
117 
118 } // end namespace internal
119 
120 template <typename Scalar_, int Options_, typename StorageIndex_>
121 class SparseMatrix : public SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>> {
122  typedef SparseCompressedBase<SparseMatrix> Base;
123  using Base::convert_index;
124  friend class SparseVector<Scalar_, 0, StorageIndex_>;
125  template <typename, typename, typename, typename, typename>
126  friend struct internal::Assignment;
127 
128  public:
129  using Base::isCompressed;
130  using Base::nonZeros;
132  using Base::operator+=;
133  using Base::operator-=;
134 
140 
141  using Base::IsRowMajor;
143  enum { Options = Options_ };
144 
145  typedef typename Base::IndexVector IndexVector;
147 
148  protected:
150 
154  StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
156 
157  public:
159  inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
161  inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
162 
164  inline Index innerSize() const { return m_innerSize; }
166  inline Index outerSize() const { return m_outerSize; }
167 
171  inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
175  inline Scalar* valuePtr() { return m_data.valuePtr(); }
176 
180  inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
184  inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
185 
189  inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
194 
198  inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
203 
205  constexpr Storage& data() { return m_data; }
207  constexpr const Storage& data() const { return m_data; }
208 
211  inline Scalar coeff(Index row, Index col) const {
212  eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
213 
214  const Index outer = IsRowMajor ? row : col;
215  const Index inner = IsRowMajor ? col : row;
216  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer + 1];
217  return m_data.atInRange(m_outerIndex[outer], end, inner);
218  }
219 
231  inline Scalar& findOrInsertCoeff(Index row, Index col, bool* inserted) {
232  eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
233  const Index outer = IsRowMajor ? row : col;
234  const Index inner = IsRowMajor ? col : row;
235  Index start = m_outerIndex[outer];
236  Index end = isCompressed() ? m_outerIndex[outer + 1] : m_outerIndex[outer] + m_innerNonZeros[outer];
237  eigen_assert(end >= start && "you probably called coeffRef on a non finalized matrix");
238  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
239  if (dst == end) {
240  Index capacity = m_outerIndex[outer + 1] - end;
241  if (capacity > 0) {
242  // implies uncompressed: push to back of vector
243  m_innerNonZeros[outer]++;
244  m_data.index(end) = StorageIndex(inner);
245  m_data.value(end) = Scalar(0);
246  if (inserted != nullptr) {
247  *inserted = true;
248  }
249  return m_data.value(end);
250  }
251  }
252  if ((dst < end) && (m_data.index(dst) == inner)) {
253  // this coefficient exists, return a reference to it
254  if (inserted != nullptr) {
255  *inserted = false;
256  }
257  return m_data.value(dst);
258  } else {
259  if (inserted != nullptr) {
260  *inserted = true;
261  }
262  // insertion will require reconfiguring the buffer
263  return insertAtByOuterInner(outer, inner, dst);
264  }
265  }
266 
275  inline Scalar& coeffRef(Index row, Index col) { return findOrInsertCoeff(row, col, nullptr); }
276 
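 // Usage sketch (illustrative, not from the Eigen sources). It contrasts read and write access on a
 // small column-major matrix: coeff() never inserts, while coeffRef() first inserts a zero entry if
 // the coefficient is not stored yet.
 //
 //   Eigen::SparseMatrix<double> A(4, 4);
 //   A.coeffRef(1, 2) = 3.0;     // inserts (1,2) and assigns 3.0
 //   A.coeffRef(1, 2) += 1.0;    // entry already exists: updated in place
 //   double x = A.coeff(0, 0);   // read-only access, returns 0.0 without inserting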
294 
295  public:
303  inline void setZero() {
304  m_data.clear();
305  using std::fill_n;
306  fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
307  if (m_innerNonZeros) {
308  fill_n(m_innerNonZeros, m_outerSize, StorageIndex(0));
309  }
310  }
311 
315  inline void reserve(Index reserveSize) {
316  eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
317  m_data.reserve(reserveSize);
318  }
319 
320 #ifdef EIGEN_PARSED_BY_DOXYGEN
333  template <class SizesType>
334  inline void reserve(const SizesType& reserveSizes);
335 #else
336  template <class SizesType>
337  inline void reserve(const SizesType& reserveSizes,
338  const typename SizesType::value_type& enableif = typename SizesType::value_type()) {
339  EIGEN_UNUSED_VARIABLE(enableif);
340  reserveInnerVectors(reserveSizes);
341  }
342 #endif // EIGEN_PARSED_BY_DOXYGEN
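 // Usage sketch (illustrative, not from the Eigen sources) for the per-vector reserve() overload
 // above; the estimate of 6 nonzeros per column is an arbitrary example value.
 //
 //   Eigen::SparseMatrix<double> A(1000, 1000);              // column-major by default
 //   A.reserve(Eigen::VectorXi::Constant(A.cols(), 6));      // reserve room per inner vector
 //   // ... A.insert(i, j) = v; for the application's entries ...
 //   A.makeCompressed();                                     // optionally return to compressed form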
343  protected:
344  template <class SizesType>
345  inline void reserveInnerVectors(const SizesType& reserveSizes) {
346  if (isCompressed()) {
347  Index totalReserveSize = 0;
348  for (Index j = 0; j < m_outerSize; ++j) totalReserveSize += internal::convert_index<Index>(reserveSizes[j]);
349 
350  // if reserveSizes is empty, don't do anything!
351  if (totalReserveSize == 0) return;
352 
353  // turn the matrix into non-compressed mode
354  m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
355 
356  // temporarily use m_innerNonZeros to hold the new starting points.
357  StorageIndex* newOuterIndex = m_innerNonZeros;
358 
359  Index count = 0;
360  for (Index j = 0; j < m_outerSize; ++j) {
361  newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
362  Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
363  count += reserveSize + internal::convert_index<Index>(m_outerIndex[j + 1] - m_outerIndex[j]);
364  }
365 
366  m_data.reserve(totalReserveSize);
367  StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
368  for (Index j = m_outerSize - 1; j >= 0; --j) {
369  StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
370  StorageIndex begin = m_outerIndex[j];
371  StorageIndex end = begin + innerNNZ;
372  StorageIndex target = newOuterIndex[j];
373  internal::smart_memmove(innerIndexPtr() + begin, innerIndexPtr() + end, innerIndexPtr() + target);
374  internal::smart_memmove(valuePtr() + begin, valuePtr() + end, valuePtr() + target);
375  previousOuterIndex = m_outerIndex[j];
376  m_outerIndex[j] = newOuterIndex[j];
377  m_innerNonZeros[j] = innerNNZ;
378  }
379  if (m_outerSize > 0)
381  internal::convert_index<StorageIndex>(reserveSizes[m_outerSize - 1]);
382 
384  } else {
385  StorageIndex* newOuterIndex = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize + 1);
386 
387  Index count = 0;
388  for (Index j = 0; j < m_outerSize; ++j) {
389  newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
390  Index alreadyReserved =
391  internal::convert_index<Index>(m_outerIndex[j + 1] - m_outerIndex[j] - m_innerNonZeros[j]);
392  Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
393  Index toReserve = numext::maxi(reserveSize, alreadyReserved);
394  count += toReserve + internal::convert_index<Index>(m_innerNonZeros[j]);
395  }
396  newOuterIndex[m_outerSize] = internal::convert_index<StorageIndex>(count);
397 
398  m_data.resize(count);
399  for (Index j = m_outerSize - 1; j >= 0; --j) {
400  StorageIndex innerNNZ = m_innerNonZeros[j];
401  StorageIndex begin = m_outerIndex[j];
402  StorageIndex target = newOuterIndex[j];
403  m_data.moveChunk(begin, target, innerNNZ);
404  }
405 
406  std::swap(m_outerIndex, newOuterIndex);
407  internal::conditional_aligned_delete_auto<StorageIndex, true>(newOuterIndex, m_outerSize + 1);
408  }
409  }
410 
411  public:
412  //--- low level purely coherent filling ---
413 
426  }
427 
430  inline Scalar& insertBackByOuterInner(Index outer, Index inner) {
431  eigen_assert(Index(m_outerIndex[outer + 1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
432  eigen_assert((m_outerIndex[outer + 1] - m_outerIndex[outer] == 0 || m_data.index(m_data.size() - 1) < inner) &&
433  "Invalid ordered insertion (invalid inner index)");
434  StorageIndex p = m_outerIndex[outer + 1];
435  ++m_outerIndex[outer + 1];
436  m_data.append(Scalar(0), inner);
437  return m_data.value(p);
438  }
439 
443  StorageIndex p = m_outerIndex[outer + 1];
444  ++m_outerIndex[outer + 1];
445  m_data.append(Scalar(0), inner);
446  return m_data.value(p);
447  }
448 
451  inline void startVec(Index outer) {
452  eigen_assert(m_outerIndex[outer] == Index(m_data.size()) &&
453  "You must call startVec for each inner vector sequentially");
454  eigen_assert(m_outerIndex[outer + 1] == 0 && "You must call startVec for each inner vector sequentially");
455  m_outerIndex[outer + 1] = m_outerIndex[outer];
456  }
457 
461  inline void finalize() {
462  if (isCompressed()) {
463  StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
464  Index i = m_outerSize;
465  // find the last filled column
466  while (i >= 0 && m_outerIndex[i] == 0) --i;
467  ++i;
468  while (i <= m_outerSize) {
469  m_outerIndex[i] = size;
470  ++i;
471  }
472  }
473  }
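 // Usage sketch (illustrative, not from the Eigen sources) of the low-level coherent filling API
 // above: outer vectors must be started sequentially and entries appended with strictly increasing
 // inner indices. rows, cols, estimatedNonZeros, i and value stand for caller-provided data.
 //
 //   Eigen::SparseMatrix<double> A(rows, cols);
 //   A.reserve(estimatedNonZeros);
 //   for (Eigen::Index j = 0; j < A.outerSize(); ++j) {
 //     A.startVec(j);
 //     // for each inner index i of outer vector j, in increasing order:
 //     A.insertBackByOuterInner(j, i) = value;
 //   }
 //   A.finalize();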
474 
475  // remove outer vectors j, j+1 ... j+num-1 and resize the matrix
476  void removeOuterVectors(Index j, Index num = 1) {
477  eigen_assert(num >= 0 && j >= 0 && j + num <= m_outerSize && "Invalid parameters");
478 
479  const Index newRows = IsRowMajor ? m_outerSize - num : rows();
480  const Index newCols = IsRowMajor ? cols() : m_outerSize - num;
481 
482  const Index begin = j + num;
483  const Index end = m_outerSize;
484  const Index target = j;
485 
486  // if the removed vectors are not empty, uncompress the matrix
487  if (m_outerIndex[j + num] > m_outerIndex[j]) uncompress();
488 
489  // shift m_outerIndex and m_innerNonZeros [num] to the left
491  if (!isCompressed())
493 
494  // if m_outerIndex[0] > 0, shift the data within the first vector while it is easy to do so
495  if (m_outerIndex[0] > StorageIndex(0)) {
496  uncompress();
497  const Index from = internal::convert_index<Index>(m_outerIndex[0]);
498  const Index to = Index(0);
499  const Index chunkSize = internal::convert_index<Index>(m_innerNonZeros[0]);
500  m_data.moveChunk(from, to, chunkSize);
501  m_outerIndex[0] = StorageIndex(0);
502  }
503 
504  // truncate the matrix to the smaller size
505  conservativeResize(newRows, newCols);
506  }
507 
508  // insert empty outer vectors at indices j, j+1 ... j+num-1 and resize the matrix
510  using std::fill_n;
511  eigen_assert(num >= 0 && j >= 0 && j < m_outerSize && "Invalid parameters");
512 
513  const Index newRows = IsRowMajor ? m_outerSize + num : rows();
514  const Index newCols = IsRowMajor ? cols() : m_outerSize + num;
515 
516  const Index begin = j;
517  const Index end = m_outerSize;
518  const Index target = j + num;
519 
520  // expand the matrix to the larger size
521  conservativeResize(newRows, newCols);
522 
523  // shift m_outerIndex and m_innerNonZeros [num] to the right
525  // m_outerIndex[begin] == m_outerIndex[target], set all indices in this range to same value
526  fill_n(m_outerIndex + begin, num, m_outerIndex[begin]);
527 
528  if (!isCompressed()) {
530  // set the nonzeros of the newly inserted vectors to 0
531  fill_n(m_innerNonZeros + begin, num, StorageIndex(0));
532  }
533  }
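 // Usage sketch (illustrative, not from the Eigen sources) for removeOuterVectors() on a
 // column-major matrix, where an outer vector is a column; the helper defined just above performs
 // the inverse operation and inserts empty outer vectors at a given position.
 //
 //   Eigen::SparseMatrix<double> A(10, 10);
 //   // ... fill A ...
 //   A.removeOuterVectors(3, 2);   // drop columns 3 and 4; A becomes 10 x 8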
534 
535  template <typename InputIterators>
536  void setFromTriplets(const InputIterators& begin, const InputIterators& end);
537 
538  template <typename InputIterators, typename DupFunctor>
539  void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
540 
541  template <typename Derived, typename DupFunctor>
542  void collapseDuplicates(DenseBase<Derived>& wi, DupFunctor dup_func = DupFunctor());
543 
544  template <typename InputIterators>
545  void setFromSortedTriplets(const InputIterators& begin, const InputIterators& end);
546 
547  template <typename InputIterators, typename DupFunctor>
548  void setFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
549 
550  template <typename InputIterators>
551  void insertFromTriplets(const InputIterators& begin, const InputIterators& end);
552 
553  template <typename InputIterators, typename DupFunctor>
554  void insertFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
555 
556  template <typename InputIterators>
557  void insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end);
558 
559  template <typename InputIterators, typename DupFunctor>
560  void insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
561 
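 // Usage sketch (illustrative, not from the Eigen sources) for the triplet-based builders declared
 // above: setFromTriplets() rebuilds the matrix from scratch and sums duplicates by default, or
 // combines them with a user-supplied functor.
 //
 //   std::vector<Eigen::Triplet<double>> trips;
 //   trips.emplace_back(0, 1, 3.0);
 //   trips.emplace_back(2, 0, -1.0);
 //   trips.emplace_back(0, 1, 2.0);                        // duplicate of (0,1)
 //   Eigen::SparseMatrix<double> A(3, 3);
 //   A.setFromTriplets(trips.begin(), trips.end());        // A.coeff(0,1) == 5.0
 //   A.setFromTriplets(trips.begin(), trips.end(),
 //                     [](double, double b) { return b; });  // keep the last duplicate instead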
562  //---
563 
567  eigen_assert(j >= 0 && j < m_outerSize && "invalid outer index");
568  eigen_assert(i >= 0 && i < m_innerSize && "invalid inner index");
571  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, i);
572  if (dst == end) {
573  Index capacity = m_outerIndex[j + 1] - end;
574  if (capacity > 0) {
575  // implies uncompressed: push to back of vector
576  m_innerNonZeros[j]++;
577  m_data.index(end) = StorageIndex(i);
578  m_data.value(end) = Scalar(0);
579  return m_data.value(end);
580  }
581  }
582  eigen_assert((dst == end || m_data.index(dst) != i) &&
583  "you cannot insert an element that already exists, you must call coeffRef to this end");
584  return insertAtByOuterInner(j, i, dst);
585  }
586 
589  void makeCompressed() {
590  if (isCompressed()) return;
591 
592  eigen_internal_assert(m_outerIndex != 0 && m_outerSize > 0);
593 
594  StorageIndex start = m_outerIndex[1];
595  m_outerIndex[1] = m_innerNonZeros[0];
596  // try to move fewer, larger contiguous chunks
597  Index copyStart = start;
598  Index copyTarget = m_innerNonZeros[0];
599  for (Index j = 1; j < m_outerSize; j++) {
600  StorageIndex end = start + m_innerNonZeros[j];
601  StorageIndex nextStart = m_outerIndex[j + 1];
602  // don't forget to move the last chunk!
603  bool breakUpCopy = (end != nextStart) || (j == m_outerSize - 1);
604  if (breakUpCopy) {
605  Index chunkSize = end - copyStart;
606  if (chunkSize > 0) m_data.moveChunk(copyStart, copyTarget, chunkSize);
607  copyStart = nextStart;
608  copyTarget += chunkSize;
609  }
610  start = nextStart;
611  m_outerIndex[j + 1] = m_outerIndex[j] + m_innerNonZeros[j];
612  }
613  m_data.resize(m_outerIndex[m_outerSize]);
614 
615  // release as much memory as possible
616  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
617  m_innerNonZeros = 0;
618  m_data.squeeze();
619  }
620 
622  void uncompress() {
623  if (!isCompressed()) return;
624  m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
625  if (m_outerIndex[m_outerSize] == 0) {
626  using std::fill_n;
627  fill_n(m_innerNonZeros, m_outerSize, StorageIndex(0));
628  } else {
629  for (Index j = 0; j < m_outerSize; j++) m_innerNonZeros[j] = m_outerIndex[j + 1] - m_outerIndex[j];
630  }
631  }
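 // Usage sketch (illustrative, not from the Eigen sources) of switching between the compressed
 // (CSC/CSR) representation and the more flexible uncompressed one.
 //
 //   Eigen::SparseMatrix<double> A(100, 100);
 //   A.coeffRef(3, 7) = 1.0;   // random insertion may leave A uncompressed
 //   A.makeCompressed();       // A.isCompressed() == true, inactive space is trimmed
 //   A.uncompress();           // back to the uncompressed representation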
632 
635  prune(default_prunning_func(reference, epsilon));
636  }
637 
645  template <typename KeepFunc>
646  void prune(const KeepFunc& keep = KeepFunc()) {
647  StorageIndex k = 0;
648  for (Index j = 0; j < m_outerSize; ++j) {
649  StorageIndex previousStart = m_outerIndex[j];
650  if (isCompressed())
651  m_outerIndex[j] = k;
652  else
653  k = m_outerIndex[j];
654  StorageIndex end = isCompressed() ? m_outerIndex[j + 1] : previousStart + m_innerNonZeros[j];
655  for (StorageIndex i = previousStart; i < end; ++i) {
658  bool keepEntry = keep(row, col, m_data.value(i));
659  if (keepEntry) {
660  m_data.value(k) = m_data.value(i);
661  m_data.index(k) = m_data.index(i);
662  ++k;
663  } else if (!isCompressed())
664  m_innerNonZeros[j]--;
665  }
666  }
667  if (isCompressed()) {
669  m_data.resize(k, 0);
670  }
671  }
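 // Usage sketch (illustrative, not from the Eigen sources) of prune() with a predicate, where A is
 // an existing SparseMatrix<double>; the functor receives (row, col, value) and returns true for
 // entries to keep. The 1e-12 threshold is an arbitrary example value.
 //
 //   A.prune([](const Eigen::Index&, const Eigen::Index&, const double& v) {
 //     return std::abs(v) > 1e-12;   // drop near-zero entries
 //   });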
672 
682  // If one dimension is null, then there is nothing to be preserved
683  if (rows == 0 || cols == 0) return resize(rows, cols);
684 
685  Index newOuterSize = IsRowMajor ? rows : cols;
686  Index newInnerSize = IsRowMajor ? cols : rows;
687 
688  Index innerChange = newInnerSize - m_innerSize;
689  Index outerChange = newOuterSize - m_outerSize;
690 
691  if (outerChange != 0) {
692  m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, newOuterSize + 1,
693  m_outerSize + 1);
694 
695  if (!isCompressed())
696  m_innerNonZeros = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_innerNonZeros,
697  newOuterSize, m_outerSize);
698 
699  if (outerChange > 0) {
700  StorageIndex lastIdx = m_outerSize == 0 ? StorageIndex(0) : m_outerIndex[m_outerSize];
701  using std::fill_n;
702  fill_n(m_outerIndex + m_outerSize, outerChange + 1, lastIdx);
703 
704  if (!isCompressed()) fill_n(m_innerNonZeros + m_outerSize, outerChange, StorageIndex(0));
705  }
706  }
707  m_outerSize = newOuterSize;
708 
709  if (innerChange < 0) {
710  for (Index j = 0; j < m_outerSize; j++) {
713  Index lb = m_data.searchLowerIndex(start, end, newInnerSize);
714  if (lb != end) {
715  uncompress();
717  }
718  }
719  }
720  m_innerSize = newInnerSize;
721 
722  Index newSize = m_outerIndex[m_outerSize];
723  eigen_assert(newSize <= m_data.size());
724  m_data.resize(newSize);
725  }
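 // Usage sketch (illustrative, not from the Eigen sources) contrasting the two resize flavours on
 // an existing SparseMatrix<double> A: conservativeResize() keeps the nonzeros that still fit,
 // while resize() discards all entries.
 //
 //   A.conservativeResize(A.rows() + 1, A.cols());   // existing entries are preserved
 //   A.resize(20, 20);                               // A is now an empty 20 x 20 matrix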
726 
735  const Index outerSize = IsRowMajor ? rows : cols;
737  m_data.clear();
738 
739  if ((m_outerIndex == 0) || (m_outerSize != outerSize)) {
740  m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, outerSize + 1,
741  m_outerSize + 1);
743  }
744 
745  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
746  m_innerNonZeros = 0;
747 
748  using std::fill_n;
749  fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
750  }
751 
755 
758 
764 
767 
770  resize(rows, cols);
771  }
772 
774  template <typename OtherDerived>
779  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
780  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
781  if (needToTranspose)
782  *this = other.derived();
783  else {
784 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
786 #endif
788  }
789  }
790 
792  template <typename OtherDerived, unsigned int UpLo>
795  Base::operator=(other);
796  }
797 
799  inline SparseMatrix(SparseMatrix&& other) : SparseMatrix() { this->swap(other); }
800 
801  template <typename OtherDerived>
803  *this = other.derived().markAsRValue();
804  }
805 
807  inline SparseMatrix(const SparseMatrix& other)
809  *this = other.derived();
810  }
811 
813  template <typename OtherDerived>
816  initAssignment(other);
817  other.evalTo(*this);
818  }
819 
821  template <typename OtherDerived>
824  *this = other.derived();
825  }
826 
829  inline void swap(SparseMatrix& other) {
830  // EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
831  std::swap(m_outerIndex, other.m_outerIndex);
832  std::swap(m_innerSize, other.m_innerSize);
833  std::swap(m_outerSize, other.m_outerSize);
834  std::swap(m_innerNonZeros, other.m_innerNonZeros);
835  m_data.swap(other.m_data);
836  }
838  friend EIGEN_DEVICE_FUNC void swap(SparseMatrix& a, SparseMatrix& b) { a.swap(b); }
839 
842  inline void setIdentity() {
843  eigen_assert(m_outerSize == m_innerSize && "ONLY FOR SQUARED MATRICES");
844  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
845  m_innerNonZeros = 0;
846  m_data.resize(m_outerSize);
847  // is it necessary to squeeze?
848  m_data.squeeze();
849  std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
850  std::iota(innerIndexPtr(), innerIndexPtr() + m_outerSize, StorageIndex(0));
851  using std::fill_n;
852  fill_n(valuePtr(), m_outerSize, Scalar(1));
853  }
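 // Usage sketch (illustrative, not from the Eigen sources): setIdentity() overwrites any previous
 // content and leaves the matrix in compressed form.
 //
 //   Eigen::SparseMatrix<double> I(5, 5);
 //   I.setIdentity();   // 5 stored entries, all equal to 1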
854 
855  inline SparseMatrix& operator=(const SparseMatrix& other) {
856  if (other.isRValue()) {
857  swap(other.const_cast_derived());
858  } else if (this != &other) {
859 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
861 #endif
862  initAssignment(other);
863  if (other.isCompressed()) {
864  internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
865  m_data = other.m_data;
866  } else {
867  Base::operator=(other);
868  }
869  }
870  return *this;
871  }
872 
874  this->swap(other);
875  return *this;
876  }
877 
878 #ifndef EIGEN_PARSED_BY_DOXYGEN
879  template <typename OtherDerived>
881  return Base::operator=(other.derived());
882  }
883 
884  template <typename Lhs, typename Rhs>
886 #endif // EIGEN_PARSED_BY_DOXYGEN
887 
888  template <typename OtherDerived>
890 
891  template <typename OtherDerived>
893  *this = other.derived().markAsRValue();
894  return *this;
895  }
896 
897 #ifndef EIGEN_NO_IO
898  friend std::ostream& operator<<(std::ostream& s, const SparseMatrix& m) {
899  EIGEN_DBG_SPARSE(
900  s << "Nonzero entries:\n"; if (m.isCompressed()) {
901  for (Index i = 0; i < m.nonZeros(); ++i) s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
902  } else {
903  for (Index i = 0; i < m.outerSize(); ++i) {
904  Index p = m.m_outerIndex[i];
905  Index pe = m.m_outerIndex[i] + m.m_innerNonZeros[i];
906  Index k = p;
907  for (; k < pe; ++k) {
908  s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
909  }
910  for (; k < m.m_outerIndex[i + 1]; ++k) {
911  s << "(_,_) ";
912  }
913  }
914  } s << std::endl;
915  s << std::endl; s << "Outer pointers:\n";
916  for (Index i = 0; i < m.outerSize(); ++i) { s << m.m_outerIndex[i] << " "; } s << " $" << std::endl;
917  if (!m.isCompressed()) {
918  s << "Inner non zeros:\n";
919  for (Index i = 0; i < m.outerSize(); ++i) {
920  s << m.m_innerNonZeros[i] << " ";
921  }
922  s << " $" << std::endl;
923  } s
924  << std::endl;);
925  s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
926  return s;
927  }
928 #endif
929 
931  inline ~SparseMatrix() {
932  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_outerIndex, m_outerSize + 1);
933  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
934  }
935 
937  Scalar sum() const;
938 
939 #ifdef EIGEN_SPARSEMATRIX_PLUGIN
940 #include EIGEN_SPARSEMATRIX_PLUGIN
941 #endif
942 
943  protected:
944  template <typename Other>
945  void initAssignment(const Other& other) {
946  resize(other.rows(), other.cols());
947  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
948  m_innerNonZeros = 0;
949  }
950 
954 
960 
961  public:
964 
965  StorageIndex operator[](Index i) const { return i == m_index ? m_value : 0; }
966  };
967 
971 
972  public:
976  const Index outer = IsRowMajor ? row : col;
977  const Index inner = IsRowMajor ? col : row;
978 
979  eigen_assert(!isCompressed());
980  eigen_assert(m_innerNonZeros[outer] <= (m_outerIndex[outer + 1] - m_outerIndex[outer]));
981 
982  Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
983  m_data.index(p) = StorageIndex(inner);
984  m_data.value(p) = Scalar(0);
985  return m_data.value(p);
986  }
987 
988  protected:
989  struct IndexPosPair {
990  IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
993  };
994 
1004  template <typename DiagXpr, typename Func>
1005  void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc) {
1006  constexpr StorageIndex kEmptyIndexVal(-1);
1007  typedef typename ScalarVector::AlignedMapType ValueMap;
1008 
1009  Index n = diagXpr.size();
1010 
1012  if (overwrite) {
1013  if ((m_outerSize != n) || (m_innerSize != n)) resize(n, n);
1014  }
1015 
1016  if (m_data.size() == 0 || overwrite) {
1017  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
1018  m_innerNonZeros = 0;
1019  resizeNonZeros(n);
1020  ValueMap valueMap(valuePtr(), n);
1021  std::iota(m_outerIndex, m_outerIndex + n + 1, StorageIndex(0));
1022  std::iota(innerIndexPtr(), innerIndexPtr() + n, StorageIndex(0));
1023  valueMap.setZero();
1024  internal::call_assignment_no_alias(valueMap, diagXpr, assignFunc);
1025  } else {
1026  internal::evaluator<DiagXpr> diaEval(diagXpr);
1027 
1029  typename IndexVector::AlignedMapType insertionLocations(tmp, n);
1030  insertionLocations.setConstant(kEmptyIndexVal);
1031 
1032  Index deferredInsertions = 0;
1033  Index shift = 0;
1034 
1035  for (Index j = 0; j < n; j++) {
1036  Index begin = m_outerIndex[j];
1037  Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
1038  Index capacity = m_outerIndex[j + 1] - end;
1039  Index dst = m_data.searchLowerIndex(begin, end, j);
1040  // the entry exists: update it now
1041  if (dst != end && m_data.index(dst) == StorageIndex(j))
1042  assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
1043  // the entry belongs at the back of the vector: push to back
1044  else if (dst == end && capacity > 0)
1045  assignFunc.assignCoeff(insertBackUncompressed(j, j), diaEval.coeff(j));
1046  // the insertion requires a data move, record insertion location and handle in second pass
1047  else {
1048  insertionLocations.coeffRef(j) = StorageIndex(dst);
1049  deferredInsertions++;
1050  // if there is no capacity, all vectors to the right of this are shifted
1051  if (capacity == 0) shift++;
1052  }
1053  }
1054 
1055  if (deferredInsertions > 0) {
1056  m_data.resize(m_data.size() + shift);
1057  Index copyEnd = isCompressed() ? m_outerIndex[m_outerSize]
1058  : m_outerIndex[m_outerSize - 1] + m_innerNonZeros[m_outerSize - 1];
1059  for (Index j = m_outerSize - 1; deferredInsertions > 0; j--) {
1060  Index begin = m_outerIndex[j];
1061  Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
1062  Index capacity = m_outerIndex[j + 1] - end;
1063 
1064  bool doInsertion = insertionLocations(j) >= 0;
1065  bool breakUpCopy = doInsertion && (capacity > 0);
1066  // break up copy for sorted insertion into inactive nonzeros
1067  // optionally, add another criterion, i.e. 'breakUpCopy || (capacity > threshold)'
1068  // where `threshold >= 0` to skip inactive nonzeros in each vector
1069  // this reduces the total number of copied elements, but requires more moveChunk calls
1070  if (breakUpCopy) {
1071  Index copyBegin = m_outerIndex[j + 1];
1072  Index to = copyBegin + shift;
1073  Index chunkSize = copyEnd - copyBegin;
1074  m_data.moveChunk(copyBegin, to, chunkSize);
1075  copyEnd = end;
1076  }
1077 
1078  m_outerIndex[j + 1] += shift;
1079 
1080  if (doInsertion) {
1081  // if there is capacity, shift into the inactive nonzeros
1082  if (capacity > 0) shift++;
1083  Index copyBegin = insertionLocations(j);
1084  Index to = copyBegin + shift;
1085  Index chunkSize = copyEnd - copyBegin;
1086  m_data.moveChunk(copyBegin, to, chunkSize);
1087  Index dst = to - 1;
1088  m_data.index(dst) = StorageIndex(j);
1089  m_data.value(dst) = Scalar(0);
1090  assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
1091  if (!isCompressed()) m_innerNonZeros[j]++;
1092  shift--;
1093  deferredInsertions--;
1094  copyEnd = copyBegin;
1095  }
1096  }
1097  }
1098  eigen_assert((shift == 0) && (deferredInsertions == 0));
1099  }
1100  }
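 // Usage sketch (illustrative, not from the Eigen sources): assignDiagonal() is the backend used
 // when assigning to the diagonal() expression of a sparse matrix, assuming a writable diagonal()
 // as in recent Eigen versions; n stands for the matrix size.
 //
 //   Eigen::SparseMatrix<double> A(n, n);
 //   A.diagonal() = Eigen::VectorXd::Constant(n, 2.0);   // inserts or updates the diagonal entries
 //   A.diagonal() += Eigen::VectorXd::Ones(n);            // accumulates into existing entries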
1101 
1102  /* These functions are used to avoid a redundant binary search operation in functions such as coeffRef() and assume
1103  * `dst` is the appropriate sorted insertion point */
1107 
1108  private:
1109  EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned, THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE)
1110  EIGEN_STATIC_ASSERT((Options & (ColMajor | RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS)
1111 
1112  struct default_prunning_func {
1113  default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
1114  inline bool operator()(const Index&, const Index&, const Scalar& value) const {
1115  return !internal::isMuchSmallerThan(value, reference, epsilon);
1116  }
1117  Scalar reference;
1119  };
1120 };
1121 
1122 namespace internal {
1123 
1124 // Creates a compressed sparse matrix from a range of unsorted triplets
1125 // Requires temporary storage to handle duplicate entries
1126 template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1127 void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
1128  DupFunctor dup_func) {
1129  constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
1130  using StorageIndex = typename SparseMatrixType::StorageIndex;
1131  using IndexMap = typename VectorX<StorageIndex>::AlignedMapType;
1132  using TransposedSparseMatrix =
1133  SparseMatrix<typename SparseMatrixType::Scalar, IsRowMajor ? ColMajor : RowMajor, StorageIndex>;
1134 
1135  if (begin == end) return;
1136 
1137  // There are two strategies to consider for constructing a matrix from unordered triplets:
1138  // A) construct the 'mat' in its native storage order and sort in-place (less memory); or,
1139  // B) construct the transposed matrix and use an implicit sort upon assignment to `mat` (less time).
1140  // This routine uses B) for faster execution time.
1141  TransposedSparseMatrix trmat(mat.rows(), mat.cols());
1142 
1143  // scan triplets to determine allocation size before constructing matrix
1144  Index nonZeros = 0;
1145  for (InputIterator it(begin); it != end; ++it) {
1146  eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
1147  StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
1149  trmat.outerIndexPtr()[j + 1]++;
1150  nonZeros++;
1151  }
1152 
1153  std::partial_sum(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize() + 1, trmat.outerIndexPtr());
1154  eigen_assert(nonZeros == trmat.outerIndexPtr()[trmat.outerSize()]);
1155  trmat.resizeNonZeros(nonZeros);
1156 
1157  // construct temporary array to track insertions (outersize) and collapse duplicates (innersize)
1159  smart_copy(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize(), tmp);
1160 
1161  // push triplets to back of each vector
1162  for (InputIterator it(begin); it != end; ++it) {
1163  StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
1164  StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
1165  StorageIndex k = tmp[j];
1166  trmat.data().index(k) = i;
1167  trmat.data().value(k) = it->value();
1168  tmp[j]++;
1169  }
1170 
1171  IndexMap wi(tmp, trmat.innerSize());
1172  trmat.collapseDuplicates(wi, dup_func);
1173  // implicit sorting
1174  mat = trmat;
1175 }
1176 
1177 // Creates a compressed sparse matrix from a sorted range of triplets
1178 template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1179 void set_from_triplets_sorted(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
1180  DupFunctor dup_func) {
1181  constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
1182  using StorageIndex = typename SparseMatrixType::StorageIndex;
1183 
1184  if (begin == end) return;
1185 
1186  constexpr StorageIndex kEmptyIndexValue(-1);
1187  // deallocate inner nonzeros if present and zero outerIndexPtr
1188  mat.resize(mat.rows(), mat.cols());
1189  // use outer indices to count non zero entries (excluding duplicate entries)
1190  StorageIndex previous_j = kEmptyIndexValue;
1191  StorageIndex previous_i = kEmptyIndexValue;
1192  // scan triplets to determine allocation size before constructing matrix
1193  Index nonZeros = 0;
1194  for (InputIterator it(begin); it != end; ++it) {
1195  eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
1196  StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
1197  StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
1198  eigen_assert(j > previous_j || (j == previous_j && i >= previous_i));
1199  // identify duplicates by examining previous location
1200  bool duplicate = (previous_j == j) && (previous_i == i);
1201  if (!duplicate) {
1203  nonZeros++;
1204  mat.outerIndexPtr()[j + 1]++;
1205  previous_j = j;
1206  previous_i = i;
1207  }
1208  }
1209 
1210  // finalize outer indices and allocate memory
1211  std::partial_sum(mat.outerIndexPtr(), mat.outerIndexPtr() + mat.outerSize() + 1, mat.outerIndexPtr());
1212  eigen_assert(nonZeros == mat.outerIndexPtr()[mat.outerSize()]);
1213  mat.resizeNonZeros(nonZeros);
1214 
1215  previous_i = kEmptyIndexValue;
1216  previous_j = kEmptyIndexValue;
1217  Index back = 0;
1218  for (InputIterator it(begin); it != end; ++it) {
1219  StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
1220  StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
1221  bool duplicate = (previous_j == j) && (previous_i == i);
1222  if (duplicate) {
1223  mat.data().value(back - 1) = dup_func(mat.data().value(back - 1), it->value());
1224  } else {
1225  // push triplets to back
1226  mat.data().index(back) = i;
1227  mat.data().value(back) = it->value();
1228  previous_j = j;
1229  previous_i = i;
1230  back++;
1231  }
1232  }
1233  eigen_assert(back == nonZeros);
1234  // matrix is finalized
1235 }
1236 
1237 // thin wrapper around a generic binary functor to use the sparse disjunction evaluator instead of the default
1238 // "arithmetic" evaluator
1239 template <typename DupFunctor, typename LhsScalar, typename RhsScalar = LhsScalar>
1241  using result_type = typename result_of<DupFunctor(LhsScalar, RhsScalar)>::type;
1242  scalar_disjunction_op(const DupFunctor& op) : m_functor(op) {}
1243  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator()(const LhsScalar& a, const RhsScalar& b) const {
1244  return m_functor(a, b);
1245  }
1246  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const DupFunctor& functor() const { return m_functor; }
1247  const DupFunctor& m_functor;
1248 };
1249 
1250 template <typename DupFunctor, typename LhsScalar, typename RhsScalar>
1251 struct functor_traits<scalar_disjunction_op<DupFunctor, LhsScalar, RhsScalar>> : public functor_traits<DupFunctor> {};
1252 
1253 // Creates a compressed sparse matrix from its existing entries and those from an unsorted range of triplets
1254 template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1255 void insert_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
1256  DupFunctor dup_func) {
1257  using Scalar = typename SparseMatrixType::Scalar;
1258  using SrcXprType =
1259  CwiseBinaryOp<scalar_disjunction_op<DupFunctor, Scalar>, const SparseMatrixType, const SparseMatrixType>;
1260 
1261  // set_from_triplets is necessary to sort the inner indices and remove the duplicate entries
1262  SparseMatrixType trips(mat.rows(), mat.cols());
1263  set_from_triplets(begin, end, trips, dup_func);
1264 
1265  SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
1266  // the sparse assignment procedure creates a temporary matrix and swaps the final result
1267  assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
1268 }
1269 
1270 // Creates a compressed sparse matrix from its existing entries and those from a sorted range of triplets
1271 template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1272 void insert_from_triplets_sorted(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
1273  DupFunctor dup_func) {
1274  using Scalar = typename SparseMatrixType::Scalar;
1275  using SrcXprType =
1276  CwiseBinaryOp<scalar_disjunction_op<DupFunctor, Scalar>, const SparseMatrixType, const SparseMatrixType>;
1277 
1278  // TODO: process triplets without making a copy
1279  SparseMatrixType trips(mat.rows(), mat.cols());
1280  set_from_triplets_sorted(begin, end, trips, dup_func);
1281 
1282  SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
1283  // the sparse assignment procedure creates a temporary matrix and swaps the final result
1284  assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
1285 }
1286 
1287 } // namespace internal
1288 
1326 template <typename Scalar, int Options_, typename StorageIndex_>
1327 template <typename InputIterators>
1329  const InputIterators& end) {
1330  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
1331  begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1332 }
1333 
1343 template <typename Scalar, int Options_, typename StorageIndex_>
1344 template <typename InputIterators, typename DupFunctor>
1346  const InputIterators& end, DupFunctor dup_func) {
1347  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
1348  begin, end, *this, dup_func);
1349 }
1350 
1355 template <typename Scalar, int Options_, typename StorageIndex_>
1356 template <typename InputIterators>
1358  const InputIterators& end) {
1359  internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
1360  begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1361 }
1362 
1372 template <typename Scalar, int Options_, typename StorageIndex_>
1373 template <typename InputIterators, typename DupFunctor>
1375  const InputIterators& end,
1376  DupFunctor dup_func) {
1377  internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
1378  begin, end, *this, dup_func);
1379 }
1380 
1419 template <typename Scalar, int Options_, typename StorageIndex_>
1420 template <typename InputIterators>
1422  const InputIterators& end) {
1423  internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
1424  begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1425 }
1426 
1436 template <typename Scalar, int Options_, typename StorageIndex_>
1437 template <typename InputIterators, typename DupFunctor>
1439  const InputIterators& end, DupFunctor dup_func) {
1440  internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
1441  begin, end, *this, dup_func);
1442 }
1443 
1448 template <typename Scalar, int Options_, typename StorageIndex_>
1449 template <typename InputIterators>
1451  const InputIterators& end) {
1452  internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
1453  begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1454 }
1455 
1465 template <typename Scalar, int Options_, typename StorageIndex_>
1466 template <typename InputIterators, typename DupFunctor>
1468  const InputIterators& end,
1469  DupFunctor dup_func) {
1470  internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
1471  begin, end, *this, dup_func);
1472 }
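// Usage sketch (illustrative, not from the Eigen sources) of the difference between the set* and
// insert* triplet APIs defined above, where t1 and t2 are caller-provided containers of
// Eigen::Triplet<double>: setFromTriplets() replaces the whole matrix, while insertFromTriplets()
// merges new triplets into the existing nonzeros (summing overlaps by default).
//
//   Eigen::SparseMatrix<double> A(3, 3);
//   A.setFromTriplets(t1.begin(), t1.end());      // build A from a first batch of triplets
//   A.insertFromTriplets(t2.begin(), t2.end());   // merge a second batch; overlaps are summed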
1473 
1475 template <typename Scalar_, int Options_, typename StorageIndex_>
1476 template <typename Derived, typename DupFunctor>
1478  // removes duplicate entries and compresses the matrix
1479  // the excess allocated memory is not released
1480  // the inner indices do not need to be sorted, nor is the matrix returned in a sorted state
1481  eigen_assert(wi.size() == m_innerSize);
1482  constexpr StorageIndex kEmptyIndexValue(-1);
1483  wi.setConstant(kEmptyIndexValue);
1484  StorageIndex count = 0;
1485  const bool is_compressed = isCompressed();
1486  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1487  for (Index j = 0; j < m_outerSize; ++j) {
1488  const StorageIndex newBegin = count;
1489  const StorageIndex end = is_compressed ? m_outerIndex[j + 1] : m_outerIndex[j] + m_innerNonZeros[j];
1490  for (StorageIndex k = m_outerIndex[j]; k < end; ++k) {
1491  StorageIndex i = m_data.index(k);
1492  if (wi(i) >= newBegin) {
1493  // entry at k is a duplicate
1494  // accumulate it into the primary entry located at wi(i)
1495  m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1496  } else {
1497  // k is the primary entry in j with inner index i
1498  // shift it to the left and record its location at wi(i)
1499  m_data.index(count) = i;
1500  m_data.value(count) = m_data.value(k);
1501  wi(i) = count;
1502  ++count;
1503  }
1504  }
1505  m_outerIndex[j] = newBegin;
1506  }
1507  m_outerIndex[m_outerSize] = count;
1508  m_data.resize(count);
1509 
1510  // turn the matrix into compressed form (if it is not already)
1511  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
1512  m_innerNonZeros = 0;
1513 }
1514 
1516 template <typename Scalar, int Options_, typename StorageIndex_>
1517 template <typename OtherDerived>
1522  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1523 
1524 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1526 #endif
1527 
1528  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1529  if (needToTranspose) {
1530 #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1532 #endif
1533  // two-pass algorithm:
1534  // 1 - compute the number of coeffs per dest inner vector
1535  // 2 - do the actual copy/eval
1536  // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1537  typedef
1539  OtherCopy;
1540  typedef internal::remove_all_t<OtherCopy> OtherCopy_;
1541  typedef internal::evaluator<OtherCopy_> OtherCopyEval;
1542  OtherCopy otherCopy(other.derived());
1543  OtherCopyEval otherCopyEval(otherCopy);
1544 
1545  SparseMatrix dest(other.rows(), other.cols());
1546  Eigen::Map<IndexVector>(dest.m_outerIndex, dest.outerSize()).setZero();
1547 
1548  // pass 1
1549  // FIXME the above copy could be merged with that pass
1550  for (Index j = 0; j < otherCopy.outerSize(); ++j)
1551  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) ++dest.m_outerIndex[it.index()];
1552 
1553  // prefix sum
1554  StorageIndex count = 0;
1555  IndexVector positions(dest.outerSize());
1556  for (Index j = 0; j < dest.outerSize(); ++j) {
1557  StorageIndex tmp = dest.m_outerIndex[j];
1558  dest.m_outerIndex[j] = count;
1559  positions[j] = count;
1560  count += tmp;
1561  }
1562  dest.m_outerIndex[dest.outerSize()] = count;
1563  // alloc
1564  dest.m_data.resize(count);
1565  // pass 2
1566  for (StorageIndex j = 0; j < otherCopy.outerSize(); ++j) {
1567  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) {
1568  Index pos = positions[it.index()]++;
1569  dest.m_data.index(pos) = j;
1570  dest.m_data.value(pos) = it.value();
1571  }
1572  }
1573  this->swap(dest);
1574  return *this;
1575  } else {
1576  if (other.isRValue()) {
1577  initAssignment(other.derived());
1578  }
1579  // there is no special optimization
1580  return Base::operator=(other.derived());
1581  }
1582 }
1583 
1584 template <typename Scalar_, int Options_, typename StorageIndex_>
1587  return insertByOuterInner(IsRowMajor ? row : col, IsRowMajor ? col : row);
1588 }
1589 
1590 template <typename Scalar_, int Options_, typename StorageIndex_>
1593  // random insertion into compressed matrix is very slow
1594  uncompress();
1595  return insertUncompressedAtByOuterInner(outer, inner, dst);
1596 }
1597 
1598 template <typename Scalar_, int Options_, typename StorageIndex_>
1601  eigen_assert(!isCompressed());
1602  Index outer = IsRowMajor ? row : col;
1603  Index inner = IsRowMajor ? col : row;
1604  Index start = m_outerIndex[outer];
1605  Index end = start + m_innerNonZeros[outer];
1606  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
1607  if (dst == end) {
1608  Index capacity = m_outerIndex[outer + 1] - end;
1609  if (capacity > 0) {
1610  // implies uncompressed: push to back of vector
1611  m_innerNonZeros[outer]++;
1612  m_data.index(end) = StorageIndex(inner);
1613  m_data.value(end) = Scalar(0);
1614  return m_data.value(end);
1615  }
1616  }
1617  eigen_assert((dst == end || m_data.index(dst) != inner) &&
1618  "you cannot insert an element that already exists, you must call coeffRef to this end");
1619  return insertUncompressedAtByOuterInner(outer, inner, dst);
1620 }
1621 
1622 template <typename Scalar_, int Options_, typename StorageIndex_>
1625  eigen_assert(isCompressed());
1626  Index outer = IsRowMajor ? row : col;
1627  Index inner = IsRowMajor ? col : row;
1628  Index start = m_outerIndex[outer];
1629  Index end = m_outerIndex[outer + 1];
1630  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
1631  eigen_assert((dst == end || m_data.index(dst) != inner) &&
1632  "you cannot insert an element that already exists, you must call coeffRef to this end");
1633  return insertCompressedAtByOuterInner(outer, inner, dst);
1634 }
1635 
1636 template <typename Scalar_, int Options_, typename StorageIndex_>
1639  eigen_assert(isCompressed());
1640  // compressed insertion always requires expanding the buffer
1641  // first, check if there is adequate allocated memory
1642  if (m_data.allocatedSize() <= m_data.size()) {
1643  // if there is no capacity for a single insertion, double the capacity
1644  // increase capacity by a minimum of 32
1645  Index minReserve = 32;
1646  Index reserveSize = numext::maxi(minReserve, m_data.allocatedSize());
1647  m_data.reserve(reserveSize);
1648  }
1649  m_data.resize(m_data.size() + 1);
1650  Index chunkSize = m_outerIndex[m_outerSize] - dst;
1651  // shift the existing data to the right if necessary
1652  m_data.moveChunk(dst, dst + 1, chunkSize);
1653  // update nonzero counts
1654  // potentially O(outerSize) bottleneck!
1655  for (Index j = outer; j < m_outerSize; j++) m_outerIndex[j + 1]++;
1656  // initialize the coefficient
1657  m_data.index(dst) = StorageIndex(inner);
1658  m_data.value(dst) = Scalar(0);
1659  // return a reference to the coefficient
1660  return m_data.value(dst);
1661 }
1662 
1663 template <typename Scalar_, int Options_, typename StorageIndex_>
1666  eigen_assert(!isCompressed());
1667  // find a vector with capacity, starting at `outer` and searching to the left and right
1668  for (Index leftTarget = outer - 1, rightTarget = outer; (leftTarget >= 0) || (rightTarget < m_outerSize);) {
1669  if (rightTarget < m_outerSize) {
1670  Index start = m_outerIndex[rightTarget];
1671  Index end = start + m_innerNonZeros[rightTarget];
1672  Index nextStart = m_outerIndex[rightTarget + 1];
1673  Index capacity = nextStart - end;
1674  if (capacity > 0) {
1675  // move [dst, end) to dst+1 and insert at dst
1676  Index chunkSize = end - dst;
1677  if (chunkSize > 0) m_data.moveChunk(dst, dst + 1, chunkSize);
1678  m_innerNonZeros[outer]++;
1679  for (Index j = outer; j < rightTarget; j++) m_outerIndex[j + 1]++;
1680  m_data.index(dst) = StorageIndex(inner);
1681  m_data.value(dst) = Scalar(0);
1682  return m_data.value(dst);
1683  }
1684  rightTarget++;
1685  }
1686  if (leftTarget >= 0) {
1687  Index start = m_outerIndex[leftTarget];
1688  Index end = start + m_innerNonZeros[leftTarget];
1689  Index nextStart = m_outerIndex[leftTarget + 1];
1690  Index capacity = nextStart - end;
1691  if (capacity > 0) {
1692  // tricky: dst is a lower bound, so we must insert at dst-1 when shifting left
1693  // move [nextStart, dst) to nextStart-1 and insert at dst-1
1694  Index chunkSize = dst - nextStart;
1695  if (chunkSize > 0) m_data.moveChunk(nextStart, nextStart - 1, chunkSize);
1696  m_innerNonZeros[outer]++;
1697  for (Index j = leftTarget; j < outer; j++) m_outerIndex[j + 1]--;
1698  m_data.index(dst - 1) = StorageIndex(inner);
1699  m_data.value(dst - 1) = Scalar(0);
1700  return m_data.value(dst - 1);
1701  }
1702  leftTarget--;
1703  }
1704  }
1705 
1706  // no room for interior insertion
1707  // nonZeros() == m_data.size()
1708  // record offset as outerIndexPtr will change
1709  Index dst_offset = dst - m_outerIndex[outer];
1710  // allocate space for random insertion
1711  if (m_data.allocatedSize() == 0) {
1712  // fast method to allocate space for one element per vector in empty matrix
1713  m_data.resize(m_outerSize);
1714  std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
1715  } else {
1716  // check for integer overflow: if maxReserveSize == 0, insertion is not possible
1717  Index maxReserveSize = static_cast<Index>(NumTraits<StorageIndex>::highest()) - m_data.allocatedSize();
1718  eigen_assert(maxReserveSize > 0);
1719  if (m_outerSize <= maxReserveSize) {
1720  // allocate space for one additional element per vector
1721  reserveInnerVectors(IndexVector::Constant(m_outerSize, 1));
1722  } else {
1723  // handle the edge case where StorageIndex is insufficient to reserve outerSize additional elements
1724  // allocate space for one additional element in the interval [outer,maxReserveSize)
1725  typedef internal::sparse_reserve_op<StorageIndex> ReserveSizesOp;
1726  typedef CwiseNullaryOp<ReserveSizesOp, IndexVector> ReserveSizesXpr;
1727  ReserveSizesXpr reserveSizesXpr(m_outerSize, 1, ReserveSizesOp(outer, m_outerSize, maxReserveSize));
1728  reserveInnerVectors(reserveSizesXpr);
1729  }
1730  }
1731  // insert element at `dst` with new outer indices
1732  Index start = m_outerIndex[outer];
1733  Index end = start + m_innerNonZeros[outer];
1734  Index new_dst = start + dst_offset;
1735  Index chunkSize = end - new_dst;
1736  if (chunkSize > 0) m_data.moveChunk(new_dst, new_dst + 1, chunkSize);
1737  m_innerNonZeros[outer]++;
1738  m_data.index(new_dst) = StorageIndex(inner);
1739  m_data.value(new_dst) = Scalar(0);
1740  return m_data.value(new_dst);
1741 }
1742 
1743 namespace internal {
1744 
1745 template <typename Scalar_, int Options_, typename StorageIndex_>
1746 struct evaluator<SparseMatrix<Scalar_, Options_, StorageIndex_>>
1747  : evaluator<SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>>> {
1750  evaluator() : Base() {}
1751  explicit evaluator(const SparseMatrixType& mat) : Base(mat) {}
1752 };
1753 
1754 } // namespace internal
1755 
1756 // Specialization for SparseMatrix.
1757 // Serializes [rows, cols, isCompressed, outerSize, innerBufferSize,
1758 // innerNonZeros, outerIndices, innerIndices, values].
1759 template <typename Scalar, int Options, typename StorageIndex>
1760 class Serializer<SparseMatrix<Scalar, Options, StorageIndex>, void> {
1761  public:
1763 
1764  struct Header {
1770  };
1771 
1772  EIGEN_DEVICE_FUNC size_t size(const SparseMat& value) const {
1773  // innerNonZeros.
1774  std::size_t num_storage_indices = value.isCompressed() ? 0 : value.outerSize();
1775  // Outer indices.
1776  num_storage_indices += value.outerSize() + 1;
1777  // Inner indices.
1778  const StorageIndex inner_buffer_size = value.outerIndexPtr()[value.outerSize()];
1779  num_storage_indices += inner_buffer_size;
1780  // Values.
1781  std::size_t num_values = inner_buffer_size;
1782  return sizeof(Header) + sizeof(Scalar) * num_values + sizeof(StorageIndex) * num_storage_indices;
1783  }
1784 
1786  if (EIGEN_PREDICT_FALSE(dest == nullptr)) return nullptr;
1787  if (EIGEN_PREDICT_FALSE(dest + size(value) > end)) return nullptr;
1788 
1789  const size_t header_bytes = sizeof(Header);
1790  Header header = {value.rows(), value.cols(), value.isCompressed(), value.outerSize(),
1791  value.outerIndexPtr()[value.outerSize()]};
1792  EIGEN_USING_STD(memcpy)
1793  memcpy(dest, &header, header_bytes);
1794  dest += header_bytes;
1795 
1796  // innerNonZeros.
1797  if (!header.compressed) {
1798  std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
1799  memcpy(dest, value.innerNonZeroPtr(), data_bytes);
1800  dest += data_bytes;
1801  }
1802 
1803  // Outer indices.
1804  std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
1805  memcpy(dest, value.outerIndexPtr(), data_bytes);
1806  dest += data_bytes;
1807 
1808  // Inner indices.
1809  data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
1810  memcpy(dest, value.innerIndexPtr(), data_bytes);
1811  dest += data_bytes;
1812 
1813  // Values.
1814  data_bytes = sizeof(Scalar) * header.inner_buffer_size;
1815  memcpy(dest, value.valuePtr(), data_bytes);
1816  dest += data_bytes;
1817 
1818  return dest;
1819  }
1820 
1822  if (EIGEN_PREDICT_FALSE(src == nullptr)) return nullptr;
1823  if (EIGEN_PREDICT_FALSE(src + sizeof(Header) > end)) return nullptr;
1824 
1825  const size_t header_bytes = sizeof(Header);
1826  Header header;
1827  EIGEN_USING_STD(memcpy)
1828  memcpy(&header, src, header_bytes);
1829  src += header_bytes;
1830 
1831  value.setZero();
1832  value.resize(header.rows, header.cols);
1833  if (header.compressed) {
1834  value.makeCompressed();
1835  } else {
1836  value.uncompress();
1837  }
1838 
1839  // Adjust value ptr size.
1840  value.data().resize(header.inner_buffer_size);
1841 
1842  // Initialize compressed state and inner non-zeros.
1843  if (!header.compressed) {
1844  // Inner non-zero counts.
1845  std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
1846  if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1847  memcpy(value.innerNonZeroPtr(), src, data_bytes);
1848  src += data_bytes;
1849  }
1850 
1851  // Outer indices.
1852  std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
1853  if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1854  memcpy(value.outerIndexPtr(), src, data_bytes);
1855  src += data_bytes;
1856 
1857  // Inner indices.
1858  data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
1859  if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1860  memcpy(value.innerIndexPtr(), src, data_bytes);
1861  src += data_bytes;
1862 
1863  // Values.
1864  data_bytes = sizeof(Scalar) * header.inner_buffer_size;
1865  if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1866  memcpy(value.valuePtr(), src, data_bytes);
1867  src += data_bytes;
1868  return src;
1869  }
1870 };
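// Usage sketch (illustrative, not from the Eigen sources) of round-tripping a SparseMatrix through
// this Serializer specialization, using only the size()/serialize()/deserialize() members above.
//
//   Eigen::SparseMatrix<float> A = /* ... */;
//   Eigen::Serializer<Eigen::SparseMatrix<float>> ser;
//   std::vector<std::uint8_t> buffer(ser.size(A));
//   std::uint8_t* out = ser.serialize(buffer.data(), buffer.data() + buffer.size(), A);
//   Eigen::SparseMatrix<float> B;
//   const std::uint8_t* in = ser.deserialize(buffer.data(), buffer.data() + buffer.size(), B);
//   // out / in are nullptr if the buffer is too small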
1871 
1872 } // end namespace Eigen
1873 
1874 #endif // EIGEN_SPARSEMATRIX_H
Definition: SparseMatrix.h:802
EIGEN_DEPRECATED EIGEN_DONT_INLINE Scalar & insertUncompressed(Index row, Index col)
Definition: SparseMatrix.h:1600
EIGEN_DONT_INLINE SparseMatrix & operator=(const SparseMatrixBase< OtherDerived > &other)
const ConstDiagonalReturnType diagonal() const
Definition: SparseMatrix.h:757
void swap(SparseMatrix &other)
Definition: SparseMatrix.h:829
Scalar & insertUncompressedAtByOuterInner(Index outer, Index inner, Index dst)
Definition: SparseMatrix.h:1665
void insertFromSortedTriplets(const InputIterators &begin, const InputIterators &end, DupFunctor dup_func)
Definition: SparseMatrix.h:1467
void assignDiagonal(const DiagXpr diagXpr, const Func &assignFunc)
Definition: SparseMatrix.h:1005
void setZero()
Definition: SparseMatrix.h:303
Index cols() const
Definition: SparseMatrix.h:161
SparseMatrix(const ReturnByValue< OtherDerived > &other)
Copy constructor with in-place evaluation.
Definition: SparseMatrix.h:814
void setFromSortedTriplets(const InputIterators &begin, const InputIterators &end)
Definition: SparseMatrix.h:1357
SparseMatrix & operator=(SparseMatrix &&other)
Definition: SparseMatrix.h:873
void setFromTriplets(const InputIterators &begin, const InputIterators &end, DupFunctor dup_func)
Definition: SparseMatrix.h:1345
void startVec(Index outer)
Definition: SparseMatrix.h:451
EIGEN_STRONG_INLINE Scalar & insertBackUncompressed(Index row, Index col)
Definition: SparseMatrix.h:975
SparseMatrix & operator=(const EigenBase< OtherDerived > &other)
Definition: SparseMatrix.h:880
Index outerSize() const
Definition: SparseMatrix.h:166
StorageIndex * m_outerIndex
Definition: SparseMatrix.h:153
Scalar & insertBackByOuterInner(Index outer, Index inner)
Definition: SparseMatrix.h:430
SparseMatrix()
Definition: SparseMatrix.h:766
void uncompress()
Definition: SparseMatrix.h:622
void insertEmptyOuterVectors(Index j, Index num=1)
Definition: SparseMatrix.h:509
void insertFromSortedTriplets(const InputIterators &begin, const InputIterators &end)
Definition: SparseMatrix.h:1450
Scalar * valuePtr()
Definition: SparseMatrix.h:175
void finalize()
Definition: SparseMatrix.h:461
void reserveInnerVectors(const SizesType &reserveSizes)
Definition: SparseMatrix.h:345
SparseMatrix< Scalar, IsRowMajor ? ColMajor :RowMajor, StorageIndex > TransposedSparseMatrix
Definition: SparseMatrix.h:149
const StorageIndex * innerNonZeroPtr() const
Definition: SparseMatrix.h:198
void makeCompressed()
Definition: SparseMatrix.h:589
Index m_outerSize
Definition: SparseMatrix.h:151
Scalar & insertByOuterInner(Index j, Index i)
Definition: SparseMatrix.h:566
StorageIndex * outerIndexPtr()
Definition: SparseMatrix.h:193
Scalar & coeffRef(Index row, Index col)
Definition: SparseMatrix.h:275
friend EIGEN_DEVICE_FUNC void swap(SparseMatrix &a, SparseMatrix &b)
Definition: SparseMatrix.h:838
const Scalar * valuePtr() const
Definition: SparseMatrix.h:171
void initAssignment(const Other &other)
Definition: SparseMatrix.h:945
Eigen::Map< SparseMatrix< Scalar, Options_, StorageIndex > > Map
Definition: SparseMatrix.h:135
void setFromSortedTriplets(const InputIterators &begin, const InputIterators &end, DupFunctor dup_func)
Definition: SparseMatrix.h:1374
EIGEN_STATIC_ASSERT((Options &(ColMajor|RowMajor))==Options, INVALID_MATRIX_TEMPLATE_PARAMETERS) struct default_prunning_func
Definition: SparseMatrix.h:1110
SparseMatrix(const SparseSelfAdjointView< OtherDerived, UpLo > &other)
Definition: SparseMatrix.h:793
void resize(Index rows, Index cols)
Definition: SparseMatrix.h:734
bool isCompressed() const
Definition: SparseCompressedBase.h:114
Index rows() const
Definition: SparseMatrix.h:159
SparseMatrix(const SparseMatrix &other)
Definition: SparseMatrix.h:807
StorageIndex * m_innerNonZeros
Definition: SparseMatrix.h:154
void setFromTriplets(const InputIterators &begin, const InputIterators &end)
Definition: SparseMatrix.h:1328
SparseMatrix(const DiagonalBase< OtherDerived > &other)
Copy constructor with in-place evaluation.
Definition: SparseMatrix.h:822
constexpr const Storage & data() const
Definition: SparseMatrix.h:207
void setIdentity()
Definition: SparseMatrix.h:842
Index innerSize() const
Definition: SparseMatrix.h:164
SparseMatrix(Index rows, Index cols)
Definition: SparseMatrix.h:769
void conservativeResize(Index rows, Index cols)
Definition: SparseMatrix.h:681
void insertFromTriplets(const InputIterators &begin, const InputIterators &end, DupFunctor dup_func)
Definition: SparseMatrix.h:1438
void prune(const Scalar &reference, const RealScalar &epsilon=NumTraits< RealScalar >::dummy_precision())
Definition: SparseMatrix.h:634
void resizeNonZeros(Index size)
Definition: SparseMatrix.h:754
Diagonal< SparseMatrix > DiagonalReturnType
Definition: SparseMatrix.h:136
SparseMatrix & operator=(const Product< Lhs, Rhs, AliasFreeProduct > &other)
SparseMatrix(const SparseMatrixBase< OtherDerived > &other)
Definition: SparseMatrix.h:775
Base::InnerIterator InnerIterator
Definition: SparseMatrix.h:138
Diagonal< const SparseMatrix > ConstDiagonalReturnType
Definition: SparseMatrix.h:137
~SparseMatrix()
Definition: SparseMatrix.h:931
void reserve(const SizesType &reserveSizes, const typename SizesType::value_type &enableif=typename SizesType::value_type())
Definition: SparseMatrix.h:337
SparseCompressedBase< SparseMatrix > Base
Definition: SparseMatrix.h:122
void prune(const KeepFunc &keep=KeepFunc())
Definition: SparseMatrix.h:646
SparseMatrix & operator=(const SparseMatrix &other)
Definition: SparseMatrix.h:855
@ Options
Definition: SparseMatrix.h:143
void removeOuterVectors(Index j, Index num=1)
Definition: SparseMatrix.h:476
const StorageIndex * outerIndexPtr() const
Definition: SparseMatrix.h:189
Scalar & insertBack(Index row, Index col)
Definition: SparseMatrix.h:424
Scalar & findOrInsertCoeff(Index row, Index col, bool *inserted)
Definition: SparseMatrix.h:231
void reserve(Index reserveSize)
Definition: SparseMatrix.h:315
@ IsRowMajor
Definition: SparseMatrixBase.h:99
const StorageIndex * innerIndexPtr() const
Definition: SparseMatrix.h:180
Scalar & insert(Index row, Index col)
Definition: SparseMatrix.h:1586
DiagonalReturnType diagonal()
Definition: SparseMatrix.h:763
friend std::ostream & operator<<(std::ostream &s, const SparseMatrix &m)
Definition: SparseMatrix.h:898
Scalar & insertBackByOuterInnerUnordered(Index outer, Index inner)
Definition: SparseMatrix.h:442
Base::ScalarVector ScalarVector
Definition: SparseMatrix.h:146
Index m_innerSize
Definition: SparseMatrix.h:152
constexpr Storage & data()
Definition: SparseMatrix.h:205
Base::ReverseInnerIterator ReverseInnerIterator
Definition: SparseMatrix.h:139
void insertFromTriplets(const InputIterators &begin, const InputIterators &end)
Definition: SparseMatrix.h:1421
internal::CompressedStorage< Scalar, StorageIndex > Storage
Definition: SparseMatrix.h:142
SparseMatrix(SparseMatrix &&other)
Definition: SparseMatrix.h:799
EIGEN_STRONG_INLINE Scalar & insertAtByOuterInner(Index outer, Index inner, Index dst)
Definition: SparseMatrix.h:1592
Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
Definition: SparseSelfAdjointView.h:52
a sparse vector class
Definition: SparseVector.h:62
void squeeze()
Definition: CompressedStorage.h:68
Scalar & value(Index i)
Definition: CompressedStorage.h:103
StorageIndex & index(Index i)
Definition: CompressedStorage.h:112
const StorageIndex * indexPtr() const
Definition: CompressedStorage.h:100
void swap(CompressedStorage &other)
Definition: CompressedStorage.h:51
void append(const Scalar &v, Index i)
Definition: CompressedStorage.h:87
const Scalar * valuePtr() const
Definition: CompressedStorage.h:98
Scalar atInRange(Index start, Index end, Index key, const Scalar &defaultValue=Scalar(0)) const
Definition: CompressedStorage.h:143
Index searchLowerIndex(Index key) const
Definition: CompressedStorage.h:122
void reserve(Index size)
Definition: CompressedStorage.h:63
void moveChunk(Index from, Index to, Index chunkSize)
Definition: CompressedStorage.h:178
void clear()
Definition: CompressedStorage.h:96
Index allocatedSize() const
Definition: CompressedStorage.h:95
Index size() const
Definition: CompressedStorage.h:94
void resize(Index size, double reserveSizeFactor=0)
Definition: CompressedStorage.h:72
static constexpr lastp1_t end
Definition: IndexedViewHelper.h:79
@ ColMajor
Definition: Constants.h:318
@ RowMajor
Definition: Constants.h:320
const unsigned int LvalueBit
Definition: Constants.h:148
const unsigned int RowMajorBit
Definition: Constants.h:70
const unsigned int CompressedAccessBit
Definition: Constants.h:195
RealScalar s
Definition: level1_cplx_impl.h:130
EIGEN_BLAS_FUNC() swap(int *n, RealScalar *px, int *incx, RealScalar *py, int *incy)
Definition: level1_impl.h:117
const Scalar * a
Definition: level2_cplx_impl.h:32
int * m
Definition: level2_cplx_impl.h:294
if(UPLO(*uplo)==INVALID) info
Definition: level3_impl.h:428
char char char int int * k
Definition: level2_impl.h:374
char char * op
Definition: level2_impl.h:374
Eigen::Matrix< Scalar, Dynamic, Dynamic, ColMajor > tmp
Definition: level3_impl.h:365
double eps
Definition: crbond_bessel.cc:24
EIGEN_DEVICE_FUNC IndexDest convert_index(const IndexSrc &idx)
Definition: XprHelper.h:63
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void call_assignment_no_alias(Dst &dst, const Src &src, const Func &func)
Definition: AssignEvaluator.h:812
EIGEN_DEVICE_FUNC bool isMuchSmallerThan(const Scalar &x, const OtherScalar &y, const typename NumTraits< Scalar >::Real &precision=NumTraits< Scalar >::dummy_precision())
Definition: MathFunctions.h:1916
typename remove_all< T >::type remove_all_t
Definition: Meta.h:142
EIGEN_DEVICE_FUNC void throw_std_bad_alloc()
Definition: Memory.h:110
void insert_from_triplets(const InputIterator &begin, const InputIterator &end, SparseMatrixType &mat, DupFunctor dup_func)
Definition: SparseMatrix.h:1255
EIGEN_DEVICE_FUNC void smart_copy(const T *start, const T *end, T *target)
Definition: Memory.h:569
void insert_from_triplets_sorted(const InputIterator &begin, const InputIterator &end, SparseMatrixType &mat, DupFunctor dup_func)
Definition: SparseMatrix.h:1272
void smart_memmove(const T *start, const T *end, T *target)
Definition: Memory.h:594
void set_from_triplets_sorted(const InputIterator &begin, const InputIterator &end, SparseMatrixType &mat, DupFunctor dup_func)
Definition: SparseMatrix.h:1179
void set_from_triplets(const InputIterator &begin, const InputIterator &end, SparseMatrixType &mat, DupFunctor dup_func)
Definition: SparseMatrix.h:1127
std::uint8_t uint8_t
Definition: Meta.h:36
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T maxi(const T &x, const T &y)
Definition: MathFunctions.h:926
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T mini(const T &x, const T &y)
Definition: MathFunctions.h:920
Namespace containing all symbols from the Eigen library.
Definition: bench_norm.cpp:70
squared absolute value
Definition: GlobalFunctions.h:87
const unsigned int NestByRefBit
Definition: Constants.h:173
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition: Meta.h:83
const int InnerRandomAccessPattern
Definition: SparseUtil.h:42
const int Dynamic
Definition: Constants.h:25
Extend namespace for flags.
Definition: fsi_chan_precond_driver.cc:56
type
Definition: compute_granudrum_aor.py:141
Definition: Eigen_Colamd.h:49
void start(const unsigned &i)
(Re-)start i-th timer
Definition: oomph_utilities.cc:243
double epsilon
Definition: osc_ring_sarah_asymptotics.h:43
#define EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
Definition: sparse_permutations.cpp:11
#define EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
Definition: sparse_permutations.cpp:22
Definition: Constants.h:519
Definition: EigenBase.h:33
constexpr EIGEN_DEVICE_FUNC Derived & derived()
Definition: EigenBase.h:49
Eigen::Index Index
The interface type of indices.
Definition: EigenBase.h:43
Definition: Constants.h:534
Holds information about the various numeric (i.e. scalar) types allowed by Eigen.
Definition: NumTraits.h:217
Definition: SparseMatrix.h:989
Index p
Definition: SparseMatrix.h:992
Index i
Definition: SparseMatrix.h:991
IndexPosPair(Index a_i, Index a_p)
Definition: SparseMatrix.h:990
Definition: Constants.h:522
Definition: AssignEvaluator.h:773
SparseMatrix< Scalar_, Options_, StorageIndex_ > SparseMatrixType
Definition: SparseMatrix.h:1749
evaluator< SparseCompressedBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > > > Base
Definition: SparseMatrix.h:1748
evaluator(const SparseMatrixType &mat)
Definition: SparseMatrix.h:1751
Definition: CoreEvaluators.h:104
Definition: XprHelper.h:205
@ PacketAccess
Definition: XprHelper.h:206
@ Cost
Definition: XprHelper.h:206
@ IsRepeatable
Definition: XprHelper.h:206
Definition: Meta.h:205
Definition: XprHelper.h:533
Definition: XprHelper.h:506
Definition: Meta.h:388
Definition: SparseMatrix.h:1240
const DupFunctor & m_functor
Definition: SparseMatrix.h:1247
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const DupFunctor & functor() const
Definition: SparseMatrix.h:1246
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator()(const LhsScalar &a, const RhsScalar &b) const
Definition: SparseMatrix.h:1243
typename result_of< DupFunctor(LhsScalar, RhsScalar)>::type result_type
Definition: SparseMatrix.h:1241
scalar_disjunction_op(const DupFunctor &op)
Definition: SparseMatrix.h:1242
Template functor to compute the sum of two scalars.
Definition: BinaryFunctors.h:34
Definition: SparseMatrix.h:94
StorageIndex m_remainder
Definition: SparseMatrix.h:109
Index m_end
Definition: SparseMatrix.h:110
EIGEN_DEVICE_FUNC sparse_reserve_op(Index begin, Index end, Index size)
Definition: SparseMatrix.h:95
StorageIndex m_val
Definition: SparseMatrix.h:109
Index m_begin
Definition: SparseMatrix.h:110
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE StorageIndex operator()(IndexType i) const
Definition: SparseMatrix.h:103
ref_selector< MatrixType >::type MatrixTypeNested
Definition: SparseMatrix.h:70
std::remove_reference_t< MatrixTypeNested > MatrixTypeNested_
Definition: SparseMatrix.h:71
SparseMatrix< Scalar_, Options_, StorageIndex_ > MatrixType
Definition: SparseMatrix.h:69
Definition: ForwardDeclarations.h:21
std::ptrdiff_t j
Definition: tut_arithmetic_redux_minmax.cpp:2