Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ > Class Template Reference

A versatile sparse matrix representation. More...

#include <SparseMatrix.h>

+ Inheritance diagram for Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >:

Classes

struct  IndexPosPair
 
class  SingletonVector
 

Public Types

enum  { Options = Options_ }
 
typedef Eigen::Map< SparseMatrix< Scalar, Options_, StorageIndex > > Map
 
typedef Diagonal< SparseMatrix > DiagonalReturnType
 
typedef Diagonal< const SparseMatrix > ConstDiagonalReturnType
 
typedef Base::InnerIterator InnerIterator
 
typedef Base::ReverseInnerIterator ReverseInnerIterator
 
typedef internal::CompressedStorage< Scalar, StorageIndex > Storage
 
typedef Base::IndexVector IndexVector
 
typedef Base::ScalarVector ScalarVector
 
- Public Types inherited from Eigen::SparseCompressedBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
typedef SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > > Base
 
- Public Types inherited from Eigen::SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
enum  
 
typedef internal::traits< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::Scalar Scalar
 
typedef Scalar value_type
 
typedef internal::packet_traits< Scalar >::type PacketScalar
 
typedef internal::traits< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::StorageKind StorageKind
 
typedef internal::traits< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::StorageIndex StorageIndex
 
typedef internal::add_const_on_value_type_if_arithmetic< typename internal::packet_traits< Scalar >::type >::type PacketReturnType
 
typedef SparseMatrixBase StorageBaseType
 
typedef Matrix< StorageIndex, Dynamic, 1 > IndexVector
 
typedef Matrix< Scalar, Dynamic, 1 > ScalarVector
 
typedef std::conditional_t< NumTraits< Scalar >::IsComplex, CwiseUnaryOp< internal::scalar_conjugate_op< Scalar >, Eigen::Transpose< const SparseMatrix< Scalar_, Options_, StorageIndex_ > > >, Transpose< const SparseMatrix< Scalar_, Options_, StorageIndex_ > > > AdjointReturnType
 
typedef Transpose< SparseMatrix< Scalar_, Options_, StorageIndex_ > > TransposeReturnType
 
typedef Transpose< const SparseMatrix< Scalar_, Options_, StorageIndex_ > > ConstTransposeReturnType
 
typedef SparseMatrix< Scalar, Flags & RowMajorBit ? RowMajor : ColMajor, StorageIndex > PlainObject
 
typedef NumTraits< Scalar >::Real RealScalar
 
typedef std::conditional_t< HasDirectAccess_, const Scalar &, Scalar > CoeffReturnType
 
typedef CwiseNullaryOp< internal::scalar_constant_op< Scalar >, Matrix< Scalar, Dynamic, Dynamic > > ConstantReturnType
 
typedef Matrix< Scalar, RowsAtCompileTime, ColsAtCompileTime > DenseMatrixType
 
typedef Matrix< Scalar, internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime), internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime)> SquareMatrixType
 
typedef EigenBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > > Base
 
- Public Types inherited from Eigen::EigenBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
typedef Eigen::Index Index
 The interface type of indices. More...
 
typedef internal::traits< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::StorageKind StorageKind
 

Public Member Functions

Index rows () const
 
Index cols () const
 
Index innerSize () const
 
Index outerSize () const
 
const Scalar * valuePtr () const
 
Scalar * valuePtr ()
 
const StorageIndex * innerIndexPtr () const
 
StorageIndex * innerIndexPtr ()
 
const StorageIndex * outerIndexPtr () const
 
StorageIndex * outerIndexPtr ()
 
const StorageIndex * innerNonZeroPtr () const
 
StorageIndex * innerNonZeroPtr ()
 
constexpr Storage & data ()
 
constexpr const Storage & data () const
 
Scalar coeff (Index row, Index col) const
 
Scalar & findOrInsertCoeff (Index row, Index col, bool *inserted)
 
Scalar & coeffRef (Index row, Index col)
 
Scalar & insert (Index row, Index col)
 
void setZero ()
 
void reserve (Index reserveSize)
 
template<class SizesType >
void reserve (const SizesType &reserveSizes, const typename SizesType::value_type &enableif=typename SizesType::value_type())
 
Scalar & insertBack (Index row, Index col)
 
Scalar & insertBackByOuterInner (Index outer, Index inner)
 
Scalar & insertBackByOuterInnerUnordered (Index outer, Index inner)
 
void startVec (Index outer)
 
void finalize ()
 
void removeOuterVectors (Index j, Index num=1)
 
void insertEmptyOuterVectors (Index j, Index num=1)
 
template<typename InputIterators >
void setFromTriplets (const InputIterators &begin, const InputIterators &end)
 
template<typename InputIterators , typename DupFunctor >
void setFromTriplets (const InputIterators &begin, const InputIterators &end, DupFunctor dup_func)
 
template<typename Derived , typename DupFunctor >
void collapseDuplicates (DenseBase< Derived > &wi, DupFunctor dup_func=DupFunctor())
 
template<typename InputIterators >
void setFromSortedTriplets (const InputIterators &begin, const InputIterators &end)
 
template<typename InputIterators , typename DupFunctor >
void setFromSortedTriplets (const InputIterators &begin, const InputIterators &end, DupFunctor dup_func)
 
template<typename InputIterators >
void insertFromTriplets (const InputIterators &begin, const InputIterators &end)
 
template<typename InputIterators , typename DupFunctor >
void insertFromTriplets (const InputIterators &begin, const InputIterators &end, DupFunctor dup_func)
 
template<typename InputIterators >
void insertFromSortedTriplets (const InputIterators &begin, const InputIterators &end)
 
template<typename InputIterators , typename DupFunctor >
void insertFromSortedTriplets (const InputIterators &begin, const InputIterators &end, DupFunctor dup_func)
 
Scalar & insertByOuterInner (Index j, Index i)
 
void makeCompressed ()
 
void uncompress ()
 
void prune (const Scalar &reference, const RealScalar &epsilon=NumTraits< RealScalar >::dummy_precision())
 
template<typename KeepFunc >
void prune (const KeepFunc &keep=KeepFunc())
 
void conservativeResize (Index rows, Index cols)
 
void resize (Index rows, Index cols)
 
void resizeNonZeros (Index size)
 
const ConstDiagonalReturnType diagonal () const
 
DiagonalReturnType diagonal ()
 
 SparseMatrix ()
 
 SparseMatrix (Index rows, Index cols)
 
template<typename OtherDerived >
 SparseMatrix (const SparseMatrixBase< OtherDerived > &other)
 
template<typename OtherDerived , unsigned int UpLo>
 SparseMatrix (const SparseSelfAdjointView< OtherDerived, UpLo > &other)
 
 SparseMatrix (SparseMatrix &&other)
 
template<typename OtherDerived >
 SparseMatrix (SparseCompressedBase< OtherDerived > &&other)
 
 SparseMatrix (const SparseMatrix &other)
 
template<typename OtherDerived >
 SparseMatrix (const ReturnByValue< OtherDerived > &other)
 Copy constructor with in-place evaluation. More...
 
template<typename OtherDerived >
 SparseMatrix (const DiagonalBase< OtherDerived > &other)
 Copy constructor with in-place evaluation. More...
 
void swap (SparseMatrix &other)
 
void setIdentity ()
 
SparseMatrix & operator= (const SparseMatrix &other)
 
SparseMatrix & operator= (SparseMatrix &&other)
 
template<typename OtherDerived >
SparseMatrix & operator= (const EigenBase< OtherDerived > &other)
 
template<typename Lhs , typename Rhs >
SparseMatrix & operator= (const Product< Lhs, Rhs, AliasFreeProduct > &other)
 
template<typename OtherDerived >
EIGEN_DONT_INLINE SparseMatrix & operator= (const SparseMatrixBase< OtherDerived > &other)
 
template<typename OtherDerived >
SparseMatrix & operator= (SparseCompressedBase< OtherDerived > &&other)
 
 ~SparseMatrix ()
 
Scalar sum () const
 
EIGEN_STRONG_INLINE Scalar & insertBackUncompressed (Index row, Index col)
 
template<typename OtherDerived >
EIGEN_DONT_INLINE SparseMatrix< Scalar, Options_, StorageIndex_ > & operator= (const SparseMatrixBase< OtherDerived > &other)
 
template<typename Lhs , typename Rhs >
SparseMatrix< Scalar, Options_, StorageIndex_ > & operator= (const Product< Lhs, Rhs, AliasFreeProduct > &src)
 
bool isCompressed () const
 
Index nonZeros () const
 
- Public Member Functions inherited from Eigen::SparseCompressedBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
Index nonZeros () const
 
const Scalar * valuePtr () const
 
Scalar * valuePtr ()
 
const StorageIndex * innerIndexPtr () const
 
StorageIndex * innerIndexPtr ()
 
const StorageIndex * outerIndexPtr () const
 
StorageIndex * outerIndexPtr ()
 
const StorageIndex * innerNonZeroPtr () const
 
StorageIndex * innerNonZeroPtr ()
 
bool isCompressed () const
 
const Map< const Array< Scalar, Dynamic, 1 > > coeffs () const
 
Map< Array< Scalar, Dynamic, 1 > > coeffs ()
 
void sortInnerIndices (Index begin, Index end)
 
void sortInnerIndices ()
 
Index innerIndicesAreSorted (Index begin, Index end) const
 
Index innerIndicesAreSorted () const
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator= (const EigenBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator= (const ReturnByValue< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator= (const SparseMatrixBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator= (const SparseMatrix< Scalar_, Options_, StorageIndex_ > &other)
 
- Public Member Functions inherited from Eigen::SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator= (const EigenBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator= (const ReturnByValue< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator= (const SparseMatrixBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator= (const SparseMatrix< Scalar_, Options_, StorageIndex_ > &other)
 
const SparseMatrix< Scalar_, Options_, StorageIndex_ > & derived () const
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & derived ()
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & const_cast_derived () const
 
Index rows () const
 
Index cols () const
 
Index size () const
 
bool isVector () const
 
Index outerSize () const
 
Index innerSize () const
 
bool isRValue () const
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & markAsRValue ()
 
 SparseMatrixBase ()
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator+= (const SparseMatrixBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator+= (const DiagonalBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator+= (const EigenBase< OtherDerived > &other)
 
EIGEN_STRONG_INLINE SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator+= (const SparseMatrixBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator-= (const SparseMatrixBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator-= (const DiagonalBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator-= (const EigenBase< OtherDerived > &other)
 
EIGEN_STRONG_INLINE SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator-= (const SparseMatrixBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator*= (const Scalar &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator*= (const SparseMatrixBase< OtherDerived > &other)
 
SparseMatrix< Scalar_, Options_, StorageIndex_ > & operator/= (const Scalar &other)
 
EIGEN_STRONG_INLINE const CwiseProductDenseReturnType< OtherDerived >::Type cwiseProduct (const MatrixBase< OtherDerived > &other) const
 
EIGEN_STRONG_INLINE const SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::template CwiseProductDenseReturnType< OtherDerived >::Type cwiseProduct (const MatrixBase< OtherDerived > &other) const
 
const Product< SparseMatrix< Scalar_, Options_, StorageIndex_ >, OtherDerived > operator* (const DiagonalBase< OtherDerived > &other) const
 
const Product< SparseMatrix< Scalar_, Options_, StorageIndex_ >, OtherDerived, AliasFreeProduct > operator* (const SparseMatrixBase< OtherDerived > &other) const
 
const Product< SparseMatrix< Scalar_, Options_, StorageIndex_ >, OtherDerived > operator* (const MatrixBase< OtherDerived > &other) const
 
SparseSymmetricPermutationProduct< SparseMatrix< Scalar_, Options_, StorageIndex_ >, Upper|Lower > twistedBy (const PermutationMatrix< Dynamic, Dynamic, StorageIndex > &perm) const
 
const TriangularView< const SparseMatrix< Scalar_, Options_, StorageIndex_ >, Mode > triangularView () const
 
ConstSelfAdjointViewReturnType< UpLo >::Type selfadjointView () const
 
SelfAdjointViewReturnType< UpLo >::Type selfadjointView ()
 
SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::template ConstSelfAdjointViewReturnType< UpLo >::Type selfadjointView () const
 
SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::template SelfAdjointViewReturnType< UpLo >::Type selfadjointView ()
 
Scalar dot (const MatrixBase< OtherDerived > &other) const
 
Scalar dot (const SparseMatrixBase< OtherDerived > &other) const
 
internal::traits< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::Scalar dot (const MatrixBase< OtherDerived > &other) const
 
internal::traits< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::Scalar dot (const SparseMatrixBase< OtherDerived > &other) const
 
RealScalar squaredNorm () const
 
RealScalar norm () const
 
RealScalar blueNorm () const
 
TransposeReturnType transpose ()
 
const ConstTransposeReturnType transpose () const
 
const AdjointReturnType adjoint () const
 
DenseMatrixType toDense () const
 
bool isApprox (const SparseMatrixBase< OtherDerived > &other, const RealScalar &prec=NumTraits< Scalar >::dummy_precision()) const
 
bool isApprox (const MatrixBase< OtherDerived > &other, const RealScalar &prec=NumTraits< Scalar >::dummy_precision()) const
 
const internal::eval< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::type eval () const
 
Scalar sum () const
 
const SparseView< SparseMatrix< Scalar_, Options_, StorageIndex_ > > pruned (const Scalar &reference=Scalar(0), const RealScalar &epsilon=NumTraits< Scalar >::dummy_precision()) const
 
- Public Member Functions inherited from Eigen::EigenBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
constexpr EIGEN_DEVICE_FUNC SparseMatrix< Scalar_, Options_, StorageIndex_ > & derived ()
 
constexpr EIGEN_DEVICE_FUNC const SparseMatrix< Scalar_, Options_, StorageIndex_ > & derived () const
 
EIGEN_DEVICE_FUNC SparseMatrix< Scalar_, Options_, StorageIndex_ > & const_cast_derived () const
 
EIGEN_DEVICE_FUNC const SparseMatrix< Scalar_, Options_, StorageIndex_ > & const_derived () const
 
EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows () const EIGEN_NOEXCEPT
 
EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols () const EIGEN_NOEXCEPT
 
EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index size () const EIGEN_NOEXCEPT
 
EIGEN_DEVICE_FUNC void evalTo (Dest &dst) const
 
EIGEN_DEVICE_FUNC void addTo (Dest &dst) const
 
EIGEN_DEVICE_FUNC void subTo (Dest &dst) const
 
EIGEN_DEVICE_FUNC void applyThisOnTheRight (Dest &dst) const
 
EIGEN_DEVICE_FUNC void applyThisOnTheLeft (Dest &dst) const
 
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DeviceWrapper< SparseMatrix< Scalar_, Options_, StorageIndex_ >, Device > device (Device &device)
 
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DeviceWrapper< const SparseMatrix< Scalar_, Options_, StorageIndex_ >, Device > device (Device &device) const
 

Protected Types

typedef SparseMatrix< Scalar, IsRowMajor ? ColMajor : RowMajor, StorageIndex > TransposedSparseMatrix
 
- Protected Types inherited from Eigen::SparseCompressedBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
typedef Base::IndexVector IndexVector
 

Protected Member Functions

template<class SizesType >
void reserveInnerVectors (const SizesType &reserveSizes)
 
template<typename Other >
void initAssignment (const Other &other)
 
EIGEN_DEPRECATED EIGEN_DONT_INLINE Scalar & insertCompressed (Index row, Index col)
 
EIGEN_DEPRECATED EIGEN_DONT_INLINE Scalar & insertUncompressed (Index row, Index col)
 
template<typename DiagXpr , typename Func >
void assignDiagonal (const DiagXpr diagXpr, const Func &assignFunc)
 
EIGEN_STRONG_INLINE Scalar & insertAtByOuterInner (Index outer, Index inner, Index dst)
 
Scalar & insertCompressedAtByOuterInner (Index outer, Index inner, Index dst)
 
Scalar & insertUncompressedAtByOuterInner (Index outer, Index inner, Index dst)
 
- Protected Member Functions inherited from Eigen::SparseCompressedBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
Eigen::Map< IndexVector > innerNonZeros ()
 
const Eigen::Map< const IndexVector > innerNonZeros () const
 
 SparseCompressedBase ()
 
internal::LowerBoundIndex lower_bound (Index row, Index col) const
 
- Protected Member Functions inherited from Eigen::SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
SparseMatrix< Scalar_, Options_, StorageIndex_ > & assign (const OtherDerived &other)
 
void assignGeneric (const OtherDerived &other)
 

Protected Attributes

Index m_outerSize
 
Index m_innerSize
 
StorageIndex * m_outerIndex
 
StorageIndex * m_innerNonZeros
 
Storage m_data
 
- Protected Attributes inherited from Eigen::SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
bool m_isRValue
 

Private Types

typedef SparseCompressedBase< SparseMatrix > Base
 

Private Member Functions

 EIGEN_STATIC_ASSERT ((Options &(ColMajor|RowMajor))==Options, INVALID_MATRIX_TEMPLATE_PARAMETERS) struct default_prunning_func
 

Friends

class SparseVector< Scalar_, 0, StorageIndex_ >
 
template<typename , typename , typename , typename , typename >
struct internal::Assignment
 
EIGEN_DEVICE_FUNC void swap (SparseMatrix &a, SparseMatrix &b)
 
std::ostream & operator<< (std::ostream &s, const SparseMatrix &m)
 

Additional Inherited Members

- Static Protected Member Functions inherited from Eigen::SparseMatrixBase< SparseMatrix< Scalar_, Options_, StorageIndex_ > >
static StorageIndex convert_index (const Index idx)
 

Detailed Description

template<typename Scalar_, int Options_, typename StorageIndex_>
class Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >

A versatile sparse matrix representation.

This class implements a more versatile variant of the common compressed row/column storage format. Each column's (resp. row's) non-zeros are stored as pairs of values and their associated row (resp. column) indices. All the non-zeros are stored in a single large buffer. Unlike the plain compressed format, there may be extra space between the non-zeros of two successive columns (resp. rows), so that inserting a new non-zero can be done with limited memory reallocation and copying.

A call to the function makeCompressed() turns the matrix into the standard compressed format compatible with many libraries.

More details on this storage scheme are given in the manual pages.

Template Parameters
Scalar_  the scalar type, i.e. the type of the coefficients
Options_  Union of bit flags controlling the storage scheme. Currently the only possibility is ColMajor or RowMajor. The default is 0 which means column-major.
StorageIndex_  the type of the indices. It has to be a signed type (e.g., short, int, std::ptrdiff_t). Default is int.
Warning
In Eigen 3.2, the undocumented type SparseMatrix::Index was improperly defined as the storage index type (e.g., int), whereas it is now (starting from Eigen 3.3) deprecated and always defined as Eigen::Index. Code making use of SparseMatrix::Index will thus likely have to be changed to use SparseMatrix::StorageIndex instead.

This class can be extended with the help of the plugin mechanism described on the page Extending MatrixBase (and other classes) by defining the preprocessor symbol EIGEN_SPARSEMATRIX_PLUGIN.
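
As a quick illustration of this storage scheme, here is a minimal sketch (not part of the original reference; the 4 x 4 size and values are arbitrary) that assembles a column-major matrix from triplets and then packs it with makeCompressed():

  #include <Eigen/SparseCore>
  #include <vector>

  int main() {
    // Assemble a 4 x 4 column-major sparse matrix from (row, col, value) triplets.
    std::vector<Eigen::Triplet<double>> triplets;
    triplets.emplace_back(0, 0, 1.0);
    triplets.emplace_back(1, 2, 2.0);
    triplets.emplace_back(3, 1, 3.0);

    Eigen::SparseMatrix<double> A(4, 4);
    A.setFromTriplets(triplets.begin(), triplets.end());

    A.coeffRef(2, 2) += 5.0;  // inserts the entry, leaving possible slack in the buffer
    A.makeCompressed();       // pack into the standard compressed (CSC) format
    return 0;
  }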

Member Typedef Documentation

◆ Base

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef SparseCompressedBase<SparseMatrix> Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::Base
private

◆ ConstDiagonalReturnType

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef Diagonal<const SparseMatrix> Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::ConstDiagonalReturnType

◆ DiagonalReturnType

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef Diagonal<SparseMatrix> Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::DiagonalReturnType

◆ IndexVector

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef Base::IndexVector Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::IndexVector

◆ InnerIterator

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef Base::InnerIterator Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::InnerIterator

◆ Map

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef Eigen::Map<SparseMatrix<Scalar, Options_, StorageIndex> > Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::Map

◆ ReverseInnerIterator

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef Base::ReverseInnerIterator Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::ReverseInnerIterator

◆ ScalarVector

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef Base::ScalarVector Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::ScalarVector

◆ Storage

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef internal::CompressedStorage<Scalar, StorageIndex> Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::Storage

◆ TransposedSparseMatrix

template<typename Scalar_ , int Options_, typename StorageIndex_ >
typedef SparseMatrix<Scalar, IsRowMajor ? ColMajor : RowMajor, StorageIndex> Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::TransposedSparseMatrix
protected

Member Enumeration Documentation

◆ anonymous enum

template<typename Scalar_ , int Options_, typename StorageIndex_ >
anonymous enum
Enumerator
Options 
143 { Options = Options_ };
@ Options
Definition: SparseMatrix.h:143

Constructor & Destructor Documentation

◆ SparseMatrix() [1/9]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix ( )
inline

Default constructor yielding an empty 0 x 0 matrix

766 : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { resize(0, 0); }
StorageIndex * m_outerIndex
Definition: SparseMatrix.h:153
Index m_outerSize
Definition: SparseMatrix.h:151
void resize(Index rows, Index cols)
Definition: SparseMatrix.h:734
StorageIndex * m_innerNonZeros
Definition: SparseMatrix.h:154
Index m_innerSize
Definition: SparseMatrix.h:152

References Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::resize().

◆ SparseMatrix() [2/9]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix ( Index  rows,
Index  cols 
)
inline

Constructs a rows x cols empty matrix

770  resize(rows, cols);
771  }
Index cols() const
Definition: SparseMatrix.h:161
Index rows() const
Definition: SparseMatrix.h:159

References Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::cols(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::resize(), and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::rows().

◆ SparseMatrix() [3/9]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename OtherDerived >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix ( const SparseMatrixBase< OtherDerived > &  other)
inline

Constructs a sparse matrix from the sparse expression other

779  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
780  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
781  if (needToTranspose)
782  *this = other.derived();
783  else {
784 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
786 #endif
787  internal::call_assignment_no_alias(*this, other.derived());
788  }
789  }
EIGEN_STATIC_ASSERT((Options &(ColMajor|RowMajor))==Options, INVALID_MATRIX_TEMPLATE_PARAMETERS) struct default_prunning_func
Definition: SparseMatrix.h:1110
const unsigned int RowMajorBit
Definition: Constants.h:70
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void call_assignment_no_alias(Dst &dst, const Src &src, const Func &func)
Definition: AssignEvaluator.h:812
Extend namespace for flags.
Definition: fsi_chan_precond_driver.cc:56
#define EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
Definition: sparse_permutations.cpp:22
@ value
Definition: Meta.h:206

References Eigen::internal::call_assignment_no_alias(), Eigen::SparseMatrixBase< Derived >::derived(), EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::EIGEN_STATIC_ASSERT(), and Eigen::RowMajorBit.

◆ SparseMatrix() [4/9]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename OtherDerived , unsigned int UpLo>
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix ( const SparseSelfAdjointView< OtherDerived, UpLo > &  other)
inline

Constructs a sparse matrix from the sparse selfadjoint view other

795  Base::operator=(other);
796  }
Derived & operator=(const Derived &other)
Definition: SparseAssign.h:43

References Eigen::SparseCompressedBase< Derived >::operator=().

◆ SparseMatrix() [5/9]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix ( SparseMatrix< Scalar_, Options_, StorageIndex_ > &&  other)
inline

Move constructor

799 : SparseMatrix() { this->swap(other); }
void swap(SparseMatrix &other)
Definition: SparseMatrix.h:829
SparseMatrix()
Definition: SparseMatrix.h:766

References Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::swap().

◆ SparseMatrix() [6/9]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename OtherDerived >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix ( SparseCompressedBase< OtherDerived > &&  other)
inline
802  : SparseMatrix() {
803  *this = other.derived().markAsRValue();
804  }

References Eigen::SparseMatrixBase< Derived >::derived().

◆ SparseMatrix() [7/9]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix ( const SparseMatrix< Scalar_, Options_, StorageIndex_ > &  other)
inline

Copy constructor (it performs a deep copy)

809  *this = other.derived();
810  }
SparseCompressedBase< SparseMatrix > Base
Definition: SparseMatrix.h:122

References Eigen::SparseMatrixBase< Derived >::derived().

◆ SparseMatrix() [8/9]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename OtherDerived >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix ( const ReturnByValue< OtherDerived > &  other)
inline

Copy constructor with in-place evaluation.

816  initAssignment(other);
817  other.evalTo(*this);
818  }
void initAssignment(const Other &other)
Definition: SparseMatrix.h:945

References Eigen::ReturnByValue< Derived >::evalTo(), and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::initAssignment().

◆ SparseMatrix() [9/9]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename OtherDerived >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix ( const DiagonalBase< OtherDerived > &  other)
inlineexplicit

Copy constructor with in-place evaluation.

824  *this = other.derived();
825  }

References Eigen::DiagonalBase< Derived >::derived().

◆ ~SparseMatrix()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::~SparseMatrix ( )
inline

Destructor

931  {
932  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_outerIndex, m_outerSize + 1);
933  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
934  }

Member Function Documentation

◆ assignDiagonal()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename DiagXpr , typename Func >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::assignDiagonal ( const DiagXpr  diagXpr,
const Func &  assignFunc 
)
inlineprotected

Assigns diagXpr to the diagonal of *this. There are different strategies:
1 - if *this is overwritten (Func==assign_op) or *this is empty, then we can treat *this as a dense vector expression.
2 - otherwise, for each diagonal coefficient:
2.a - if it already exists, we update it,
2.b - if the correct position is at the end of the vector and there is capacity, we push it to the back,
2.c - otherwise, the insertion requires a data move: we record the insertion location and handle it in a second pass.
3 - at the end, if some entries failed to be updated in place, we allocate a new buffer, copy each chunk to the right position, and insert the new elements.

1005  {
1006  constexpr StorageIndex kEmptyIndexVal(-1);
1007  typedef typename ScalarVector::AlignedMapType ValueMap;
1008 
1009  Index n = diagXpr.size();
1010 
1011  const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar, Scalar>>::value;
1012  if (overwrite) {
1013  if ((m_outerSize != n) || (m_innerSize != n)) resize(n, n);
1014  }
1015 
1016  if (m_data.size() == 0 || overwrite) {
1017  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
1018  m_innerNonZeros = 0;
1019  resizeNonZeros(n);
1020  ValueMap valueMap(valuePtr(), n);
1021  std::iota(m_outerIndex, m_outerIndex + n + 1, StorageIndex(0));
1022  std::iota(innerIndexPtr(), innerIndexPtr() + n, StorageIndex(0));
1023  valueMap.setZero();
1024  internal::call_assignment_no_alias(valueMap, diagXpr, assignFunc);
1025  } else {
1026  internal::evaluator<DiagXpr> diaEval(diagXpr);
1027 
1029  typename IndexVector::AlignedMapType insertionLocations(tmp, n);
1030  insertionLocations.setConstant(kEmptyIndexVal);
1031 
1032  Index deferredInsertions = 0;
1033  Index shift = 0;
1034 
1035  for (Index j = 0; j < n; j++) {
1036  Index begin = m_outerIndex[j];
1037  Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
1038  Index capacity = m_outerIndex[j + 1] - end;
1039  Index dst = m_data.searchLowerIndex(begin, end, j);
1040  // the entry exists: update it now
1041  if (dst != end && m_data.index(dst) == StorageIndex(j))
1042  assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
1043  // the entry belongs at the back of the vector: push to back
1044  else if (dst == end && capacity > 0)
1045  assignFunc.assignCoeff(insertBackUncompressed(j, j), diaEval.coeff(j));
1046  // the insertion requires a data move, record insertion location and handle in second pass
1047  else {
1048  insertionLocations.coeffRef(j) = StorageIndex(dst);
1049  deferredInsertions++;
1050  // if there is no capacity, all vectors to the right of this are shifted
1051  if (capacity == 0) shift++;
1052  }
1053  }
1054 
1055  if (deferredInsertions > 0) {
1056  m_data.resize(m_data.size() + shift);
1059  for (Index j = m_outerSize - 1; deferredInsertions > 0; j--) {
1060  Index begin = m_outerIndex[j];
1061  Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
1062  Index capacity = m_outerIndex[j + 1] - end;
1063 
1064  bool doInsertion = insertionLocations(j) >= 0;
1065  bool breakUpCopy = doInsertion && (capacity > 0);
1066  // break up copy for sorted insertion into inactive nonzeros
1067  // optionally, add another criterium, i.e. 'breakUpCopy || (capacity > threhsold)'
1068  // where `threshold >= 0` to skip inactive nonzeros in each vector
1069  // this reduces the total number of copied elements, but requires more moveChunk calls
1070  if (breakUpCopy) {
1071  Index copyBegin = m_outerIndex[j + 1];
1072  Index to = copyBegin + shift;
1073  Index chunkSize = copyEnd - copyBegin;
1074  m_data.moveChunk(copyBegin, to, chunkSize);
1075  copyEnd = end;
1076  }
1077 
1078  m_outerIndex[j + 1] += shift;
1079 
1080  if (doInsertion) {
1081  // if there is capacity, shift into the inactive nonzeros
1082  if (capacity > 0) shift++;
1083  Index copyBegin = insertionLocations(j);
1084  Index to = copyBegin + shift;
1085  Index chunkSize = copyEnd - copyBegin;
1086  m_data.moveChunk(copyBegin, to, chunkSize);
1087  Index dst = to - 1;
1088  m_data.index(dst) = StorageIndex(j);
1089  m_data.value(dst) = Scalar(0);
1090  assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
1091  if (!isCompressed()) m_innerNonZeros[j]++;
1092  shift--;
1093  deferredInsertions--;
1094  copyEnd = copyBegin;
1095  }
1096  }
1097  }
1098  eigen_assert((shift == 0) && (deferredInsertions == 0));
1099  }
1100  }
const unsigned n
Definition: CG3DPackingUnitTest.cpp:11
#define eigen_assert(x)
Definition: Macros.h:910
#define ei_declare_aligned_stack_constructed_variable(TYPE, NAME, SIZE, BUFFER)
Definition: Memory.h:806
Eigen::Map< Derived, AlignedMax > AlignedMapType
Definition: PlainObjectBase.h:144
internal::traits< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::StorageIndex StorageIndex
Definition: SparseMatrixBase.h:44
internal::traits< SparseMatrix< Scalar_, Options_, StorageIndex_ > >::Scalar Scalar
Definition: SparseMatrixBase.h:32
Storage m_data
Definition: SparseMatrix.h:155
EIGEN_STRONG_INLINE Scalar & insertBackUncompressed(Index row, Index col)
Definition: SparseMatrix.h:975
const Scalar * valuePtr() const
Definition: SparseMatrix.h:171
bool isCompressed() const
Definition: SparseCompressedBase.h:114
void resizeNonZeros(Index size)
Definition: SparseMatrix.h:754
const StorageIndex * innerIndexPtr() const
Definition: SparseMatrix.h:180
Scalar & value(Index i)
Definition: CompressedStorage.h:103
StorageIndex & index(Index i)
Definition: CompressedStorage.h:112
Index searchLowerIndex(Index key) const
Definition: CompressedStorage.h:122
void moveChunk(Index from, Index to, Index chunkSize)
Definition: CompressedStorage.h:178
Index size() const
Definition: CompressedStorage.h:94
void resize(Index size, double reserveSizeFactor=0)
Definition: CompressedStorage.h:72
static constexpr lastp1_t end
Definition: IndexedViewHelper.h:79
Eigen::Matrix< Scalar, Dynamic, Dynamic, ColMajor > tmp
Definition: level3_impl.h:365
squared absolute value
Definition: GlobalFunctions.h:87
Eigen::Index Index
The interface type of indices.
Definition: EigenBase.h:43
std::ptrdiff_t j
Definition: tut_arithmetic_redux_minmax.cpp:2

References Eigen::internal::call_assignment_no_alias(), ei_declare_aligned_stack_constructed_variable, eigen_assert, Eigen::placeholders::end, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::index(), j, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::moveChunk(), n, resize(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::resize(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::searchLowerIndex(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::size(), tmp, Eigen::value, and Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::value().

Referenced by Eigen::internal::Assignment< DstXprType, SrcXprType, Functor, Diagonal2Sparse >::run().

◆ coeff()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Scalar Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::coeff ( Index  row,
Index  col 
) const
inline
Returns
the value of the matrix at position i, j. This function returns Scalar(0) if the element is an explicit zero.
211  {
212  eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
213 
214  const Index outer = IsRowMajor ? row : col;
215  const Index inner = IsRowMajor ? col : row;
216  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer + 1];
217  return m_data.atInRange(m_outerIndex[outer], end, inner);
218  }
m col(1)
m row(1)
@ IsRowMajor
Definition: SparseMatrixBase.h:99
Scalar atInRange(Index start, Index end, Index key, const Scalar &defaultValue=Scalar(0)) const
Definition: CompressedStorage.h:143

References Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::atInRange(), col(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::cols(), eigen_assert, Eigen::placeholders::end, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::IsRowMajor, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_data, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerNonZeros, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerIndex, row(), and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::rows().

Referenced by EIGEN_DECLARE_TEST(), Eigen::Rotation2D< Scalar_ >::fromRotationMatrix(), Eigen::internal::llt_rank_update_lower(), Eigen::internal::visitor_impl< Visitor, Derived, UnrollCount, Vectorize, false, ShortCircuitEvaluation >::run(), Eigen::internal::visitor_impl< Visitor, Derived, UnrollCount, Vectorize, true, ShortCircuitEvaluation >::run(), Eigen::internal::visitor_impl< Visitor, Derived, Dynamic, false, false, ShortCircuitEvaluation >::run(), Eigen::internal::visitor_impl< Visitor, Derived, Dynamic, true, false, ShortCircuitEvaluation >::run(), Eigen::internal::visitor_impl< Visitor, Derived, Dynamic, false, true, ShortCircuitEvaluation >::run(), Eigen::internal::visitor_impl< Visitor, Derived, Dynamic, true, true, ShortCircuitEvaluation >::run(), Eigen::internal::permutation_matrix_product< ExpressionType, Side, Transposed, DenseShape >::run(), Eigen::internal::quaternionbase_assign_impl< Other, 3, 3 >::run(), Eigen::internal::direct_selfadjoint_eigenvalues< SolverType, 2, false >::run(), Eigen::internal::llt_inplace< Scalar, Lower >::unblocked(), Eigen::internal::ldlt_inplace< Lower >::unblocked(), Eigen::internal::ldlt_inplace< Lower >::updateInPlace(), and Eigen::internal::upperbidiagonalization_inplace_unblocked().
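
For illustration, a minimal sketch (not from the original page) showing that coeff() is a pure read and never inserts:

  #include <Eigen/SparseCore>

  void coeffDemo() {  // hypothetical helper name
    Eigen::SparseMatrix<double> A(3, 3);
    A.insert(0, 1) = 4.0;
    double a = A.coeff(0, 1);  // 4.0, the stored value
    double b = A.coeff(2, 2);  // Scalar(0): the entry is absent and is not created
  }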

◆ coeffRef()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Scalar& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::coeffRef ( Index  row,
Index  col 
)
inline
Returns
a non-const reference to the value of the matrix at position i, j

If the element does not exist then it is inserted via the insert(Index,Index) function, which itself turns the matrix into a non-compressed form if it was not already.

This is a O(log(nnz_j)) operation (binary search) plus the cost of insert(Index,Index) function if the element does not already exist.

275 { return findOrInsertCoeff(row, col, nullptr); }
Scalar & findOrInsertCoeff(Index row, Index col, bool *inserted)
Definition: SparseMatrix.h:231

References col(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::findOrInsertCoeff(), and row().

Referenced by bug1105(), Eigen::SparseInverse< Scalar >::computeInverse(), sparseGaussianTest< Scalar >::df(), Eigen::internal::householder_qr_inplace_update(), Eigen::internal::llt_rank_update_lower(), Eigen::internal::transform_make_affine< Mode >::run(), Eigen::internal::direct_selfadjoint_eigenvalues< SolverType, 2, false >::run(), setrand_eigen_compact(), setrand_eigen_dynamic(), setrand_eigen_gnu_hash(), setrand_eigen_google_dense(), setrand_eigen_google_sparse(), setrand_eigen_sumeq(), setrand_scipy(), Eigen::internal::llt_inplace< Scalar, Lower >::unblocked(), Eigen::internal::ldlt_inplace< Lower >::unblocked(), Eigen::internal::ldlt_inplace< Lower >::updateInPlace(), and Eigen::internal::upperbidiagonalization_inplace_unblocked().
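
A hedged sketch of coeffRef() in an assembly loop; the reserve() call is optional but limits reallocations (the size 100 and the 6 non-zeros per column are arbitrary assumptions):

  #include <Eigen/SparseCore>

  void coeffRefDemo() {  // hypothetical helper name
    const int n = 100;
    Eigen::SparseMatrix<double> A(n, n);
    A.reserve(Eigen::VectorXi::Constant(n, 6));  // expect ~6 non-zeros per column
    for (int j = 0; j < n; ++j) {
      A.coeffRef(j, j) += 2.0;                   // inserted on first access, updated afterwards
      if (j > 0) A.coeffRef(j - 1, j) -= 1.0;
    }
    A.makeCompressed();
  }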

◆ collapseDuplicates()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename Derived , typename DupFunctor >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::collapseDuplicates ( DenseBase< Derived > &  wi,
DupFunctor  dup_func = DupFunctor() 
)
1477  {
1478  // removes duplicate entries and compresses the matrix
1479  // the excess allocated memory is not released
1480  // the inner indices do not need to be sorted, nor is the matrix returned in a sorted state
1481  eigen_assert(wi.size() == m_innerSize);
1482  constexpr StorageIndex kEmptyIndexValue(-1);
1483  wi.setConstant(kEmptyIndexValue);
1484  StorageIndex count = 0;
1485  const bool is_compressed = isCompressed();
1486  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1487  for (Index j = 0; j < m_outerSize; ++j) {
1488  const StorageIndex newBegin = count;
1489  const StorageIndex end = is_compressed ? m_outerIndex[j + 1] : m_outerIndex[j] + m_innerNonZeros[j];
1490  for (StorageIndex k = m_outerIndex[j]; k < end; ++k) {
1492  if (wi(i) >= newBegin) {
1493  // entry at k is a duplicate
1494  // accumulate it into the primary entry located at wi(i)
1495  m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1496  } else {
1497  // k is the primary entry in j with inner index i
1498  // shift it to the left and record its location at wi(i)
1499  m_data.index(count) = i;
1500  m_data.value(count) = m_data.value(k);
1501  wi(i) = count;
1502  ++count;
1503  }
1504  }
1505  m_outerIndex[j] = newBegin;
1506  }
1507  m_outerIndex[m_outerSize] = count;
1508  m_data.resize(count);
1509 
1510  // turn the matrix into compressed form (if it is not already)
1511  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
1512  m_innerNonZeros = 0;
1513 }
int i
Definition: BiCGSTAB_step_by_step.cpp:9
char char char int int * k
Definition: level2_impl.h:374

References eigen_assert, Eigen::placeholders::end, i, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::index(), j, k, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::resize(), Eigen::DenseBase< Derived >::setConstant(), and Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::value().

◆ cols()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Index Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::cols ( ) const
inline
Returns
the number of columns of the matrix
161 { return IsRowMajor ? m_innerSize : m_outerSize; }

References Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::IsRowMajor, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerSize, and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerSize.

Referenced by gdb.printers._MatrixEntryIterator::__next__(), Eigen::IncompleteCholesky< Scalar, UpLo_, OrderingType_ >::analyzePattern(), Eigen::SparseQR< MatrixType_, OrderingType_ >::analyzePattern(), Eigen::SparseLU< MatrixType_, OrderingType_ >::analyzePattern(), benchBasic(), Eigen::internal::bicgstab(), checkOptimalTraversal_impl(), gdb.printers.EigenMatrixPrinter::children(), gdb.printers.EigenSparseMatrixPrinter::children(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::coeff(), Eigen::internal::coletree(), MatrixReplacement::cols(), Eigen::SimplicialCholeskyBase< Derived >::cols(), Eigen::SparseLU< MatrixType_, OrderingType_ >::cols(), Eigen::SparseQR< MatrixType_, OrderingType_ >::cols(), Eigen::SuperLUBase< MatrixType_, Derived >::cols(), Eigen::IncompleteLU< Scalar_ >::cols(), Eigen::IncompleteCholesky< Scalar, UpLo_, OrderingType_ >::cols(), Eigen::IncompleteLUT< Scalar_, StorageIndex_ >::cols(), Eigen::PastixBase< Derived >::compute(), Eigen::IncompleteLU< Scalar_ >::compute(), Eigen::IterScaling< MatrixType_ >::compute(), Eigen::SPQR< MatrixType_ >::compute(), Eigen::internal::conjugate_gradient(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::conservativeResize(), sparseGaussianTest< Scalar >::df(), dostuff(), Eigen::SimplicialCholeskyBase< Derived >::dumpMemory(), eiToDense(), eiToGmm(), eiToMtl(), eiToUblas(), Eigen::SparseQR< MatrixType_, OrderingType_ >::factorize(), Eigen::IncompleteCholesky< Scalar, UpLo_, OrderingType_ >::factorize(), Eigen::DiagonalPreconditioner< Scalar_ >::factorize(), Eigen::LeastSquareDiagonalPreconditioner< Scalar_ >::factorize(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::findOrInsertCoeff(), Eigen::internal::householder_qr_inplace_unblocked(), Eigen::internal::householder_qr_inplace_update(), initSPD(), Eigen::internal::insert_from_triplets(), Eigen::internal::insert_from_triplets_sorted(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertEmptyOuterVectors(), Eigen::internal::least_square_conjugate_gradient(), Eigen::internal::llt_rank_update_lower(), main(), Eigen::SluMatrix::Map(), Eigen::internal::minres(), Eigen::COLAMDOrdering< StorageIndex >::operator()(), Eigen::MatrixMarketIterator< Scalar >::refX(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::removeOuterVectors(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::resize(), Eigen::MatrixMarketIterator< Scalar >::rhs(), Eigen::internal::visitor_impl< Visitor, Derived, Dynamic, false, false, ShortCircuitEvaluation >::run(), Eigen::internal::visitor_impl< Visitor, Derived, Dynamic, true, false, ShortCircuitEvaluation >::run(), Eigen::internal::permutation_matrix_product< ExpressionType, Side, Transposed, DenseShape >::run(), Eigen::internal::householder_qr_inplace_blocked< MatrixQR, HCoeffs, MatrixQRScalar, InnerStrideIsOne >::run(), Eigen::internal::lapacke_helpers::lapacke_hqr< MatrixQR, HCoeffs >::run(), Eigen::selfadjoint_product_selector< MatrixType, OtherType, UpLo, false >::run(), Eigen::general_product_to_triangular_selector< MatrixType, ProductType, UpLo, false >::run(), Eigen::SluMatrixMapHelper< Matrix< Scalar, Rows, Cols, Options, MRows, MCols > >::run(), Eigen::SluMatrixMapHelper< SparseMatrixBase< Derived > >::run(), Eigen::internal::direct_selfadjoint_eigenvalues< SolverType, 3, false >::run(), Eigen::internal::direct_selfadjoint_eigenvalues< SolverType, 2, false >::run(), Eigen::saveMarket(), Eigen::saveMarketDense(), Eigen::internal::set_from_triplets(), Eigen::internal::set_from_triplets_sorted(), 
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix(), gdb.printers.EigenMatrixPrinter::to_string(), gdb.printers.EigenSparseMatrixPrinter::to_string(), Eigen::internal::tridiagonalization_inplace(), Eigen::internal::llt_inplace< Scalar, Lower >::unblocked(), Eigen::internal::ldlt_inplace< Lower >::unblocked(), Eigen::internal::ldlt_inplace< Lower >::updateInPlace(), Eigen::internal::upperbidiagonalization_inplace_unblocked(), use_n_times(), and Eigen::viewAsCholmod().

◆ conservativeResize()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::conservativeResize ( Index  rows,
Index  cols 
)
inline

Resizes the matrix to a rows x cols matrix leaving old values untouched.

If the sizes of the matrix are decreased, then the matrix is turned into uncompressed mode and the storage of the out-of-bounds coefficients is kept and reserved. Call makeCompressed() to pack the entries and squeeze extra memory.

See also
reserve(), setZero(), makeCompressed()
681  {
682  // If one dimension is null, then there is nothing to be preserved
683  if (rows == 0 || cols == 0) return resize(rows, cols);
684 
685  Index newOuterSize = IsRowMajor ? rows : cols;
686  Index newInnerSize = IsRowMajor ? cols : rows;
687 
688  Index innerChange = newInnerSize - m_innerSize;
689  Index outerChange = newOuterSize - m_outerSize;
690 
691  if (outerChange != 0) {
692  m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, newOuterSize + 1,
693  m_outerSize + 1);
694 
695  if (!isCompressed())
696  m_innerNonZeros = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_innerNonZeros,
697  newOuterSize, m_outerSize);
698 
699  if (outerChange > 0) {
701  using std::fill_n;
702  fill_n(m_outerIndex + m_outerSize, outerChange + 1, lastIdx);
703 
704  if (!isCompressed()) fill_n(m_innerNonZeros + m_outerSize, outerChange, StorageIndex(0));
705  }
706  }
707  m_outerSize = newOuterSize;
708 
709  if (innerChange < 0) {
710  for (Index j = 0; j < m_outerSize; j++) {
713  Index lb = m_data.searchLowerIndex(start, end, newInnerSize);
714  if (lb != end) {
715  uncompress();
717  }
718  }
719  }
720  m_innerSize = newInnerSize;
721 
722  Index newSize = m_outerIndex[m_outerSize];
723  eigen_assert(newSize <= m_data.size());
724  m_data.resize(newSize);
725  }
void uncompress()
Definition: SparseMatrix.h:622
void start(const unsigned &i)
(Re-)start i-th timer
Definition: oomph_utilities.cc:243

References Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::cols(), eigen_assert, Eigen::placeholders::end, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::isCompressed(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::IsRowMajor, j, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_data, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerNonZeros, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerSize, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerIndex, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerSize, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::resize(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::resize(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::rows(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::searchLowerIndex(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::size(), oomph::CumulativeTimings::start(), and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::uncompress().

Referenced by Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertEmptyOuterVectors(), and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::removeOuterVectors().
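
A small sketch (values and sizes are arbitrary) contrasting conservativeResize() with resize():

  #include <Eigen/SparseCore>

  void resizeDemo() {  // hypothetical helper name
    Eigen::SparseMatrix<double> A(3, 3);
    A.insert(1, 1) = 7.0;
    A.conservativeResize(5, 5);  // A.coeff(1, 1) is still 7.0; the new rows/columns are empty
    A.makeCompressed();          // optional: squeeze the extra reserved storage

    Eigen::SparseMatrix<double> B(3, 3);
    B.insert(1, 1) = 7.0;
    B.resize(5, 5);              // resize() drops all stored entries: B is now empty
  }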

◆ data() [1/2]

◆ data() [2/2]

◆ diagonal() [1/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
DiagonalReturnType Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::diagonal ( )
inline
Returns
a read-write expression of the diagonal coefficients.
Warning
If the diagonal entries are written, then all diagonal entries must already exist, otherwise an assertion will be raised.
763 { return DiagonalReturnType(*this); }
Diagonal< SparseMatrix > DiagonalReturnType
Definition: SparseMatrix.h:136
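
To illustrate the warning above, a hedged sketch (assumes Eigen 3.4-style diagonal assignment; setIdentity() guarantees every diagonal entry is stored before it is written):

  #include <Eigen/SparseCore>

  void diagonalDemo() {  // hypothetical helper name
    Eigen::SparseMatrix<double> A(3, 3);
    A.setIdentity();                                    // stores all 3 diagonal entries
    A.diagonal() += Eigen::VectorXd::Constant(3, 1.0);  // safe: every diagonal entry exists
    Eigen::VectorXd d = A.diagonal();                   // read the diagonal back densely
  }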

◆ diagonal() [2/2]

◆ EIGEN_STATIC_ASSERT()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::EIGEN_STATIC_ASSERT ( (Options &(ColMajor|RowMajor))  = Options,
INVALID_MATRIX_TEMPLATE_PARAMETERS   
)
inlineprivate
1112  {
1113  default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
1114  inline bool operator()(const Index&, const Index&, const Scalar& value) const {
1115  return !internal::isMuchSmallerThan(value, reference, epsilon);
1116  }
1117  Scalar reference;
1119  };
SCALAR Scalar
Definition: bench_gemm.cpp:45
NumTraits< Scalar >::Real RealScalar
Definition: bench_gemm.cpp:46
double eps
Definition: crbond_bessel.cc:24
EIGEN_DEVICE_FUNC bool isMuchSmallerThan(const Scalar &x, const OtherScalar &y, const typename NumTraits< Scalar >::Real &precision=NumTraits< Scalar >::dummy_precision())
Definition: MathFunctions.h:1916
double epsilon
Definition: osc_ring_sarah_asymptotics.h:43

References CRBond_Bessel::eps, oomph::SarahBL::epsilon, Eigen::internal::isMuchSmallerThan(), and Eigen::value.

Referenced by Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix().

◆ finalize()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::finalize ( )
inline

◆ findOrInsertCoeff()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Scalar& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::findOrInsertCoeff ( Index  row,
Index  col,
bool inserted 
)
inline
Returns
a non-const reference to the value of the matrix at position i, j.

If the element does not exist then it is inserted via the insert(Index,Index) function, which itself turns the matrix into a non-compressed form if it was not already. The output parameter inserted is set to true.

Otherwise, if the element does exist, inserted will be set to false.

This is a O(log(nnz_j)) operation (binary search) plus the cost of insert(Index,Index) function if the element does not already exist.

231  {
232  eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
233  const Index outer = IsRowMajor ? row : col;
234  const Index inner = IsRowMajor ? col : row;
235  Index start = m_outerIndex[outer];
236  Index end = isCompressed() ? m_outerIndex[outer + 1] : m_outerIndex[outer] + m_innerNonZeros[outer];
237  eigen_assert(end >= start && "you probably called coeffRef on a non finalized matrix");
238  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
239  if (dst == end) {
240  Index capacity = m_outerIndex[outer + 1] - end;
241  if (capacity > 0) {
242  // implies uncompressed: push to back of vector
243  m_innerNonZeros[outer]++;
244  m_data.index(end) = StorageIndex(inner);
245  m_data.value(end) = Scalar(0);
246  if (inserted != nullptr) {
247  *inserted = true;
248  }
249  return m_data.value(end);
250  }
251  }
252  if ((dst < end) && (m_data.index(dst) == inner)) {
253  // this coefficient exists, return a reference to it
254  if (inserted != nullptr) {
255  *inserted = false;
256  }
257  return m_data.value(dst);
258  } else {
259  if (inserted != nullptr) {
260  *inserted = true;
261  }
262  // insertion will require reconfiguring the buffer
263  return insertAtByOuterInner(outer, inner, dst);
264  }
265  }
EIGEN_STRONG_INLINE Scalar & insertAtByOuterInner(Index outer, Index inner, Index dst)
Definition: SparseMatrix.h:1592

References col(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::cols(), eigen_assert, Eigen::placeholders::end, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::index(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertAtByOuterInner(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::isCompressed(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::IsRowMajor, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_data, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerNonZeros, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerIndex, row(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::rows(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::searchLowerIndex(), oomph::CumulativeTimings::start(), and Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::value().

Referenced by Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::coeffRef().
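
A minimal sketch (not from the original page) of the inserted output parameter:

  #include <Eigen/SparseCore>

  void findOrInsertDemo() {  // hypothetical helper name
    Eigen::SparseMatrix<double> A(2, 2);
    bool inserted = false;
    A.findOrInsertCoeff(0, 1, &inserted) = 5.0;   // entry did not exist yet: inserted == true
    A.findOrInsertCoeff(0, 1, &inserted) += 1.0;  // entry already exists:    inserted == false
  }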

◆ initAssignment()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename Other >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::initAssignment ( const Other &  other)
inlineprotected
945  {
946  resize(other.rows(), other.cols());
947  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
948  m_innerNonZeros = 0;
949  }

References resize().

Referenced by Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::operator=(), and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix().

◆ innerIndexPtr() [1/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
StorageIndex* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::innerIndexPtr ( )
inline
Returns
a non-const pointer to the array of inner indices. This function is aimed at interoperability with other libraries.
See also
valuePtr(), outerIndexPtr()
184 { return m_data.indexPtr(); }
const StorageIndex * indexPtr() const
Definition: CompressedStorage.h:100

References Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::indexPtr(), and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_data.
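
As an illustration of the interoperability note above, a minimal sketch (not from the original page) that walks the raw compressed arrays; makeCompressed() is called first so that the three pointers describe plain CSC storage:

  #include <Eigen/SparseCore>
  #include <cstdio>

  void rawArraysDemo() {  // hypothetical helper name
    Eigen::SparseMatrix<double> A(4, 4);  // default StorageIndex is int
    A.insert(1, 0) = 3.0;
    A.insert(3, 2) = 7.0;
    A.makeCompressed();

    const double* values = A.valuePtr();
    const int* inner = A.innerIndexPtr();  // row indices (column-major storage)
    const int* outer = A.outerIndexPtr();  // column start offsets, outerSize()+1 entries
    for (int j = 0; j < A.outerSize(); ++j)
      for (int k = outer[j]; k < outer[j + 1]; ++k)
        std::printf("A(%d, %d) = %g\n", inner[k], j, values[k]);
  }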

◆ innerIndexPtr() [2/2]

◆ innerNonZeroPtr() [1/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
StorageIndex* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::innerNonZeroPtr ( )
inline
Returns
a non-const pointer to the array of the number of non zeros of the inner vectors. This function is aimed at interoperability with other libraries.
Warning
it returns the null pointer 0 in compressed mode
202 { return m_innerNonZeros; }

References Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerNonZeros.

◆ innerNonZeroPtr() [2/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
const StorageIndex* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::innerNonZeroPtr ( ) const
inline
Returns
a const pointer to the array of the number of non zeros of the inner vectors. This function is aimed at interoperability with other libraries.
Warning
it returns the null pointer 0 in compressed mode
198 { return m_innerNonZeros; }

References Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerNonZeros.

Referenced by Eigen::SparseCompressedBase< Derived >::InnerIterator::InnerIterator(), Eigen::SparseCompressedBase< Derived >::ReverseInnerIterator::ReverseInnerIterator(), and Eigen::viewAsCholmod().

◆ innerSize()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Index Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::innerSize ( ) const
inline
Returns
the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major)
164 { return m_innerSize; }

References Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerSize.

Referenced by initSparse(), and Eigen::internal::set_from_triplets().

◆ insert()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
SparseMatrix< Scalar_, Options_, StorageIndex_ >::Scalar & Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insert ( Index  row,
Index  col 
)
inline
Returns
a reference to a newly inserted non-zero coefficient with coordinates row x col. The non-zero coefficient must not already exist.

If the matrix *this is in compressed mode, then *this is turned into uncompressed mode while reserving room for 2 x this->innerSize() non zeros if reserve(Index) has not been called earlier. In this case, the insertion procedure is optimized for a sequential insertion mode where elements are assumed to be inserted by increasing outer-indices.

If that's not the case, then it is strongly recommended to either use a triplet-list to assemble the matrix, or to first call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector.

Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1) if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random insertion.

1586  {
1587  return insertByOuterInner(IsRowMajor ? row : col, IsRowMajor ? col : row);
1588 }
Scalar & insertByOuterInner(Index j, Index i)
Definition: SparseMatrix.h:566

References col(), and row().

Referenced by fillMatrix(), fillMatrix2(), initSPD(), setinnerrand_eigen(), Eigen::SparseLUMatrixUReturnType< MatrixLType, MatrixUType >::toSparse(), and Eigen::SparseLUMatrixLReturnType< MappedSupernodalType >::toSparse().
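
Taken together with the reserve() advice above, a minimal sketch of a per-column fill (the sizes and the 4-non-zeros-per-column estimate are illustrative assumptions, not part of this API):

#include <Eigen/SparseCore>

Eigen::SparseMatrix<double> m(1000, 1000);          // column-major by default
m.reserve(Eigen::VectorXi::Constant(1000, 4));      // reserve ~4 non zeros per inner vector first
for (int j = 0; j < m.outerSize(); ++j)
  for (int i = j; i < m.rows() && i < j + 4; ++i)
    m.insert(i, j) = 1.0;                           // each (i, j) is inserted exactly once
m.makeCompressed();                                 // return to compressed storage when done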

◆ insertAtByOuterInner()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
EIGEN_STRONG_INLINE SparseMatrix< Scalar_, Options_, StorageIndex_ >::Scalar & Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertAtByOuterInner ( Index  outer,
Index  inner,
Index  dst 
)
protected
1592  {
1593  // random insertion into compressed matrix is very slow
1594  uncompress();
1595  return insertUncompressedAtByOuterInner(outer, inner, dst);
1596 }
Scalar & insertUncompressedAtByOuterInner(Index outer, Index inner, Index dst)
Definition: SparseMatrix.h:1665

Referenced by Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::findOrInsertCoeff(), and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertByOuterInner().

◆ insertBack()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Scalar& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertBack ( Index  row,
Index  col 
)
inline
Returns
a reference to the non zero coefficient at position row, col assuming that:
  • the nonzero does not already exist
  • the new coefficient is the last one according to the storage order

Before filling a given inner vector you must call the startVec(Index) function.

After an insertion session, you should call the finalize() function.

See also
insert, insertBackByOuterInner, startVec
424  {
425  return insertBackByOuterInner(IsRowMajor ? row : col, IsRowMajor ? col : row);
426  }
Scalar & insertBackByOuterInner(Index outer, Index inner)
Definition: SparseMatrix.h:430

References col(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertBackByOuterInner(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::IsRowMajor, and row().
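
For reference, a sketch of the full low-level insertion session built on startVec()/insertBack()/finalize(); sizes and values are illustrative, and the triplet-based API is usually preferable:

#include <Eigen/SparseCore>

Eigen::SparseMatrix<double> m(5, 5);                // column-major
m.reserve(2 * m.outerSize());                       // room for the entries inserted below
for (int j = 0; j < m.outerSize(); ++j) {
  m.startVec(j);                                    // open inner vector j
  m.insertBack(j, j) = 2.0;                         // inner indices must increase within a vector
  if (j + 1 < m.rows()) m.insertBack(j + 1, j) = -1.0;
}
m.finalize();                                       // close the insertion session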

◆ insertBackByOuterInner()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Scalar& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertBackByOuterInner ( Index  outer,
Index  inner 
)
inline
See also
insertBack, startVec
430  {
431  eigen_assert(Index(m_outerIndex[outer + 1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
432  eigen_assert((m_outerIndex[outer + 1] - m_outerIndex[outer] == 0 || m_data.index(m_data.size() - 1) < inner) &&
433  "Invalid ordered insertion (invalid inner index)");
434  StorageIndex p = m_outerIndex[outer + 1];
435  ++m_outerIndex[outer + 1];
436  m_data.append(Scalar(0), inner);
437  return m_data.value(p);
438  }
void append(const Scalar &v, Index i)
Definition: CompressedStorage.h:87

References Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::append(), eigen_assert, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::index(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_data, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerIndex, p, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::size(), and Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::value().

Referenced by Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertBack().

◆ insertBackByOuterInnerUnordered()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Scalar& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertBackByOuterInnerUnordered ( Index  outer,
Index  inner 
)
inline

◆ insertBackUncompressed()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
EIGEN_STRONG_INLINE Scalar& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertBackUncompressed ( Index  row,
Index  col 
)
inline
See also
insert(Index,Index)
975  {
976  const Index outer = IsRowMajor ? row : col;
977  const Index inner = IsRowMajor ? col : row;
978 
980  eigen_assert(m_innerNonZeros[outer] <= (m_outerIndex[outer + 1] - m_outerIndex[outer]));
981 
982  Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
983  m_data.index(p) = StorageIndex(inner);
984  m_data.value(p) = Scalar(0);
985  return m_data.value(p);
986  }

References col(), eigen_assert, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::index(), p, row(), and Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::value().

◆ insertByOuterInner()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Scalar& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertByOuterInner ( Index  j,
Index  i 
)
inline

Same as insert(Index,Index), except that the indices are given relative to the storage order.

566  {
567  eigen_assert(j >= 0 && j < m_outerSize && "invalid outer index");
568  eigen_assert(i >= 0 && i < m_innerSize && "invalid inner index");
569  Index start = m_outerIndex[j];
570  Index end = isCompressed() ? m_outerIndex[j + 1] : start + m_innerNonZeros[j];
571  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, i);
572  if (dst == end) {
573  Index capacity = m_outerIndex[j + 1] - end;
574  if (capacity > 0) {
575  // implies uncompressed: push to back of vector
576  m_innerNonZeros[j]++;
577  m_data.index(end) = StorageIndex(i);
578  m_data.value(end) = Scalar(0);
579  return m_data.value(end);
580  }
581  }
582  eigen_assert((dst == end || m_data.index(dst) != i) &&
583  "you cannot insert an element that already exists, you must call coeffRef to this end");
584  return insertAtByOuterInner(j, i, dst);
585  }

References eigen_assert, Eigen::placeholders::end, i, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::index(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertAtByOuterInner(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::isCompressed(), j, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_data, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerNonZeros, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerSize, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerIndex, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerSize, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::searchLowerIndex(), oomph::CumulativeTimings::start(), and Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::value().

Referenced by initSparse().

◆ insertCompressed()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
EIGEN_DEPRECATED EIGEN_DONT_INLINE SparseMatrix< Scalar_, Options_, StorageIndex_ >::Scalar & Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertCompressed ( Index  row,
Index  col 
)
protected
See also
insert(Index,Index)
1624  {
1626  Index outer = IsRowMajor ? row : col;
1627  Index inner = IsRowMajor ? col : row;
1628  Index start = m_outerIndex[outer];
1629  Index end = m_outerIndex[outer + 1];
1630  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
1631  eigen_assert((dst == end || m_data.index(dst) != inner) &&
1632  "you cannot insert an element that already exists, you must call coeffRef to this end");
1633  return insertCompressedAtByOuterInner(outer, inner, dst);
1634 }
Scalar & insertCompressedAtByOuterInner(Index outer, Index inner, Index dst)
Definition: SparseMatrix.h:1638

References col(), eigen_assert, Eigen::placeholders::end, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::index(), row(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::searchLowerIndex(), and oomph::CumulativeTimings::start().

◆ insertCompressedAtByOuterInner()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
SparseMatrix< Scalar_, Options_, StorageIndex_ >::Scalar & Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertCompressedAtByOuterInner ( Index  outer,
Index  inner,
Index  dst 
)
protected
1638  {
1640  // compressed insertion always requires expanding the buffer
1641  // first, check if there is adequate allocated memory
1642  if (m_data.allocatedSize() <= m_data.size()) {
1643  // if there is no capacity for a single insertion, double the capacity
1644  // increase capacity by a minimum of 32
1645  Index minReserve = 32;
1646  Index reserveSize = numext::maxi(minReserve, m_data.allocatedSize());
1647  m_data.reserve(reserveSize);
1648  }
1649  m_data.resize(m_data.size() + 1);
1650  Index chunkSize = m_outerIndex[m_outerSize] - dst;
1651  // shift the existing data to the right if necessary
1652  m_data.moveChunk(dst, dst + 1, chunkSize);
1653  // update nonzero counts
1654  // potentially O(outerSize) bottleneck!
1655  for (Index j = outer; j < m_outerSize; j++) m_outerIndex[j + 1]++;
1656  // initialize the coefficient
1657  m_data.index(dst) = StorageIndex(inner);
1658  m_data.value(dst) = Scalar(0);
1659  // return a reference to the coefficient
1660  return m_data.value(dst);
1661 }
void reserve(Index size)
Definition: CompressedStorage.h:63
Index allocatedSize() const
Definition: CompressedStorage.h:95
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T maxi(const T &x, const T &y)
Definition: MathFunctions.h:926

References Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::allocatedSize(), eigen_assert, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::index(), j, Eigen::numext::maxi(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::moveChunk(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::reserve(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::resize(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::size(), and Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::value().

◆ insertEmptyOuterVectors()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertEmptyOuterVectors ( Index  j,
Index  num = 1 
)
inline
509  {
510  using std::fill_n;
511  eigen_assert(num >= 0 && j >= 0 && j < m_outerSize && "Invalid parameters");
512 
513  const Index newRows = IsRowMajor ? m_outerSize + num : rows();
514  const Index newCols = IsRowMajor ? cols() : m_outerSize + num;
515 
516  const Index begin = j;
517  const Index end = m_outerSize;
518  const Index target = j + num;
519 
520  // expand the matrix to the larger size
521  conservativeResize(newRows, newCols);
522 
523  // shift m_outerIndex and m_innerNonZeros [num] to the right
524  internal::smart_memmove(m_outerIndex + begin, m_outerIndex + end + 1, m_outerIndex + target);
525  // m_outerIndex[begin] == m_outerIndex[target], set all indices in this range to same value
526  fill_n(m_outerIndex + begin, num, m_outerIndex[begin]);
527 
528  if (!isCompressed()) {
529  internal::smart_memmove(m_innerNonZeros + begin, m_innerNonZeros + end, m_innerNonZeros + target);
530  // set the nonzeros of the newly inserted vectors to 0
531  fill_n(m_innerNonZeros + begin, num, StorageIndex(0));
532  }
533  }
void conservativeResize(Index rows, Index cols)
Definition: SparseMatrix.h:681
void smart_memmove(const T *start, const T *end, T *target)
Definition: Memory.h:594

References Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::cols(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::conservativeResize(), eigen_assert, Eigen::placeholders::end, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::isCompressed(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::IsRowMajor, j, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerNonZeros, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerIndex, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerSize, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::rows(), and Eigen::internal::smart_memmove().

◆ insertFromSortedTriplets() [1/2]

template<typename Scalar , int Options_, typename StorageIndex_ >
template<typename InputIterators >
void Eigen::SparseMatrix< Scalar, Options_, StorageIndex_ >::insertFromSortedTriplets ( const InputIterators &  begin,
const InputIterators &  end 
)

The same as insertFromTriplets but triplets are assumed to be pre-sorted. This is faster and requires less temporary storage. Two triplets a and b are appropriately ordered if:

ColMajor: (a.col() != b.col()) ? (a.col() < b.col()) : (a.row() < b.row())
RowMajor: (a.row() != b.row()) ? (a.row() < b.row()) : (a.col() < b.col())
1451  {
1452  internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
1453  begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1454 }

References Eigen::placeholders::end.
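
As a sketch, a ColMajor matrix can be fed pre-sorted triplets by sorting them with a comparator that mirrors the rule above; the triplet values here are illustrative:

#include <algorithm>
#include <vector>
#include <Eigen/SparseCore>

std::vector<Eigen::Triplet<double>> triplets = {{2, 0, 1.0}, {0, 1, 2.0}, {1, 0, 3.0}};
std::sort(triplets.begin(), triplets.end(),
          [](const Eigen::Triplet<double>& a, const Eigen::Triplet<double>& b) {
            return (a.col() != b.col()) ? (a.col() < b.col()) : (a.row() < b.row());
          });
Eigen::SparseMatrix<double> m(3, 3);                // ColMajor
m.insertFromSortedTriplets(triplets.begin(), triplets.end());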

◆ insertFromSortedTriplets() [2/2]

template<typename Scalar , int Options_, typename StorageIndex_ >
template<typename InputIterators , typename DupFunctor >
void Eigen::SparseMatrix< Scalar, Options_, StorageIndex_ >::insertFromSortedTriplets ( const InputIterators &  begin,
const InputIterators &  end,
DupFunctor  dup_func 
)

The same as insertFromSortedTriplets but when duplicates are met the functor dup_func is applied:

value = dup_func(OldValue, NewValue)

Here is a C++11 example keeping the latest entry only:

mat.insertFromSortedTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
void insertFromSortedTriplets(const InputIterators &begin, const InputIterators &end)
Definition: SparseMatrix.h:1450
1469  {
1470  internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
1471  begin, end, *this, dup_func);
1472 }

References Eigen::placeholders::end.

◆ insertFromTriplets() [1/2]

template<typename Scalar , int Options_, typename StorageIndex_ >
template<typename InputIterators >
void Eigen::SparseMatrix< Scalar, Options_, StorageIndex_ >::insertFromTriplets ( const InputIterators &  begin,
const InputIterators &  end 
)

Insert a batch of elements into the matrix *this with the list of triplets defined in the half-open range from begin to end.

A triplet is a tuple (i,j,value) defining a non-zero element. The input list of triplets does not have to be sorted, and may contain duplicated elements. In any case, the result is a sorted and compressed sparse matrix where the duplicates have been summed up. This is an O(n) operation, with n the number of triplet elements. The initial contents of *this are preserved (except for the summation of duplicate elements). The matrix *this must be properly sized beforehand. The sizes are not extracted from the triplet list.

The InputIterators value_type must provide the following interface:

Scalar value() const; // the value
IndexType row() const; // the row index i
IndexType col() const; // the column index j

See for instance the Eigen::Triplet template class.

Here is a typical usage example:

SparseMatrixType m(rows,cols); // m contains nonzero entries
typedef Triplet<double> T;
std::vector<T> tripletList;
tripletList.reserve(estimation_of_entries);
for(...)
{
// ...
tripletList.push_back(T(i,j,v_ij));
}
m.insertFromTriplets(tripletList.begin(), tripletList.end());
// m is ready to go!
Warning
The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather be explicitly stored into a std::vector for instance.
1422  {
1423  internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
1424  begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1425 }

References Eigen::placeholders::end.
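
A minimal worked example, assuming a small 4x4 matrix with illustrative values; existing entries are kept and duplicates are summed by default:

#include <vector>
#include <Eigen/SparseCore>

Eigen::SparseMatrix<double> m(4, 4);
m.insert(0, 0) = 1.0;                               // pre-existing entry, preserved
std::vector<Eigen::Triplet<double>> extra;
extra.emplace_back(0, 0, 2.0);                      // duplicate of (0,0): summed into it
extra.emplace_back(2, 3, 5.0);
m.insertFromTriplets(extra.begin(), extra.end());
// now m.coeff(0, 0) == 3.0, m.coeff(2, 3) == 5.0, and m is compressed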

◆ insertFromTriplets() [2/2]

template<typename Scalar , int Options_, typename StorageIndex_ >
template<typename InputIterators , typename DupFunctor >
void Eigen::SparseMatrix< Scalar, Options_, StorageIndex_ >::insertFromTriplets ( const InputIterators &  begin,
const InputIterators &  end,
DupFunctor  dup_func 
)

The same as insertFromTriplets but when duplicates are met the functor dup_func is applied:

value = dup_func(OldValue, NewValue)

Here is a C++11 example keeping the latest entry only:

mat.insertFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
void insertFromTriplets(const InputIterators &begin, const InputIterators &end)
Definition: SparseMatrix.h:1421
1439  {
1440  internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
1441  begin, end, *this, dup_func);
1442 }

References Eigen::placeholders::end.

◆ insertUncompressed()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
EIGEN_DEPRECATED EIGEN_DONT_INLINE SparseMatrix< Scalar_, Options_, StorageIndex_ >::Scalar & Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertUncompressed ( Index  row,
Index  col 
)
protected
See also
insert(Index,Index)
1600  {
1602  Index outer = IsRowMajor ? row : col;
1603  Index inner = IsRowMajor ? col : row;
1604  Index start = m_outerIndex[outer];
1605  Index end = start + m_innerNonZeros[outer];
1606  Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
1607  if (dst == end) {
1608  Index capacity = m_outerIndex[outer + 1] - end;
1609  if (capacity > 0) {
1610  // implies uncompressed: push to back of vector
1611  m_innerNonZeros[outer]++;
1612  m_data.index(end) = StorageIndex(inner);
1613  m_data.value(end) = Scalar(0);
1614  return m_data.value(end);
1615  }
1616  }
1617  eigen_assert((dst == end || m_data.index(dst) != inner) &&
1618  "you cannot insert an element that already exists, you must call coeffRef to this end");
1619  return insertUncompressedAtByOuterInner(outer, inner, dst);
1620 }

References col(), eigen_assert, Eigen::placeholders::end, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::index(), row(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::searchLowerIndex(), oomph::CumulativeTimings::start(), and Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::value().

◆ insertUncompressedAtByOuterInner()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
SparseMatrix< Scalar_, Options_, StorageIndex_ >::Scalar & Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertUncompressedAtByOuterInner ( Index  outer,
Index  inner,
Index  dst 
)
protected
1665  {
1667  // find a vector with capacity, starting at `outer` and searching to the left and right
1668  for (Index leftTarget = outer - 1, rightTarget = outer; (leftTarget >= 0) || (rightTarget < m_outerSize);) {
1669  if (rightTarget < m_outerSize) {
1670  Index start = m_outerIndex[rightTarget];
1671  Index end = start + m_innerNonZeros[rightTarget];
1672  Index nextStart = m_outerIndex[rightTarget + 1];
1673  Index capacity = nextStart - end;
1674  if (capacity > 0) {
1675  // move [dst, end) to dst+1 and insert at dst
1676  Index chunkSize = end - dst;
1677  if (chunkSize > 0) m_data.moveChunk(dst, dst + 1, chunkSize);
1678  m_innerNonZeros[outer]++;
1679  for (Index j = outer; j < rightTarget; j++) m_outerIndex[j + 1]++;
1680  m_data.index(dst) = StorageIndex(inner);
1681  m_data.value(dst) = Scalar(0);
1682  return m_data.value(dst);
1683  }
1684  rightTarget++;
1685  }
1686  if (leftTarget >= 0) {
1687  Index start = m_outerIndex[leftTarget];
1688  Index end = start + m_innerNonZeros[leftTarget];
1689  Index nextStart = m_outerIndex[leftTarget + 1];
1690  Index capacity = nextStart - end;
1691  if (capacity > 0) {
1692  // tricky: dst is a lower bound, so we must insert at dst-1 when shifting left
1693  // move [nextStart, dst) to nextStart-1 and insert at dst-1
1694  Index chunkSize = dst - nextStart;
1695  if (chunkSize > 0) m_data.moveChunk(nextStart, nextStart - 1, chunkSize);
1696  m_innerNonZeros[outer]++;
1697  for (Index j = leftTarget; j < outer; j++) m_outerIndex[j + 1]--;
1698  m_data.index(dst - 1) = StorageIndex(inner);
1699  m_data.value(dst - 1) = Scalar(0);
1700  return m_data.value(dst - 1);
1701  }
1702  leftTarget--;
1703  }
1704  }
1705 
1706  // no room for interior insertion
1707  // nonZeros() == m_data.size()
1708  // record offset as outerIndxPtr will change
1709  Index dst_offset = dst - m_outerIndex[outer];
1710  // allocate space for random insertion
1711  if (m_data.allocatedSize() == 0) {
1712  // fast method to allocate space for one element per vector in empty matrix
1713  m_data.resize(m_outerSize);
1714  std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
1715  } else {
1716  // check for integer overflow: if maxReserveSize == 0, insertion is not possible
1717  Index maxReserveSize = static_cast<Index>(NumTraits<StorageIndex>::highest()) - m_data.allocatedSize();
1718  eigen_assert(maxReserveSize > 0);
1719  if (m_outerSize <= maxReserveSize) {
1720  // allocate space for one additional element per vector
1721  reserveInnerVectors(IndexVector::Constant(m_outerSize, 1));
1722  } else {
1723  // handle the edge case where StorageIndex is insufficient to reserve outerSize additional elements
1724  // allocate space for one additional element in the interval [outer,maxReserveSize)
1725  typedef internal::sparse_reserve_op<StorageIndex> ReserveSizesOp;
1726  typedef CwiseNullaryOp<ReserveSizesOp, IndexVector> ReserveSizesXpr;
1727  ReserveSizesXpr reserveSizesXpr(m_outerSize, 1, ReserveSizesOp(outer, m_outerSize, maxReserveSize));
1728  reserveInnerVectors(reserveSizesXpr);
1729  }
1730  }
1731  // insert element at `dst` with new outer indices
1732  Index start = m_outerIndex[outer];
1733  Index end = start + m_innerNonZeros[outer];
1734  Index new_dst = start + dst_offset;
1735  Index chunkSize = end - new_dst;
1736  if (chunkSize > 0) m_data.moveChunk(new_dst, new_dst + 1, chunkSize);
1737  m_innerNonZeros[outer]++;
1738  m_data.index(new_dst) = StorageIndex(inner);
1739  m_data.value(new_dst) = Scalar(0);
1740  return m_data.value(new_dst);
1741 }
void reserveInnerVectors(const SizesType &reserveSizes)
Definition: SparseMatrix.h:345

References Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::allocatedSize(), eigen_assert, Eigen::placeholders::end, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::index(), j, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::moveChunk(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::resize(), oomph::CumulativeTimings::start(), and Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::value().

◆ isCompressed()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
bool Eigen::SparseCompressedBase< Derived >::isCompressed
inline
Returns
whether *this is in compressed form.
114 { return innerNonZeroPtr() == 0; }
const StorageIndex * innerNonZeroPtr() const
Definition: SparseMatrix.h:198

Referenced by Eigen::SparseQR< MatrixType_, OrderingType_ >::analyzePattern(), Eigen::SparseLU< MatrixType_, OrderingType_ >::analyzePattern(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::conservativeResize(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::finalize(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::findOrInsertCoeff(), Eigen::SparseCompressedBase< Derived >::InnerIterator::InnerIterator(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertByOuterInner(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertEmptyOuterVectors(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::makeCompressed(), Eigen::COLAMDOrdering< StorageIndex >::operator()(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::operator=(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::prune(), Eigen::Ref< SparseMatrix< MatScalar, MatOptions, MatIndex >, Options, StrideType >::Ref(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::removeOuterVectors(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::reserve(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::reserveInnerVectors(), Eigen::SparseCompressedBase< Derived >::ReverseInnerIterator::ReverseInnerIterator(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::uncompress(), and Eigen::viewAsCholmod().

◆ makeCompressed()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::makeCompressed ( )
inline

Turns the matrix into the compressed format.

589  {
590  if (isCompressed()) return;
591 
593 
596  // try to move fewer, larger contiguous chunks
597  Index copyStart = start;
598  Index copyTarget = m_innerNonZeros[0];
599  for (Index j = 1; j < m_outerSize; j++) {
601  StorageIndex nextStart = m_outerIndex[j + 1];
602  // dont forget to move the last chunk!
603  bool breakUpCopy = (end != nextStart) || (j == m_outerSize - 1);
604  if (breakUpCopy) {
605  Index chunkSize = end - copyStart;
606  if (chunkSize > 0) m_data.moveChunk(copyStart, copyTarget, chunkSize);
607  copyStart = nextStart;
608  copyTarget += chunkSize;
609  }
610  start = nextStart;
612  }
614 
615  // release as much memory as possible
616  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
617  m_innerNonZeros = 0;
618  m_data.squeeze();
619  }
#define eigen_internal_assert(x)
Definition: Macros.h:916
void squeeze()
Definition: CompressedStorage.h:68

References eigen_internal_assert, Eigen::placeholders::end, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::isCompressed(), j, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_data, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerNonZeros, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerIndex, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerSize, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::moveChunk(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::resize(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::squeeze(), and oomph::CumulativeTimings::start().

Referenced by Eigen::PardisoLU< MatrixType >::getMatrix(), Eigen::PardisoLLT< MatrixType, UpLo_ >::getMatrix(), Eigen::PardisoLDLT< MatrixType, Options >::getMatrix(), Eigen::SparseLUMatrixUReturnType< MatrixLType, MatrixUType >::toSparse(), and Eigen::SparseLUMatrixLReturnType< MappedSupernodalType >::toSparse().
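
A short sketch of the compressed/uncompressed round trip (sizes and values are illustrative):

#include <Eigen/SparseCore>

Eigen::SparseMatrix<double> m(100, 100);
m.reserve(Eigen::VectorXi::Constant(100, 2));       // reserve() switches *this to uncompressed mode
m.insert(3, 0) = 1.0;
m.insert(7, 1) = 2.0;
// here m.isCompressed() == false: free space is interleaved with the stored entries
m.makeCompressed();
// now m.isCompressed() == true: values and indices are contiguous and the
// inner-non-zero counts array has been released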

◆ nonZeros()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Index Eigen::SparseCompressedBase< Derived >::nonZeros
inline
Returns
the number of non zero coefficients
64  {
65  if (Derived::IsVectorAtCompileTime && outerIndexPtr() == 0)
66  return derived().nonZeros();
67  else if (derived().outerSize() == 0)
68  return 0;
69  else if (isCompressed())
70  return outerIndexPtr()[derived().outerSize()] - outerIndexPtr()[0];
71  else
72  return innerNonZeros().sum();
73  }
Eigen::Map< IndexVector > innerNonZeros()
Definition: SparseCompressedBase.h:55
const SparseMatrix< Scalar_, Options_, StorageIndex_ > & derived() const
Definition: SparseMatrixBase.h:144
Index outerSize() const
Definition: SparseMatrix.h:166
const StorageIndex * outerIndexPtr() const
Definition: SparseMatrix.h:189

Referenced by Eigen::SimplicialCholeskyBase< Derived >::_solve_impl(), Eigen::SparseQR< MatrixType_, OrderingType_ >::analyzePattern(), Browse_Matrices(), Eigen::internal::c_to_fortran_numbering(), doEigen(), Eigen::SimplicialCholeskyBase< Derived >::dumpMemory(), Eigen::internal::fortran_to_c_numbering(), Eigen::SparseCompressedBase< Derived >::InnerIterator::InnerIterator(), main(), Eigen::SluMatrix::Map(), Eigen::COLAMDOrdering< StorageIndex >::operator()(), Eigen::internal::permutation_matrix_product< ExpressionType, Side, Transposed, SparseShape >::permute_inner(), Eigen::internal::permutation_matrix_product< ExpressionType, Side, Transposed, SparseShape >::permute_outer(), Eigen::SparseCompressedBase< Derived >::ReverseInnerIterator::ReverseInnerIterator(), Eigen::SluMatrixMapHelper< SparseMatrixBase< Derived > >::run(), Eigen::saveMarket(), sparse_solvers(), and Eigen::viewAsCholmod().

◆ operator=() [1/8]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename OtherDerived >
SparseMatrix& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::operator= ( const EigenBase< OtherDerived > &  other)
inline

◆ operator=() [2/8]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename Lhs , typename Rhs >
SparseMatrix& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::operator= ( const Product< Lhs, Rhs, AliasFreeProduct > &  other)
inline

◆ operator=() [3/8]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename Lhs , typename Rhs >
SparseMatrix<Scalar, Options_, StorageIndex_>& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::operator= ( const Product< Lhs, Rhs, AliasFreeProduct > &  src)
168  {
169  // std::cout << "in Assignment : " << DstOptions << "\n";
170  SparseMatrix dst(src.rows(), src.cols());
171  internal::generic_product_impl<Lhs, Rhs>::evalTo(dst, src.lhs(), src.rhs());
172  this->swap(dst);
173  return *this;
174 }

References Eigen::Product< Lhs_, Rhs_, Option >::cols(), Eigen::Product< Lhs_, Rhs_, Option >::lhs(), Eigen::Product< Lhs_, Rhs_, Option >::rhs(), Eigen::Product< Lhs_, Rhs_, Option >::rows(), and Eigen::swap().

◆ operator=() [4/8]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
SparseMatrix& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::operator= ( const SparseMatrix< Scalar_, Options_, StorageIndex_ > &  other)
inline
855  {
856  if (other.isRValue()) {
857  swap(other.const_cast_derived());
858  } else if (this != &other) {
859 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
861 #endif
862  initAssignment(other);
863  if (other.isCompressed()) {
864  internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
865  m_data = other.m_data;
866  } else {
867  Base::operator=(other);
868  }
869  }
870  return *this;
871  }
EIGEN_DEVICE_FUNC void smart_copy(const T *start, const T *end, T *target)
Definition: Memory.h:569

References Eigen::SparseMatrixBase< Derived >::const_cast_derived(), EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::initAssignment(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::isCompressed(), Eigen::SparseMatrixBase< Derived >::isRValue(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_data, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerIndex, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerSize, Eigen::SparseCompressedBase< Derived >::operator=(), Eigen::internal::smart_copy(), and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::swap().

◆ operator=() [5/8]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename OtherDerived >
EIGEN_DONT_INLINE SparseMatrix& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::operator= ( const SparseMatrixBase< OtherDerived > &  other)

◆ operator=() [6/8]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename OtherDerived >
EIGEN_DONT_INLINE SparseMatrix<Scalar, Options_, StorageIndex_>& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::operator= ( const SparseMatrixBase< OtherDerived > &  other)
1519  {
1522  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1523 
1524 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1526 #endif
1527 
1528  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1529  if (needToTranspose) {
1530 #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1532 #endif
1533  // two passes algorithm:
1534  // 1 - compute the number of coeffs per dest inner vector
1535  // 2 - do the actual copy/eval
1536  // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1537  typedef
1539  OtherCopy;
1540  typedef internal::remove_all_t<OtherCopy> OtherCopy_;
1541  typedef internal::evaluator<OtherCopy_> OtherCopyEval;
1542  OtherCopy otherCopy(other.derived());
1543  OtherCopyEval otherCopyEval(otherCopy);
1544 
1545  SparseMatrix dest(other.rows(), other.cols());
1546  Eigen::Map<IndexVector>(dest.m_outerIndex, dest.outerSize()).setZero();
1547 
1548  // pass 1
1549  // FIXME the above copy could be merged with that pass
1550  for (Index j = 0; j < otherCopy.outerSize(); ++j)
1551  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) ++dest.m_outerIndex[it.index()];
1552 
1553  // prefix sum
1554  StorageIndex count = 0;
1555  IndexVector positions(dest.outerSize());
1556  for (Index j = 0; j < dest.outerSize(); ++j) {
1557  StorageIndex tmp = dest.m_outerIndex[j];
1558  dest.m_outerIndex[j] = count;
1559  positions[j] = count;
1560  count += tmp;
1561  }
1562  dest.m_outerIndex[dest.outerSize()] = count;
1563  // alloc
1564  dest.m_data.resize(count);
1565  // pass 2
1566  for (StorageIndex j = 0; j < otherCopy.outerSize(); ++j) {
1567  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) {
1568  Index pos = positions[it.index()]++;
1569  dest.m_data.index(pos) = j;
1570  dest.m_data.value(pos) = it.value();
1571  }
1572  }
1573  this->swap(dest);
1574  return *this;
1575  } else {
1576  if (other.isRValue()) {
1577  initAssignment(other.derived());
1578  }
1579  // there is no special optimization
1580  return Base::operator=(other.derived());
1581  }
1582 }
A matrix or vector expression mapping an existing array of data.
Definition: Map.h:96
EIGEN_DEVICE_FUNC constexpr EIGEN_STRONG_INLINE void resize(Index rows, Index cols)
Definition: PlainObjectBase.h:294
Base::IndexVector IndexVector
Definition: SparseMatrix.h:145
std::conditional_t< Evaluate, PlainObject, typename ref_selector< T >::type > type
Definition: XprHelper.h:549

References Eigen::SparseMatrixBase< Derived >::cols(), Eigen::SparseMatrixBase< Derived >::derived(), EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN, EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN, EIGEN_STATIC_ASSERT, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::index(), Eigen::SparseMatrixBase< Derived >::isRValue(), j, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_data, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerIndex, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::outerSize(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::resize(), Eigen::RowMajorBit, Eigen::SparseMatrixBase< Derived >::rows(), swap(), tmp, compute_granudrum_aor::type, and Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::value().
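
The transposed two-pass path above is what runs whenever the storage orders of the two sides differ; a short sketch with illustrative sizes and a single entry:

Eigen::SparseMatrix<double, Eigen::RowMajor> A(10, 10);
A.insert(0, 3) = 1.0;
Eigen::SparseMatrix<double, Eigen::ColMajor> B;
B = A;   // needToTranspose == true: pass 1 counts entries per destination inner vector, pass 2 copies them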

◆ operator=() [7/8]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename OtherDerived >
SparseMatrix& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::operator= ( SparseCompressedBase< OtherDerived > &&  other)
inline
892  {
893  *this = other.derived().markAsRValue();
894  return *this;
895  }

References Eigen::SparseMatrixBase< Derived >::derived().

◆ operator=() [8/8]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
SparseMatrix& Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::operator= ( SparseMatrix< Scalar_, Options_, StorageIndex_ > &&  other)
inline
873  {
874  this->swap(other);
875  return *this;
876  }

References Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::swap().

◆ outerIndexPtr() [1/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
StorageIndex* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::outerIndexPtr ( )
inline
Returns
a non-const pointer to the array of the starting positions of the inner vectors. This function is aimed at interoperability with other libraries.
See also
valuePtr(), innerIndexPtr()
193 { return m_outerIndex; }

References Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerIndex.

◆ outerIndexPtr() [2/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
const StorageIndex* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::outerIndexPtr ( ) const
inline

◆ outerSize()

◆ prune() [1/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename KeepFunc >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::prune ( const KeepFunc &  keep = KeepFunc())
inline

Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate keep. The functor type KeepFunc must implement the following function:

bool operator() (const Index& row, const Index& col, const Scalar& value) const;
See also
prune(Scalar,RealScalar)
646  {
647  StorageIndex k = 0;
648  for (Index j = 0; j < m_outerSize; ++j) {
649  StorageIndex previousStart = m_outerIndex[j];
650  if (isCompressed())
651  m_outerIndex[j] = k;
652  else
653  k = m_outerIndex[j];
654  StorageIndex end = isCompressed() ? m_outerIndex[j + 1] : previousStart + m_innerNonZeros[j];
655  for (StorageIndex i = previousStart; i < end; ++i) {
658  bool keepEntry = keep(row, col, m_data.value(i));
659  if (keepEntry) {
660  m_data.value(k) = m_data.value(i);
661  m_data.index(k) = m_data.index(i);
662  ++k;
663  } else if (!isCompressed())
664  m_innerNonZeros[j]--;
665  }
666  }
667  if (isCompressed()) {
669  m_data.resize(k, 0);
670  }
671  }

References col(), Eigen::placeholders::end, i, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::index(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::isCompressed(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::IsRowMajor, j, k, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_data, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerNonZeros, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerIndex, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerSize, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::resize(), row(), and Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::value().
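
For instance, a sketch that keeps only the upper-triangular, non-zero-valued entries; the matrix and the predicate are illustrative:

Eigen::SparseMatrix<double> m(4, 4);
m.insert(2, 1) = 1.0;                               // below the diagonal: will be pruned
m.insert(1, 3) = 2.0;                               // above the diagonal: kept
m.prune([](Eigen::Index row, Eigen::Index col, double value) {
  return row <= col && value != 0.0;                // keep upper-triangular, non-zero entries
});
// m.nonZeros() == 1 and m is in compressed mode afterwards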

◆ prune() [2/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::prune ( const Scalar reference,
const RealScalar epsilon = NumTraits<RealScalar>::dummy_precision() 
)
inline

Suppresses all nonzeros which are much smaller than reference under the tolerance epsilon

634  {
635  prune(default_prunning_func(reference, epsilon));
636  }
void prune(const Scalar &reference, const RealScalar &epsilon=NumTraits< RealScalar >::dummy_precision())
Definition: SparseMatrix.h:634

References oomph::SarahBL::epsilon.

◆ removeOuterVectors()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::removeOuterVectors ( Index  j,
Index  num = 1 
)
inline
476  {
477  eigen_assert(num >= 0 && j >= 0 && j + num <= m_outerSize && "Invalid parameters");
478 
479  const Index newRows = IsRowMajor ? m_outerSize - num : rows();
480  const Index newCols = IsRowMajor ? cols() : m_outerSize - num;
481 
482  const Index begin = j + num;
483  const Index end = m_outerSize;
484  const Index target = j;
485 
486  // if the removed vectors are not empty, uncompress the matrix
487  if (m_outerIndex[j + num] > m_outerIndex[j]) uncompress();
488 
489  // shift m_outerIndex and m_innerNonZeros [num] to the left
490  internal::smart_memmove(m_outerIndex + begin, m_outerIndex + end + 1, m_outerIndex + target);
491  if (!isCompressed())
492  internal::smart_memmove(m_innerNonZeros + begin, m_innerNonZeros + end, m_innerNonZeros + target);
493 
494  // if m_outerIndex[0] > 0, shift the data within the first vector while it is easy to do so
495  if (m_outerIndex[0] > StorageIndex(0)) {
496  uncompress();
497  const Index from = internal::convert_index<Index>(m_outerIndex[0]);
498  const Index to = Index(0);
499  const Index chunkSize = internal::convert_index<Index>(m_innerNonZeros[0]);
500  m_data.moveChunk(from, to, chunkSize);
501  m_outerIndex[0] = StorageIndex(0);
502  }
503 
504  // truncate the matrix to the smaller size
505  conservativeResize(newRows, newCols);
506  }

References Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::cols(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::conservativeResize(), eigen_assert, Eigen::placeholders::end, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::isCompressed(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::IsRowMajor, j, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_data, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerNonZeros, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerIndex, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerSize, Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::moveChunk(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::rows(), Eigen::internal::smart_memmove(), and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::uncompress().

◆ reserve() [1/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<class SizesType >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::reserve ( const SizesType &  reserveSizes,
const typename SizesType::value_type &  enableif = typename SizesType::value_type() 
)
inline
338  {
339  EIGEN_UNUSED_VARIABLE(enableif);
340  reserveInnerVectors(reserveSizes);
341  }
#define EIGEN_UNUSED_VARIABLE(var)
Definition: Macros.h:966

References EIGEN_UNUSED_VARIABLE, and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::reserveInnerVectors().
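
Typical usage is to pass one reservation size per inner vector, e.g. roughly 6 non zeros per column of a ColMajor matrix (the sizes are illustrative):

Eigen::SparseMatrix<double> m(1000, 500);
m.reserve(Eigen::VectorXi::Constant(500, 6));       // one entry per outer (column) vector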

◆ reserve() [2/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::reserve ( Index  reserveSize)
inline

◆ reserveInnerVectors()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<class SizesType >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::reserveInnerVectors ( const SizesType &  reserveSizes)
inlineprotected
345  {
346  if (isCompressed()) {
347  Index totalReserveSize = 0;
348  for (Index j = 0; j < m_outerSize; ++j) totalReserveSize += internal::convert_index<Index>(reserveSizes[j]);
349 
350  // if reserveSizes is empty, don't do anything!
351  if (totalReserveSize == 0) return;
352 
353  // turn the matrix into non-compressed mode
354  m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
355 
356  // temporarily use m_innerSizes to hold the new starting points.
357  StorageIndex* newOuterIndex = m_innerNonZeros;
358 
359  Index count = 0;
360  for (Index j = 0; j < m_outerSize; ++j) {
361  newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
362  Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
363  count += reserveSize + internal::convert_index<Index>(m_outerIndex[j + 1] - m_outerIndex[j]);
364  }
365 
366  m_data.reserve(totalReserveSize);
367  StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
368  for (Index j = m_outerSize - 1; j >= 0; --j) {
369  StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
370  StorageIndex begin = m_outerIndex[j];
371  StorageIndex end = begin + innerNNZ;
372  StorageIndex target = newOuterIndex[j];
373  internal::smart_memmove(innerIndexPtr() + begin, innerIndexPtr() + end, innerIndexPtr() + target);
374  internal::smart_memmove(valuePtr() + begin, valuePtr() + end, valuePtr() + target);
375  previousOuterIndex = m_outerIndex[j];
376  m_outerIndex[j] = newOuterIndex[j];
377  m_innerNonZeros[j] = innerNNZ;
378  }
379  if (m_outerSize > 0)
381  internal::convert_index<StorageIndex>(reserveSizes[m_outerSize - 1]);
382 
384  } else {
385  StorageIndex* newOuterIndex = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize + 1);
386 
387  Index count = 0;
388  for (Index j = 0; j < m_outerSize; ++j) {
389  newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
390  Index alreadyReserved =
391  internal::convert_index<Index>(m_outerIndex[j + 1] - m_outerIndex[j] - m_innerNonZeros[j]);
392  Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
393  Index toReserve = numext::maxi(reserveSize, alreadyReserved);
394  count += toReserve + internal::convert_index<Index>(m_innerNonZeros[j]);
395  }
396  newOuterIndex[m_outerSize] = internal::convert_index<StorageIndex>(count);
397 
398  m_data.resize(count);
399  for (Index j = m_outerSize - 1; j >= 0; --j) {
400  StorageIndex innerNNZ = m_innerNonZeros[j];
401  StorageIndex begin = m_outerIndex[j];
402  StorageIndex target = newOuterIndex[j];
403  m_data.moveChunk(begin, target, innerNNZ);
404  }
405 
406  std::swap(m_outerIndex, newOuterIndex);
407  internal::conditional_aligned_delete_auto<StorageIndex, true>(newOuterIndex, m_outerSize + 1);
408  }
409  }

References Eigen::placeholders::end, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::innerIndexPtr(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::isCompressed(), j, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_data, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerNonZeros, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerIndex, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerSize, Eigen::numext::maxi(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::moveChunk(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::reserve(), Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::resize(), Eigen::internal::smart_memmove(), swap(), and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::valuePtr().

Referenced by Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::reserve().

◆ resize()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::resize ( Index  rows,
Index  cols 
)
inline

Resizes the matrix to a rows x cols matrix and initializes it to zero.

This function does not free the currently allocated memory. To release as much memory as possible, call

this->data().squeeze();
constexpr Storage & data()
Definition: SparseMatrix.h:205

after resizing it.

See also
reserve(), setZero()
734  {
735  const Index outerSize = IsRowMajor ? rows : cols;
736  m_innerSize = IsRowMajor ? cols : rows;
737  m_data.clear();
738 
739  if ((m_outerIndex == 0) || (m_outerSize != outerSize)) {
740  m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, outerSize + 1,
741  m_outerSize + 1);
742  m_outerSize = outerSize;
743  }
744 
745  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
746  m_innerNonZeros = 0;
747 
748  using std::fill_n;
749  fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
750  }
void clear()
Definition: CompressedStorage.h:96

References Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::clear(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::cols(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::IsRowMajor, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_data, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerNonZeros, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerSize, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerIndex, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerSize, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::outerSize(), and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::rows().

Referenced by Eigen::IncompleteCholesky< Scalar, UpLo_, OrderingType_ >::analyzePattern(), cholesky_faillure_cases(), Eigen::IterScaling< MatrixType_ >::compute(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::conservativeResize(), Eigen::PardisoLLT< MatrixType, UpLo_ >::getMatrix(), Eigen::PardisoLDLT< MatrixType, Options >::getMatrix(), Eigen::loadMarket(), Eigen::loadMarketDense(), Eigen::internal::permute_symm_to_symm(), Eigen::internal::set_from_triplets_sorted(), and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix().

◆ resizeNonZeros()

◆ rows()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Index Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::rows ( ) const
inline
Returns
the number of rows of the matrix
159 { return IsRowMajor ? m_outerSize : m_innerSize; }

References Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::IsRowMajor, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_innerSize, and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerSize.

Referenced by gdb.printers._MatrixEntryIterator::__next__(), Eigen::SimplicialCholeskyBase< Derived >::_solve_impl(), Eigen::PastixBase< Derived >::analyzePattern(), Eigen::IncompleteCholesky< Scalar, UpLo_, OrderingType_ >::analyzePattern(), Eigen::SparseQR< MatrixType_, OrderingType_ >::analyzePattern(), benchBasic(), Browse_Matrices(), Eigen::internal::c_to_fortran_numbering(), checkOptimalTraversal_impl(), gdb.printers.EigenMatrixPrinter::children(), gdb.printers.EigenSparseMatrixPrinter::children(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::coeff(), Eigen::internal::coletree(), Eigen::PastixBase< Derived >::compute(), Eigen::IterScaling< MatrixType_ >::compute(), Eigen::SPQR< MatrixType_ >::compute(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::conservativeResize(), sparseGaussianTest< Scalar >::df(), Eigen::DGMRES< MatrixType_, Preconditioner_ >::dgmres(), Eigen::DGMRES< MatrixType_, Preconditioner_ >::dgmresCycle(), dostuff(), eiToGmm(), eiToUblas(), Eigen::PastixBase< Derived >::factorize(), Eigen::SparseQR< MatrixType_, OrderingType_ >::factorize(), Eigen::IncompleteCholesky< Scalar, UpLo_, OrderingType_ >::factorize(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::findOrInsertCoeff(), Eigen::internal::fortran_to_c_numbering(), Eigen::internal::gmres(), Eigen::internal::householder_qr_inplace_unblocked(), Eigen::internal::householder_qr_inplace_update(), initSPD(), Eigen::internal::insert_from_triplets(), Eigen::internal::insert_from_triplets_sorted(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertEmptyOuterVectors(), Eigen::internal::least_square_conjugate_gradient(), Eigen::internal::llt_rank_update_lower(), main(), Eigen::SluMatrix::Map(), Eigen::COLAMDOrdering< StorageIndex >::operator()(), Eigen::internal::permute_symm_to_fullsymm(), Eigen::internal::permute_symm_to_symm(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::removeOuterVectors(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::resize(), MatrixReplacement::rows(), Eigen::SimplicialCholeskyBase< Derived >::rows(), Eigen::SparseLU< MatrixType_, OrderingType_ >::rows(), Eigen::SparseQR< MatrixType_, OrderingType_ >::rows(), Eigen::SuperLUBase< MatrixType_, Derived >::rows(), Eigen::IncompleteLU< Scalar_ >::rows(), Eigen::IncompleteCholesky< Scalar, UpLo_, OrderingType_ >::rows(), Eigen::IncompleteLUT< Scalar_, StorageIndex_ >::rows(), Eigen::internal::visitor_impl< Visitor, Derived, Dynamic, false, false, ShortCircuitEvaluation >::run(), Eigen::internal::visitor_impl< Visitor, Derived, Dynamic, true, false, ShortCircuitEvaluation >::run(), Eigen::internal::permutation_matrix_product< ExpressionType, Side, Transposed, DenseShape >::run(), Eigen::internal::householder_qr_inplace_blocked< MatrixQR, HCoeffs, MatrixQRScalar, InnerStrideIsOne >::run(), Eigen::internal::lapacke_helpers::lapacke_hqr< MatrixQR, HCoeffs >::run(), Eigen::internal::tridiagonalization_inplace_selector< MatrixType, Size, IsComplex >::run(), Eigen::SluMatrixMapHelper< Matrix< Scalar, Rows, Cols, Options, MRows, MCols > >::run(), Eigen::SluMatrixMapHelper< SparseMatrixBase< Derived > >::run(), Eigen::internal::direct_selfadjoint_eigenvalues< SolverType, 3, false >::run(), Eigen::internal::direct_selfadjoint_eigenvalues< SolverType, 2, false >::run(), Eigen::saveMarket(), Eigen::saveMarketDense(), Eigen::internal::set_from_triplets(), Eigen::internal::set_from_triplets_sorted(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::SparseMatrix(), 
gdb.printers.EigenMatrixPrinter::to_string(), gdb.printers.EigenSparseMatrixPrinter::to_string(), Eigen::internal::tridiagonalization_inplace(), Eigen::internal::llt_inplace< Scalar, Lower >::unblocked(), Eigen::internal::ldlt_inplace< Lower >::unblocked(), Eigen::internal::ldlt_inplace< Lower >::updateInPlace(), Eigen::internal::upperbidiagonalization_inplace_unblocked(), use_n_times(), and Eigen::viewAsCholmod().

◆ setFromSortedTriplets() [1/2]

template<typename Scalar , int Options_, typename StorageIndex_ >
template<typename InputIterators >
void Eigen::SparseMatrix< Scalar, Options_, StorageIndex_ >::setFromSortedTriplets ( const InputIterators &  begin,
const InputIterators &  end 
)

The same as setFromTriplets but triplets are assumed to be pre-sorted. This is faster and requires less temporary storage. Two triplets a and b are appropriately ordered if:

ColMajor: (a.col() != b.col()) ? (a.col() < b.col()) : (a.row() < b.row())
RowMajor: (a.row() != b.row()) ? (a.row() < b.row()) : (a.col() < b.col())
1358  {
1359  internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
1360  begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
1361 }

References Eigen::placeholders::end.

◆ setFromSortedTriplets() [2/2]

template<typename Scalar , int Options_, typename StorageIndex_ >
template<typename InputIterators , typename DupFunctor >
void Eigen::SparseMatrix< Scalar, Options_, StorageIndex_ >::setFromSortedTriplets ( const InputIterators &  begin,
const InputIterators &  end,
DupFunctor  dup_func 
)

The same as setFromSortedTriplets but when duplicates are met the functor dup_func is applied:

value = dup_func(OldValue, NewValue)

Here is a C++11 example keeping the latest entry only:

mat.setFromSortedTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
void setFromSortedTriplets(const InputIterators &begin, const InputIterators &end)
Definition: SparseMatrix.h:1357
1376  {
1377  internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
1378  begin, end, *this, dup_func);
1379 }

References Eigen::placeholders::end.

◆ setFromTriplets() [1/2]

template<typename Scalar , int Options_, typename StorageIndex_ >
template<typename InputIterators >
void Eigen::SparseMatrix< Scalar, Options_, StorageIndex_ >::setFromTriplets ( const InputIterators &  begin,
const InputIterators &  end 
)

Fill the matrix *this with the list of triplets defined in the half-open range from begin to end.

A triplet is a tuple (i,j,value) defining a non-zero element. The input list of triplets does not have to be sorted, and may contain duplicated elements. In any case, the result is a sorted and compressed sparse matrix where the duplicates have been summed up. This is an O(n) operation, with n the number of triplet elements. The initial contents of *this are destroyed. The matrix *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor, or the resize(Index,Index) method. The sizes are not extracted from the triplet list.

The InputIterators value_type must provide the following interface:

Scalar value() const; // the value
IndexType row() const; // the row index i
IndexType col() const; // the column index j

See for instance the Eigen::Triplet template class.

Here is a typical usage example:

typedef Triplet<double> T;
std::vector<T> tripletList;
tripletList.reserve(estimation_of_entries);
for(...)
{
// ...
tripletList.push_back(T(i,j,v_ij));
}
SparseMatrixType m(rows,cols);
m.setFromTriplets(tripletList.begin(), tripletList.end());
// m is ready to go!
Warning
The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define an abstract iterator over a complex data structure that would be expensive to evaluate. The triplets should instead be stored explicitly, for instance in a std::vector.
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
      begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
}

References Eigen::placeholders::end.

Referenced by Eigen::loadMarket().
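
A self-contained variant of the usage example above (not from the reference text): the elided loop is replaced by an explicit, purely illustrative 3x3 fill so that the snippet compiles as-is.

#include <Eigen/SparseCore>
#include <vector>

int main() {
  typedef Eigen::Triplet<double> T;
  typedef Eigen::SparseMatrix<double> SparseMatrixType;

  const int rows = 3, cols = 3;
  std::vector<T> tripletList;
  tripletList.reserve(4);  // rough estimate of the number of entries

  // Illustrative fill: a diagonal plus one off-diagonal entry, in no particular order.
  tripletList.push_back(T(2, 2, 3.0));
  tripletList.push_back(T(0, 0, 1.0));
  tripletList.push_back(T(1, 1, 2.0));
  tripletList.push_back(T(0, 2, 4.0));

  SparseMatrixType m(rows, cols);
  m.setFromTriplets(tripletList.begin(), tripletList.end());  // sorts, sums duplicates, compresses
  // m is ready to go!
}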

◆ setFromTriplets() [2/2]

template<typename Scalar , int Options_, typename StorageIndex_ >
template<typename InputIterators , typename DupFunctor >
void Eigen::SparseMatrix< Scalar, Options_, StorageIndex_ >::setFromTriplets ( const InputIterators &  begin,
const InputIterators &  end,
DupFunctor  dup_func 
)

The same as setFromTriplets, but when duplicate entries are encountered the functor dup_func is applied to combine them:

value = dup_func(OldValue, NewValue)

Here is a C++11 example keeping the latest entry only:

mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
      begin, end, *this, dup_func);
}

References Eigen::placeholders::end.

◆ setIdentity()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::setIdentity ( )
inline

◆ setZero()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::setZero ( )
inline

◆ startVec()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::startVec ( Index  outer)
inline
See also
insertBack, insertBackByOuterInner
{
  eigen_assert(m_outerIndex[outer] == Index(m_data.size()) &&
               "You must call startVec for each inner vector sequentially");
  eigen_assert(m_outerIndex[outer + 1] == 0 && "You must call startVec for each inner vector sequentially");
  m_outerIndex[outer + 1] = m_outerIndex[outer];
}

References eigen_assert, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_data, Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerIndex, and Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::size().
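
For context, a minimal sketch (not from the reference text) of one way the low-level sequential-fill API that startVec belongs to can be used. It assumes a freshly sized, column-major matrix; each outer vector (column) must be started in order, and inner (row) indices must increase within it. The pattern and values are illustrative only.

#include <Eigen/SparseCore>

int main() {
  Eigen::SparseMatrix<double> m(4, 4);  // column-major: outer = column, inner = row
  m.reserve(8);                         // optional: preallocate room for the values

  for (int j = 0; j < 4; ++j) {
    m.startVec(j);                      // start outer vector j
    m.insertBack(j, j) = 1.0;           // rows must be inserted in increasing order
    if (j + 1 < 4) m.insertBack(j + 1, j) = 0.5;
  }
  m.finalize();                         // fill the trailing outer index pointers
}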

◆ sum()

template<typename Scalar_ , int Options_, typename Index_ >
internal::traits< SparseMatrix< Scalar_, Options_, Index_ > >::Scalar Eigen::SparseMatrix< Scalar_, Options_, Index_ >::sum

Overloaded for performance: when the matrix is compressed, the stored values are summed directly as a contiguous dense vector.

{
  eigen_assert(rows() > 0 && cols() > 0 && "you are using a non initialized matrix");
  if (this->isCompressed())
    return Matrix<Scalar, 1, Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();
  else
    return Base::sum();
}

References cols, eigen_assert, Eigen::PlainObjectBase< Matrix< Scalar_, Rows_, Cols_, Options_, MaxRows_, MaxCols_ > >::Map(), and rows.

Referenced by main().

◆ swap()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::swap ( SparseMatrix< Scalar_, Options_, StorageIndex_ > &  other)
inline

◆ uncompress()

template<typename Scalar_ , int Options_, typename StorageIndex_ >
void Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::uncompress ( )
inline

◆ valuePtr() [1/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Scalar* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::valuePtr ( )
inline
Returns
a non-const pointer to the array of values. This function is aimed at interoperability with other libraries.
See also
innerIndexPtr(), outerIndexPtr()
{ return m_data.valuePtr(); }

References Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_data, and Eigen::internal::CompressedStorage< Scalar_, StorageIndex_ >::valuePtr().
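
For illustration, a sketch (not from the reference text) of the kind of interoperability this enables: reading the compressed arrays directly, e.g. to hand them to a C library expecting CSC data (the default layout is column-major, and the matrix should be compressed first). The sizes and values are illustrative, and the default StorageIndex of int is assumed.

#include <Eigen/SparseCore>
#include <cstdio>

int main() {
  Eigen::SparseMatrix<double> m(3, 3);
  m.insert(0, 0) = 1.0;
  m.insert(2, 1) = 2.0;
  m.insert(1, 2) = 3.0;
  m.makeCompressed();  // required so the three arrays describe plain CSC storage

  const double* values = m.valuePtr();      // nonZeros() stored values
  const int*    inner  = m.innerIndexPtr(); // row index of each stored value
  const int*    outer  = m.outerIndexPtr(); // outerSize()+1 column start offsets

  for (int j = 0; j < m.outerSize(); ++j)
    for (int p = outer[j]; p < outer[j + 1]; ++p)
      std::printf("(%d,%d) = %g\n", inner[p], j, values[p]);
}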

◆ valuePtr() [2/2]

template<typename Scalar_ , int Options_, typename StorageIndex_ >
const Scalar* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::valuePtr ( ) const
inline
Returns
a const pointer to the array of values. This function is aimed at interoperability with other libraries.
See also
innerIndexPtr(), outerIndexPtr()

Friends And Related Function Documentation

◆ internal::Assignment

template<typename Scalar_ , int Options_, typename StorageIndex_ >
template<typename , typename , typename , typename , typename >
friend struct internal::Assignment
friend

◆ operator<<

template<typename Scalar_ , int Options_, typename StorageIndex_ >
std::ostream& operator<< ( std::ostream &  s,
const SparseMatrix< Scalar_, Options_, StorageIndex_ > &  m 
)
friend
{
  EIGEN_DBG_SPARSE(
      s << "Nonzero entries:\n"; if (m.isCompressed()) {
        for (Index i = 0; i < m.nonZeros(); ++i) s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
      } else {
        for (Index i = 0; i < m.outerSize(); ++i) {
          Index p = m.m_outerIndex[i];
          Index pe = m.m_outerIndex[i] + m.m_innerNonZeros[i];
          Index k = p;
          for (; k < pe; ++k) {
            s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
          }
          for (; k < m.m_outerIndex[i + 1]; ++k) {
            s << "(_,_) ";
          }
        }
      } s << std::endl;
      s << std::endl; s << "Outer pointers:\n";
      for (Index i = 0; i < m.outerSize(); ++i) { s << m.m_outerIndex[i] << " "; } s << " $" << std::endl;
      if (!m.isCompressed()) {
        s << "Inner non zeros:\n";
        for (Index i = 0; i < m.outerSize(); ++i) {
          s << m.m_innerNonZeros[i] << " ";
        }
        s << " $" << std::endl;
      } s
      << std::endl;);
  s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
  return s;
}

◆ SparseVector< Scalar_, 0, StorageIndex_ >

template<typename Scalar_ , int Options_, typename StorageIndex_ >
friend class SparseVector< Scalar_, 0, StorageIndex_ >
friend

◆ swap

template<typename Scalar_ , int Options_, typename StorageIndex_ >
EIGEN_DEVICE_FUNC void swap ( SparseMatrix< Scalar_, Options_, StorageIndex_ > &  a,
SparseMatrix< Scalar_, Options_, StorageIndex_ > &  b 
)
friend

Free-function swap.

{ a.swap(b); }
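
As a usage note (a sketch, not part of the documented API text): because this is a friend free function, an unqualified call picks it up via argument-dependent lookup, and it only exchanges the matrices' internal buffers.

#include <Eigen/SparseCore>

int main() {
  Eigen::SparseMatrix<double> a(3, 3), b(100, 100);
  swap(a, b);  // found via argument-dependent lookup; exchanges internal buffers in O(1)
}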

Member Data Documentation

◆ m_data

template<typename Scalar_ , int Options_, typename StorageIndex_ >
Storage Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_data
protected

Referenced by Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::coeff(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::conservativeResize(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::data(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::finalize(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::findOrInsertCoeff(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::innerIndexPtr(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertBackByOuterInner(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertBackByOuterInnerUnordered(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertByOuterInner(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::makeCompressed(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::operator=(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::prune(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::removeOuterVectors(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::reserve(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::reserveInnerVectors(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::resize(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::resizeNonZeros(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::setIdentity(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::setZero(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::startVec(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::swap(), Eigen::SparseVector< Scalar_, Options_, StorageIndex_ >::swap(), and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::valuePtr().

◆ m_innerNonZeros

◆ m_innerSize

◆ m_outerIndex

template<typename Scalar_ , int Options_, typename StorageIndex_ >
StorageIndex* Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::m_outerIndex
protected

Referenced by Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::coeff(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::conservativeResize(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::finalize(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::findOrInsertCoeff(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertBackByOuterInner(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertBackByOuterInnerUnordered(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertByOuterInner(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::insertEmptyOuterVectors(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::makeCompressed(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::operator=(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::outerIndexPtr(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::prune(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::removeOuterVectors(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::reserveInnerVectors(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::resize(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::setIdentity(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::setZero(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::startVec(), Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::swap(), and Eigen::SparseMatrix< Scalar_, Options_, StorageIndex_ >::uncompress().

◆ m_outerSize


The documentation for this class was generated from the following files: