Prusa Slicer 2.6.0
Loading...
Searching...
No Matches
Eigen::SparseLU< _MatrixType, _OrderingType > Class Template Reference

Sparse supernodal LU factorization for general matrices. More...

#include <src/eigen/Eigen/src/SparseLU/SparseLU.h>

+ Inheritance diagram for Eigen::SparseLU< _MatrixType, _OrderingType >:
+ Collaboration diagram for Eigen::SparseLU< _MatrixType, _OrderingType >:

Public Types

enum  { ColsAtCompileTime = MatrixType::ColsAtCompileTime , MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }
 
typedef _MatrixType MatrixType
 
typedef _OrderingType OrderingType
 
typedef MatrixType::Scalar Scalar
 
typedef MatrixType::RealScalar RealScalar
 
typedef MatrixType::StorageIndex StorageIndex
 
typedef SparseMatrix< Scalar, ColMajor, StorageIndex > NCMatrix
 
typedef internal::MappedSuperNodalMatrix< Scalar, StorageIndex > SCMatrix
 
typedef Matrix< Scalar, Dynamic, 1 > ScalarVector
 
typedef Matrix< StorageIndex, Dynamic, 1 > IndexVector
 
typedef PermutationMatrix< Dynamic, Dynamic, StorageIndex > PermutationType
 
typedef internal::SparseLUImpl< Scalar, StorageIndex > Base
 
typedef Matrix< _MatrixType::Scalar, Dynamic, Dynamic, ColMajor > ScalarMatrix
 
typedef Map< ScalarMatrix, 0, OuterStride<> > MappedMatrixBlock
 
typedef Ref< Matrix< _MatrixType::Scalar, Dynamic, 1 > > BlockScalarVector
 
typedef Ref< Matrix< _MatrixType::StorageIndex, Dynamic, 1 > > BlockIndexVector
 
typedef LU_GlobalLU_t< IndexVector, ScalarVector > GlobalLU_t
 

Public Member Functions

 SparseLU ()
 
 SparseLU (const MatrixType &matrix)
 
 ~SparseLU ()
 
void analyzePattern (const MatrixType &matrix)
 
void factorize (const MatrixType &matrix)
 
void simplicialfactorize (const MatrixType &matrix)
 
void compute (const MatrixType &matrix)
 
Index rows () const
 
Index cols () const
 
void isSymmetric (bool sym)
 
SparseLUMatrixLReturnType< SCMatrix > matrixL () const
 
SparseLUMatrixUReturnType< SCMatrix, MappedSparseMatrix< Scalar, ColMajor, StorageIndex > > matrixU () const
 
const PermutationType & rowsPermutation () const
 
const PermutationType & colsPermutation () const
 
void setPivotThreshold (const RealScalar &thresh)
 
ComputationInfo info () const
 Reports whether previous computation was successful.
 
std::string lastErrorMessage () const
 
template<typename Rhs , typename Dest >
bool _solve_impl (const MatrixBase< Rhs > &B, MatrixBase< Dest > &X_base) const
 
Scalar absDeterminant ()
 
Scalar logAbsDeterminant () const
 
Scalar signDeterminant ()
 
Scalar determinant ()
 
SparseLU< _MatrixType, _OrderingType > & derived ()
 
const SparseLU< _MatrixType, _OrderingType > & derived () const
 
const Solve< SparseLU< _MatrixType, _OrderingType >, Rhs > solve (const MatrixBase< Rhs > &b) const
 
const Solve< SparseLU< _MatrixType, _OrderingType >, Rhs > solve (const SparseMatrixBase< Rhs > &b) const
 
void _solve_impl (const SparseMatrixBase< Rhs > &b, SparseMatrixBase< Dest > &dest) const
 

Protected Types

typedef SparseSolverBase< SparseLU< _MatrixType, _OrderingType > > APIBase
 

Protected Member Functions

void initperfvalues ()
 
Index expand (VectorType &vec, Index &length, Index nbElts, Index keep_prev, Index &num_expansions)
 
Index memInit (Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size, GlobalLU_t &glu)
 Allocate various working space for the numerical factorization phase.
 
Index memXpand (VectorType &vec, Index &maxlen, Index nbElts, MemType memtype, Index &num_expansions)
 Expand the existing storage.
 
void heap_relax_snode (const Index n, IndexVector &et, const Index relax_columns, IndexVector &descendants, IndexVector &relax_end)
 Identify the initial relaxed supernodes.
 
void relax_snode (const Index n, IndexVector &et, const Index relax_columns, IndexVector &descendants, IndexVector &relax_end)
 Identify the initial relaxed supernodes.
 
Index snode_dfs (const Index jcol, const Index kcol, const MatrixType &mat, IndexVector &xprune, IndexVector &marker, GlobalLU_t &glu)
 
Index snode_bmod (const Index jcol, const Index fsupc, ScalarVector &dense, GlobalLU_t &glu)
 
Index pivotL (const Index jcol, const RealScalar &diagpivotthresh, IndexVector &perm_r, IndexVector &iperm_c, Index &pivrow, GlobalLU_t &glu)
 Performs the numerical pivoting on the current column of L, and the CDIV operation.
 
void dfs_kernel (const _MatrixType::StorageIndex jj, IndexVector &perm_r, Index &nseg, IndexVector &panel_lsub, IndexVector &segrep, Ref< IndexVector > repfnz_col, IndexVector &xprune, Ref< IndexVector > marker, IndexVector &parent, IndexVector &xplore, GlobalLU_t &glu, Index &nextl_col, Index krow, Traits &traits)
 
void panel_dfs (const Index m, const Index w, const Index jcol, MatrixType &A, IndexVector &perm_r, Index &nseg, ScalarVector &dense, IndexVector &panel_lsub, IndexVector &segrep, IndexVector &repfnz, IndexVector &xprune, IndexVector &marker, IndexVector &parent, IndexVector &xplore, GlobalLU_t &glu)
 Performs a symbolic factorization on a panel of columns [jcol, jcol+w)
 
void panel_bmod (const Index m, const Index w, const Index jcol, const Index nseg, ScalarVector &dense, ScalarVector &tempv, IndexVector &segrep, IndexVector &repfnz, GlobalLU_t &glu)
 Performs numeric block updates (sup-panel) in topological order.
 
Index column_dfs (const Index m, const Index jcol, IndexVector &perm_r, Index maxsuper, Index &nseg, BlockIndexVector lsub_col, IndexVector &segrep, BlockIndexVector repfnz, IndexVector &xprune, IndexVector &marker, IndexVector &parent, IndexVector &xplore, GlobalLU_t &glu)
 Performs a symbolic factorization on column jcol and decide the supernode boundary.
 
Index column_bmod (const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector &tempv, BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t &glu)
 Performs numeric block updates (sup-col) in topological order.
 
Index copy_to_ucol (const Index jcol, const Index nseg, IndexVector &segrep, BlockIndexVector repfnz, IndexVector &perm_r, BlockScalarVector dense, GlobalLU_t &glu)
 Performs numeric block updates (sup-col) in topological order.
 
void pruneL (const Index jcol, const IndexVector &perm_r, const Index pivrow, const Index nseg, const IndexVector &segrep, BlockIndexVector repfnz, IndexVector &xprune, GlobalLU_t &glu)
 Prunes the L-structure.
 
void countnz (const Index n, Index &nnzL, Index &nnzU, GlobalLU_t &glu)
 Count Nonzero elements in the factors.
 
void fixupL (const Index n, const IndexVector &perm_r, GlobalLU_t &glu)
 Fix up the data storage lsub for L-subscripts.
 

Protected Attributes

ComputationInfo m_info
 
bool m_factorizationIsOk
 
bool m_analysisIsOk
 
std::string m_lastError
 
NCMatrix m_mat
 
SCMatrix m_Lstore
 
MappedSparseMatrix< Scalar, ColMajor, StorageIndex > m_Ustore
 
PermutationType m_perm_c
 
PermutationType m_perm_r
 
IndexVector m_etree
 
Base::GlobalLU_t m_glu
 
bool m_symmetricmode
 
internal::perfvalues m_perfv
 
RealScalar m_diagpivotthresh
 
Index m_nnzL
 
Index m_nnzU
 
Index m_detPermR
 
Index m_detPermC
 
bool m_isInitialized
 

Private Member Functions

 SparseLU (const SparseLU &)
 

Detailed Description

template<typename _MatrixType, typename _OrderingType>
class Eigen::SparseLU< _MatrixType, _OrderingType >

Sparse supernodal LU factorization for general matrices.

This class implements the supernodal LU factorization for general matrices. It uses the main techniques from the sequential SuperLU package (http://crd-legacy.lbl.gov/~xiaoye/SuperLU/). It handles transparently real and complex arithmetics with single and double precision, depending on the scalar type of your input matrix. The code has been optimized to provide BLAS-3 operations during supernode-panel updates. It benefits directly from the built-in high-performant Eigen BLAS routines. Moreover, when the size of a supernode is very small, the BLAS calls are avoided to enable a better optimization from the compiler. For best performance, you should compile it with NDEBUG flag to avoid the numerous bounds checking on vectors.

An important parameter of this class is the ordering method. It is used to reorder the columns (and eventually the rows) of the matrix to reduce the number of new elements that are created during numerical factorization. The cheapest method available is COLAMD. See the OrderingMethods module for the list of built-in and external ordering methods.

Simple example with key steps

VectorXd x(n), b(n);
SparseMatrix<double, ColMajor> A;
SparseLU<SparseMatrix<double, ColMajor>, COLAMDOrdering<int> > solver;
// fill A and b;
// Compute the ordering permutation vector from the structural pattern of A
solver.analyzePattern(A);
// Compute the numerical factorization
solver.factorize(A);
// Use the factors to solve the linear system
x = solver.solve(b);
Definition Ordering.h:119
Sparse supernodal LU factorization for general matrices.
Definition SparseLU.h:75
void factorize(const MatrixType &matrix)
Definition SparseLU.h:496
void analyzePattern(const MatrixType &matrix)
Definition SparseLU.h:411
A versatile sparse matrix representation.
Definition SparseMatrix.h:98
const Solve< Derived, Rhs > solve(const MatrixBase< Rhs > &b) const
Definition SparseSolverBase.h:88
Warning
The input matrix A should be in a compressed and column-major form. Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix.
Note
Unlike the initial SuperLU implementation, there is no step to equilibrate the matrix. For badly scaled matrices, this step can be useful to reduce the pivoting during factorization. If this is the case for your matrices, you can try the basic scaling method at "unsupported/Eigen/src/IterativeSolvers/Scaling.h"
Template Parameters
_MatrixType The type of the sparse matrix. It must be a column-major SparseMatrix<>
_OrderingType The ordering method to use, either AMD, COLAMD or METIS. Default is COLAMD

This class follows the sparse solver concept.

See also
TutorialSparseSolverConcept
OrderingMethods_Module

Member Typedef Documentation

◆ APIBase

template<typename _MatrixType , typename _OrderingType >
typedef SparseSolverBase<SparseLU<_MatrixType,_OrderingType> > Eigen::SparseLU< _MatrixType, _OrderingType >::APIBase
protected

◆ Base

template<typename _MatrixType , typename _OrderingType >
typedef internal::SparseLUImpl<Scalar, StorageIndex> Eigen::SparseLU< _MatrixType, _OrderingType >::Base

◆ BlockIndexVector

typedef Ref<Matrix<_MatrixType::StorageIndex ,Dynamic,1> > Eigen::internal::SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::BlockIndexVector
inherited

◆ BlockScalarVector

typedef Ref<Matrix<_MatrixType::Scalar ,Dynamic,1> > Eigen::internal::SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::BlockScalarVector
inherited

◆ GlobalLU_t

typedef LU_GlobalLU_t<IndexVector, ScalarVector> Eigen::internal::SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::GlobalLU_t
inherited

◆ IndexVector

template<typename _MatrixType , typename _OrderingType >
typedef Matrix<StorageIndex,Dynamic,1> Eigen::SparseLU< _MatrixType, _OrderingType >::IndexVector

◆ MappedMatrixBlock

typedef Map<ScalarMatrix, 0, OuterStride<> > Eigen::internal::SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::MappedMatrixBlock
inherited

◆ MatrixType

template<typename _MatrixType , typename _OrderingType >
typedef _MatrixType Eigen::SparseLU< _MatrixType, _OrderingType >::MatrixType

◆ NCMatrix

template<typename _MatrixType , typename _OrderingType >
typedef SparseMatrix<Scalar,ColMajor,StorageIndex> Eigen::SparseLU< _MatrixType, _OrderingType >::NCMatrix

◆ OrderingType

template<typename _MatrixType , typename _OrderingType >
typedef _OrderingType Eigen::SparseLU< _MatrixType, _OrderingType >::OrderingType

◆ PermutationType

template<typename _MatrixType , typename _OrderingType >
typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> Eigen::SparseLU< _MatrixType, _OrderingType >::PermutationType

◆ RealScalar

template<typename _MatrixType , typename _OrderingType >
typedef MatrixType::RealScalar Eigen::SparseLU< _MatrixType, _OrderingType >::RealScalar

◆ Scalar

template<typename _MatrixType , typename _OrderingType >
typedef MatrixType::Scalar Eigen::SparseLU< _MatrixType, _OrderingType >::Scalar

◆ ScalarMatrix

typedef Matrix<_MatrixType::Scalar ,Dynamic,Dynamic,ColMajor> Eigen::internal::SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::ScalarMatrix
inherited

◆ ScalarVector

template<typename _MatrixType , typename _OrderingType >
typedef Matrix<Scalar,Dynamic,1> Eigen::SparseLU< _MatrixType, _OrderingType >::ScalarVector

◆ SCMatrix

template<typename _MatrixType , typename _OrderingType >
typedef internal::MappedSuperNodalMatrix<Scalar, StorageIndex> Eigen::SparseLU< _MatrixType, _OrderingType >::SCMatrix

◆ StorageIndex

template<typename _MatrixType , typename _OrderingType >
typedef MatrixType::StorageIndex Eigen::SparseLU< _MatrixType, _OrderingType >::StorageIndex

Member Enumeration Documentation

◆ anonymous enum

template<typename _MatrixType , typename _OrderingType >
anonymous enum
Enumerator
ColsAtCompileTime 
MaxColsAtCompileTime 
94 {
95 ColsAtCompileTime = MatrixType::ColsAtCompileTime,
96 MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
97 };
@ ColsAtCompileTime
Definition SparseLU.h:95
@ MaxColsAtCompileTime
Definition SparseLU.h:96

Constructor & Destructor Documentation

◆ SparseLU() [1/3]

template<typename _MatrixType , typename _OrderingType >
Eigen::SparseLU< _MatrixType, _OrderingType >::SparseLU ( )
inline
100 :m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
101 {
102 initperfvalues();
103 }
void initperfvalues()
Definition SparseLU.h:360
MappedSparseMatrix< Scalar, ColMajor, StorageIndex > m_Ustore
Definition SparseLU.h:377
Index m_detPermR
Definition SparseLU.h:390
bool m_symmetricmode
Definition SparseLU.h:385
std::string m_lastError
Definition SparseLU.h:374
RealScalar m_diagpivotthresh
Definition SparseLU.h:388

References Eigen::SparseLU< _MatrixType, _OrderingType >::initperfvalues().

+ Here is the call graph for this function:

◆ SparseLU() [2/3]

template<typename _MatrixType , typename _OrderingType >
Eigen::SparseLU< _MatrixType, _OrderingType >::SparseLU ( const MatrixType &  matrix)
inlineexplicit
105 : m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
106 {
107 initperfvalues();
108 compute(matrix);
109 }
void compute(const MatrixType &matrix)
Definition SparseLU.h:124

References Eigen::SparseLU< _MatrixType, _OrderingType >::compute(), and Eigen::SparseLU< _MatrixType, _OrderingType >::initperfvalues().

+ Here is the call graph for this function:

◆ ~SparseLU()

template<typename _MatrixType , typename _OrderingType >
Eigen::SparseLU< _MatrixType, _OrderingType >::~SparseLU ( )
inline
112 {
113 // Free all explicit dynamic pointers
114 }

◆ SparseLU() [3/3]

template<typename _MatrixType , typename _OrderingType >
Eigen::SparseLU< _MatrixType, _OrderingType >::SparseLU ( const SparseLU< _MatrixType, _OrderingType > &  )
private

Member Function Documentation

◆ _solve_impl() [1/2]

template<typename _MatrixType , typename _OrderingType >
template<typename Rhs , typename Dest >
bool Eigen::SparseLU< _MatrixType, _OrderingType >::_solve_impl ( const MatrixBase< Rhs > &  B,
MatrixBase< Dest > &  X_base 
) const
inline
218 {
219 Dest& X(X_base.derived());
220 eigen_assert(m_factorizationIsOk && "The matrix should be factorized first");
221 EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
222 THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
223
224 // Permute the right hand side to form X = Pr*B
225 // on return, X is overwritten by the computed solution
226 X.resize(B.rows(),B.cols());
227
228 // this ugly const_cast_derived() helps to detect aliasing when applying the permutations
229 for(Index j = 0; j < B.cols(); ++j)
230 X.col(j) = rowsPermutation() * B.const_cast_derived().col(j);
231
232 //Forward substitution with L
233 this->matrixL().solveInPlace(X);
234 this->matrixU().solveInPlace(X);
235
236 // Permute back the solution
237 for (Index j = 0; j < B.cols(); ++j)
238 X.col(j) = colsPermutation().inverse() * X.col(j);
239
240 return true;
241 }
#define eigen_assert(x)
Definition Macros.h:579
#define EIGEN_STATIC_ASSERT(CONDITION, MSG)
Definition StaticAssert.h:124
InverseReturnType inverse() const
Definition PermutationMatrix.h:196
const PermutationType & colsPermutation() const
Definition SparseLU.h:173
bool m_factorizationIsOk
Definition SparseLU.h:372
SparseLUMatrixLReturnType< SCMatrix > matrixL() const
Definition SparseLU.h:146
const PermutationType & rowsPermutation() const
Definition SparseLU.h:165
SparseLUMatrixUReturnType< SCMatrix, MappedSparseMatrix< Scalar, ColMajor, StorageIndex > > matrixU() const
Definition SparseLU.h:156
const unsigned int RowMajorBit
Definition Constants.h:61
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition Meta.h:33
@ X
Definition libslic3r.h:98
EIGEN_DEVICE_FUNC Derived & const_cast_derived() const
Definition EigenBase.h:51

References Eigen::SparseLU< _MatrixType, _OrderingType >::colsPermutation(), eigen_assert, EIGEN_STATIC_ASSERT, Eigen::PermutationBase< Derived >::inverse(), Eigen::SparseLU< _MatrixType, _OrderingType >::m_factorizationIsOk, Eigen::SparseLU< _MatrixType, _OrderingType >::matrixL(), Eigen::SparseLU< _MatrixType, _OrderingType >::matrixU(), Eigen::RowMajorBit, and Eigen::SparseLU< _MatrixType, _OrderingType >::rowsPermutation().

+ Here is the call graph for this function:

◆ _solve_impl() [2/2]

void Eigen::SparseSolverBase< SparseLU< _MatrixType, _OrderingType > >::_solve_impl ( const SparseMatrixBase< Rhs > &  b,
SparseMatrixBase< Dest > &  dest 
) const
inlineinherited
112 {
113 internal::solve_sparse_through_dense_panels(derived(), b.derived(), dest.derived());
114 }
SparseLU< _MatrixType, _OrderingType > & derived()
Definition SparseSolverBase.h:79

◆ absDeterminant()

template<typename _MatrixType , typename _OrderingType >
Scalar Eigen::SparseLU< _MatrixType, _OrderingType >::absDeterminant ( )
inline
Returns
the absolute value of the determinant of the matrix of which *this is the LU decomposition.
Warning
a determinant can be very big or small, so for matrices of large enough dimension, there is a risk of overflow/underflow. One way to work around that is to use logAbsDeterminant() instead.
See also
logAbsDeterminant(), signDeterminant()
254 {
255 using std::abs;
256 eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
257 // Initialize with the determinant of the row matrix
258 Scalar det = Scalar(1.);
259 // Note that the diagonal blocks of U are stored in supernodes,
260 // which are available in the L part :)
261 for (Index j = 0; j < this->cols(); ++j)
262 {
263 for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
264 {
265 if(it.index() == j)
266 {
267 det *= abs(it.value());
268 break;
269 }
270 }
271 }
272 return det;
273 }
MatrixType::Scalar Scalar
Definition SparseLU.h:84
SCMatrix m_Lstore
Definition SparseLU.h:376
Index cols() const
Definition SparseLU.h:133
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half abs(const half &a)
Definition Half.h:445

References Eigen::SparseLU< _MatrixType, _OrderingType >::cols(), eigen_assert, Eigen::SparseLU< _MatrixType, _OrderingType >::m_factorizationIsOk, and Eigen::SparseLU< _MatrixType, _OrderingType >::m_Lstore.

+ Here is the call graph for this function:

◆ analyzePattern()

template<typename MatrixType , typename OrderingType >
void Eigen::SparseLU< MatrixType, OrderingType >::analyzePattern ( const MatrixType &  mat)

Compute the column permutation to minimize the fill-in

  • Apply this permutation to the input matrix -
  • Compute the column elimination tree on the permuted matrix
  • Postorder the elimination tree and the column permutation
412{
413
414 //TODO It is possible as in SuperLU to compute row and columns scaling vectors to equilibrate the matrix mat.
415
416 // Firstly, copy the whole input matrix.
417 m_mat = mat;
418
419 // Compute fill-in ordering
420 OrderingType ord;
421 ord(m_mat,m_perm_c);
422
423 // Apply the permutation to the column of the input matrix
424 if (m_perm_c.size())
425 {
426 m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. FIXME : This vector is filled but not subsequently used.
427 // Then, permute only the column pointers
428 ei_declare_aligned_stack_constructed_variable(StorageIndex,outerIndexPtr,mat.cols()+1,mat.isCompressed()?const_cast<StorageIndex*>(mat.outerIndexPtr()):0);
429
430 // If the input matrix 'mat' is uncompressed, then the outer-indices do not match the ones of m_mat, and a copy is thus needed.
431 if(!mat.isCompressed())
432 IndexVector::Map(outerIndexPtr, mat.cols()+1) = IndexVector::Map(m_mat.outerIndexPtr(),mat.cols()+1);
433
434 // Apply the permutation and compute the nnz per column.
435 for (Index i = 0; i < mat.cols(); i++)
436 {
437 m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
438 m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
439 }
440 }
441
442 // Compute the column elimination tree of the permuted matrix
443 IndexVector firstRowElt;
444 internal::coletree(m_mat, m_etree,firstRowElt);
445
446 // In symmetric mode, do not do postorder here
447 if (!m_symmetricmode) {
448 IndexVector post, iwork;
449 // Post order etree
450 internal::treePostorder(StorageIndex(m_mat.cols()), m_etree, post);
451
452
453 // Renumber etree in postorder
454 Index m = m_mat.cols();
455 iwork.resize(m+1);
456 for (Index i = 0; i < m; ++i) iwork(post(i)) = post(m_etree(i));
457 m_etree = iwork;
458
459 // Postmultiply A*Pc by post, i.e reorder the matrix according to the postorder of the etree
460 PermutationType post_perm(m);
461 for (Index i = 0; i < m; i++)
462 post_perm.indices()(i) = post(i);
463
464 // Combine the two permutations : postorder the permutation for future use
465 if(m_perm_c.size()) {
466 m_perm_c = post_perm * m_perm_c;
467 }
468
469 } // end postordering
470
471 m_analysisIsOk = true;
472}
#define ei_declare_aligned_stack_constructed_variable(TYPE, NAME, SIZE, BUFFER)
Definition Memory.h:644
Index size() const
Definition PermutationMatrix.h:108
const IndicesType & indices() const
Definition PermutationMatrix.h:388
friend class Eigen::Map
Definition PlainObjectBase.h:121
NCMatrix m_mat
Definition SparseLU.h:375
PermutationType m_perm_c
Definition SparseLU.h:378
Matrix< StorageIndex, Dynamic, 1 > IndexVector
Definition SparseLU.h:90
_OrderingType OrderingType
Definition SparseLU.h:83
PermutationMatrix< Dynamic, Dynamic, StorageIndex > PermutationType
Definition SparseLU.h:91
MatrixType::StorageIndex StorageIndex
Definition SparseLU.h:86
IndexVector m_etree
Definition SparseLU.h:380
bool m_analysisIsOk
Definition SparseLU.h:373
const StorageIndex * innerNonZeroPtr() const
Definition SparseMatrix.h:175
void uncompress()
Definition SparseMatrix.h:495
const StorageIndex * outerIndexPtr() const
Definition SparseMatrix.h:166
Index cols() const
Definition SparseMatrix.h:138
int coletree(const MatrixType &mat, IndexVector &parent, IndexVector &firstRowElt, typename MatrixType::StorageIndex *perm=0)
Definition SparseColEtree.h:61
void treePostorder(typename IndexVector::Scalar n, IndexVector &parent, IndexVector &post)
Post order a tree.
Definition SparseColEtree.h:178

References Eigen::internal::coletree(), ei_declare_aligned_stack_constructed_variable, Eigen::PermutationMatrix< SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex >::indices(), Eigen::PlainObjectBase< Derived >::resize(), and Eigen::internal::treePostorder().

Referenced by Eigen::SparseLU< _MatrixType, _OrderingType >::compute().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ cols()

template<typename _MatrixType , typename _OrderingType >
Index Eigen::SparseLU< _MatrixType, _OrderingType >::cols ( ) const
inline
133{ return m_mat.cols(); }

References Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::cols(), and Eigen::SparseLU< _MatrixType, _OrderingType >::m_mat.

Referenced by Eigen::SparseLU< _MatrixType, _OrderingType >::absDeterminant(), Eigen::SparseLU< _MatrixType, _OrderingType >::determinant(), Eigen::SparseLU< _MatrixType, _OrderingType >::logAbsDeterminant(), and Eigen::SparseLU< _MatrixType, _OrderingType >::signDeterminant().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ colsPermutation()

template<typename _MatrixType , typename _OrderingType >
const PermutationType & Eigen::SparseLU< _MatrixType, _OrderingType >::colsPermutation ( ) const
inline
Returns
a reference to the column matrix permutation $ P_c^T $ such that $P_r A P_c^T = L U$
See also
rowsPermutation()
174 {
175 return m_perm_c;
176 }

References Eigen::SparseLU< _MatrixType, _OrderingType >::m_perm_c.

Referenced by Eigen::SparseLU< _MatrixType, _OrderingType >::_solve_impl().

+ Here is the caller graph for this function:

◆ column_bmod()

Index SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::column_bmod ( const Index  jcol,
const Index  nseg,
BlockScalarVector  dense,
ScalarVector tempv,
BlockIndexVector  segrep,
BlockIndexVector  repfnz,
Index  fpanelc,
GlobalLU_t glu 
)
protectedinherited

Performs numeric block updates (sup-col) in topological order.

Parameters
jcolcurrent column to update
nsegNumber of segments in the U part
denseStore the full representation of the column
tempvworking array
segrepsegment representative ...
repfnz??? First nonzero column in each row ??? ...
fpanelcFirst column in the current panel
gluGlobal LU data.
Returns
0 - successful return > 0 - number of bytes allocated when run out of space
55{
56 Index jsupno, k, ksub, krep, ksupno;
57 Index lptr, nrow, isub, irow, nextlu, new_next, ufirst;
58 Index fsupc, nsupc, nsupr, luptr, kfnz, no_zeros;
59 /* krep = representative of current k-th supernode
60 * fsupc = first supernodal column
61 * nsupc = number of columns in a supernode
62 * nsupr = number of rows in a supernode
63 * luptr = location of supernodal LU-block in storage
64 * kfnz = first nonz in the k-th supernodal segment
65 * no_zeros = no of leading zeros in a supernodal U-segment
66 */
67
68 jsupno = glu.supno(jcol);
69 // For each nonzero supernode segment of U[*,j] in topological order
70 k = nseg - 1;
71 Index d_fsupc; // distance between the first column of the current panel and the
72 // first column of the current snode
73 Index fst_col; // First column within small LU update
74 Index segsize;
75 for (ksub = 0; ksub < nseg; ksub++)
76 {
77 krep = segrep(k); k--;
78 ksupno = glu.supno(krep);
79 if (jsupno != ksupno )
80 {
81 // outside the rectangular supernode
82 fsupc = glu.xsup(ksupno);
83 fst_col = (std::max)(fsupc, fpanelc);
84
85 // Distance from the current supernode to the current panel;
86 // d_fsupc = 0 if fsupc > fpanelc
87 d_fsupc = fst_col - fsupc;
88
89 luptr = glu.xlusup(fst_col) + d_fsupc;
90 lptr = glu.xlsub(fsupc) + d_fsupc;
91
92 kfnz = repfnz(krep);
93 kfnz = (std::max)(kfnz, fpanelc);
94
95 segsize = krep - kfnz + 1;
96 nsupc = krep - fst_col + 1;
97 nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc);
98 nrow = nsupr - d_fsupc - nsupc;
99 Index lda = glu.xlusup(fst_col+1) - glu.xlusup(fst_col);
100
101
102 // Perform a triangular solver and block update,
103 // then scatter the result of sup-col update to dense
104 no_zeros = kfnz - fst_col;
105 if(segsize==1)
106 LU_kernel_bmod<1>::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
107 else
108 LU_kernel_bmod<Dynamic>::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
109 } // end if jsupno
110 } // end for each segment
111
112 // Process the supernodal portion of L\U[*,j]
113 nextlu = glu.xlusup(jcol);
114 fsupc = glu.xsup(jsupno);
115
116 // copy the SPA dense into L\U[*,j]
117 Index mem;
118 new_next = nextlu + glu.xlsub(fsupc + 1) - glu.xlsub(fsupc);
119 Index offset = internal::first_multiple<Index>(new_next, internal::packet_traits<Scalar>::size) - new_next;
120 if(offset)
121 new_next += offset;
122 while (new_next > glu.nzlumax )
123 {
124 mem = memXpand<ScalarVector>(glu.lusup, glu.nzlumax, nextlu, LUSUP, glu.num_expansions);
125 if (mem) return mem;
126 }
127
128 for (isub = glu.xlsub(fsupc); isub < glu.xlsub(fsupc+1); isub++)
129 {
130 irow = glu.lsub(isub);
131 glu.lusup(nextlu) = dense(irow);
132 dense(irow) = Scalar(0.0);
133 ++nextlu;
134 }
135
136 if(offset)
137 {
138 glu.lusup.segment(nextlu,offset).setZero();
139 nextlu += offset;
140 }
141 glu.xlusup(jcol + 1) = StorageIndex(nextlu); // close L\U(*,jcol);
142
143 /* For more updates within the panel (also within the current supernode),
144 * should start from the first column of the panel, or the first column
145 * of the supernode, whichever is bigger. There are two cases:
146 * 1) fsupc < fpanelc, then fst_col <-- fpanelc
147 * 2) fsupc >= fpanelc, then fst_col <-- fsupc
148 */
149 fst_col = (std::max)(fsupc, fpanelc);
150
151 if (fst_col < jcol)
152 {
153 // Distance between the current supernode and the current panel
154 // d_fsupc = 0 if fsupc >= fpanelc
155 d_fsupc = fst_col - fsupc;
156
157 lptr = glu.xlsub(fsupc) + d_fsupc;
158 luptr = glu.xlusup(fst_col) + d_fsupc;
159 nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); // leading dimension
160 nsupc = jcol - fst_col; // excluding jcol
161 nrow = nsupr - d_fsupc - nsupc;
162
163 // points to the beginning of jcol in snode L\U(jsupno)
164 ufirst = glu.xlusup(jcol) + d_fsupc;
165 Index lda = glu.xlusup(jcol+1) - glu.xlusup(jcol);
166 MappedMatrixBlock A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
167 VectorBlock<ScalarVector> u(glu.lusup, ufirst, nsupc);
168 u = A.template triangularView<UnitLower>().solve(u);
169
170 new (&A) MappedMatrixBlock ( &(glu.lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
171 VectorBlock<ScalarVector> l(glu.lusup, ufirst+nsupc, nrow);
172 l.noalias() -= A * u;
173
174 } // End if fst_col
175 return 0;
176}
Map< ScalarMatrix, 0, OuterStride<> > MappedMatrixBlock
Definition SparseLUImpl.h:26
typename Traits< remove_cvref_t< L > >::Scalar Scalar
Definition Line.hpp:36
void offset(Slic3r::ExPolygon &sh, coord_t distance, const PolygonTag &)
Definition geometries.hpp:132

◆ column_dfs()

Index SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::column_dfs ( const Index  m,
const Index  jcol,
IndexVector perm_r,
Index  maxsuper,
Index nseg,
BlockIndexVector  lsub_col,
IndexVector segrep,
BlockIndexVector  repfnz,
IndexVector xprune,
IndexVector marker,
IndexVector parent,
IndexVector xplore,
GlobalLU_t glu 
)
protectedinherited

Performs a symbolic factorization on column jcol and decide the supernode boundary.

A supernode representative is the last column of a supernode. The nonzeros in U[*,j] are segments that end at supernode representatives. The routine returns a list of the supernodal representatives in topological order of the dfs that generates them. The location of the first nonzero in each supernodal segment (supernodal entry location) is also returned.

Parameters
m — number of rows in the matrix
jcol — current column
perm_r — row permutation
maxsuper — maximum number of columns allowed in a supernode
[in,out] nseg — number of segments in current U[*,j]; new segments appended
lsub_col — defines the rhs vector to start the dfs
[in,out] segrep — segment representatives; new segments appended
repfnz — first nonzero location in each row
xprune
marker — marker[i] == jj, if i was visited during dfs of current column jj
parent
xplore — working array
glu — global LU data
Returns
0 success > 0 number of bytes allocated when run out of space
96{
97
98 Index jsuper = glu.supno(jcol);
99 Index nextl = glu.xlsub(jcol);
100 VectorBlock<IndexVector> marker2(marker, 2*m, m);
101
102
103 column_dfs_traits<IndexVector, ScalarVector> traits(jcol, jsuper, glu, *this);
104
105 // For each nonzero in A(*,jcol) do dfs
106 for (Index k = 0; ((k < m) ? lsub_col[k] != emptyIdxLU : false) ; k++)
107 {
108 Index krow = lsub_col(k);
109 lsub_col(k) = emptyIdxLU;
110 Index kmark = marker2(krow);
111
112 // krow was visited before, go to the next nonz;
113 if (kmark == jcol) continue;
114
115 dfs_kernel(StorageIndex(jcol), perm_r, nseg, glu.lsub, segrep, repfnz, xprune, marker2, parent,
116 xplore, glu, nextl, krow, traits);
117 } // for each nonzero ...
118
119 Index fsupc;
120 StorageIndex nsuper = glu.supno(jcol);
121 StorageIndex jcolp1 = StorageIndex(jcol) + 1;
122 Index jcolm1 = jcol - 1;
123
124 // check to see if j belongs in the same supernode as j-1
125 if ( jcol == 0 )
126 { // Do nothing for column 0
127 nsuper = glu.supno(0) = 0 ;
128 }
129 else
130 {
131 fsupc = glu.xsup(nsuper);
132 StorageIndex jptr = glu.xlsub(jcol); // Not yet compressed
133 StorageIndex jm1ptr = glu.xlsub(jcolm1);
134
135 // Use supernodes of type T2 : see SuperLU paper
136 if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = emptyIdxLU;
137
138 // Make sure the number of columns in a supernode doesn't
139 // exceed threshold
140 if ( (jcol - fsupc) >= maxsuper) jsuper = emptyIdxLU;
141
142 /* If jcol starts a new supernode, reclaim storage space in
143 * glu.lsub from previous supernode. Note we only store
144 * the subscript set of the first and last columns of
145 * a supernode. (first for num values, last for pruning)
146 */
147 if (jsuper == emptyIdxLU)
148 { // starts a new supernode
149 if ( (fsupc < jcolm1-1) )
150 { // >= 3 columns in nsuper
151 StorageIndex ito = glu.xlsub(fsupc+1);
152 glu.xlsub(jcolm1) = ito;
153 StorageIndex istop = ito + jptr - jm1ptr;
 154 xprune(jcolm1) = istop; // initialize xprune(jcol-1)
155 glu.xlsub(jcol) = istop;
156
157 for (StorageIndex ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito)
158 glu.lsub(ito) = glu.lsub(ifrom);
159 nextl = ito; // = istop + length(jcol)
160 }
161 nsuper++;
162 glu.supno(jcol) = nsuper;
163 } // if a new supernode
164 } // end else: jcol > 0
165
166 // Tidy up the pointers before exit
167 glu.xsup(nsuper+1) = jcolp1;
168 glu.supno(jcolp1) = nsuper;
 169 xprune(jcol) = StorageIndex(nextl); // Initialize upper bound for pruning
170 glu.xlsub(jcolp1) = StorageIndex(nextl);
171
172 return 0;
173}
void dfs_kernel(const _MatrixType::StorageIndex jj, IndexVector &perm_r, Index &nseg, IndexVector &panel_lsub, IndexVector &segrep, Ref< IndexVector > repfnz_col, IndexVector &xprune, Ref< IndexVector > marker, IndexVector &parent, IndexVector &xplore, GlobalLU_t &glu, Index &nextl_col, Index krow, Traits &traits)
Definition SparseLU_panel_dfs.h:62
@ emptyIdxLU
Definition SparseLU_Memory.h:38

◆ compute()

template<typename _MatrixType , typename _OrderingType >
void Eigen::SparseLU< _MatrixType, _OrderingType >::compute ( const MatrixType matrix)
inline

Compute the symbolic and numeric factorization of the input sparse matrix. The input matrix should be in column-major storage.

125 {
126 // Analyze
127 analyzePattern(matrix);
128 //Factorize
129 factorize(matrix);
130 }

References Eigen::SparseLU< _MatrixType, _OrderingType >::analyzePattern(), and Eigen::SparseLU< _MatrixType, _OrderingType >::factorize().

Referenced by Eigen::SparseLU< _MatrixType, _OrderingType >::SparseLU(), and igl::copyleft::comiso::FrameInterpolator::interpolateSymmetric().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ copy_to_ucol()

Index SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::copy_to_ucol ( const Index  jcol,
const Index  nseg,
IndexVector segrep,
BlockIndexVector  repfnz,
IndexVector perm_r,
BlockScalarVector  dense,
GlobalLU_t glu 
)
protectedinherited

Performs numeric block updates (sup-col) in topological order.

Parameters
jcol — current column to update
nseg — number of segments in the U part
segrep — segment representative ...
repfnz — first nonzero column in each row ...
perm_r — row permutation
dense — store the full representation of the column
glu — global LU data.
Returns
0 - successful return > 0 - number of bytes allocated when run out of space
52{
53 Index ksub, krep, ksupno;
54
55 Index jsupno = glu.supno(jcol);
56
57 // For each nonzero supernode segment of U[*,j] in topological order
58 Index k = nseg - 1, i;
59 StorageIndex nextu = glu.xusub(jcol);
60 Index kfnz, isub, segsize;
61 Index new_next,irow;
62 Index fsupc, mem;
63 for (ksub = 0; ksub < nseg; ksub++)
64 {
65 krep = segrep(k); k--;
66 ksupno = glu.supno(krep);
67 if (jsupno != ksupno ) // should go into ucol();
68 {
69 kfnz = repfnz(krep);
70 if (kfnz != emptyIdxLU)
71 { // Nonzero U-segment
72 fsupc = glu.xsup(ksupno);
73 isub = glu.xlsub(fsupc) + kfnz - fsupc;
74 segsize = krep - kfnz + 1;
75 new_next = nextu + segsize;
76 while (new_next > glu.nzumax)
77 {
78 mem = memXpand<ScalarVector>(glu.ucol, glu.nzumax, nextu, UCOL, glu.num_expansions);
79 if (mem) return mem;
80 mem = memXpand<IndexVector>(glu.usub, glu.nzumax, nextu, USUB, glu.num_expansions);
81 if (mem) return mem;
82
83 }
84
85 for (i = 0; i < segsize; i++)
86 {
87 irow = glu.lsub(isub);
88 glu.usub(nextu) = perm_r(irow); // Unlike the L part, the U part is stored in its final order
89 glu.ucol(nextu) = dense(irow);
90 dense(irow) = Scalar(0.0);
91 nextu++;
92 isub++;
93 }
94
95 } // end nonzero U-segment
96
97 } // end if jsupno
98
99 } // end for each segment
100 glu.xusub(jcol + 1) = nextu; // close U(*,jcol)
101 return 0;
102}

◆ countnz()

void SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::countnz ( const Index  n,
Index nnzL,
Index nnzU,
GlobalLU_t glu 
)
protectedinherited

Count Nonzero elements in the factors.

22{
23 nnzL = 0;
24 nnzU = (glu.xusub)(n);
25 Index nsuper = (glu.supno)(n);
26 Index jlen;
27 Index i, j, fsupc;
28 if (n <= 0 ) return;
29 // For each supernode
30 for (i = 0; i <= nsuper; i++)
31 {
32 fsupc = glu.xsup(i);
33 jlen = glu.xlsub(fsupc+1) - glu.xlsub(fsupc);
34
35 for (j = fsupc; j < glu.xsup(i+1); j++)
36 {
37 nnzL += jlen;
38 nnzU += j - fsupc + 1;
39 jlen--;
40 }
41 }
42}

◆ derived() [1/2]

SparseLU< _MatrixType, _OrderingType > & Eigen::SparseSolverBase< SparseLU< _MatrixType, _OrderingType > >::derived ( )
inlineinherited
79{ return *static_cast<Derived*>(this); }

◆ derived() [2/2]

const SparseLU< _MatrixType, _OrderingType > & Eigen::SparseSolverBase< SparseLU< _MatrixType, _OrderingType > >::derived ( ) const
inlineinherited
80{ return *static_cast<const Derived*>(this); }

◆ determinant()

template<typename _MatrixType , typename _OrderingType >
Scalar Eigen::SparseLU< _MatrixType, _OrderingType >::determinant ( )
inline
Returns
The determinant of the matrix.
See also
absDeterminant(), logAbsDeterminant()
338 {
339 eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
340 // Initialize with the determinant of the row matrix
341 Scalar det = Scalar(1.);
342 // Note that the diagonal blocks of U are stored in supernodes,
343 // which are available in the L part :)
344 for (Index j = 0; j < this->cols(); ++j)
345 {
346 for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
347 {
348 if(it.index() == j)
349 {
350 det *= it.value();
351 break;
352 }
353 }
354 }
355 return (m_detPermR * m_detPermC) > 0 ? det : -det;
356 }
Index m_detPermC
Definition SparseLU.h:390

References Eigen::SparseLU< _MatrixType, _OrderingType >::cols(), eigen_assert, Eigen::SparseLU< _MatrixType, _OrderingType >::m_detPermC, Eigen::SparseLU< _MatrixType, _OrderingType >::m_detPermR, Eigen::SparseLU< _MatrixType, _OrderingType >::m_factorizationIsOk, and Eigen::SparseLU< _MatrixType, _OrderingType >::m_Lstore.

+ Here is the call graph for this function:

◆ dfs_kernel()

void SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::dfs_kernel ( const _MatrixType::StorageIndex  jj,
IndexVector perm_r,
Index nseg,
IndexVector panel_lsub,
IndexVector segrep,
Ref< IndexVector repfnz_col,
IndexVector xprune,
Ref< IndexVector marker,
IndexVector parent,
IndexVector xplore,
GlobalLU_t glu,
Index nextl_col,
Index  krow,
Traits &  traits 
)
protectedinherited
68{
69
70 StorageIndex kmark = marker(krow);
71
72 // For each unmarked krow of jj
73 marker(krow) = jj;
74 StorageIndex kperm = perm_r(krow);
75 if (kperm == emptyIdxLU ) {
76 // krow is in L : place it in structure of L(*, jj)
77 panel_lsub(nextl_col++) = StorageIndex(krow); // krow is indexed into A
78
79 traits.mem_expand(panel_lsub, nextl_col, kmark);
80 }
81 else
82 {
83 // krow is in U : if its supernode-representative krep
84 // has been explored, update repfnz(*)
85 // krep = supernode representative of the current row
86 StorageIndex krep = glu.xsup(glu.supno(kperm)+1) - 1;
87 // First nonzero element in the current column:
88 StorageIndex myfnz = repfnz_col(krep);
89
90 if (myfnz != emptyIdxLU )
91 {
92 // Representative visited before
93 if (myfnz > kperm ) repfnz_col(krep) = kperm;
94
95 }
96 else
97 {
98 // Otherwise, perform dfs starting at krep
99 StorageIndex oldrep = emptyIdxLU;
100 parent(krep) = oldrep;
101 repfnz_col(krep) = kperm;
102 StorageIndex xdfs = glu.xlsub(krep);
103 Index maxdfs = xprune(krep);
104
105 StorageIndex kpar;
106 do
107 {
108 // For each unmarked kchild of krep
109 while (xdfs < maxdfs)
110 {
111 StorageIndex kchild = glu.lsub(xdfs);
112 xdfs++;
113 StorageIndex chmark = marker(kchild);
114
115 if (chmark != jj )
116 {
117 marker(kchild) = jj;
118 StorageIndex chperm = perm_r(kchild);
119
120 if (chperm == emptyIdxLU)
121 {
122 // case kchild is in L: place it in L(*, j)
123 panel_lsub(nextl_col++) = kchild;
124 traits.mem_expand(panel_lsub, nextl_col, chmark);
125 }
126 else
127 {
128 // case kchild is in U :
129 // chrep = its supernode-rep. If its rep has been explored,
130 // update its repfnz(*)
131 StorageIndex chrep = glu.xsup(glu.supno(chperm)+1) - 1;
132 myfnz = repfnz_col(chrep);
133
134 if (myfnz != emptyIdxLU)
135 { // Visited before
136 if (myfnz > chperm)
137 repfnz_col(chrep) = chperm;
138 }
139 else
140 { // Cont. dfs at snode-rep of kchild
141 xplore(krep) = xdfs;
142 oldrep = krep;
143 krep = chrep; // Go deeper down G(L)
144 parent(krep) = oldrep;
145 repfnz_col(krep) = chperm;
146 xdfs = glu.xlsub(krep);
147 maxdfs = xprune(krep);
148
149 } // end if myfnz != -1
150 } // end if chperm == -1
151
152 } // end if chmark !=jj
153 } // end while xdfs < maxdfs
154
155 // krow has no more unexplored nbrs :
156 // Place snode-rep krep in postorder DFS, if this
157 // segment is seen for the first time. (Note that
158 // "repfnz(krep)" may change later.)
 159 // Backtrack dfs to its parent
160 if(traits.update_segrep(krep,jj))
161 //if (marker1(krep) < jcol )
162 {
163 segrep(nseg) = krep;
164 ++nseg;
165 //marker1(krep) = jj;
166 }
167
168 kpar = parent(krep); // Pop recursion, mimic recursion
169 if (kpar == emptyIdxLU)
170 break; // dfs done
171 krep = kpar;
172 xdfs = xplore(krep);
173 maxdfs = xprune(krep);
174
175 } while (kpar != emptyIdxLU); // Do until empty stack
176
177 } // end if (myfnz = -1)
178
179 } // end if (kperm == -1)
180}

◆ expand()

Index SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::expand ( VectorType &  vec,
Index length,
Index  nbElts,
Index  keep_prev,
Index num_expansions 
)
protectedinherited

Expand the existing storage to accommodate more fill-ins

Parameters
vec — valid pointer to the vector to allocate or expand
[in,out] length — at input, the current length of the vector that is to be increased; at output, the length of the newly allocated vector
[in] nbElts — current number of elements in the factors
keep_prev — 1: use length and do not expand the vector; 0: compute new_len and expand
[in,out] num_expansions — number of times the memory has been expanded
64{
65
66 float alpha = 1.5; // Ratio of the memory increase
67 Index new_len; // New size of the allocated memory
68
69 if(num_expansions == 0 || keep_prev)
70 new_len = length ; // First time allocate requested
71 else
72 new_len = (std::max)(length+1,Index(alpha * length));
73
74 VectorType old_vec; // Temporary vector to hold the previous values
75 if (nbElts > 0 )
76 old_vec = vec.segment(0,nbElts);
77
78 //Allocate or expand the current vector
79#ifdef EIGEN_EXCEPTIONS
80 try
81#endif
82 {
83 vec.resize(new_len);
84 }
85#ifdef EIGEN_EXCEPTIONS
86 catch(std::bad_alloc& )
87#else
88 if(!vec.size())
89#endif
90 {
91 if (!num_expansions)
92 {
93 // First time to allocate from LUMemInit()
94 // Let LUMemInit() deals with it.
95 return -1;
96 }
97 if (keep_prev)
98 {
 99 // In this case, the memory length should not be reduced
100 return new_len;
101 }
102 else
103 {
104 // Reduce the size and increase again
105 Index tries = 0; // Number of attempts
106 do
107 {
108 alpha = (alpha + 1)/2;
109 new_len = (std::max)(length+1,Index(alpha * length));
110#ifdef EIGEN_EXCEPTIONS
111 try
112#endif
113 {
114 vec.resize(new_len);
115 }
116#ifdef EIGEN_EXCEPTIONS
117 catch(std::bad_alloc& )
118#else
119 if (!vec.size())
120#endif
121 {
122 tries += 1;
123 if ( tries > 10) return new_len;
124 }
125 } while (!vec.size());
126 }
127 }
128 //Copy the previous values to the newly allocated space
129 if (nbElts > 0)
130 vec.segment(0, nbElts) = old_vec;
131
132
133 length = new_len;
134 if(num_expansions) ++num_expansions;
135 return 0;
136}
double length(std::vector< SurfacePoint > &path)
Definition exact_geodesic.cpp:1682

◆ factorize()

template<typename MatrixType , typename OrderingType >
void Eigen::SparseLU< MatrixType, OrderingType >::factorize ( const MatrixType matrix)
  • Numerical factorization
  • Interleaved with the symbolic factorization On exit, info is

    = 0: successful factorization

> 0: if info = i, and i is

<= A->ncol: U(i,i) is exactly zero. The factorization has been completed, but the factor U is exactly singular, and division by zero will occur if it is used to solve a system of equations.

> A->ncol: number of bytes allocated when memory allocation failure occurred, plus A->ncol. If lwork = -1, it is the estimated amount of space needed, plus A->ncol.

497{
499 eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
500 eigen_assert((matrix.rows() == matrix.cols()) && "Only for squared matrices");
501
502 m_isInitialized = true;
503
504
505 // Apply the column permutation computed in analyzepattern()
506 // m_mat = matrix * m_perm_c.inverse();
507 m_mat = matrix;
508 if (m_perm_c.size())
509 {
510 m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers.
511 //Then, permute only the column pointers
512 const StorageIndex * outerIndexPtr;
513 if (matrix.isCompressed()) outerIndexPtr = matrix.outerIndexPtr();
514 else
515 {
516 StorageIndex* outerIndexPtr_t = new StorageIndex[matrix.cols()+1];
517 for(Index i = 0; i <= matrix.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
518 outerIndexPtr = outerIndexPtr_t;
519 }
520 for (Index i = 0; i < matrix.cols(); i++)
521 {
522 m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
523 m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
524 }
525 if(!matrix.isCompressed()) delete[] outerIndexPtr;
526 }
527 else
528 { //FIXME This should not be needed if the empty permutation is handled transparently
529 m_perm_c.resize(matrix.cols());
530 for(StorageIndex i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i;
531 }
532
533 Index m = m_mat.rows();
534 Index n = m_mat.cols();
535 Index nnz = m_mat.nonZeros();
536 Index maxpanel = m_perfv.panel_size * m;
537 // Allocate working storage common to the factor routines
538 Index lwork = 0;
540 if (info)
541 {
542 m_lastError = "UNABLE TO ALLOCATE WORKING MEMORY\n\n" ;
543 m_factorizationIsOk = false;
544 return ;
545 }
546
547 // Set up pointers for integer working arrays
548 IndexVector segrep(m); segrep.setZero();
549 IndexVector parent(m); parent.setZero();
550 IndexVector xplore(m); xplore.setZero();
551 IndexVector repfnz(maxpanel);
552 IndexVector panel_lsub(maxpanel);
553 IndexVector xprune(n); xprune.setZero();
554 IndexVector marker(m*internal::LUNoMarker); marker.setZero();
555
556 repfnz.setConstant(-1);
557 panel_lsub.setConstant(-1);
558
559 // Set up pointers for scalar working arrays
560 ScalarVector dense;
561 dense.setZero(maxpanel);
562 ScalarVector tempv;
563 tempv.setZero(internal::LUnumTempV(m, m_perfv.panel_size, m_perfv.maxsuper, /*m_perfv.rowblk*/m) );
564
565 // Compute the inverse of perm_c
566 PermutationType iperm_c(m_perm_c.inverse());
567
568 // Identify initial relaxed snodes
569 IndexVector relax_end(n);
570 if ( m_symmetricmode == true )
571 Base::heap_relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
572 else
573 Base::relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
574
575
576 m_perm_r.resize(m);
577 m_perm_r.indices().setConstant(-1);
578 marker.setConstant(-1);
579 m_detPermR = 1; // Record the determinant of the row permutation
580
582 m_glu.xsup(0) = m_glu.xlsub(0) = m_glu.xusub(0) = m_glu.xlusup(0) = Index(0);
583
584 // Work on one 'panel' at a time. A panel is one of the following :
585 // (a) a relaxed supernode at the bottom of the etree, or
586 // (b) panel_size contiguous columns, <panel_size> defined by the user
587 Index jcol;
588 IndexVector panel_histo(n);
589 Index pivrow; // Pivotal row number in the original row matrix
590 Index nseg1; // Number of segments in U-column above panel row jcol
591 Index nseg; // Number of segments in each U-column
592 Index irep;
593 Index i, k, jj;
594 for (jcol = 0; jcol < n; )
595 {
596 // Adjust panel size so that a panel won't overlap with the next relaxed snode.
597 Index panel_size = m_perfv.panel_size; // upper bound on panel width
598 for (k = jcol + 1; k < (std::min)(jcol+panel_size, n); k++)
599 {
600 if (relax_end(k) != emptyIdxLU)
601 {
602 panel_size = k - jcol;
603 break;
604 }
605 }
606 if (k == n)
607 panel_size = n - jcol;
608
609 // Symbolic outer factorization on a panel of columns
610 Base::panel_dfs(m, panel_size, jcol, m_mat, m_perm_r.indices(), nseg1, dense, panel_lsub, segrep, repfnz, xprune, marker, parent, xplore, m_glu);
611
612 // Numeric sup-panel updates in topological order
613 Base::panel_bmod(m, panel_size, jcol, nseg1, dense, tempv, segrep, repfnz, m_glu);
614
615 // Sparse LU within the panel, and below the panel diagonal
616 for ( jj = jcol; jj< jcol + panel_size; jj++)
617 {
618 k = (jj - jcol) * m; // Column index for w-wide arrays
619
620 nseg = nseg1; // begin after all the panel segments
621 //Depth-first-search for the current column
622 VectorBlock<IndexVector> panel_lsubk(panel_lsub, k, m);
623 VectorBlock<IndexVector> repfnz_k(repfnz, k, m);
624 info = Base::column_dfs(m, jj, m_perm_r.indices(), m_perfv.maxsuper, nseg, panel_lsubk, segrep, repfnz_k, xprune, marker, parent, xplore, m_glu);
625 if ( info )
626 {
627 m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_DFS() ";
629 m_factorizationIsOk = false;
630 return;
631 }
632 // Numeric updates to this column
633 VectorBlock<ScalarVector> dense_k(dense, k, m);
634 VectorBlock<IndexVector> segrep_k(segrep, nseg1, m-nseg1);
635 info = Base::column_bmod(jj, (nseg - nseg1), dense_k, tempv, segrep_k, repfnz_k, jcol, m_glu);
636 if ( info )
637 {
638 m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_BMOD() ";
640 m_factorizationIsOk = false;
641 return;
642 }
643
644 // Copy the U-segments to ucol(*)
645 info = Base::copy_to_ucol(jj, nseg, segrep, repfnz_k ,m_perm_r.indices(), dense_k, m_glu);
646 if ( info )
647 {
648 m_lastError = "UNABLE TO EXPAND MEMORY IN COPY_TO_UCOL() ";
650 m_factorizationIsOk = false;
651 return;
652 }
653
654 // Form the L-segment
655 info = Base::pivotL(jj, m_diagpivotthresh, m_perm_r.indices(), iperm_c.indices(), pivrow, m_glu);
656 if ( info )
657 {
658 m_lastError = "THE MATRIX IS STRUCTURALLY SINGULAR ... ZERO COLUMN AT ";
659 std::ostringstream returnInfo;
660 returnInfo << info;
661 m_lastError += returnInfo.str();
663 m_factorizationIsOk = false;
664 return;
665 }
666
667 // Update the determinant of the row permutation matrix
668 // FIXME: the following test is not correct, we should probably take iperm_c into account and pivrow is not directly the row pivot.
669 if (pivrow != jj) m_detPermR = -m_detPermR;
670
671 // Prune columns (0:jj-1) using column jj
672 Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu);
673
674 // Reset repfnz for this column
675 for (i = 0; i < nseg; i++)
676 {
677 irep = segrep(i);
678 repfnz_k(irep) = emptyIdxLU;
679 }
680 } // end SparseLU within the panel
681 jcol += panel_size; // Move to the next panel
682 } // end for -- end elimination
683
686
687 // Count the number of nonzeros in factors
689 // Apply permutation to the L subscripts
691
692 // Create supernode matrix L
694 // Create the column major upper sparse matrix U;
695 new (&m_Ustore) MappedSparseMatrix<Scalar, ColMajor, StorageIndex> ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() );
696
697 m_info = Success;
698 m_factorizationIsOk = true;
699}
void resize(Index newSize)
Definition PermutationMatrix.h:136
Index determinant() const
Definition PermutationMatrix.h:253
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar * data() const
Definition PlainObjectBase.h:255
EIGEN_DEVICE_FUNC Derived & setZero(Index size)
Definition CwiseNullaryOp.h:515
EIGEN_DEVICE_FUNC Derived & setConstant(Index size, const Scalar &val)
Definition CwiseNullaryOp.h:341
PermutationType m_perm_r
Definition SparseLU.h:379
ComputationInfo m_info
Definition SparseLU.h:371
internal::perfvalues m_perfv
Definition SparseLU.h:387
Index m_nnzL
Definition SparseLU.h:389
Index m_nnzU
Definition SparseLU.h:389
Base::GlobalLU_t m_glu
Definition SparseLU.h:382
ComputationInfo info() const
Reports whether previous computation was successful.
Definition SparseLU.h:202
Matrix< Scalar, Dynamic, 1 > ScalarVector
Definition SparseLU.h:89
Index nonZeros() const
Definition SparseCompressedBase.h:56
Index rows() const
Definition SparseMatrix.h:136
void setInfos(Index m, Index n, ScalarVector &nzval, IndexVector &nzval_colptr, IndexVector &rowind, IndexVector &rowind_colptr, IndexVector &col_to_sup, IndexVector &sup_to_col)
Definition SparseLU_SupernodalMatrix.h:61
void panel_bmod(const Index m, const Index w, const Index jcol, const Index nseg, ScalarVector &dense, ScalarVector &tempv, IndexVector &segrep, IndexVector &repfnz, GlobalLU_t &glu)
Performs numeric block updates (sup-panel) in topological order.
Definition SparseLU_panel_bmod.h:56
void relax_snode(const Index n, IndexVector &et, const Index relax_columns, IndexVector &descendants, IndexVector &relax_end)
Identify the initial relaxed supernodes.
Definition SparseLU_relax_snode.h:47
void pruneL(const Index jcol, const IndexVector &perm_r, const Index pivrow, const Index nseg, const IndexVector &segrep, BlockIndexVector repfnz, IndexVector &xprune, GlobalLU_t &glu)
Prunes the L-structure.
Definition SparseLU_pruneL.h:53
Index column_dfs(const Index m, const Index jcol, IndexVector &perm_r, Index maxsuper, Index &nseg, BlockIndexVector lsub_col, IndexVector &segrep, BlockIndexVector repfnz, IndexVector &xprune, IndexVector &marker, IndexVector &parent, IndexVector &xplore, GlobalLU_t &glu)
Performs a symbolic factorization on column jcol and decide the supernode boundary.
Definition SparseLU_column_dfs.h:93
void heap_relax_snode(const Index n, IndexVector &et, const Index relax_columns, IndexVector &descendants, IndexVector &relax_end)
Identify the initial relaxed supernodes.
Definition SparseLU_heap_relax_snode.h:46
Index pivotL(const Index jcol, const RealScalar &diagpivotthresh, IndexVector &perm_r, IndexVector &iperm_c, Index &pivrow, GlobalLU_t &glu)
Performs the numerical pivoting on the current column of L, and the CDIV operation.
Definition SparseLU_pivotL.h:60
Index memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size, GlobalLU_t &glu)
Allocate various working space for the numerical factorization phase.
Definition SparseLU_Memory.h:151
void panel_dfs(const Index m, const Index w, const Index jcol, MatrixType &A, IndexVector &perm_r, Index &nseg, ScalarVector &dense, IndexVector &panel_lsub, IndexVector &segrep, IndexVector &repfnz, IndexVector &xprune, IndexVector &marker, IndexVector &parent, IndexVector &xplore, GlobalLU_t &glu)
Performs a symbolic factorization on a panel of columns [jcol, jcol+w)
Definition SparseLU_panel_dfs.h:219
void countnz(const Index n, Index &nnzL, Index &nnzU, GlobalLU_t &glu)
Count Nonzero elements in the factors.
Definition SparseLU_Utils.h:21
void fixupL(const Index n, const IndexVector &perm_r, GlobalLU_t &glu)
Fix up the data storage lsub for L-subscripts.
Definition SparseLU_Utils.h:52
Index column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector &tempv, BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t &glu)
Performs numeric block updates (sup-col) in topological order.
Definition SparseLU_column_bmod.h:53
Index copy_to_ucol(const Index jcol, const Index nseg, IndexVector &segrep, BlockIndexVector repfnz, IndexVector &perm_r, BlockScalarVector dense, GlobalLU_t &glu)
Performs numeric block updates (sup-col) in topological order.
Definition SparseLU_copy_to_ucol.h:50
@ NumericalIssue
Definition Constants.h:434
@ Success
Definition Constants.h:432
Index LUnumTempV(Index &m, Index &w, Index &t, Index &b)
Definition SparseLU_Memory.h:39
IndexVector usub
Definition SparseLU_Structs.h:88
Index relax
Definition SparseLU_Structs.h:98
Index panel_size
Definition SparseLU_Structs.h:97
Index maxsuper
Definition SparseLU_Structs.h:101
IndexVector xsup
Definition SparseLU_Structs.h:79
IndexVector xlusup
Definition SparseLU_Structs.h:83
IndexVector supno
Definition SparseLU_Structs.h:80
Index fillfactor
Definition SparseLU_Structs.h:104
IndexVector lsub
Definition SparseLU_Structs.h:82
IndexVector xusub
Definition SparseLU_Structs.h:89
IndexVector xlsub
Definition SparseLU_Structs.h:84
@ LUNoMarker
Definition SparseLU_Memory.h:37
ScalarVector lusup
Definition SparseLU_Structs.h:81
ScalarVector ucol
Definition SparseLU_Structs.h:87

References eigen_assert, Eigen::internal::emptyIdxLU, Eigen::PermutationMatrix< SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex >::indices(), Eigen::internal::LUNoMarker, Eigen::internal::LUnumTempV(), Eigen::NumericalIssue, Eigen::PlainObjectBase< Derived >::setConstant(), Eigen::PlainObjectBase< Derived >::setZero(), and Eigen::Success.

Referenced by Eigen::SparseLU< _MatrixType, _OrderingType >::compute().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ fixupL()

void SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::fixupL ( const Index  n,
const IndexVector perm_r,
GlobalLU_t glu 
)
protectedinherited

Fix up the data storage lsub for L-subscripts.

It removes the subscripts sets for structural pruning, and applies permutation to the remaining subscripts

53{
54 Index fsupc, i, j, k, jstart;
55
56 StorageIndex nextl = 0;
57 Index nsuper = (glu.supno)(n);
58
59 // For each supernode
60 for (i = 0; i <= nsuper; i++)
61 {
62 fsupc = glu.xsup(i);
63 jstart = glu.xlsub(fsupc);
64 glu.xlsub(fsupc) = nextl;
65 for (j = jstart; j < glu.xlsub(fsupc + 1); j++)
66 {
67 glu.lsub(nextl) = perm_r(glu.lsub(j)); // Now indexed into P*A
68 nextl++;
69 }
70 for (k = fsupc+1; k < glu.xsup(i+1); k++)
71 glu.xlsub(k) = nextl; // other columns in supernode i
72 }
73
74 glu.xlsub(n) = nextl;
75}

◆ heap_relax_snode()

void SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::heap_relax_snode ( const Index  n,
IndexVector et,
const Index  relax_columns,
IndexVector descendants,
IndexVector relax_end 
)
protectedinherited

Identify the initial relaxed supernodes.

This routine applied to a symmetric elimination tree. It assumes that the matrix has been reordered according to the postorder of the etree

Parameters
n — the number of columns
et — elimination tree
relax_columns — maximum number of columns allowed in a relaxed snode
descendants — number of descendants of each node in the etree
relax_end — last column in a supernode
47{
48
 49 // The etree may not be postordered, but it is heap-ordered
50 IndexVector post;
51 internal::treePostorder(StorageIndex(n), et, post); // Post order etree
52 IndexVector inv_post(n+1);
53 for (StorageIndex i = 0; i < n+1; ++i) inv_post(post(i)) = i; // inv_post = post.inverse()???
54
55 // Renumber etree in postorder
56 IndexVector iwork(n);
57 IndexVector et_save(n+1);
58 for (Index i = 0; i < n; ++i)
59 {
60 iwork(post(i)) = post(et(i));
61 }
62 et_save = et; // Save the original etree
63 et = iwork;
64
65 // compute the number of descendants of each node in the etree
66 relax_end.setConstant(emptyIdxLU);
67 Index j, parent;
68 descendants.setZero();
69 for (j = 0; j < n; j++)
70 {
71 parent = et(j);
72 if (parent != n) // not the dummy root
73 descendants(parent) += descendants(j) + 1;
74 }
75 // Identify the relaxed supernodes by postorder traversal of the etree
76 Index snode_start; // beginning of a snode
77 StorageIndex k;
78 Index nsuper_et_post = 0; // Number of relaxed snodes in postordered etree
79 Index nsuper_et = 0; // Number of relaxed snodes in the original etree
80 StorageIndex l;
81 for (j = 0; j < n; )
82 {
83 parent = et(j);
84 snode_start = j;
85 while ( parent != n && descendants(parent) < relax_columns )
86 {
87 j = parent;
88 parent = et(j);
89 }
90 // Found a supernode in postordered etree, j is the last column
91 ++nsuper_et_post;
92 k = StorageIndex(n);
93 for (Index i = snode_start; i <= j; ++i)
94 k = (std::min)(k, inv_post(i));
95 l = inv_post(j);
96 if ( (l - k) == (j - snode_start) ) // Same number of columns in the snode
97 {
98 // This is also a supernode in the original etree
99 relax_end(k) = l; // Record last column
100 ++nsuper_et;
101 }
102 else
103 {
104 for (Index i = snode_start; i <= j; ++i)
105 {
106 l = inv_post(i);
107 if (descendants(i) == 0)
108 {
109 relax_end(l) = l;
110 ++nsuper_et;
111 }
112 }
113 }
114 j++;
115 // Search for a new leaf
116 while (descendants(j) != 0 && j < n) j++;
117 } // End postorder traversal of the etree
118
119 // Recover the original etree
120 et = et_save;
121}
Matrix< _MatrixType::StorageIndex, Dynamic, 1 > IndexVector
Definition SparseLUImpl.h:24

◆ info()

template<typename _MatrixType , typename _OrderingType >
ComputationInfo Eigen::SparseLU< _MatrixType, _OrderingType >::info ( ) const
inline

Reports whether previous computation was successful.

Returns
Success if the computation was successful, NumericalIssue if the LU factorization reports a problem (for instance a zero diagonal), InvalidInput if the input matrix is invalid
See also
iparm()
203 {
204 eigen_assert(m_isInitialized && "Decomposition is not initialized.");
205 return m_info;
206 }

References eigen_assert, Eigen::SparseLU< _MatrixType, _OrderingType >::m_info, and Eigen::SparseSolverBase< SparseLU< _MatrixType, _OrderingType > >::m_isInitialized.

Referenced by igl::copyleft::comiso::FrameInterpolator::interpolateSymmetric().

+ Here is the caller graph for this function:

◆ initperfvalues()

template<typename _MatrixType , typename _OrderingType >
void Eigen::SparseLU< _MatrixType, _OrderingType >::initperfvalues ( )
inlineprotected
361 {
362 m_perfv.panel_size = 16;
363 m_perfv.relax = 1;
364 m_perfv.maxsuper = 128;
365 m_perfv.rowblk = 16;
366 m_perfv.colblk = 8;
367 m_perfv.fillfactor = 20;
368 }
Index rowblk
Definition SparseLU_Structs.h:102
Index colblk
Definition SparseLU_Structs.h:103

References Eigen::internal::perfvalues::colblk, Eigen::internal::perfvalues::fillfactor, Eigen::SparseLU< _MatrixType, _OrderingType >::m_perfv, Eigen::internal::perfvalues::maxsuper, Eigen::internal::perfvalues::panel_size, Eigen::internal::perfvalues::relax, and Eigen::internal::perfvalues::rowblk.

Referenced by Eigen::SparseLU< _MatrixType, _OrderingType >::SparseLU(), and Eigen::SparseLU< _MatrixType, _OrderingType >::SparseLU().

+ Here is the caller graph for this function:

◆ isSymmetric()

template<typename _MatrixType , typename _OrderingType >
void Eigen::SparseLU< _MatrixType, _OrderingType >::isSymmetric ( bool  sym)
inline

Indicate that the pattern of the input matrix is symmetric

136 {
137 m_symmetricmode = sym;
138 }

References Eigen::SparseLU< _MatrixType, _OrderingType >::m_symmetricmode.

◆ lastErrorMessage()

template<typename _MatrixType , typename _OrderingType >
std::string Eigen::SparseLU< _MatrixType, _OrderingType >::lastErrorMessage ( ) const
inline
Returns
A string describing the type of error
212 {
213 return m_lastError;
214 }

References Eigen::SparseLU< _MatrixType, _OrderingType >::m_lastError.

◆ logAbsDeterminant()

template<typename _MatrixType , typename _OrderingType >
Scalar Eigen::SparseLU< _MatrixType, _OrderingType >::logAbsDeterminant ( ) const
inline
Returns
the natural log of the absolute value of the determinant of the matrix of which *this is the LU decomposition
Note
This method is useful to work around the risk of overflow/underflow that's inherent to the determinant computation.
See also
absDeterminant(), signDeterminant()
284 {
285 using std::log;
286 using std::abs;
287
288 eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
289 Scalar det = Scalar(0.);
290 for (Index j = 0; j < this->cols(); ++j)
291 {
292 for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
293 {
294 if(it.row() < j) continue;
295 if(it.row() == j)
296 {
297 det += log(abs(it.value()));
298 break;
299 }
300 }
301 }
302 return det;
303 }
EIGEN_DEVICE_FUNC const LogReturnType log() const
Definition ArrayCwiseUnaryOps.h:105

References Eigen::SparseLU< _MatrixType, _OrderingType >::cols(), eigen_assert, log(), Eigen::SparseLU< _MatrixType, _OrderingType >::m_factorizationIsOk, and Eigen::SparseLU< _MatrixType, _OrderingType >::m_Lstore.

+ Here is the call graph for this function:

◆ matrixL()

template<typename _MatrixType , typename _OrderingType >
SparseLUMatrixLReturnType< SCMatrix > Eigen::SparseLU< _MatrixType, _OrderingType >::matrixL ( ) const
inline
Returns
an expression of the matrix L, internally stored as supernodes The only operation available with this expression is the triangular solve
y = b; matrixL().solveInPlace(y);
147 {
148 return SparseLUMatrixLReturnType<SCMatrix>(m_Lstore);
149 }

References Eigen::SparseLU< _MatrixType, _OrderingType >::m_Lstore.

Referenced by Eigen::SparseLU< _MatrixType, _OrderingType >::_solve_impl().

+ Here is the caller graph for this function:

◆ matrixU()

template<typename _MatrixType , typename _OrderingType >
SparseLUMatrixUReturnType< SCMatrix, MappedSparseMatrix< Scalar, ColMajor, StorageIndex > > Eigen::SparseLU< _MatrixType, _OrderingType >::matrixU ( ) const
inline
Returns
an expression of the matrix U, The only operation available with this expression is the triangular solve
y = b; matrixU().solveInPlace(y);
157 {
158 return SparseLUMatrixUReturnType<SCMatrix, MappedSparseMatrix<Scalar,ColMajor,StorageIndex> >(m_Lstore, m_Ustore);
159 }

References Eigen::SparseLU< _MatrixType, _OrderingType >::m_Lstore, and Eigen::SparseLU< _MatrixType, _OrderingType >::m_Ustore.

Referenced by Eigen::SparseLU< _MatrixType, _OrderingType >::_solve_impl().

+ Here is the caller graph for this function:

◆ memInit()

Index SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::memInit ( Index  m,
Index  n,
Index  annz,
Index  lwork,
Index  fillratio,
Index  panel_size,
GlobalLU_t glu 
)
protectedinherited

Allocate various working space for the numerical factorization phase.

Parameters
m — number of rows of the input matrix
n — number of columns
annz — number of initial nonzeros in the matrix
lwork — if lwork=-1, this routine returns an estimated size of the required memory
glu — persistent data to facilitate multiple factors: will be deleted later??
fillratio — estimated ratio of fill in the factors
panel_size — size of a panel
Returns
an estimated size of the required memory if lwork = -1; otherwise, return the size of actually allocated memory when allocation failed, and 0 on success
Note
Unlike SuperLU, this routine does not support successive factorization with the same pattern and the same row permutation
152{
153 Index& num_expansions = glu.num_expansions; //No memory expansions so far
154 num_expansions = 0;
155 glu.nzumax = glu.nzlumax = (std::min)(fillratio * (annz+1) / n, m) * n; // estimated number of nonzeros in U
156 glu.nzlmax = (std::max)(Index(4), fillratio) * (annz+1) / 4; // estimated nnz in L factor
157 // Return the estimated size to the user if necessary
158 Index tempSpace;
159 tempSpace = (2*panel_size + 4 + LUNoMarker) * m * sizeof(Index) + (panel_size + 1) * m * sizeof(Scalar);
160 if (lwork == emptyIdxLU)
161 {
162 Index estimated_size;
163 estimated_size = (5 * n + 5) * sizeof(Index) + tempSpace
164 + (glu.nzlmax + glu.nzumax) * sizeof(Index) + (glu.nzlumax+glu.nzumax) * sizeof(Scalar) + n;
165 return estimated_size;
166 }
167
168 // Setup the required space
169
170 // First allocate Integer pointers for L\U factors
171 glu.xsup.resize(n+1);
172 glu.supno.resize(n+1);
173 glu.xlsub.resize(n+1);
174 glu.xlusup.resize(n+1);
175 glu.xusub.resize(n+1);
176
177 // Reserve memory for L/U factors
178 do
179 {
180 if( (expand<ScalarVector>(glu.lusup, glu.nzlumax, 0, 0, num_expansions)<0)
181 || (expand<ScalarVector>(glu.ucol, glu.nzumax, 0, 0, num_expansions)<0)
182 || (expand<IndexVector> (glu.lsub, glu.nzlmax, 0, 0, num_expansions)<0)
183 || (expand<IndexVector> (glu.usub, glu.nzumax, 0, 1, num_expansions)<0) )
184 {
185 //Reduce the estimated size and retry
186 glu.nzlumax /= 2;
187 glu.nzumax /= 2;
188 glu.nzlmax /= 2;
189 if (glu.nzlumax < annz ) return glu.nzlumax;
190 }
191 } while (!glu.lusup.size() || !glu.ucol.size() || !glu.lsub.size() || !glu.usub.size());
192
193 ++num_expansions;
194 return 0;
195
196} // end LuMemInit

◆ memXpand()

Index SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::memXpand ( VectorType &  vec,
Index maxlen,
Index  nbElts,
MemType  memtype,
Index num_expansions 
)
protectedinherited

Expand the existing storage.

Parameters
vec — vector to expand
[in,out] maxlen — on input, previous size of vec (number of elements to copy); on output, new size
nbElts — current number of elements in the vector
memtype — type of the element to expand
num_expansions — number of expansions
Returns
0 on success, > 0 size of the memory allocated so far
210{
211 Index failed_size;
212 if (memtype == USUB)
213 failed_size = this->expand<VectorType>(vec, maxlen, nbElts, 1, num_expansions);
214 else
215 failed_size = this->expand<VectorType>(vec, maxlen, nbElts, 0, num_expansions);
216
217 if (failed_size)
218 return failed_size;
219
220 return 0 ;
221}

◆ panel_bmod()

void SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::panel_bmod ( const Index  m,
const Index  w,
const Index  jcol,
const Index  nseg,
ScalarVector dense,
ScalarVector tempv,
IndexVector segrep,
IndexVector repfnz,
GlobalLU_t glu 
)
protectedinherited

Performs numeric block updates (sup-panel) in topological order.

Before entering this routine, the original nonzeros in the panel were already copied into the spa[m,w]

Parameters
m — number of rows in the matrix
w — panel size
jcol — starting column of the panel
nseg — number of segments in the U part
dense — store the full representation of the panel
tempv — working array
segrep — segment representative, i.e. first row in the segment
repfnz — first nonzero rows
glu — global LU data
59{
60
61 Index ksub,jj,nextl_col;
62 Index fsupc, nsupc, nsupr, nrow;
63 Index krep, kfnz;
64 Index lptr; // points to the row subscripts of a supernode
65 Index luptr; // ...
66 Index segsize,no_zeros ;
67 // For each nonz supernode segment of U[*,j] in topological order
68 Index k = nseg - 1;
69 const Index PacketSize = internal::packet_traits<Scalar>::size;
70
71 for (ksub = 0; ksub < nseg; ksub++)
72 { // For each updating supernode
73 /* krep = representative of current k-th supernode
74 * fsupc = first supernodal column
75 * nsupc = number of columns in a supernode
76 * nsupr = number of rows in a supernode
77 */
78 krep = segrep(k); k--;
79 fsupc = glu.xsup(glu.supno(krep));
80 nsupc = krep - fsupc + 1;
81 nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc);
82 nrow = nsupr - nsupc;
83 lptr = glu.xlsub(fsupc);
84
85 // loop over the panel columns to detect the actual number of columns and rows
86 Index u_rows = 0;
87 Index u_cols = 0;
88 for (jj = jcol; jj < jcol + w; jj++)
89 {
90 nextl_col = (jj-jcol) * m;
91 VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
92
93 kfnz = repfnz_col(krep);
94 if ( kfnz == emptyIdxLU )
95 continue; // skip any zero segment
96
97 segsize = krep - kfnz + 1;
98 u_cols++;
99 u_rows = (std::max)(segsize,u_rows);
100 }
101
102 if(nsupc >= 2)
103 {
104 Index ldu = internal::first_multiple<Index>(u_rows, PacketSize);
105 Map<ScalarMatrix, Aligned, OuterStride<> > U(tempv.data(), u_rows, u_cols, OuterStride<>(ldu));
106
107 // gather U
108 Index u_col = 0;
109 for (jj = jcol; jj < jcol + w; jj++)
110 {
111 nextl_col = (jj-jcol) * m;
112 VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
113 VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here
114
115 kfnz = repfnz_col(krep);
116 if ( kfnz == emptyIdxLU )
117 continue; // skip any zero segment
118
119 segsize = krep - kfnz + 1;
120 luptr = glu.xlusup(fsupc);
121 no_zeros = kfnz - fsupc;
122
123 Index isub = lptr + no_zeros;
124 Index off = u_rows-segsize;
125 for (Index i = 0; i < off; i++) U(i,u_col) = 0;
126 for (Index i = 0; i < segsize; i++)
127 {
128 Index irow = glu.lsub(isub);
129 U(i+off,u_col) = dense_col(irow);
130 ++isub;
131 }
132 u_col++;
133 }
134 // solve U = A^-1 U
135 luptr = glu.xlusup(fsupc);
136 Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc);
137 no_zeros = (krep - u_rows + 1) - fsupc;
138 luptr += lda * no_zeros + no_zeros;
139 MappedMatrixBlock A(glu.lusup.data()+luptr, u_rows, u_rows, OuterStride<>(lda) );
140 U = A.template triangularView<UnitLower>().solve(U);
141
142 // update
143 luptr += u_rows;
144 MappedMatrixBlock B(glu.lusup.data()+luptr, nrow, u_rows, OuterStride<>(lda) );
145 eigen_assert(tempv.size()>w*ldu + nrow*w + 1);
146
147 Index ldl = internal::first_multiple<Index>(nrow, PacketSize);
148 Index offset = (PacketSize-internal::first_default_aligned(B.data(), PacketSize)) % PacketSize;
149 MappedMatrixBlock L(tempv.data()+w*ldu+offset, nrow, u_cols, OuterStride<>(ldl));
150
151 L.setZero();
152 internal::sparselu_gemm<Scalar>(L.rows(), L.cols(), B.cols(), B.data(), B.outerStride(), U.data(), U.outerStride(), L.data(), L.outerStride());
153
154 // scatter U and L
155 u_col = 0;
156 for (jj = jcol; jj < jcol + w; jj++)
157 {
158 nextl_col = (jj-jcol) * m;
159 VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
160 VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here
161
162 kfnz = repfnz_col(krep);
163 if ( kfnz == emptyIdxLU )
164 continue; // skip any zero segment
165
166 segsize = krep - kfnz + 1;
167 no_zeros = kfnz - fsupc;
168 Index isub = lptr + no_zeros;
169
170 Index off = u_rows-segsize;
171 for (Index i = 0; i < segsize; i++)
172 {
173 Index irow = glu.lsub(isub++);
174 dense_col(irow) = U.coeff(i+off,u_col);
175 U.coeffRef(i+off,u_col) = 0;
176 }
177
178 // Scatter l into SPA dense[]
179 for (Index i = 0; i < nrow; i++)
180 {
181 Index irow = glu.lsub(isub++);
182 dense_col(irow) -= L.coeff(i,u_col);
183 L.coeffRef(i,u_col) = 0;
184 }
185 u_col++;
186 }
187 }
188 else // level 2 only
189 {
190 // Sequence through each column in the panel
191 for (jj = jcol; jj < jcol + w; jj++)
192 {
193 nextl_col = (jj-jcol) * m;
194 VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
195 VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here
196
197 kfnz = repfnz_col(krep);
198 if ( kfnz == emptyIdxLU )
199 continue; // skip any zero segment
200
201 segsize = krep - kfnz + 1;
202 luptr = glu.xlusup(fsupc);
203
204 Index lda = glu.xlusup(fsupc+1)-glu.xlusup(fsupc);// nsupr
205
206 // Perform a trianglar solve and block update,
207 // then scatter the result of sup-col update to dense[]
208 no_zeros = kfnz - fsupc;
209 if(segsize==1) LU_kernel_bmod<1>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
210 else if(segsize==2) LU_kernel_bmod<2>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
211 else if(segsize==3) LU_kernel_bmod<3>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
212 else LU_kernel_bmod<Dynamic>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
213 } // End for each column in the panel
214 }
215
216 } // End for each updating supernode
217} // end panel bmod
#define L(s)
Definition I18N.hpp:18

◆ panel_dfs()

void SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::panel_dfs ( const Index  m,
const Index  w,
const Index  jcol,
MatrixType A,
IndexVector perm_r,
Index nseg,
ScalarVector dense,
IndexVector panel_lsub,
IndexVector segrep,
IndexVector repfnz,
IndexVector xprune,
IndexVector marker,
IndexVector parent,
IndexVector xplore,
GlobalLU_t glu 
)
protectedinherited

Performs a symbolic factorization on a panel of columns [jcol, jcol+w)

A supernode representative is the last column of a supernode. The nonzeros in U[*,j] are segments that end at supernodes representatives

The routine returns a list of the supernodal representatives in topological order of the dfs that generates them. This list is a superset of the topological order of each individual column within the panel. The location of the first nonzero in each supernodal segment (supernodal entry location) is also returned. Each column has a separate list for this purpose.

Two markers arrays are used for dfs : marker[i] == jj, if i was visited during dfs of current column jj; marker1[i] >= jcol, if i was visited by earlier columns in this panel;

Parameters
[in] m — number of rows in the matrix
[in] w — panel size
[in] jcol — starting column of the panel
[in] A — input matrix in column-major storage
[in] perm_r — row permutation
[out] nseg — number of U segments
[out] dense — accumulate the column vectors of the panel
[out] panel_lsub — subscripts of the row in the panel
[out] segrep — segment representative, i.e. first nonzero row of each segment
[out] repfnz — first nonzero location in each row
[out] xprune — the pruned elimination tree
[out] marker — work vector
parent — the elimination tree
xplore — work vector
glu — the global data structure
220{
221 Index nextl_col; // Next available position in panel_lsub[*,jj]
222
223 // Initialize pointers
224 VectorBlock<IndexVector> marker1(marker, m, m);
225 nseg = 0;
226
227 panel_dfs_traits<IndexVector> traits(jcol, marker1.data());
228
229 // For each column in the panel
230 for (StorageIndex jj = StorageIndex(jcol); jj < jcol + w; jj++)
231 {
232 nextl_col = (jj - jcol) * m;
233
234 VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero location in each row
235 VectorBlock<ScalarVector> dense_col(dense,nextl_col, m); // Accumulate a column vector here
236
237
238 // For each nnz in A[*, jj] do depth first search
239 for (typename MatrixType::InnerIterator it(A, jj); it; ++it)
240 {
241 Index krow = it.row();
242 dense_col(krow) = it.value();
243
244 StorageIndex kmark = marker(krow);
245 if (kmark == jj)
246 continue; // krow visited before, go to the next nonzero
247
248 dfs_kernel(jj, perm_r, nseg, panel_lsub, segrep, repfnz_col, xprune, marker, parent,
249 xplore, glu, nextl_col, krow, traits);
250 }// end for nonzeros in column jj
251
252 } // end for column jj
253}

◆ pivotL()

Index SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::pivotL ( const Index  jcol,
const RealScalar diagpivotthresh,
IndexVector perm_r,
IndexVector iperm_c,
Index pivrow,
GlobalLU_t glu 
)
protectedinherited

Performs the numerical pivoting on the current column of L, and the CDIV operation.

Pivot policy : (1) Compute thresh = u * max_(i>=j) abs(A_ij); (2) IF user specifies pivot row k and abs(A_kj) >= thresh THEN pivot row = k; ELSE IF abs(A_jj) >= thresh THEN pivot row = j; ELSE pivot row = m;

Note: If you absolutely want to use a given pivot order, then set u=0.0.

Parameters
jcol — the current column of L
diagpivotthresh — diagonal pivoting threshold
[in,out] perm_r — row permutation (threshold pivoting)
[in] iperm_c — column permutation, used to find the diagonal of Pc*A*Pc'
[out] pivrow — the pivot row
glu — global LU data
Returns
0 if success, i > 0 if U(i,i) is exactly zero
61{
62
63 Index fsupc = (glu.xsup)((glu.supno)(jcol)); // First column in the supernode containing the column jcol
64 Index nsupc = jcol - fsupc; // Number of columns in the supernode portion, excluding jcol; nsupc >=0
65 Index lptr = glu.xlsub(fsupc); // pointer to the starting location of the row subscripts for this supernode portion
66 Index nsupr = glu.xlsub(fsupc+1) - lptr; // Number of rows in the supernode
67 Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc); // leading dimension
68 Scalar* lu_sup_ptr = &(glu.lusup.data()[glu.xlusup(fsupc)]); // Start of the current supernode
69 Scalar* lu_col_ptr = &(glu.lusup.data()[glu.xlusup(jcol)]); // Start of jcol in the supernode
70 StorageIndex* lsub_ptr = &(glu.lsub.data()[lptr]); // Start of row indices of the supernode
71
72 // Determine the largest abs numerical value for partial pivoting
73 Index diagind = iperm_c(jcol); // diagonal index
74 RealScalar pivmax(-1.0);
 75 Index pivptr = nsupc;
 76 Index diag = emptyIdxLU;
 77 RealScalar rtemp;
78 Index isub, icol, itemp, k;
79 for (isub = nsupc; isub < nsupr; ++isub) {
80 using std::abs;
81 rtemp = abs(lu_col_ptr[isub]);
82 if (rtemp > pivmax) {
83 pivmax = rtemp;
84 pivptr = isub;
85 }
86 if (lsub_ptr[isub] == diagind) diag = isub;
87 }
88
89 // Test for singularity
90 if ( pivmax <= RealScalar(0.0) ) {
91 // if pivmax == -1, the column is structurally empty, otherwise it is only numerically zero
92 pivrow = pivmax < RealScalar(0.0) ? diagind : lsub_ptr[pivptr];
93 perm_r(pivrow) = StorageIndex(jcol);
94 return (jcol+1);
95 }
96
97 RealScalar thresh = diagpivotthresh * pivmax;
98
99 // Choose appropriate pivotal element
100
101 {
102 // Test if the diagonal element can be used as a pivot (given the threshold value)
103 if (diag >= 0 )
104 {
105 // Diagonal element exists
106 using std::abs;
107 rtemp = abs(lu_col_ptr[diag]);
108 if (rtemp != RealScalar(0.0) && rtemp >= thresh) pivptr = diag;
109 }
110 pivrow = lsub_ptr[pivptr];
111 }
112
113 // Record pivot row
114 perm_r(pivrow) = StorageIndex(jcol);
115 // Interchange row subscripts
116 if (pivptr != nsupc )
117 {
118 std::swap( lsub_ptr[pivptr], lsub_ptr[nsupc] );
119 // Interchange numerical values as well, for the two rows in the whole snode
120 // such that L is indexed the same way as A
121 for (icol = 0; icol <= nsupc; icol++)
122 {
123 itemp = pivptr + icol * lda;
124 std::swap(lu_sup_ptr[itemp], lu_sup_ptr[nsupc + icol * lda]);
125 }
126 }
127 // cdiv operations
128 Scalar temp = Scalar(1.0) / lu_col_ptr[nsupc];
129 for (k = nsupc+1; k < nsupr; k++)
130 lu_col_ptr[k] *= temp;
131 return 0;
132}
ScalarVector::RealScalar RealScalar
Definition SparseLUImpl.h:27
IGL_INLINE void diag(const Eigen::SparseMatrix< T > &X, Eigen::SparseVector< T > &V)
Definition diag.cpp:17

◆ pruneL()

void SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::pruneL ( const Index  jcol,
const IndexVector perm_r,
const Index  pivrow,
const Index  nseg,
const IndexVector segrep,
BlockIndexVector  repfnz,
IndexVector xprune,
GlobalLU_t glu 
)
protectedinherited

Prunes the L-structure.

It prunes the L-structure of supernodes whose L-structure contains the current pivot row "pivrow"

Parameters
jcol — the current column of L
[in] perm_r — row permutation
[out] pivrow — the pivot row
nseg — number of segments
segrep — segment representatives
repfnz — first nonzero locations in each row
[out] xprune — pruned L-structure pointers
glu — global LU data
55{
56 // For each supernode-rep irep in U(*,j]
57 Index jsupno = glu.supno(jcol);
58 Index i,irep,irep1;
59 bool movnum, do_prune = false;
60 Index kmin = 0, kmax = 0, minloc, maxloc,krow;
61 for (i = 0; i < nseg; i++)
62 {
63 irep = segrep(i);
64 irep1 = irep + 1;
65 do_prune = false;
66
67 // Don't prune with a zero U-segment
68 if (repfnz(irep) == emptyIdxLU) continue;
69
70 // If a snode overlaps with the next panel, then the U-segment
71 // is fragmented into two parts -- irep and irep1. We should let
72 // pruning occur at the rep-column in irep1s snode.
73 if (glu.supno(irep) == glu.supno(irep1) ) continue; // don't prune
74
75 // If it has not been pruned & it has a nonz in row L(pivrow,i)
76 if (glu.supno(irep) != jsupno )
77 {
78 if ( xprune (irep) >= glu.xlsub(irep1) )
79 {
80 kmin = glu.xlsub(irep);
81 kmax = glu.xlsub(irep1) - 1;
82 for (krow = kmin; krow <= kmax; krow++)
83 {
84 if (glu.lsub(krow) == pivrow)
85 {
86 do_prune = true;
87 break;
88 }
89 }
90 }
91
92 if (do_prune)
93 {
94 // do a quicksort-type partition
95 // movnum=true means that the num values have to be exchanged
96 movnum = false;
97 if (irep == glu.xsup(glu.supno(irep)) ) // Snode of size 1
98 movnum = true;
99
100 while (kmin <= kmax)
101 {
102 if (perm_r(glu.lsub(kmax)) == emptyIdxLU)
103 kmax--;
104 else if ( perm_r(glu.lsub(kmin)) != emptyIdxLU)
105 kmin++;
106 else
107 {
108 // kmin below pivrow (not yet pivoted), and kmax
109 // above pivrow: interchange the two suscripts
110 std::swap(glu.lsub(kmin), glu.lsub(kmax));
111
112 // If the supernode has only one column, then we
113 // only keep one set of subscripts. For any subscript
114 // intercnahge performed, similar interchange must be
115 // done on the numerical values.
116 if (movnum)
117 {
118 minloc = glu.xlusup(irep) + ( kmin - glu.xlsub(irep) );
119 maxloc = glu.xlusup(irep) + ( kmax - glu.xlsub(irep) );
120 std::swap(glu.lusup(minloc), glu.lusup(maxloc));
121 }
122 kmin++;
123 kmax--;
124 }
125 } // end while
126
127 xprune(irep) = StorageIndex(kmin); //Pruning
128 } // end if do_prune
129 } // end pruning
130 } // End for each U-segment
131}

◆ relax_snode()

void SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::relax_snode ( const Index  n,
IndexVector et,
const Index  relax_columns,
IndexVector descendants,
IndexVector relax_end 
)
protectedinherited

Identify the initial relaxed supernodes.

This routine is applied to a column elimination tree. It assumes that the matrix has been reordered according to the postorder of the etree

Parameters
n — the number of columns
et — elimination tree
relax_columns — maximum number of columns allowed in a relaxed snode
descendants — number of descendants of each node in the etree
relax_end — last column in a supernode
48{
49
50 // compute the number of descendants of each node in the etree
51 Index parent;
52 relax_end.setConstant(emptyIdxLU);
53 descendants.setZero();
54 for (Index j = 0; j < n; j++)
55 {
56 parent = et(j);
57 if (parent != n) // not the dummy root
58 descendants(parent) += descendants(j) + 1;
59 }
60 // Identify the relaxed supernodes by postorder traversal of the etree
61 Index snode_start; // beginning of a snode
62 for (Index j = 0; j < n; )
63 {
64 parent = et(j);
65 snode_start = j;
66 while ( parent != n && descendants(parent) < relax_columns )
67 {
68 j = parent;
69 parent = et(j);
70 }
71 // Found a supernode in postordered etree, j is the last column
72 relax_end(snode_start) = StorageIndex(j); // Record last column
73 j++;
74 // Search for a new leaf
75 while (descendants(j) != 0 && j < n) j++;
76 } // End postorder traversal of the etree
77
78}

◆ rows()

template<typename _MatrixType , typename _OrderingType >
Index Eigen::SparseLU< _MatrixType, _OrderingType >::rows ( ) const
inline
132{ return m_mat.rows(); }

References Eigen::SparseLU< _MatrixType, _OrderingType >::m_mat, and Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::rows().

+ Here is the call graph for this function:

◆ rowsPermutation()

template<typename _MatrixType , typename _OrderingType >
const PermutationType & Eigen::SparseLU< _MatrixType, _OrderingType >::rowsPermutation ( ) const
inline
Returns
a reference to the row permutation matrix P_r such that P_r A P_c^T = L U
See also
colsPermutation()
166 {
167 return m_perm_r;
168 }

References Eigen::SparseLU< _MatrixType, _OrderingType >::m_perm_r.

Referenced by Eigen::SparseLU< _MatrixType, _OrderingType >::_solve_impl().

+ Here is the caller graph for this function:

◆ setPivotThreshold()

template<typename _MatrixType , typename _OrderingType >
void Eigen::SparseLU< _MatrixType, _OrderingType >::setPivotThreshold ( const RealScalar thresh)
inline

Set the threshold used for a diagonal entry to be an acceptable pivot.

179 {
180 m_diagpivotthresh = thresh;
181 }

References Eigen::SparseLU< _MatrixType, _OrderingType >::m_diagpivotthresh.

◆ signDeterminant()

template<typename _MatrixType , typename _OrderingType >
Scalar Eigen::SparseLU< _MatrixType, _OrderingType >::signDeterminant ( )
inline
Returns
A number representing the sign of the determinant
See also
absDeterminant(), logAbsDeterminant()
310 {
311 eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
312 // Initialize with the determinant of the row matrix
313 Index det = 1;
314 // Note that the diagonal blocks of U are stored in supernodes,
315 // which are available in the L part :)
316 for (Index j = 0; j < this->cols(); ++j)
317 {
318 for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
319 {
320 if(it.index() == j)
321 {
322 if(it.value()<0)
323 det = -det;
324 else if(it.value()==0)
325 return 0;
326 break;
327 }
328 }
329 }
330 return det * m_detPermR * m_detPermC;
331 }

References Eigen::SparseLU< _MatrixType, _OrderingType >::cols(), eigen_assert, Eigen::SparseLU< _MatrixType, _OrderingType >::m_detPermC, Eigen::SparseLU< _MatrixType, _OrderingType >::m_detPermR, Eigen::SparseLU< _MatrixType, _OrderingType >::m_factorizationIsOk, and Eigen::SparseLU< _MatrixType, _OrderingType >::m_Lstore.

+ Here is the call graph for this function:

◆ simplicialfactorize()

template<typename _MatrixType , typename _OrderingType >
void Eigen::SparseLU< _MatrixType, _OrderingType >::simplicialfactorize ( const MatrixType matrix)

◆ snode_bmod()

Index Eigen::internal::SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::snode_bmod ( const Index  jcol,
const Index  fsupc,
ScalarVector dense,
GlobalLU_t glu 
)
protectedinherited

◆ snode_dfs()

Index Eigen::internal::SparseLUImpl< _MatrixType::Scalar , _MatrixType::StorageIndex >::snode_dfs ( const Index  jcol,
const Index  kcol,
const MatrixType mat,
IndexVector xprune,
IndexVector marker,
GlobalLU_t glu 
)
protectedinherited

◆ solve() [1/2]

const Solve< SparseLU< _MatrixType, _OrderingType > , Rhs > Eigen::SparseSolverBase< SparseLU< _MatrixType, _OrderingType > >::solve ( const MatrixBase< Rhs > &  b) const
inlineinherited
Returns
an expression of the solution x of $ A x = b $ using the current decomposition of A.
See also
compute()
89 {
90 eigen_assert(m_isInitialized && "Solver is not initialized.");
91 eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
92 return Solve<Derived, Rhs>(derived(), b.derived());
93 }
size_t rows(const T &raster)
Definition MarchingSquares.hpp:55

◆ solve() [2/2]

const Solve< SparseLU< _MatrixType, _OrderingType > , Rhs > Eigen::SparseSolverBase< SparseLU< _MatrixType, _OrderingType > >::solve ( const SparseMatrixBase< Rhs > &  b) const
inlineinherited
Returns
an expression of the solution x of $ A x = b $ using the current decomposition of A.
See also
compute()
102 {
103 eigen_assert(m_isInitialized && "Solver is not initialized.");
104 eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
105 return Solve<Derived, Rhs>(derived(), b.derived());
106 }

Member Data Documentation

◆ m_analysisIsOk

template<typename _MatrixType , typename _OrderingType >
bool Eigen::SparseLU< _MatrixType, _OrderingType >::m_analysisIsOk
protected

◆ m_detPermC

template<typename _MatrixType , typename _OrderingType >
Index Eigen::SparseLU< _MatrixType, _OrderingType >::m_detPermC
protected

◆ m_detPermR

template<typename _MatrixType , typename _OrderingType >
Index Eigen::SparseLU< _MatrixType, _OrderingType >::m_detPermR
protected

◆ m_diagpivotthresh

template<typename _MatrixType , typename _OrderingType >
RealScalar Eigen::SparseLU< _MatrixType, _OrderingType >::m_diagpivotthresh
protected

◆ m_etree

template<typename _MatrixType , typename _OrderingType >
IndexVector Eigen::SparseLU< _MatrixType, _OrderingType >::m_etree
protected

◆ m_factorizationIsOk

◆ m_glu

template<typename _MatrixType , typename _OrderingType >
Base::GlobalLU_t Eigen::SparseLU< _MatrixType, _OrderingType >::m_glu
protected

◆ m_info

template<typename _MatrixType , typename _OrderingType >
ComputationInfo Eigen::SparseLU< _MatrixType, _OrderingType >::m_info
mutableprotected

◆ m_isInitialized

bool Eigen::SparseSolverBase< SparseLU< _MatrixType, _OrderingType > >::m_isInitialized
mutableprotectedinherited

◆ m_lastError

template<typename _MatrixType , typename _OrderingType >
std::string Eigen::SparseLU< _MatrixType, _OrderingType >::m_lastError
protected

◆ m_Lstore

◆ m_mat

template<typename _MatrixType , typename _OrderingType >
NCMatrix Eigen::SparseLU< _MatrixType, _OrderingType >::m_mat
protected

◆ m_nnzL

template<typename _MatrixType , typename _OrderingType >
Index Eigen::SparseLU< _MatrixType, _OrderingType >::m_nnzL
protected

◆ m_nnzU

template<typename _MatrixType , typename _OrderingType >
Index Eigen::SparseLU< _MatrixType, _OrderingType >::m_nnzU
protected

◆ m_perfv

template<typename _MatrixType , typename _OrderingType >
internal::perfvalues Eigen::SparseLU< _MatrixType, _OrderingType >::m_perfv
protected

◆ m_perm_c

template<typename _MatrixType , typename _OrderingType >
PermutationType Eigen::SparseLU< _MatrixType, _OrderingType >::m_perm_c
protected

◆ m_perm_r

template<typename _MatrixType , typename _OrderingType >
PermutationType Eigen::SparseLU< _MatrixType, _OrderingType >::m_perm_r
protected

◆ m_symmetricmode

template<typename _MatrixType , typename _OrderingType >
bool Eigen::SparseLU< _MatrixType, _OrderingType >::m_symmetricmode
protected

◆ m_Ustore

template<typename _MatrixType , typename _OrderingType >
MappedSparseMatrix<Scalar,ColMajor,StorageIndex> Eigen::SparseLU< _MatrixType, _OrderingType >::m_Ustore
protected

The documentation for this class was generated from the following file: