问题
In my application I need to avoid dynamic memory allocation (malloc-like) except in the class constructors. I have a sparse semidefinite matrix M whose elements change during the program execution but it maintains a fixed sparsity pattern.
In order to solve many linear systems M * x = b as fast as possible, the idea is to use inplace decomposition in my class constructor as described in Inplace matrix decompositions, then call factorize method whenever M changes:
struct MyClass {
private:
/** Sparse matrix owned by this object; the solver references it in place. */
SparseMatrix<double> As_;
/** In-place LDLT: taking a Ref<SparseMatrix> makes the solver work directly
    on As_ instead of storing its own copy (see Eigen's "Inplace matrix
    decompositions" documentation). */
SimplicialLDLT<Ref<SparseMatrix<double>>> solver_;
public:
/** Constructor: copies As and runs the full analyze + factorize once.
    All heap allocation is intended to happen here. */
MyClass( const SparseMatrix<double> &As )
: As_( As )
, solver_( As_ ) // In-place decomposition: solver_ keeps a reference to As_
{}
/** Re-factorize after the numerical values of the matrix change.
    @pre As_new has exactly the same sparsity pattern as As_. */
void assign( const SparseMatrix<double> &As_new ) {
// NOTE(review): factorize() still creates a temporary the size of As_
// internally — this is precisely the allocation the question asks about.
solver_.factorize( As_new );
}
/** Solve M * x = b using the current factorization.
    @param b right-hand side vector
    @param x output solution vector */
void solve( const VectorXd &b, VectorXd &x )
{
x = solver_.solve( b );
}
}; // BUG FIX: the original ended with '}' — a class definition requires ';'
However the factorize method still creates one temporary with the same size as As_, thus performing dynamic memory allocation.
Is it possible to avoid it in some way? If the Eigen API does not allow this feature, one idea is to create a derived class of SimplicialLDLT so that dynamic memory allocation is performed only in analyzePattern method that will be called in class constructor. Suggestions are welcome...
回答1:
At the end I found a workaround using CSparse library to get H = P * A * P':
class SparseLDLTLinearSolver {
private:
/** Ordering algorithm */
AMDOrdering<int> ordering_;
/** Ordering P matrix */
PermutationMatrix<Dynamic, Dynamic, int> P_;
/** Inverse of P matrix */
PermutationMatrix<Dynamic, Dynamic, int> P_inv_;
/** Permuted matrix H = P * A * P' */
SparseMatrix<double> H_;
/** H matrix CSparse structure */
cs H_cs_;
/** Support vector for solve */
VectorXd y_;
/** Support permutation vector */
VectorXi w_;
/** LDLT sparse linear solver without ordering */
SimplicialLDLT<SparseMatrix<double>, Upper, NaturalOrdering<int>> solver_;
public:
int SparseLDLTLinearSolver( const SparseMatrix<double> &A )
: P_( A.rows() )
, P_inv_( A.rows() )
, H_( A.rows(), A.rows() )
, y_( A.rows() )
, w_( A.rows() )
{
assert( ( A.rows() == A.cols() ) && "Invalid matrix" );
ordering_( A.selfadjointView<Upper>(), P_inv_ );
P_ = P_inv_.inverse();
H_ = A.triangularView<Upper>();
H_.makeCompressed();
// Fill CSparse structure
H_cs_.nzmax = H_.nonZeros();
H_cs_.m = H_.rows();
H_cs_.n = H_.cols();
H_cs_.p = H_.outerIndexPtr();
H_cs_.i = H_.innerIndexPtr();
H_cs_.x = H_.valuePtr();
H_cs_.nz = -1;
const cs_sparse A_cs{
A.nonZeros(), A.rows(), A.cols(),
const_cast<int*>( A.outerIndexPtr() ),
const_cast<int*>( A.innerIndexPtr() ),
const_cast<double*>( A.valuePtr() ),
-1 };
cs_symperm_noalloc( &A_cs, P_.indices().data(), &H_cs_, w_.data() );
solver_.analyzePattern( H_ );
// Factorize in order to allocate internal data and avoid it on next factorization
solver_.factorize( H_ );
/*.*/
return -solver_.info();
}
int factorize( const Eigen::SparseMatrix<double> &A )
{
assert( ( A.rows() == P_.size() ) && ( A.cols() == P_.size() ) &&
"Invalid matrix size" );
// Fill CSparse structure
const cs_sparse A_cs{
A.nonZeros(), A.rows(), A.cols(),
const_cast<int*>( A.outerIndexPtr() ),
const_cast<int*>( A.innerIndexPtr() ),
const_cast<double*>( A.valuePtr() ),
-1 };
cs_symperm_noalloc( &A_cs, P_.indices().data(), &H_cs_, w_.data() );
solver_.factorize( H_ );
/*.*/
return -solver_.info();
}
void solve( const VectorXd &rhs, VectorXd &x )
{
assert( ( rhs.size() == P_.size() ) && ( x.size() == P_.size() ) &&
"Invalid vector size" );
// Solve (P * A * P') * y = P * b, then return x = P' * y
y_ = solver_.solve( P_ * rhs );
x.noalias() = P_inv_ * y_;
}
};
cs_symperm_noalloc is a minor refactoring of the cs_symperm function of the CSparse library, modified so that it writes into caller-provided storage instead of allocating.
It seems to work, at least with my special problem. In the future it would be very useful if Eigen avoided creating temporaries (into the heap) for some sparse matrix operations.
来源:https://stackoverflow.com/questions/61044017/avoiding-dynamic-memory-allocation-on-factorizing-sparse-matrix-with-eigen