// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// A simple C++ interface to the SuiteSparse and CHOLMOD libraries.

#ifndef CERES_INTERNAL_SUITESPARSE_H_
#define CERES_INTERNAL_SUITESPARSE_H_

// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"

#ifndef CERES_NO_SUITESPARSE

#include <cstring>
#include <string>
#include <vector>

#include "ceres/internal/port.h"
#include "ceres/linear_solver.h"
#include "cholmod.h"
#include "glog/logging.h"
#include "SuiteSparseQR.hpp"

// Before SuiteSparse version 4.2.0, cholmod_camd was only enabled
// if SuiteSparse was compiled with Metis support. This makes
// calling and linking into cholmod_camd problematic even though it
// has nothing to do with Metis. This has been fixed reliably in
// 4.2.0.
//
// The fix was actually committed in 4.1.0, but there is
// some confusion about a silent update to the tar ball, so we are
// being conservative and choosing the next minor version where
// things are stable.
#if (SUITESPARSE_VERSION < 4002)
#define CERES_NO_CAMD
#endif
64
65// UF_long is deprecated but SuiteSparse_long is only available in
66// newer versions of SuiteSparse. So for older versions of
67// SuiteSparse, we define SuiteSparse_long to be the same as UF_long,
68// which is what recent versions of SuiteSparse do anyways.
69#ifndef SuiteSparse_long
70#define SuiteSparse_long UF_long
71#endif
72
73namespace ceres {
74namespace internal {
75
76class CompressedRowSparseMatrix;
77class TripletSparseMatrix;
78
// The raw CHOLMOD and SuiteSparseQR libraries have a slightly
// cumbersome C-like calling format. This object abstracts it away and
// provides the user with a simpler interface. The methods here cannot
// be static as a cholmod_common object serves as a global variable
// for all cholmod function calls.
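//
// A minimal usage sketch of the factor-and-solve path. Here A is
// assumed to be a TripletSparseMatrix, and b / num_rows are
// placeholder names for the right hand side array and its size;
// error handling is elided.
//
//   SuiteSparse ss;
//   string message;
//   cholmod_sparse* lhs = ss.CreateSparseMatrix(A);
//   cholmod_dense* rhs = ss.CreateDenseVector(b, num_rows, num_rows);
//   cholmod_factor* factor = ss.AnalyzeCholesky(lhs, &message);
//   ss.Cholesky(lhs, factor, &message);
//   cholmod_dense* solution = ss.Solve(factor, rhs, &message);
//   if (solution == NULL) {
//     LOG(ERROR) << "Solve failed: " << message;
//   }
//   // ... use the solution ...
//   ss.Free(solution);
//   ss.Free(factor);
//   ss.Free(rhs);
//   ss.Free(lhs);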
class SuiteSparse {
 public:
  SuiteSparse();
  ~SuiteSparse();

  // Functions for building cholmod_sparse objects from sparse
  // matrices stored in triplet form. The matrix A is not
  // modified. Caller owns the result.
  cholmod_sparse* CreateSparseMatrix(TripletSparseMatrix* A);

  // This function works like CreateSparseMatrix, except that the
  // return value corresponds to A' rather than A.
  cholmod_sparse* CreateSparseMatrixTranspose(TripletSparseMatrix* A);

  // Create a cholmod_sparse wrapper around the contents of A. This is
  // a shallow object, which refers to the contents of A and does not
  // use the SuiteSparse machinery to allocate memory.
  cholmod_sparse CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A);

  // Given a vector x, build a cholmod_dense vector of size out_size
  // with the first in_size entries copied from x. If x is NULL, then
  // an all zeros vector is returned. Caller owns the result.
  cholmod_dense* CreateDenseVector(const double* x, int in_size, int out_size);

  // The matrix A is scaled using the matrix whose diagonal is the
  // vector scale. mode describes how scaling is applied. Possible
  // values are CHOLMOD_ROW for row scaling - diag(scale) * A,
  // CHOLMOD_COL for column scaling - A * diag(scale) and CHOLMOD_SYM
  // for symmetric scaling which scales both the rows and the columns
  // - diag(scale) * A * diag(scale).
  void Scale(cholmod_dense* scale, int mode, cholmod_sparse* A) {
    cholmod_scale(scale, mode, A, &cc_);
  }

  // Create and return a matrix m = A * A'. Caller owns the
  // result. The matrix A is not modified.
  cholmod_sparse* AATranspose(cholmod_sparse* A) {
    cholmod_sparse* m = cholmod_aat(A, NULL, A->nrow, 1, &cc_);
    m->stype = 1;  // Pay attention to the upper triangular part.
    return m;
  }

  // y = alpha * A * x + beta * y. Only y is modified.
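  //
  // For example, if y holds a copy of b, then
  //
  //   SparseDenseMultiply(A, -1.0, 1.0, x, y);
  //
  // overwrites y with the residual b - A * x.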
  void SparseDenseMultiply(cholmod_sparse* A, double alpha, double beta,
                           cholmod_dense* x, cholmod_dense* y) {
    double alpha_[2] = {alpha, 0};
    double beta_[2] = {beta, 0};
    cholmod_sdmult(A, 0, alpha_, beta_, x, y, &cc_);
  }

  // Find an ordering of A or AA' (if A is unsymmetric) that minimizes
  // the fill-in in the Cholesky factorization of the corresponding
  // matrix. This is done by using the AMD algorithm.
  //
  // Using this ordering, the symbolic Cholesky factorization of A (or
  // AA') is computed and returned.
  //
  // A is not modified, only the pattern of non-zeros of A is used,
  // the actual numerical values in A are of no consequence.
  //
  // message contains an explanation of the failures if any.
  //
  // Caller owns the result.
  cholmod_factor* AnalyzeCholesky(cholmod_sparse* A, string* message);

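  // Like AnalyzeCholesky, but the fill-reducing ordering is computed on
  // the block structure of A given by row_blocks and col_blocks (see
  // BlockAMDOrdering below) and lifted to a scalar ordering. Caller
  // owns the result.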
  cholmod_factor* BlockAnalyzeCholesky(cholmod_sparse* A,
                                       const vector<int>& row_blocks,
                                       const vector<int>& col_blocks,
                                       string* message);

  // If A is symmetric, then compute the symbolic Cholesky
  // factorization of A(ordering, ordering). If A is unsymmetric, then
  // compute the symbolic factorization of
  // A(ordering,:) A(ordering,:)'.
  //
  // A is not modified, only the pattern of non-zeros of A is used,
  // the actual numerical values in A are of no consequence.
  //
  // message contains an explanation of the failures if any.
  //
  // Caller owns the result.
  cholmod_factor* AnalyzeCholeskyWithUserOrdering(cholmod_sparse* A,
                                                  const vector<int>& ordering,
                                                  string* message);

  // Perform a symbolic factorization of A without re-ordering A. No
  // postordering of the elimination tree is performed. This ensures
  // that the symbolic factor does not introduce an extra permutation
  // on the matrix. See the documentation for CHOLMOD for more details.
  //
  // message contains an explanation of the failures if any.
  cholmod_factor* AnalyzeCholeskyWithNaturalOrdering(cholmod_sparse* A,
                                                     string* message);

  // Use the symbolic factorization in L to find the numerical
  // factorization for the matrix A or AA^T. L contains the numeric
  // factorization on return. The returned LinearSolverTerminationType
  // indicates whether the factorization succeeded.
  //
  // message contains an explanation of the failures if any.
  LinearSolverTerminationType Cholesky(cholmod_sparse* A,
                                       cholmod_factor* L,
                                       string* message);

  // Given a Cholesky factorization of a matrix A = LL^T, solve the
  // linear system Ax = b, and return the result. If the Solve fails,
  // NULL is returned. Caller owns the result.
  //
  // message contains an explanation of the failures if any.
  cholmod_dense* Solve(cholmod_factor* L, cholmod_dense* b, string* message);

  // By virtue of the modeling layer in Ceres being block oriented,
  // all the matrices used by Ceres are also block oriented. When
  // doing sparse direct factorization of these matrices the
  // fill-reducing ordering algorithms (in particular AMD) can either
  // be run on the block or the scalar form of these matrices. The two
  // SuiteSparse::AnalyzeCholesky methods allow the client to
  // compute the symbolic factorization of a matrix by either using
  // AMD on the matrix or a user provided ordering of the rows.
  //
  // But since the underlying matrices are block oriented, it is worth
  // running AMD on just the block structure of these matrices and then
  // lifting these block orderings to a full scalar ordering. This
  // preserves the block structure of the permuted matrix, and exposes
  // more of the super-nodal structure of the matrix to the numerical
  // factorization routines.
  //
  // Find the block oriented AMD ordering of a matrix A, whose row and
  // column blocks are given by row_blocks, and col_blocks
  // respectively. The matrix may or may not be symmetric. The entries
  // of col_blocks need not sum to the number of columns in A; if they
  // sum to fewer, only the first sum(col_blocks) columns are used to
  // compute the ordering.
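  //
  // For example, row_blocks = {2, 3} declares rows {0, 1} to be the
  // first row block and rows {2, 3, 4} to be the second; a block
  // ordering that places the second block before the first lifts to a
  // scalar ordering in which rows {2, 3, 4} appear together, followed
  // by rows {0, 1}.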
  bool BlockAMDOrdering(const cholmod_sparse* A,
                        const vector<int>& row_blocks,
                        const vector<int>& col_blocks,
                        vector<int>* ordering);

  // Find a fill-reducing approximate minimum degree
  // ordering. ordering is expected to be large enough to hold the
  // ordering.
  bool ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix, int* ordering);

  // Before SuiteSparse version 4.2.0, cholmod_camd was only enabled
  // if SuiteSparse was compiled with Metis support. This makes
  // calling and linking into cholmod_camd problematic even though it
  // has nothing to do with Metis. This has been fixed reliably in
  // 4.2.0.
  //
  // The fix was actually committed in 4.1.0, but there is
  // some confusion about a silent update to the tar ball, so we are
  // being conservative and choosing the next minor version where
  // things are stable.
  static bool IsConstrainedApproximateMinimumDegreeOrderingAvailable() {
    return (SUITESPARSE_VERSION > 4001);
  }

  // Find a fill-reducing approximate minimum degree
  // ordering. constraints is an array which associates with each
  // column of the matrix an elimination group, i.e., all columns in
  // group 0 are eliminated first, all columns in group 1 are
  // eliminated next, etc. This function finds a fill-reducing ordering
  // that obeys these constraints.
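  //
  // For example, with a four column matrix, constraints = {0, 0, 1, 1}
  // forces columns 0 and 1 to be eliminated before columns 2 and 3.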
  //
  // Calling ApproximateMinimumDegreeOrdering is equivalent to calling
  // ConstrainedApproximateMinimumDegreeOrdering with a constraint
  // array that puts all columns in the same elimination group.
  //
  // If CERES_NO_CAMD is defined then calling this function will
  // result in a crash.
  bool ConstrainedApproximateMinimumDegreeOrdering(cholmod_sparse* matrix,
                                                   int* constraints,
                                                   int* ordering);

  void Free(cholmod_sparse* m) { cholmod_free_sparse(&m, &cc_); }
  void Free(cholmod_dense* m)  { cholmod_free_dense(&m, &cc_);  }
  void Free(cholmod_factor* m) { cholmod_free_factor(&m, &cc_); }

  void Print(cholmod_sparse* m, const string& name) {
    cholmod_print_sparse(m, const_cast<char*>(name.c_str()), &cc_);
  }

  void Print(cholmod_dense* m, const string& name) {
    cholmod_print_dense(m, const_cast<char*>(name.c_str()), &cc_);
  }

  void Print(cholmod_triplet* m, const string& name) {
    cholmod_print_triplet(m, const_cast<char*>(name.c_str()), &cc_);
  }

  cholmod_common* mutable_cc() { return &cc_; }

 private:
  cholmod_common cc_;
};

}  // namespace internal
}  // namespace ceres

#else  // CERES_NO_SUITESPARSE

typedef void cholmod_factor;

class SuiteSparse {
 public:
  // Defining this static function even when SuiteSparse is not
  // available allows client code to check for the presence of CAMD
  // without checking for the absence of the CERES_NO_CAMD symbol.
  //
  // This is safer because the symbol may be missing due to a user
  // accidentally not including suitesparse.h in their code when
  // checking for the symbol.
  static bool IsConstrainedApproximateMinimumDegreeOrderingAvailable() {
    return false;
  }

  void Free(void*) {}
};

#endif  // CERES_NO_SUITESPARSE

#endif  // CERES_INTERNAL_SUITESPARSE_H_