// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
28// 29// Author: sameeragarwal@google.com (Sameer Agarwal) 30 31#include "ceres/covariance_impl.h" 32 33#ifdef CERES_USE_OPENMP 34#include <omp.h> 35#endif 36 37#include <algorithm> 38#include <utility> 39#include <vector> 40#include "Eigen/SVD" 41#include "ceres/compressed_col_sparse_matrix_utils.h" 42#include "ceres/compressed_row_sparse_matrix.h" 43#include "ceres/covariance.h" 44#include "ceres/crs_matrix.h" 45#include "ceres/internal/eigen.h" 46#include "ceres/map_util.h" 47#include "ceres/parameter_block.h" 48#include "ceres/problem_impl.h" 49#include "ceres/suitesparse.h" 50#include "ceres/wall_time.h" 51#include "glog/logging.h" 52 53namespace ceres { 54namespace internal { 55namespace { 56 57// Per thread storage for SuiteSparse. 58#ifndef CERES_NO_SUITESPARSE 59 60struct PerThreadContext { 61 explicit PerThreadContext(int num_rows) 62 : solution(NULL), 63 solution_set(NULL), 64 y_workspace(NULL), 65 e_workspace(NULL), 66 rhs(NULL) { 67 rhs = ss.CreateDenseVector(NULL, num_rows, num_rows); 68 } 69 70 ~PerThreadContext() { 71 ss.Free(solution); 72 ss.Free(solution_set); 73 ss.Free(y_workspace); 74 ss.Free(e_workspace); 75 ss.Free(rhs); 76 } 77 78 cholmod_dense* solution; 79 cholmod_sparse* solution_set; 80 cholmod_dense* y_workspace; 81 cholmod_dense* e_workspace; 82 cholmod_dense* rhs; 83 SuiteSparse ss; 84}; 85 86#endif 87 88} // namespace 89 90typedef vector<pair<const double*, const double*> > CovarianceBlocks; 91 92CovarianceImpl::CovarianceImpl(const Covariance::Options& options) 93 : options_(options), 94 is_computed_(false), 95 is_valid_(false) { 96 evaluate_options_.num_threads = options.num_threads; 97 evaluate_options_.apply_loss_function = options.apply_loss_function; 98} 99 100CovarianceImpl::~CovarianceImpl() { 101} 102 103bool CovarianceImpl::Compute(const CovarianceBlocks& covariance_blocks, 104 ProblemImpl* problem) { 105 problem_ = problem; 106 parameter_block_to_row_index_.clear(); 107 covariance_matrix_.reset(NULL); 108 is_valid_ = 
(ComputeCovarianceSparsity(covariance_blocks, problem) && 109 ComputeCovarianceValues()); 110 is_computed_ = true; 111 return is_valid_; 112} 113 114bool CovarianceImpl::GetCovarianceBlock(const double* original_parameter_block1, 115 const double* original_parameter_block2, 116 double* covariance_block) const { 117 CHECK(is_computed_) 118 << "Covariance::GetCovarianceBlock called before Covariance::Compute"; 119 CHECK(is_valid_) 120 << "Covariance::GetCovarianceBlock called when Covariance::Compute " 121 << "returned false."; 122 123 // If either of the two parameter blocks is constant, then the 124 // covariance block is also zero. 125 if (constant_parameter_blocks_.count(original_parameter_block1) > 0 || 126 constant_parameter_blocks_.count(original_parameter_block2) > 0) { 127 const ProblemImpl::ParameterMap& parameter_map = problem_->parameter_map(); 128 ParameterBlock* block1 = 129 FindOrDie(parameter_map, 130 const_cast<double*>(original_parameter_block1)); 131 132 ParameterBlock* block2 = 133 FindOrDie(parameter_map, 134 const_cast<double*>(original_parameter_block2)); 135 const int block1_size = block1->Size(); 136 const int block2_size = block2->Size(); 137 MatrixRef(covariance_block, block1_size, block2_size).setZero(); 138 return true; 139 } 140 141 const double* parameter_block1 = original_parameter_block1; 142 const double* parameter_block2 = original_parameter_block2; 143 const bool transpose = parameter_block1 > parameter_block2; 144 if (transpose) { 145 std::swap(parameter_block1, parameter_block2); 146 } 147 148 // Find where in the covariance matrix the block is located. 
149 const int row_begin = 150 FindOrDie(parameter_block_to_row_index_, parameter_block1); 151 const int col_begin = 152 FindOrDie(parameter_block_to_row_index_, parameter_block2); 153 const int* rows = covariance_matrix_->rows(); 154 const int* cols = covariance_matrix_->cols(); 155 const int row_size = rows[row_begin + 1] - rows[row_begin]; 156 const int* cols_begin = cols + rows[row_begin]; 157 158 // The only part that requires work is walking the compressed column 159 // vector to determine where the set of columns correspnding to the 160 // covariance block begin. 161 int offset = 0; 162 while (cols_begin[offset] != col_begin && offset < row_size) { 163 ++offset; 164 } 165 166 if (offset == row_size) { 167 LOG(WARNING) << "Unable to find covariance block for " 168 << original_parameter_block1 << " " 169 << original_parameter_block2; 170 return false; 171 } 172 173 const ProblemImpl::ParameterMap& parameter_map = problem_->parameter_map(); 174 ParameterBlock* block1 = 175 FindOrDie(parameter_map, const_cast<double*>(parameter_block1)); 176 ParameterBlock* block2 = 177 FindOrDie(parameter_map, const_cast<double*>(parameter_block2)); 178 const LocalParameterization* local_param1 = block1->local_parameterization(); 179 const LocalParameterization* local_param2 = block2->local_parameterization(); 180 const int block1_size = block1->Size(); 181 const int block1_local_size = block1->LocalSize(); 182 const int block2_size = block2->Size(); 183 const int block2_local_size = block2->LocalSize(); 184 185 ConstMatrixRef cov(covariance_matrix_->values() + rows[row_begin], 186 block1_size, 187 row_size); 188 189 // Fast path when there are no local parameterizations. 
190 if (local_param1 == NULL && local_param2 == NULL) { 191 if (transpose) { 192 MatrixRef(covariance_block, block2_size, block1_size) = 193 cov.block(0, offset, block1_size, block2_size).transpose(); 194 } else { 195 MatrixRef(covariance_block, block1_size, block2_size) = 196 cov.block(0, offset, block1_size, block2_size); 197 } 198 return true; 199 } 200 201 // If local parameterizations are used then the covariance that has 202 // been computed is in the tangent space and it needs to be lifted 203 // back to the ambient space. 204 // 205 // This is given by the formula 206 // 207 // C'_12 = J_1 C_12 J_2' 208 // 209 // Where C_12 is the local tangent space covariance for parameter 210 // blocks 1 and 2. J_1 and J_2 are respectively the local to global 211 // jacobians for parameter blocks 1 and 2. 212 // 213 // See Result 5.11 on page 142 of Hartley & Zisserman (2nd Edition) 214 // for a proof. 215 // 216 // TODO(sameeragarwal): Add caching of local parameterization, so 217 // that they are computed just once per parameter block. 218 Matrix block1_jacobian(block1_size, block1_local_size); 219 if (local_param1 == NULL) { 220 block1_jacobian.setIdentity(); 221 } else { 222 local_param1->ComputeJacobian(parameter_block1, block1_jacobian.data()); 223 } 224 225 Matrix block2_jacobian(block2_size, block2_local_size); 226 // Fast path if the user is requesting a diagonal block. 
227 if (parameter_block1 == parameter_block2) { 228 block2_jacobian = block1_jacobian; 229 } else { 230 if (local_param2 == NULL) { 231 block2_jacobian.setIdentity(); 232 } else { 233 local_param2->ComputeJacobian(parameter_block2, block2_jacobian.data()); 234 } 235 } 236 237 if (transpose) { 238 MatrixRef(covariance_block, block2_size, block1_size) = 239 block2_jacobian * 240 cov.block(0, offset, block1_local_size, block2_local_size).transpose() * 241 block1_jacobian.transpose(); 242 } else { 243 MatrixRef(covariance_block, block1_size, block2_size) = 244 block1_jacobian * 245 cov.block(0, offset, block1_local_size, block2_local_size) * 246 block2_jacobian.transpose(); 247 } 248 249 return true; 250} 251 252// Determine the sparsity pattern of the covariance matrix based on 253// the block pairs requested by the user. 254bool CovarianceImpl::ComputeCovarianceSparsity( 255 const CovarianceBlocks& original_covariance_blocks, 256 ProblemImpl* problem) { 257 EventLogger event_logger("CovarianceImpl::ComputeCovarianceSparsity"); 258 259 // Determine an ordering for the parameter block, by sorting the 260 // parameter blocks by their pointers. 261 vector<double*> all_parameter_blocks; 262 problem->GetParameterBlocks(&all_parameter_blocks); 263 const ProblemImpl::ParameterMap& parameter_map = problem->parameter_map(); 264 constant_parameter_blocks_.clear(); 265 vector<double*>& active_parameter_blocks = evaluate_options_.parameter_blocks; 266 active_parameter_blocks.clear(); 267 for (int i = 0; i < all_parameter_blocks.size(); ++i) { 268 double* parameter_block = all_parameter_blocks[i]; 269 270 ParameterBlock* block = FindOrDie(parameter_map, parameter_block); 271 if (block->IsConstant()) { 272 constant_parameter_blocks_.insert(parameter_block); 273 } else { 274 active_parameter_blocks.push_back(parameter_block); 275 } 276 } 277 278 sort(active_parameter_blocks.begin(), active_parameter_blocks.end()); 279 280 // Compute the number of rows. 
Map each parameter block to the 281 // first row corresponding to it in the covariance matrix using the 282 // ordering of parameter blocks just constructed. 283 int num_rows = 0; 284 parameter_block_to_row_index_.clear(); 285 for (int i = 0; i < active_parameter_blocks.size(); ++i) { 286 double* parameter_block = active_parameter_blocks[i]; 287 const int parameter_block_size = 288 problem->ParameterBlockLocalSize(parameter_block); 289 parameter_block_to_row_index_[parameter_block] = num_rows; 290 num_rows += parameter_block_size; 291 } 292 293 // Compute the number of non-zeros in the covariance matrix. Along 294 // the way flip any covariance blocks which are in the lower 295 // triangular part of the matrix. 296 int num_nonzeros = 0; 297 CovarianceBlocks covariance_blocks; 298 for (int i = 0; i < original_covariance_blocks.size(); ++i) { 299 const pair<const double*, const double*>& block_pair = 300 original_covariance_blocks[i]; 301 if (constant_parameter_blocks_.count(block_pair.first) > 0 || 302 constant_parameter_blocks_.count(block_pair.second) > 0) { 303 continue; 304 } 305 306 int index1 = FindOrDie(parameter_block_to_row_index_, block_pair.first); 307 int index2 = FindOrDie(parameter_block_to_row_index_, block_pair.second); 308 const int size1 = problem->ParameterBlockLocalSize(block_pair.first); 309 const int size2 = problem->ParameterBlockLocalSize(block_pair.second); 310 num_nonzeros += size1 * size2; 311 312 // Make sure we are constructing a block upper triangular matrix. 313 if (index1 > index2) { 314 covariance_blocks.push_back(make_pair(block_pair.second, 315 block_pair.first)); 316 } else { 317 covariance_blocks.push_back(block_pair); 318 } 319 } 320 321 if (covariance_blocks.size() == 0) { 322 VLOG(2) << "No non-zero covariance blocks found"; 323 covariance_matrix_.reset(NULL); 324 return true; 325 } 326 327 // Sort the block pairs. 
As a consequence we get the covariance 328 // blocks as they will occur in the CompressedRowSparseMatrix that 329 // will store the covariance. 330 sort(covariance_blocks.begin(), covariance_blocks.end()); 331 332 // Fill the sparsity pattern of the covariance matrix. 333 covariance_matrix_.reset( 334 new CompressedRowSparseMatrix(num_rows, num_rows, num_nonzeros)); 335 336 int* rows = covariance_matrix_->mutable_rows(); 337 int* cols = covariance_matrix_->mutable_cols(); 338 339 // Iterate over parameter blocks and in turn over the rows of the 340 // covariance matrix. For each parameter block, look in the upper 341 // triangular part of the covariance matrix to see if there are any 342 // blocks requested by the user. If this is the case then fill out a 343 // set of compressed rows corresponding to this parameter block. 344 // 345 // The key thing that makes this loop work is the fact that the 346 // row/columns of the covariance matrix are ordered by the pointer 347 // values of the parameter blocks. Thus iterating over the keys of 348 // parameter_block_to_row_index_ corresponds to iterating over the 349 // rows of the covariance matrix in order. 350 int i = 0; // index into covariance_blocks. 351 int cursor = 0; // index into the covariance matrix. 352 for (map<const double*, int>::const_iterator it = 353 parameter_block_to_row_index_.begin(); 354 it != parameter_block_to_row_index_.end(); 355 ++it) { 356 const double* row_block = it->first; 357 const int row_block_size = problem->ParameterBlockLocalSize(row_block); 358 int row_begin = it->second; 359 360 // Iterate over the covariance blocks contained in this row block 361 // and count the number of columns in this row block. 
362 int num_col_blocks = 0; 363 int num_columns = 0; 364 for (int j = i; j < covariance_blocks.size(); ++j, ++num_col_blocks) { 365 const pair<const double*, const double*>& block_pair = 366 covariance_blocks[j]; 367 if (block_pair.first != row_block) { 368 break; 369 } 370 num_columns += problem->ParameterBlockLocalSize(block_pair.second); 371 } 372 373 // Fill out all the compressed rows for this parameter block. 374 for (int r = 0; r < row_block_size; ++r) { 375 rows[row_begin + r] = cursor; 376 for (int c = 0; c < num_col_blocks; ++c) { 377 const double* col_block = covariance_blocks[i + c].second; 378 const int col_block_size = problem->ParameterBlockLocalSize(col_block); 379 int col_begin = FindOrDie(parameter_block_to_row_index_, col_block); 380 for (int k = 0; k < col_block_size; ++k) { 381 cols[cursor++] = col_begin++; 382 } 383 } 384 } 385 386 i+= num_col_blocks; 387 } 388 389 rows[num_rows] = cursor; 390 return true; 391} 392 393bool CovarianceImpl::ComputeCovarianceValues() { 394 switch (options_.algorithm_type) { 395 case (DENSE_SVD): 396 return ComputeCovarianceValuesUsingDenseSVD(); 397#ifndef CERES_NO_SUITESPARSE 398 case (SPARSE_CHOLESKY): 399 return ComputeCovarianceValuesUsingSparseCholesky(); 400 case (SPARSE_QR): 401 return ComputeCovarianceValuesUsingSparseQR(); 402#endif 403 default: 404 LOG(ERROR) << "Unsupported covariance estimation algorithm type: " 405 << CovarianceAlgorithmTypeToString(options_.algorithm_type); 406 return false; 407 } 408 return false; 409} 410 411bool CovarianceImpl::ComputeCovarianceValuesUsingSparseCholesky() { 412 EventLogger event_logger( 413 "CovarianceImpl::ComputeCovarianceValuesUsingSparseCholesky"); 414#ifndef CERES_NO_SUITESPARSE 415 if (covariance_matrix_.get() == NULL) { 416 // Nothing to do, all zeros covariance matrix. 
417 return true; 418 } 419 420 SuiteSparse ss; 421 422 CRSMatrix jacobian; 423 problem_->Evaluate(evaluate_options_, NULL, NULL, NULL, &jacobian); 424 425 event_logger.AddEvent("Evaluate"); 426 // m is a transposed view of the Jacobian. 427 cholmod_sparse cholmod_jacobian_view; 428 cholmod_jacobian_view.nrow = jacobian.num_cols; 429 cholmod_jacobian_view.ncol = jacobian.num_rows; 430 cholmod_jacobian_view.nzmax = jacobian.values.size(); 431 cholmod_jacobian_view.nz = NULL; 432 cholmod_jacobian_view.p = reinterpret_cast<void*>(&jacobian.rows[0]); 433 cholmod_jacobian_view.i = reinterpret_cast<void*>(&jacobian.cols[0]); 434 cholmod_jacobian_view.x = reinterpret_cast<void*>(&jacobian.values[0]); 435 cholmod_jacobian_view.z = NULL; 436 cholmod_jacobian_view.stype = 0; // Matrix is not symmetric. 437 cholmod_jacobian_view.itype = CHOLMOD_INT; 438 cholmod_jacobian_view.xtype = CHOLMOD_REAL; 439 cholmod_jacobian_view.dtype = CHOLMOD_DOUBLE; 440 cholmod_jacobian_view.sorted = 1; 441 cholmod_jacobian_view.packed = 1; 442 443 cholmod_factor* factor = ss.AnalyzeCholesky(&cholmod_jacobian_view); 444 event_logger.AddEvent("Symbolic Factorization"); 445 bool factorization_succeeded = ss.Cholesky(&cholmod_jacobian_view, factor); 446 if (factorization_succeeded) { 447 const double reciprocal_condition_number = 448 cholmod_rcond(factor, ss.mutable_cc()); 449 if (reciprocal_condition_number < 450 options_.min_reciprocal_condition_number) { 451 LOG(WARNING) << "Cholesky factorization of J'J is not reliable. 
" 452 << "Reciprocal condition number: " 453 << reciprocal_condition_number << " " 454 << "min_reciprocal_condition_number : " 455 << options_.min_reciprocal_condition_number; 456 factorization_succeeded = false; 457 } 458 } 459 460 event_logger.AddEvent("Numeric Factorization"); 461 if (!factorization_succeeded) { 462 ss.Free(factor); 463 LOG(WARNING) << "Cholesky factorization failed."; 464 return false; 465 } 466 467 const int num_rows = covariance_matrix_->num_rows(); 468 const int* rows = covariance_matrix_->rows(); 469 const int* cols = covariance_matrix_->cols(); 470 double* values = covariance_matrix_->mutable_values(); 471 472 // The following loop exploits the fact that the i^th column of A^{-1} 473 // is given by the solution to the linear system 474 // 475 // A x = e_i 476 // 477 // where e_i is a vector with e(i) = 1 and all other entries zero. 478 // 479 // Since the covariance matrix is symmetric, the i^th row and column 480 // are equal. 481 // 482 // The ifdef separates two different version of SuiteSparse. Newer 483 // versions of SuiteSparse have the cholmod_solve2 function which 484 // re-uses memory across calls. 
485#if (SUITESPARSE_VERSION < 4002) 486 cholmod_dense* rhs = ss.CreateDenseVector(NULL, num_rows, num_rows); 487 double* rhs_x = reinterpret_cast<double*>(rhs->x); 488 489 for (int r = 0; r < num_rows; ++r) { 490 int row_begin = rows[r]; 491 int row_end = rows[r + 1]; 492 if (row_end == row_begin) { 493 continue; 494 } 495 496 rhs_x[r] = 1.0; 497 cholmod_dense* solution = ss.Solve(factor, rhs); 498 double* solution_x = reinterpret_cast<double*>(solution->x); 499 for (int idx = row_begin; idx < row_end; ++idx) { 500 const int c = cols[idx]; 501 values[idx] = solution_x[c]; 502 } 503 ss.Free(solution); 504 rhs_x[r] = 0.0; 505 } 506 507 ss.Free(rhs); 508#else // SUITESPARSE_VERSION < 4002 509 510 const int num_threads = options_.num_threads; 511 vector<PerThreadContext*> contexts(num_threads); 512 for (int i = 0; i < num_threads; ++i) { 513 contexts[i] = new PerThreadContext(num_rows); 514 } 515 516 // The first call to cholmod_solve2 is not thread safe, since it 517 // changes the factorization from supernodal to simplicial etc. 
518 { 519 PerThreadContext* context = contexts[0]; 520 double* context_rhs_x = reinterpret_cast<double*>(context->rhs->x); 521 context_rhs_x[0] = 1.0; 522 cholmod_solve2(CHOLMOD_A, 523 factor, 524 context->rhs, 525 NULL, 526 &context->solution, 527 &context->solution_set, 528 &context->y_workspace, 529 &context->e_workspace, 530 context->ss.mutable_cc()); 531 context_rhs_x[0] = 0.0; 532 } 533 534#pragma omp parallel for num_threads(num_threads) schedule(dynamic) 535 for (int r = 0; r < num_rows; ++r) { 536 int row_begin = rows[r]; 537 int row_end = rows[r + 1]; 538 if (row_end == row_begin) { 539 continue; 540 } 541 542# ifdef CERES_USE_OPENMP 543 int thread_id = omp_get_thread_num(); 544# else 545 int thread_id = 0; 546# endif 547 548 PerThreadContext* context = contexts[thread_id]; 549 double* context_rhs_x = reinterpret_cast<double*>(context->rhs->x); 550 context_rhs_x[r] = 1.0; 551 552 // TODO(sameeragarwal) There should be a more efficient way 553 // involving the use of Bset but I am unable to make it work right 554 // now. 
555 cholmod_solve2(CHOLMOD_A, 556 factor, 557 context->rhs, 558 NULL, 559 &context->solution, 560 &context->solution_set, 561 &context->y_workspace, 562 &context->e_workspace, 563 context->ss.mutable_cc()); 564 565 double* solution_x = reinterpret_cast<double*>(context->solution->x); 566 for (int idx = row_begin; idx < row_end; ++idx) { 567 const int c = cols[idx]; 568 values[idx] = solution_x[c]; 569 } 570 context_rhs_x[r] = 0.0; 571 } 572 573 for (int i = 0; i < num_threads; ++i) { 574 delete contexts[i]; 575 } 576 577#endif // SUITESPARSE_VERSION < 4002 578 579 ss.Free(factor); 580 event_logger.AddEvent("Inversion"); 581 return true; 582 583#else // CERES_NO_SUITESPARSE 584 585 return false; 586 587#endif // CERES_NO_SUITESPARSE 588}; 589 590bool CovarianceImpl::ComputeCovarianceValuesUsingSparseQR() { 591 EventLogger event_logger( 592 "CovarianceImpl::ComputeCovarianceValuesUsingSparseQR"); 593 594#ifndef CERES_NO_SUITESPARSE 595 if (covariance_matrix_.get() == NULL) { 596 // Nothing to do, all zeros covariance matrix. 597 return true; 598 } 599 600 CRSMatrix jacobian; 601 problem_->Evaluate(evaluate_options_, NULL, NULL, NULL, &jacobian); 602 event_logger.AddEvent("Evaluate"); 603 604 // Construct a compressed column form of the Jacobian. 
605 const int num_rows = jacobian.num_rows; 606 const int num_cols = jacobian.num_cols; 607 const int num_nonzeros = jacobian.values.size(); 608 609 vector<SuiteSparse_long> transpose_rows(num_cols + 1, 0); 610 vector<SuiteSparse_long> transpose_cols(num_nonzeros, 0); 611 vector<double> transpose_values(num_nonzeros, 0); 612 613 for (int idx = 0; idx < num_nonzeros; ++idx) { 614 transpose_rows[jacobian.cols[idx] + 1] += 1; 615 } 616 617 for (int i = 1; i < transpose_rows.size(); ++i) { 618 transpose_rows[i] += transpose_rows[i - 1]; 619 } 620 621 for (int r = 0; r < num_rows; ++r) { 622 for (int idx = jacobian.rows[r]; idx < jacobian.rows[r + 1]; ++idx) { 623 const int c = jacobian.cols[idx]; 624 const int transpose_idx = transpose_rows[c]; 625 transpose_cols[transpose_idx] = r; 626 transpose_values[transpose_idx] = jacobian.values[idx]; 627 ++transpose_rows[c]; 628 } 629 } 630 631 for (int i = transpose_rows.size() - 1; i > 0 ; --i) { 632 transpose_rows[i] = transpose_rows[i - 1]; 633 } 634 transpose_rows[0] = 0; 635 636 cholmod_sparse cholmod_jacobian; 637 cholmod_jacobian.nrow = num_rows; 638 cholmod_jacobian.ncol = num_cols; 639 cholmod_jacobian.nzmax = num_nonzeros; 640 cholmod_jacobian.nz = NULL; 641 cholmod_jacobian.p = reinterpret_cast<void*>(&transpose_rows[0]); 642 cholmod_jacobian.i = reinterpret_cast<void*>(&transpose_cols[0]); 643 cholmod_jacobian.x = reinterpret_cast<void*>(&transpose_values[0]); 644 cholmod_jacobian.z = NULL; 645 cholmod_jacobian.stype = 0; // Matrix is not symmetric. 646 cholmod_jacobian.itype = CHOLMOD_LONG; 647 cholmod_jacobian.xtype = CHOLMOD_REAL; 648 cholmod_jacobian.dtype = CHOLMOD_DOUBLE; 649 cholmod_jacobian.sorted = 1; 650 cholmod_jacobian.packed = 1; 651 652 cholmod_common cc; 653 cholmod_l_start(&cc); 654 655 cholmod_sparse* R = NULL; 656 SuiteSparse_long* permutation = NULL; 657 658 // Compute a Q-less QR factorization of the Jacobian. Since we are 659 // only interested in inverting J'J = R'R, we do not need Q. 
This 660 // saves memory and gives us R as a permuted compressed column 661 // sparse matrix. 662 // 663 // TODO(sameeragarwal): Currently the symbolic factorization and the 664 // numeric factorization is done at the same time, and this does not 665 // explicitly account for the block column and row structure in the 666 // matrix. When using AMD, we have observed in the past that 667 // computing the ordering with the block matrix is significantly 668 // more efficient, both in runtime as well as the quality of 669 // ordering computed. So, it maybe worth doing that analysis 670 // separately. 671 const SuiteSparse_long rank = 672 SuiteSparseQR<double>(SPQR_ORDERING_BESTAMD, 673 SPQR_DEFAULT_TOL, 674 cholmod_jacobian.ncol, 675 &cholmod_jacobian, 676 &R, 677 &permutation, 678 &cc); 679 event_logger.AddEvent("Numeric Factorization"); 680 CHECK_NOTNULL(permutation); 681 CHECK_NOTNULL(R); 682 683 if (rank < cholmod_jacobian.ncol) { 684 LOG(WARNING) << "Jacobian matrix is rank deficient." 685 << "Number of columns: " << cholmod_jacobian.ncol 686 << " rank: " << rank; 687 delete []permutation; 688 cholmod_l_free_sparse(&R, &cc); 689 cholmod_l_finish(&cc); 690 return false; 691 } 692 693 vector<int> inverse_permutation(num_cols); 694 for (SuiteSparse_long i = 0; i < num_cols; ++i) { 695 inverse_permutation[permutation[i]] = i; 696 } 697 698 const int* rows = covariance_matrix_->rows(); 699 const int* cols = covariance_matrix_->cols(); 700 double* values = covariance_matrix_->mutable_values(); 701 702 // The following loop exploits the fact that the i^th column of A^{-1} 703 // is given by the solution to the linear system 704 // 705 // A x = e_i 706 // 707 // where e_i is a vector with e(i) = 1 and all other entries zero. 708 // 709 // Since the covariance matrix is symmetric, the i^th row and column 710 // are equal. 
711 const int num_threads = options_.num_threads; 712 scoped_array<double> workspace(new double[num_threads * num_cols]); 713 714#pragma omp parallel for num_threads(num_threads) schedule(dynamic) 715 for (int r = 0; r < num_cols; ++r) { 716 const int row_begin = rows[r]; 717 const int row_end = rows[r + 1]; 718 if (row_end == row_begin) { 719 continue; 720 } 721 722# ifdef CERES_USE_OPENMP 723 int thread_id = omp_get_thread_num(); 724# else 725 int thread_id = 0; 726# endif 727 728 double* solution = workspace.get() + thread_id * num_cols; 729 SolveRTRWithSparseRHS<SuiteSparse_long>( 730 num_cols, 731 static_cast<SuiteSparse_long*>(R->i), 732 static_cast<SuiteSparse_long*>(R->p), 733 static_cast<double*>(R->x), 734 inverse_permutation[r], 735 solution); 736 for (int idx = row_begin; idx < row_end; ++idx) { 737 const int c = cols[idx]; 738 values[idx] = solution[inverse_permutation[c]]; 739 } 740 } 741 742 delete []permutation; 743 cholmod_l_free_sparse(&R, &cc); 744 cholmod_l_finish(&cc); 745 event_logger.AddEvent("Inversion"); 746 return true; 747 748#else // CERES_NO_SUITESPARSE 749 750 return false; 751 752#endif // CERES_NO_SUITESPARSE 753} 754 755bool CovarianceImpl::ComputeCovarianceValuesUsingDenseSVD() { 756 EventLogger event_logger( 757 "CovarianceImpl::ComputeCovarianceValuesUsingDenseSVD"); 758 if (covariance_matrix_.get() == NULL) { 759 // Nothing to do, all zeros covariance matrix. 
760 return true; 761 } 762 763 CRSMatrix jacobian; 764 problem_->Evaluate(evaluate_options_, NULL, NULL, NULL, &jacobian); 765 event_logger.AddEvent("Evaluate"); 766 767 Matrix dense_jacobian(jacobian.num_rows, jacobian.num_cols); 768 dense_jacobian.setZero(); 769 for (int r = 0; r < jacobian.num_rows; ++r) { 770 for (int idx = jacobian.rows[r]; idx < jacobian.rows[r + 1]; ++idx) { 771 const int c = jacobian.cols[idx]; 772 dense_jacobian(r, c) = jacobian.values[idx]; 773 } 774 } 775 event_logger.AddEvent("ConvertToDenseMatrix"); 776 777 Eigen::JacobiSVD<Matrix> svd(dense_jacobian, 778 Eigen::ComputeThinU | Eigen::ComputeThinV); 779 780 event_logger.AddEvent("SingularValueDecomposition"); 781 782 const Vector singular_values = svd.singularValues(); 783 const int num_singular_values = singular_values.rows(); 784 Vector inverse_squared_singular_values(num_singular_values); 785 inverse_squared_singular_values.setZero(); 786 787 const double max_singular_value = singular_values[0]; 788 const double min_singular_value_ratio = 789 sqrt(options_.min_reciprocal_condition_number); 790 791 const bool automatic_truncation = (options_.null_space_rank < 0); 792 const int max_rank = min(num_singular_values, 793 num_singular_values - options_.null_space_rank); 794 795 // Compute the squared inverse of the singular values. Truncate the 796 // computation based on min_singular_value_ratio and 797 // null_space_rank. When either of these two quantities are active, 798 // the resulting covariance matrix is a Moore-Penrose inverse 799 // instead of a regular inverse. 800 for (int i = 0; i < max_rank; ++i) { 801 const double singular_value_ratio = singular_values[i] / max_singular_value; 802 if (singular_value_ratio < min_singular_value_ratio) { 803 // Since the singular values are in decreasing order, if 804 // automatic truncation is enabled, then from this point on 805 // all values will fail the ratio test and there is nothing to 806 // do in this loop. 
807 if (automatic_truncation) { 808 break; 809 } else { 810 LOG(WARNING) << "Cholesky factorization of J'J is not reliable. " 811 << "Reciprocal condition number: " 812 << singular_value_ratio * singular_value_ratio << " " 813 << "min_reciprocal_condition_number : " 814 << options_.min_reciprocal_condition_number; 815 return false; 816 } 817 } 818 819 inverse_squared_singular_values[i] = 820 1.0 / (singular_values[i] * singular_values[i]); 821 } 822 823 Matrix dense_covariance = 824 svd.matrixV() * 825 inverse_squared_singular_values.asDiagonal() * 826 svd.matrixV().transpose(); 827 event_logger.AddEvent("PseudoInverse"); 828 829 const int num_rows = covariance_matrix_->num_rows(); 830 const int* rows = covariance_matrix_->rows(); 831 const int* cols = covariance_matrix_->cols(); 832 double* values = covariance_matrix_->mutable_values(); 833 834 for (int r = 0; r < num_rows; ++r) { 835 for (int idx = rows[r]; idx < rows[r + 1]; ++idx) { 836 const int c = cols[idx]; 837 values[idx] = dense_covariance(r, c); 838 } 839 } 840 event_logger.AddEvent("CopyToCovarianceMatrix"); 841 return true; 842}; 843 844} // namespace internal 845} // namespace ceres 846