solver.h revision 1d2624a10e2c559f8ba9ef89eaa30832c0a83a96
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#ifndef CERES_PUBLIC_SOLVER_H_
#define CERES_PUBLIC_SOLVER_H_

#include <cmath>
#include <string>
#include <vector>
#include "ceres/crs_matrix.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/iteration_callback.h"
#include "ceres/ordered_groups.h"
#include "ceres/types.h"

namespace ceres {

class Problem;

// Interface for non-linear least squares solvers.
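//
// A typical invocation builds a Problem, configures the options, and
// calls Solve. A minimal sketch (MyCostFunctor is a hypothetical
// user-defined functor with one residual and one parameter block of
// size one):
//
//   double x = 0.5;
//   ceres::Problem problem;
//   problem.AddResidualBlock(
//       new ceres::AutoDiffCostFunction<MyCostFunctor, 1, 1>(
//           new MyCostFunctor), NULL, &x);
//
//   ceres::Solver::Options options;
//   options.max_num_iterations = 100;
//   ceres::Solver::Summary summary;
//   ceres::Solve(options, &problem, &summary);
//   std::cout << summary.BriefReport() << "\n";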
class Solver {
 public:
  virtual ~Solver();

  // The options structure contains, not surprisingly, options that
  // control how the solver operates. The defaults should be suitable
  // for a wide range of problems; however, better performance is often
  // obtainable by tweaking them.
  //
  // The constants are defined inside types.h
  struct Options {
    // Default constructor that sets up a generic sparse problem.
    Options() {
      minimizer_type = TRUST_REGION;
      line_search_direction_type = LBFGS;
      line_search_type = WOLFE;
      nonlinear_conjugate_gradient_type = FLETCHER_REEVES;
      max_lbfgs_rank = 20;
      use_approximate_eigenvalue_bfgs_scaling = false;
      line_search_interpolation_type = CUBIC;
      min_line_search_step_size = 1e-9;
      line_search_sufficient_function_decrease = 1e-4;
      max_line_search_step_contraction = 1e-3;
      min_line_search_step_contraction = 0.6;
      max_num_line_search_step_size_iterations = 20;
      max_num_line_search_direction_restarts = 5;
      line_search_sufficient_curvature_decrease = 0.9;
      max_line_search_step_expansion = 10.0;

      trust_region_strategy_type = LEVENBERG_MARQUARDT;
      dogleg_type = TRADITIONAL_DOGLEG;
      use_nonmonotonic_steps = false;
      max_consecutive_nonmonotonic_steps = 5;
      max_num_iterations = 50;
      max_solver_time_in_seconds = 1e9;
      num_threads = 1;
      initial_trust_region_radius = 1e4;
      max_trust_region_radius = 1e16;
      min_trust_region_radius = 1e-32;
      min_relative_decrease = 1e-3;
      min_lm_diagonal = 1e-6;
      max_lm_diagonal = 1e32;
      max_num_consecutive_invalid_steps = 5;
      function_tolerance = 1e-6;
      gradient_tolerance = 1e-10;
      parameter_tolerance = 1e-8;

#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
      linear_solver_type = DENSE_QR;
#else
      linear_solver_type = SPARSE_NORMAL_CHOLESKY;
#endif

      preconditioner_type = JACOBI;

      sparse_linear_algebra_library = SUITE_SPARSE;
#if defined(CERES_NO_SUITESPARSE) && !defined(CERES_NO_CXSPARSE)
      sparse_linear_algebra_library = CX_SPARSE;
#endif

      num_linear_solver_threads = 1;
      linear_solver_ordering = NULL;
      use_postordering = false;
      min_linear_solver_iterations = 1;
      max_linear_solver_iterations = 500;
      eta = 1e-1;
      jacobi_scaling = true;
      use_inner_iterations = false;
      inner_iteration_tolerance = 1e-3;
      inner_iteration_ordering = NULL;
      logging_type = PER_MINIMIZER_ITERATION;
      minimizer_progress_to_stdout = false;
      trust_region_problem_dump_directory = "/tmp";
      trust_region_problem_dump_format_type = TEXTFILE;
      check_gradients = false;
      gradient_check_relative_precision = 1e-8;
      numeric_derivative_relative_step_size = 1e-6;
      update_state_every_iteration = false;
    }

    ~Options();

    // Minimizer options ----------------------------------------

    // Ceres supports the two major families of optimization strategies -
    // Trust Region and Line Search.
    //
    // 1. The line search approach first finds a descent direction
    // along which the objective function will be reduced and then
    // computes a step size that decides how far to move along that
    // direction. The descent direction can be computed by various
    // methods, such as gradient descent, Newton's method and
    // quasi-Newton methods. The step size can be determined either
    // exactly or inexactly.
    //
    // 2. The trust region approach approximates the objective
    // function using a model function (often a quadratic) over
    // a subset of the search space known as the trust region. If the
    // model function succeeds in minimizing the true objective
    // function, the trust region is expanded; otherwise it is
    // contracted and the model optimization problem is solved again.
    //
    // Trust region methods are in some sense dual to line search methods:
    // trust region methods first choose a step size (the size of the
    // trust region) and then a step direction, while line search methods
    // first choose a step direction and then a step size.
    MinimizerType minimizer_type;

    LineSearchDirectionType line_search_direction_type;
    LineSearchType line_search_type;
    NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;

    // The L-BFGS hessian approximation is a low rank approximation to
    // the inverse of the Hessian matrix. The rank of the approximation
    // determines (linearly) the space and time complexity of using the
    // approximation. The higher the rank, the better the quality of
    // the approximation. The increase in quality is, however, bounded
    // for a number of reasons.
    //
    // 1. The method only uses secant information and not actual
    // derivatives.
    //
    // 2. The Hessian approximation is constrained to be positive
    // definite.
    //
    // So increasing this rank to a large number will cost time and
    // space complexity without the corresponding increase in solution
    // quality. There are no hard and fast rules for choosing the
    // maximum rank. The best choice usually requires some problem
    // specific experimentation.
    //
    // For more theoretical and implementation details of the L-BFGS
    // method, please see:
    //
    // Nocedal, J. (1980). "Updating Quasi-Newton Matrices with
    // Limited Storage". Mathematics of Computation 35 (151): 773-782.
    int max_lbfgs_rank;

    // As part of the (L)BFGS update step (BFGS) / right-multiply step
    // (L-BFGS), the initial inverse Hessian approximation is taken to
    // be the Identity. However, Oren showed that using instead
    // I * \gamma, where \gamma is chosen to approximate an eigenvalue
    // of the true inverse Hessian, can result in improved convergence
    // in a wide variety of cases. Setting
    // use_approximate_eigenvalue_bfgs_scaling to true enables this
    // scaling.
    //
    // It is important to note that approximate eigenvalue scaling does
    // not always improve convergence, and that it can in fact
    // significantly degrade performance for certain classes of
    // problem, which is why it is disabled by default. In particular
    // it can degrade performance when the sensitivity of the problem
    // to different parameters varies significantly, as in this case a
    // single scalar factor fails to capture this variation and
    // detrimentally downscales parts of the jacobian approximation
    // which correspond to low-sensitivity parameters. It can also
    // reduce the robustness of the solution to errors in the
    // jacobians.
    //
    // Oren S.S., Self-scaling variable metric (SSVM) algorithms
    // Part II: Implementation and experiments, Management Science,
    // 20(5), 863-874, 1974.
    bool use_approximate_eigenvalue_bfgs_scaling;

    // Degree of the polynomial used to approximate the objective
    // function. Valid values are BISECTION, QUADRATIC and CUBIC.
    //
    // BISECTION corresponds to pure backtracking search with no
    // interpolation.
    LineSearchInterpolationType line_search_interpolation_type;

    // If during the line search, the step_size falls below this
    // value, it is truncated to zero.
    double min_line_search_step_size;
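
    // For example, a minimal sketch of selecting a line search
    // minimizer with the L-BFGS direction (the values shown are
    // illustrative, not recommendations):
    //
    //   ceres::Solver::Options options;
    //   options.minimizer_type = ceres::LINE_SEARCH;
    //   options.line_search_direction_type = ceres::LBFGS;
    //   options.max_lbfgs_rank = 10;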

    // Line search parameters.

    // Solving the line search problem exactly is computationally
    // prohibitive. Fortunately, line search based optimization
    // algorithms can still guarantee convergence if instead of an
    // exact solution, the line search algorithm returns a solution
    // which decreases the value of the objective function
    // sufficiently. More precisely, we are looking for a step_size
    // s.t.
    //
    //   f(step_size) <= f(0) + sufficient_decrease * f'(0) * step_size
    //
    double line_search_sufficient_function_decrease;

    // In each iteration of the line search,
    //
    //   new_step_size >= max_line_search_step_contraction * step_size
    //
    // Note that by definition, for contraction:
    //
    //   0 < max_step_contraction < min_step_contraction < 1
    //
    double max_line_search_step_contraction;

    // In each iteration of the line search,
    //
    //   new_step_size <= min_line_search_step_contraction * step_size
    //
    // Note that by definition, for contraction:
    //
    //   0 < max_step_contraction < min_step_contraction < 1
    //
    double min_line_search_step_contraction;

    // Maximum number of trial step size iterations during each line
    // search; if a step size satisfying the search conditions cannot
    // be found within this number of trials, the line search will
    // terminate.
    int max_num_line_search_step_size_iterations;

    // Maximum number of restarts of the line search direction
    // algorithm before terminating the optimization. Restarts of the
    // line search direction algorithm occur when the current algorithm
    // fails to produce a new descent direction. This typically
    // indicates a numerical failure, or a breakdown in the validity of
    // the approximations used.
    int max_num_line_search_direction_restarts;

    // The strong Wolfe conditions consist of the Armijo sufficient
    // decrease condition, and an additional requirement that the
    // step size be chosen s.t. the _magnitude_ ('strong' Wolfe
    // conditions) of the gradient along the search direction
    // decreases sufficiently. Precisely, this second condition
    // is that we seek a step_size s.t.
    //
    //   |f'(step_size)| <= sufficient_curvature_decrease * |f'(0)|
    //
    // where f() is the line search objective and f'() is the
    // derivative of f w.r.t. step_size (d f / d step_size).
    double line_search_sufficient_curvature_decrease;

    // During the bracketing phase of the Wolfe search, the step size
    // is increased until either a point satisfying the Wolfe
    // conditions is found, or an upper bound for a bracket containing
    // a point satisfying the conditions is found. Precisely, at each
    // iteration of the expansion:
    //
    //   new_step_size <= max_step_expansion * step_size
    //
    // By definition for expansion, max_step_expansion > 1.0.
    double max_line_search_step_expansion;

    TrustRegionStrategyType trust_region_strategy_type;

    // Type of dogleg strategy to use.
    DoglegType dogleg_type;
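
    // For example, a sketch of selecting the subspace variant of the
    // dogleg trust region strategy:
    //
    //   options.trust_region_strategy_type = ceres::DOGLEG;
    //   options.dogleg_type = ceres::SUBSPACE_DOGLEG;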

    // The classical trust region methods are descent methods, in that
    // they only accept a point if it strictly reduces the value of
    // the objective function.
    //
    // Relaxing this requirement allows the algorithm to be more
    // efficient in the long term at the cost of some local increase
    // in the value of the objective function.
    //
    // This is because allowing for non-decreasing objective function
    // values in a principled manner allows the algorithm to "jump over
    // boulders" as the method is not restricted to move into narrow
    // valleys while preserving its convergence properties.
    //
    // Setting use_nonmonotonic_steps to true enables the
    // non-monotonic trust region algorithm as described by Conn,
    // Gould & Toint in "Trust Region Methods", Section 10.1.
    //
    // The parameter max_consecutive_nonmonotonic_steps controls the
    // window size used by the step selection algorithm to accept
    // non-monotonic steps.
    //
    // Even though the value of the objective function may be larger
    // than the minimum value encountered over the course of the
    // optimization, the final parameters returned to the user are the
    // ones corresponding to the minimum cost over all iterations.
    bool use_nonmonotonic_steps;
    int max_consecutive_nonmonotonic_steps;

    // Maximum number of iterations for which the minimizer should run.
    int max_num_iterations;

    // Maximum time for which the minimizer should run.
    double max_solver_time_in_seconds;

    // Number of threads used by Ceres for evaluating the cost and
    // jacobians.
    int num_threads;

    // Trust region minimizer settings.
    double initial_trust_region_radius;
    double max_trust_region_radius;

    // Minimizer terminates when the trust region radius becomes
    // smaller than this value.
    double min_trust_region_radius;

    // Lower bound for the relative decrease before a step is
    // accepted.
    double min_relative_decrease;

    // For the Levenberg-Marquardt algorithm, the scaled diagonal of
    // the normal equations J'J is used to control the size of the
    // trust region. Extremely small and large values along the
    // diagonal can make this regularization scheme fail.
    // max_lm_diagonal and min_lm_diagonal clamp the values of
    // diag(J'J) from above and below. In the normal course of
    // operation, the user should not have to modify these parameters.
    double min_lm_diagonal;
    double max_lm_diagonal;

    // Sometimes due to numerical conditioning problems or linear
    // solver flakiness, the trust region strategy may return a
    // numerically invalid step that can be fixed by reducing the
    // trust region size. So the TrustRegionMinimizer allows for a few
    // successive invalid steps before it declares NUMERICAL_FAILURE.
    int max_num_consecutive_invalid_steps;

    // Minimizer terminates when
    //
    //   (new_cost - old_cost) < function_tolerance * old_cost;
    //
    double function_tolerance;

    // Minimizer terminates when
    //
    //   max_i |gradient_i| < gradient_tolerance * max_i |initial_gradient_i|
    //
    // This value should typically be 1e-4 * function_tolerance.
    double gradient_tolerance;

    // Minimizer terminates when
    //
    //   |step|_2 <= parameter_tolerance * (|x|_2 + parameter_tolerance)
    //
    double parameter_tolerance;

    // Linear least squares solver options -------------------------------------

    LinearSolverType linear_solver_type;

    // Type of preconditioner to use with the iterative linear solvers.
    PreconditionerType preconditioner_type;
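
    // For example, a sketch of selecting an iterative Schur complement
    // solver with a Schur-Jacobi preconditioner, a combination often
    // used for bundle adjustment style problems:
    //
    //   options.linear_solver_type = ceres::ITERATIVE_SCHUR;
    //   options.preconditioner_type = ceres::SCHUR_JACOBI;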

    // Ceres supports using multiple sparse linear algebra libraries
    // for sparse matrix ordering and factorizations. Currently,
    // SUITE_SPARSE and CX_SPARSE are the valid choices, depending on
    // whether they are linked into Ceres at build time.
    SparseLinearAlgebraLibraryType sparse_linear_algebra_library;

    // Number of threads used by Ceres to solve the Newton
    // step. Currently only the SPARSE_SCHUR solver is capable of
    // using this setting.
    int num_linear_solver_threads;

    // The order in which variables are eliminated in a linear solver
    // can have a significant impact on the efficiency and accuracy
    // of the method. e.g., when doing sparse Cholesky factorization,
    // there are matrices for which a good ordering will give a
    // Cholesky factor with O(n) storage, whereas a bad ordering will
    // result in a completely dense factor.
    //
    // Ceres allows the user to provide varying amounts of hints to
    // the solver about the variable elimination ordering to use. This
    // can range from no hints, where the solver is free to decide the
    // best possible ordering based on the user's choices like the
    // linear solver being used, to an exact order in which the
    // variables should be eliminated, and a variety of possibilities
    // in between.
    //
    // Instances of the ParameterBlockOrdering class are used to
    // communicate this information to Ceres.
    //
    // Formally an ordering is an ordered partitioning of the
    // parameter blocks, i.e., each parameter block belongs to exactly
    // one group, and each group has a unique non-negative integer
    // associated with it, that determines its order in the set of
    // groups.
    //
    // Given such an ordering, Ceres ensures that the parameter blocks
    // in the lowest numbered group are eliminated first, then the
    // parameter blocks in the next lowest numbered group, and so on.
    // Within each group, Ceres is free to order the parameter blocks
    // as it chooses.
    //
    // If NULL, then all parameter blocks are assumed to be in the
    // same group and the solver is free to decide the best
    // ordering.
    //
    // e.g. Consider the linear system
    //
    //   x + y = 3
    //   2x + 3y = 7
    //
    // There are two ways in which it can be solved. First eliminating
    // x from the two equations, solving for y and then back
    // substituting for x, or first eliminating y, solving for x and
    // back substituting for y. The user can construct three orderings
    // here.
    //
    //   {0: x}, {1: y} - eliminate x first.
    //   {0: y}, {1: x} - eliminate y first.
    //   {0: x, y}      - Solver gets to decide the elimination order.
    //
    // Thus, to have Ceres determine the ordering automatically using
    // heuristics, put all the variables in group 0; to control the
    // ordering for every variable, create groups 0..N-1, one per
    // variable, in the desired order.
    //
    // Bundle Adjustment
    // -----------------
    //
    // A particular case of interest is bundle adjustment, where the
    // user has two options. The default is to not specify an ordering
    // at all; the solver will see that the user wants to use a Schur
    // type solver and figure out the right elimination ordering.
    //
    // But if the user already knows what parameter blocks are points
    // and what are cameras, they can save preprocessing time by
    // partitioning the parameter blocks into two groups, one for the
    // points and one for the cameras, where the group containing the
    // points has an id smaller than the group containing cameras.
    //
    // Once assigned, Solver::Options owns this pointer and will
    // deallocate the memory when destroyed.
    ParameterBlockOrdering* linear_solver_ordering;
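
    // For example, a sketch for a bundle adjustment problem, where
    // points and cameras are hypothetical user-owned arrays of
    // parameter block pointers (i.e. double*):
    //
    //   options.linear_solver_type = ceres::SPARSE_SCHUR;
    //   options.linear_solver_ordering = new ceres::ParameterBlockOrdering;
    //   for (int i = 0; i < num_points; ++i) {
    //     options.linear_solver_ordering->AddElementToGroup(points[i], 0);
    //   }
    //   for (int i = 0; i < num_cameras; ++i) {
    //     options.linear_solver_ordering->AddElementToGroup(cameras[i], 1);
    //   }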

    // Sparse Cholesky factorization algorithms use a fill-reducing
    // ordering to permute the columns of the Jacobian matrix. There
    // are two ways of doing this.
    //
    // 1. Compute the Jacobian matrix in some order and then have the
    //    factorization algorithm permute the columns of the Jacobian.
    //
    // 2. Compute the Jacobian with its columns already permuted.
    //
    // The first option incurs a significant memory penalty. The
    // factorization algorithm has to make a copy of the permuted
    // Jacobian matrix, thus Ceres pre-permutes the columns of the
    // Jacobian matrix and generally speaking, there is no performance
    // penalty for doing so.
    //
    // In some rare cases, it is worth using a more complicated
    // reordering algorithm which has slightly better runtime
    // performance at the expense of an extra copy of the Jacobian
    // matrix. Setting use_postordering to true enables this tradeoff.
    bool use_postordering;

    // Some non-linear least squares problems have additional
    // structure in the way the parameter blocks interact that makes
    // it beneficial to modify the way the trust region step is
    // computed.
    //
    // e.g., consider the following regression problem
    //
    //   y = a_1 exp(b_1 x) + a_2 exp(b_2 x^2 + c_1)
    //
    // Given a set of pairs {(x_i, y_i)}, the user wishes to estimate
    // a_1, a_2, b_1, b_2, and c_1.
    //
    // Notice that the model is linear in a_1 and a_2: given any value
    // for b_1, b_2 and c_1, it is possible to use linear regression
    // to estimate the optimal values of a_1 and a_2. Indeed, it is
    // possible to analytically eliminate the variables a_1 and a_2
    // from the problem altogether. Problems like these are known as
    // separable least squares problems and the most famous algorithm
    // for solving them is the Variable Projection algorithm invented
    // by Golub & Pereyra.
    //
    // Similar structure can be found in the matrix factorization with
    // missing data problem. There the corresponding algorithm is
    // known as Wiberg's algorithm.
    //
    // Ruhe & Wedin (Algorithms for Separable Nonlinear Least Squares
    // Problems, SIAM Reviews, 22(3), 1980) present an analysis of
    // various algorithms for solving separable non-linear least
    // squares problems and refer to "Variable Projection" as
    // Algorithm I in their paper.
    //
    // Implementing Variable Projection is tedious and expensive, and
    // they present a simpler algorithm, which they refer to as
    // Algorithm II, where once the Newton/Trust Region step has been
    // computed for the whole problem (a_1, a_2, b_1, b_2, c_1), an
    // additional optimization step is performed to estimate a_1 and
    // a_2 exactly.
    //
    // This idea can be generalized to cases where the residual is not
    // linear in a_1 and a_2, i.e., solve for the trust region step
    // for the full problem, and then use it as the starting point to
    // further optimize just a_1 and a_2. For the linear case, this
    // amounts to doing a single linear least squares solve. For
    // non-linear problems, any method for solving the a_1 and a_2
    // optimization problems will do. The only constraint on a_1 and
    // a_2 is that they do not co-occur in any residual block.
    //
    // This idea can be further generalized, by not just optimizing
    // (a_1, a_2), but decomposing the graph corresponding to the
    // Hessian matrix's sparsity structure into a collection of
    // non-overlapping independent sets and optimizing each of them.
    //
    // Setting "use_inner_iterations" to true enables the use of this
    // non-linear generalization of Ruhe & Wedin's Algorithm II. This
    // version of the algorithm has a higher iteration complexity, but
    // also displays better convergence behaviour per iteration.
    // Setting Solver::Options::num_threads to the maximum number
    // possible is highly recommended.
    bool use_inner_iterations;

    // If use_inner_iterations is true, then the user has two choices.
    //
    // 1. Let the solver heuristically decide which parameter blocks
    //    to optimize in each inner iteration. To do this leave
    //    Solver::Options::inner_iteration_ordering untouched.
    //
    // 2. Specify a collection of ordered independent sets, where the
    //    lower numbered groups are optimized before the higher
    //    numbered groups. Each group must be an independent set. Not
    //    all parameter blocks need to be present in the ordering.
    ParameterBlockOrdering* inner_iteration_ordering;
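
    // For example, a sketch assuming x and y are parameter blocks
    // (here single doubles) that do not co-occur in any residual
    // block, and so together form an independent set:
    //
    //   options.use_inner_iterations = true;
    //   options.inner_iteration_ordering = new ceres::ParameterBlockOrdering;
    //   options.inner_iteration_ordering->AddElementToGroup(&x, 0);
    //   options.inner_iteration_ordering->AddElementToGroup(&y, 0);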

    // Generally speaking, inner iterations make significant progress
    // in the early stages of the solve and then their contribution
    // drops down sharply, at which point the time spent doing inner
    // iterations is not worth it.
    //
    // Once the relative decrease in the objective function due to
    // inner iterations drops below inner_iteration_tolerance, the use
    // of inner iterations in subsequent trust region minimizer
    // iterations is disabled.
    double inner_iteration_tolerance;

    // Minimum number of iterations for which the linear solver should
    // run, even if the convergence criterion is satisfied.
    int min_linear_solver_iterations;

    // Maximum number of iterations for which the linear solver should
    // run. If the solver does not converge in less than
    // max_linear_solver_iterations, then it returns MAX_ITERATIONS
    // as its termination type.
    int max_linear_solver_iterations;

    // Forcing sequence parameter. The truncated Newton solver uses
    // this number to control the relative accuracy with which the
    // Newton step is computed.
    //
    // This constant is passed to ConjugateGradientsSolver which uses
    // it to terminate the iterations when
    //
    //   (Q_i - Q_{i-1}) / Q_i < eta / i
    //
    double eta;

    // Normalize the jacobian using Jacobi scaling before calling
    // the linear least squares solver.
    bool jacobi_scaling;
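
    // For example, a sketch of configuring an inexact Newton solve
    // with the CGNR solver (the values are illustrative only):
    //
    //   options.linear_solver_type = ceres::CGNR;
    //   options.max_linear_solver_iterations = 100;
    //   options.eta = 1e-2;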

    // Logging options ---------------------------------------------------------

    LoggingType logging_type;

    // By default the Minimizer progress is logged to VLOG(1), which
    // is sent to STDERR depending on the vlog level. If this flag is
    // set to true, and logging_type is not SILENT, the logging output
    // is sent to STDOUT.
    bool minimizer_progress_to_stdout;

    // List of iterations at which the minimizer should dump the trust
    // region problem. Useful for testing and benchmarking. If empty
    // (default), no problems are dumped.
    vector<int> trust_region_minimizer_iterations_to_dump;

    // Directory to which the problems should be written. Should be
    // non-empty if trust_region_minimizer_iterations_to_dump is
    // non-empty and trust_region_problem_dump_format_type is not
    // CONSOLE.
    string trust_region_problem_dump_directory;
    DumpFormatType trust_region_problem_dump_format_type;

    // Finite differences options ----------------------------------------------

    // Check all jacobians computed by each residual block with finite
    // differences. This is expensive since it involves computing the
    // derivative by normal means (e.g. user specified, autodiff,
    // etc), then also computing it using finite differences. The
    // results are compared, and if they differ substantially, details
    // are printed to the log.
    bool check_gradients;

    // Relative precision to check for in the gradient checker. If the
    // relative difference between an element in a jacobian exceeds
    // this number, then the jacobian for that cost term is dumped.
    double gradient_check_relative_precision;

    // Relative shift used for taking numeric derivatives. For finite
    // differencing, each dimension is evaluated at slightly shifted
    // values; for the case of central difference, this is what gets
    // evaluated:
    //
    //   delta = numeric_derivative_relative_step_size;
    //   f_initial  = f(x)
    //   f_forward  = f((1 + delta) * x)
    //   f_backward = f((1 - delta) * x)
    //
    // The finite differencing is done along each dimension. The
    // reason to use a relative (rather than absolute) step size is
    // that this way, numeric differentiation works for functions
    // where the arguments are typically large (e.g. 1e9) and when
    // the values are small (e.g. 1e-5). It is possible to construct
    // "torture cases" which break this finite difference heuristic,
    // but they do not come up often in practice.
    //
    // TODO(keir): Pick a smarter number than the default above! In
    // theory a good choice is sqrt(eps) * x, which for doubles means
    // about 1e-8 * x. However, I have found this number too
    // optimistic. This number should be exposed for users to change.
    double numeric_derivative_relative_step_size;

    // If true, the user's parameter blocks are updated at the end of
    // every Minimizer iteration, otherwise they are updated when the
    // Minimizer terminates. This is useful if, for example, the user
    // wishes to visualize the state of the optimization every
    // iteration.
    bool update_state_every_iteration;

    // Callbacks that are executed at the end of each iteration of the
    // Minimizer. An iteration may terminate midway, either due to
    // numerical failures or because one of the convergence tests has
    // been satisfied. In this case none of the callbacks are
    // executed.
    //
    // Callbacks are executed in the order that they are specified in
    // this vector. By default, parameter blocks are updated only at
    // the end of the optimization, i.e. when the Minimizer
    // terminates. This behaviour is controlled by
    // update_state_every_iteration. If the user wishes to have access
    // to the updated parameter blocks when their callbacks are
    // executed, then set update_state_every_iteration to true.
    //
    // The solver does NOT take ownership of these pointers.
    vector<IterationCallback*> callbacks;
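
    // For example, a minimal sketch of a logging callback
    // (hypothetical, not part of the library). Since the solver does
    // not take ownership, the callback object must outlive the call
    // to Solve:
    //
    //   class LoggingCallback : public ceres::IterationCallback {
    //    public:
    //     virtual ceres::CallbackReturnType operator()(
    //         const ceres::IterationSummary& summary) {
    //       std::cout << summary.iteration << ": " << summary.cost << "\n";
    //       return ceres::SOLVER_CONTINUE;
    //     }
    //   };
    //
    //   LoggingCallback logging_callback;
    //   options.callbacks.push_back(&logging_callback);
    //   options.update_state_every_iteration = true;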

    // If non-empty, a summary of the execution of the solver is
    // recorded to this file.
    string solver_log;
  };

  struct Summary {
    Summary();

    // A brief one line description of the state of the solver after
    // termination.
    string BriefReport() const;

    // A full multiline description of the state of the solver after
    // termination.
    string FullReport() const;
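
    // For example, a sketch of inspecting the summary after a solve:
    //
    //   ceres::Solver::Summary summary;
    //   ceres::Solve(options, &problem, &summary);
    //   if (summary.termination_type == ceres::NUMERICAL_FAILURE) {
    //     std::cout << summary.FullReport() << "\n";
    //   }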

    // Minimizer summary -------------------------------------------------
    MinimizerType minimizer_type;

    SolverTerminationType termination_type;

    // If the solver did not run, or there was a failure, a
    // description of the error.
    string error;

    // Cost of the problem before and after the optimization. See
    // problem.h for definition of the cost of a problem.
    double initial_cost;
    double final_cost;

    // The part of the total cost that comes from residual blocks that
    // were held fixed by the preprocessor because all the parameter
    // blocks that they depend on were fixed.
    double fixed_cost;

    vector<IterationSummary> iterations;

    int num_successful_steps;
    int num_unsuccessful_steps;
    int num_inner_iteration_steps;

    // All times reported below are wall times.

    // When the user calls Solve, before the actual optimization
    // occurs, Ceres performs a number of preprocessing steps. These
    // include error checks, memory allocations, and reorderings. This
    // time is accounted for as preprocessing time.
    double preprocessor_time_in_seconds;

    // Time spent in the TrustRegionMinimizer.
    double minimizer_time_in_seconds;

    // After the Minimizer is finished, some time is spent in
    // re-evaluating residuals etc. This time is accounted for in the
    // postprocessor time.
    double postprocessor_time_in_seconds;

    // Sum total of all time spent inside Ceres when Solve is called.
    double total_time_in_seconds;

    double linear_solver_time_in_seconds;
    double residual_evaluation_time_in_seconds;
    double jacobian_evaluation_time_in_seconds;
    double inner_iteration_time_in_seconds;

    // Preprocessor summary.
    int num_parameter_blocks;
    int num_parameters;
    int num_effective_parameters;
    int num_residual_blocks;
    int num_residuals;

    int num_parameter_blocks_reduced;
    int num_parameters_reduced;
    int num_effective_parameters_reduced;
    int num_residual_blocks_reduced;
    int num_residuals_reduced;

    int num_eliminate_blocks_given;
    int num_eliminate_blocks_used;

    int num_threads_given;
    int num_threads_used;

    int num_linear_solver_threads_given;
    int num_linear_solver_threads_used;

    LinearSolverType linear_solver_type_given;
    LinearSolverType linear_solver_type_used;

    vector<int> linear_solver_ordering_given;
    vector<int> linear_solver_ordering_used;

    bool inner_iterations_given;
    bool inner_iterations_used;

    vector<int> inner_iteration_ordering_given;
    vector<int> inner_iteration_ordering_used;

    PreconditionerType preconditioner_type;

    TrustRegionStrategyType trust_region_strategy_type;
    DoglegType dogleg_type;

    SparseLinearAlgebraLibraryType sparse_linear_algebra_library;

    LineSearchDirectionType line_search_direction_type;
    LineSearchType line_search_type;
    LineSearchInterpolationType line_search_interpolation_type;
    NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;

    int max_lbfgs_rank;
  };

  // Once a least squares problem has been built, this function takes
  // the problem and optimizes it based on the values of the options
  // parameters. Upon return, a detailed summary of the work performed
  // by the preprocessor, the non-linear minimizer and the linear
  // solver is reported in the summary object.
  virtual void Solve(const Options& options,
                     Problem* problem,
                     Solver::Summary* summary);
};

// Helper function which avoids going through the interface.
void Solve(const Solver::Options& options,
           Problem* problem,
           Solver::Summary* summary);

}  // namespace ceres

#endif  // CERES_PUBLIC_SOLVER_H_