types.h revision 399f7d09e0c45af54b77b4ab9508d6f23759b927
1// Ceres Solver - A fast non-linear least squares minimizer
2// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
3// http://code.google.com/p/ceres-solver/
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are met:
7//
8// * Redistributions of source code must retain the above copyright notice,
9//   this list of conditions and the following disclaimer.
10// * Redistributions in binary form must reproduce the above copyright notice,
11//   this list of conditions and the following disclaimer in the documentation
12//   and/or other materials provided with the distribution.
13// * Neither the name of Google Inc. nor the names of its contributors may be
14//   used to endorse or promote products derived from this software without
15//   specific prior written permission.
16//
17// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27// POSSIBILITY OF SUCH DAMAGE.
28//
29// Author: sameeragarwal@google.com (Sameer Agarwal)
30//
31// Enums and other top level class definitions.
32//
33// Note: internal/types.cc defines stringification routines for some
34// of these enums. Please update those routines if you extend or
35// remove enums from here.
36
37#ifndef CERES_PUBLIC_TYPES_H_
38#define CERES_PUBLIC_TYPES_H_
39
40#include <string>
41
42#include "ceres/internal/port.h"
43
44namespace ceres {
45
// Basic integer types. These typedefs are in the Ceres namespace to avoid
// conflicts with other packages having similar typedefs.
// NOTE(review): assumes 'short' is 16 bits and 'int' is 32 bits on all
// supported platforms — confirm; fixed-width types would make this explicit.
typedef short int16;
typedef int   int32;
50
// Argument type used in interfaces that can optionally take ownership
// of a passed in argument. If TAKE_OWNERSHIP is passed, the called
// object takes ownership of the pointer argument, and will call
// delete on it upon completion.
enum Ownership {
  // Caller retains ownership; the callee must not delete the pointer.
  DO_NOT_TAKE_OWNERSHIP,
  // Callee owns the pointer and deletes it when done.
  TAKE_OWNERSHIP
};
59
// TODO(keir): Considerably expand the explanations of each solver type.
enum LinearSolverType {
  // These solvers are for general rectangular systems formed from the
  // normal equations A'A x = A'b. They are direct solvers and do not
  // assume any special problem structure.

  // Solve the normal equations using a dense Cholesky solver; based
  // on Eigen.
  DENSE_NORMAL_CHOLESKY,

  // Solve the normal equations using a dense QR solver; based on
  // Eigen.
  DENSE_QR,

  // Solve the normal equations using a sparse cholesky solver; requires
  // SuiteSparse or CXSparse.
  SPARSE_NORMAL_CHOLESKY,

  // Specialized solvers, specific to problems with a generalized
  // bi-partite structure.

  // Solves the reduced linear system using a dense Cholesky solver;
  // based on Eigen.
  DENSE_SCHUR,

  // Solves the reduced linear system using a sparse Cholesky solver;
  // based on CHOLMOD.
  SPARSE_SCHUR,

  // Solves the reduced linear system using Conjugate Gradients, based
  // on a new Ceres implementation.  Suitable for large scale
  // problems.
  ITERATIVE_SCHUR,

  // Conjugate gradients on the normal equations.
  CGNR
};
97
// Preconditioners for use with the iterative linear solvers
// (ITERATIVE_SCHUR, CGNR).
enum PreconditionerType {
  // Trivial preconditioner - the identity matrix.
  IDENTITY,

  // Block diagonal of the Gauss-Newton Hessian.
  JACOBI,

  // Block diagonal of the Schur complement. This preconditioner may
  // only be used with the ITERATIVE_SCHUR solver.
  SCHUR_JACOBI,

  // Visibility clustering based preconditioners.
  //
  // These preconditioners are well suited for Structure from Motion
  // problems, particularly problems arising from community photo
  // collections. These preconditioners use the visibility structure
  // of the scene to determine the sparsity structure of the
  // preconditioner. Requires SuiteSparse/CHOLMOD.
  CLUSTER_JACOBI,
  CLUSTER_TRIDIAGONAL
};
119
// Backend library used for sparse Cholesky factorizations.
enum SparseLinearAlgebraLibraryType {
  // High performance sparse Cholesky factorization and approximate
  // minimum degree ordering.
  SUITE_SPARSE,

  // A lightweight replacement for SuiteSparse.
  CX_SPARSE
};
128
// Backend library used for dense matrix factorizations.
enum DenseLinearAlgebraLibraryType {
  // Dense linear algebra via the Eigen library (header only, always
  // available).
  EIGEN,
  // Dense linear algebra via an optimized LAPACK implementation.
  LAPACK
};
133
// Reason why a linear solver invocation terminated.
enum LinearSolverTerminationType {
  // Termination criterion was met. For factorization based solvers
  // the tolerance is assumed to be zero. Any user provided values are
  // ignored.
  TOLERANCE,

  // Solver ran for max_num_iterations and terminated before the
  // termination tolerance could be satisfied.
  MAX_ITERATIONS,

  // Solver is stuck and further iterations will not result in any
  // measurable progress.
  STAGNATION,

  // Solver failed. Solver was terminated due to numerical errors. The
  // exact cause of failure depends on the particular solver being
  // used.
  FAILURE
};
153
// Logging options
// The options get progressively noisier.
enum LoggingType {
  // No logging output from the minimizer.
  SILENT,
  // Emit a log line for every minimizer iteration.
  PER_MINIMIZER_ITERATION
};
160
// Top-level minimization algorithm used by the solver.
enum MinimizerType {
  // Line search based minimization (see LineSearchDirectionType /
  // LineSearchType below).
  LINE_SEARCH,
  // Trust region based minimization (see TrustRegionStrategyType
  // below).
  TRUST_REGION
};
165
enum LineSearchDirectionType {
  // Negative of the gradient.
  STEEPEST_DESCENT,

  // A generalization of the Conjugate Gradient method to non-linear
  // functions. The generalization can be performed in a number of
  // different ways, resulting in a variety of search directions. The
  // precise choice of the non-linear conjugate gradient algorithm
  // used is determined by NonlinearConjugateGradientType.
  NONLINEAR_CONJUGATE_GRADIENT,

  // BFGS, and its limited memory approximation L-BFGS, are quasi-Newton
  // algorithms that approximate the Hessian matrix by iteratively refining
  // an initial estimate with rank-one updates using the gradient at each
  // iteration. They are a generalisation of the Secant method and satisfy
  // the Secant equation.  The Secant equation has an infinite number of
  // solutions in multiple dimensions, as there are N*(N+1)/2 degrees of
  // freedom in a symmetric matrix but only N conditions are specified by
  // the Secant equation. The requirement that the Hessian approximation be
  // positive definite imposes another N additional constraints, but that
  // still leaves remaining degrees-of-freedom.  (L)BFGS methods uniquely
  // determine the approximate Hessian by imposing the additional
  // constraints that the approximation at the next iteration must be the
  // 'closest' to the current approximation (the nature of how this
  // proximity is measured is actually the defining difference between a
  // family of quasi-Newton methods including (L)BFGS & DFP). (L)BFGS is
  // currently regarded as being the best known general quasi-Newton method.
  //
  // The principal difference between BFGS and L-BFGS is that whilst BFGS
  // maintains a full, dense approximation to the (inverse) Hessian, L-BFGS
  // maintains only a window of the last M observations of the parameters and
  // gradients. Using this observation history, the calculation of the next
  // search direction can be computed without requiring the construction of the
  // full dense inverse Hessian approximation. This is particularly important
  // for problems with a large number of parameters, where storage of an N-by-N
  // matrix in memory would be prohibitive.
  //
  // For more details on BFGS see:
  //
  // Broyden, C.G., "The Convergence of a Class of Double-rank Minimization
  // Algorithms,"; J. Inst. Maths. Applics., Vol. 6, pp 76–90, 1970.
  //
  // Fletcher, R., "A New Approach to Variable Metric Algorithms,"
  // Computer Journal, Vol. 13, pp 317–322, 1970.
  //
  // Goldfarb, D., "A Family of Variable Metric Updates Derived by Variational
  // Means," Mathematics of Computing, Vol. 24, pp 23–26, 1970.
  //
  // Shanno, D.F., "Conditioning of Quasi-Newton Methods for Function
  // Minimization," Mathematics of Computing, Vol. 24, pp 647–656, 1970.
  //
  // For more details on L-BFGS see:
  //
  // Nocedal, J. (1980). "Updating Quasi-Newton Matrices with Limited
  // Storage". Mathematics of Computation 35 (151): 773–782.
  //
  // Byrd, R. H.; Nocedal, J.; Schnabel, R. B. (1994).
  // "Representations of Quasi-Newton Matrices and their use in
  // Limited Memory Methods". Mathematical Programming 63 (4):
  // 129–156.
  //
  // A general reference for both methods:
  //
  // Nocedal J., Wright S., Numerical Optimization, 2nd Ed. Springer, 1999.
  LBFGS,
  BFGS,
};
233
// Nonlinear conjugate gradient methods are a generalization of the
// method of Conjugate Gradients for linear systems. The
// generalization can be carried out in a number of different ways
// leading to number of different rules for computing the search
// direction. Ceres provides a number of different variants. For more
// details see Numerical Optimization by Nocedal & Wright.
enum NonlinearConjugateGradientType {
  FLETCHER_REEVES,
  // NOTE(review): identifier misspells "Polak-Ribiere"; it is part of
  // the public API (and the stringification in internal/types.cc), so
  // renaming it would break callers.
  POLAK_RIBIRERE,
  HESTENES_STIEFEL,
};
245
enum LineSearchType {
  // Backtracking line search with polynomial interpolation or
  // bisection.
  ARMIJO,
  // Line search that additionally enforces the Wolfe conditions on
  // the accepted step size.
  WOLFE,
};
252
// Ceres supports different strategies for computing the trust region
// step.
enum TrustRegionStrategyType {
  // The default trust region strategy is to use the step computation
  // used in the Levenberg-Marquardt algorithm. For more details see
  // levenberg_marquardt_strategy.h
  LEVENBERG_MARQUARDT,

  // Powell's dogleg algorithm interpolates between the Cauchy point
  // and the Gauss-Newton step. It is particularly useful if the
  // LEVENBERG_MARQUARDT algorithm is making a large number of
  // unsuccessful steps. For more details see dogleg_strategy.h.
  //
  // NOTES:
  //
  // 1. This strategy has not been experimented with or tested as
  // extensively as LEVENBERG_MARQUARDT, and therefore it should be
  // considered EXPERIMENTAL for now.
  //
  // 2. For now this strategy should only be used with exact
  // factorization based linear solvers, i.e., SPARSE_SCHUR,
  // DENSE_SCHUR, DENSE_QR and SPARSE_NORMAL_CHOLESKY.
  DOGLEG
};
277
// Ceres supports two different dogleg strategies.
// The "traditional" dogleg method by Powell and the
// "subspace" method described in
// R. H. Byrd, R. B. Schnabel, and G. A. Shultz,
// "Approximate solution of the trust region problem by minimization
//  over two-dimensional subspaces", Mathematical Programming,
// 40 (1988), pp. 247--263
enum DoglegType {
  // The traditional approach constructs a dogleg path
  // consisting of two line segments and finds the furthest
  // point on that path that is still inside the trust region.
  TRADITIONAL_DOGLEG,

  // The subspace approach finds the exact minimum of the model
  // constrained to the subspace spanned by the dogleg path.
  SUBSPACE_DOGLEG
};
295
// Reason why the solver (Solver::Summary::termination_type) stopped.
enum SolverTerminationType {
  // The minimizer did not run at all; usually due to errors in the user's
  // Problem or the solver options.
  DID_NOT_RUN,

  // The solver ran for maximum number of iterations specified by the
  // user, but none of the convergence criteria specified by the user
  // were met.
  NO_CONVERGENCE,

  // Minimizer terminated because
  //  (new_cost - old_cost) < function_tolerance * old_cost;
  FUNCTION_TOLERANCE,

  // Minimizer terminated because
  // max_i |gradient_i| < gradient_tolerance * max_i|initial_gradient_i|
  GRADIENT_TOLERANCE,

  // Minimizer terminated because
  //  |step|_2 <= parameter_tolerance * ( |x|_2 +  parameter_tolerance)
  PARAMETER_TOLERANCE,

  // The minimizer terminated because it encountered a numerical error
  // that it could not recover from.
  NUMERICAL_FAILURE,

  // Using an IterationCallback object, user code can control the
  // minimizer. The following enums indicate that the user code was
  // responsible for termination.

  // User's IterationCallback returned SOLVER_ABORT.
  USER_ABORT,

  // User's IterationCallback returned SOLVER_TERMINATE_SUCCESSFULLY
  USER_SUCCESS
};
332
// Enums used by the IterationCallback instances to indicate to the
// solver whether it should continue solving, the user detected an
// error or the solution is good enough and the solver should
// terminate.
enum CallbackReturnType {
  // Continue solving to next iteration.
  SOLVER_CONTINUE,

  // Terminate solver, and do not update the parameter blocks upon
  // return. Unless the user has set
  // Solver::Options::update_state_every_iteration, in which case the
  // state would have been updated every iteration
  // anyways. Solver::Summary::termination_type is set to USER_ABORT.
  SOLVER_ABORT,

  // Terminate solver, update state and
  // return. Solver::Summary::termination_type is set to USER_SUCCESS.
  SOLVER_TERMINATE_SUCCESSFULLY
};
352
// The format in which linear least squares problems should be logged
// when Solver::Options::lsqp_iterations_to_dump is non-empty.
enum DumpFormatType {
  // Print the linear least squares problem in a human readable format
  // to stderr. The Jacobian is printed as a dense matrix. The vectors
  // D, x and f are printed as dense vectors. This should only be used
  // for small problems.
  CONSOLE,

  // Write out the linear least squares problem to the directory
  // pointed to by Solver::Options::lsqp_dump_directory as text files
  // which can be read into MATLAB/Octave. The Jacobian is dumped as a
  // text file containing (i,j,s) triplets, the vectors D, x and f are
  // dumped as text files containing a list of their values.
  //
  // A MATLAB/Octave script called lm_iteration_???.m is also output,
  // which can be used to parse and load the problem into memory.
  TEXTFILE
};
372
// For SizedCostFunction and AutoDiffCostFunction, DYNAMIC can be specified for
// the number of residuals. If specified, then the number of residuals for that
// cost function can vary at runtime.
enum DimensionType {
  // Sentinel meaning "dimension determined at runtime".
  DYNAMIC = -1
};
379
// Finite differencing scheme used for numeric differentiation.
enum NumericDiffMethod {
  // Central differences: (f(x + h) - f(x - h)) / 2h. More accurate,
  // but costs two extra evaluations per parameter.
  CENTRAL,
  // Forward differences: (f(x + h) - f(x)) / h. Cheaper but less
  // accurate.
  FORWARD
};
384
// Degree of the polynomial used to interpolate candidate step sizes
// during a line search.
enum LineSearchInterpolationType {
  BISECTION,
  QUADRATIC,
  CUBIC
};
390
// Algorithm used by the Covariance class to compute the covariance of
// the estimated parameters.
enum CovarianceAlgorithmType {
  // Dense singular value decomposition; handles rank deficient
  // Jacobians but only suitable for small problems.
  DENSE_SVD,
  // Sparse Cholesky factorization based covariance estimation.
  SPARSE_CHOLESKY,
  // Sparse QR factorization based covariance estimation.
  SPARSE_QR
};
396
// Stringification and parsing routines for the enums defined above.
// The implementations live in internal/types.cc; keep them in sync
// when enum values are added or removed (see the note at the top of
// this file).
//
// The FooToString functions return a pointer to a string constant for
// the given enum value. The StringToFoo functions parse 'value' and
// presumably return true on success, writing the parsed enum through
// 'type' — confirm exact failure behavior against internal/types.cc.
//
// NOTE(review): 'value' is passed by value, which copies the string on
// every call; passing by const reference would avoid the copy but
// requires a matching change to the definitions in internal/types.cc.
const char* LinearSolverTypeToString(LinearSolverType type);
bool StringToLinearSolverType(string value, LinearSolverType* type);

const char* PreconditionerTypeToString(PreconditionerType type);
bool StringToPreconditionerType(string value, PreconditionerType* type);

const char* SparseLinearAlgebraLibraryTypeToString(
    SparseLinearAlgebraLibraryType type);
bool StringToSparseLinearAlgebraLibraryType(
    string value,
    SparseLinearAlgebraLibraryType* type);

const char* DenseLinearAlgebraLibraryTypeToString(
    DenseLinearAlgebraLibraryType type);
bool StringToDenseLinearAlgebraLibraryType(
    string value,
    DenseLinearAlgebraLibraryType* type);

const char* TrustRegionStrategyTypeToString(TrustRegionStrategyType type);
bool StringToTrustRegionStrategyType(string value,
                                     TrustRegionStrategyType* type);

const char* DoglegTypeToString(DoglegType type);
bool StringToDoglegType(string value, DoglegType* type);

const char* MinimizerTypeToString(MinimizerType type);
bool StringToMinimizerType(string value, MinimizerType* type);

const char* LineSearchDirectionTypeToString(LineSearchDirectionType type);
bool StringToLineSearchDirectionType(string value,
                                     LineSearchDirectionType* type);

const char* LineSearchTypeToString(LineSearchType type);
bool StringToLineSearchType(string value, LineSearchType* type);

const char* NonlinearConjugateGradientTypeToString(
    NonlinearConjugateGradientType type);
bool StringToNonlinearConjugateGradientType(
    string value,
    NonlinearConjugateGradientType* type);

const char* LineSearchInterpolationTypeToString(
    LineSearchInterpolationType type);
bool StringToLineSearchInterpolationType(
    string value,
    LineSearchInterpolationType* type);

const char* CovarianceAlgorithmTypeToString(
    CovarianceAlgorithmType type);
bool StringToCovarianceAlgorithmType(
    string value,
    CovarianceAlgorithmType* type);

const char* LinearSolverTerminationTypeToString(
    LinearSolverTerminationType type);

const char* SolverTerminationTypeToString(SolverTerminationType type);

// Returns true for the Schur complement based solvers, i.e.,
// presumably DENSE_SCHUR, SPARSE_SCHUR and ITERATIVE_SCHUR — confirm
// against the definition in internal/types.cc.
bool IsSchurType(LinearSolverType type);

// Report whether Ceres was compiled with support for the given sparse
// or dense linear algebra backend.
bool IsSparseLinearAlgebraLibraryTypeAvailable(
    SparseLinearAlgebraLibraryType type);
bool IsDenseLinearAlgebraLibraryTypeAvailable(
    DenseLinearAlgebraLibraryType type);
460
461}  // namespace ceres
462
463#endif  // CERES_PUBLIC_TYPES_H_
464