// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/corrector.h"

#include <cstddef>
#include <cmath>
#include "ceres/internal/eigen.h"
#include "glog/logging.h"
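
// A minimal sketch of how a caller might drive this class, assuming a
// ceres::LossFunction* named loss_function and row-major residual and
// jacobian arrays (hypothetical snippet; the actual call sites live in
// Ceres' residual block evaluation code):
//
//   double rho[3];
//   loss_function->Evaluate(sq_norm, rho);  // rho, rho', rho''.
//   Corrector corrector(sq_norm, rho);
//   // Correct the jacobian first; the rank-1 term uses the
//   // uncorrected residual values.
//   corrector.CorrectJacobian(num_rows, num_cols, residuals, jacobian);
//   corrector.CorrectResiduals(num_rows, residuals);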

namespace ceres {
namespace internal {

Corrector::Corrector(const double sq_norm, const double rho[3]) {
  CHECK_GE(sq_norm, 0.0);
  sqrt_rho1_ = sqrt(rho[1]);

  // If sq_norm = 0.0, the correction becomes trivial: the residual
  // and the jacobian are scaled by the square root of the first
  // derivative of rho. Handling this case explicitly avoids the
  // divide by zero error that would occur below.
  //
  // The case where rho'' < 0 also gets special handling. Technically
  // it shouldn't, and the computation of the scaling should proceed
  // as below. However, we found in experiments that applying the
  // curvature correction when rho'' < 0, which is the case when we
  // are in the outlier region, slows down the convergence of the
  // algorithm significantly.
  //
  // Thus, we have divided the action of the robustifier into two
  // parts. In the inlier region, we do the full second order
  // correction, which re-weights the gradient of the function by the
  // square root of the first derivative of rho, and the Gauss-Newton
  // Hessian gets both the scaling and the rank-1 curvature
  // correction. Normally, alpha is upper bounded by one, but with
  // this change, alpha is bounded above by zero.
  //
  // Empirically we have observed that the full Triggs correction and
  // the clamped correction both start out as very good approximations
  // to the loss function when we are in the convex part of the
  // function, but as the function starts transitioning from convex to
  // concave, the Triggs approximation diverges more and more, and
  // ultimately becomes linear. The clamped Triggs model, however,
  // remains quadratic.
  //
  // The reason the Triggs approximation becomes so poor is that the
  // curvature correction it applies to the Gauss-Newton Hessian goes
  // from being a full rank correction to a rank deficient one, making
  // the inversion of the Hessian fraught with all sorts of misery and
  // suffering.
  //
  // The clamped correction retains its quadratic nature, and
  // inverting it is always well posed.
  if ((sq_norm == 0.0) || (rho[2] <= 0.0)) {
    residual_scaling_ = sqrt_rho1_;
    alpha_sq_norm_ = 0.0;
    return;
  }
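
  // For example, a Cauchy-like loss rho(s) = log(1 + s) has
  // rho''(s) = -1 / (1 + s)^2 < 0 for all s >= 0, so it always takes
  // the early return above.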

  // We now require that the first derivative of the loss function be
  // positive. This requirement is only imposed when the second
  // derivative is positive, because when the second derivative is
  // non-positive we do not use the second order correction suggested
  // by BANS, and instead use a simpler first order strategy which
  // does not involve a division by the first derivative of the loss
  // function.
  CHECK_GT(rho[1], 0.0);

  // Calculate the smaller of the two solutions to the equation
  //
  //   0.5 * alpha^2 - alpha - rho'' / rho' * z'z = 0.
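  //
  // By the quadratic formula, the two roots of this equation are
  //
  //   alpha = 1 +/- sqrt(1 + 2 * (rho'' / rho') * z'z),
  //
  // and we want the smaller one, 1 - sqrt(D), where D is the
  // discriminant.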
  //
  // Start by calculating the discriminant D.
  const double D = 1.0 + 2.0 * sq_norm * rho[2] / rho[1];

  // Since both rho[1] and rho[2] are guaranteed to be positive at
  // this point, we know that D > 1.0, and therefore alpha < 0. This
  // makes the division by (1 - alpha) below safe, and sq_norm is
  // strictly positive here because of the early return above.

  const double alpha = 1.0 - sqrt(D);

  // Calculate the constants needed by the correction routines.
  residual_scaling_ = sqrt_rho1_ / (1 - alpha);
  alpha_sq_norm_ = alpha / sq_norm;
}
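
// A quick numerical sanity check (illustrative values only): with
// sq_norm = 1, rho' = 1 and rho'' = 0.5, the constructor computes
// D = 1 + 2 * 1 * 0.5 / 1 = 2, so alpha = 1 - sqrt(2) ~= -0.414,
// residual_scaling_ = 1 / sqrt(2) ~= 0.707 and
// alpha_sq_norm_ ~= -0.414.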

void Corrector::CorrectResiduals(const int num_rows, double* residuals) {
  DCHECK(residuals != NULL);
  // Equation 11 in BANS.
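  //
  // The corrected residual is
  //
  //   r <- (sqrt(rho') / (1 - alpha)) * r,
  //
  // a scaling which the constructor has already folded into
  // residual_scaling_.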
  VectorRef(residuals, num_rows) *= residual_scaling_;
}

void Corrector::CorrectJacobian(const int num_rows,
                                const int num_cols,
                                double* residuals,
                                double* jacobian) {
  DCHECK(residuals != NULL);
  DCHECK(jacobian != NULL);

  // The common case (rho[2] <= 0).
  if (alpha_sq_norm_ == 0.0) {
    VectorRef(jacobian, num_rows * num_cols) *= sqrt_rho1_;
    return;
  }

  // Equation 11 in BANS.
  //
  //   J <- sqrt(rho') * (J - (alpha / ||r||^2) * r * r' * J)
  //
  // In days gone by this loop used to be a single Eigen expression of
  // the form
  //
  //   J = sqrt_rho1_ * (J - alpha_sq_norm_ * r * (r.transpose() * J));
  //
  // which turns out to be about 17x slower on BAL problems. The
  // reason is that Eigen is unable to figure out that this expression
  // can be evaluated columnwise and ends up creating a temporary.
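  //
  // The hand-written loop below instead works one column at a time:
  // for each column J_c it first computes the scalar r' * J_c, and
  // then applies
  //
  //   J_c <- sqrt(rho') * (J_c - alpha_sq_norm_ * r * (r' * J_c)),
  //
  // making two passes over the column and allocating no temporaries.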
  for (int c = 0; c < num_cols; ++c) {
    double r_transpose_j = 0.0;
    for (int r = 0; r < num_rows; ++r) {
      r_transpose_j += jacobian[r * num_cols + c] * residuals[r];
    }

    for (int r = 0; r < num_rows; ++r) {
      jacobian[r * num_cols + c] = sqrt_rho1_ *
          (jacobian[r * num_cols + c] -
           alpha_sq_norm_ * residuals[r] * r_transpose_j);
    }
  }
}

}  // namespace internal
}  // namespace ceres