gradient_checking_cost_function.cc revision 0ae28bd5885b5daa526898fcf7c323dc2c3e1963
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: keir@google.com (Keir Mierle)

#include "ceres/gradient_checking_cost_function.h"

#include <algorithm>
#include <cmath>
#include <numeric>
#include <string>
#include <vector>

#include "ceres/cost_function.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/parameter_block.h"
#include "ceres/problem.h"
#include "ceres/problem_impl.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/runtime_numeric_diff_cost_function.h"
#include "ceres/stringprintf.h"
#include "ceres/types.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {
namespace {

// True if x and y have an absolute relative difference less than
// relative_precision and false otherwise. Stores the relative and absolute
// difference in relative/absolute_error if non-NULL.
bool IsClose(double x, double y, double relative_precision,
             double *relative_error,
             double *absolute_error) {
  double local_absolute_error;
  double local_relative_error;
  if (!absolute_error) {
    absolute_error = &local_absolute_error;
  }
  if (!relative_error) {
    relative_error = &local_relative_error;
  }
  *absolute_error = fabs(x - y);
  *relative_error = *absolute_error / std::max(fabs(x), fabs(y));
  if (x == 0 || y == 0) {
    // If x or y is exactly zero, then relative difference doesn't have any
    // meaning. Take the absolute difference instead.
    *relative_error = *absolute_error;
  }
  return fabs(*relative_error) < fabs(relative_precision);
}
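// For example, with relative_precision = 1e-2, IsClose(1.0, 1.001, 1e-2,
// NULL, NULL) is true (relative error roughly 1e-3), while
// IsClose(1.0, 1.1, 1e-2, NULL, NULL) is false (relative error roughly 9e-2).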

class GradientCheckingCostFunction : public CostFunction {
 public:
  GradientCheckingCostFunction(const CostFunction* function,
                               double relative_step_size,
                               double relative_precision,
                               const string& extra_info)
      : function_(function),
        finite_diff_cost_function_(
            CreateRuntimeNumericDiffCostFunction(function,
                                                 CENTRAL,
                                                 relative_step_size)),
        relative_precision_(relative_precision),
        extra_info_(extra_info) {
    *mutable_parameter_block_sizes() = function->parameter_block_sizes();
    set_num_residuals(function->num_residuals());
  }

  virtual ~GradientCheckingCostFunction() { }

  virtual bool Evaluate(double const* const* parameters,
                        double* residuals,
                        double** jacobians) const {
    if (!jacobians) {
      // Nothing to check in this case; just forward.
      return function_->Evaluate(parameters, residuals, NULL);
    }

    int num_residuals = function_->num_residuals();

    // Make space for the jacobians of the two methods.
    const vector<int16>& block_sizes = function_->parameter_block_sizes();
    vector<Matrix> term_jacobians(block_sizes.size());
    vector<Matrix> finite_difference_jacobians(block_sizes.size());
    vector<double*> term_jacobian_pointers(block_sizes.size());
    vector<double*> finite_difference_jacobian_pointers(block_sizes.size());
    for (int i = 0; i < block_sizes.size(); i++) {
      term_jacobians[i].resize(num_residuals, block_sizes[i]);
      term_jacobian_pointers[i] = term_jacobians[i].data();
      finite_difference_jacobians[i].resize(num_residuals, block_sizes[i]);
      finite_difference_jacobian_pointers[i] =
          finite_difference_jacobians[i].data();
    }
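    // Note (informal, assuming the internal Matrix typedef uses row-major
    // Eigen storage): each term_jacobians[i] is a num_residuals x
    // block_sizes[i] matrix, so its data() pointer provides the row-major
    // jacobian block layout that CostFunction::Evaluate() expects.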

    // Evaluate the derivative using the user supplied code.
    if (!function_->Evaluate(parameters,
                             residuals,
                             &term_jacobian_pointers[0])) {
      LOG(WARNING) << "Function evaluation failed.";
      return false;
    }

    // Evaluate the derivative using numeric derivatives.
    finite_diff_cost_function_->Evaluate(
        parameters,
        residuals,
        &finite_difference_jacobian_pointers[0]);

    // See if any elements have relative error larger than the threshold.
    int num_bad_jacobian_components = 0;
    double worst_relative_error = 0;

    // Accumulate the error message for all the jacobians; it is only logged
    // below if at least one jacobian component is bad.
    string m;
    for (int k = 0; k < block_sizes.size(); k++) {
      // Copy the original jacobian blocks into the jacobians array.
      if (jacobians[k] != NULL) {
        MatrixRef(jacobians[k],
                  term_jacobians[k].rows(),
                  term_jacobians[k].cols()) = term_jacobians[k];
      }

      StringAppendF(&m,
                    "========== "
                    "Jacobian for " "block %d: (%ld by %ld) "
                    "==========\n",
                    k,
                    static_cast<long>(term_jacobians[k].rows()),
                    static_cast<long>(term_jacobians[k].cols()));
      // The funny spacing creates appropriately aligned column headers.
      m += " block  row  col        user dx/dy    num diff dx/dy         "
           "abs error    relative error         parameter          residual\n";

      for (int i = 0; i < term_jacobians[k].rows(); i++) {
        for (int j = 0; j < term_jacobians[k].cols(); j++) {
          double term_jacobian = term_jacobians[k](i, j);
          double finite_jacobian = finite_difference_jacobians[k](i, j);
          double relative_error, absolute_error;
          bool bad_jacobian_entry =
              !IsClose(term_jacobian,
                       finite_jacobian,
                       relative_precision_,
                       &relative_error,
                       &absolute_error);
          worst_relative_error = std::max(worst_relative_error,
                                          relative_error);

          StringAppendF(&m, "%6d %4d %4d %17g %17g %17g %17g %17g %17g",
                        k, i, j,
                        term_jacobian, finite_jacobian,
                        absolute_error, relative_error,
                        parameters[k][j],
                        residuals[i]);

          if (bad_jacobian_entry) {
            num_bad_jacobian_components++;
            StringAppendF(
                &m, " ------ (%d,%d,%d) Relative error worse than %g",
                k, i, j, relative_precision_);
          }
          m += "\n";
        }
      }
    }

    // If any jacobian components were bad, dump comprehensive debug info.
    if (num_bad_jacobian_components) {
      string header = StringPrintf("Detected %d bad jacobian component(s). "
                                   "Worst relative error was %g.\n",
                                   num_bad_jacobian_components,
                                   worst_relative_error);
      if (!extra_info_.empty()) {
        header += "Extra info for this residual: " + extra_info_ + "\n";
      }
      LOG(WARNING) << "\n" << header << m;
    }
    return true;
  }

 private:
  const CostFunction* function_;
  internal::scoped_ptr<CostFunction> finite_diff_cost_function_;
  double relative_precision_;
  string extra_info_;
};

}  // namespace

CostFunction *CreateGradientCheckingCostFunction(
    const CostFunction *cost_function,
    double relative_step_size,
    double relative_precision,
    const string& extra_info) {
  return new GradientCheckingCostFunction(cost_function,
                                          relative_step_size,
                                          relative_precision,
                                          extra_info);
}
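
// Usage sketch (illustrative only; MyCostFunction, problem, x and the
// tolerances below are hypothetical). The wrapper is a drop-in replacement
// for the original cost function:
//
//   CostFunction* cost_function = new MyCostFunction(...);
//   CostFunction* checked_cost_function =
//       CreateGradientCheckingCostFunction(cost_function,
//                                          1e-6,   // relative_step_size
//                                          1e-4,   // relative_precision
//                                          "my residual");
//   problem.AddResidualBlock(checked_cost_function, NULL, x);
//
// Every call to checked_cost_function->Evaluate() with non-NULL jacobians
// then compares the analytic jacobians against central-difference estimates
// and logs a warning for entries whose relative error exceeds 1e-4.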

ProblemImpl* CreateGradientCheckingProblemImpl(ProblemImpl* problem_impl,
                                               double relative_step_size,
                                               double relative_precision) {
  // We create new CostFunctions by wrapping the original CostFunction
  // in a gradient checking CostFunction. So it's okay for the
  // ProblemImpl to take ownership of them and destroy them. The
  // LossFunctions and LocalParameterizations are reused, and since
  // they are owned by problem_impl, gradient_checking_problem_impl
  // should not take ownership of them.
  Problem::Options gradient_checking_problem_options;
  gradient_checking_problem_options.cost_function_ownership = TAKE_OWNERSHIP;
  gradient_checking_problem_options.loss_function_ownership =
      DO_NOT_TAKE_OWNERSHIP;
  gradient_checking_problem_options.local_parameterization_ownership =
      DO_NOT_TAKE_OWNERSHIP;

  ProblemImpl* gradient_checking_problem_impl = new ProblemImpl(
      gradient_checking_problem_options);

  Program* program = problem_impl->mutable_program();

  // For every ParameterBlock in problem_impl, create a new parameter
  // block with the same local parameterization and constancy.
  const vector<ParameterBlock*>& parameter_blocks = program->parameter_blocks();
  for (int i = 0; i < parameter_blocks.size(); ++i) {
    ParameterBlock* parameter_block = parameter_blocks[i];
    gradient_checking_problem_impl->AddParameterBlock(
        parameter_block->mutable_user_state(),
        parameter_block->Size(),
        parameter_block->mutable_local_parameterization());

    if (parameter_block->IsConstant()) {
      gradient_checking_problem_impl->SetParameterBlockConstant(
          parameter_block->mutable_user_state());
    }
  }

  // For every ResidualBlock in problem_impl, create a new
  // ResidualBlock by wrapping its CostFunction inside a
  // GradientCheckingCostFunction.
  const vector<ResidualBlock*>& residual_blocks = program->residual_blocks();
  for (int i = 0; i < residual_blocks.size(); ++i) {
    ResidualBlock* residual_block = residual_blocks[i];

    // Build a human readable string which identifies the
    // ResidualBlock. This is used by the GradientCheckingCostFunction
    // when logging debugging information.
    string extra_info = StringPrintf(
        "Residual block id %d; depends on parameters [", i);
    vector<double*> parameter_blocks;
    for (int j = 0; j < residual_block->NumParameterBlocks(); ++j) {
      ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
      parameter_blocks.push_back(parameter_block->mutable_user_state());
      StringAppendF(&extra_info, "%p", parameter_block->mutable_user_state());
      extra_info += (j < residual_block->NumParameterBlocks() - 1) ? ", " : "]";
    }

    // Wrap the original CostFunction in a GradientCheckingCostFunction.
    CostFunction* gradient_checking_cost_function =
        CreateGradientCheckingCostFunction(residual_block->cost_function(),
                                           relative_step_size,
                                           relative_precision,
                                           extra_info);

    // The const_cast is necessary because
    // ProblemImpl::AddResidualBlock can potentially take ownership of
    // the LossFunction, but in this case we are guaranteed that this
    // will not be the case, so this const_cast is harmless.
    gradient_checking_problem_impl->AddResidualBlock(
        gradient_checking_cost_function,
        const_cast<LossFunction*>(residual_block->loss_function()),
        parameter_blocks);
  }

  return gradient_checking_problem_impl;
}
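
// Note (informal sketch): wrapping of this kind is what a gradient checking
// solver option can build on. Assuming a ProblemImpl* named problem_impl and
// user-chosen step size and precision values, a caller might write
//
//   if (check_gradients) {
//     problem_impl = CreateGradientCheckingProblemImpl(problem_impl,
//                                                      relative_step_size,
//                                                      relative_precision);
//   }
//
// Because the new problem adds the same user state pointers as the original
// (see AddParameterBlock above), the two problems share parameter storage,
// so optimizing the gradient checking problem updates the original
// parameters as well.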


}  // namespace internal
}  // namespace ceres