// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: keir@google.com (Keir Mierle)

#include "ceres/gradient_checking_cost_function.h"

#include <algorithm>
#include <cmath>
#include <numeric>
#include <string>
#include <vector>

#include "ceres/cost_function.h"
#include "ceres/dynamic_numeric_diff_cost_function.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/parameter_block.h"
#include "ceres/problem.h"
#include "ceres/problem_impl.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/stringprintf.h"
#include "ceres/types.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {
namespace {

// True if x and y have an absolute relative difference less than
// relative_precision and false otherwise. Stores the relative and absolute
// difference in relative/absolute_error if non-NULL.
bool IsClose(double x, double y, double relative_precision,
             double* relative_error,
             double* absolute_error) {
  double local_absolute_error;
  double local_relative_error;
  if (!absolute_error) {
    absolute_error = &local_absolute_error;
  }
  if (!relative_error) {
    relative_error = &local_relative_error;
  }
  *absolute_error = fabs(x - y);
  *relative_error = *absolute_error / std::max(fabs(x), fabs(y));
  if (x == 0 || y == 0) {
    // If x or y is exactly zero, then relative difference doesn't have any
    // meaning. Take the absolute difference instead.
    *relative_error = *absolute_error;
  }
  return fabs(*relative_error) < fabs(relative_precision);
}

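// A CostFunction that wraps another CostFunction. Whenever jacobians are
// requested, it evaluates both the wrapped function's analytic jacobians and
// central-difference numeric jacobians, compares them entry by entry with
// IsClose(), and logs every entry whose relative error exceeds
// relative_precision. The wrapped function's residuals and analytic
// jacobians are what get returned to the caller.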
class GradientCheckingCostFunction : public CostFunction {
 public:
  GradientCheckingCostFunction(const CostFunction* function,
                               double relative_step_size,
                               double relative_precision,
                               const string& extra_info)
      : function_(function),
        relative_precision_(relative_precision),
        extra_info_(extra_info) {
    DynamicNumericDiffCostFunction<CostFunction, CENTRAL>*
        finite_diff_cost_function =
            new DynamicNumericDiffCostFunction<CostFunction, CENTRAL>(
                function,
                DO_NOT_TAKE_OWNERSHIP,
                relative_step_size);

    const vector<int32>& parameter_block_sizes =
        function->parameter_block_sizes();
    for (int i = 0; i < parameter_block_sizes.size(); ++i) {
      finite_diff_cost_function->AddParameterBlock(parameter_block_sizes[i]);
    }
    *mutable_parameter_block_sizes() = parameter_block_sizes;
    set_num_residuals(function->num_residuals());
    finite_diff_cost_function->SetNumResiduals(num_residuals());
    finite_diff_cost_function_.reset(finite_diff_cost_function);
  }

  virtual ~GradientCheckingCostFunction() { }

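  // If jacobians is NULL, the call is forwarded to the wrapped function
  // unchanged. Otherwise both the analytic and the numeric jacobians are
  // computed and compared, and a detailed report is logged via LOG(WARNING)
  // whenever any entry disagrees by more than relative_precision_;
  // extra_info_ is included in the report to identify the residual block.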
  virtual bool Evaluate(double const* const* parameters,
                        double* residuals,
                        double** jacobians) const {
    if (!jacobians) {
      // Nothing to check in this case; just forward.
      return function_->Evaluate(parameters, residuals, NULL);
    }

    int num_residuals = function_->num_residuals();

    // Make space for the jacobians of the two methods.
    const vector<int32>& block_sizes = function_->parameter_block_sizes();
    vector<Matrix> term_jacobians(block_sizes.size());
    vector<Matrix> finite_difference_jacobians(block_sizes.size());
    vector<double*> term_jacobian_pointers(block_sizes.size());
    vector<double*> finite_difference_jacobian_pointers(block_sizes.size());
    for (int i = 0; i < block_sizes.size(); i++) {
      term_jacobians[i].resize(num_residuals, block_sizes[i]);
      term_jacobian_pointers[i] = term_jacobians[i].data();
      finite_difference_jacobians[i].resize(num_residuals, block_sizes[i]);
      finite_difference_jacobian_pointers[i] =
          finite_difference_jacobians[i].data();
    }

    // Evaluate the derivative using the user supplied code.
    if (!function_->Evaluate(parameters,
                             residuals,
                             &term_jacobian_pointers[0])) {
      LOG(WARNING) << "Function evaluation failed.";
      return false;
    }

    // Evaluate the derivative using numeric derivatives.
    finite_diff_cost_function_->Evaluate(
        parameters,
        residuals,
        &finite_difference_jacobian_pointers[0]);

    // See if any elements have relative error larger than the threshold.
    int num_bad_jacobian_components = 0;
    double worst_relative_error = 0;

    // Accumulate the error message for all the jacobians, since it won't get
    // output if there are no bad jacobian components.
    string m;
    for (int k = 0; k < block_sizes.size(); k++) {
      // Copy the original jacobian blocks into the jacobians array.
      if (jacobians[k] != NULL) {
        MatrixRef(jacobians[k],
                  term_jacobians[k].rows(),
                  term_jacobians[k].cols()) = term_jacobians[k];
      }

      StringAppendF(&m,
                    "========== "
                    "Jacobian for " "block %d: (%ld by %ld) "
                    "==========\n",
                    k,
                    static_cast<long>(term_jacobians[k].rows()),
                    static_cast<long>(term_jacobians[k].cols()));
      // The funny spacing creates appropriately aligned column headers.
      m += " block  row  col        user dx/dy    num diff dx/dy         "
           "abs error    relative error         parameter          residual\n";

      for (int i = 0; i < term_jacobians[k].rows(); i++) {
        for (int j = 0; j < term_jacobians[k].cols(); j++) {
          double term_jacobian = term_jacobians[k](i, j);
          double finite_jacobian = finite_difference_jacobians[k](i, j);
          double relative_error, absolute_error;
          bool bad_jacobian_entry =
              !IsClose(term_jacobian,
                       finite_jacobian,
                       relative_precision_,
                       &relative_error,
                       &absolute_error);
          worst_relative_error = std::max(worst_relative_error,
                                          relative_error);

          StringAppendF(&m, "%6d %4d %4d %17g %17g %17g %17g %17g %17g",
                        k, i, j,
                        term_jacobian, finite_jacobian,
                        absolute_error, relative_error,
                        parameters[k][j],
                        residuals[i]);

          if (bad_jacobian_entry) {
            num_bad_jacobian_components++;
            StringAppendF(
                &m, " ------ (%d,%d,%d) Relative error worse than %g",
                k, i, j, relative_precision_);
          }
          m += "\n";
        }
      }
    }

    // If there were any bad jacobian components, dump comprehensive debug
    // info.
    if (num_bad_jacobian_components) {
      string header = StringPrintf("Detected %d bad jacobian component(s). "
                                   "Worst relative error was %g.\n",
                                   num_bad_jacobian_components,
                                   worst_relative_error);
      if (!extra_info_.empty()) {
        header += "Extra info for this residual: " + extra_info_ + "\n";
      }
      LOG(WARNING) << "\n" << header << m;
    }
    return true;
  }

 private:
  const CostFunction* function_;
  internal::scoped_ptr<CostFunction> finite_diff_cost_function_;
  double relative_precision_;
  string extra_info_;
};

}  // namespace

CostFunction* CreateGradientCheckingCostFunction(
    const CostFunction* cost_function,
    double relative_step_size,
    double relative_precision,
    const string& extra_info) {
  return new GradientCheckingCostFunction(cost_function,
                                          relative_step_size,
                                          relative_precision,
                                          extra_info);
}

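// Creates a new ProblemImpl that mirrors problem_impl: it references the
// same parameter blocks (preserving constancy and local parameterizations)
// and reuses the loss functions, but wraps every CostFunction in a
// GradientCheckingCostFunction. Evaluating the returned problem therefore
// also verifies the user supplied jacobians; this is the mechanism used by
// the solver when Solver::Options::check_gradients is true.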
ProblemImpl* CreateGradientCheckingProblemImpl(ProblemImpl* problem_impl,
                                               double relative_step_size,
                                               double relative_precision) {
  // We create new CostFunctions by wrapping the original CostFunction
  // in a gradient checking CostFunction, so it's okay for the
  // ProblemImpl to take ownership of them and destroy them. The
  // LossFunctions and LocalParameterizations are reused; since they
  // are owned by problem_impl, gradient_checking_problem_impl should
  // not take ownership of them.
  Problem::Options gradient_checking_problem_options;
  gradient_checking_problem_options.cost_function_ownership = TAKE_OWNERSHIP;
  gradient_checking_problem_options.loss_function_ownership =
      DO_NOT_TAKE_OWNERSHIP;
  gradient_checking_problem_options.local_parameterization_ownership =
      DO_NOT_TAKE_OWNERSHIP;

  ProblemImpl* gradient_checking_problem_impl = new ProblemImpl(
      gradient_checking_problem_options);

  Program* program = problem_impl->mutable_program();

  // For every ParameterBlock in problem_impl, create a new parameter
  // block with the same local parameterization and constancy.
  const vector<ParameterBlock*>& parameter_blocks = program->parameter_blocks();
  for (int i = 0; i < parameter_blocks.size(); ++i) {
    ParameterBlock* parameter_block = parameter_blocks[i];
    gradient_checking_problem_impl->AddParameterBlock(
        parameter_block->mutable_user_state(),
        parameter_block->Size(),
        parameter_block->mutable_local_parameterization());

    if (parameter_block->IsConstant()) {
      gradient_checking_problem_impl->SetParameterBlockConstant(
          parameter_block->mutable_user_state());
    }
  }

  // For every ResidualBlock in problem_impl, create a new
  // ResidualBlock by wrapping its CostFunction inside a
  // GradientCheckingCostFunction.
  const vector<ResidualBlock*>& residual_blocks = program->residual_blocks();
  for (int i = 0; i < residual_blocks.size(); ++i) {
    ResidualBlock* residual_block = residual_blocks[i];

    // Build a human readable string which identifies the
    // ResidualBlock. This is used by the GradientCheckingCostFunction
    // when logging debugging information.
    string extra_info = StringPrintf(
        "Residual block id %d; depends on parameters [", i);
    vector<double*> parameter_blocks;
    for (int j = 0; j < residual_block->NumParameterBlocks(); ++j) {
      ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
      parameter_blocks.push_back(parameter_block->mutable_user_state());
      StringAppendF(&extra_info, "%p", parameter_block->mutable_user_state());
      extra_info += (j < residual_block->NumParameterBlocks() - 1) ? ", " : "]";
    }

    // Wrap the original CostFunction in a GradientCheckingCostFunction.
    CostFunction* gradient_checking_cost_function =
        CreateGradientCheckingCostFunction(residual_block->cost_function(),
                                           relative_step_size,
                                           relative_precision,
                                           extra_info);

    // The const_cast is necessary because
    // ProblemImpl::AddResidualBlock can potentially take ownership of
    // the LossFunction, but in this case we are guaranteed that this
    // will not be the case, so this const_cast is harmless.
    gradient_checking_problem_impl->AddResidualBlock(
        gradient_checking_cost_function,
        const_cast<LossFunction*>(residual_block->loss_function()),
        parameter_blocks);
  }

  return gradient_checking_problem_impl;
}

}  // namespace internal
}  // namespace ceres