1// Ceres Solver - A fast non-linear least squares minimizer
2// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
3// http://code.google.com/p/ceres-solver/
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are met:
7//
8// * Redistributions of source code must retain the above copyright notice,
9//   this list of conditions and the following disclaimer.
10// * Redistributions in binary form must reproduce the above copyright notice,
11//   this list of conditions and the following disclaimer in the documentation
12//   and/or other materials provided with the distribution.
13// * Neither the name of Google Inc. nor the names of its contributors may be
14//   used to endorse or promote products derived from this software without
15//   specific prior written permission.
16//
17// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27// POSSIBILITY OF SUCH DAMAGE.
28//
29// Author: keir@google.com (Keir Mierle)
30
31#include "ceres/solver_impl.h"
32
33#include <cstdio>
34#include <iostream>  // NOLINT
35#include <numeric>
36#include <string>
37#include "ceres/coordinate_descent_minimizer.h"
38#include "ceres/cxsparse.h"
39#include "ceres/evaluator.h"
40#include "ceres/gradient_checking_cost_function.h"
41#include "ceres/iteration_callback.h"
42#include "ceres/levenberg_marquardt_strategy.h"
43#include "ceres/line_search_minimizer.h"
44#include "ceres/linear_solver.h"
45#include "ceres/map_util.h"
46#include "ceres/minimizer.h"
47#include "ceres/ordered_groups.h"
48#include "ceres/parameter_block.h"
49#include "ceres/parameter_block_ordering.h"
50#include "ceres/problem.h"
51#include "ceres/problem_impl.h"
52#include "ceres/program.h"
53#include "ceres/residual_block.h"
54#include "ceres/stringprintf.h"
55#include "ceres/suitesparse.h"
56#include "ceres/trust_region_minimizer.h"
57#include "ceres/wall_time.h"
58
59namespace ceres {
60namespace internal {
61namespace {
62
63// Callback for updating the user's parameter blocks. Updates are only
64// done if the step is successful.
65class StateUpdatingCallback : public IterationCallback {
66 public:
67  StateUpdatingCallback(Program* program, double* parameters)
68      : program_(program), parameters_(parameters) {}
69
70  CallbackReturnType operator()(const IterationSummary& summary) {
71    if (summary.step_is_successful) {
72      program_->StateVectorToParameterBlocks(parameters_);
73      program_->CopyParameterBlockStateToUserState();
74    }
75    return SOLVER_CONTINUE;
76  }
77
78 private:
79  Program* program_;
80  double* parameters_;
81};
82
83void SetSummaryFinalCost(Solver::Summary* summary) {
84  summary->final_cost = summary->initial_cost;
85  // We need the loop here, instead of just looking at the last
86  // iteration because the minimizer maybe making non-monotonic steps.
87  for (int i = 0; i < summary->iterations.size(); ++i) {
88    const IterationSummary& iteration_summary = summary->iterations[i];
89    summary->final_cost = min(iteration_summary.cost, summary->final_cost);
90  }
91}
92
93// Callback for logging the state of the minimizer to STDERR or STDOUT
94// depending on the user's preferences and logging level.
95class TrustRegionLoggingCallback : public IterationCallback {
96 public:
97  explicit TrustRegionLoggingCallback(bool log_to_stdout)
98      : log_to_stdout_(log_to_stdout) {}
99
100  ~TrustRegionLoggingCallback() {}
101
102  CallbackReturnType operator()(const IterationSummary& summary) {
103    const char* kReportRowFormat =
104        "% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e "
105        "rho:% 3.2e mu:% 3.2e li:% 3d it:% 3.2e tt:% 3.2e";
106    string output = StringPrintf(kReportRowFormat,
107                                 summary.iteration,
108                                 summary.cost,
109                                 summary.cost_change,
110                                 summary.gradient_max_norm,
111                                 summary.step_norm,
112                                 summary.relative_decrease,
113                                 summary.trust_region_radius,
114                                 summary.linear_solver_iterations,
115                                 summary.iteration_time_in_seconds,
116                                 summary.cumulative_time_in_seconds);
117    if (log_to_stdout_) {
118      cout << output << endl;
119    } else {
120      VLOG(1) << output;
121    }
122    return SOLVER_CONTINUE;
123  }
124
125 private:
126  const bool log_to_stdout_;
127};
128
129// Callback for logging the state of the minimizer to STDERR or STDOUT
130// depending on the user's preferences and logging level.
131class LineSearchLoggingCallback : public IterationCallback {
132 public:
133  explicit LineSearchLoggingCallback(bool log_to_stdout)
134      : log_to_stdout_(log_to_stdout) {}
135
136  ~LineSearchLoggingCallback() {}
137
138  CallbackReturnType operator()(const IterationSummary& summary) {
139    const char* kReportRowFormat =
140        "% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e "
141        "s:% 3.2e e:% 3d it:% 3.2e tt:% 3.2e";
142    string output = StringPrintf(kReportRowFormat,
143                                 summary.iteration,
144                                 summary.cost,
145                                 summary.cost_change,
146                                 summary.gradient_max_norm,
147                                 summary.step_norm,
148                                 summary.step_size,
149                                 summary.line_search_function_evaluations,
150                                 summary.iteration_time_in_seconds,
151                                 summary.cumulative_time_in_seconds);
152    if (log_to_stdout_) {
153      cout << output << endl;
154    } else {
155      VLOG(1) << output;
156    }
157    return SOLVER_CONTINUE;
158  }
159
160 private:
161  const bool log_to_stdout_;
162};
163
164
165// Basic callback to record the execution of the solver to a file for
166// offline analysis.
167class FileLoggingCallback : public IterationCallback {
168 public:
169  explicit FileLoggingCallback(const string& filename)
170      : fptr_(NULL) {
171    fptr_ = fopen(filename.c_str(), "w");
172    CHECK_NOTNULL(fptr_);
173  }
174
175  virtual ~FileLoggingCallback() {
176    if (fptr_ != NULL) {
177      fclose(fptr_);
178    }
179  }
180
181  virtual CallbackReturnType operator()(const IterationSummary& summary) {
182    fprintf(fptr_,
183            "%4d %e %e\n",
184            summary.iteration,
185            summary.cost,
186            summary.cumulative_time_in_seconds);
187    return SOLVER_CONTINUE;
188  }
189 private:
190    FILE* fptr_;
191};
192
193// Iterate over each of the groups in order of their priority and fill
194// summary with their sizes.
195void SummarizeOrdering(ParameterBlockOrdering* ordering,
196                       vector<int>* summary) {
197  CHECK_NOTNULL(summary)->clear();
198  if (ordering == NULL) {
199    return;
200  }
201
202  const map<int, set<double*> >& group_to_elements =
203      ordering->group_to_elements();
204  for (map<int, set<double*> >::const_iterator it = group_to_elements.begin();
205       it != group_to_elements.end();
206       ++it) {
207    summary->push_back(it->second.size());
208  }
209}
210
211}  // namespace
212
213void SolverImpl::TrustRegionMinimize(
214    const Solver::Options& options,
215    Program* program,
216    CoordinateDescentMinimizer* inner_iteration_minimizer,
217    Evaluator* evaluator,
218    LinearSolver* linear_solver,
219    double* parameters,
220    Solver::Summary* summary) {
221  Minimizer::Options minimizer_options(options);
222
223  // TODO(sameeragarwal): Add support for logging the configuration
224  // and more detailed stats.
225  scoped_ptr<IterationCallback> file_logging_callback;
226  if (!options.solver_log.empty()) {
227    file_logging_callback.reset(new FileLoggingCallback(options.solver_log));
228    minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
229                                       file_logging_callback.get());
230  }
231
232  TrustRegionLoggingCallback logging_callback(
233      options.minimizer_progress_to_stdout);
234  if (options.logging_type != SILENT) {
235    minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
236                                       &logging_callback);
237  }
238
239  StateUpdatingCallback updating_callback(program, parameters);
240  if (options.update_state_every_iteration) {
241    // This must get pushed to the front of the callbacks so that it is run
242    // before any of the user callbacks.
243    minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
244                                       &updating_callback);
245  }
246
247  minimizer_options.evaluator = evaluator;
248  scoped_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());
249
250  minimizer_options.jacobian = jacobian.get();
251  minimizer_options.inner_iteration_minimizer = inner_iteration_minimizer;
252
253  TrustRegionStrategy::Options trust_region_strategy_options;
254  trust_region_strategy_options.linear_solver = linear_solver;
255  trust_region_strategy_options.initial_radius =
256      options.initial_trust_region_radius;
257  trust_region_strategy_options.max_radius = options.max_trust_region_radius;
258  trust_region_strategy_options.min_lm_diagonal = options.min_lm_diagonal;
259  trust_region_strategy_options.max_lm_diagonal = options.max_lm_diagonal;
260  trust_region_strategy_options.trust_region_strategy_type =
261      options.trust_region_strategy_type;
262  trust_region_strategy_options.dogleg_type = options.dogleg_type;
263  scoped_ptr<TrustRegionStrategy> strategy(
264      TrustRegionStrategy::Create(trust_region_strategy_options));
265  minimizer_options.trust_region_strategy = strategy.get();
266
267  TrustRegionMinimizer minimizer;
268  double minimizer_start_time = WallTimeInSeconds();
269  minimizer.Minimize(minimizer_options, parameters, summary);
270  summary->minimizer_time_in_seconds =
271      WallTimeInSeconds() - minimizer_start_time;
272}
273
274#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
275void SolverImpl::LineSearchMinimize(
276    const Solver::Options& options,
277    Program* program,
278    Evaluator* evaluator,
279    double* parameters,
280    Solver::Summary* summary) {
281  Minimizer::Options minimizer_options(options);
282
283  // TODO(sameeragarwal): Add support for logging the configuration
284  // and more detailed stats.
285  scoped_ptr<IterationCallback> file_logging_callback;
286  if (!options.solver_log.empty()) {
287    file_logging_callback.reset(new FileLoggingCallback(options.solver_log));
288    minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
289                                       file_logging_callback.get());
290  }
291
292  LineSearchLoggingCallback logging_callback(
293      options.minimizer_progress_to_stdout);
294  if (options.logging_type != SILENT) {
295    minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
296                                       &logging_callback);
297  }
298
299  StateUpdatingCallback updating_callback(program, parameters);
300  if (options.update_state_every_iteration) {
301    // This must get pushed to the front of the callbacks so that it is run
302    // before any of the user callbacks.
303    minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
304                                       &updating_callback);
305  }
306
307  minimizer_options.evaluator = evaluator;
308
309  LineSearchMinimizer minimizer;
310  double minimizer_start_time = WallTimeInSeconds();
311  minimizer.Minimize(minimizer_options, parameters, summary);
312  summary->minimizer_time_in_seconds =
313      WallTimeInSeconds() - minimizer_start_time;
314}
315#endif  // CERES_NO_LINE_SEARCH_MINIMIZER
316
317void SolverImpl::Solve(const Solver::Options& options,
318                       ProblemImpl* problem_impl,
319                       Solver::Summary* summary) {
320  VLOG(2) << "Initial problem: "
321          << problem_impl->NumParameterBlocks()
322          << " parameter blocks, "
323          << problem_impl->NumParameters()
324          << " parameters,  "
325          << problem_impl->NumResidualBlocks()
326          << " residual blocks, "
327          << problem_impl->NumResiduals()
328          << " residuals.";
329
330  if (options.minimizer_type == TRUST_REGION) {
331    TrustRegionSolve(options, problem_impl, summary);
332  } else {
333#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
334    LineSearchSolve(options, problem_impl, summary);
335#else
336    LOG(FATAL) << "Ceres Solver was compiled with -DLINE_SEARCH_MINIMIZER=OFF";
337#endif
338  }
339}
340
341void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
342                                  ProblemImpl* original_problem_impl,
343                                  Solver::Summary* summary) {
344  EventLogger event_logger("TrustRegionSolve");
345  double solver_start_time = WallTimeInSeconds();
346
347  Program* original_program = original_problem_impl->mutable_program();
348  ProblemImpl* problem_impl = original_problem_impl;
349
350  // Reset the summary object to its default values.
351  *CHECK_NOTNULL(summary) = Solver::Summary();
352
353  summary->minimizer_type = TRUST_REGION;
354  summary->num_parameter_blocks = problem_impl->NumParameterBlocks();
355  summary->num_parameters = problem_impl->NumParameters();
356  summary->num_effective_parameters =
357      original_program->NumEffectiveParameters();
358  summary->num_residual_blocks = problem_impl->NumResidualBlocks();
359  summary->num_residuals = problem_impl->NumResiduals();
360
361  // Empty programs are usually a user error.
362  if (summary->num_parameter_blocks == 0) {
363    summary->error = "Problem contains no parameter blocks.";
364    LOG(ERROR) << summary->error;
365    return;
366  }
367
368  if (summary->num_residual_blocks == 0) {
369    summary->error = "Problem contains no residual blocks.";
370    LOG(ERROR) << summary->error;
371    return;
372  }
373
374  SummarizeOrdering(original_options.linear_solver_ordering,
375                    &(summary->linear_solver_ordering_given));
376
377  SummarizeOrdering(original_options.inner_iteration_ordering,
378                    &(summary->inner_iteration_ordering_given));
379
380  Solver::Options options(original_options);
381  options.linear_solver_ordering = NULL;
382  options.inner_iteration_ordering = NULL;
383
384#ifndef CERES_USE_OPENMP
385  if (options.num_threads > 1) {
386    LOG(WARNING)
387        << "OpenMP support is not compiled into this binary; "
388        << "only options.num_threads=1 is supported. Switching "
389        << "to single threaded mode.";
390    options.num_threads = 1;
391  }
392  if (options.num_linear_solver_threads > 1) {
393    LOG(WARNING)
394        << "OpenMP support is not compiled into this binary; "
395        << "only options.num_linear_solver_threads=1 is supported. Switching "
396        << "to single threaded mode.";
397    options.num_linear_solver_threads = 1;
398  }
399#endif
400
401  summary->num_threads_given = original_options.num_threads;
402  summary->num_threads_used = options.num_threads;
403
404  if (options.trust_region_minimizer_iterations_to_dump.size() > 0 &&
405      options.trust_region_problem_dump_format_type != CONSOLE &&
406      options.trust_region_problem_dump_directory.empty()) {
407    summary->error =
408        "Solver::Options::trust_region_problem_dump_directory is empty.";
409    LOG(ERROR) << summary->error;
410    return;
411  }
412
413  event_logger.AddEvent("Init");
414
415  original_program->SetParameterBlockStatePtrsToUserStatePtrs();
416  event_logger.AddEvent("SetParameterBlockPtrs");
417
418  // If the user requests gradient checking, construct a new
419  // ProblemImpl by wrapping the CostFunctions of problem_impl inside
420  // GradientCheckingCostFunction and replacing problem_impl with
421  // gradient_checking_problem_impl.
422  scoped_ptr<ProblemImpl> gradient_checking_problem_impl;
423  if (options.check_gradients) {
424    VLOG(1) << "Checking Gradients";
425    gradient_checking_problem_impl.reset(
426        CreateGradientCheckingProblemImpl(
427            problem_impl,
428            options.numeric_derivative_relative_step_size,
429            options.gradient_check_relative_precision));
430
431    // From here on, problem_impl will point to the gradient checking
432    // version.
433    problem_impl = gradient_checking_problem_impl.get();
434  }
435
436  if (original_options.linear_solver_ordering != NULL) {
437    if (!IsOrderingValid(original_options, problem_impl, &summary->error)) {
438      LOG(ERROR) << summary->error;
439      return;
440    }
441    event_logger.AddEvent("CheckOrdering");
442    options.linear_solver_ordering =
443        new ParameterBlockOrdering(*original_options.linear_solver_ordering);
444    event_logger.AddEvent("CopyOrdering");
445  } else {
446    options.linear_solver_ordering = new ParameterBlockOrdering;
447    const ProblemImpl::ParameterMap& parameter_map =
448        problem_impl->parameter_map();
449    for (ProblemImpl::ParameterMap::const_iterator it = parameter_map.begin();
450         it != parameter_map.end();
451         ++it) {
452      options.linear_solver_ordering->AddElementToGroup(it->first, 0);
453    }
454    event_logger.AddEvent("ConstructOrdering");
455  }
456
457  // Create the three objects needed to minimize: the transformed program, the
458  // evaluator, and the linear solver.
459  scoped_ptr<Program> reduced_program(CreateReducedProgram(&options,
460                                                           problem_impl,
461                                                           &summary->fixed_cost,
462                                                           &summary->error));
463
464  event_logger.AddEvent("CreateReducedProgram");
465  if (reduced_program == NULL) {
466    return;
467  }
468
469  SummarizeOrdering(options.linear_solver_ordering,
470                    &(summary->linear_solver_ordering_used));
471
472  summary->num_parameter_blocks_reduced = reduced_program->NumParameterBlocks();
473  summary->num_parameters_reduced = reduced_program->NumParameters();
474  summary->num_effective_parameters_reduced =
475      reduced_program->NumEffectiveParameters();
476  summary->num_residual_blocks_reduced = reduced_program->NumResidualBlocks();
477  summary->num_residuals_reduced = reduced_program->NumResiduals();
478
479  if (summary->num_parameter_blocks_reduced == 0) {
480    summary->preprocessor_time_in_seconds =
481        WallTimeInSeconds() - solver_start_time;
482
483    double post_process_start_time = WallTimeInSeconds();
484    LOG(INFO) << "Terminating: FUNCTION_TOLERANCE reached. "
485              << "No non-constant parameter blocks found.";
486
487    summary->initial_cost = summary->fixed_cost;
488    summary->final_cost = summary->fixed_cost;
489
490    // FUNCTION_TOLERANCE is the right convergence here, as we know
491    // that the objective function is constant and cannot be changed
492    // any further.
493    summary->termination_type = FUNCTION_TOLERANCE;
494
495    // Ensure the program state is set to the user parameters on the way out.
496    original_program->SetParameterBlockStatePtrsToUserStatePtrs();
497
498    summary->postprocessor_time_in_seconds =
499        WallTimeInSeconds() - post_process_start_time;
500    return;
501  }
502
503  scoped_ptr<LinearSolver>
504      linear_solver(CreateLinearSolver(&options, &summary->error));
505  event_logger.AddEvent("CreateLinearSolver");
506  if (linear_solver == NULL) {
507    return;
508  }
509
510  summary->linear_solver_type_given = original_options.linear_solver_type;
511  summary->linear_solver_type_used = options.linear_solver_type;
512
513  summary->preconditioner_type = options.preconditioner_type;
514
515  summary->num_linear_solver_threads_given =
516      original_options.num_linear_solver_threads;
517  summary->num_linear_solver_threads_used = options.num_linear_solver_threads;
518
519  summary->dense_linear_algebra_library_type =
520      options.dense_linear_algebra_library_type;
521  summary->sparse_linear_algebra_library_type =
522      options.sparse_linear_algebra_library_type;
523
524  summary->trust_region_strategy_type = options.trust_region_strategy_type;
525  summary->dogleg_type = options.dogleg_type;
526
527  scoped_ptr<Evaluator> evaluator(CreateEvaluator(options,
528                                                  problem_impl->parameter_map(),
529                                                  reduced_program.get(),
530                                                  &summary->error));
531
532  event_logger.AddEvent("CreateEvaluator");
533
534  if (evaluator == NULL) {
535    return;
536  }
537
538  scoped_ptr<CoordinateDescentMinimizer> inner_iteration_minimizer;
539  if (options.use_inner_iterations) {
540    if (reduced_program->parameter_blocks().size() < 2) {
541      LOG(WARNING) << "Reduced problem only contains one parameter block."
542                   << "Disabling inner iterations.";
543    } else {
544      inner_iteration_minimizer.reset(
545          CreateInnerIterationMinimizer(original_options,
546                                        *reduced_program,
547                                        problem_impl->parameter_map(),
548                                        summary));
549      if (inner_iteration_minimizer == NULL) {
550        LOG(ERROR) << summary->error;
551        return;
552      }
553    }
554  }
555  event_logger.AddEvent("CreateInnerIterationMinimizer");
556
557  // The optimizer works on contiguous parameter vectors; allocate some.
558  Vector parameters(reduced_program->NumParameters());
559
560  // Collect the discontiguous parameters into a contiguous state vector.
561  reduced_program->ParameterBlocksToStateVector(parameters.data());
562
563  Vector original_parameters = parameters;
564
565  double minimizer_start_time = WallTimeInSeconds();
566  summary->preprocessor_time_in_seconds =
567      minimizer_start_time - solver_start_time;
568
569  // Run the optimization.
570  TrustRegionMinimize(options,
571                      reduced_program.get(),
572                      inner_iteration_minimizer.get(),
573                      evaluator.get(),
574                      linear_solver.get(),
575                      parameters.data(),
576                      summary);
577  event_logger.AddEvent("Minimize");
578
579  SetSummaryFinalCost(summary);
580
581  // If the user aborted mid-optimization or the optimization
582  // terminated because of a numerical failure, then return without
583  // updating user state.
584  if (summary->termination_type == USER_ABORT ||
585      summary->termination_type == NUMERICAL_FAILURE) {
586    return;
587  }
588
589  double post_process_start_time = WallTimeInSeconds();
590
591  // Push the contiguous optimized parameters back to the user's
592  // parameters.
593  reduced_program->StateVectorToParameterBlocks(parameters.data());
594  reduced_program->CopyParameterBlockStateToUserState();
595
596  // Ensure the program state is set to the user parameters on the way
597  // out.
598  original_program->SetParameterBlockStatePtrsToUserStatePtrs();
599
600  const map<string, double>& linear_solver_time_statistics =
601      linear_solver->TimeStatistics();
602  summary->linear_solver_time_in_seconds =
603      FindWithDefault(linear_solver_time_statistics,
604                      "LinearSolver::Solve",
605                      0.0);
606
607  const map<string, double>& evaluator_time_statistics =
608      evaluator->TimeStatistics();
609
610  summary->residual_evaluation_time_in_seconds =
611      FindWithDefault(evaluator_time_statistics, "Evaluator::Residual", 0.0);
612  summary->jacobian_evaluation_time_in_seconds =
613      FindWithDefault(evaluator_time_statistics, "Evaluator::Jacobian", 0.0);
614
615  // Stick a fork in it, we're done.
616  summary->postprocessor_time_in_seconds =
617      WallTimeInSeconds() - post_process_start_time;
618  event_logger.AddEvent("PostProcess");
619}
620
621
622#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
623void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
624                                 ProblemImpl* original_problem_impl,
625                                 Solver::Summary* summary) {
626  double solver_start_time = WallTimeInSeconds();
627
628  Program* original_program = original_problem_impl->mutable_program();
629  ProblemImpl* problem_impl = original_problem_impl;
630
631  // Reset the summary object to its default values.
632  *CHECK_NOTNULL(summary) = Solver::Summary();
633
634  summary->minimizer_type = LINE_SEARCH;
635  summary->line_search_direction_type =
636      original_options.line_search_direction_type;
637  summary->max_lbfgs_rank = original_options.max_lbfgs_rank;
638  summary->line_search_type = original_options.line_search_type;
639  summary->line_search_interpolation_type =
640      original_options.line_search_interpolation_type;
641  summary->nonlinear_conjugate_gradient_type =
642      original_options.nonlinear_conjugate_gradient_type;
643
644  summary->num_parameter_blocks = original_program->NumParameterBlocks();
645  summary->num_parameters = original_program->NumParameters();
646  summary->num_residual_blocks = original_program->NumResidualBlocks();
647  summary->num_residuals = original_program->NumResiduals();
648  summary->num_effective_parameters =
649      original_program->NumEffectiveParameters();
650
651  // Validate values for configuration parameters supplied by user.
652  if ((original_options.line_search_direction_type == ceres::BFGS ||
653       original_options.line_search_direction_type == ceres::LBFGS) &&
654      original_options.line_search_type != ceres::WOLFE) {
655    summary->error =
656        string("Invalid configuration: require line_search_type == "
657               "ceres::WOLFE when using (L)BFGS to ensure that underlying "
658               "assumptions are guaranteed to be satisfied.");
659    LOG(ERROR) << summary->error;
660    return;
661  }
662  if (original_options.max_lbfgs_rank <= 0) {
663    summary->error =
664        string("Invalid configuration: require max_lbfgs_rank > 0");
665    LOG(ERROR) << summary->error;
666    return;
667  }
668  if (original_options.min_line_search_step_size <= 0.0) {
669    summary->error = "Invalid configuration: min_line_search_step_size <= 0.0.";
670    LOG(ERROR) << summary->error;
671    return;
672  }
673  if (original_options.line_search_sufficient_function_decrease <= 0.0) {
674    summary->error =
675        string("Invalid configuration: require ") +
676        string("line_search_sufficient_function_decrease <= 0.0.");
677    LOG(ERROR) << summary->error;
678    return;
679  }
680  if (original_options.max_line_search_step_contraction <= 0.0 ||
681      original_options.max_line_search_step_contraction >= 1.0) {
682    summary->error = string("Invalid configuration: require ") +
683        string("0.0 < max_line_search_step_contraction < 1.0.");
684    LOG(ERROR) << summary->error;
685    return;
686  }
687  if (original_options.min_line_search_step_contraction <=
688      original_options.max_line_search_step_contraction ||
689      original_options.min_line_search_step_contraction > 1.0) {
690    summary->error = string("Invalid configuration: require ") +
691        string("max_line_search_step_contraction < ") +
692        string("min_line_search_step_contraction <= 1.0.");
693    LOG(ERROR) << summary->error;
694    return;
695  }
696  // Warn user if they have requested BISECTION interpolation, but constraints
697  // on max/min step size change during line search prevent bisection scaling
698  // from occurring. Warn only, as this is likely a user mistake, but one which
699  // does not prevent us from continuing.
700  LOG_IF(WARNING,
701         (original_options.line_search_interpolation_type == ceres::BISECTION &&
702          (original_options.max_line_search_step_contraction > 0.5 ||
703           original_options.min_line_search_step_contraction < 0.5)))
704      << "Line search interpolation type is BISECTION, but specified "
705      << "max_line_search_step_contraction: "
706      << original_options.max_line_search_step_contraction << ", and "
707      << "min_line_search_step_contraction: "
708      << original_options.min_line_search_step_contraction
709      << ", prevent bisection (0.5) scaling, continuing with solve regardless.";
710  if (original_options.max_num_line_search_step_size_iterations <= 0) {
711    summary->error = string("Invalid configuration: require ") +
712        string("max_num_line_search_step_size_iterations > 0.");
713    LOG(ERROR) << summary->error;
714    return;
715  }
716  if (original_options.line_search_sufficient_curvature_decrease <=
717      original_options.line_search_sufficient_function_decrease ||
718      original_options.line_search_sufficient_curvature_decrease > 1.0) {
719    summary->error = string("Invalid configuration: require ") +
720        string("line_search_sufficient_function_decrease < ") +
721        string("line_search_sufficient_curvature_decrease < 1.0.");
722    LOG(ERROR) << summary->error;
723    return;
724  }
725  if (original_options.max_line_search_step_expansion <= 1.0) {
726    summary->error = string("Invalid configuration: require ") +
727        string("max_line_search_step_expansion > 1.0.");
728    LOG(ERROR) << summary->error;
729    return;
730  }
731
732  // Empty programs are usually a user error.
733  if (summary->num_parameter_blocks == 0) {
734    summary->error = "Problem contains no parameter blocks.";
735    LOG(ERROR) << summary->error;
736    return;
737  }
738
739  if (summary->num_residual_blocks == 0) {
740    summary->error = "Problem contains no residual blocks.";
741    LOG(ERROR) << summary->error;
742    return;
743  }
744
745  Solver::Options options(original_options);
746
747  // This ensures that we get a Block Jacobian Evaluator along with
748  // none of the Schur nonsense. This file will have to be extensively
749  // refactored to deal with the various bits of cleanups related to
750  // line search.
751  options.linear_solver_type = CGNR;
752
753  options.linear_solver_ordering = NULL;
754  options.inner_iteration_ordering = NULL;
755
756#ifndef CERES_USE_OPENMP
757  if (options.num_threads > 1) {
758    LOG(WARNING)
759        << "OpenMP support is not compiled into this binary; "
760        << "only options.num_threads=1 is supported. Switching "
761        << "to single threaded mode.";
762    options.num_threads = 1;
763  }
764#endif  // CERES_USE_OPENMP
765
766  summary->num_threads_given = original_options.num_threads;
767  summary->num_threads_used = options.num_threads;
768
769  if (original_options.linear_solver_ordering != NULL) {
770    if (!IsOrderingValid(original_options, problem_impl, &summary->error)) {
771      LOG(ERROR) << summary->error;
772      return;
773    }
774    options.linear_solver_ordering =
775        new ParameterBlockOrdering(*original_options.linear_solver_ordering);
776  } else {
777    options.linear_solver_ordering = new ParameterBlockOrdering;
778    const ProblemImpl::ParameterMap& parameter_map =
779        problem_impl->parameter_map();
780    for (ProblemImpl::ParameterMap::const_iterator it = parameter_map.begin();
781         it != parameter_map.end();
782         ++it) {
783      options.linear_solver_ordering->AddElementToGroup(it->first, 0);
784    }
785  }
786
787  original_program->SetParameterBlockStatePtrsToUserStatePtrs();
788
789  // If the user requests gradient checking, construct a new
790  // ProblemImpl by wrapping the CostFunctions of problem_impl inside
791  // GradientCheckingCostFunction and replacing problem_impl with
792  // gradient_checking_problem_impl.
793  scoped_ptr<ProblemImpl> gradient_checking_problem_impl;
794  if (options.check_gradients) {
795    VLOG(1) << "Checking Gradients";
796    gradient_checking_problem_impl.reset(
797        CreateGradientCheckingProblemImpl(
798            problem_impl,
799            options.numeric_derivative_relative_step_size,
800            options.gradient_check_relative_precision));
801
802    // From here on, problem_impl will point to the gradient checking
803    // version.
804    problem_impl = gradient_checking_problem_impl.get();
805  }
806
807  // Create the three objects needed to minimize: the transformed program, the
808  // evaluator, and the linear solver.
809  scoped_ptr<Program> reduced_program(CreateReducedProgram(&options,
810                                                           problem_impl,
811                                                           &summary->fixed_cost,
812                                                           &summary->error));
813  if (reduced_program == NULL) {
814    return;
815  }
816
817  summary->num_parameter_blocks_reduced = reduced_program->NumParameterBlocks();
818  summary->num_parameters_reduced = reduced_program->NumParameters();
819  summary->num_residual_blocks_reduced = reduced_program->NumResidualBlocks();
820  summary->num_effective_parameters_reduced =
821      reduced_program->NumEffectiveParameters();
822  summary->num_residuals_reduced = reduced_program->NumResiduals();
823
824  if (summary->num_parameter_blocks_reduced == 0) {
825    summary->preprocessor_time_in_seconds =
826        WallTimeInSeconds() - solver_start_time;
827
828    LOG(INFO) << "Terminating: FUNCTION_TOLERANCE reached. "
829              << "No non-constant parameter blocks found.";
830
831    // FUNCTION_TOLERANCE is the right convergence here, as we know
832    // that the objective function is constant and cannot be changed
833    // any further.
834    summary->termination_type = FUNCTION_TOLERANCE;
835
836    const double post_process_start_time = WallTimeInSeconds();
837
838    SetSummaryFinalCost(summary);
839
840    // Ensure the program state is set to the user parameters on the way out.
841    original_program->SetParameterBlockStatePtrsToUserStatePtrs();
842    summary->postprocessor_time_in_seconds =
843        WallTimeInSeconds() - post_process_start_time;
844    return;
845  }
846
847  scoped_ptr<Evaluator> evaluator(CreateEvaluator(options,
848                                                  problem_impl->parameter_map(),
849                                                  reduced_program.get(),
850                                                  &summary->error));
851  if (evaluator == NULL) {
852    return;
853  }
854
855  // The optimizer works on contiguous parameter vectors; allocate some.
856  Vector parameters(reduced_program->NumParameters());
857
858  // Collect the discontiguous parameters into a contiguous state vector.
859  reduced_program->ParameterBlocksToStateVector(parameters.data());
860
861  Vector original_parameters = parameters;
862
863  const double minimizer_start_time = WallTimeInSeconds();
864  summary->preprocessor_time_in_seconds =
865      minimizer_start_time - solver_start_time;
866
867  // Run the optimization.
868  LineSearchMinimize(options,
869                     reduced_program.get(),
870                     evaluator.get(),
871                     parameters.data(),
872                     summary);
873
874  // If the user aborted mid-optimization or the optimization
875  // terminated because of a numerical failure, then return without
876  // updating user state.
877  if (summary->termination_type == USER_ABORT ||
878      summary->termination_type == NUMERICAL_FAILURE) {
879    return;
880  }
881
882  const double post_process_start_time = WallTimeInSeconds();
883
884  // Push the contiguous optimized parameters back to the user's parameters.
885  reduced_program->StateVectorToParameterBlocks(parameters.data());
886  reduced_program->CopyParameterBlockStateToUserState();
887
888  SetSummaryFinalCost(summary);
889
890  // Ensure the program state is set to the user parameters on the way out.
891  original_program->SetParameterBlockStatePtrsToUserStatePtrs();
892
893  const map<string, double>& evaluator_time_statistics =
894      evaluator->TimeStatistics();
895
896  summary->residual_evaluation_time_in_seconds =
897      FindWithDefault(evaluator_time_statistics, "Evaluator::Residual", 0.0);
898  summary->jacobian_evaluation_time_in_seconds =
899      FindWithDefault(evaluator_time_statistics, "Evaluator::Jacobian", 0.0);
900
901  // Stick a fork in it, we're done.
902  summary->postprocessor_time_in_seconds =
903      WallTimeInSeconds() - post_process_start_time;
904}
905#endif  // CERES_NO_LINE_SEARCH_MINIMIZER
906
907bool SolverImpl::IsOrderingValid(const Solver::Options& options,
908                                 const ProblemImpl* problem_impl,
909                                 string* error) {
910  if (options.linear_solver_ordering->NumElements() !=
911      problem_impl->NumParameterBlocks()) {
912      *error = "Number of parameter blocks in user supplied ordering "
913          "does not match the number of parameter blocks in the problem";
914    return false;
915  }
916
917  const Program& program = problem_impl->program();
918  const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
919  for (vector<ParameterBlock*>::const_iterator it = parameter_blocks.begin();
920       it != parameter_blocks.end();
921       ++it) {
922    if (!options.linear_solver_ordering
923        ->IsMember(const_cast<double*>((*it)->user_state()))) {
924      *error = "Problem contains a parameter block that is not in "
925          "the user specified ordering.";
926      return false;
927    }
928  }
929
930  if (IsSchurType(options.linear_solver_type) &&
931      options.linear_solver_ordering->NumGroups() > 1) {
932    const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
933    const set<double*>& e_blocks  =
934        options.linear_solver_ordering->group_to_elements().begin()->second;
935    if (!IsParameterBlockSetIndependent(e_blocks, residual_blocks)) {
936      *error = "The user requested the use of a Schur type solver. "
937          "But the first elimination group in the ordering is not an "
938          "independent set.";
939      return false;
940    }
941  }
942  return true;
943}
944
945bool SolverImpl::IsParameterBlockSetIndependent(
946    const set<double*>& parameter_block_ptrs,
947    const vector<ResidualBlock*>& residual_blocks) {
948  // Loop over each residual block and ensure that no two parameter
949  // blocks in the same residual block are part of
950  // parameter_block_ptrs as that would violate the assumption that it
951  // is an independent set in the Hessian matrix.
952  for (vector<ResidualBlock*>::const_iterator it = residual_blocks.begin();
953       it != residual_blocks.end();
954       ++it) {
955    ParameterBlock* const* parameter_blocks = (*it)->parameter_blocks();
956    const int num_parameter_blocks = (*it)->NumParameterBlocks();
957    int count = 0;
958    for (int i = 0; i < num_parameter_blocks; ++i) {
959      count += parameter_block_ptrs.count(
960          parameter_blocks[i]->mutable_user_state());
961    }
962    if (count > 1) {
963      return false;
964    }
965  }
966  return true;
967}
968
969
970// Strips varying parameters and residuals, maintaining order, and updating
971// num_eliminate_blocks.
972bool SolverImpl::RemoveFixedBlocksFromProgram(Program* program,
973                                              ParameterBlockOrdering* ordering,
974                                              double* fixed_cost,
975                                              string* error) {
976  vector<ParameterBlock*>* parameter_blocks =
977      program->mutable_parameter_blocks();
978
979  scoped_array<double> residual_block_evaluate_scratch;
980  if (fixed_cost != NULL) {
981    residual_block_evaluate_scratch.reset(
982        new double[program->MaxScratchDoublesNeededForEvaluate()]);
983    *fixed_cost = 0.0;
984  }
985
986  // Mark all the parameters as unused. Abuse the index member of the parameter
987  // blocks for the marking.
988  for (int i = 0; i < parameter_blocks->size(); ++i) {
989    (*parameter_blocks)[i]->set_index(-1);
990  }
991
992  // Filter out residual that have all-constant parameters, and mark all the
993  // parameter blocks that appear in residuals.
994  {
995    vector<ResidualBlock*>* residual_blocks =
996        program->mutable_residual_blocks();
997    int j = 0;
998    for (int i = 0; i < residual_blocks->size(); ++i) {
999      ResidualBlock* residual_block = (*residual_blocks)[i];
1000      int num_parameter_blocks = residual_block->NumParameterBlocks();
1001
1002      // Determine if the residual block is fixed, and also mark varying
1003      // parameters that appear in the residual block.
1004      bool all_constant = true;
1005      for (int k = 0; k < num_parameter_blocks; k++) {
1006        ParameterBlock* parameter_block = residual_block->parameter_blocks()[k];
1007        if (!parameter_block->IsConstant()) {
1008          all_constant = false;
1009          parameter_block->set_index(1);
1010        }
1011      }
1012
1013      if (!all_constant) {
1014        (*residual_blocks)[j++] = (*residual_blocks)[i];
1015      } else if (fixed_cost != NULL) {
1016        // The residual is constant and will be removed, so its cost is
1017        // added to the variable fixed_cost.
1018        double cost = 0.0;
1019        if (!residual_block->Evaluate(true,
1020                                      &cost,
1021                                      NULL,
1022                                      NULL,
1023                                      residual_block_evaluate_scratch.get())) {
1024          *error = StringPrintf("Evaluation of the residual %d failed during "
1025                                "removal of fixed residual blocks.", i);
1026          return false;
1027        }
1028        *fixed_cost += cost;
1029      }
1030    }
1031    residual_blocks->resize(j);
1032  }
1033
1034  // Filter out unused or fixed parameter blocks, and update
1035  // the ordering.
1036  {
1037    vector<ParameterBlock*>* parameter_blocks =
1038        program->mutable_parameter_blocks();
1039    int j = 0;
1040    for (int i = 0; i < parameter_blocks->size(); ++i) {
1041      ParameterBlock* parameter_block = (*parameter_blocks)[i];
1042      if (parameter_block->index() == 1) {
1043        (*parameter_blocks)[j++] = parameter_block;
1044      } else {
1045        ordering->Remove(parameter_block->mutable_user_state());
1046      }
1047    }
1048    parameter_blocks->resize(j);
1049  }
1050
1051  if (!(((program->NumResidualBlocks() == 0) &&
1052         (program->NumParameterBlocks() == 0)) ||
1053        ((program->NumResidualBlocks() != 0) &&
1054         (program->NumParameterBlocks() != 0)))) {
1055    *error =  "Congratulations, you found a bug in Ceres. Please report it.";
1056    return false;
1057  }
1058
1059  return true;
1060}
1061
1062Program* SolverImpl::CreateReducedProgram(Solver::Options* options,
1063                                          ProblemImpl* problem_impl,
1064                                          double* fixed_cost,
1065                                          string* error) {
1066  CHECK_NOTNULL(options->linear_solver_ordering);
1067  Program* original_program = problem_impl->mutable_program();
1068  scoped_ptr<Program> transformed_program(new Program(*original_program));
1069
1070  ParameterBlockOrdering* linear_solver_ordering =
1071      options->linear_solver_ordering;
1072  const int min_group_id =
1073      linear_solver_ordering->group_to_elements().begin()->first;
1074
1075  if (!RemoveFixedBlocksFromProgram(transformed_program.get(),
1076                                    linear_solver_ordering,
1077                                    fixed_cost,
1078                                    error)) {
1079    return NULL;
1080  }
1081
1082  VLOG(2) << "Reduced problem: "
1083          << transformed_program->NumParameterBlocks()
1084          << " parameter blocks, "
1085          << transformed_program->NumParameters()
1086          << " parameters,  "
1087          << transformed_program->NumResidualBlocks()
1088          << " residual blocks, "
1089          << transformed_program->NumResiduals()
1090          << " residuals.";
1091
1092  if (transformed_program->NumParameterBlocks() == 0) {
1093    LOG(WARNING) << "No varying parameter blocks to optimize; "
1094                 << "bailing early.";
1095    return transformed_program.release();
1096  }
1097
1098  if (IsSchurType(options->linear_solver_type) &&
1099      linear_solver_ordering->GroupSize(min_group_id) == 0) {
1100    // If the user requested the use of a Schur type solver, and
1101    // supplied a non-NULL linear_solver_ordering object with more than
1102    // one elimination group, then it can happen that after all the
1103    // parameter blocks which are fixed or unused have been removed from
1104    // the program and the ordering, there are no more parameter blocks
1105    // in the first elimination group.
1106    //
1107    // In such a case, the use of a Schur type solver is not possible,
1108    // as they assume there is at least one e_block. Thus, we
1109    // automatically switch to the closest solver to the one indicated
1110    // by the user.
1111    AlternateLinearSolverForSchurTypeLinearSolver(options);
1112  }
1113
1114  if (IsSchurType(options->linear_solver_type)) {
1115    if (!ReorderProgramForSchurTypeLinearSolver(
1116            options->linear_solver_type,
1117            options->sparse_linear_algebra_library_type,
1118            problem_impl->parameter_map(),
1119            linear_solver_ordering,
1120            transformed_program.get(),
1121            error)) {
1122      return NULL;
1123    }
1124    return transformed_program.release();
1125  }
1126
1127  if (options->linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
1128    if (!ReorderProgramForSparseNormalCholesky(
1129            options->sparse_linear_algebra_library_type,
1130            linear_solver_ordering,
1131            transformed_program.get(),
1132            error)) {
1133      return NULL;
1134    }
1135
1136    return transformed_program.release();
1137  }
1138
1139  transformed_program->SetParameterOffsetsAndIndex();
1140  return transformed_program.release();
1141}
1142
1143LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options,
1144                                             string* error) {
1145  CHECK_NOTNULL(options);
1146  CHECK_NOTNULL(options->linear_solver_ordering);
1147  CHECK_NOTNULL(error);
1148
1149  if (options->trust_region_strategy_type == DOGLEG) {
1150    if (options->linear_solver_type == ITERATIVE_SCHUR ||
1151        options->linear_solver_type == CGNR) {
1152      *error = "DOGLEG only supports exact factorization based linear "
1153               "solvers. If you want to use an iterative solver please "
1154               "use LEVENBERG_MARQUARDT as the trust_region_strategy_type";
1155      return NULL;
1156    }
1157  }
1158
1159#ifdef CERES_NO_LAPACK
1160  if (options->linear_solver_type == DENSE_NORMAL_CHOLESKY &&
1161      options->dense_linear_algebra_library_type == LAPACK) {
1162    *error = "Can't use DENSE_NORMAL_CHOLESKY with LAPACK because "
1163        "LAPACK was not enabled when Ceres was built.";
1164    return NULL;
1165  }
1166
1167  if (options->linear_solver_type == DENSE_QR &&
1168      options->dense_linear_algebra_library_type == LAPACK) {
1169    *error = "Can't use DENSE_QR with LAPACK because "
1170        "LAPACK was not enabled when Ceres was built.";
1171    return NULL;
1172  }
1173
1174  if (options->linear_solver_type == DENSE_SCHUR &&
1175      options->dense_linear_algebra_library_type == LAPACK) {
1176    *error = "Can't use DENSE_SCHUR with LAPACK because "
1177        "LAPACK was not enabled when Ceres was built.";
1178    return NULL;
1179  }
1180#endif
1181
1182#ifdef CERES_NO_SUITESPARSE
1183  if (options->linear_solver_type == SPARSE_NORMAL_CHOLESKY &&
1184      options->sparse_linear_algebra_library_type == SUITE_SPARSE) {
1185    *error = "Can't use SPARSE_NORMAL_CHOLESKY with SUITESPARSE because "
1186             "SuiteSparse was not enabled when Ceres was built.";
1187    return NULL;
1188  }
1189
1190  if (options->preconditioner_type == CLUSTER_JACOBI) {
1191    *error =  "CLUSTER_JACOBI preconditioner not suppored. Please build Ceres "
1192        "with SuiteSparse support.";
1193    return NULL;
1194  }
1195
1196  if (options->preconditioner_type == CLUSTER_TRIDIAGONAL) {
1197    *error =  "CLUSTER_TRIDIAGONAL preconditioner not suppored. Please build "
1198        "Ceres with SuiteSparse support.";
1199    return NULL;
1200  }
1201#endif
1202
1203#ifdef CERES_NO_CXSPARSE
1204  if (options->linear_solver_type == SPARSE_NORMAL_CHOLESKY &&
1205      options->sparse_linear_algebra_library_type == CX_SPARSE) {
1206    *error = "Can't use SPARSE_NORMAL_CHOLESKY with CXSPARSE because "
1207             "CXSparse was not enabled when Ceres was built.";
1208    return NULL;
1209  }
1210#endif
1211
1212#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
1213  if (options->linear_solver_type == SPARSE_SCHUR) {
1214    *error = "Can't use SPARSE_SCHUR because neither SuiteSparse nor"
1215        "CXSparse was enabled when Ceres was compiled.";
1216    return NULL;
1217  }
1218#endif
1219
1220  if (options->max_linear_solver_iterations <= 0) {
1221    *error = "Solver::Options::max_linear_solver_iterations is not positive.";
1222    return NULL;
1223  }
1224  if (options->min_linear_solver_iterations <= 0) {
1225    *error = "Solver::Options::min_linear_solver_iterations is not positive.";
1226    return NULL;
1227  }
1228  if (options->min_linear_solver_iterations >
1229      options->max_linear_solver_iterations) {
1230    *error = "Solver::Options::min_linear_solver_iterations > "
1231        "Solver::Options::max_linear_solver_iterations.";
1232    return NULL;
1233  }
1234
1235  LinearSolver::Options linear_solver_options;
1236  linear_solver_options.min_num_iterations =
1237        options->min_linear_solver_iterations;
1238  linear_solver_options.max_num_iterations =
1239      options->max_linear_solver_iterations;
1240  linear_solver_options.type = options->linear_solver_type;
1241  linear_solver_options.preconditioner_type = options->preconditioner_type;
1242  linear_solver_options.sparse_linear_algebra_library_type =
1243      options->sparse_linear_algebra_library_type;
1244  linear_solver_options.dense_linear_algebra_library_type =
1245      options->dense_linear_algebra_library_type;
1246  linear_solver_options.use_postordering = options->use_postordering;
1247
1248  // Ignore user's postordering preferences and force it to be true if
1249  // cholmod_camd is not available. This ensures that the linear
1250  // solver does not assume that a fill-reducing pre-ordering has been
1251  // done.
1252#if !defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CAMD)
1253  if (IsSchurType(linear_solver_options.type) &&
1254      options->sparse_linear_algebra_library_type == SUITE_SPARSE) {
1255    linear_solver_options.use_postordering = true;
1256  }
1257#endif
1258
1259  linear_solver_options.num_threads = options->num_linear_solver_threads;
1260  options->num_linear_solver_threads = linear_solver_options.num_threads;
1261
1262  const map<int, set<double*> >& groups =
1263      options->linear_solver_ordering->group_to_elements();
1264  for (map<int, set<double*> >::const_iterator it = groups.begin();
1265       it != groups.end();
1266       ++it) {
1267    linear_solver_options.elimination_groups.push_back(it->second.size());
1268  }
1269  // Schur type solvers, expect at least two elimination groups. If
1270  // there is only one elimination group, then CreateReducedProgram
1271  // guarantees that this group only contains e_blocks. Thus we add a
1272  // dummy elimination group with zero blocks in it.
1273  if (IsSchurType(linear_solver_options.type) &&
1274      linear_solver_options.elimination_groups.size() == 1) {
1275    linear_solver_options.elimination_groups.push_back(0);
1276  }
1277
1278  return LinearSolver::Create(linear_solver_options);
1279}
1280
1281
1282// Find the minimum index of any parameter block to the given residual.
1283// Parameter blocks that have indices greater than num_eliminate_blocks are
1284// considered to have an index equal to num_eliminate_blocks.
1285static int MinParameterBlock(const ResidualBlock* residual_block,
1286                             int num_eliminate_blocks) {
1287  int min_parameter_block_position = num_eliminate_blocks;
1288  for (int i = 0; i < residual_block->NumParameterBlocks(); ++i) {
1289    ParameterBlock* parameter_block = residual_block->parameter_blocks()[i];
1290    if (!parameter_block->IsConstant()) {
1291      CHECK_NE(parameter_block->index(), -1)
1292          << "Did you forget to call Program::SetParameterOffsetsAndIndex()? "
1293          << "This is a Ceres bug; please contact the developers!";
1294      min_parameter_block_position = std::min(parameter_block->index(),
1295                                              min_parameter_block_position);
1296    }
1297  }
1298  return min_parameter_block_position;
1299}
1300
1301// Reorder the residuals for program, if necessary, so that the residuals
1302// involving each E block occur together. This is a necessary condition for the
1303// Schur eliminator, which works on these "row blocks" in the jacobian.
1304bool SolverImpl::LexicographicallyOrderResidualBlocks(
1305    const int num_eliminate_blocks,
1306    Program* program,
1307    string* error) {
1308  CHECK_GE(num_eliminate_blocks, 1)
1309      << "Congratulations, you found a Ceres bug! Please report this error "
1310      << "to the developers.";
1311
1312  // Create a histogram of the number of residuals for each E block. There is an
1313  // extra bucket at the end to catch all non-eliminated F blocks.
1314  vector<int> residual_blocks_per_e_block(num_eliminate_blocks + 1);
1315  vector<ResidualBlock*>* residual_blocks = program->mutable_residual_blocks();
1316  vector<int> min_position_per_residual(residual_blocks->size());
1317  for (int i = 0; i < residual_blocks->size(); ++i) {
1318    ResidualBlock* residual_block = (*residual_blocks)[i];
1319    int position = MinParameterBlock(residual_block, num_eliminate_blocks);
1320    min_position_per_residual[i] = position;
1321    DCHECK_LE(position, num_eliminate_blocks);
1322    residual_blocks_per_e_block[position]++;
1323  }
1324
1325  // Run a cumulative sum on the histogram, to obtain offsets to the start of
1326  // each histogram bucket (where each bucket is for the residuals for that
1327  // E-block).
1328  vector<int> offsets(num_eliminate_blocks + 1);
1329  std::partial_sum(residual_blocks_per_e_block.begin(),
1330                   residual_blocks_per_e_block.end(),
1331                   offsets.begin());
1332  CHECK_EQ(offsets.back(), residual_blocks->size())
1333      << "Congratulations, you found a Ceres bug! Please report this error "
1334      << "to the developers.";
1335
1336  CHECK(find(residual_blocks_per_e_block.begin(),
1337             residual_blocks_per_e_block.end() - 1, 0) !=
1338        residual_blocks_per_e_block.end())
1339      << "Congratulations, you found a Ceres bug! Please report this error "
1340      << "to the developers.";
1341
1342  // Fill in each bucket with the residual blocks for its corresponding E block.
1343  // Each bucket is individually filled from the back of the bucket to the front
1344  // of the bucket. The filling order among the buckets is dictated by the
1345  // residual blocks. This loop uses the offsets as counters; subtracting one
1346  // from each offset as a residual block is placed in the bucket. When the
1347  // filling is finished, the offset pointerts should have shifted down one
1348  // entry (this is verified below).
1349  vector<ResidualBlock*> reordered_residual_blocks(
1350      (*residual_blocks).size(), static_cast<ResidualBlock*>(NULL));
1351  for (int i = 0; i < residual_blocks->size(); ++i) {
1352    int bucket = min_position_per_residual[i];
1353
1354    // Decrement the cursor, which should now point at the next empty position.
1355    offsets[bucket]--;
1356
1357    // Sanity.
1358    CHECK(reordered_residual_blocks[offsets[bucket]] == NULL)
1359        << "Congratulations, you found a Ceres bug! Please report this error "
1360        << "to the developers.";
1361
1362    reordered_residual_blocks[offsets[bucket]] = (*residual_blocks)[i];
1363  }
1364
1365  // Sanity check #1: The difference in bucket offsets should match the
1366  // histogram sizes.
1367  for (int i = 0; i < num_eliminate_blocks; ++i) {
1368    CHECK_EQ(residual_blocks_per_e_block[i], offsets[i + 1] - offsets[i])
1369        << "Congratulations, you found a Ceres bug! Please report this error "
1370        << "to the developers.";
1371  }
1372  // Sanity check #2: No NULL's left behind.
1373  for (int i = 0; i < reordered_residual_blocks.size(); ++i) {
1374    CHECK(reordered_residual_blocks[i] != NULL)
1375        << "Congratulations, you found a Ceres bug! Please report this error "
1376        << "to the developers.";
1377  }
1378
1379  // Now that the residuals are collected by E block, swap them in place.
1380  swap(*program->mutable_residual_blocks(), reordered_residual_blocks);
1381  return true;
1382}
1383
1384Evaluator* SolverImpl::CreateEvaluator(
1385    const Solver::Options& options,
1386    const ProblemImpl::ParameterMap& parameter_map,
1387    Program* program,
1388    string* error) {
1389  Evaluator::Options evaluator_options;
1390  evaluator_options.linear_solver_type = options.linear_solver_type;
1391  evaluator_options.num_eliminate_blocks =
1392      (options.linear_solver_ordering->NumGroups() > 0 &&
1393       IsSchurType(options.linear_solver_type))
1394      ? (options.linear_solver_ordering
1395         ->group_to_elements().begin()
1396         ->second.size())
1397      : 0;
1398  evaluator_options.num_threads = options.num_threads;
1399  return Evaluator::Create(evaluator_options, program, error);
1400}
1401
1402CoordinateDescentMinimizer* SolverImpl::CreateInnerIterationMinimizer(
1403    const Solver::Options& options,
1404    const Program& program,
1405    const ProblemImpl::ParameterMap& parameter_map,
1406    Solver::Summary* summary) {
1407  summary->inner_iterations_given = true;
1408
1409  scoped_ptr<CoordinateDescentMinimizer> inner_iteration_minimizer(
1410      new CoordinateDescentMinimizer);
1411  scoped_ptr<ParameterBlockOrdering> inner_iteration_ordering;
1412  ParameterBlockOrdering* ordering_ptr  = NULL;
1413
1414  if (options.inner_iteration_ordering == NULL) {
1415    // Find a recursive decomposition of the Hessian matrix as a set
1416    // of independent sets of decreasing size and invert it. This
1417    // seems to work better in practice, i.e., Cameras before
1418    // points.
1419    inner_iteration_ordering.reset(new ParameterBlockOrdering);
1420    ComputeRecursiveIndependentSetOrdering(program,
1421                                           inner_iteration_ordering.get());
1422    inner_iteration_ordering->Reverse();
1423    ordering_ptr = inner_iteration_ordering.get();
1424  } else {
1425    const map<int, set<double*> >& group_to_elements =
1426        options.inner_iteration_ordering->group_to_elements();
1427
1428    // Iterate over each group and verify that it is an independent
1429    // set.
1430    map<int, set<double*> >::const_iterator it = group_to_elements.begin();
1431    for ( ; it != group_to_elements.end(); ++it) {
1432      if (!IsParameterBlockSetIndependent(it->second,
1433                                          program.residual_blocks())) {
1434        summary->error =
1435            StringPrintf("The user-provided "
1436                         "parameter_blocks_for_inner_iterations does not "
1437                         "form an independent set. Group Id: %d", it->first);
1438        return NULL;
1439      }
1440    }
1441    ordering_ptr = options.inner_iteration_ordering;
1442  }
1443
1444  if (!inner_iteration_minimizer->Init(program,
1445                                       parameter_map,
1446                                       *ordering_ptr,
1447                                       &summary->error)) {
1448    return NULL;
1449  }
1450
1451  summary->inner_iterations_used = true;
1452  summary->inner_iteration_time_in_seconds = 0.0;
1453  SummarizeOrdering(ordering_ptr, &(summary->inner_iteration_ordering_used));
1454  return inner_iteration_minimizer.release();
1455}
1456
1457void SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(
1458    Solver::Options* options) {
1459  if (!IsSchurType(options->linear_solver_type)) {
1460    return;
1461  }
1462
1463  string msg = "No e_blocks remaining. Switching from ";
1464  if (options->linear_solver_type == SPARSE_SCHUR) {
1465    options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
1466    msg += "SPARSE_SCHUR to SPARSE_NORMAL_CHOLESKY.";
1467  } else if (options->linear_solver_type == DENSE_SCHUR) {
1468    // TODO(sameeragarwal): This is probably not a great choice.
1469    // Ideally, we should have a DENSE_NORMAL_CHOLESKY, that can
1470    // take a BlockSparseMatrix as input.
1471    options->linear_solver_type = DENSE_QR;
1472    msg += "DENSE_SCHUR to DENSE_QR.";
1473  } else if (options->linear_solver_type == ITERATIVE_SCHUR) {
1474    options->linear_solver_type = CGNR;
1475    if (options->preconditioner_type != IDENTITY) {
1476      msg += StringPrintf("ITERATIVE_SCHUR with %s preconditioner "
1477                          "to CGNR with JACOBI preconditioner.",
1478                          PreconditionerTypeToString(
1479                            options->preconditioner_type));
1480      // CGNR currently only supports the JACOBI preconditioner.
1481      options->preconditioner_type = JACOBI;
1482    } else {
1483      msg += "ITERATIVE_SCHUR with IDENTITY preconditioner"
1484          "to CGNR with IDENTITY preconditioner.";
1485    }
1486  }
1487  LOG(WARNING) << msg;
1488}
1489
1490bool SolverImpl::ApplyUserOrdering(
1491    const ProblemImpl::ParameterMap& parameter_map,
1492    const ParameterBlockOrdering* parameter_block_ordering,
1493    Program* program,
1494    string* error) {
1495  const int num_parameter_blocks =  program->NumParameterBlocks();
1496  if (parameter_block_ordering->NumElements() != num_parameter_blocks) {
1497    *error = StringPrintf("User specified ordering does not have the same "
1498                          "number of parameters as the problem. The problem"
1499                          "has %d blocks while the ordering has %d blocks.",
1500                          num_parameter_blocks,
1501                          parameter_block_ordering->NumElements());
1502    return false;
1503  }
1504
1505  vector<ParameterBlock*>* parameter_blocks =
1506      program->mutable_parameter_blocks();
1507  parameter_blocks->clear();
1508
1509  const map<int, set<double*> >& groups =
1510      parameter_block_ordering->group_to_elements();
1511
1512  for (map<int, set<double*> >::const_iterator group_it = groups.begin();
1513       group_it != groups.end();
1514       ++group_it) {
1515    const set<double*>& group = group_it->second;
1516    for (set<double*>::const_iterator parameter_block_ptr_it = group.begin();
1517         parameter_block_ptr_it != group.end();
1518         ++parameter_block_ptr_it) {
1519      ProblemImpl::ParameterMap::const_iterator parameter_block_it =
1520          parameter_map.find(*parameter_block_ptr_it);
1521      if (parameter_block_it == parameter_map.end()) {
1522        *error = StringPrintf("User specified ordering contains a pointer "
1523                              "to a double that is not a parameter block in "
1524                              "the problem. The invalid double is in group: %d",
1525                              group_it->first);
1526        return false;
1527      }
1528      parameter_blocks->push_back(parameter_block_it->second);
1529    }
1530  }
1531  return true;
1532}
1533
1534
1535TripletSparseMatrix* SolverImpl::CreateJacobianBlockSparsityTranspose(
1536    const Program* program) {
1537
1538  // Matrix to store the block sparsity structure of the Jacobian.
1539  TripletSparseMatrix* tsm =
1540      new TripletSparseMatrix(program->NumParameterBlocks(),
1541                              program->NumResidualBlocks(),
1542                              10 * program->NumResidualBlocks());
1543  int num_nonzeros = 0;
1544  int* rows = tsm->mutable_rows();
1545  int* cols = tsm->mutable_cols();
1546  double* values = tsm->mutable_values();
1547
1548  const vector<ResidualBlock*>& residual_blocks = program->residual_blocks();
1549  for (int c = 0; c < residual_blocks.size(); ++c) {
1550    const ResidualBlock* residual_block = residual_blocks[c];
1551    const int num_parameter_blocks = residual_block->NumParameterBlocks();
1552    ParameterBlock* const* parameter_blocks =
1553        residual_block->parameter_blocks();
1554
1555    for (int j = 0; j < num_parameter_blocks; ++j) {
1556      if (parameter_blocks[j]->IsConstant()) {
1557        continue;
1558      }
1559
1560      // Re-size the matrix if needed.
1561      if (num_nonzeros >= tsm->max_num_nonzeros()) {
1562        tsm->set_num_nonzeros(num_nonzeros);
1563        tsm->Reserve(2 * num_nonzeros);
1564        rows = tsm->mutable_rows();
1565        cols = tsm->mutable_cols();
1566        values = tsm->mutable_values();
1567      }
1568      CHECK_LT(num_nonzeros,  tsm->max_num_nonzeros());
1569
1570      const int r = parameter_blocks[j]->index();
1571      rows[num_nonzeros] = r;
1572      cols[num_nonzeros] = c;
1573      values[num_nonzeros] = 1.0;
1574      ++num_nonzeros;
1575    }
1576  }
1577
1578  tsm->set_num_nonzeros(num_nonzeros);
1579  return tsm;
1580}
1581
1582bool SolverImpl::ReorderProgramForSchurTypeLinearSolver(
1583    const LinearSolverType linear_solver_type,
1584    const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
1585    const ProblemImpl::ParameterMap& parameter_map,
1586    ParameterBlockOrdering* parameter_block_ordering,
1587    Program* program,
1588    string* error) {
1589  if (parameter_block_ordering->NumGroups() == 1) {
1590    // If the user supplied an parameter_block_ordering with just one
1591    // group, it is equivalent to the user supplying NULL as an
1592    // parameter_block_ordering. Ceres is completely free to choose the
1593    // parameter block ordering as it sees fit. For Schur type solvers,
1594    // this means that the user wishes for Ceres to identify the
1595    // e_blocks, which we do by computing a maximal independent set.
1596    vector<ParameterBlock*> schur_ordering;
1597    const int num_eliminate_blocks =
1598        ComputeStableSchurOrdering(*program, &schur_ordering);
1599
1600    CHECK_EQ(schur_ordering.size(), program->NumParameterBlocks())
1601        << "Congratulations, you found a Ceres bug! Please report this error "
1602        << "to the developers.";
1603
1604    // Update the parameter_block_ordering object.
1605    for (int i = 0; i < schur_ordering.size(); ++i) {
1606      double* parameter_block = schur_ordering[i]->mutable_user_state();
1607      const int group_id = (i < num_eliminate_blocks) ? 0 : 1;
1608      parameter_block_ordering->AddElementToGroup(parameter_block, group_id);
1609    }
1610
1611    // We could call ApplyUserOrdering but this is cheaper and
1612    // simpler.
1613    swap(*program->mutable_parameter_blocks(), schur_ordering);
1614  } else {
1615    // The user provided an ordering with more than one elimination
1616    // group. Trust the user and apply the ordering.
1617    if (!ApplyUserOrdering(parameter_map,
1618                           parameter_block_ordering,
1619                           program,
1620                           error)) {
1621      return false;
1622    }
1623  }
1624
1625  // Pre-order the columns corresponding to the schur complement if
1626  // possible.
1627#if !defined(CERES_NO_SUITESPARSE) && !defined(CERES_NO_CAMD)
1628  if (linear_solver_type == SPARSE_SCHUR &&
1629      sparse_linear_algebra_library_type == SUITE_SPARSE) {
1630    vector<int> constraints;
1631    vector<ParameterBlock*>& parameter_blocks =
1632        *(program->mutable_parameter_blocks());
1633
1634    for (int i = 0; i < parameter_blocks.size(); ++i) {
1635      constraints.push_back(
1636          parameter_block_ordering->GroupId(
1637              parameter_blocks[i]->mutable_user_state()));
1638    }
1639
1640    // Renumber the entries of constraints to be contiguous integers
1641    // as camd requires that the group ids be in the range [0,
1642    // parameter_blocks.size() - 1].
1643    SolverImpl::CompactifyArray(&constraints);
1644
1645    // Set the offsets and index for CreateJacobianSparsityTranspose.
1646    program->SetParameterOffsetsAndIndex();
1647    // Compute a block sparse presentation of J'.
1648    scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
1649        SolverImpl::CreateJacobianBlockSparsityTranspose(program));
1650
1651    SuiteSparse ss;
1652    cholmod_sparse* block_jacobian_transpose =
1653        ss.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
1654
1655    vector<int> ordering(parameter_blocks.size(), 0);
1656    ss.ConstrainedApproximateMinimumDegreeOrdering(block_jacobian_transpose,
1657                                                   &constraints[0],
1658                                                   &ordering[0]);
1659    ss.Free(block_jacobian_transpose);
1660
1661    const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
1662    for (int i = 0; i < program->NumParameterBlocks(); ++i) {
1663      parameter_blocks[i] = parameter_blocks_copy[ordering[i]];
1664    }
1665  }
1666#endif
1667
1668  program->SetParameterOffsetsAndIndex();
1669  // Schur type solvers also require that their residual blocks be
1670  // lexicographically ordered.
1671  const int num_eliminate_blocks =
1672      parameter_block_ordering->group_to_elements().begin()->second.size();
1673  return LexicographicallyOrderResidualBlocks(num_eliminate_blocks,
1674                                              program,
1675                                              error);
1676}
1677
1678bool SolverImpl::ReorderProgramForSparseNormalCholesky(
1679    const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
1680    const ParameterBlockOrdering* parameter_block_ordering,
1681    Program* program,
1682    string* error) {
1683  // Set the offsets and index for CreateJacobianSparsityTranspose.
1684  program->SetParameterOffsetsAndIndex();
1685  // Compute a block sparse presentation of J'.
1686  scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
1687      SolverImpl::CreateJacobianBlockSparsityTranspose(program));
1688
1689  vector<int> ordering(program->NumParameterBlocks(), 0);
1690  vector<ParameterBlock*>& parameter_blocks =
1691      *(program->mutable_parameter_blocks());
1692
1693  if (sparse_linear_algebra_library_type == SUITE_SPARSE) {
1694#ifdef CERES_NO_SUITESPARSE
1695    *error = "Can't use SPARSE_NORMAL_CHOLESKY with SUITE_SPARSE because "
1696        "SuiteSparse was not enabled when Ceres was built.";
1697    return false;
1698#else
1699    SuiteSparse ss;
1700    cholmod_sparse* block_jacobian_transpose =
1701        ss.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
1702
1703#  ifdef CERES_NO_CAMD
1704    // No cholmod_camd, so ignore user's parameter_block_ordering and
1705    // use plain old AMD.
1706    ss.ApproximateMinimumDegreeOrdering(block_jacobian_transpose, &ordering[0]);
1707#  else
1708    if (parameter_block_ordering->NumGroups() > 1) {
1709      // If the user specified more than one elimination groups use them
1710      // to constrain the ordering.
1711      vector<int> constraints;
1712      for (int i = 0; i < parameter_blocks.size(); ++i) {
1713        constraints.push_back(
1714            parameter_block_ordering->GroupId(
1715                parameter_blocks[i]->mutable_user_state()));
1716      }
1717      ss.ConstrainedApproximateMinimumDegreeOrdering(
1718          block_jacobian_transpose,
1719          &constraints[0],
1720          &ordering[0]);
1721    } else {
1722      ss.ApproximateMinimumDegreeOrdering(block_jacobian_transpose,
1723                                          &ordering[0]);
1724    }
1725#  endif  // CERES_NO_CAMD
1726
1727    ss.Free(block_jacobian_transpose);
1728#endif  // CERES_NO_SUITESPARSE
1729
1730  } else if (sparse_linear_algebra_library_type == CX_SPARSE) {
1731#ifndef CERES_NO_CXSPARSE
1732
1733    // CXSparse works with J'J instead of J'. So compute the block
1734    // sparsity for J'J and compute an approximate minimum degree
1735    // ordering.
1736    CXSparse cxsparse;
1737    cs_di* block_jacobian_transpose;
1738    block_jacobian_transpose =
1739        cxsparse.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
1740    cs_di* block_jacobian = cxsparse.TransposeMatrix(block_jacobian_transpose);
1741    cs_di* block_hessian =
1742        cxsparse.MatrixMatrixMultiply(block_jacobian_transpose, block_jacobian);
1743    cxsparse.Free(block_jacobian);
1744    cxsparse.Free(block_jacobian_transpose);
1745
1746    cxsparse.ApproximateMinimumDegreeOrdering(block_hessian, &ordering[0]);
1747    cxsparse.Free(block_hessian);
1748#else  // CERES_NO_CXSPARSE
1749    *error = "Can't use SPARSE_NORMAL_CHOLESKY with CX_SPARSE because "
1750        "CXSparse was not enabled when Ceres was built.";
1751    return false;
1752#endif  // CERES_NO_CXSPARSE
1753  } else {
1754    *error = "Unknown sparse linear algebra library.";
1755    return false;
1756  }
1757
1758  // Apply ordering.
1759  const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
1760  for (int i = 0; i < program->NumParameterBlocks(); ++i) {
1761    parameter_blocks[i] = parameter_blocks_copy[ordering[i]];
1762  }
1763
1764  program->SetParameterOffsetsAndIndex();
1765  return true;
1766}
1767
1768void SolverImpl::CompactifyArray(vector<int>* array_ptr) {
1769  vector<int>& array = *array_ptr;
1770  const set<int> unique_group_ids(array.begin(), array.end());
1771  map<int, int> group_id_map;
1772  for (set<int>::const_iterator it = unique_group_ids.begin();
1773       it != unique_group_ids.end();
1774       ++it) {
1775    InsertOrDie(&group_id_map, *it, group_id_map.size());
1776  }
1777
1778  for (int i = 0; i < array.size(); ++i) {
1779    array[i] = group_id_map[array[i]];
1780  }
1781}
1782
1783}  // namespace internal
1784}  // namespace ceres
1785