1// Copyright (c) 2013 libmv authors.
2//
3// Permission is hereby granted, free of charge, to any person obtaining a copy
4// of this software and associated documentation files (the "Software"), to
5// deal in the Software without restriction, including without limitation the
6// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7// sell copies of the Software, and to permit persons to whom the Software is
8// furnished to do so, subject to the following conditions:
9//
10// The above copyright notice and this permission notice shall be included in
11// all copies or substantial portions of the Software.
12//
13// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19// IN THE SOFTWARE.
20//
21// Author: mierle@gmail.com (Keir Mierle)
22//         sergey.vfx@gmail.com (Sergey Sharybin)
23//
24// This is an example application which contains bundle adjustment code used
25// in the Libmv library and Blender. It reads problems from files passed via
26// the command line and runs the bundle adjuster on the problem.
27//
// The file with the problem is a binary file, for which it is crucial to know
// in which order bytes of float values are stored. This information is
// provided by a single character at the beginning of the file. There are two
// possible values of this byte:
32//  - V, which means values in the file are stored with big endian type
33//  - v, which means values in the file are stored with little endian type
34//
35// The rest of the file contains data in the following order:
36//   - Space in which markers' coordinates are stored in
37//   - Camera intrinsics
38//   - Number of cameras
39//   - Cameras
40//   - Number of 3D points
41//   - 3D points
42//   - Number of markers
43//   - Markers
44//
// Markers' space could either be normalized or image (pixels). This is defined
// by a single character in the file. P means markers in the file are in image
// space, and N means markers are in normalized space.
48//
// Camera intrinsics are described by 8 float values.
// These values go in the following order:
51//
52//   - Focal length, principal point X, principal point Y, k1, k2, k3, p1, p2
53//
54// Every camera is described by:
55//
56//   - Image for which camera belongs to (single 4 bytes integer value).
57//   - Column-major camera rotation matrix, 9 float values.
58//   - Camera translation, 3-component vector of float values.
59//
60// Image number shall be greater or equal to zero. Order of cameras does not
61// matter and gaps are possible.
62//
// Every 3D point is described by:
64//
65//  - Track number point belongs to (single 4 bytes integer value).
66//  - 3D position vector, 3-component vector of float values.
67//
68// Track number shall be greater or equal to zero. Order of tracks does not
69// matter and gaps are possible.
70//
71// Finally every marker is described by:
72//
//  - Image marker belongs to (single 4 bytes integer value).
//  - Track marker belongs to (single 4 bytes integer value).
//  - 2D marker position vector, (two float values).
76//
// Marker's space is used as a default value for refine_intrinsics command line
78// flag. This means if there's no refine_intrinsics flag passed via command
79// line, camera intrinsics will be refined if markers in the problem are
80// stored in image space and camera intrinsics will not be refined if markers
81// are in normalized space.
82//
83// Passing refine_intrinsics command line flag defines explicitly whether
84// refinement of intrinsics will happen. Currently, only none and all
85// intrinsics refinement is supported.
86//
87// There're existing problem files dumped from blender stored in folder
88// ../data/libmv-ba-problems.
89
90#include <cstdio>
91#include <fcntl.h>
92#include <sstream>
93#include <string>
94#include <vector>
95
96#ifdef _MSC_VER
97#  include <io.h>
98#  define open _open
99#  define close _close
100typedef unsigned __int32 uint32_t;
101#else
102# include <stdint.h>
103
104// O_BINARY is not defined on unix like platforms, as there is no
105// difference between binary and text files.
106#define O_BINARY 0
107
108#endif
109
110#include "ceres/ceres.h"
111#include "ceres/rotation.h"
112#include "gflags/gflags.h"
113#include "glog/logging.h"
114
115typedef Eigen::Matrix<double, 3, 3> Mat3;
116typedef Eigen::Matrix<double, 6, 1> Vec6;
117typedef Eigen::Vector3d Vec3;
118typedef Eigen::Vector4d Vec4;
119
120using std::vector;
121
// Command line flags.
//
// input is the path to the binary problem file to load (see the file
// format description at the top of this file).
//
// refine_intrinsics explicitly selects which intrinsics to refine;
// when left empty the decision is made from the markers' space stored
// in the problem file (see main()).
DEFINE_string(input, "", "Input File name");
DEFINE_string(refine_intrinsics, "", "Camera intrinsics to be refined. "
              "Options are: none, radial.");
125
126namespace {
127
128// A EuclideanCamera is the location and rotation of the camera
129// viewing an image.
130//
131// image identifies which image this camera represents.
132// R is a 3x3 matrix representing the rotation of the camera.
133// t is a translation vector representing its positions.
134struct EuclideanCamera {
135  EuclideanCamera() : image(-1) {}
136  EuclideanCamera(const EuclideanCamera &c) : image(c.image), R(c.R), t(c.t) {}
137
138  int image;
139  Mat3 R;
140  Vec3 t;
141};
142
143// A Point is the 3D location of a track.
144//
145// track identifies which track this point corresponds to.
146// X represents the 3D position of the track.
147struct EuclideanPoint {
148  EuclideanPoint() : track(-1) {}
149  EuclideanPoint(const EuclideanPoint &p) : track(p.track), X(p.X) {}
150  int track;
151  Vec3 X;
152};
153
// A Marker is the 2D location of a tracked point in an image.
//
// x and y are the position of the marker in pixels from the top left corner
// of the image identified by `image`. All markers for the same target
// form a track identified by a common track number.
struct Marker {
  int image;    // Image the marker belongs to.
  int track;    // Track the marker belongs to.
  double x, y;  // Position of the marker in pixels.
};
164
// Cameras intrinsics to be bundled.
//
// These are bit-flag values which may be OR-ed together to define the
// set of intrinsics refined during bundle adjustment.
//
// BUNDLE_RADIAL actually implies bundling of k1 and k2 coefficients only,
// no bundling of k3 is possible at this moment.
enum BundleIntrinsics {
  BUNDLE_NO_INTRINSICS = 0,
  BUNDLE_FOCAL_LENGTH = 1,
  BUNDLE_PRINCIPAL_POINT = 2,
  BUNDLE_RADIAL_K1 = 4,
  BUNDLE_RADIAL_K2 = 8,
  BUNDLE_RADIAL = 12,      // BUNDLE_RADIAL_K1 | BUNDLE_RADIAL_K2.
  BUNDLE_TANGENTIAL_P1 = 16,
  BUNDLE_TANGENTIAL_P2 = 32,
  BUNDLE_TANGENTIAL = 48,  // BUNDLE_TANGENTIAL_P1 | BUNDLE_TANGENTIAL_P2.
};
180
// Denotes which blocks to keep constant during bundling.
// For example it is useful to keep camera translations constant
// when bundling tripod motions.
//
// Bit-flag values; see EuclideanBundleCommonIntrinsics.
enum BundleConstraints {
  BUNDLE_NO_CONSTRAINTS = 0,
  BUNDLE_NO_TRANSLATION = 1,
};
188
// The intrinsics need to get combined into a single parameter block; use these
// enums to index instead of numeric constants.
//
// The order matches the order in which intrinsics are stored in the
// problem file (see the file format description at the top of this file).
enum {
  OFFSET_FOCAL_LENGTH,
  OFFSET_PRINCIPAL_POINT_X,
  OFFSET_PRINCIPAL_POINT_Y,
  OFFSET_K1,
  OFFSET_K2,
  OFFSET_K3,
  OFFSET_P1,
  OFFSET_P2,
};
201
202// Returns a pointer to the camera corresponding to a image.
203EuclideanCamera *CameraForImage(vector<EuclideanCamera> *all_cameras,
204                                const int image) {
205  if (image < 0 || image >= all_cameras->size()) {
206    return NULL;
207  }
208  EuclideanCamera *camera = &(*all_cameras)[image];
209  if (camera->image == -1) {
210    return NULL;
211  }
212  return camera;
213}
214
215const EuclideanCamera *CameraForImage(
216    const vector<EuclideanCamera> &all_cameras,
217    const int image) {
218  if (image < 0 || image >= all_cameras.size()) {
219    return NULL;
220  }
221  const EuclideanCamera *camera = &all_cameras[image];
222  if (camera->image == -1) {
223    return NULL;
224  }
225  return camera;
226}
227
228// Returns maximal image number at which marker exists.
229int MaxImage(const vector<Marker> &all_markers) {
230  if (all_markers.size() == 0) {
231    return -1;
232  }
233
234  int max_image = all_markers[0].image;
235  for (int i = 1; i < all_markers.size(); i++) {
236    max_image = std::max(max_image, all_markers[i].image);
237  }
238  return max_image;
239}
240
241// Returns a pointer to the point corresponding to a track.
242EuclideanPoint *PointForTrack(vector<EuclideanPoint> *all_points,
243                              const int track) {
244  if (track < 0 || track >= all_points->size()) {
245    return NULL;
246  }
247  EuclideanPoint *point = &(*all_points)[track];
248  if (point->track == -1) {
249    return NULL;
250  }
251  return point;
252}
253
// Reader of binary file which makes sure possibly needed endian
// conversion happens when loading values like floats and integers.
//
// File's endian type is read from the first character of the file, which
// could either be V for big endian or v for little endian.  This
// means you need to design file format assuming first character
// denotes file endianness in this way.
class EndianAwareFileReader {
 public:
  EndianAwareFileReader(void) : file_descriptor_(-1) {
    // Get an endian type of the host machine.
    //
    // The union aliases the four bytes {0, 1, 2, 3} with a 32-bit
    // integer: reading the integer back yields 0x03020100 on little
    // endian hosts and 0x00010203 on big endian hosts, matching the
    // kLittleEndian / kBigEndian constants below.
    union {
      unsigned char bytes[4];
      uint32_t value;
    } endian_test = { { 0, 1, 2, 3 } };
    host_endian_type_ = endian_test.value;
    // Until a file is opened, assume it matches the host endianness;
    // OpenFile() updates this from the file's first byte.
    file_endian_type_ = host_endian_type_;
  }

  ~EndianAwareFileReader(void) {
    // Close the file if one was successfully opened.
    if (file_descriptor_ > 0) {
      close(file_descriptor_);
    }
  }

  // Open the file for reading and consume its leading endianness byte.
  //
  // Returns false if the file could not be opened.  Aborts via
  // LOG(FATAL) if the first byte is neither 'V' (big endian) nor
  // 'v' (little endian).
  bool OpenFile(const std::string &file_name) {
    file_descriptor_ = open(file_name.c_str(), O_RDONLY | O_BINARY);
    if (file_descriptor_ < 0) {
      return false;
    }
    // Get an endian type of data in the file.
    unsigned char file_endian_type_flag = Read<unsigned char>();
    if (file_endian_type_flag == 'V') {
      file_endian_type_ = kBigEndian;
    } else if (file_endian_type_flag == 'v') {
      file_endian_type_ = kLittleEndian;
    } else {
      LOG(FATAL) << "Problem file is stored in unknown endian type.";
    }
    return true;
  }

  // Read value from the file, will switch endian if needed.
  //
  // Only single-byte and 4-byte types are supported by the byte
  // swapping code (see SwitchEndian); aborts via CHECK_GT on a failed
  // or empty read.
  template <typename T>
  T Read(void) const {
    T value;
    CHECK_GT(read(file_descriptor_, &value, sizeof(value)), 0);
    // Switch endian type if file contains data in different type
    // than the current machine.
    if (file_endian_type_ != host_endian_type_) {
      value = SwitchEndian<T>(value);
    }
    return value;
  }
 private:
  // Values which the endian detection union in the constructor
  // produces on little and big endian machines respectively.
  static const long int kLittleEndian = 0x03020100ul;
  static const long int kBigEndian = 0x00010203ul;

  // Switch endian type between big to little.
  //
  // Only 32-bit values are actually byte-swapped; single-byte values
  // are returned unchanged and any other size aborts via LOG(FATAL).
  template <typename T>
  T SwitchEndian(const T value) const {
    if (sizeof(T) == 4) {
      unsigned int temp_value = static_cast<unsigned int>(value);
      return ((temp_value >> 24)) |
             ((temp_value << 8) & 0x00ff0000) |
             ((temp_value >> 8) & 0x0000ff00) |
             ((temp_value << 24));
    } else if (sizeof(T) == 1) {
      return value;
    } else {
      LOG(FATAL) << "Entered non-implemented part of endian switching function.";
    }
  }

  // Endianness of the host machine (kLittleEndian or kBigEndian).
  int host_endian_type_;
  // Endianness of the data in the currently opened file.
  int file_endian_type_;
  // File descriptor of the opened file, -1 when no file is opened.
  int file_descriptor_;
};
332
333// Read 3x3 column-major matrix from the file
334void ReadMatrix3x3(const EndianAwareFileReader &file_reader,
335                   Mat3 *matrix) {
336  for (int i = 0; i < 9; i++) {
337    (*matrix)(i % 3, i / 3) = file_reader.Read<float>();
338  }
339}
340
341// Read 3-vector from file
342void ReadVector3(const EndianAwareFileReader &file_reader,
343                 Vec3 *vector) {
344  for (int i = 0; i < 3; i++) {
345    (*vector)(i) = file_reader.Read<float>();
346  }
347}
348
// Reads a bundle adjustment problem from the file.
//
// file_name denotes from which file to read the problem.
// camera_intrinsics will contain initial camera intrinsics values.
//
// all_cameras is a vector of all reconstructed cameras to be optimized,
// vector element with number i will contain camera for image i.
//
// all_points is a vector of all reconstructed 3D points to be optimized,
// vector element with number i will contain point for track i.
//
// is_image_space is set to true when the markers in the problem are
// stored in image (pixel) space, and to false for normalized space.
//
// all_markers is a vector of all tracked markers existing in
// the problem. Only used for reprojection error calculation, stays
// unchanged during optimization.
//
// Returns false if any kind of error happened during
// reading.
bool ReadProblemFromFile(const std::string &file_name,
                         double camera_intrinsics[8],
                         vector<EuclideanCamera> *all_cameras,
                         vector<EuclideanPoint> *all_points,
                         bool *is_image_space,
                         vector<Marker> *all_markers) {
  EndianAwareFileReader file_reader;
  if (!file_reader.OpenFile(file_name)) {
    return false;
  }

  // Read markers' space flag: 'P' means markers are stored in image
  // (pixel) space, 'N' means they are in normalized space.
  unsigned char is_image_space_flag = file_reader.Read<unsigned char>();
  if (is_image_space_flag == 'P') {
    *is_image_space = true;
  } else if (is_image_space_flag == 'N') {
    *is_image_space = false;
  } else {
    LOG(FATAL) << "Problem file contains markers stored in unknown space.";
  }

  // Read camera intrinsics (8 floats, in the order described by the
  // OFFSET_* enum).
  for (int i = 0; i < 8; i++) {
    camera_intrinsics[i] = file_reader.Read<float>();
  }

  // Read all cameras.
  int number_of_cameras = file_reader.Read<int>();
  for (int i = 0; i < number_of_cameras; i++) {
    EuclideanCamera camera;

    camera.image = file_reader.Read<int>();
    ReadMatrix3x3(file_reader, &camera.R);
    ReadVector3(file_reader, &camera.t);

    // Cameras are indexed by image number; grow the vector as needed
    // since image numbers may arrive in any order and contain gaps.
    if (camera.image >= all_cameras->size()) {
      all_cameras->resize(camera.image + 1);
    }

    (*all_cameras)[camera.image].image = camera.image;
    (*all_cameras)[camera.image].R = camera.R;
    (*all_cameras)[camera.image].t = camera.t;
  }

  LOG(INFO) << "Read " << number_of_cameras << " cameras.";

  // Read all reconstructed 3D points.
  int number_of_points = file_reader.Read<int>();
  for (int i = 0; i < number_of_points; i++) {
    EuclideanPoint point;

    point.track = file_reader.Read<int>();
    ReadVector3(file_reader, &point.X);

    // Points are indexed by track number; grow the vector as needed
    // since track numbers may arrive in any order and contain gaps.
    if (point.track >= all_points->size()) {
      all_points->resize(point.track + 1);
    }

    (*all_points)[point.track].track = point.track;
    (*all_points)[point.track].X = point.X;
  }

  LOG(INFO) << "Read " << number_of_points << " points.";

  // And finally read all markers.
  int number_of_markers = file_reader.Read<int>();
  for (int i = 0; i < number_of_markers; i++) {
    Marker marker;

    marker.image = file_reader.Read<int>();
    marker.track = file_reader.Read<int>();
    marker.x = file_reader.Read<float>();
    marker.y = file_reader.Read<float>();

    all_markers->push_back(marker);
  }

  LOG(INFO) << "Read " << number_of_markers << " markers.";

  return true;
}
447
// Apply camera intrinsics to the normalized point to get image coordinates.
// This applies the radial lens distortion to a point which is in normalized
// camera coordinates (i.e. the principal point is at (0, 0)) to get image
// coordinates in pixels. Templated for use with autodifferentiation.
template <typename T>
inline void ApplyRadialDistortionCameraIntrinsics(const T &focal_length_x,
                                                  const T &focal_length_y,
                                                  const T &principal_point_x,
                                                  const T &principal_point_y,
                                                  const T &k1,
                                                  const T &k2,
                                                  const T &k3,
                                                  const T &p1,
                                                  const T &p2,
                                                  const T &normalized_x,
                                                  const T &normalized_y,
                                                  T *image_x,
                                                  T *image_y) {
  const T &x = normalized_x;
  const T &y = normalized_y;

  // Powers of the squared radius used by the distortion polynomial.
  const T r2 = x*x + y*y;
  const T r4 = r2 * r2;
  const T r6 = r4 * r2;

  // Distort the normalized point: radial term first, then the
  // tangential terms.
  const T radial_coefficient = (T(1) + k1*r2 + k2*r4 + k3*r6);
  const T distorted_x = x * radial_coefficient +
                        T(2)*p1*x*y + p2*(r2 + T(2)*x*x);
  const T distorted_y = y * radial_coefficient +
                        T(2)*p2*x*y + p1*(r2 + T(2)*y*y);

  // Apply focal length and principal point to get the final image coordinates.
  *image_x = focal_length_x * distorted_x + principal_point_x;
  *image_y = focal_length_y * distorted_y + principal_point_y;
}
481
482// Cost functor which computes reprojection error of 3D point X
483// on camera defined by angle-axis rotation and it's translation
484// (which are in the same block due to optimization reasons).
485//
486// This functor uses a radial distortion model.
487struct OpenCVReprojectionError {
488  OpenCVReprojectionError(const double observed_x, const double observed_y)
489      : observed_x(observed_x), observed_y(observed_y) {}
490
491  template <typename T>
492  bool operator()(const T* const intrinsics,
493                  const T* const R_t,  // Rotation denoted by angle axis
494                                       // followed with translation
495                  const T* const X,    // Point coordinates 3x1.
496                  T* residuals) const {
497    // Unpack the intrinsics.
498    const T& focal_length      = intrinsics[OFFSET_FOCAL_LENGTH];
499    const T& principal_point_x = intrinsics[OFFSET_PRINCIPAL_POINT_X];
500    const T& principal_point_y = intrinsics[OFFSET_PRINCIPAL_POINT_Y];
501    const T& k1                = intrinsics[OFFSET_K1];
502    const T& k2                = intrinsics[OFFSET_K2];
503    const T& k3                = intrinsics[OFFSET_K3];
504    const T& p1                = intrinsics[OFFSET_P1];
505    const T& p2                = intrinsics[OFFSET_P2];
506
507    // Compute projective coordinates: x = RX + t.
508    T x[3];
509
510    ceres::AngleAxisRotatePoint(R_t, X, x);
511    x[0] += R_t[3];
512    x[1] += R_t[4];
513    x[2] += R_t[5];
514
515    // Compute normalized coordinates: x /= x[2].
516    T xn = x[0] / x[2];
517    T yn = x[1] / x[2];
518
519    T predicted_x, predicted_y;
520
521    // Apply distortion to the normalized points to get (xd, yd).
522    // TODO(keir): Do early bailouts for zero distortion; these are expensive
523    // jet operations.
524    ApplyRadialDistortionCameraIntrinsics(focal_length,
525                                          focal_length,
526                                          principal_point_x,
527                                          principal_point_y,
528                                          k1, k2, k3,
529                                          p1, p2,
530                                          xn, yn,
531                                          &predicted_x,
532                                          &predicted_y);
533
534    // The error is the difference between the predicted and observed position.
535    residuals[0] = predicted_x - T(observed_x);
536    residuals[1] = predicted_y - T(observed_y);
537
538    return true;
539  }
540
541  const double observed_x;
542  const double observed_y;
543};
544
545// Print a message to the log which camera intrinsics are gonna to be optimized.
546void BundleIntrinsicsLogMessage(const int bundle_intrinsics) {
547  if (bundle_intrinsics == BUNDLE_NO_INTRINSICS) {
548    LOG(INFO) << "Bundling only camera positions.";
549  } else {
550    std::string bundling_message = "";
551
552#define APPEND_BUNDLING_INTRINSICS(name, flag) \
553    if (bundle_intrinsics & flag) { \
554      if (!bundling_message.empty()) { \
555        bundling_message += ", "; \
556      } \
557      bundling_message += name; \
558    } (void)0
559
560    APPEND_BUNDLING_INTRINSICS("f",      BUNDLE_FOCAL_LENGTH);
561    APPEND_BUNDLING_INTRINSICS("px, py", BUNDLE_PRINCIPAL_POINT);
562    APPEND_BUNDLING_INTRINSICS("k1",     BUNDLE_RADIAL_K1);
563    APPEND_BUNDLING_INTRINSICS("k2",     BUNDLE_RADIAL_K2);
564    APPEND_BUNDLING_INTRINSICS("p1",     BUNDLE_TANGENTIAL_P1);
565    APPEND_BUNDLING_INTRINSICS("p2",     BUNDLE_TANGENTIAL_P2);
566
567    LOG(INFO) << "Bundling " << bundling_message << ".";
568  }
569}
570
571// Print a message to the log containing all the camera intriniscs values.
572void PrintCameraIntrinsics(const char *text, const double *camera_intrinsics) {
573  std::ostringstream intrinsics_output;
574
575  intrinsics_output << "f=" << camera_intrinsics[OFFSET_FOCAL_LENGTH];
576
577  intrinsics_output <<
578    " cx=" << camera_intrinsics[OFFSET_PRINCIPAL_POINT_X] <<
579    " cy=" << camera_intrinsics[OFFSET_PRINCIPAL_POINT_Y];
580
581#define APPEND_DISTORTION_COEFFICIENT(name, offset) \
582  { \
583    if (camera_intrinsics[offset] != 0.0) { \
584      intrinsics_output << " " name "=" << camera_intrinsics[offset];  \
585    } \
586  } (void)0
587
588  APPEND_DISTORTION_COEFFICIENT("k1", OFFSET_K1);
589  APPEND_DISTORTION_COEFFICIENT("k2", OFFSET_K2);
590  APPEND_DISTORTION_COEFFICIENT("k3", OFFSET_K3);
591  APPEND_DISTORTION_COEFFICIENT("p1", OFFSET_P1);
592  APPEND_DISTORTION_COEFFICIENT("p2", OFFSET_P2);
593
594#undef APPEND_DISTORTION_COEFFICIENT
595
596  LOG(INFO) << text << intrinsics_output.str();
597}
598
599// Get a vector of camera's rotations denoted by angle axis
600// conjuncted with translations into single block
601//
602// Element with index i matches to a rotation+translation for
603// camera at image i.
604vector<Vec6> PackCamerasRotationAndTranslation(
605    const vector<Marker> &all_markers,
606    const vector<EuclideanCamera> &all_cameras) {
607  vector<Vec6> all_cameras_R_t;
608  int max_image = MaxImage(all_markers);
609
610  all_cameras_R_t.resize(max_image + 1);
611
612  for (int i = 0; i <= max_image; i++) {
613    const EuclideanCamera *camera = CameraForImage(all_cameras, i);
614
615    if (!camera) {
616      continue;
617    }
618
619    ceres::RotationMatrixToAngleAxis(&camera->R(0, 0),
620                                     &all_cameras_R_t[i](0));
621    all_cameras_R_t[i].tail<3>() = camera->t;
622  }
623
624  return all_cameras_R_t;
625}
626
627// Convert cameras rotations fro mangle axis back to rotation matrix.
628void UnpackCamerasRotationAndTranslation(
629    const vector<Marker> &all_markers,
630    const vector<Vec6> &all_cameras_R_t,
631    vector<EuclideanCamera> *all_cameras) {
632  int max_image = MaxImage(all_markers);
633
634  for (int i = 0; i <= max_image; i++) {
635    EuclideanCamera *camera = CameraForImage(all_cameras, i);
636
637    if (!camera) {
638      continue;
639    }
640
641    ceres::AngleAxisToRotationMatrix(&all_cameras_R_t[i](0),
642                                     &camera->R(0, 0));
643    camera->t = all_cameras_R_t[i].tail<3>();
644  }
645}
646
// Run Ceres bundle adjustment over the reconstructed cameras and
// points, using the markers as 2D observations.
//
// bundle_intrinsics is a bit-field of BundleIntrinsics values
// defining which of camera_intrinsics get refined;
// BUNDLE_NO_INTRINSICS keeps the whole intrinsics block constant.
//
// bundle_constraints is a bit-field of BundleConstraints values;
// BUNDLE_NO_TRANSLATION keeps camera translations constant.
//
// camera_intrinsics, all_cameras and all_points are updated in place
// with the optimized values; all_markers stays unchanged.
void EuclideanBundleCommonIntrinsics(const vector<Marker> &all_markers,
                                     const int bundle_intrinsics,
                                     const int bundle_constraints,
                                     double *camera_intrinsics,
                                     vector<EuclideanCamera> *all_cameras,
                                     vector<EuclideanPoint> *all_points) {
  PrintCameraIntrinsics("Original intrinsics: ", camera_intrinsics);

  ceres::Problem::Options problem_options;
  ceres::Problem problem(problem_options);

  // Convert cameras rotations to angle axis and merge with translation
  // into single parameter block for maximal minimization speed
  //
  // Block for minimization has got the following structure:
  //   <3 elements for angle-axis> <3 elements for translation>
  vector<Vec6> all_cameras_R_t =
    PackCamerasRotationAndTranslation(all_markers, *all_cameras);

  // Parameterization used to restrict camera motion for modal solvers.
  ceres::SubsetParameterization *constant_transform_parameterization = NULL;
  if (bundle_constraints & BUNDLE_NO_TRANSLATION) {
      std::vector<int> constant_translation;

      // First three elements are rotation, last three are translation.
      constant_translation.push_back(3);
      constant_translation.push_back(4);
      constant_translation.push_back(5);

      constant_transform_parameterization =
        new ceres::SubsetParameterization(6, constant_translation);
  }

  // Add a residual block for every marker whose camera and point both
  // exist in the reconstruction; markers referring to missing cameras
  // or points are silently skipped.
  int num_residuals = 0;
  bool have_locked_camera = false;
  for (int i = 0; i < all_markers.size(); ++i) {
    const Marker &marker = all_markers[i];
    EuclideanCamera *camera = CameraForImage(all_cameras, marker.image);
    EuclideanPoint *point = PointForTrack(all_points, marker.track);
    if (camera == NULL || point == NULL) {
      continue;
    }

    // Rotation of camera denoted in angle axis followed with
    // camera translation.
    double *current_camera_R_t = &all_cameras_R_t[camera->image](0);

    problem.AddResidualBlock(new ceres::AutoDiffCostFunction<
        OpenCVReprojectionError, 2, 8, 6, 3>(
            new OpenCVReprojectionError(
                marker.x,
                marker.y)),
        NULL,
        camera_intrinsics,
        current_camera_R_t,
        &point->X(0));

    // We lock the first camera to better deal with scene orientation ambiguity.
    if (!have_locked_camera) {
      problem.SetParameterBlockConstant(current_camera_R_t);
      have_locked_camera = true;
    }

    if (bundle_constraints & BUNDLE_NO_TRANSLATION) {
      problem.SetParameterization(current_camera_R_t,
                                  constant_transform_parameterization);
    }

    num_residuals++;
  }
  LOG(INFO) << "Number of residuals: " << num_residuals;

  if (!num_residuals) {
    LOG(INFO) << "Skipping running minimizer with zero residuals";
    return;
  }

  BundleIntrinsicsLogMessage(bundle_intrinsics);

  if (bundle_intrinsics == BUNDLE_NO_INTRINSICS) {
    // No camera intrinsics are being refined,
    // set the whole parameter block as constant for best performance.
    problem.SetParameterBlockConstant(camera_intrinsics);
  } else {
    // Set the camera intrinsics that are not to be bundled as
    // constant using some macro trickery.

    std::vector<int> constant_intrinsics;
#define MAYBE_SET_CONSTANT(bundle_enum, offset) \
    if (!(bundle_intrinsics & bundle_enum)) { \
      constant_intrinsics.push_back(offset); \
    }
    MAYBE_SET_CONSTANT(BUNDLE_FOCAL_LENGTH,    OFFSET_FOCAL_LENGTH);
    MAYBE_SET_CONSTANT(BUNDLE_PRINCIPAL_POINT, OFFSET_PRINCIPAL_POINT_X);
    MAYBE_SET_CONSTANT(BUNDLE_PRINCIPAL_POINT, OFFSET_PRINCIPAL_POINT_Y);
    MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K1,       OFFSET_K1);
    MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K2,       OFFSET_K2);
    MAYBE_SET_CONSTANT(BUNDLE_TANGENTIAL_P1,   OFFSET_P1);
    MAYBE_SET_CONSTANT(BUNDLE_TANGENTIAL_P2,   OFFSET_P2);
#undef MAYBE_SET_CONSTANT

    // Always set K3 constant, it's not used at the moment.
    constant_intrinsics.push_back(OFFSET_K3);

    ceres::SubsetParameterization *subset_parameterization =
      new ceres::SubsetParameterization(8, constant_intrinsics);

    problem.SetParameterization(camera_intrinsics, subset_parameterization);
  }

  // Configure the solver.
  ceres::Solver::Options options;
  options.use_nonmonotonic_steps = true;
  options.preconditioner_type = ceres::SCHUR_JACOBI;
  options.linear_solver_type = ceres::ITERATIVE_SCHUR;
  options.use_inner_iterations = true;
  options.max_num_iterations = 100;
  options.minimizer_progress_to_stdout = true;

  // Solve!
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);

  std::cout << "Final report:\n" << summary.FullReport();

  // Copy rotations and translations back.
  UnpackCamerasRotationAndTranslation(all_markers,
                                      all_cameras_R_t,
                                      all_cameras);

  PrintCameraIntrinsics("Final intrinsics: ", camera_intrinsics);
}
779}  // namespace
780
781int main(int argc, char **argv) {
782  google::ParseCommandLineFlags(&argc, &argv, true);
783  google::InitGoogleLogging(argv[0]);
784
785  if (FLAGS_input.empty()) {
786    LOG(ERROR) << "Usage: libmv_bundle_adjuster --input=blender_problem";
787    return EXIT_FAILURE;
788  }
789
790  double camera_intrinsics[8];
791  vector<EuclideanCamera> all_cameras;
792  vector<EuclideanPoint> all_points;
793  bool is_image_space;
794  vector<Marker> all_markers;
795
796  if (!ReadProblemFromFile(FLAGS_input,
797                           camera_intrinsics,
798                           &all_cameras,
799                           &all_points,
800                           &is_image_space,
801                           &all_markers)) {
802    LOG(ERROR) << "Error reading problem file";
803    return EXIT_FAILURE;
804  }
805
806  // If there's no refine_intrinsics passed via command line
807  // (in this case FLAGS_refine_intrinsics will be an empty string)
808  // we use problem's settings to detect whether intrinsics
809  // shall be refined or not.
810  //
811  // Namely, if problem has got markers stored in image (pixel)
812  // space, we do full intrinsics refinement. If markers are
813  // stored in normalized space, and refine_intrinsics is not
814  // set, no refining will happen.
815  //
816  // Using command line argument refine_intrinsics will explicitly
817  // declare which intrinsics need to be refined and in this case
818  // refining flags does not depend on problem at all.
819  int bundle_intrinsics = BUNDLE_NO_INTRINSICS;
820  if (FLAGS_refine_intrinsics.empty()) {
821    if (is_image_space) {
822      bundle_intrinsics = BUNDLE_FOCAL_LENGTH | BUNDLE_RADIAL;
823    }
824  } else {
825    if (FLAGS_refine_intrinsics == "radial") {
826      bundle_intrinsics = BUNDLE_FOCAL_LENGTH | BUNDLE_RADIAL;
827    } else if (FLAGS_refine_intrinsics != "none") {
828      LOG(ERROR) << "Unsupported value for refine-intrinsics";
829      return EXIT_FAILURE;
830    }
831  }
832
833  // Run the bundler.
834  EuclideanBundleCommonIntrinsics(all_markers,
835                                  bundle_intrinsics,
836                                  BUNDLE_NO_CONSTRAINTS,
837                                  camera_intrinsics,
838                                  &all_cameras,
839                                  &all_points);
840
841  return EXIT_SUCCESS;
842}
843