Blender V4.3
bundle.cc
Go to the documentation of this file.
1// Copyright (c) 2011, 2012, 2013 libmv authors.
2//
3// Permission is hereby granted, free of charge, to any person obtaining a copy
4// of this software and associated documentation files (the "Software"), to
5// deal in the Software without restriction, including without limitation the
6// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7// sell copies of the Software, and to permit persons to whom the Software is
8// furnished to do so, subject to the following conditions:
9//
10// The above copyright notice and this permission notice shall be included in
11// all copies or substantial portions of the Software.
12//
13// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19// IN THE SOFTWARE.
20
22
#include "libmv/simple_pipeline/bundle.h"

#include <map>
#include <thread>

#include "ceres/ceres.h"
#include "ceres/rotation.h"
#include "libmv/base/map.h"
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/fundamental.h"
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
#include "libmv/simple_pipeline/distortion_models.h"
#include "libmv/simple_pipeline/packed_intrinsics.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"
39
40namespace libmv {
41
42namespace {
43
44bool NeedUseInvertIntrinsicsPipeline(const CameraIntrinsics* intrinsics) {
45 const DistortionModelType distortion_model =
46 intrinsics->GetDistortionModelType();
47 return (distortion_model == DISTORTION_MODEL_NUKE);
48}
49
50// Apply distortion model (distort the input) on the input point in the
51// normalized space to get distorted coordinate in the image space.
52//
53// Using intrinsics values from the parameter block, which makes this function
54// suitable for use from a cost functor.
55//
56// Only use for distortion models which are analytically defined for their
57// Apply() function.
58//
59// The invariant_intrinsics are used to access intrinsics which are never
60// packed into parameter block: for example, distortion model type and image
61// dimension.
62template <typename T>
63void ApplyDistortionModelUsingIntrinsicsBlock(
64 const CameraIntrinsics* invariant_intrinsics,
65 const T* const intrinsics_block,
66 const T& normalized_x,
67 const T& normalized_y,
68 T* distorted_x,
69 T* distorted_y) {
70 // Unpack the intrinsics.
71 const T& focal_length =
73 const T& principal_point_x =
75 const T& principal_point_y =
77
78 // TODO(keir): Do early bailouts for zero distortion; these are expensive
79 // jet operations.
80 switch (invariant_intrinsics->GetDistortionModelType()) {
82 const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
83 const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
84 const T& k3 = intrinsics_block[PackedIntrinsics::OFFSET_K3];
85 const T& p1 = intrinsics_block[PackedIntrinsics::OFFSET_P1];
86 const T& p2 = intrinsics_block[PackedIntrinsics::OFFSET_P2];
87
89 focal_length,
90 principal_point_x,
91 principal_point_y,
92 k1,
93 k2,
94 k3,
95 p1,
96 p2,
97 normalized_x,
98 normalized_y,
99 distorted_x,
100 distorted_y);
101 return;
102 }
103
105 const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
106 const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
107
108 ApplyDivisionDistortionModel(focal_length,
109 focal_length,
110 principal_point_x,
111 principal_point_y,
112 k1,
113 k2,
114 normalized_x,
115 normalized_y,
116 distorted_x,
117 distorted_y);
118 return;
119 }
120
122 LOG(FATAL) << "Unsupported distortion model.";
123 return;
124 }
125
127 const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
128 const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
129 const T& k3 = intrinsics_block[PackedIntrinsics::OFFSET_K3];
130 const T& k4 = intrinsics_block[PackedIntrinsics::OFFSET_K4];
131 const T& p1 = intrinsics_block[PackedIntrinsics::OFFSET_P1];
132 const T& p2 = intrinsics_block[PackedIntrinsics::OFFSET_P2];
133
134 ApplyBrownDistortionModel(focal_length,
135 focal_length,
136 principal_point_x,
137 principal_point_y,
138 k1,
139 k2,
140 k3,
141 k4,
142 p1,
143 p2,
144 normalized_x,
145 normalized_y,
146 distorted_x,
147 distorted_y);
148 return;
149 }
150 }
151
152 LOG(FATAL) << "Unknown distortion model.";
153}
154
155// Invert distortion model (undistort the input) on the input point in the
156// image space to get undistorted coordinate in the normalized space.
157//
158// Using intrinsics values from the parameter block, which makes this function
159// suitable for use from a cost functor.
160//
161// Only use for distortion models which are analytically defined for their
162// Invert() function.
163//
164// The invariant_intrinsics are used to access intrinsics which are never
165// packed into parameter block: for example, distortion model type and image
166// dimension.
167template <typename T>
168void InvertDistortionModelUsingIntrinsicsBlock(
169 const CameraIntrinsics* invariant_intrinsics,
170 const T* const intrinsics_block,
171 const T& image_x,
172 const T& image_y,
173 T* normalized_x,
174 T* normalized_y) {
175 // Unpack the intrinsics.
176 const T& focal_length =
178 const T& principal_point_x =
180 const T& principal_point_y =
182
183 // TODO(keir): Do early bailouts for zero distortion; these are expensive
184 // jet operations.
185 switch (invariant_intrinsics->GetDistortionModelType()) {
189 LOG(FATAL) << "Unsupported distortion model.";
190 return;
191
193 const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
194 const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
195
196 InvertNukeDistortionModel(focal_length,
197 focal_length,
198 principal_point_x,
199 principal_point_y,
200 invariant_intrinsics->image_width(),
201 invariant_intrinsics->image_height(),
202 k1,
203 k2,
204 image_x,
205 image_y,
206 normalized_x,
207 normalized_y);
208 return;
209 }
210 }
211
212 LOG(FATAL) << "Unknown distortion model.";
213}
214
215template <typename T>
216void NormalizedToImageSpace(const T* const intrinsics_block,
217 const T& normalized_x,
218 const T& normalized_y,
219 T* image_x,
220 T* image_y) {
221 // Unpack the intrinsics.
222 const T& focal_length =
224 const T& principal_point_x =
226 const T& principal_point_y =
228
229 *image_x = normalized_x * focal_length + principal_point_x;
230 *image_y = normalized_y * focal_length + principal_point_y;
231}
232
233// Cost functor which computes reprojection error of 3D point X on camera
234// defined by angle-axis rotation and its translation (which are in the same
235// block due to optimization reasons).
236//
237// This functor can only be used for distortion models which have analytically
238// defined Apply() function.
239struct ReprojectionErrorApplyIntrinsics {
240 ReprojectionErrorApplyIntrinsics(const CameraIntrinsics* invariant_intrinsics,
241 const double observed_distorted_x,
242 const double observed_distorted_y,
243 const double weight)
244 : invariant_intrinsics_(invariant_intrinsics),
245 observed_distorted_x_(observed_distorted_x),
246 observed_distorted_y_(observed_distorted_y),
247 weight_(weight) {}
248
249 template <typename T>
250 bool operator()(const T* const intrinsics,
251 const T* const R_t, // Rotation denoted by angle axis
252 // followed with translation
253 const T* const X, // Point coordinates 3x1.
254 T* residuals) const {
255 // Compute projective coordinates: x = RX + t.
256 T x[3];
257
258 ceres::AngleAxisRotatePoint(R_t, X, x);
259 x[0] += R_t[3];
260 x[1] += R_t[4];
261 x[2] += R_t[5];
262
263 // Prevent points from going behind the camera.
264 if (x[2] < T(0)) {
265 return false;
266 }
267
268 // Compute normalized coordinates: x /= x[2].
269 T xn = x[0] / x[2];
270 T yn = x[1] / x[2];
271
272 T predicted_distorted_x, predicted_distorted_y;
273 ApplyDistortionModelUsingIntrinsicsBlock(invariant_intrinsics_,
274 intrinsics,
275 xn,
276 yn,
277 &predicted_distorted_x,
278 &predicted_distorted_y);
279
280 // The error is the difference between the predicted and observed position.
281 residuals[0] = (predicted_distorted_x - T(observed_distorted_x_)) * weight_;
282 residuals[1] = (predicted_distorted_y - T(observed_distorted_y_)) * weight_;
283 return true;
284 }
285
286 const CameraIntrinsics* invariant_intrinsics_;
287 const double observed_distorted_x_;
288 const double observed_distorted_y_;
289 const double weight_;
290};
291
292// Cost functor which computes reprojection error of 3D point X on camera
293// defined by angle-axis rotation and its translation (which are in the same
294// block due to optimization reasons).
295//
296// This functor can only be used for distortion models which have analytically
297// defined Invert() function.
298struct ReprojectionErrorInvertIntrinsics {
299 ReprojectionErrorInvertIntrinsics(
300 const CameraIntrinsics* invariant_intrinsics,
301 const double observed_distorted_x,
302 const double observed_distorted_y,
303 const double weight)
304 : invariant_intrinsics_(invariant_intrinsics),
305 observed_distorted_x_(observed_distorted_x),
306 observed_distorted_y_(observed_distorted_y),
307 weight_(weight) {}
308
309 template <typename T>
310 bool operator()(const T* const intrinsics,
311 const T* const R_t, // Rotation denoted by angle axis
312 // followed with translation
313 const T* const X, // Point coordinates 3x1.
314 T* residuals) const {
315 // Unpack the intrinsics.
316 const T& focal_length = intrinsics[PackedIntrinsics::OFFSET_FOCAL_LENGTH];
317 const T& principal_point_x =
319 const T& principal_point_y =
321
322 // Compute projective coordinates: x = RX + t.
323 T x[3];
324
325 ceres::AngleAxisRotatePoint(R_t, X, x);
326 x[0] += R_t[3];
327 x[1] += R_t[4];
328 x[2] += R_t[5];
329
330 // Prevent points from going behind the camera.
331 if (x[2] < T(0)) {
332 return false;
333 }
334
335 // Compute normalized coordinates: x /= x[2].
336 T xn = x[0] / x[2];
337 T yn = x[1] / x[2];
338
339 // Compute image space coordinate from normalized.
340 T predicted_x = focal_length * xn + principal_point_x;
341 T predicted_y = focal_length * yn + principal_point_y;
342
343 T observed_undistorted_normalized_x, observed_undistorted_normalized_y;
344 InvertDistortionModelUsingIntrinsicsBlock(
345 invariant_intrinsics_,
346 intrinsics,
347 T(observed_distorted_x_),
348 T(observed_distorted_y_),
349 &observed_undistorted_normalized_x,
350 &observed_undistorted_normalized_y);
351
352 T observed_undistorted_image_x, observed_undistorted_image_y;
353 NormalizedToImageSpace(intrinsics,
354 observed_undistorted_normalized_x,
355 observed_undistorted_normalized_y,
356 &observed_undistorted_image_x,
357 &observed_undistorted_image_y);
358
359 // The error is the difference between the predicted and observed position.
360 residuals[0] = (predicted_x - observed_undistorted_image_x) * weight_;
361 residuals[1] = (predicted_y - observed_undistorted_image_y) * weight_;
362
363 return true;
364 }
365
366 const CameraIntrinsics* invariant_intrinsics_;
367 const double observed_distorted_x_;
368 const double observed_distorted_y_;
369 const double weight_;
370};
371
372// Print a message to the log which camera intrinsics are gonna to be optimized.
373void BundleIntrinsicsLogMessage(const int bundle_intrinsics) {
374 if (bundle_intrinsics == BUNDLE_NO_INTRINSICS) {
375 LG << "Bundling only camera positions.";
376 } else {
377 std::string bundling_message = "";
378
379#define APPEND_BUNDLING_INTRINSICS(name, flag) \
380 if (bundle_intrinsics & flag) { \
381 if (!bundling_message.empty()) { \
382 bundling_message += ", "; \
383 } \
384 bundling_message += name; \
385 } \
386 (void)0
387
396
397 LG << "Bundling " << bundling_message << ".";
398 }
399}
400
401// Get a vector of camera's rotations denoted by angle axis
402// conjuncted with translations into single block
403//
404// Element with key i matches to a rotation+translation for
405// camera at image i.
406map<int, Vec6> PackCamerasRotationAndTranslation(
407 const EuclideanReconstruction& reconstruction) {
408 map<int, Vec6> all_cameras_R_t;
409
410 vector<EuclideanCamera> all_cameras = reconstruction.AllCameras();
411 for (const EuclideanCamera& camera : all_cameras) {
412 Vec6 camera_R_t;
413 ceres::RotationMatrixToAngleAxis(&camera.R(0, 0), &camera_R_t(0));
414 camera_R_t.tail<3>() = camera.t;
415 all_cameras_R_t.insert(make_pair(camera.image, camera_R_t));
416 }
417
418 return all_cameras_R_t;
419}
420
421// Convert cameras rotations fro mangle axis back to rotation matrix.
422void UnpackCamerasRotationAndTranslation(
423 const map<int, Vec6>& all_cameras_R_t,
424 EuclideanReconstruction* reconstruction) {
425 for (map<int, Vec6>::value_type image_and_camera_R_T : all_cameras_R_t) {
426 const int image = image_and_camera_R_T.first;
427 const Vec6& camera_R_t = image_and_camera_R_T.second;
428
429 EuclideanCamera* camera = reconstruction->CameraForImage(image);
430 if (!camera) {
431 continue;
432 }
433
434 ceres::AngleAxisToRotationMatrix(&camera_R_t(0), &camera->R(0, 0));
435 camera->t = camera_R_t.tail<3>();
436 }
437}
438
439// Converts sparse CRSMatrix to Eigen matrix, so it could be used
440// all over in the pipeline.
441//
442// TODO(sergey): currently uses dense Eigen matrices, best would
443// be to use sparse Eigen matrices
444void CRSMatrixToEigenMatrix(const ceres::CRSMatrix& crs_matrix,
445 Mat* eigen_matrix) {
446 eigen_matrix->resize(crs_matrix.num_rows, crs_matrix.num_cols);
447 eigen_matrix->setZero();
448
449 for (int row = 0; row < crs_matrix.num_rows; ++row) {
450 int start = crs_matrix.rows[row];
451 int end = crs_matrix.rows[row + 1] - 1;
452
453 for (int i = start; i <= end; i++) {
454 int col = crs_matrix.cols[i];
455 double value = crs_matrix.values[i];
456
457 (*eigen_matrix)(row, col) = value;
458 }
459 }
460}
461
462void EuclideanBundlerPerformEvaluation(const Tracks& tracks,
463 EuclideanReconstruction* reconstruction,
464 map<int, Vec6>* all_cameras_R_t,
465 ceres::Problem* problem,
466 BundleEvaluation* evaluation) {
467 int max_track = tracks.MaxTrack();
468 // Number of camera rotations equals to number of translation,
469 int num_cameras = all_cameras_R_t->size();
470 int num_points = 0;
471
472 vector<EuclideanPoint*> minimized_points;
473 for (int i = 0; i <= max_track; i++) {
474 EuclideanPoint* point = reconstruction->PointForTrack(i);
475 if (point) {
476 // We need to know whether the track is a constant zero weight.
477 // If it is so it wouldn't have a parameter block in the problem.
478 //
479 // Usually getting all markers of a track is considered slow, but this
480 // code is only used by the keyframe selection code where there aren't
481 // that many tracks in the storage and there are only 2 frames for each
482 // of the tracks.
483 vector<Marker> markera_of_track = tracks.MarkersForTrack(i);
484 for (int j = 0; j < markera_of_track.size(); j++) {
485 if (markera_of_track.at(j).weight != 0.0) {
486 minimized_points.push_back(point);
487 num_points++;
488 break;
489 }
490 }
491 }
492 }
493
494 LG << "Number of cameras " << num_cameras;
495 LG << "Number of points " << num_points;
496
497 evaluation->num_cameras = num_cameras;
498 evaluation->num_points = num_points;
499
500 if (evaluation->evaluate_jacobian) { // Evaluate jacobian matrix.
501 ceres::CRSMatrix evaluated_jacobian;
502 ceres::Problem::EvaluateOptions eval_options;
503
504 // Cameras goes first in the ordering.
505 int max_image = tracks.MaxImage();
506 for (int i = 0; i <= max_image; i++) {
507 const EuclideanCamera* camera = reconstruction->CameraForImage(i);
508 if (camera) {
509 double* current_camera_R_t = &(*all_cameras_R_t)[i](0);
510
511 // All cameras are variable now.
512 problem->SetParameterBlockVariable(current_camera_R_t);
513
514 eval_options.parameter_blocks.push_back(current_camera_R_t);
515 }
516 }
517
518 // Points goes at the end of ordering,
519 for (int i = 0; i < minimized_points.size(); i++) {
520 EuclideanPoint* point = minimized_points.at(i);
521 eval_options.parameter_blocks.push_back(&point->X(0));
522 }
523
524 problem->Evaluate(eval_options, NULL, NULL, NULL, &evaluated_jacobian);
525
526 CRSMatrixToEigenMatrix(evaluated_jacobian, &evaluation->jacobian);
527 }
528}
529
530template <typename CostFunction>
531void AddResidualBlockToProblemImpl(const CameraIntrinsics* invariant_intrinsics,
532 double observed_x,
533 double observed_y,
534 double weight,
535 double* intrinsics_block,
536 double* camera_R_t,
538 ceres::Problem* problem) {
539 problem->AddResidualBlock(
540 new ceres::AutoDiffCostFunction<CostFunction,
541 2,
543 6,
544 3>(new CostFunction(
545 invariant_intrinsics, observed_x, observed_y, weight)),
546 NULL,
547 intrinsics_block,
548 camera_R_t,
549 &point->X(0));
550}
551
552void AddResidualBlockToProblem(const CameraIntrinsics* invariant_intrinsics,
553 const Marker& marker,
554 double marker_weight,
555 double* intrinsics_block,
556 double* camera_R_t,
558 ceres::Problem* problem) {
559 if (NeedUseInvertIntrinsicsPipeline(invariant_intrinsics)) {
560 AddResidualBlockToProblemImpl<ReprojectionErrorInvertIntrinsics>(
561 invariant_intrinsics,
562 marker.x,
563 marker.y,
564 marker_weight,
565 intrinsics_block,
566 camera_R_t,
567 point,
568 problem);
569 } else {
570 AddResidualBlockToProblemImpl<ReprojectionErrorApplyIntrinsics>(
571 invariant_intrinsics,
572 marker.x,
573 marker.y,
574 marker_weight,
575 intrinsics_block,
576 camera_R_t,
577 point,
578 problem);
579 }
580}
581
582// This is an utility function to only bundle 3D position of
583// given markers list.
584//
585// Main purpose of this function is to adjust positions of tracks
586// which does have constant zero weight and so far only were using
587// algebraic intersection to obtain their 3D positions.
588//
589// At this point we only need to bundle points positions, cameras
590// are to be totally still here.
591void EuclideanBundlePointsOnly(const CameraIntrinsics* invariant_intrinsics,
592 const vector<Marker>& markers,
593 map<int, Vec6>& all_cameras_R_t,
594 double* intrinsics_block,
595 EuclideanReconstruction* reconstruction) {
596 ceres::Problem::Options problem_options;
597 ceres::Problem problem(problem_options);
598 int num_residuals = 0;
599 for (int i = 0; i < markers.size(); ++i) {
600 const Marker& marker = markers[i];
601 EuclideanCamera* camera = reconstruction->CameraForImage(marker.image);
602 EuclideanPoint* point = reconstruction->PointForTrack(marker.track);
603 if (camera == NULL || point == NULL) {
604 continue;
605 }
606
607 // Rotation of camera denoted in angle axis followed with
608 // camera translation.
609 double* current_camera_R_t = &all_cameras_R_t[camera->image](0);
610
611 AddResidualBlockToProblem(invariant_intrinsics,
612 marker,
613 1.0,
614 intrinsics_block,
615 current_camera_R_t,
616 point,
617 &problem);
618
619 problem.SetParameterBlockConstant(current_camera_R_t);
620 num_residuals++;
621 }
622
623 LG << "Number of residuals: " << num_residuals;
624 if (!num_residuals) {
625 LG << "Skipping running minimizer with zero residuals";
626 return;
627 }
628
629 problem.SetParameterBlockConstant(intrinsics_block);
630
631 // Configure the solver.
632 ceres::Solver::Options options;
633 options.use_nonmonotonic_steps = true;
634 options.preconditioner_type = ceres::SCHUR_JACOBI;
635 options.linear_solver_type = ceres::ITERATIVE_SCHUR;
636 options.use_explicit_schur_complement = true;
637 options.use_inner_iterations = true;
638 options.max_num_iterations = 100;
639 options.num_threads = std::thread::hardware_concurrency();
640
641 // Solve!
642 ceres::Solver::Summary summary;
643 ceres::Solve(options, &problem, &summary);
644
645 LG << "Final report:\n" << summary.FullReport();
646}
647
648} // namespace
649
651 EuclideanReconstruction* reconstruction) {
652 PolynomialCameraIntrinsics empty_intrinsics;
656 reconstruction,
657 &empty_intrinsics,
658 NULL);
659}
660
662 const int bundle_intrinsics,
663 const int bundle_constraints,
664 EuclideanReconstruction* reconstruction,
665 CameraIntrinsics* intrinsics,
666 BundleEvaluation* evaluation) {
667 LG << "Original intrinsics: " << *intrinsics;
668 vector<Marker> markers = tracks.AllMarkers();
669
670 // N-th element denotes whether track N is a constant zero-weighted track.
671 vector<bool> zero_weight_tracks_flags(tracks.MaxTrack() + 1, true);
672
673 // Pack all intrinsics parameters into a single block and rely on local
674 // parameterizations to control which intrinsics are allowed to vary.
675 PackedIntrinsics packed_intrinsics;
676 intrinsics->Pack(&packed_intrinsics);
677 double* intrinsics_block = packed_intrinsics.GetParametersBlock();
678
679 // Convert cameras rotations to angle axis and merge with translation
680 // into single parameter block for maximal minimization speed.
681 //
682 // Block for minimization has got the following structure:
683 // <3 elements for angle-axis> <3 elements for translation>
684 map<int, Vec6> all_cameras_R_t =
685 PackCamerasRotationAndTranslation(*reconstruction);
686
687 // Parameterization used to restrict camera motion for modal solvers.
688 ceres::SubsetManifold* constant_translation_manifold = NULL;
689 if (bundle_constraints & BUNDLE_NO_TRANSLATION) {
690 std::vector<int> constant_translation;
691
692 // First three elements are rotation, ast three are translation.
693 constant_translation.push_back(3);
694 constant_translation.push_back(4);
695 constant_translation.push_back(5);
696
697 constant_translation_manifold =
698 new ceres::SubsetManifold(6, constant_translation);
699 }
700
701 // Add residual blocks to the problem.
702 ceres::Problem::Options problem_options;
703 ceres::Problem problem(problem_options);
704 int num_residuals = 0;
705 bool have_locked_camera = false;
706 for (int i = 0; i < markers.size(); ++i) {
707 const Marker& marker = markers[i];
708 EuclideanCamera* camera = reconstruction->CameraForImage(marker.image);
709 EuclideanPoint* point = reconstruction->PointForTrack(marker.track);
710 if (camera == NULL || point == NULL) {
711 continue;
712 }
713
714 // Rotation of camera denoted in angle axis followed with
715 // camera translation.
716 double* current_camera_R_t = &all_cameras_R_t[camera->image](0);
717
718 // Skip residual block for markers which does have absolutely
719 // no affect on the final solution.
720 // This way ceres is not gonna to go crazy.
721 if (marker.weight != 0.0) {
722 AddResidualBlockToProblem(intrinsics,
723 marker,
724 marker.weight,
725 intrinsics_block,
726 current_camera_R_t,
727 point,
728 &problem);
729
730 // We lock the first camera to better deal with scene orientation
731 // ambiguity.
732 if (!have_locked_camera) {
733 problem.SetParameterBlockConstant(current_camera_R_t);
734 have_locked_camera = true;
735 }
736
737 if (bundle_constraints & BUNDLE_NO_TRANSLATION) {
738 problem.SetManifold(current_camera_R_t, constant_translation_manifold);
739 }
740
741 zero_weight_tracks_flags[marker.track] = false;
742 num_residuals++;
743 }
744 }
745 LG << "Number of residuals: " << num_residuals;
746
747 if (!num_residuals) {
748 LG << "Skipping running minimizer with zero residuals";
749 return;
750 }
751
753 (bundle_intrinsics & BUNDLE_TANGENTIAL) != 0) {
754 LOG(FATAL) << "Division model doesn't support bundling "
755 "of tangential distortion";
756 }
757
758 BundleIntrinsicsLogMessage(bundle_intrinsics);
759
760 if (bundle_intrinsics == BUNDLE_NO_INTRINSICS) {
761 // No camera intrinsics are being refined,
762 // set the whole parameter block as constant for best performance.
763 problem.SetParameterBlockConstant(intrinsics_block);
764 } else {
765 // Set the camera intrinsics that are not to be bundled as
766 // constant using some macro trickery.
767
768 std::vector<int> constant_intrinsics;
769#define MAYBE_SET_CONSTANT(bundle_enum, offset) \
770 if (!(bundle_intrinsics & bundle_enum) || \
771 !packed_intrinsics.IsParameterDefined(offset)) { \
772 constant_intrinsics.push_back(offset); \
773 }
786#undef MAYBE_SET_CONSTANT
787
788 if (!constant_intrinsics.empty()) {
789 ceres::SubsetManifold* subset_parameterization =
790 new ceres::SubsetManifold(PackedIntrinsics::NUM_PARAMETERS,
791 constant_intrinsics);
792
793 problem.SetManifold(intrinsics_block, subset_parameterization);
794 }
795 }
796
797 // Configure the solver.
798 ceres::Solver::Options options;
799 options.use_nonmonotonic_steps = true;
800 options.preconditioner_type = ceres::SCHUR_JACOBI;
801 options.linear_solver_type = ceres::ITERATIVE_SCHUR;
802 options.use_explicit_schur_complement = true;
803 options.use_inner_iterations = true;
804 options.max_num_iterations = 100;
805 options.num_threads = std::thread::hardware_concurrency();
806
807 // Solve!
808 ceres::Solver::Summary summary;
809 ceres::Solve(options, &problem, &summary);
810
811 LG << "Final report:\n" << summary.FullReport();
812
813 // Copy rotations and translations back.
814 UnpackCamerasRotationAndTranslation(all_cameras_R_t, reconstruction);
815
816 // Copy intrinsics back.
817 if (bundle_intrinsics != BUNDLE_NO_INTRINSICS) {
818 intrinsics->Unpack(packed_intrinsics);
819 }
820
821 LG << "Final intrinsics: " << *intrinsics;
822
823 if (evaluation) {
824 EuclideanBundlerPerformEvaluation(
825 tracks, reconstruction, &all_cameras_R_t, &problem, evaluation);
826 }
827
828 // Separate step to adjust positions of tracks which are
829 // constant zero-weighted.
830 vector<Marker> zero_weight_markers;
831 for (int track = 0; track < tracks.MaxTrack(); ++track) {
832 if (zero_weight_tracks_flags[track]) {
833 vector<Marker> current_markers = tracks.MarkersForTrack(track);
834 zero_weight_markers.reserve(zero_weight_markers.size() +
835 current_markers.size());
836 for (int i = 0; i < current_markers.size(); ++i) {
837 zero_weight_markers.push_back(current_markers[i]);
838 }
839 }
840 }
841
842 if (zero_weight_markers.size()) {
843 LG << "Refining position of constant zero-weighted tracks";
844 EuclideanBundlePointsOnly(intrinsics,
845 zero_weight_markers,
846 all_cameras_R_t,
847 intrinsics_block,
848 reconstruction);
849 }
850}
851
// Intentionally unimplemented stub: projective bundle adjustment is not
// supported yet.
void ProjectiveBundle(const Tracks& /*tracks*/,
                      ProjectiveReconstruction* /*reconstruction*/) {
  // TODO(keir): Implement this! This can't work until we have a better bundler
  // than SSBA, since SSBA has no support for projective bundling.
}
857
858} // namespace libmv
#define X
in reality light always falls off quadratically Particle Retrieve the data of the particle that spawned the object for example to give variation to multiple instances of an object Point Retrieve information about points in a point cloud Retrieve the edges of an object as it appears to Cycles topology will always appear triangulated Convert a blackbody temperature to an RGB value Normal Generate a perturbed normal from an RGB normal map image Typically used for faking highly detailed surfaces Generate an OSL shader from a file or text data block Image Sample an image file as a texture Gabor Generate Gabor noise Gradient Generate interpolated color and intensity values based on the input vector Magic Generate a psychedelic color texture Voronoi Generate Worley noise based on the distance to random points Typically used to generate textures such as or biological cells Brick Generate a procedural texture producing bricks Texture Retrieve multiple types of texture coordinates nTypically used as inputs for texture nodes Vector Convert a or normal between camera
in reality light always falls off quadratically Particle Retrieve the data of the particle that spawned the object for example to give variation to multiple instances of an object Point Retrieve information about points in a point cloud Retrieve the edges of an object as it appears to Cycles topology will always appear triangulated Convert a blackbody temperature to an RGB value Normal Generate a perturbed normal from an RGB normal map image Typically used for faking highly detailed surfaces Generate an OSL shader from a file or text data block Image Sample an image file as a texture Gabor Generate Gabor noise Gradient Generate interpolated color and intensity values based on the input vector Magic Generate a psychedelic color texture Voronoi Generate Worley noise based on the distance to random points Typically used to generate textures such as or biological cells Brick Generate a procedural texture producing bricks Texture Retrieve multiple types of texture coordinates nTypically used as inputs for texture nodes Vector Convert a point
SIMD_FORCE_INLINE btVector3 operator()(const btVector3 &x) const
Return the transform of the vector.
Definition btTransform.h:90
#define MAYBE_SET_CONSTANT(bundle_enum, offset)
#define APPEND_BUNDLING_INTRINSICS(name, flag)
virtual DistortionModelType GetDistortionModelType() const =0
vector< EuclideanCamera > AllCameras() const
Returns all cameras.
EuclideanPoint * PointForTrack(int track)
Returns a pointer to the point corresponding to track.
EuclideanCamera * CameraForImage(int image)
Returns a pointer to the camera corresponding to image.
virtual void Unpack(const PackedIntrinsics &packed_intrinsics)
virtual void Pack(PackedIntrinsics *packed_intrinsics) const
virtual DistortionModelType GetDistortionModelType() const =0
EuclideanPoint * PointForTrack(int track)
Returns a pointer to the point corresponding to track.
EuclideanCamera * CameraForImage(int image)
Returns a pointer to the camera corresponding to image.
input_tx image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "preview_img") .compute_source("compositor_compute_preview.glsl") .do_static_compilation(true)
CCL_NAMESPACE_BEGIN struct Options options
#define NULL
uint col
#define LG
#define LOG(severity)
Definition log.h:33
#define T
Eigen::Matrix< double, 6, 1 > Vec6
Definition numeric.h:109
@ BUNDLE_NO_TRANSLATION
Definition bundle.h:118
@ BUNDLE_NO_CONSTRAINTS
Definition bundle.h:117
void ProjectiveBundle(const Tracks &, ProjectiveReconstruction *)
Definition bundle.cc:852
void InvertNukeDistortionModel(const T &focal_length_x, const T &focal_length_y, const T &principal_point_x, const T &principal_point_y, const int image_width, const int image_height, const T &k1, const T &k2, const T &image_x, const T &image_y, T *normalized_x, T *normalized_y)
void ApplyPolynomialDistortionModel(const T &focal_length_x, const T &focal_length_y, const T &principal_point_x, const T &principal_point_y, const T &k1, const T &k2, const T &k3, const T &p1, const T &p2, const T &normalized_x, const T &normalized_y, T *image_x, T *image_y)
@ BUNDLE_RADIAL_K1
Definition bundle.h:105
@ BUNDLE_RADIAL_K3
Definition bundle.h:107
@ BUNDLE_RADIAL_K4
Definition bundle.h:108
@ BUNDLE_FOCAL_LENGTH
Definition bundle.h:102
@ BUNDLE_TANGENTIAL_P1
Definition bundle.h:112
@ BUNDLE_TANGENTIAL_P2
Definition bundle.h:113
@ BUNDLE_NO_INTRINSICS
Definition bundle.h:100
@ BUNDLE_PRINCIPAL_POINT
Definition bundle.h:103
@ BUNDLE_RADIAL_K2
Definition bundle.h:106
@ BUNDLE_TANGENTIAL
Definition bundle.h:114
void ApplyDivisionDistortionModel(const T &focal_length_x, const T &focal_length_y, const T &principal_point_x, const T &principal_point_y, const T &k1, const T &k2, const T &normalized_x, const T &normalized_y, T *image_x, T *image_y)
Eigen::MatrixXd Mat
Definition numeric.h:60
void EuclideanBundle(const Tracks &tracks, EuclideanReconstruction *reconstruction)
Definition bundle.cc:650
std::vector< ElementType, Eigen::aligned_allocator< ElementType > > vector
void EuclideanBundleCommonIntrinsics(const Tracks &tracks, const int bundle_intrinsics, const int bundle_constraints, EuclideanReconstruction *reconstruction, CameraIntrinsics *intrinsics, BundleEvaluation *evaluation)
Definition bundle.cc:661
@ DISTORTION_MODEL_POLYNOMIAL
@ DISTORTION_MODEL_DIVISION
@ DISTORTION_MODEL_BROWN
@ DISTORTION_MODEL_NUKE
void ApplyBrownDistortionModel(const T &focal_length_x, const T &focal_length_y, const T &principal_point_x, const T &principal_point_y, const T &k1, const T &k2, const T &k3, const T &k4, const T &p1, const T &p2, const T &normalized_x, const T &normalized_y, T *image_x, T *image_y)
int track
Definition marker.h:43
ListBase tracks
Definition tracking.cc:70