Merge branch 'develop' into fix/imu-factor-serialization
commit 2c67f6fd11
				|  | @ -537,54 +537,54 @@ endif() | |||
| 
 | ||||
| print_build_options_for_target(gtsam) | ||||
| 
 | ||||
| message(STATUS "  Use System Eigen               : ${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})") | ||||
| message(STATUS "  Use System Eigen                : ${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})") | ||||
| 
 | ||||
| if(GTSAM_USE_TBB) | ||||
| 	message(STATUS "  Use Intel TBB                  : Yes") | ||||
| 	message(STATUS "  Use Intel TBB                   : Yes") | ||||
| elseif(TBB_FOUND) | ||||
| 	message(STATUS "  Use Intel TBB                  : TBB found but GTSAM_WITH_TBB is disabled") | ||||
| 	message(STATUS "  Use Intel TBB                   : TBB found but GTSAM_WITH_TBB is disabled") | ||||
| else() | ||||
| 	message(STATUS "  Use Intel TBB                  : TBB not found") | ||||
| 	message(STATUS "  Use Intel TBB                   : TBB not found") | ||||
| endif() | ||||
| if(GTSAM_USE_EIGEN_MKL) | ||||
| 	message(STATUS "  Eigen will use MKL             : Yes") | ||||
| 	message(STATUS "  Eigen will use MKL              : Yes") | ||||
| elseif(MKL_FOUND) | ||||
| 	message(STATUS "  Eigen will use MKL             : MKL found but GTSAM_WITH_EIGEN_MKL is disabled") | ||||
| 	message(STATUS "  Eigen will use MKL              : MKL found but GTSAM_WITH_EIGEN_MKL is disabled") | ||||
| else() | ||||
| 	message(STATUS "  Eigen will use MKL             : MKL not found") | ||||
| 	message(STATUS "  Eigen will use MKL              : MKL not found") | ||||
| endif() | ||||
| if(GTSAM_USE_EIGEN_MKL_OPENMP) | ||||
| 	message(STATUS "  Eigen will use MKL and OpenMP  : Yes") | ||||
| 	message(STATUS "  Eigen will use MKL and OpenMP   : Yes") | ||||
| elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL) | ||||
| 	message(STATUS "  Eigen will use MKL and OpenMP  : OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled") | ||||
| 	message(STATUS "  Eigen will use MKL and OpenMP   : OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled") | ||||
| elseif(OPENMP_FOUND AND NOT MKL_FOUND) | ||||
| 	message(STATUS "  Eigen will use MKL and OpenMP  : OpenMP found but MKL not found") | ||||
| 	message(STATUS "  Eigen will use MKL and OpenMP   : OpenMP found but MKL not found") | ||||
| elseif(OPENMP_FOUND) | ||||
| 	message(STATUS "  Eigen will use MKL and OpenMP  : OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled") | ||||
| 	message(STATUS "  Eigen will use MKL and OpenMP   : OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled") | ||||
| else() | ||||
| 	message(STATUS "  Eigen will use MKL and OpenMP  : OpenMP not found") | ||||
| 	message(STATUS "  Eigen will use MKL and OpenMP   : OpenMP not found") | ||||
| endif() | ||||
| message(STATUS "  Default allocator              : ${GTSAM_DEFAULT_ALLOCATOR}") | ||||
| message(STATUS "  Default allocator               : ${GTSAM_DEFAULT_ALLOCATOR}") | ||||
| 
 | ||||
| if(GTSAM_THROW_CHEIRALITY_EXCEPTION) | ||||
| 	message(STATUS "  Cheirality exceptions enabled  : YES") | ||||
| 	message(STATUS "  Cheirality exceptions enabled   : YES") | ||||
| else() | ||||
| 	message(STATUS "  Cheirality exceptions enabled  : NO") | ||||
| 	message(STATUS "  Cheirality exceptions enabled   : NO") | ||||
| endif() | ||||
| 
 | ||||
| if(NOT MSVC AND NOT XCODE_VERSION) | ||||
| 	if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE) | ||||
| 		message(STATUS "  Build with ccache              : Yes") | ||||
| 		message(STATUS "  Build with ccache               : Yes") | ||||
| 	elseif(CCACHE_FOUND) | ||||
| 		message(STATUS "  Build with ccache              : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled") | ||||
| 		message(STATUS "  Build with ccache               : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled") | ||||
| 	else() | ||||
| 		message(STATUS "  Build with ccache              : No") | ||||
| 		message(STATUS "  Build with ccache               : No") | ||||
| 	endif() | ||||
| endif() | ||||
| 
 | ||||
| message(STATUS "Packaging flags                                               ") | ||||
| message(STATUS "  CPack Source Generator         : ${CPACK_SOURCE_GENERATOR}") | ||||
| message(STATUS "  CPack Generator                : ${CPACK_GENERATOR}") | ||||
| message(STATUS "  CPack Source Generator          : ${CPACK_SOURCE_GENERATOR}") | ||||
| message(STATUS "  CPack Generator                 : ${CPACK_GENERATOR}") | ||||
| 
 | ||||
| message(STATUS "GTSAM flags                                               ") | ||||
| print_config_flag(${GTSAM_USE_QUATERNIONS}             "Quaternions as default Rot3     ") | ||||
|  | @ -595,13 +595,17 @@ print_config_flag(${GTSAM_ALLOW_DEPRECATED_SINCE_V4}   "Deprecated in GTSAM 4 al | |||
| print_config_flag(${GTSAM_TYPEDEF_POINTS_TO_VECTORS}   "Point3 is typedef to Vector3    ") | ||||
| print_config_flag(${GTSAM_SUPPORT_NESTED_DISSECTION}   "Metis-based Nested Dissection   ") | ||||
| print_config_flag(${GTSAM_TANGENT_PREINTEGRATION}      "Use tangent-space preintegration") | ||||
| print_config_flag(${GTSAM_BUILD_WRAP}                  "Build Wrap                     ") | ||||
| print_config_flag(${GTSAM_BUILD_WRAP}                  "Build Wrap                      ") | ||||
| 
 | ||||
| message(STATUS "MATLAB toolbox flags                                      ") | ||||
| print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX}      "Install matlab toolbox         ") | ||||
| print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX}      "Install MATLAB toolbox          ") | ||||
| if (${GTSAM_INSTALL_MATLAB_TOOLBOX}) | ||||
|     message(STATUS "  MATLAB root                     : ${MATLAB_ROOT}") | ||||
|     message(STATUS "  MEX binary                      : ${MEX_COMMAND}") | ||||
| endif() | ||||
| 
 | ||||
| message(STATUS "Cython toolbox flags                                      ") | ||||
| print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX}      "Install Cython toolbox         ") | ||||
| print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX}      "Install Cython toolbox          ") | ||||
| if(GTSAM_INSTALL_CYTHON_TOOLBOX) | ||||
| 	message(STATUS "  Python version                 : ${GTSAM_PYTHON_VERSION}") | ||||
| endif() | ||||
|  |  | |||
|  | @ -1,7 +1,4 @@ | |||
| set (excluded_examples | ||||
|     DiscreteBayesNet_FG.cpp | ||||
|     UGM_chain.cpp | ||||
|     UGM_small.cpp | ||||
|     elaboratePoint2KalmanFilter.cpp | ||||
| ) | ||||
| 
 | ||||
|  |  | |||
|  | @ -0,0 +1,6 @@ | |||
| VERTEX_SE3:QUAT 0 -3.865747774038187 0.06639337702667497 -0.16064874691945374 0.024595211709139555 0.49179523413089893 -0.06279232989379242 0.8680954132776109 | ||||
| VERTEX_SE3:QUAT 1 -3.614793159814815 0.04774490041587656 -0.2837650367985949 0.00991721787943912 0.4854918961891193 -0.042343290945895576 0.8731588132957809 | ||||
| VERTEX_SE3:QUAT 2 -3.255096913553434 0.013296754286114112 -0.5339792269680574 -0.027851108010665374 0.585478168397957 -0.05088341463532465 0.8086102325762403 | ||||
| EDGE_SE3:QUAT 0 1 0.2509546142233723 -0.01864847661079841 -0.12311628987914114 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 | ||||
| EDGE_SE3:QUAT 0 2 0.6106508604847534 -0.05309662274056086 -0.3733304800486037 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 | ||||
| EDGE_SE3:QUAT 1 2 0.3596962462613811 -0.03444814612976245 -0.25021419016946256 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 | ||||
|  | @ -0,0 +1,11 @@ | |||
| VERTEX_SE3:QUAT 0 0 0 0 0 0 0 1 | ||||
| VERTEX_SE3:QUAT 1 0 0 0 0 0 0 1 | ||||
| VERTEX_SE3:QUAT 2 0 0 0 0.00499994 0.00499994 0.00499994 0.999963 | ||||
| VERTEX_SE3:QUAT 3 0 0 0 -0.00499994 -0.00499994 -0.00499994 0.999963 | ||||
| VERTEX_SE3:QUAT 4 0 0 0 0.00499994 0.00499994 0.00499994 0.999963 | ||||
| EDGE_SE3:QUAT 1 2 1 2 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 | ||||
| EDGE_SE3:QUAT 2 3 -3.26795e-07 1 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 | ||||
| EDGE_SE3:QUAT 3 4 1 1 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 | ||||
| EDGE_SE3:QUAT 3 1 6.9282e-07 2 0 0 0 1 1.73205e-07 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 | ||||
| EDGE_SE3:QUAT 1 4 -1 1 0 0 0 -0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 | ||||
| EDGE_SE3:QUAT 0 1 0 0 0 0 0 0 1 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 | ||||
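These two new example files use the plain-text g2o 3D pose-graph format: a VERTEX_SE3:QUAT line carries a key, a translation (x y z) and a quaternion (qx qy qz qw, with w last), and an EDGE_SE3:QUAT line carries two keys, the relative pose in the same layout, followed by the 21 upper-triangular entries of the 6x6 information matrix. A minimal stand-alone parsing sketch, mirroring the parse3DPoses code further down in this diff (the literal string below is illustrative, not copied from the files):

#include <iostream>
#include <sstream>
#include <string>

int main() {
  // One VERTEX_SE3:QUAT record: tag, key, translation (x y z), quaternion (qx qy qz qw).
  std::istringstream line(
      "VERTEX_SE3:QUAT 0 -3.87 0.066 -0.161 0.025 0.492 -0.063 0.868");
  std::string tag;
  size_t key;
  double x, y, z, qx, qy, qz, qw;
  line >> tag >> key >> x >> y >> z >> qx >> qy >> qz >> qw;
  std::cout << "pose " << key << " at (" << x << " " << y << " " << z << ")\n";
  return 0;
}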
|  | @ -10,110 +10,111 @@ | |||
|  * -------------------------------------------------------------------------- */ | ||||
| 
 | ||||
| /**
 | ||||
|  * @file  DiscreteBayesNet_FG.cpp | ||||
|  * @file  DiscreteBayesNet_graph.cpp | ||||
|  * @brief   Discrete Bayes Net example using Factor Graphs | ||||
|  * @author  Abhijit | ||||
|  * @date  Jun 4, 2012 | ||||
|  * | ||||
|  * We use the famous Rain/Cloudy/Sprinkler Example of [Russell & Norvig, 2009, p529] | ||||
|  * You may be familiar with other graphical model packages like BNT (available | ||||
|  * at http://bnt.googlecode.com/svn/trunk/docs/usage.html) where this is used as an
 | ||||
|  * example. The following demo is same as that in the above link, except that | ||||
|  * everything is using GTSAM. | ||||
|  * We use the famous Rain/Cloudy/Sprinkler Example of [Russell & Norvig, 2009, | ||||
|  * p529]. You may be familiar with other graphical model packages like BNT | ||||
|  * (available at http://bnt.googlecode.com/svn/trunk/docs/usage.html) where this
 | ||||
|  * is used as an example. The following demo is the same as that in the above link, | ||||
|  * except that everything is using GTSAM. | ||||
|  */ | ||||
| 
 | ||||
| #include <gtsam/discrete/DiscreteFactorGraph.h> | ||||
| #include <gtsam/discrete/DiscreteSequentialSolver.h> | ||||
| #include <gtsam/discrete/DiscreteMarginals.h> | ||||
| 
 | ||||
| #include <iomanip> | ||||
| 
 | ||||
| using namespace std; | ||||
| using namespace gtsam; | ||||
| 
 | ||||
| int main(int argc, char **argv) { | ||||
|   // Define keys and a print function
 | ||||
|   Key C(1), S(2), R(3), W(4); | ||||
|   auto print = [=](DiscreteFactor::sharedValues values) { | ||||
|     cout << boolalpha << "Cloudy = " << static_cast<bool>((*values)[C]) | ||||
|          << "  Sprinkler = " << static_cast<bool>((*values)[S]) | ||||
|          << "  Rain = " << boolalpha << static_cast<bool>((*values)[R]) | ||||
|          << "  WetGrass = " << static_cast<bool>((*values)[W]) << endl; | ||||
|   }; | ||||
| 
 | ||||
|   // We assume binary state variables
 | ||||
|   // we have 0 == "False" and 1 == "True"
 | ||||
|   const size_t nrStates = 2; | ||||
| 
 | ||||
|   // define variables
 | ||||
|   DiscreteKey Cloudy(1, nrStates), Sprinkler(2, nrStates), Rain(3, nrStates), | ||||
|       WetGrass(4, nrStates); | ||||
|   DiscreteKey Cloudy(C, nrStates), Sprinkler(S, nrStates), Rain(R, nrStates), | ||||
|       WetGrass(W, nrStates); | ||||
| 
 | ||||
|   // create Factor Graph of the bayes net
 | ||||
|   DiscreteFactorGraph graph; | ||||
| 
 | ||||
|   // add factors
 | ||||
|   graph.add(Cloudy, "0.5 0.5"); //P(Cloudy)
 | ||||
|   graph.add(Cloudy & Sprinkler, "0.5 0.5 0.9 0.1"); //P(Sprinkler | Cloudy)
 | ||||
|   graph.add(Cloudy & Rain, "0.8 0.2 0.2 0.8"); //P(Rain | Cloudy)
 | ||||
|   graph.add(Cloudy, "0.5 0.5");                      // P(Cloudy)
 | ||||
|   graph.add(Cloudy & Sprinkler, "0.5 0.5 0.9 0.1");  // P(Sprinkler | Cloudy)
 | ||||
|   graph.add(Cloudy & Rain, "0.8 0.2 0.2 0.8");       // P(Rain | Cloudy)
 | ||||
|   graph.add(Sprinkler & Rain & WetGrass, | ||||
|       "1 0 0.1 0.9 0.1 0.9 0.001 0.99"); //P(WetGrass | Sprinkler, Rain)
 | ||||
|             "1 0 0.1 0.9 0.1 0.9 0.001 0.99");  // P(WetGrass | Sprinkler, Rain)
 | ||||
| 
 | ||||
|   // Alternatively we can also create a DiscreteBayesNet, add DiscreteConditional
 | ||||
|   // factors and create a FactorGraph from it. (See testDiscreteBayesNet.cpp)
 | ||||
|   // Alternatively we can also create a DiscreteBayesNet, add
 | ||||
|   // DiscreteConditional factors and create a FactorGraph from it. (See
 | ||||
|   // testDiscreteBayesNet.cpp)
 | ||||
| 
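  // A rough, hypothetical sketch of that alternative (not part of this
  // example), using the Signature syntax from testDiscreteBayesNet.cpp;
  // the ratio strings below only approximate the tables used above:
  //   DiscreteBayesNet bayesNet;
  //   bayesNet.add(Cloudy % "50/50");
  //   bayesNet.add(Sprinkler | Cloudy = "5/5 9/1");
  //   bayesNet.add(Rain | Cloudy = "8/2 2/8");
  //   bayesNet.add((WetGrass | Sprinkler, Rain) = "1/0 1/9 1/9 1/990");
  //   DiscreteFactorGraph graphFromBayesNet(bayesNet);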
 | ||||
|   // Since this is a relatively small distribution, we might as well print
 | ||||
|   // the whole distribution.
 | ||||
|   cout << "Distribution of Example: " << endl; | ||||
|   cout << setw(11) << "Cloudy(C)" << setw(14) << "Sprinkler(S)" << setw(10) | ||||
|       << "Rain(R)" << setw(14) << "WetGrass(W)" << setw(15) << "P(C,S,R,W)" | ||||
|       << endl; | ||||
|        << "Rain(R)" << setw(14) << "WetGrass(W)" << setw(15) << "P(C,S,R,W)" | ||||
|        << endl; | ||||
|   for (size_t a = 0; a < nrStates; a++) | ||||
|     for (size_t m = 0; m < nrStates; m++) | ||||
|       for (size_t h = 0; h < nrStates; h++) | ||||
|         for (size_t c = 0; c < nrStates; c++) { | ||||
|           DiscreteFactor::Values values; | ||||
|           values[Cloudy.first] = c; | ||||
|           values[Sprinkler.first] = h; | ||||
|           values[Rain.first] = m; | ||||
|           values[WetGrass.first] = a; | ||||
|           values[C] = c; | ||||
|           values[S] = h; | ||||
|           values[R] = m; | ||||
|           values[W] = a; | ||||
|           double prodPot = graph(values); | ||||
|           cout << boolalpha << setw(8) << (bool) c << setw(14) | ||||
|               << (bool) h << setw(12) << (bool) m << setw(13) | ||||
|               << (bool) a << setw(16) << prodPot << endl; | ||||
|           cout << setw(8) << static_cast<bool>(c) << setw(14) | ||||
|                << static_cast<bool>(h) << setw(12) << static_cast<bool>(m) | ||||
|                << setw(13) << static_cast<bool>(a) << setw(16) << prodPot | ||||
|                << endl; | ||||
|         } | ||||
| 
 | ||||
| 
 | ||||
|   // "Most Probable Explanation", i.e., configuration with largest value
 | ||||
|   DiscreteSequentialSolver solver(graph); | ||||
|   DiscreteFactor::sharedValues optimalDecoding = solver.optimize(); | ||||
|   cout <<"\nMost Probable Explanation (MPE):" << endl; | ||||
|   cout << boolalpha << "Cloudy = " << (bool)(*optimalDecoding)[Cloudy.first] | ||||
|                   << "  Sprinkler = " << (bool)(*optimalDecoding)[Sprinkler.first] | ||||
|                   << "  Rain = " << boolalpha << (bool)(*optimalDecoding)[Rain.first] | ||||
|                   << "  WetGrass = " << (bool)(*optimalDecoding)[WetGrass.first]<< endl; | ||||
|   DiscreteFactor::sharedValues mpe = graph.eliminateSequential()->optimize(); | ||||
|   cout << "\nMost Probable Explanation (MPE):" << endl; | ||||
|   print(mpe); | ||||
| 
 | ||||
|   // "Inference" We show an inference query like: probability that the Sprinkler
 | ||||
|   // was on given that it is not cloudy, i.e. P( S | C=0) = ?
 | ||||
| 
 | ||||
|   // "Inference" We show an inference query like: probability that the Sprinkler was on;
 | ||||
|   // given that the grass is wet i.e. P( S | W=1) =?
 | ||||
|   cout << "\nInference Query: Probability of Sprinkler being on given Grass is Wet" << endl; | ||||
|   // add evidence that it is not Cloudy
 | ||||
|   graph.add(Cloudy, "1 0"); | ||||
| 
 | ||||
|   // Method 1: we can compute the joint marginal P(S,W) and from that we can compute
 | ||||
|   // P(S | W=1) = P(S,W=1)/P(W=1) We do this in following three steps..
 | ||||
|   // solve again, now with evidence
 | ||||
|   DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential(); | ||||
|   DiscreteFactor::sharedValues mpe_with_evidence = chordal->optimize(); | ||||
| 
 | ||||
|   //Step1: Compute P(S,W)
 | ||||
|   DiscreteFactorGraph jointFG; | ||||
|   jointFG = *solver.jointFactorGraph(DiscreteKeys(Sprinkler & WetGrass).indices()); | ||||
|   DecisionTreeFactor probSW = jointFG.product(); | ||||
|   cout << "\nMPE given C=0:" << endl; | ||||
|   print(mpe_with_evidence); | ||||
| 
 | ||||
|   //Step2: Compute P(W)
 | ||||
|   DiscreteFactor::shared_ptr probW = solver.marginalFactor(WetGrass.first); | ||||
| 
 | ||||
|   //Step3: Computer P(S | W=1) = P(S,W=1)/P(W=1)
 | ||||
|   DiscreteFactor::Values values; | ||||
|   values[WetGrass.first] = 1; | ||||
| 
 | ||||
|   //print P(S=0|W=1)
 | ||||
|   values[Sprinkler.first] = 0; | ||||
|   cout << "P(S=0|W=1) = " << probSW(values)/(*probW)(values) << endl; | ||||
| 
 | ||||
|   //print P(S=1|W=1)
 | ||||
|   values[Sprinkler.first] = 1; | ||||
|   cout << "P(S=1|W=1) = " << probSW(values)/(*probW)(values) << endl; | ||||
| 
 | ||||
|   // TODO: Method 2 : One way is to modify the factor graph to
 | ||||
|   // incorporate the evidence node and compute the marginal
 | ||||
|   // TODO: graph.addEvidence(Cloudy,0);
 | ||||
|   // we can also calculate arbitrary marginals:
 | ||||
|   DiscreteMarginals marginals(graph); | ||||
|   cout << "\nP(S=1|C=0):" << marginals.marginalProbabilities(Sprinkler)[1] | ||||
|        << endl; | ||||
|   cout << "\nP(R=0|C=0):" << marginals.marginalProbabilities(Rain)[0] << endl; | ||||
|   cout << "\nP(W=1|C=0):" << marginals.marginalProbabilities(WetGrass)[1] | ||||
|        << endl; | ||||
| 
 | ||||
|   // We can also sample from it
 | ||||
|   cout << "\n10 samples:" << endl; | ||||
|   for (size_t i = 0; i < 10; i++) { | ||||
|     DiscreteFactor::sharedValues sample = chordal->sample(); | ||||
|     print(sample); | ||||
|   } | ||||
|   return 0; | ||||
| } | ||||
|  |  | |||
|  | @ -0,0 +1,353 @@ | |||
| /* ----------------------------------------------------------------------------
 | ||||
| 
 | ||||
|  * GTSAM Copyright 2010, Georgia Tech Research Corporation, | ||||
|  * Atlanta, Georgia 30332-0415 | ||||
|  * All Rights Reserved | ||||
|  * Authors: Frank Dellaert, et al. (see THANKS for the full author list) | ||||
| 
 | ||||
|  * See LICENSE for the license information | ||||
| 
 | ||||
|  * -------------------------------------------------------------------------- */ | ||||
| 
 | ||||
| /**
 | ||||
|  * @file IMUKittiExampleGPS | ||||
|  * @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI VISION BENCHMARK SUITE | ||||
|  * @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ Electronics | ||||
|  */ | ||||
| 
 | ||||
| // GTSAM related includes.
 | ||||
| #include <gtsam/navigation/CombinedImuFactor.h> | ||||
| #include <gtsam/navigation/GPSFactor.h> | ||||
| #include <gtsam/navigation/ImuFactor.h> | ||||
| #include <gtsam/slam/dataset.h> | ||||
| #include <gtsam/slam/BetweenFactor.h> | ||||
| #include <gtsam/slam/PriorFactor.h> | ||||
| #include <gtsam/nonlinear/ISAM2.h> | ||||
| #include <gtsam/nonlinear/ISAM2Params.h> | ||||
| #include <gtsam/nonlinear/NonlinearFactorGraph.h> | ||||
| #include <gtsam/inference/Symbol.h> | ||||
| 
 | ||||
| #include <cstring> | ||||
| #include <fstream> | ||||
| #include <iostream> | ||||
| 
 | ||||
| using namespace std; | ||||
| using namespace gtsam; | ||||
| 
 | ||||
| using symbol_shorthand::X;  // Pose3 (x,y,z,r,p,y)
 | ||||
| using symbol_shorthand::V;  // Vel   (xdot,ydot,zdot)
 | ||||
| using symbol_shorthand::B;  // Bias  (ax,ay,az,gx,gy,gz)
 | ||||
| 
 | ||||
| struct KittiCalibration { | ||||
|     double body_ptx; | ||||
|     double body_pty; | ||||
|     double body_ptz; | ||||
|     double body_prx; | ||||
|     double body_pry; | ||||
|     double body_prz; | ||||
|     double accelerometer_sigma; | ||||
|     double gyroscope_sigma; | ||||
|     double integration_sigma; | ||||
|     double accelerometer_bias_sigma; | ||||
|     double gyroscope_bias_sigma; | ||||
|     double average_delta_t; | ||||
| }; | ||||
| 
 | ||||
| struct ImuMeasurement { | ||||
|     double time; | ||||
|     double dt; | ||||
|     Vector3 accelerometer; | ||||
|     Vector3 gyroscope;  // omega
 | ||||
| }; | ||||
| 
 | ||||
| struct GpsMeasurement { | ||||
|     double time; | ||||
|     Vector3 position;  // x,y,z
 | ||||
| }; | ||||
| 
 | ||||
| const string output_filename = "IMUKittiExampleGPSResults.csv"; | ||||
| 
 | ||||
| void loadKittiData(KittiCalibration& kitti_calibration, | ||||
|                    vector<ImuMeasurement>& imu_measurements, | ||||
|                    vector<GpsMeasurement>& gps_measurements) { | ||||
|     string line; | ||||
| 
 | ||||
|     // Read IMU metadata and compute relative sensor pose transforms
 | ||||
|     // BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma GyroscopeSigma IntegrationSigma
 | ||||
|     // AccelerometerBiasSigma GyroscopeBiasSigma AverageDeltaT
 | ||||
|     string imu_metadata_file = findExampleDataFile("KittiEquivBiasedImu_metadata.txt"); | ||||
|     ifstream imu_metadata(imu_metadata_file.c_str()); | ||||
| 
 | ||||
|     printf("-- Reading sensor metadata\n"); | ||||
| 
 | ||||
|     getline(imu_metadata, line, '\n');  // ignore the first line
 | ||||
| 
 | ||||
|     // Load Kitti calibration
 | ||||
|     getline(imu_metadata, line, '\n'); | ||||
|     sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf", | ||||
|            &kitti_calibration.body_ptx, | ||||
|            &kitti_calibration.body_pty, | ||||
|            &kitti_calibration.body_ptz, | ||||
|            &kitti_calibration.body_prx, | ||||
|            &kitti_calibration.body_pry, | ||||
|            &kitti_calibration.body_prz, | ||||
|            &kitti_calibration.accelerometer_sigma, | ||||
|            &kitti_calibration.gyroscope_sigma, | ||||
|            &kitti_calibration.integration_sigma, | ||||
|            &kitti_calibration.accelerometer_bias_sigma, | ||||
|            &kitti_calibration.gyroscope_bias_sigma, | ||||
|            &kitti_calibration.average_delta_t); | ||||
|     printf("IMU metadata: %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n", | ||||
|            kitti_calibration.body_ptx, | ||||
|            kitti_calibration.body_pty, | ||||
|            kitti_calibration.body_ptz, | ||||
|            kitti_calibration.body_prx, | ||||
|            kitti_calibration.body_pry, | ||||
|            kitti_calibration.body_prz, | ||||
|            kitti_calibration.accelerometer_sigma, | ||||
|            kitti_calibration.gyroscope_sigma, | ||||
|            kitti_calibration.integration_sigma, | ||||
|            kitti_calibration.accelerometer_bias_sigma, | ||||
|            kitti_calibration.gyroscope_bias_sigma, | ||||
|            kitti_calibration.average_delta_t); | ||||
| 
 | ||||
|     // Read IMU data
 | ||||
|     // Time dt accelX accelY accelZ omegaX omegaY omegaZ
 | ||||
|     string imu_data_file = findExampleDataFile("KittiEquivBiasedImu.txt"); | ||||
|     printf("-- Reading IMU measurements from file\n"); | ||||
|     { | ||||
|         ifstream imu_data(imu_data_file.c_str()); | ||||
|         getline(imu_data, line, '\n');  // ignore the first line
 | ||||
| 
 | ||||
|         double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0, gyro_y = 0, gyro_z = 0; | ||||
|         while (!imu_data.eof()) { | ||||
|             getline(imu_data, line, '\n'); | ||||
|             sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf", | ||||
|                    &time, &dt, | ||||
|                    &acc_x, &acc_y, &acc_z, | ||||
|                    &gyro_x, &gyro_y, &gyro_z); | ||||
| 
 | ||||
|             ImuMeasurement measurement; | ||||
|             measurement.time = time; | ||||
|             measurement.dt = dt; | ||||
|             measurement.accelerometer = Vector3(acc_x, acc_y, acc_z); | ||||
|             measurement.gyroscope = Vector3(gyro_x, gyro_y, gyro_z); | ||||
|             imu_measurements.push_back(measurement); | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     // Read GPS data
 | ||||
|     // Time,X,Y,Z
 | ||||
|     string gps_data_file = findExampleDataFile("KittiGps_converted.txt"); | ||||
|     printf("-- Reading GPS measurements from file\n"); | ||||
|     { | ||||
|         ifstream gps_data(gps_data_file.c_str()); | ||||
|         getline(gps_data, line, '\n');  // ignore the first line
 | ||||
| 
 | ||||
|         double time = 0, gps_x = 0, gps_y = 0, gps_z = 0; | ||||
|         while (!gps_data.eof()) { | ||||
|             getline(gps_data, line, '\n'); | ||||
|             sscanf(line.c_str(), "%lf,%lf,%lf,%lf", &time, &gps_x, &gps_y, &gps_z); | ||||
| 
 | ||||
|             GpsMeasurement measurement; | ||||
|             measurement.time = time; | ||||
|             measurement.position = Vector3(gps_x, gps_y, gps_z); | ||||
|             gps_measurements.push_back(measurement); | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| int main(int argc, char* argv[]) { | ||||
|     KittiCalibration kitti_calibration; | ||||
|     vector<ImuMeasurement> imu_measurements; | ||||
|     vector<GpsMeasurement> gps_measurements; | ||||
|     loadKittiData(kitti_calibration, imu_measurements, gps_measurements); | ||||
| 
 | ||||
|     Vector6 BodyP = (Vector(6) << kitti_calibration.body_ptx, kitti_calibration.body_pty, kitti_calibration.body_ptz, | ||||
|                                   kitti_calibration.body_prx, kitti_calibration.body_pry, kitti_calibration.body_prz) | ||||
|                     .finished(); | ||||
|     auto body_T_imu = Pose3::Expmap(BodyP); | ||||
|     if (!body_T_imu.equals(Pose3(), 1e-5)) { | ||||
|         printf("Currently only support IMUinBody is identity, i.e. IMU and body frame are the same"); | ||||
|         exit(-1); | ||||
|     } | ||||
| 
 | ||||
|     // Configure different variables
 | ||||
|     double t_offset = gps_measurements[0].time; | ||||
|     size_t first_gps_pose = 1; | ||||
|     size_t gps_skip = 10;  // Skip this many GPS measurements each time
 | ||||
|     double g = 9.8; | ||||
|     auto w_coriolis = Vector3();  // zero vector
 | ||||
| 
 | ||||
|     // Configure noise models
 | ||||
|     auto noise_model_gps = noiseModel::Diagonal::Precisions((Vector(6) << Vector3::Constant(0), | ||||
|                                                                           Vector3::Constant(1.0/0.07)) | ||||
|                                                             .finished()); | ||||
| 
 | ||||
|     // Set initial conditions for the estimated trajectory
 | ||||
|     // initial pose is the reference frame (navigation frame)
 | ||||
|     auto current_pose_global = Pose3(Rot3(), gps_measurements[first_gps_pose].position); | ||||
|     auto current_velocity_global = Vector3();     // the vehicle is stationary at the beginning at position 0,0,0
 | ||||
|     auto current_bias = imuBias::ConstantBias();  // init with zero bias
 | ||||
| 
 | ||||
|     auto sigma_init_x = noiseModel::Diagonal::Precisions((Vector(6) << Vector3::Constant(0), | ||||
|                                                                        Vector3::Constant(1.0)) | ||||
|                                                          .finished()); | ||||
|     auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0)); | ||||
|     auto sigma_init_b = noiseModel::Diagonal::Sigmas((Vector(6) << Vector3::Constant(0.100), | ||||
|                                                                    Vector3::Constant(5.00e-05)) | ||||
|                                                      .finished()); | ||||
| 
 | ||||
|     // Set IMU preintegration parameters
 | ||||
|     Matrix33 measured_acc_cov = I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2); | ||||
|     Matrix33 measured_omega_cov = I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2); | ||||
|     // error committed in integrating position from velocities
 | ||||
|     Matrix33 integration_error_cov = I_3x3 * pow(kitti_calibration.integration_sigma, 2); | ||||
| 
 | ||||
|     auto imu_params = PreintegratedImuMeasurements::Params::MakeSharedU(g); | ||||
|     imu_params->accelerometerCovariance = measured_acc_cov;     // acc white noise in continuous
 | ||||
|     imu_params->integrationCovariance = integration_error_cov;  // integration uncertainty continuous
 | ||||
|     imu_params->gyroscopeCovariance = measured_omega_cov;       // gyro white noise in continuous
 | ||||
|     imu_params->omegaCoriolis = w_coriolis; | ||||
| 
 | ||||
|     std::shared_ptr<PreintegratedImuMeasurements> current_summarized_measurement = nullptr; | ||||
| 
 | ||||
|     // Set ISAM2 parameters and create ISAM2 solver object
 | ||||
|     ISAM2Params isam_params; | ||||
|     isam_params.factorization = ISAM2Params::CHOLESKY; | ||||
|     isam_params.relinearizeSkip = 10; | ||||
| 
 | ||||
|     ISAM2 isam(isam_params); | ||||
| 
 | ||||
|     // Create the factor graph and values object that will store new factors and values to add to the incremental graph
 | ||||
|     NonlinearFactorGraph new_factors; | ||||
|     Values new_values;  // values storing the initial estimates of new nodes in the factor graph
 | ||||
| 
 | ||||
|     /// Main loop:
 | ||||
|     /// (1) we read the measurements
 | ||||
|     /// (2) we create the corresponding factors in the graph
 | ||||
|     /// (3) we solve the graph to obtain an optimal estimate of the robot trajectory
 | ||||
|     printf("-- Starting main loop: inference is performed at each time step, but we plot trajectory every 10 steps\n"); | ||||
|     size_t j = 0; | ||||
|     for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) { | ||||
|         // At each non-IMU measurement we initialize a new node in the graph
 | ||||
|         auto current_pose_key = X(i); | ||||
|         auto current_vel_key = V(i); | ||||
|         auto current_bias_key = B(i); | ||||
|         double t = gps_measurements[i].time; | ||||
| 
 | ||||
|         if (i == first_gps_pose) { | ||||
|             // Create initial estimate and prior on initial pose, velocity, and biases
 | ||||
|             new_values.insert(current_pose_key, current_pose_global); | ||||
|             new_values.insert(current_vel_key, current_velocity_global); | ||||
|             new_values.insert(current_bias_key, current_bias); | ||||
|             new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, current_pose_global, sigma_init_x); | ||||
|             new_factors.emplace_shared<PriorFactor<Vector3>>(current_vel_key, current_velocity_global, sigma_init_v); | ||||
|             new_factors.emplace_shared<PriorFactor<imuBias::ConstantBias>>(current_bias_key, current_bias, sigma_init_b); | ||||
|         } else { | ||||
|             double t_previous = gps_measurements[i-1].time; | ||||
| 
 | ||||
|             // Summarize IMU data between the previous GPS measurement and now
 | ||||
|             current_summarized_measurement = std::make_shared<PreintegratedImuMeasurements>(imu_params, current_bias); | ||||
|             static size_t included_imu_measurement_count = 0; | ||||
|             while (j < imu_measurements.size() && imu_measurements[j].time <= t) { | ||||
|                 if (imu_measurements[j].time >= t_previous) { | ||||
|                     current_summarized_measurement->integrateMeasurement(imu_measurements[j].accelerometer, | ||||
|                                                                          imu_measurements[j].gyroscope, | ||||
|                                                                          imu_measurements[j].dt); | ||||
|                     included_imu_measurement_count++; | ||||
|                 } | ||||
|                 j++; | ||||
|             } | ||||
| 
 | ||||
|             // Create IMU factor
 | ||||
|             auto previous_pose_key = X(i-1); | ||||
|             auto previous_vel_key = V(i-1); | ||||
|             auto previous_bias_key = B(i-1); | ||||
| 
 | ||||
|             new_factors.emplace_shared<ImuFactor>(previous_pose_key, previous_vel_key, | ||||
|                                                   current_pose_key, current_vel_key, | ||||
|                                                   previous_bias_key, *current_summarized_measurement); | ||||
| 
 | ||||
|             // Bias evolution as given in the IMU metadata
 | ||||
|             auto sigma_between_b = noiseModel::Diagonal::Sigmas((Vector(6) << | ||||
|                    Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.accelerometer_bias_sigma), | ||||
|                    Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.gyroscope_bias_sigma)) | ||||
|                  .finished()); | ||||
|             new_factors.emplace_shared<BetweenFactor<imuBias::ConstantBias>>(previous_bias_key, | ||||
|                                                                              current_bias_key, | ||||
|                                                                              imuBias::ConstantBias(), | ||||
|                                                                              sigma_between_b); | ||||
| 
 | ||||
|             // Create GPS factor
 | ||||
|             auto gps_pose = Pose3(current_pose_global.rotation(), gps_measurements[i].position); | ||||
|             if ((i % gps_skip) == 0) { | ||||
|                 new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, gps_pose, noise_model_gps); | ||||
|                 new_values.insert(current_pose_key, gps_pose); | ||||
| 
 | ||||
|                 printf("################ POSE INCLUDED AT TIME %lf ################\n", t); | ||||
|                 gps_pose.translation().print(); | ||||
|                 printf("\n\n"); | ||||
|             } else { | ||||
|                 new_values.insert(current_pose_key, current_pose_global); | ||||
|             } | ||||
| 
 | ||||
|             // Add initial values for velocity and bias based on the previous estimates
 | ||||
|             new_values.insert(current_vel_key, current_velocity_global); | ||||
|             new_values.insert(current_bias_key, current_bias); | ||||
| 
 | ||||
|             // Update solver
 | ||||
|             // =======================================================================
 | ||||
|             // We accumulate 2*GPSskip GPS measurements before updating the solver at
 | ||||
|             // first so that the heading becomes observable.
 | ||||
|             if (i > (first_gps_pose + 2*gps_skip)) { | ||||
|                 printf("################ NEW FACTORS AT TIME %lf ################\n", t); | ||||
|                 new_factors.print(); | ||||
| 
 | ||||
|                 isam.update(new_factors, new_values); | ||||
| 
 | ||||
|                 // Reset the newFactors and newValues list
 | ||||
|                 new_factors.resize(0); | ||||
|                 new_values.clear(); | ||||
| 
 | ||||
|                 // Extract the result/current estimates
 | ||||
|                 Values result = isam.calculateEstimate(); | ||||
| 
 | ||||
|                 current_pose_global = result.at<Pose3>(current_pose_key); | ||||
|                 current_velocity_global = result.at<Vector3>(current_vel_key); | ||||
|                 current_bias = result.at<imuBias::ConstantBias>(current_bias_key); | ||||
| 
 | ||||
|                 printf("\n################ POSE AT TIME %lf ################\n", t); | ||||
|                 current_pose_global.print(); | ||||
|                 printf("\n\n"); | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     // Save results to file
 | ||||
|     printf("\nWriting results to file...\n"); | ||||
|     FILE* fp_out = fopen(output_filename.c_str(), "w+"); | ||||
|     fprintf(fp_out, "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n"); | ||||
| 
 | ||||
|     Values result = isam.calculateEstimate(); | ||||
|     for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) { | ||||
|         auto pose_key = X(i); | ||||
|         auto vel_key = V(i); | ||||
|         auto bias_key = B(i); | ||||
| 
 | ||||
|         auto pose = result.at<Pose3>(pose_key); | ||||
|         auto velocity = result.at<Vector3>(vel_key); | ||||
|         auto bias = result.at<imuBias::ConstantBias>(bias_key); | ||||
| 
 | ||||
|         auto pose_quat = pose.rotation().toQuaternion(); | ||||
|         auto gps = gps_measurements[i].position; | ||||
| 
 | ||||
|         fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n", | ||||
|                 gps_measurements[i].time, | ||||
|                 pose.x(), pose.y(), pose.z(), | ||||
|                 pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(), | ||||
|                 gps(0), gps(1), gps(2)); | ||||
|     } | ||||
| 
 | ||||
|     fclose(fp_out); | ||||
| } | ||||
|  | @ -10,7 +10,7 @@ | |||
|  * -------------------------------------------------------------------------- */ | ||||
| 
 | ||||
| /**
 | ||||
|  * @file small.cpp | ||||
|  * @file UGM_chain.cpp | ||||
|  * @brief UGM (undirected graphical model) examples: chain | ||||
|  * @author Frank Dellaert | ||||
|  * @author Abhijit Kundu | ||||
|  | @ -19,10 +19,9 @@ | |||
|  * for more explanation. This code demos the same example using GTSAM. | ||||
|  */ | ||||
| 
 | ||||
| #include <gtsam/discrete/DiscreteFactorGraph.h> | ||||
| #include <gtsam/discrete/DiscreteSequentialSolver.h> | ||||
| #include <gtsam/discrete/DiscreteMarginals.h> | ||||
| #include <gtsam/base/timing.h> | ||||
| #include <gtsam/discrete/DiscreteFactorGraph.h> | ||||
| #include <gtsam/discrete/DiscreteMarginals.h> | ||||
| 
 | ||||
| #include <iomanip> | ||||
| 
 | ||||
|  | @ -30,9 +29,8 @@ using namespace std; | |||
| using namespace gtsam; | ||||
| 
 | ||||
| int main(int argc, char** argv) { | ||||
| 
 | ||||
|     // Set Number of Nodes in the Graph
 | ||||
|     const int nrNodes = 60; | ||||
|   // Set Number of Nodes in the Graph
 | ||||
|   const int nrNodes = 60; | ||||
| 
 | ||||
|   // Each node takes 1 of 7 possible states denoted by 0-6 in the following order:
 | ||||
|   // ["VideoGames"  "Industry"  "GradSchool"  "VideoGames(with PhD)"
 | ||||
|  | @ -40,70 +38,55 @@ int main(int argc, char** argv) { | |||
|   const size_t nrStates = 7; | ||||
| 
 | ||||
|   // define variables
 | ||||
|     vector<DiscreteKey> nodes; | ||||
|     for (int i = 0; i < nrNodes; i++){ | ||||
|         DiscreteKey dk(i, nrStates); | ||||
|         nodes.push_back(dk); | ||||
|     } | ||||
|   vector<DiscreteKey> nodes; | ||||
|   for (int i = 0; i < nrNodes; i++) { | ||||
|     DiscreteKey dk(i, nrStates); | ||||
|     nodes.push_back(dk); | ||||
|   } | ||||
| 
 | ||||
|   // create graph
 | ||||
|   DiscreteFactorGraph graph; | ||||
| 
 | ||||
|   // add node potentials
 | ||||
|   graph.add(nodes[0], ".3 .6 .1 0 0 0 0"); | ||||
|     for (int i = 1; i < nrNodes; i++) | ||||
|         graph.add(nodes[i], "1 1 1 1 1 1 1"); | ||||
|   for (int i = 1; i < nrNodes; i++) graph.add(nodes[i], "1 1 1 1 1 1 1"); | ||||
| 
 | ||||
|     const std::string edgePotential =   ".08 .9 .01 0 0 0 .01 " | ||||
|                                         ".03 .95 .01 0 0 0 .01 " | ||||
|                                         ".06 .06 .75 .05 .05 .02 .01 " | ||||
|                                         "0 0 0 .3 .6 .09 .01 " | ||||
|                                         "0 0 0 .02 .95 .02 .01 " | ||||
|                                         "0 0 0 .01 .01 .97 .01 " | ||||
|                                         "0 0 0 0 0 0 1"; | ||||
|   const std::string edgePotential = | ||||
|       ".08 .9 .01 0 0 0 .01 " | ||||
|       ".03 .95 .01 0 0 0 .01 " | ||||
|       ".06 .06 .75 .05 .05 .02 .01 " | ||||
|       "0 0 0 .3 .6 .09 .01 " | ||||
|       "0 0 0 .02 .95 .02 .01 " | ||||
|       "0 0 0 .01 .01 .97 .01 " | ||||
|       "0 0 0 0 0 0 1"; | ||||
| 
 | ||||
|   // add edge potentials
 | ||||
|   for (int i = 0; i < nrNodes - 1; i++) | ||||
|     graph.add(nodes[i] & nodes[i + 1], edgePotential); | ||||
| 
 | ||||
|   cout << "Created Factor Graph with " << nrNodes << " variable nodes and " | ||||
|       << graph.size() << " factors (Unary+Edge)."; | ||||
|        << graph.size() << " factors (Unary+Edge)."; | ||||
| 
 | ||||
|   // "Decoding", i.e., configuration with largest value
 | ||||
|   // We use sequential variable elimination
 | ||||
|   DiscreteSequentialSolver solver(graph); | ||||
|   DiscreteFactor::sharedValues optimalDecoding = solver.optimize(); | ||||
|   DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential(); | ||||
|   DiscreteFactor::sharedValues optimalDecoding = chordal->optimize(); | ||||
|   optimalDecoding->print("\nMost Probable Explanation (optimalDecoding)\n"); | ||||
| 
 | ||||
|   // "Inference" Computing marginals for each node
 | ||||
| 
 | ||||
| 
 | ||||
|   cout << "\nComputing Node Marginals ..(Sequential Elimination)" << endl; | ||||
|   gttic_(Sequential); | ||||
|   for (vector<DiscreteKey>::iterator itr = nodes.begin(); itr != nodes.end(); | ||||
|       ++itr) { | ||||
|     //Compute the marginal
 | ||||
|     Vector margProbs = solver.marginalProbabilities(*itr); | ||||
| 
 | ||||
|     //Print the marginals
 | ||||
|     cout << "Node#" << setw(4) << itr->first << " :  "; | ||||
|     print(margProbs); | ||||
|   } | ||||
|   gttoc_(Sequential); | ||||
| 
 | ||||
|   // Here we'll make use of DiscreteMarginals class, which makes use of
 | ||||
|   // bayes-tree based shortcut evaluation of marginals
 | ||||
|   DiscreteMarginals marginals(graph); | ||||
| 
 | ||||
|   cout << "\nComputing Node Marginals ..(BayesTree based)" << endl; | ||||
|   gttic_(Multifrontal); | ||||
|   for (vector<DiscreteKey>::iterator itr = nodes.begin(); itr != nodes.end(); | ||||
|       ++itr) { | ||||
|     //Compute the marginal
 | ||||
|     Vector margProbs = marginals.marginalProbabilities(*itr); | ||||
|   for (vector<DiscreteKey>::iterator it = nodes.begin(); it != nodes.end(); | ||||
|        ++it) { | ||||
|     // Compute the marginal
 | ||||
|     Vector margProbs = marginals.marginalProbabilities(*it); | ||||
| 
 | ||||
|     //Print the marginals
 | ||||
|     cout << "Node#" << setw(4) << itr->first << " :  "; | ||||
|     // Print the marginals
 | ||||
|     cout << "Node#" << setw(4) << it->first << " :  "; | ||||
|     print(margProbs); | ||||
|   } | ||||
|   gttoc_(Multifrontal); | ||||
|  | @ -111,4 +94,3 @@ int main(int argc, char** argv) { | |||
|   tictoc_print_(); | ||||
|   return 0; | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -10,15 +10,16 @@ | |||
|  * -------------------------------------------------------------------------- */ | ||||
| 
 | ||||
| /**
 | ||||
|  * @file small.cpp | ||||
|  * @file UGM_small.cpp | ||||
|  * @brief UGM (undirected graphical model) examples: small | ||||
|  * @author Frank Dellaert | ||||
|  * | ||||
|  * See http://www.di.ens.fr/~mschmidt/Software/UGM/small.html
 | ||||
|  */ | ||||
| 
 | ||||
| #include <gtsam/base/Vector.h> | ||||
| #include <gtsam/discrete/DiscreteFactorGraph.h> | ||||
| #include <gtsam/discrete/DiscreteSequentialSolver.h> | ||||
| #include <gtsam/discrete/DiscreteMarginals.h> | ||||
| 
 | ||||
| using namespace std; | ||||
| using namespace gtsam; | ||||
|  | @ -61,24 +62,24 @@ int main(int argc, char** argv) { | |||
| 
 | ||||
|   // "Decoding", i.e., configuration with largest value (MPE)
 | ||||
|   // We use sequential variable elimination
 | ||||
|   DiscreteSequentialSolver solver(graph); | ||||
|   DiscreteFactor::sharedValues optimalDecoding = solver.optimize(); | ||||
|   DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential(); | ||||
|   DiscreteFactor::sharedValues optimalDecoding = chordal->optimize(); | ||||
|   optimalDecoding->print("\noptimalDecoding"); | ||||
| 
 | ||||
|   // "Inference" Computing marginals
 | ||||
|   cout << "\nComputing Node Marginals .." << endl; | ||||
|   Vector margProbs; | ||||
|   DiscreteMarginals marginals(graph); | ||||
| 
 | ||||
|   margProbs = solver.marginalProbabilities(Cathy); | ||||
|   Vector margProbs = marginals.marginalProbabilities(Cathy); | ||||
|   print(margProbs, "Cathy's Node Marginal:"); | ||||
| 
 | ||||
|   margProbs = solver.marginalProbabilities(Heather); | ||||
|   margProbs = marginals.marginalProbabilities(Heather); | ||||
|   print(margProbs, "Heather's Node Marginal"); | ||||
| 
 | ||||
|   margProbs = solver.marginalProbabilities(Mark); | ||||
|   margProbs = marginals.marginalProbabilities(Mark); | ||||
|   print(margProbs, "Mark's Node Marginal"); | ||||
| 
 | ||||
|   margProbs = solver.marginalProbabilities(Allison); | ||||
|   margProbs = marginals.marginalProbabilities(Allison); | ||||
|   print(margProbs, "Allison's Node Marginal"); | ||||
| 
 | ||||
|   return 0; | ||||
|  |  | |||
|  | @ -548,17 +548,47 @@ GTSAM_EXPORT Vector columnNormSquare(const Matrix &A); | |||
| namespace boost { | ||||
|   namespace serialization { | ||||
| 
 | ||||
|     /**
 | ||||
|      * Ref. https://stackoverflow.com/questions/18382457/eigen-and-boostserialize/22903063#22903063
 | ||||
|      *  | ||||
|      * Eigen supports calling resize() on both static and dynamic matrices. | ||||
|      * This allows for a uniform API, with resize having no effect if the static matrix | ||||
|      * is already the correct size. | ||||
|      * https://eigen.tuxfamily.org/dox/group__TutorialMatrixClass.html#TutorialMatrixSizesResizing
 | ||||
|      *  | ||||
|      * We use all the Matrix template parameters to ensure wide compatibility. | ||||
|      *  | ||||
|      * eigen_typekit in ROS uses the same code | ||||
|      * http://docs.ros.org/lunar/api/eigen_typekit/html/eigen__mqueue_8cpp_source.html
 | ||||
|      */ | ||||
| 
 | ||||
|     // split version - sends sizes ahead
 | ||||
|     template<class Archive> | ||||
|     void save(Archive & ar, const gtsam::Matrix & m, unsigned int /*version*/) { | ||||
|     template<class Archive, | ||||
|              typename Scalar_, | ||||
|              int Rows_, | ||||
|              int Cols_, | ||||
|              int Ops_, | ||||
|              int MaxRows_, | ||||
|              int MaxCols_> | ||||
|     void save(Archive & ar, | ||||
|               const Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m, | ||||
|               const unsigned int /*version*/) { | ||||
|       const size_t rows = m.rows(), cols = m.cols(); | ||||
|       ar << BOOST_SERIALIZATION_NVP(rows); | ||||
|       ar << BOOST_SERIALIZATION_NVP(cols); | ||||
|       ar << make_nvp("data", make_array(m.data(), m.size())); | ||||
|     } | ||||
| 
 | ||||
|     template<class Archive> | ||||
|     void load(Archive & ar, gtsam::Matrix & m, unsigned int /*version*/) { | ||||
|     template<class Archive, | ||||
|              typename Scalar_, | ||||
|              int Rows_, | ||||
|              int Cols_, | ||||
|              int Ops_, | ||||
|              int MaxRows_, | ||||
|              int MaxCols_> | ||||
|     void load(Archive & ar, | ||||
|               Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m, | ||||
|               const unsigned int /*version*/) { | ||||
|       size_t rows, cols; | ||||
|       ar >> BOOST_SERIALIZATION_NVP(rows); | ||||
|       ar >> BOOST_SERIALIZATION_NVP(cols); | ||||
|  | @ -566,8 +596,19 @@ namespace boost { | |||
|       ar >> make_nvp("data", make_array(m.data(), m.size())); | ||||
|     } | ||||
| 
 | ||||
|     // templated version of BOOST_SERIALIZATION_SPLIT_FREE(Eigen::Matrix);
 | ||||
|     template<class Archive, | ||||
|              typename Scalar_, | ||||
|              int Rows_, | ||||
|              int Cols_, | ||||
|              int Ops_, | ||||
|              int MaxRows_, | ||||
|              int MaxCols_> | ||||
|     void serialize(Archive & ar, | ||||
|               Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m, | ||||
|               const unsigned int version) { | ||||
|       split_free(ar, m, version); | ||||
|     } | ||||
| 
 | ||||
|   } // namespace serialization
 | ||||
| } // namespace boost
 | ||||
| 
 | ||||
| BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Matrix); | ||||
| 
 | ||||
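As a quick sanity check of the templated free functions above, a round trip through a standard Boost text archive might look like the sketch below (the function name and the use of Matrix::Random are illustrative assumptions, not part of the patch):

#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <gtsam/base/Matrix.h>
#include <sstream>

// Hypothetical round-trip check: save() writes rows/cols then the raw data,
// load() resizes the destination matrix and reads the data back.
void roundTripMatrixExample() {
  const gtsam::Matrix A = gtsam::Matrix::Random(3, 4);
  std::stringstream ss;
  {
    boost::archive::text_oarchive oa(ss);
    oa << A;  // dispatches (via split_free) to save() above
  }
  gtsam::Matrix B;
  {
    boost::archive::text_iarchive ia(ss);
    ia >> B;  // dispatches to load() above; B is resized to 3x4
  }
  // A and B should now be numerically identical.
}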
|  |  | |||
|  | @ -107,9 +107,9 @@ public: | |||
| 
 | ||||
|     // If needed, apply chain rule
 | ||||
|     if (Dpose) | ||||
|     *Dpose = Dpi_pn * *Dpose; | ||||
|       *Dpose = Dpi_pn * *Dpose; | ||||
|     if (Dpoint) | ||||
|     *Dpoint = Dpi_pn * *Dpoint; | ||||
|       *Dpoint = Dpi_pn * *Dpoint; | ||||
| 
 | ||||
|     return pi; | ||||
|   } | ||||
|  |  | |||
|  | @ -292,6 +292,10 @@ class SO : public LieGroup<SO<N>, internal::DimensionSO(N)> { | |||
|                    boost::none) const; | ||||
|   /// @}
 | ||||
| 
 | ||||
|   template <class Archive> | ||||
|   friend void save(Archive&, SO&, const unsigned int); | ||||
|   template <class Archive> | ||||
|   friend void load(Archive&, SO&, const unsigned int); | ||||
|   template <class Archive> | ||||
|   friend void serialize(Archive&, SO&, const unsigned int); | ||||
|   friend class boost::serialization::access; | ||||
|  | @ -329,6 +333,16 @@ template <> | |||
| SOn LieGroup<SOn, Eigen::Dynamic>::between(const SOn& g, DynamicJacobian H1, | ||||
|                                            DynamicJacobian H2) const; | ||||
| 
 | ||||
| /** Serialization function */ | ||||
| template<class Archive> | ||||
| void serialize( | ||||
|   Archive& ar, SOn& Q, | ||||
|   const unsigned int file_version | ||||
| ) { | ||||
|   Matrix& M = Q.matrix_; | ||||
|   ar& M; | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Define the traits. internal::LieGroup provides both Lie group and Testable | ||||
|  */ | ||||
|  |  | |||
|  | @ -90,6 +90,8 @@ public: | |||
|   /// Copy assignment
 | ||||
|   Unit3& operator=(const Unit3 & u) { | ||||
|     p_ = u.p_; | ||||
|     B_ = u.B_; | ||||
|     H_B_ = u.H_B_; | ||||
|     return *this; | ||||
|   } | ||||
| 
 | ||||
|  |  | |||
|  | @ -484,6 +484,15 @@ TEST(Unit3, ErrorBetweenFactor) { | |||
|   } | ||||
| } | ||||
| 
 | ||||
| TEST(Unit3, CopyAssign) { | ||||
|   Unit3 p{1, 0.2, 0.3}; | ||||
| 
 | ||||
|   EXPECT(p.error(p).isZero()); | ||||
| 
 | ||||
|   p = Unit3{-1, 2, 8}; | ||||
|   EXPECT(p.error(p).isZero()); | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST(actualH, Serialization) { | ||||
|   Unit3 p(0, 1, 0); | ||||
|  |  | |||
|  | @ -33,7 +33,7 @@ namespace gtsam { | |||
|  * isotropic. If it is, we extend to 'Dim' dimensions, otherwise we throw an | ||||
|  * error. If defaultToUnit == false, an exception is thrown on unexpected input. | ||||
|  */ | ||||
| boost::shared_ptr<noiseModel::Isotropic> ConvertPose3NoiseModel( | ||||
| GTSAM_EXPORT boost::shared_ptr<noiseModel::Isotropic> ConvertPose3NoiseModel( | ||||
|     const SharedNoiseModel& model, size_t d, bool defaultToUnit = true); | ||||
| 
 | ||||
| /**
 | ||||
|  | @ -125,7 +125,7 @@ class FrobeniusBetweenFactor : public NoiseModelFactor2<Rot, Rot> { | |||
|  * the SO(p) matrices down to a Stiefel manifold of p*d matrices. | ||||
|  * TODO(frank): template on D=2 or 3 | ||||
|  */ | ||||
| class FrobeniusWormholeFactor : public NoiseModelFactor2<SOn, SOn> { | ||||
| class GTSAM_EXPORT FrobeniusWormholeFactor : public NoiseModelFactor2<SOn, SOn> { | ||||
|   Matrix M_;                   ///< measured rotation between R1 and R2
 | ||||
|   size_t p_, pp_, dimension_;  ///< dimensionality constants
 | ||||
|   Matrix G_;                   ///< matrix of vectorized generators
 | ||||
|  |  | |||
|  | @ -207,10 +207,18 @@ protected: | |||
|     Vector ue = cameras.reprojectionError(point, measured_, Fs, E); | ||||
|     if (body_P_sensor_ && Fs) { | ||||
|       const Pose3 sensor_P_body = body_P_sensor_->inverse(); | ||||
|       constexpr int camera_dim = traits<CAMERA>::dimension; | ||||
|       constexpr int pose_dim = traits<Pose3>::dimension; | ||||
| 
 | ||||
|       for (size_t i = 0; i < Fs->size(); i++) { | ||||
|         const Pose3 w_Pose_body = cameras[i].pose() * sensor_P_body; | ||||
|         Matrix J(6, 6); | ||||
|         const Pose3 world_P_body = w_Pose_body.compose(*body_P_sensor_, J); | ||||
|         const Pose3 world_P_body = cameras[i].pose() * sensor_P_body; | ||||
|         Eigen::Matrix<double, camera_dim, camera_dim> J; | ||||
|         J.setZero(); | ||||
|         Eigen::Matrix<double, pose_dim, pose_dim> H; | ||||
|         // Call compose to compute Jacobian for camera extrinsics
 | ||||
|         world_P_body.compose(*body_P_sensor_, H); | ||||
|         // Assign extrinsics part of the Jacobian
 | ||||
|         J.template block<pose_dim, pose_dim>(0, 0) = H; | ||||
|         Fs->at(i) = Fs->at(i) * J; | ||||
|       } | ||||
|     } | ||||
|  |  | |||
|  | @ -442,11 +442,11 @@ void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, | |||
|       auto p = dynamic_cast<const GenericValue<Pose3>*>(&key_value.value); | ||||
|       if (!p) continue; | ||||
|       const Pose3& pose = p->value(); | ||||
|       Point3 t = pose.translation(); | ||||
|       Rot3 R = pose.rotation(); | ||||
|       stream << "VERTEX_SE3:QUAT " << key_value.key << " " << t.x() << " "  << t.y() << " " << t.z() | ||||
|         << " " << R.toQuaternion().x() << " " << R.toQuaternion().y() << " " << R.toQuaternion().z() | ||||
|         << " " << R.toQuaternion().w() << endl; | ||||
|       const Point3 t = pose.translation(); | ||||
|       const auto q = pose.rotation().toQuaternion(); | ||||
|       stream << "VERTEX_SE3:QUAT " << key_value.key << " " << t.x() << " " | ||||
|              << t.y() << " " << t.z() << " " << q.x() << " " << q.y() << " " | ||||
|              << q.z() << " " << q.w() << endl; | ||||
|   } | ||||
| 
 | ||||
|   // save edges (2D or 3D)
 | ||||
|  | @ -486,13 +486,12 @@ void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, | |||
|         throw invalid_argument("writeG2o: invalid noise model!"); | ||||
|       } | ||||
|       Matrix Info = gaussianModel->R().transpose() * gaussianModel->R(); | ||||
|       Pose3 pose3D = factor3D->measured(); | ||||
|       Point3 p = pose3D.translation(); | ||||
|       Rot3 R = pose3D.rotation(); | ||||
| 
 | ||||
|       stream << "EDGE_SE3:QUAT " << factor3D->key1() << " " << factor3D->key2() << " " | ||||
|           << p.x() << " "  << p.y() << " " << p.z()  << " " << R.toQuaternion().x() | ||||
|           << " " << R.toQuaternion().y() << " " << R.toQuaternion().z()  << " " << R.toQuaternion().w(); | ||||
|       const Pose3 pose3D = factor3D->measured(); | ||||
|       const Point3 p = pose3D.translation(); | ||||
|       const auto q = pose3D.rotation().toQuaternion(); | ||||
|       stream << "EDGE_SE3:QUAT " << factor3D->key1() << " " << factor3D->key2() | ||||
|              << " " << p.x() << " " << p.y() << " " << p.z() << " " << q.x() | ||||
|              << " " << q.y() << " " << q.z() << " " << q.w(); | ||||
| 
 | ||||
|       Matrix InfoG2o = I_6x6; | ||||
|       InfoG2o.block(0,0,3,3) = Info.block(3,3,3,3); // cov translation
 | ||||
|  | @ -511,6 +510,11 @@ void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, | |||
|   stream.close(); | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| static Rot3 NormalizedRot3(double w, double x, double y, double z) { | ||||
|   const double norm = sqrt(w * w + x * x + y * y + z * z), f = 1.0 / norm; | ||||
|   return Rot3::Quaternion(f * w, f * x, f * y, f * z); | ||||
| } | ||||
| /* ************************************************************************* */ | ||||
| std::map<Key, Pose3> parse3DPoses(const string& filename) { | ||||
|   ifstream is(filename.c_str()); | ||||
|  | @ -535,14 +539,15 @@ std::map<Key, Pose3> parse3DPoses(const string& filename) { | |||
|       Key id; | ||||
|       double x, y, z, qx, qy, qz, qw; | ||||
|       ls >> id >> x >> y >> z >> qx >> qy >> qz >> qw; | ||||
|       poses.emplace(id, Pose3(Rot3::Quaternion(qw, qx, qy, qz), {x, y, z})); | ||||
|       poses.emplace(id, Pose3(NormalizedRot3(qw, qx, qy, qz), {x, y, z})); | ||||
|     } | ||||
|   } | ||||
|   return poses; | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| BetweenFactorPose3s parse3DFactors(const string& filename,  | ||||
| BetweenFactorPose3s parse3DFactors( | ||||
|     const string& filename, | ||||
|     const noiseModel::Diagonal::shared_ptr& corruptingNoise) { | ||||
|   ifstream is(filename.c_str()); | ||||
|   if (!is) throw invalid_argument("parse3DFactors: can not find file " + filename); | ||||
|  | @ -592,7 +597,7 @@ BetweenFactorPose3s parse3DFactors(const string& filename, | |||
|       mgtsam.block<3, 3>(3, 0) = m.block<3, 3>(3, 0);  // off diagonal | ||||
| 
 | ||||
|       SharedNoiseModel model = noiseModel::Gaussian::Information(mgtsam); | ||||
|       auto R12 = Rot3::Quaternion(qw, qx, qy, qz); | ||||
|       auto R12 = NormalizedRot3(qw, qx, qy, qz); | ||||
|       if (sampler) { | ||||
|         R12 = R12.retract(sampler->sample()); | ||||
|       } | ||||
|  |  | |||
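For reference, parse3DFactors also accepts a diagonal noise model; when one is given, the Sampler above perturbs each relative rotation through retract. A usage sketch, where the file name and sigma values are illustrative assumptions and the 3-dof model is chosen to match the Rot3 retract used above:

    // Sketch: read BetweenFactor<Pose3> measurements from a g2o file, once
    // unchanged and once with the relative rotations corrupted by noise.
    #include <gtsam/slam/dataset.h>

    void exampleParse3DFactors() {
      const std::string file = "pose3example.txt";  // assumed example file
      auto clean = gtsam::parse3DFactors(file);
      auto rotationNoise =
          gtsam::noiseModel::Diagonal::Sigmas(gtsam::Vector3(0.01, 0.01, 0.01));
      auto corrupted = gtsam::parse3DFactors(file, rotationNoise);
    }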
|  | @ -122,45 +122,6 @@ TEST( dataSet, Balbianello) | |||
|   EXPECT(assert_equal(expected,actual,1)); | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( dataSet, readG2o) | ||||
| { | ||||
|   const string g2oFile = findExampleDataFile("pose2example"); | ||||
|   NonlinearFactorGraph::shared_ptr actualGraph; | ||||
|   Values::shared_ptr actualValues; | ||||
|   boost::tie(actualGraph, actualValues) = readG2o(g2oFile); | ||||
| 
 | ||||
|   Values expectedValues; | ||||
|   expectedValues.insert(0, Pose2(0.000000, 0.000000, 0.000000)); | ||||
|   expectedValues.insert(1, Pose2(1.030390, 0.011350, -0.081596)); | ||||
|   expectedValues.insert(2, Pose2(2.036137, -0.129733, -0.301887)); | ||||
|   expectedValues.insert(3, Pose2(3.015097, -0.442395, -0.345514)); | ||||
|   expectedValues.insert(4, Pose2(3.343949, 0.506678, 1.214715)); | ||||
|   expectedValues.insert(5, Pose2(3.684491, 1.464049, 1.183785)); | ||||
|   expectedValues.insert(6, Pose2(4.064626, 2.414783, 1.176333)); | ||||
|   expectedValues.insert(7, Pose2(4.429778, 3.300180, 1.259169)); | ||||
|   expectedValues.insert(8, Pose2(4.128877, 2.321481, -1.825391)); | ||||
|   expectedValues.insert(9, Pose2(3.884653, 1.327509, -1.953016)); | ||||
|   expectedValues.insert(10, Pose2(3.531067, 0.388263, -2.148934)); | ||||
|   EXPECT(assert_equal(expectedValues,*actualValues,1e-5)); | ||||
| 
 | ||||
|   noiseModel::Diagonal::shared_ptr model = noiseModel::Diagonal::Precisions(Vector3(44.721360, 44.721360, 30.901699)); | ||||
|   NonlinearFactorGraph expectedGraph; | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(0, 1, Pose2(1.030390, 0.011350, -0.081596), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(1, 2, Pose2(1.013900, -0.058639, -0.220291), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(2, 3, Pose2(1.027650, -0.007456, -0.043627), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(3, 4, Pose2(-0.012016, 1.004360, 1.560229), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(4, 5, Pose2(1.016030, 0.014565, -0.030930), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(5, 6, Pose2(1.023890, 0.006808, -0.007452), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(6, 7, Pose2(0.957734, 0.003159, 0.082836), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(7, 8, Pose2(-1.023820, -0.013668, -3.084560), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(8, 9, Pose2(1.023440, 0.013984, -0.127624), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(9,10, Pose2(1.003350, 0.022250, -0.195918), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(5, 9, Pose2(0.033943, 0.032439, 3.073637), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(3,10, Pose2(0.044020, 0.988477, -1.553511), model); | ||||
|   EXPECT(assert_equal(expectedGraph,*actualGraph,1e-5)); | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST(dataSet, readG2o3D) { | ||||
|   const string g2oFile = findExampleDataFile("pose3example"); | ||||
|  | @ -273,59 +234,103 @@ TEST( dataSet, readG2o3DNonDiagonalNoise) | |||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( dataSet, readG2oHuber) | ||||
| { | ||||
|   const string g2oFile = findExampleDataFile("pose2example"); | ||||
|   NonlinearFactorGraph::shared_ptr actualGraph; | ||||
|   Values::shared_ptr actualValues; | ||||
|   bool is3D = false; | ||||
|   boost::tie(actualGraph, actualValues) = readG2o(g2oFile, is3D, KernelFunctionTypeHUBER); | ||||
| TEST(dataSet, readG2oCheckDeterminants) { | ||||
|   const string g2oFile = findExampleDataFile("toyExample.g2o"); | ||||
| 
 | ||||
|   noiseModel::Diagonal::shared_ptr baseModel = noiseModel::Diagonal::Precisions(Vector3(44.721360, 44.721360, 30.901699)); | ||||
|   SharedNoiseModel model = noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(1.345), baseModel); | ||||
|   // Check determinants in factors | ||||
|   auto factors = parse3DFactors(g2oFile); | ||||
|   EXPECT_LONGS_EQUAL(6, factors.size()); | ||||
|   for (const auto& factor : factors) { | ||||
|     const Rot3 R = factor->measured().rotation(); | ||||
|     EXPECT_DOUBLES_EQUAL(1.0, R.matrix().determinant(), 1e-9); | ||||
|   } | ||||
| 
 | ||||
|   NonlinearFactorGraph expectedGraph; | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(0, 1, Pose2(1.030390, 0.011350, -0.081596), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(1, 2, Pose2(1.013900, -0.058639, -0.220291), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(2, 3, Pose2(1.027650, -0.007456, -0.043627), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(3, 4, Pose2(-0.012016, 1.004360, 1.560229), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(4, 5, Pose2(1.016030, 0.014565, -0.030930), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(5, 6, Pose2(1.023890, 0.006808, -0.007452), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(6, 7, Pose2(0.957734, 0.003159, 0.082836), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(7, 8, Pose2(-1.023820, -0.013668, -3.084560), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(8, 9, Pose2(1.023440, 0.013984, -0.127624), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(9,10, Pose2(1.003350, 0.022250, -0.195918), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(5, 9, Pose2(0.033943, 0.032439, 3.073637), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(3,10, Pose2(0.044020, 0.988477, -1.553511), model); | ||||
|   EXPECT(assert_equal(expectedGraph,*actualGraph,1e-5)); | ||||
|   // Check determinants in initial values | ||||
|   const map<Key, Pose3> poses = parse3DPoses(g2oFile); | ||||
|   EXPECT_LONGS_EQUAL(5, poses.size()); | ||||
|   for (const auto& key_value : poses) { | ||||
|     const Rot3 R = key_value.second.rotation(); | ||||
|     EXPECT_DOUBLES_EQUAL(1.0, R.matrix().determinant(), 1e-9); | ||||
|   } | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( dataSet, readG2oTukey) | ||||
| { | ||||
| static NonlinearFactorGraph expectedGraph(const SharedNoiseModel& model) { | ||||
|   NonlinearFactorGraph g; | ||||
|   using Factor = BetweenFactor<Pose2>; | ||||
|   g.emplace_shared<Factor>(0, 1, Pose2(1.030390, 0.011350, -0.081596), model); | ||||
|   g.emplace_shared<Factor>(1, 2, Pose2(1.013900, -0.058639, -0.220291), model); | ||||
|   g.emplace_shared<Factor>(2, 3, Pose2(1.027650, -0.007456, -0.043627), model); | ||||
|   g.emplace_shared<Factor>(3, 4, Pose2(-0.012016, 1.004360, 1.560229), model); | ||||
|   g.emplace_shared<Factor>(4, 5, Pose2(1.016030, 0.014565, -0.030930), model); | ||||
|   g.emplace_shared<Factor>(5, 6, Pose2(1.023890, 0.006808, -0.007452), model); | ||||
|   g.emplace_shared<Factor>(6, 7, Pose2(0.957734, 0.003159, 0.082836), model); | ||||
|   g.emplace_shared<Factor>(7, 8, Pose2(-1.023820, -0.013668, -3.084560), model); | ||||
|   g.emplace_shared<Factor>(8, 9, Pose2(1.023440, 0.013984, -0.127624), model); | ||||
|   g.emplace_shared<Factor>(9, 10, Pose2(1.003350, 0.022250, -0.195918), model); | ||||
|   g.emplace_shared<Factor>(5, 9, Pose2(0.033943, 0.032439, 3.073637), model); | ||||
|   g.emplace_shared<Factor>(3, 10, Pose2(0.044020, 0.988477, -1.553511), model); | ||||
|   return g; | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST(dataSet, readG2o) { | ||||
|   const string g2oFile = findExampleDataFile("pose2example"); | ||||
|   NonlinearFactorGraph::shared_ptr actualGraph; | ||||
|   Values::shared_ptr actualValues; | ||||
|   boost::tie(actualGraph, actualValues) = readG2o(g2oFile); | ||||
| 
 | ||||
|   auto model = noiseModel::Diagonal::Precisions( | ||||
|       Vector3(44.721360, 44.721360, 30.901699)); | ||||
|   EXPECT(assert_equal(expectedGraph(model), *actualGraph, 1e-5)); | ||||
| 
 | ||||
|   Values expectedValues; | ||||
|   expectedValues.insert(0, Pose2(0.000000, 0.000000, 0.000000)); | ||||
|   expectedValues.insert(1, Pose2(1.030390, 0.011350, -0.081596)); | ||||
|   expectedValues.insert(2, Pose2(2.036137, -0.129733, -0.301887)); | ||||
|   expectedValues.insert(3, Pose2(3.015097, -0.442395, -0.345514)); | ||||
|   expectedValues.insert(4, Pose2(3.343949, 0.506678, 1.214715)); | ||||
|   expectedValues.insert(5, Pose2(3.684491, 1.464049, 1.183785)); | ||||
|   expectedValues.insert(6, Pose2(4.064626, 2.414783, 1.176333)); | ||||
|   expectedValues.insert(7, Pose2(4.429778, 3.300180, 1.259169)); | ||||
|   expectedValues.insert(8, Pose2(4.128877, 2.321481, -1.825391)); | ||||
|   expectedValues.insert(9, Pose2(3.884653, 1.327509, -1.953016)); | ||||
|   expectedValues.insert(10, Pose2(3.531067, 0.388263, -2.148934)); | ||||
|   EXPECT(assert_equal(expectedValues, *actualValues, 1e-5)); | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST(dataSet, readG2oHuber) { | ||||
|   const string g2oFile = findExampleDataFile("pose2example"); | ||||
|   NonlinearFactorGraph::shared_ptr actualGraph; | ||||
|   Values::shared_ptr actualValues; | ||||
|   bool is3D = false; | ||||
|   boost::tie(actualGraph, actualValues) = readG2o(g2oFile, is3D, KernelFunctionTypeTUKEY); | ||||
|   boost::tie(actualGraph, actualValues) = | ||||
|       readG2o(g2oFile, is3D, KernelFunctionTypeHUBER); | ||||
| 
 | ||||
|   noiseModel::Diagonal::shared_ptr baseModel = noiseModel::Diagonal::Precisions(Vector3(44.721360, 44.721360, 30.901699)); | ||||
|   SharedNoiseModel model = noiseModel::Robust::Create(noiseModel::mEstimator::Tukey::Create(4.6851), baseModel); | ||||
|   auto baseModel = noiseModel::Diagonal::Precisions( | ||||
|       Vector3(44.721360, 44.721360, 30.901699)); | ||||
|   auto model = noiseModel::Robust::Create( | ||||
|       noiseModel::mEstimator::Huber::Create(1.345), baseModel); | ||||
| 
 | ||||
|   NonlinearFactorGraph expectedGraph; | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(0, 1, Pose2(1.030390, 0.011350, -0.081596), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(1, 2, Pose2(1.013900, -0.058639, -0.220291), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(2, 3, Pose2(1.027650, -0.007456, -0.043627), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(3, 4, Pose2(-0.012016, 1.004360, 1.560229), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(4, 5, Pose2(1.016030, 0.014565, -0.030930), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(5, 6, Pose2(1.023890, 0.006808, -0.007452), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(6, 7, Pose2(0.957734, 0.003159, 0.082836), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(7, 8, Pose2(-1.023820, -0.013668, -3.084560), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(8, 9, Pose2(1.023440, 0.013984, -0.127624), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(9,10, Pose2(1.003350, 0.022250, -0.195918), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(5, 9, Pose2(0.033943, 0.032439, 3.073637), model); | ||||
|   expectedGraph.emplace_shared<BetweenFactor<Pose2> >(3,10, Pose2(0.044020, 0.988477, -1.553511), model); | ||||
|   EXPECT(assert_equal(expectedGraph,*actualGraph,1e-5)); | ||||
|   EXPECT(assert_equal(expectedGraph(model), *actualGraph, 1e-5)); | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST(dataSet, readG2oTukey) { | ||||
|   const string g2oFile = findExampleDataFile("pose2example"); | ||||
|   NonlinearFactorGraph::shared_ptr actualGraph; | ||||
|   Values::shared_ptr actualValues; | ||||
|   bool is3D = false; | ||||
|   boost::tie(actualGraph, actualValues) = | ||||
|       readG2o(g2oFile, is3D, KernelFunctionTypeTUKEY); | ||||
| 
 | ||||
|   auto baseModel = noiseModel::Diagonal::Precisions( | ||||
|       Vector3(44.721360, 44.721360, 30.901699)); | ||||
|   auto model = noiseModel::Robust::Create( | ||||
|       noiseModel::mEstimator::Tukey::Create(4.6851), baseModel); | ||||
| 
 | ||||
|   EXPECT(assert_equal(expectedGraph(model), *actualGraph, 1e-5)); | ||||
| } | ||||
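Both robust-kernel tests exercise the same code path. A typical end-to-end use of readG2o with a robust kernel, sketched under the assumption of a Gauss-Newton optimizer and a prior on the first pose to fix the gauge (file names are placeholders):

    // Sketch: load a 2D pose graph with a Huber kernel, anchor the first pose,
    // optimize, and write the optimized values back to disk with writeG2o.
    #include <boost/tuple/tuple.hpp>
    #include <gtsam/geometry/Pose2.h>
    #include <gtsam/nonlinear/GaussNewtonOptimizer.h>
    #include <gtsam/slam/PriorFactor.h>
    #include <gtsam/slam/dataset.h>

    void exampleRobustPoseGraph() {
      gtsam::NonlinearFactorGraph::shared_ptr graph;
      gtsam::Values::shared_ptr initial;
      const bool is3D = false;
      boost::tie(graph, initial) =
          gtsam::readG2o("pose2example.g2o", is3D, gtsam::KernelFunctionTypeHUBER);

      // Anchor the first pose; a pure pose graph has no global reference otherwise.
      auto priorModel =
          gtsam::noiseModel::Diagonal::Variances(gtsam::Vector3(1e-6, 1e-6, 1e-8));
      graph->emplace_shared<gtsam::PriorFactor<gtsam::Pose2> >(0, gtsam::Pose2(),
                                                               priorModel);

      gtsam::GaussNewtonOptimizer optimizer(*graph, *initial);
      const gtsam::Values result = optimizer.optimize();
      gtsam::writeG2o(*graph, result, "pose2example_optimized.g2o");
    }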
| 
 | ||||
| /* ************************************************************************* */ | ||||
|  | @ -495,7 +500,7 @@ TEST( dataSet, writeBALfromValues_Dubrovnik){ | |||
|   SfmData readData; | ||||
|   readBAL(filenameToRead, readData); | ||||
| 
 | ||||
|   Pose3 poseChange = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10), gtsam::Point3(0.3,0.1,0.3)); | ||||
|   Pose3 poseChange = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10), Point3(0.3,0.1,0.3)); | ||||
| 
 | ||||
|   Values value; | ||||
|   for(size_t i=0; i < readData.number_cameras(); i++){ // for each camera | ||||
|  |  | |||
|  | @ -37,11 +37,11 @@ class PinholeFactor: public SmartFactorBase<PinholeCamera<Cal3Bundler> > { | |||
| public: | ||||
|   typedef SmartFactorBase<PinholeCamera<Cal3Bundler> > Base; | ||||
|   PinholeFactor() {} | ||||
|   PinholeFactor(const SharedNoiseModel& sharedNoiseModel): Base(sharedNoiseModel) { | ||||
|   } | ||||
|   virtual double error(const Values& values) const { | ||||
|     return 0.0; | ||||
|   } | ||||
|   PinholeFactor(const SharedNoiseModel& sharedNoiseModel, | ||||
|                 boost::optional<Pose3> body_P_sensor = boost::none, | ||||
|                 size_t expectedNumberCameras = 10) | ||||
|       : Base(sharedNoiseModel, body_P_sensor, expectedNumberCameras) {} | ||||
|   virtual double error(const Values& values) const { return 0.0; } | ||||
|   virtual boost::shared_ptr<GaussianFactor> linearize( | ||||
|       const Values& values) const { | ||||
|     return boost::shared_ptr<GaussianFactor>(new JacobianFactor()); | ||||
|  | @ -60,6 +60,40 @@ TEST(SmartFactorBase, Pinhole) { | |||
|   EXPECT_LONGS_EQUAL(2 * 2, f.dim()); | ||||
| } | ||||
| 
 | ||||
| TEST(SmartFactorBase, PinholeWithSensor) { | ||||
|   Pose3 body_P_sensor(Rot3(), Point3(1, 0, 0)); | ||||
|   PinholeFactor f = PinholeFactor(unit2, body_P_sensor); | ||||
|   EXPECT(assert_equal<Pose3>(f.body_P_sensor(), body_P_sensor)); | ||||
| 
 | ||||
|   PinholeFactor::Cameras cameras; | ||||
|   // Assume body at origin. | ||||
|   Pose3 world_P_body = Pose3(); | ||||
|   // Camera coordinates in world frame. | ||||
|   Pose3 wTc = world_P_body * body_P_sensor; | ||||
|   cameras.push_back(PinholeCamera<Cal3Bundler>(wTc)); | ||||
|    | ||||
|   // Simple point to project slightly off image center | ||||
|   Point3 p(0, 0, 10); | ||||
|   Point2 measurement = cameras[0].project(p); | ||||
|   f.add(measurement, 1); | ||||
| 
 | ||||
|   PinholeFactor::Cameras::FBlocks Fs; | ||||
|   Matrix E; | ||||
|   Vector error = f.unwhitenedError<Point3>(cameras, p, Fs, E); | ||||
| 
 | ||||
|   Vector expectedError = Vector::Zero(2);   | ||||
|   Matrix29 expectedFs; | ||||
|   expectedFs << -0.001, -1.00001, 0, -0.1, 0, -0.01, 0, 0, 0, 1, 0, 0, 0, -0.1, 0, 0, 0, 0; | ||||
|   Matrix23 expectedE; | ||||
|   expectedE << 0.1, 0, 0.01, 0, 0.1, 0; | ||||
| 
 | ||||
|   EXPECT(assert_equal(error, expectedError)); | ||||
|   // We only have the Jacobian for the one camera | ||||
|   // Use a lower tolerance because floating-point precision differs across compilers. | ||||
|   EXPECT(assert_equal(expectedFs, Fs[0], 1e-3)); | ||||
|   EXPECT(assert_equal(expectedE, E)); | ||||
| } | ||||
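The new test predicts the measurement from the composed pose world_P_body * body_P_sensor. A standalone sketch of that composition; the poses and the projected point mirror the test above, everything else is an assumption:

    // Sketch: camera mounted 1 m ahead of the body along x, body at the world
    // origin. The world point (0, 0, 10) is (-1, 0, 10) in the camera frame,
    // so the default Cal3Bundler calibration projects it to roughly (-0.1, 0),
    // i.e. slightly off the image center as the test comments note.
    #include <iostream>
    #include <gtsam/geometry/Cal3Bundler.h>
    #include <gtsam/geometry/PinholeCamera.h>

    int main() {
      const gtsam::Pose3 body_P_sensor(gtsam::Rot3(), gtsam::Point3(1, 0, 0));
      const gtsam::Pose3 world_P_body;  // identity: body sits at the origin
      const gtsam::Pose3 wTc = world_P_body * body_P_sensor;
      const gtsam::PinholeCamera<gtsam::Cal3Bundler> camera(wTc);
      const gtsam::Point2 uv = camera.project(gtsam::Point3(0, 0, 10));
      std::cout << uv.x() << " " << uv.y() << std::endl;
      return 0;
    }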
| 
 | ||||
| /* ************************************************************************* */ | ||||
| #include <gtsam/geometry/StereoCamera.h> | ||||
| 
 | ||||
|  |  | |||