Merge branch 'develop'
Conflicts:
	.cproject
	examples/Pose3SLAMExample_g2o.cpp
	examples/Pose3SLAMExample_initializePose3Chordal.cpp
	examples/Pose3SLAMExample_initializePose3Gradient.cpp
	gtsam/slam/InitializePose3.cpp
	gtsam/slam/InitializePose3.h
	gtsam/slam/tests/testInitializePose3.cpp
commit d57ca93b7a

.cproject
							|  | @ -582,7 +582,6 @@ | |||
| 			</target> | ||||
| 			<target name="tests/testBayesTree.run" path="inference" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>tests/testBayesTree.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  | @ -590,7 +589,6 @@ | |||
| 			</target> | ||||
| 			<target name="testBinaryBayesNet.run" path="inference" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>testBinaryBayesNet.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  | @ -638,7 +636,6 @@ | |||
| 			</target> | ||||
| 			<target name="testSymbolicBayesNet.run" path="inference" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>testSymbolicBayesNet.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  | @ -646,7 +643,6 @@ | |||
| 			</target> | ||||
| 			<target name="tests/testSymbolicFactor.run" path="inference" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>tests/testSymbolicFactor.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  | @ -654,7 +650,6 @@ | |||
| 			</target> | ||||
| 			<target name="testSymbolicFactorGraph.run" path="inference" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>testSymbolicFactorGraph.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  | @ -670,7 +665,6 @@ | |||
| 			</target> | ||||
| 			<target name="tests/testBayesTree" path="inference" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>tests/testBayesTree</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  | @ -1006,7 +1000,6 @@ | |||
| 			</target> | ||||
| 			<target name="testErrors.run" path="linear" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>testErrors.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  | @ -1236,46 +1229,6 @@ | |||
| 				<useDefaultCommand>true</useDefaultCommand> | ||||
| 				<runAllBuilders>true</runAllBuilders> | ||||
| 			</target> | ||||
| 			<target name="testBTree.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments>-j5</buildArguments> | ||||
| 				<buildTarget>testBTree.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>true</useDefaultCommand> | ||||
| 				<runAllBuilders>true</runAllBuilders> | ||||
| 			</target> | ||||
| 			<target name="testDSF.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments>-j5</buildArguments> | ||||
| 				<buildTarget>testDSF.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>true</useDefaultCommand> | ||||
| 				<runAllBuilders>true</runAllBuilders> | ||||
| 			</target> | ||||
| 			<target name="testDSFMap.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments>-j5</buildArguments> | ||||
| 				<buildTarget>testDSFMap.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>true</useDefaultCommand> | ||||
| 				<runAllBuilders>true</runAllBuilders> | ||||
| 			</target> | ||||
| 			<target name="testDSFVector.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments>-j5</buildArguments> | ||||
| 				<buildTarget>testDSFVector.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>true</useDefaultCommand> | ||||
| 				<runAllBuilders>true</runAllBuilders> | ||||
| 			</target> | ||||
| 			<target name="testFixedVector.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments>-j5</buildArguments> | ||||
| 				<buildTarget>testFixedVector.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>true</useDefaultCommand> | ||||
| 				<runAllBuilders>true</runAllBuilders> | ||||
| 			</target> | ||||
| 			<target name="all" path="slam" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments>-j2</buildArguments> | ||||
|  | @ -1358,6 +1311,7 @@ | |||
| 			</target> | ||||
| 			<target name="testSimulated2DOriented.run" path="slam" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>testSimulated2DOriented.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  | @ -1397,6 +1351,7 @@ | |||
| 			</target> | ||||
| 			<target name="testSimulated2D.run" path="slam" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>testSimulated2D.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  | @ -1404,6 +1359,7 @@ | |||
| 			</target> | ||||
| 			<target name="testSimulated3D.run" path="slam" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>testSimulated3D.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  | @ -1417,6 +1373,46 @@ | |||
| 				<useDefaultCommand>true</useDefaultCommand> | ||||
| 				<runAllBuilders>true</runAllBuilders> | ||||
| 			</target> | ||||
| 			<target name="testBTree.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments>-j5</buildArguments> | ||||
| 				<buildTarget>testBTree.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>true</useDefaultCommand> | ||||
| 				<runAllBuilders>true</runAllBuilders> | ||||
| 			</target> | ||||
| 			<target name="testDSF.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments>-j5</buildArguments> | ||||
| 				<buildTarget>testDSF.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>true</useDefaultCommand> | ||||
| 				<runAllBuilders>true</runAllBuilders> | ||||
| 			</target> | ||||
| 			<target name="testDSFMap.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments>-j5</buildArguments> | ||||
| 				<buildTarget>testDSFMap.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>true</useDefaultCommand> | ||||
| 				<runAllBuilders>true</runAllBuilders> | ||||
| 			</target> | ||||
| 			<target name="testDSFVector.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments>-j5</buildArguments> | ||||
| 				<buildTarget>testDSFVector.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>true</useDefaultCommand> | ||||
| 				<runAllBuilders>true</runAllBuilders> | ||||
| 			</target> | ||||
| 			<target name="testFixedVector.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments>-j5</buildArguments> | ||||
| 				<buildTarget>testFixedVector.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>true</useDefaultCommand> | ||||
| 				<runAllBuilders>true</runAllBuilders> | ||||
| 			</target> | ||||
| 			<target name="testEliminationTree.run" path="build/gtsam/inference/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments>-j5</buildArguments> | ||||
|  | @ -1674,7 +1670,6 @@ | |||
| 			</target> | ||||
| 			<target name="Generate DEB Package" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>cpack</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>-G DEB</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  | @ -1682,7 +1677,6 @@ | |||
| 			</target> | ||||
| 			<target name="Generate RPM Package" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>cpack</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>-G RPM</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  | @ -1690,7 +1684,6 @@ | |||
| 			</target> | ||||
| 			<target name="Generate TGZ Package" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>cpack</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>-G TGZ</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  | @ -1698,7 +1691,6 @@ | |||
| 			</target> | ||||
| 			<target name="Generate TGZ Source Package" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>cpack</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>--config CPackSourceConfig.cmake</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  | @ -2425,7 +2417,6 @@ | |||
| 			</target> | ||||
| 			<target name="testGraph.run" path="build/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>testGraph.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  | @ -2433,7 +2424,6 @@ | |||
| 			</target> | ||||
| 			<target name="testJunctionTree.run" path="build/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>testJunctionTree.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  | @ -2441,7 +2431,6 @@ | |||
| 			</target> | ||||
| 			<target name="testSymbolicBayesNetB.run" path="build/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>testSymbolicBayesNetB.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  | @ -2647,14 +2636,6 @@ | |||
| 				<useDefaultCommand>true</useDefaultCommand> | ||||
| 				<runAllBuilders>true</runAllBuilders> | ||||
| 			</target> | ||||
| 			<target name="testInitializePose3.run" path="build/gtsam/slam/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments>-j5</buildArguments> | ||||
| 				<buildTarget>testInitializePose3.run</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>true</useDefaultCommand> | ||||
| 				<runAllBuilders>true</runAllBuilders> | ||||
| 			</target> | ||||
| 			<target name="SimpleRotation.run" path="build/examples" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments>-j2</buildArguments> | ||||
|  | @ -2929,6 +2910,7 @@ | |||
| 			</target> | ||||
| 			<target name="tests/testGaussianISAM2" path="build/slam" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> | ||||
| 				<buildCommand>make</buildCommand> | ||||
| 				<buildArguments/> | ||||
| 				<buildTarget>tests/testGaussianISAM2</buildTarget> | ||||
| 				<stopOnError>true</stopOnError> | ||||
| 				<useDefaultCommand>false</useDefaultCommand> | ||||
|  |  | |||
|  | @ -123,6 +123,11 @@ else() | |||
| endif() | ||||
| 
 | ||||
| 
 | ||||
| if(${Boost_VERSION} EQUAL 105600) | ||||
| 	message("Ignoring Boost restriction on optional lvalue assignment from rvalues") | ||||
| 	add_definitions(-DBOOST_OPTIONAL_ALLOW_BINDING_TO_RVALUES) | ||||
| endif() | ||||
| 
 | ||||
| ############################################################################### | ||||
| # Find TBB | ||||
| find_package(TBB) | ||||
|  | @ -169,9 +174,9 @@ endif() | |||
| 
 | ||||
| ############################################################################### | ||||
| # Find OpenMP (if we're also using MKL) | ||||
| if(GTSAM_WITH_EIGEN_MKL AND GTSAM_USE_EIGEN_MKL_OPENMP AND GTSAM_USE_EIGEN_MKL) | ||||
|     find_package(OpenMP) | ||||
| find_package(OpenMP)  # do this here to generate correct message if disabled | ||||
| 
 | ||||
| if(GTSAM_WITH_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP AND GTSAM_USE_EIGEN_MKL) | ||||
|     if(OPENMP_FOUND AND GTSAM_USE_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP) | ||||
|         set(GTSAM_USE_EIGEN_MKL_OPENMP 1) # This will go into config.h | ||||
|         set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") | ||||
|  |  | |||
|  | @ -58,6 +58,7 @@ FIND_PATH(MKL_ROOT_DIR | |||
|     /opt/intel/mkl/*/ | ||||
|     /opt/intel/cmkl/ | ||||
|     /opt/intel/cmkl/*/ | ||||
|     /opt/intel/*/mkl/ | ||||
|     /Library/Frameworks/Intel_MKL.framework/Versions/Current/lib/universal | ||||
|         "C:/Program Files (x86)/Intel/ComposerXE-2011/mkl" | ||||
|         "C:/Program Files (x86)/Intel/Composer XE 2013/mkl" | ||||
|  |  | |||
|  | @ -1,3 +1,3 @@ | |||
| VERTEX_SE3:QUAT 0 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 1.000000 | ||||
| VERTEX_SE3:QUAT 1 1.001367 0.015390 0.004948 0.190253 0.283162 -0.392318 0.854230 | ||||
| EDGE_SE3:QUAT 0 1   1.001367 0.015390 0.004948   0.190253 0.283162 -0.392318 0.854230   10000.000000 1.000000 1.000000 1.000000 1.000000 1.000000   10000.000000 2.000000 2.000000 2.000000 2.000000   10000.000000 3.000000 3.000000 3.000000   10000.000000 4.000000 4.000000   10000.000000 5.000000   10000.000000 | ||||
| EDGE_SE3:QUAT 0 1   1.001367 0.015390 0.004948   0.190253 0.283162 -0.392318 0.854230   10000.000000 1.000000 1.000000 1.000000 1.000000 1.000000   10000.000000 2.000000 2.000000 2.000000 2.000000   10000.000000 3.000000 3.000000 3.000000   10000.000000 4.000000 4.000000   10000.000000 5.000000   10000.0000 | ||||
|  | @ -120,15 +120,15 @@ int main(int argc, char** argv) { | |||
|   // For simplicity, we will use the same noise model for each odometry factor
 | ||||
|   noiseModel::Diagonal::shared_ptr odometryNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.2, 0.2, 0.1)); | ||||
|   // Create odometry (Between) factors between consecutive poses
 | ||||
|   graph.push_back(BetweenFactor<Pose2>(1, 2, Pose2(2.0, 0.0, 0.0), odometryNoise)); | ||||
|   graph.push_back(BetweenFactor<Pose2>(2, 3, Pose2(2.0, 0.0, 0.0), odometryNoise)); | ||||
|   graph.add(BetweenFactor<Pose2>(1, 2, Pose2(2.0, 0.0, 0.0), odometryNoise)); | ||||
|   graph.add(BetweenFactor<Pose2>(2, 3, Pose2(2.0, 0.0, 0.0), odometryNoise)); | ||||
| 
 | ||||
|   // 2b. Add "GPS-like" measurements
 | ||||
|   // We will use our custom UnaryFactor for this.
 | ||||
|   noiseModel::Diagonal::shared_ptr unaryNoise = noiseModel::Diagonal::Sigmas((Vector(2) << 0.1, 0.1)); // 10cm std on x,y
 | ||||
|   graph.push_back(boost::make_shared<UnaryFactor>(1, 0.0, 0.0, unaryNoise)); | ||||
|   graph.push_back(boost::make_shared<UnaryFactor>(2, 2.0, 0.0, unaryNoise)); | ||||
|   graph.push_back(boost::make_shared<UnaryFactor>(3, 4.0, 0.0, unaryNoise)); | ||||
|   graph.add(boost::make_shared<UnaryFactor>(1, 0.0, 0.0, unaryNoise)); | ||||
|   graph.add(boost::make_shared<UnaryFactor>(2, 2.0, 0.0, unaryNoise)); | ||||
|   graph.add(boost::make_shared<UnaryFactor>(3, 4.0, 0.0, unaryNoise)); | ||||
|   graph.print("\nFactor Graph:\n"); // print
 | ||||
| 
 | ||||
|   // 3. Create the data structure to hold the initialEstimate estimate to the solution
 | ||||
|  |  | |||
|  | @ -65,15 +65,15 @@ int main(int argc, char** argv) { | |||
|   // A prior factor consists of a mean and a noise model (covariance matrix)
 | ||||
|   Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin
 | ||||
|   noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.3, 0.3, 0.1)); | ||||
|   graph.push_back(PriorFactor<Pose2>(1, priorMean, priorNoise)); | ||||
|   graph.add(PriorFactor<Pose2>(1, priorMean, priorNoise)); | ||||
| 
 | ||||
|   // Add odometry factors
 | ||||
|   Pose2 odometry(2.0, 0.0, 0.0); | ||||
|   // For simplicity, we will use the same noise model for each odometry factor
 | ||||
|   noiseModel::Diagonal::shared_ptr odometryNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.2, 0.2, 0.1)); | ||||
|   // Create odometry (Between) factors between consecutive poses
 | ||||
|   graph.push_back(BetweenFactor<Pose2>(1, 2, odometry, odometryNoise)); | ||||
|   graph.push_back(BetweenFactor<Pose2>(2, 3, odometry, odometryNoise)); | ||||
|   graph.add(BetweenFactor<Pose2>(1, 2, odometry, odometryNoise)); | ||||
|   graph.add(BetweenFactor<Pose2>(2, 3, odometry, odometryNoise)); | ||||
|   graph.print("\nFactor Graph:\n"); // print
 | ||||
| 
 | ||||
|   // Create the data structure to hold the initialEstimate estimate to the solution
 | ||||
|  |  | |||
|  | @ -81,13 +81,13 @@ int main(int argc, char** argv) { | |||
|   // Add a prior on pose x1 at the origin. A prior factor consists of a mean and a noise model (covariance matrix)
 | ||||
|   Pose2 prior(0.0, 0.0, 0.0); // prior mean is at origin
 | ||||
|   noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.3, 0.3, 0.1)); // 30cm std on x,y, 0.1 rad on theta
 | ||||
|   graph.push_back(PriorFactor<Pose2>(x1, prior, priorNoise)); // add directly to graph
 | ||||
|   graph.add(PriorFactor<Pose2>(x1, prior, priorNoise)); // add directly to graph
 | ||||
| 
 | ||||
|   // Add two odometry factors
 | ||||
|   Pose2 odometry(2.0, 0.0, 0.0); // create a measurement for both factors (the same in this case)
 | ||||
|   noiseModel::Diagonal::shared_ptr odometryNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.2, 0.2, 0.1)); // 20cm std on x,y, 0.1 rad on theta
 | ||||
|   graph.push_back(BetweenFactor<Pose2>(x1, x2, odometry, odometryNoise)); | ||||
|   graph.push_back(BetweenFactor<Pose2>(x2, x3, odometry, odometryNoise)); | ||||
|   graph.add(BetweenFactor<Pose2>(x1, x2, odometry, odometryNoise)); | ||||
|   graph.add(BetweenFactor<Pose2>(x2, x3, odometry, odometryNoise)); | ||||
| 
 | ||||
|   // Add Range-Bearing measurements to two different landmarks
 | ||||
|   // create a noise model for the landmark measurements
 | ||||
|  | @ -101,9 +101,9 @@ int main(int argc, char** argv) { | |||
|          range32 = 2.0; | ||||
| 
 | ||||
|   // Add Bearing-Range factors
 | ||||
|   graph.push_back(BearingRangeFactor<Pose2, Point2>(x1, l1, bearing11, range11, measurementNoise)); | ||||
|   graph.push_back(BearingRangeFactor<Pose2, Point2>(x2, l1, bearing21, range21, measurementNoise)); | ||||
|   graph.push_back(BearingRangeFactor<Pose2, Point2>(x3, l2, bearing32, range32, measurementNoise)); | ||||
|   graph.add(BearingRangeFactor<Pose2, Point2>(x1, l1, bearing11, range11, measurementNoise)); | ||||
|   graph.add(BearingRangeFactor<Pose2, Point2>(x2, l1, bearing21, range21, measurementNoise)); | ||||
|   graph.add(BearingRangeFactor<Pose2, Point2>(x3, l2, bearing32, range32, measurementNoise)); | ||||
| 
 | ||||
|   // Print
 | ||||
|   graph.print("Factor Graph:\n"); | ||||
|  |  | |||
|  | @ -72,23 +72,23 @@ int main(int argc, char** argv) { | |||
|   // 2a. Add a prior on the first pose, setting it to the origin
 | ||||
|   // A prior factor consists of a mean and a noise model (covariance matrix)
 | ||||
|   noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.3, 0.3, 0.1)); | ||||
|   graph.push_back(PriorFactor<Pose2>(1, Pose2(0, 0, 0), priorNoise)); | ||||
|   graph.add(PriorFactor<Pose2>(1, Pose2(0, 0, 0), priorNoise)); | ||||
| 
 | ||||
|   // For simplicity, we will use the same noise model for odometry and loop closures
 | ||||
|   noiseModel::Diagonal::shared_ptr model = noiseModel::Diagonal::Sigmas((Vector(3) << 0.2, 0.2, 0.1)); | ||||
| 
 | ||||
|   // 2b. Add odometry factors
 | ||||
|   // Create odometry (Between) factors between consecutive poses
 | ||||
|   graph.push_back(BetweenFactor<Pose2>(1, 2, Pose2(2, 0, 0     ), model)); | ||||
|   graph.push_back(BetweenFactor<Pose2>(2, 3, Pose2(2, 0, M_PI_2), model)); | ||||
|   graph.push_back(BetweenFactor<Pose2>(3, 4, Pose2(2, 0, M_PI_2), model)); | ||||
|   graph.push_back(BetweenFactor<Pose2>(4, 5, Pose2(2, 0, M_PI_2), model)); | ||||
|   graph.add(BetweenFactor<Pose2>(1, 2, Pose2(2, 0, 0     ), model)); | ||||
|   graph.add(BetweenFactor<Pose2>(2, 3, Pose2(2, 0, M_PI_2), model)); | ||||
|   graph.add(BetweenFactor<Pose2>(3, 4, Pose2(2, 0, M_PI_2), model)); | ||||
|   graph.add(BetweenFactor<Pose2>(4, 5, Pose2(2, 0, M_PI_2), model)); | ||||
| 
 | ||||
|   // 2c. Add the loop closure constraint
 | ||||
|   // This factor encodes the fact that we have returned to the same pose. In real systems,
 | ||||
|   // these constraints may be identified in many ways, such as appearance-based techniques
 | ||||
|   // with camera images. We will use another Between Factor to enforce this constraint:
 | ||||
|   graph.push_back(BetweenFactor<Pose2>(5, 2, Pose2(2, 0, M_PI_2), model)); | ||||
|   graph.add(BetweenFactor<Pose2>(5, 2, Pose2(2, 0, M_PI_2), model)); | ||||
|   graph.print("\nFactor Graph:\n"); // print
 | ||||
| 
 | ||||
|   // 3. Create the data structure to hold the initialEstimate estimate to the solution
 | ||||
|  |  | |||
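The example hunks above replace graph.push_back(...) with graph.add(...) when inserting plain factor objects. A minimal self-contained sketch of that pattern, assuming the same GTSAM headers, key numbering, and noise sigmas these examples use (illustration only, not part of the commit):

#include <gtsam/geometry/Pose2.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>

using namespace gtsam;

int main() {
  NonlinearFactorGraph graph;

  // Noise models, matching the sigmas used in the examples above
  noiseModel::Diagonal::shared_ptr priorNoise =
      noiseModel::Diagonal::Sigmas((Vector(3) << 0.3, 0.3, 0.1));
  noiseModel::Diagonal::shared_ptr odometryNoise =
      noiseModel::Diagonal::Sigmas((Vector(3) << 0.2, 0.2, 0.1));

  // add() accepts a factor object directly, as in the updated examples;
  // shared pointers (e.g. boost::make_shared<UnaryFactor>) are also accepted
  graph.add(PriorFactor<Pose2>(1, Pose2(0, 0, 0), priorNoise));
  graph.add(BetweenFactor<Pose2>(1, 2, Pose2(2, 0, 0), odometryNoise));
  graph.add(BetweenFactor<Pose2>(2, 3, Pose2(2, 0, 0), odometryNoise));

  // Deliberately inexact initial estimate, as in the examples
  Values initial;
  initial.insert(1, Pose2(0.5, 0.0, 0.2));
  initial.insert(2, Pose2(2.3, 0.1, -0.2));
  initial.insert(3, Pose2(4.1, 0.1, 0.1));

  Values result = LevenbergMarquardtOptimizer(graph, initial).optimize();
  result.print("Final Result:\n");
  return 0;
}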
|  | @ -17,7 +17,6 @@ | |||
|  * @author Luca Carlone | ||||
|  */ | ||||
| 
 | ||||
| #include <gtsam/slam/InitializePose3.h> | ||||
| #include <gtsam/slam/dataset.h> | ||||
| #include <gtsam/slam/BetweenFactor.h> | ||||
| #include <gtsam/slam/PriorFactor.h> | ||||
|  |  | |||
|  | @ -17,7 +17,6 @@ | |||
|  * @author Luca Carlone | ||||
|  */ | ||||
| 
 | ||||
| #include <gtsam/slam/InitializePose3.h> | ||||
| #include <gtsam/slam/dataset.h> | ||||
| #include <gtsam/slam/BetweenFactor.h> | ||||
| #include <gtsam/slam/PriorFactor.h> | ||||
|  |  | |||
|  | @ -1,95 +0,0 @@ | |||
| /* ----------------------------------------------------------------------------
 | ||||
| 
 | ||||
|  * GTSAM Copyright 2010, Georgia Tech Research Corporation, | ||||
|  * Atlanta, Georgia 30332-0415 | ||||
|  * All Rights Reserved | ||||
|  * Authors: Frank Dellaert, et al. (see THANKS for the full author list) | ||||
| 
 | ||||
|  * See LICENSE for the license information | ||||
| 
 | ||||
|  * -------------------------------------------------------------------------- */ | ||||
| 
 | ||||
| /**
 | ||||
|  * @file Pose3SLAMExample_initializePose3.cpp | ||||
|  * @brief A 3D Pose SLAM example that reads input from g2o, and initializes the Pose3 using InitializePose3 | ||||
|  * Syntax for the script is ./Pose3SLAMExample_initializePose3 input.g2o output.g2o | ||||
|  * @date Aug 25, 2014 | ||||
|  * @author Luca Carlone | ||||
|  */ | ||||
| 
 | ||||
| #include <gtsam/slam/InitializePose3.h> | ||||
| #include <gtsam/slam/dataset.h> | ||||
| #include <gtsam/slam/BetweenFactor.h> | ||||
| #include <gtsam/slam/PriorFactor.h> | ||||
| #include <gtsam/nonlinear/GaussNewtonOptimizer.h> | ||||
| #include <fstream> | ||||
| 
 | ||||
| using namespace std; | ||||
| using namespace gtsam; | ||||
| 
 | ||||
| int main(const int argc, const char *argv[]) { | ||||
| 
 | ||||
|   // Read graph from file
 | ||||
|   string g2oFile; | ||||
|   if (argc < 2) | ||||
|     g2oFile = findExampleDataFile("pose3example.txt"); | ||||
|   else | ||||
|     g2oFile = argv[1]; | ||||
| 
 | ||||
|   NonlinearFactorGraph::shared_ptr graph; | ||||
|   Values::shared_ptr initial; | ||||
|   bool is3D = true; | ||||
|   boost::tie(graph, initial) = readG2o(g2oFile, is3D); | ||||
| 
 | ||||
|   Values initialRot; | ||||
|   BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, *initial) { | ||||
|     Key key = key_value.key; | ||||
|     Pose3 pose = initial->at<Pose3>(key); | ||||
|     Rot3 R = pose.rotation(); | ||||
|     initialRot.insert(key,R); | ||||
|   } | ||||
| 
 | ||||
|   noiseModel::Unit::shared_ptr identityModel = noiseModel::Unit::Create(3); | ||||
|   NonlinearFactorGraph graphRot; | ||||
|   BOOST_FOREACH(const NonlinearFactor::shared_ptr& factor, *graph) { | ||||
|     boost::shared_ptr<BetweenFactor<Pose3> > pose3Between = | ||||
|         boost::dynamic_pointer_cast<BetweenFactor<Pose3> >(factor); | ||||
|     if(pose3Between){ | ||||
|       Key key1 = pose3Between->key1(); | ||||
|       Key key2 = pose3Between->key2(); | ||||
|       Pose3 Pij = pose3Between->measured(); | ||||
|       Rot3 Rij = Pij.rotation(); | ||||
|       NonlinearFactor::shared_ptr factorRot(new BetweenFactor<Rot3>(key1, key2, Rij, identityModel)); | ||||
|       graphRot.add(factorRot); | ||||
|     }else{ | ||||
|       std::cout << "Found a factor that is not a Between<Pose3>: not admitted" << std::endl; | ||||
|       return 1; | ||||
|     } | ||||
|   } | ||||
|   // Add prior on the first key
 | ||||
|   graphRot.add(PriorFactor<Rot3>(0, Rot3(), identityModel)); | ||||
| 
 | ||||
|   std::cout << "Optimizing Rot3 via GN" << std::endl; | ||||
|   // GaussNewtonParams params;
 | ||||
|   GaussNewtonOptimizer optimizer(graphRot, initialRot); | ||||
|   Values GNrot = optimizer.optimize(); | ||||
|   std::cout << "done!" << std::endl; | ||||
| 
 | ||||
|   // Wrap estimate as poses to write in g2o format
 | ||||
|   Values GNposes; | ||||
|   BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, GNrot) { | ||||
|     Key key = key_value.key; | ||||
|     Rot3 R = GNrot.at<Rot3>(key); | ||||
|     GNposes.insert(key,Pose3(R,Point3())); | ||||
|   } | ||||
| 
 | ||||
|   if (argc < 3) { | ||||
|     GNrot.print("initialization"); | ||||
|   } else { | ||||
|     const string outputFile = argv[2]; | ||||
|     std::cout << "Writing results to file: " << outputFile << std::endl; | ||||
|     writeG2o(*graph, GNposes, outputFile); | ||||
|     std::cout << "done! " << std::endl; | ||||
|   } | ||||
|   return 0; | ||||
| } | ||||
|  | @ -13,6 +13,22 @@ | |||
| * @brief   Incremental and batch solving, timing, and accuracy comparisons | ||||
| * @author  Richard Roberts | ||||
| * @date    August, 2013 | ||||
| * | ||||
| * Here is an example. Below, to run in batch mode, we first generate an initialization in incremental mode. | ||||
| * | ||||
| * Solve in incremental mode and write to file w_inc: | ||||
| * ./SolverComparer --incremental -d w10000 -o w_inc | ||||
| * | ||||
| * You can then perturb that initialization to get something for batch mode to optimize. | ||||
| * Read in w_inc, perturb it with noise of stddev 0.6, and write to w_pert: | ||||
| * ./SolverComparer --perturb 0.6 -i w_inc -o w_pert | ||||
| * | ||||
| * Then read in w_pert, solve in batch mode, and write to w_batch: | ||||
| * ./SolverComparer --batch -d w10000 -i w_pert -o w_batch | ||||
| * | ||||
| * Finally, compare the solutions in w_inc and w_batch to check that batch converged to the global minimum: | ||||
| * ./SolverComparer --compare w_inc w_batch | ||||
| * | ||||
| */ | ||||
| 
 | ||||
| #include <gtsam/base/timing.h> | ||||
|  |  | |||
|  | @ -14,6 +14,7 @@ | |||
|  * @brief   A visualSLAM example for the structure-from-motion problem on a simulated dataset | ||||
|  * This version uses iSAM to solve the problem incrementally | ||||
|  * @author  Duy-Nguyen Ta | ||||
|  * @author  Frank Dellaert | ||||
|  */ | ||||
| 
 | ||||
| /**
 | ||||
|  | @ -61,7 +62,8 @@ int main(int argc, char* argv[]) { | |||
|   Cal3_S2::shared_ptr K(new Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)); | ||||
| 
 | ||||
|   // Define the camera observation noise model
 | ||||
|   noiseModel::Isotropic::shared_ptr measurementNoise = noiseModel::Isotropic::Sigma(2, 1.0); // one pixel in u and v
 | ||||
|   noiseModel::Isotropic::shared_ptr noise = //
 | ||||
|       noiseModel::Isotropic::Sigma(2, 1.0); // one pixel in u and v
 | ||||
| 
 | ||||
|   // Create the set of ground-truth landmarks
 | ||||
|   vector<Point3> points = createPoints(); | ||||
|  | @ -69,7 +71,8 @@ int main(int argc, char* argv[]) { | |||
|   // Create the set of ground-truth poses
 | ||||
|   vector<Pose3> poses = createPoses(); | ||||
| 
 | ||||
|   // Create a NonlinearISAM object which will relinearize and reorder the variables every "relinearizeInterval" updates
 | ||||
|   // Create a NonlinearISAM object which will relinearize and reorder the variables
 | ||||
|   // every "relinearizeInterval" updates
 | ||||
|   int relinearizeInterval = 3; | ||||
|   NonlinearISAM isam(relinearizeInterval); | ||||
| 
 | ||||
|  | @ -82,32 +85,44 @@ int main(int argc, char* argv[]) { | |||
| 
 | ||||
|     // Add factors for each landmark observation
 | ||||
|     for (size_t j = 0; j < points.size(); ++j) { | ||||
|       // Create ground truth measurement
 | ||||
|       SimpleCamera camera(poses[i], *K); | ||||
|       Point2 measurement = camera.project(points[j]); | ||||
|       graph.push_back(GenericProjectionFactor<Pose3, Point3, Cal3_S2>(measurement, measurementNoise, Symbol('x', i), Symbol('l', j), K)); | ||||
|       // Add measurement
 | ||||
|       graph.add( | ||||
|           GenericProjectionFactor<Pose3, Point3, Cal3_S2>(measurement, noise, | ||||
|               Symbol('x', i), Symbol('l', j), K)); | ||||
|     } | ||||
| 
 | ||||
|     // Add an initial guess for the current pose
 | ||||
|     // Intentionally initialize the variables off from the ground truth
 | ||||
|     initialEstimate.insert(Symbol('x', i), poses[i].compose(Pose3(Rot3::rodriguez(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20)))); | ||||
|     Pose3 noise(Rot3::rodriguez(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20)); | ||||
|     Pose3 initial_xi = poses[i].compose(noise); | ||||
| 
 | ||||
|     // Add an initial guess for the current pose
 | ||||
|     initialEstimate.insert(Symbol('x', i), initial_xi); | ||||
| 
 | ||||
|     // If this is the first iteration, add a prior on the first pose to set the coordinate frame
 | ||||
|     // and a prior on the first landmark to set the scale
 | ||||
|     // Also, as iSAM solves incrementally, we must wait until each is observed at least twice before
 | ||||
|     // adding it to iSAM.
 | ||||
|     if (i == 0) { | ||||
|       // Add a prior on pose x0
 | ||||
|       noiseModel::Diagonal::shared_ptr poseNoise = noiseModel::Diagonal::Sigmas((Vector(6) << Vector3::Constant(0.3), Vector3::Constant(0.1))); // 30cm std on x,y,z 0.1 rad on roll,pitch,yaw
 | ||||
|       graph.push_back(PriorFactor<Pose3>(Symbol('x', 0), poses[0], poseNoise)); | ||||
|       // Add a prior on pose x0, with 30cm std on x,y,z 0.1 rad on roll,pitch,yaw
 | ||||
|       noiseModel::Diagonal::shared_ptr poseNoise = noiseModel::Diagonal::Sigmas( | ||||
|           (Vector(6) << Vector3::Constant(0.3), Vector3::Constant(0.1))); | ||||
|       graph.add(PriorFactor<Pose3>(Symbol('x', 0), poses[0], poseNoise)); | ||||
| 
 | ||||
|       // Add a prior on landmark l0
 | ||||
|       noiseModel::Isotropic::shared_ptr pointNoise = noiseModel::Isotropic::Sigma(3, 0.1); | ||||
|       graph.push_back(PriorFactor<Point3>(Symbol('l', 0), points[0], pointNoise)); // add directly to graph
 | ||||
|       noiseModel::Isotropic::shared_ptr pointNoise = | ||||
|           noiseModel::Isotropic::Sigma(3, 0.1); | ||||
|       graph.add(PriorFactor<Point3>(Symbol('l', 0), points[0], pointNoise)); | ||||
| 
 | ||||
|       // Add initial guesses to all observed landmarks
 | ||||
|       Point3 noise(-0.25, 0.20, 0.15); | ||||
|       for (size_t j = 0; j < points.size(); ++j) { | ||||
|         // Intentionally initialize the variables off from the ground truth
 | ||||
|       for (size_t j = 0; j < points.size(); ++j) | ||||
|         initialEstimate.insert(Symbol('l', j), points[j].compose(Point3(-0.25, 0.20, 0.15))); | ||||
|         Point3 initial_lj = points[j].compose(noise); | ||||
|         initialEstimate.insert(Symbol('l', j), initial_lj); | ||||
|       } | ||||
| 
 | ||||
|     } else { | ||||
|       // Update iSAM with the new factors
 | ||||
|  |  | |||
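The restructured loop above feeds new factors and initial guesses into a NonlinearISAM object every iteration. A short sketch of that update pattern in isolation (a hypothetical helper, assuming the same GTSAM headers as the example):

#include <gtsam/nonlinear/NonlinearISAM.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>

using namespace gtsam;

// Hypothetical helper illustrating one incremental step
void isamStep(NonlinearISAM& isam, const NonlinearFactorGraph& newFactors,
              const Values& newEstimates) {
  // Each update folds the new factors and initial guesses into the solution;
  // relinearization and reordering happen every "relinearizeInterval" updates,
  // as configured when the NonlinearISAM object is constructed.
  isam.update(newFactors, newEstimates);

  // The current estimate can be queried after every update
  Values currentEstimate = isam.estimate();
  currentEstimate.print("Current estimate:\n");
}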
|  | @ -4,14 +4,10 @@ | |||
| ## # The following are required to uses Dart and the Cdash dashboard | ||||
| ##   ENABLE_TESTING() | ||||
| ##   INCLUDE(CTest) | ||||
| set(CTEST_PROJECT_NAME "Eigen") | ||||
| set(CTEST_PROJECT_NAME "Eigen3.2") | ||||
| set(CTEST_NIGHTLY_START_TIME "00:00:00 UTC") | ||||
| 
 | ||||
| set(CTEST_DROP_METHOD "http") | ||||
| set(CTEST_DROP_SITE "manao.inria.fr") | ||||
| set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen") | ||||
| set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen3.2") | ||||
| set(CTEST_DROP_SITE_CDASH TRUE) | ||||
| set(CTEST_PROJECT_SUBPROJECTS | ||||
| Official | ||||
| Unsupported | ||||
| ) | ||||
|  |  | |||
|  | @ -95,7 +95,7 @@ | |||
|     extern "C" { | ||||
|       // In theory we should only include immintrin.h and not the other *mmintrin.h header files directly. | ||||
|       // Doing so triggers some issues with ICC. However, old gcc versions seem to not have this file, thus: | ||||
|       #ifdef __INTEL_COMPILER | ||||
|       #if defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 1110 | ||||
|         #include <immintrin.h> | ||||
|       #else | ||||
|         #include <emmintrin.h> | ||||
|  | @ -165,7 +165,7 @@ | |||
| #endif | ||||
| 
 | ||||
| // required for __cpuid, needs to be included after cmath | ||||
| #if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64)) | ||||
| #if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64)) && (!defined(_WIN32_WCE)) | ||||
|   #include <intrin.h> | ||||
| #endif | ||||
| 
 | ||||
|  |  | |||
|  | @ -274,30 +274,13 @@ template<> struct ldlt_inplace<Lower> | |||
|       return true; | ||||
|     } | ||||
| 
 | ||||
|     RealScalar cutoff(0), biggest_in_corner; | ||||
| 
 | ||||
|     for (Index k = 0; k < size; ++k) | ||||
|     { | ||||
|       // Find largest diagonal element
 | ||||
|       Index index_of_biggest_in_corner; | ||||
|       biggest_in_corner = mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner); | ||||
|       mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner); | ||||
|       index_of_biggest_in_corner += k; | ||||
| 
 | ||||
|       if(k == 0) | ||||
|       { | ||||
|         // The biggest overall is the point of reference to which further diagonals
 | ||||
|         // are compared; if any diagonal is negligible compared
 | ||||
|         // to the largest overall, the algorithm bails.
 | ||||
|         cutoff = abs(NumTraits<Scalar>::epsilon() * biggest_in_corner); | ||||
|       } | ||||
| 
 | ||||
|       // Finish early if the matrix is not full rank.
 | ||||
|       if(biggest_in_corner < cutoff) | ||||
|       { | ||||
|         for(Index i = k; i < size; i++) transpositions.coeffRef(i) = i; | ||||
|         break; | ||||
|       } | ||||
| 
 | ||||
|       transpositions.coeffRef(k) = index_of_biggest_in_corner; | ||||
|       if(k != index_of_biggest_in_corner) | ||||
|       { | ||||
|  | @ -328,15 +311,20 @@ template<> struct ldlt_inplace<Lower> | |||
| 
 | ||||
|       if(k>0) | ||||
|       { | ||||
|         temp.head(k) = mat.diagonal().head(k).asDiagonal() * A10.adjoint(); | ||||
|         temp.head(k) = mat.diagonal().real().head(k).asDiagonal() * A10.adjoint(); | ||||
|         mat.coeffRef(k,k) -= (A10 * temp.head(k)).value(); | ||||
|         if(rs>0) | ||||
|           A21.noalias() -= A20 * temp.head(k); | ||||
|       } | ||||
|       if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff)) | ||||
|         A21 /= mat.coeffRef(k,k); | ||||
|        | ||||
|       // In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot
 | ||||
|       // was smaller than the cutoff value. However, since LDLT is not rank-revealing
 | ||||
|       // we should only make sure we do not introduce INF or NaN values.
 | ||||
|       // LAPACK also uses 0 as the cutoff value.
 | ||||
|       RealScalar realAkk = numext::real(mat.coeffRef(k,k)); | ||||
|       if((rs>0) && (abs(realAkk) > RealScalar(0))) | ||||
|         A21 /= realAkk; | ||||
| 
 | ||||
|       if (sign == PositiveSemiDef) { | ||||
|         if (realAkk < 0) sign = Indefinite; | ||||
|       } else if (sign == NegativeSemiDef) { | ||||
|  | @ -516,9 +504,15 @@ struct solve_retval<LDLT<_MatrixType,_UpLo>, Rhs> | |||
|     typedef typename LDLTType::MatrixType MatrixType; | ||||
|     typedef typename LDLTType::Scalar Scalar; | ||||
|     typedef typename LDLTType::RealScalar RealScalar; | ||||
|     const Diagonal<const MatrixType> vectorD = dec().vectorD(); | ||||
|     RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() * NumTraits<Scalar>::epsilon(), | ||||
| 				 RealScalar(1) / NumTraits<RealScalar>::highest()); // motivated by LAPACK's xGELSS
 | ||||
|     const typename Diagonal<const MatrixType>::RealReturnType vectorD(dec().vectorD()); | ||||
|     // In some previous versions, tolerance was set to the max of 1/highest and the maximal diagonal entry * epsilon
 | ||||
|     // as motivated by LAPACK's xGELSS:
 | ||||
|     // RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() *NumTraits<RealScalar>::epsilon(),RealScalar(1) / NumTraits<RealScalar>::highest());
 | ||||
|     // However, LDLT is not rank-revealing, and so adjusting the tolerance with respect to the highest
 | ||||
|     // diagonal element is not well justified and leads to numerical issues in some cases.
 | ||||
|     // Moreover, Lapack's xSYTRS routines use 0 for the tolerance.
 | ||||
|     RealScalar tolerance = RealScalar(1) / NumTraits<RealScalar>::highest(); | ||||
|      | ||||
|     for (Index i = 0; i < vectorD.size(); ++i) { | ||||
|       if(abs(vectorD(i)) > tolerance) | ||||
|         dst.row(i) /= vectorD(i); | ||||
|  | @ -576,7 +570,7 @@ MatrixType LDLT<MatrixType,_UpLo>::reconstructedMatrix() const | |||
|   // L^* P
 | ||||
|   res = matrixU() * res; | ||||
|   // D(L^*P)
 | ||||
|   res = vectorD().asDiagonal() * res; | ||||
|   res = vectorD().real().asDiagonal() * res; | ||||
|   // L(DL^*P)
 | ||||
|   res = matrixL() * res; | ||||
|   // P^T (LDL^*P)
 | ||||
|  |  | |||
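The comments in the hunks above stress that LDLT is not rank-revealing and that the pivot cutoff is now effectively zero. For context, a minimal sketch of how the decomposition is typically used (illustration only, standard Eigen API):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d A;
  A << 4, 1, 0,
       1, 3, 1,
       0, 1, 2;                       // symmetric positive definite
  Eigen::Vector3d b(1, 2, 3);

  Eigen::LDLT<Eigen::Matrix3d> ldlt(A);
  Eigen::Vector3d x = ldlt.solve(b);  // goes through the solve path patched above

  // As the comments above note, LDLT is not rank-revealing; for (near-)singular
  // matrices a rank-revealing decomposition is the safer choice.
  std::cout << "residual: " << (A * x - b).norm() << std::endl;
  return 0;
}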
|  | @ -81,7 +81,7 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprTyp | |||
|                        && (InnerStrideAtCompileTime == 1) | ||||
|                         ? PacketAccessBit : 0, | ||||
|     MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16) == 0)) ? AlignedBit : 0, | ||||
|     FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0, | ||||
|     FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (traits<XprType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0, | ||||
|     FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0, | ||||
|     FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0, | ||||
|     Flags0 = traits<XprType>::Flags & ( (HereditaryBits & ~RowMajorBit) | | ||||
|  |  | |||
|  | @ -47,6 +47,17 @@ struct CommaInitializer : | |||
|     m_xpr.block(0, 0, other.rows(), other.cols()) = other; | ||||
|   } | ||||
| 
 | ||||
|   /* Copy/Move constructor which transfers ownership. This is crucial in 
 | ||||
|    * absence of return value optimization to avoid assertions during destruction. */ | ||||
|   // FIXME in C++11 mode this could be replaced by a proper RValue constructor
 | ||||
|   inline CommaInitializer(const CommaInitializer& o) | ||||
|   : m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) { | ||||
|     // Mark original object as finished. In absence of R-value references we need to const_cast:
 | ||||
|     const_cast<CommaInitializer&>(o).m_row = m_xpr.rows(); | ||||
|     const_cast<CommaInitializer&>(o).m_col = m_xpr.cols(); | ||||
|     const_cast<CommaInitializer&>(o).m_currentBlockRows = 0; | ||||
|   } | ||||
| 
 | ||||
|   /* inserts a scalar value in the target matrix */ | ||||
|   CommaInitializer& operator,(const Scalar& s) | ||||
|   { | ||||
|  |  | |||
|  | @ -0,0 +1,154 @@ | |||
| // This file is part of Eigen, a lightweight C++ template library
 | ||||
| // for linear algebra.
 | ||||
| //
 | ||||
| // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
 | ||||
| // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
 | ||||
| //
 | ||||
| // This Source Code Form is subject to the terms of the Mozilla
 | ||||
| // Public License v. 2.0. If a copy of the MPL was not distributed
 | ||||
| // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
 | ||||
| 
 | ||||
| #ifndef EIGEN_COMMAINITIALIZER_H | ||||
| #define EIGEN_COMMAINITIALIZER_H | ||||
| 
 | ||||
| namespace Eigen {  | ||||
| 
 | ||||
| /** \class CommaInitializer
 | ||||
|   * \ingroup Core_Module | ||||
|   * | ||||
|   * \brief Helper class used by the comma initializer operator | ||||
|   * | ||||
|   * This class is internally used to implement the comma initializer feature. It is | ||||
|   * the return type of MatrixBase::operator<<, and most of the time this is the only | ||||
|   * way it is used. | ||||
|   * | ||||
|   * \sa \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished() | ||||
|   */ | ||||
| template<typename XprType> | ||||
| struct CommaInitializer | ||||
| { | ||||
|   typedef typename XprType::Scalar Scalar; | ||||
|   typedef typename XprType::Index Index; | ||||
| 
 | ||||
|   inline CommaInitializer(XprType& xpr, const Scalar& s) | ||||
|     : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1) | ||||
|   { | ||||
|     m_xpr.coeffRef(0,0) = s; | ||||
|   } | ||||
| 
 | ||||
|   template<typename OtherDerived> | ||||
|   inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other) | ||||
|     : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows()) | ||||
|   { | ||||
|     m_xpr.block(0, 0, other.rows(), other.cols()) = other; | ||||
|   } | ||||
| 
 | ||||
|   /* Copy/Move constructor which transfers ownership. This is crucial in 
 | ||||
|    * absence of return value optimization to avoid assertions during destruction. */ | ||||
|   // FIXME in C++11 mode this could be replaced by a proper RValue constructor
 | ||||
|   inline CommaInitializer(const CommaInitializer& o) | ||||
|   : m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) { | ||||
|     // Mark original object as finished. In absence of R-value references we need to const_cast:
 | ||||
|     const_cast<CommaInitializer&>(o).m_row = m_xpr.rows(); | ||||
|     const_cast<CommaInitializer&>(o).m_col = m_xpr.cols(); | ||||
|     const_cast<CommaInitializer&>(o).m_currentBlockRows = 0; | ||||
|   } | ||||
| 
 | ||||
|   /* inserts a scalar value in the target matrix */ | ||||
|   CommaInitializer& operator,(const Scalar& s) | ||||
|   { | ||||
|     if (m_col==m_xpr.cols()) | ||||
|     { | ||||
|       m_row+=m_currentBlockRows; | ||||
|       m_col = 0; | ||||
|       m_currentBlockRows = 1; | ||||
|       eigen_assert(m_row<m_xpr.rows() | ||||
|         && "Too many rows passed to comma initializer (operator<<)"); | ||||
|     } | ||||
|     eigen_assert(m_col<m_xpr.cols() | ||||
|       && "Too many coefficients passed to comma initializer (operator<<)"); | ||||
|     eigen_assert(m_currentBlockRows==1); | ||||
|     m_xpr.coeffRef(m_row, m_col++) = s; | ||||
|     return *this; | ||||
|   } | ||||
| 
 | ||||
|   /* inserts a matrix expression in the target matrix */ | ||||
|   template<typename OtherDerived> | ||||
|   CommaInitializer& operator,(const DenseBase<OtherDerived>& other) | ||||
|   { | ||||
|     if(other.cols()==0 || other.rows()==0) | ||||
|       return *this; | ||||
|     if (m_col==m_xpr.cols()) | ||||
|     { | ||||
|       m_row+=m_currentBlockRows; | ||||
|       m_col = 0; | ||||
|       m_currentBlockRows = other.rows(); | ||||
|       eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows() | ||||
|         && "Too many rows passed to comma initializer (operator<<)"); | ||||
|     } | ||||
|     eigen_assert(m_col<m_xpr.cols() | ||||
|       && "Too many coefficients passed to comma initializer (operator<<)"); | ||||
|     eigen_assert(m_currentBlockRows==other.rows()); | ||||
|     if (OtherDerived::SizeAtCompileTime != Dynamic) | ||||
|       m_xpr.template block<OtherDerived::RowsAtCompileTime != Dynamic ? OtherDerived::RowsAtCompileTime : 1, | ||||
|                               OtherDerived::ColsAtCompileTime != Dynamic ? OtherDerived::ColsAtCompileTime : 1> | ||||
|                     (m_row, m_col) = other; | ||||
|     else | ||||
|       m_xpr.block(m_row, m_col, other.rows(), other.cols()) = other; | ||||
|     m_col += other.cols(); | ||||
|     return *this; | ||||
|   } | ||||
| 
 | ||||
|   inline ~CommaInitializer() | ||||
|   { | ||||
|     eigen_assert((m_row+m_currentBlockRows) == m_xpr.rows() | ||||
|          && m_col == m_xpr.cols() | ||||
|          && "Too few coefficients passed to comma initializer (operator<<)"); | ||||
|   } | ||||
| 
 | ||||
|   /** \returns the built matrix once all its coefficients have been set.
 | ||||
|     * Calling finished is 100% optional. Its purpose is to write expressions | ||||
|     * like this: | ||||
|     * \code | ||||
|     * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished()); | ||||
|     * \endcode | ||||
|     */ | ||||
|   inline XprType& finished() { return m_xpr; } | ||||
| 
 | ||||
|   XprType& m_xpr;   // target expression
 | ||||
|   Index m_row;              // current row id
 | ||||
|   Index m_col;              // current col id
 | ||||
|   Index m_currentBlockRows; // current block height
 | ||||
| }; | ||||
| 
 | ||||
| /** \anchor MatrixBaseCommaInitRef
 | ||||
|   * Convenient operator to set the coefficients of a matrix. | ||||
|   * | ||||
|   * The coefficients must be provided in a row major order and exactly match | ||||
|   * the size of the matrix. Otherwise an assertion is raised. | ||||
|   * | ||||
|   * Example: \include MatrixBase_set.cpp | ||||
|   * Output: \verbinclude MatrixBase_set.out | ||||
|   *  | ||||
|   * \note According to the C++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order. | ||||
|   * | ||||
|   * \sa CommaInitializer::finished(), class CommaInitializer | ||||
|   */ | ||||
| template<typename Derived> | ||||
| inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s) | ||||
| { | ||||
|   return CommaInitializer<Derived>(*static_cast<Derived*>(this), s); | ||||
| } | ||||
| 
 | ||||
| /** \sa operator<<(const Scalar&) */ | ||||
| template<typename Derived> | ||||
| template<typename OtherDerived> | ||||
| inline CommaInitializer<Derived> | ||||
| DenseBase<Derived>::operator<<(const DenseBase<OtherDerived>& other) | ||||
| { | ||||
|   return CommaInitializer<Derived>(*static_cast<Derived *>(this), other); | ||||
| } | ||||
| 
 | ||||
| } // end namespace Eigen
 | ||||
| 
 | ||||
| #endif // EIGEN_COMMAINITIALIZER_H
 | ||||
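As a usage illustration of the comma initializer defined above (a sketch against the standard Eigen API, not part of the commit):

#include <Eigen/Dense>
#include <iostream>

int main() {
  // operator<< returns a CommaInitializer; its destructor asserts that the matrix was filled completely
  Eigen::Matrix3f m;
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;

  // finished() returns the built matrix, so the initializer can be used inside a larger expression
  Eigen::Vector3f v = (Eigen::Vector3f() << 1, 2, 3).finished();

  std::cout << m * v << std::endl;
  return 0;
}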
|  | @ -24,6 +24,14 @@ namespace internal { | |||
| 
 | ||||
| struct constructor_without_unaligned_array_assert {}; | ||||
| 
 | ||||
| template<typename T, int Size> void check_static_allocation_size() | ||||
| { | ||||
|   // if EIGEN_STACK_ALLOCATION_LIMIT is defined to 0, then no limit
 | ||||
|   #if EIGEN_STACK_ALLOCATION_LIMIT | ||||
|   EIGEN_STATIC_ASSERT(Size * sizeof(T) <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG); | ||||
|   #endif | ||||
| } | ||||
| 
 | ||||
| /** \internal
 | ||||
|   * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned: | ||||
|   * to 16 bytes boundary if the total size is a multiple of 16 bytes. | ||||
|  | @ -38,12 +46,12 @@ struct plain_array | |||
| 
 | ||||
|   plain_array()  | ||||
|   {  | ||||
|     EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG); | ||||
|     check_static_allocation_size<T,Size>(); | ||||
|   } | ||||
| 
 | ||||
|   plain_array(constructor_without_unaligned_array_assert)  | ||||
|   {  | ||||
|     EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG); | ||||
|     check_static_allocation_size<T,Size>(); | ||||
|   } | ||||
| }; | ||||
| 
 | ||||
|  | @ -76,12 +84,12 @@ struct plain_array<T, Size, MatrixOrArrayOptions, 16> | |||
|   plain_array()  | ||||
|   {  | ||||
|     EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf); | ||||
|     EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG); | ||||
|     check_static_allocation_size<T,Size>(); | ||||
|   } | ||||
| 
 | ||||
|   plain_array(constructor_without_unaligned_array_assert)  | ||||
|   {  | ||||
|     EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG); | ||||
|     check_static_allocation_size<T,Size>(); | ||||
|   } | ||||
| }; | ||||
| 
 | ||||
|  |  | |||
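The new check_static_allocation_size() above keys off EIGEN_STACK_ALLOCATION_LIMIT, and per its comment a value of 0 disables the limit. A small sketch of overriding the macro before including Eigen (illustration only; the 200x200 size is an arbitrary choice that exceeds the 131072-byte default set in the Macros.h hunk further below):

// Define before any Eigen header so the default from Macros.h is not used
#define EIGEN_STACK_ALLOCATION_LIMIT 0   // 0 disables the stack-size check, per the comment above
#include <Eigen/Dense>

int main() {
  // 200*200*sizeof(double) = 320000 bytes, which would exceed the default limit
  // and trigger OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG
  Eigen::Matrix<double, 200, 200> big;
  big.setZero();
  return static_cast<int>(big(0, 0));
}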
|  | @ -589,7 +589,7 @@ struct linspaced_op_impl<Scalar,true> | |||
| 
 | ||||
|   template<typename Index> | ||||
|   EIGEN_STRONG_INLINE const Packet packetOp(Index i) const | ||||
|   { return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1<Packet>(i),m_interPacket))); } | ||||
|   { return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1<Packet>(Scalar(i)),m_interPacket))); } | ||||
| 
 | ||||
|   const Scalar m_low; | ||||
|   const Scalar m_step; | ||||
|  | @ -609,7 +609,7 @@ template <typename Scalar, bool RandomAccess> struct functor_traits< linspaced_o | |||
| template <typename Scalar, bool RandomAccess> struct linspaced_op | ||||
| { | ||||
|   typedef typename packet_traits<Scalar>::type Packet; | ||||
|   linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/(num_steps-1))) {} | ||||
|   linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1))) {} | ||||
| 
 | ||||
|   template<typename Index> | ||||
|   EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); } | ||||
|  |  | |||
|  | @ -237,6 +237,8 @@ template<typename Derived> class MapBase<Derived, WriteAccessors> | |||
|     using Base::Base::operator=; | ||||
| }; | ||||
| 
 | ||||
| #undef EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS | ||||
| 
 | ||||
| } // end namespace Eigen
 | ||||
| 
 | ||||
| #endif // EIGEN_MAPBASE_H
 | ||||
|  |  | |||
|  | @ -101,7 +101,7 @@ struct traits<Ref<_PlainObjectType, _Options, _StrideType> > | |||
|   template<typename Derived> struct match { | ||||
|     enum { | ||||
|       HasDirectAccess = internal::has_direct_access<Derived>::ret, | ||||
|       StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)), | ||||
|       StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)), | ||||
|       InnerStrideMatch = int(StrideType::InnerStrideAtCompileTime)==int(Dynamic) | ||||
|                       || int(StrideType::InnerStrideAtCompileTime)==int(Derived::InnerStrideAtCompileTime) | ||||
|                       || (int(StrideType::InnerStrideAtCompileTime)==0 && int(Derived::InnerStrideAtCompileTime)==1), | ||||
|  | @ -172,6 +172,10 @@ protected: | |||
|     } | ||||
|     else | ||||
|       ::new (static_cast<Base*>(this)) Base(expr.data(), expr.rows(), expr.cols()); | ||||
|      | ||||
|     if(Expression::IsVectorAtCompileTime && (!PlainObjectType::IsVectorAtCompileTime) && ((Expression::Flags&RowMajorBit)!=(PlainObjectType::Flags&RowMajorBit))) | ||||
|       ::new (&m_stride) StrideBase(expr.innerStride(), StrideType::InnerStrideAtCompileTime==0?0:1); | ||||
|     else | ||||
|       ::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(), | ||||
|                                    StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride());     | ||||
|   } | ||||
|  |  | |||
|  | @ -278,21 +278,21 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView | |||
| 
 | ||||
|     /** Efficient triangular matrix times vector/matrix product */ | ||||
|     template<typename OtherDerived> | ||||
|     TriangularProduct<Mode,true,MatrixType,false,OtherDerived, OtherDerived::IsVectorAtCompileTime> | ||||
|     TriangularProduct<Mode, true, MatrixType, false, OtherDerived, OtherDerived::ColsAtCompileTime==1> | ||||
|     operator*(const MatrixBase<OtherDerived>& rhs) const | ||||
|     { | ||||
|       return TriangularProduct | ||||
|               <Mode,true,MatrixType,false,OtherDerived,OtherDerived::IsVectorAtCompileTime> | ||||
|               <Mode, true, MatrixType, false, OtherDerived, OtherDerived::ColsAtCompileTime==1> | ||||
|               (m_matrix, rhs.derived()); | ||||
|     } | ||||
| 
 | ||||
|     /** Efficient vector/matrix times triangular matrix product */ | ||||
|     template<typename OtherDerived> friend | ||||
|     TriangularProduct<Mode,false,OtherDerived,OtherDerived::IsVectorAtCompileTime,MatrixType,false> | ||||
|     TriangularProduct<Mode, false, OtherDerived, OtherDerived::RowsAtCompileTime==1, MatrixType, false> | ||||
|     operator*(const MatrixBase<OtherDerived>& lhs, const TriangularView& rhs) | ||||
|     { | ||||
|       return TriangularProduct | ||||
|               <Mode,false,OtherDerived,OtherDerived::IsVectorAtCompileTime,MatrixType,false> | ||||
|               <Mode, false, OtherDerived, OtherDerived::RowsAtCompileTime==1, MatrixType, false> | ||||
|               (lhs.derived(),rhs.m_matrix); | ||||
|     } | ||||
| 
 | ||||
|  |  | |||
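The TriangularView operator* overloads above now dispatch on ColsAtCompileTime==1 / RowsAtCompileTime==1 rather than IsVectorAtCompileTime. A small sketch of the two product forms they cover (illustration only, standard Eigen API):

#include <Eigen/Dense>

int main() {
  Eigen::Matrix3d A = Eigen::Matrix3d::Random();
  Eigen::Vector3d v = Eigen::Vector3d::Random();

  // Triangular matrix times a column vector: rhs has ColsAtCompileTime == 1
  Eigen::Vector3d y = A.triangularView<Eigen::Lower>() * v;

  // Row vector times a triangular matrix: lhs has RowsAtCompileTime == 1
  Eigen::RowVector3d z = v.transpose() * A.triangularView<Eigen::Upper>();

  return static_cast<int>(y(0) + z(0));
}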
|  | @ -54,8 +54,25 @@ | |||
| #endif | ||||
| 
 | ||||
| #if defined EIGEN_USE_MKL | ||||
| 
 | ||||
| #   include <mkl.h>  | ||||
| /*Check IMKL version for compatibility: < 10.3 is not usable with Eigen*/ | ||||
| #   ifndef INTEL_MKL_VERSION | ||||
| #       undef EIGEN_USE_MKL /* INTEL_MKL_VERSION is not even defined on older versions */ | ||||
| #   elif INTEL_MKL_VERSION < 100305    /* the intel-mkl-103-release-notes say this was when the lapacke.h interface was added*/ | ||||
| #       undef EIGEN_USE_MKL | ||||
| #   endif | ||||
| #   ifndef EIGEN_USE_MKL | ||||
|     /*If the MKL version is too old, undef everything*/ | ||||
| #       undef   EIGEN_USE_MKL_ALL | ||||
| #       undef   EIGEN_USE_BLAS | ||||
| #       undef   EIGEN_USE_LAPACKE | ||||
| #       undef   EIGEN_USE_MKL_VML | ||||
| #       undef   EIGEN_USE_LAPACKE_STRICT | ||||
| #       undef   EIGEN_USE_LAPACKE | ||||
| #   endif | ||||
| #endif | ||||
| 
 | ||||
| #if defined EIGEN_USE_MKL | ||||
| #include <mkl_lapacke.h> | ||||
| #define EIGEN_MKL_VML_THRESHOLD 128 | ||||
| 
 | ||||
|  |  | |||
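The new guard silently drops the MKL backends when an MKL older than 10.3 is detected. A sketch of how a project would normally opt in, assuming MKL headers and libraries are already on the build paths (the build line is illustrative):

    // Illustrative build line:  g++ -O2 app.cpp -I"$MKLROOT/include" <MKL link flags>
    #define EIGEN_USE_MKL_ALL   // request the BLAS/LAPACK/VML backends; the guard above
                                // undefines it again if the detected MKL is older than 10.3
    #include <Eigen/Dense>

    int main() {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(256, 256);
      Eigen::MatrixXd B = A * A.transpose();   // large GEMM, eligible for the MKL backend
      return B.rows() == 256 ? 0 : 1;
    }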
|  | @ -13,7 +13,7 @@ | |||
| 
 | ||||
| #define EIGEN_WORLD_VERSION 3 | ||||
| #define EIGEN_MAJOR_VERSION 2 | ||||
| #define EIGEN_MINOR_VERSION 1 | ||||
| #define EIGEN_MINOR_VERSION 2 | ||||
| 
 | ||||
| #define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \ | ||||
|                                       (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \ | ||||
|  | @ -289,7 +289,8 @@ namespace Eigen { | |||
| #endif | ||||
| 
 | ||||
| #ifndef EIGEN_STACK_ALLOCATION_LIMIT | ||||
| #define EIGEN_STACK_ALLOCATION_LIMIT 20000 | ||||
| // 131072 == 128 KB
 | ||||
| #define EIGEN_STACK_ALLOCATION_LIMIT 131072 | ||||
| #endif | ||||
| 
 | ||||
| #ifndef EIGEN_DEFAULT_IO_FORMAT | ||||
|  |  | |||
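EIGEN_STACK_ALLOCATION_LIMIT caps, in bytes, the size of temporaries Eigen may place on the stack; the default is now 131072. It can still be overridden, provided the definition precedes every Eigen include. A minimal sketch with an arbitrary smaller cap:

    // Must appear before the first Eigen include; the value is a byte count for
    // stack-allocated temporaries, larger ones fall back to the heap.
    #define EIGEN_STACK_ALLOCATION_LIMIT 16384
    #include <Eigen/Dense>

    int main() { Eigen::Matrix<double, 32, 32> m; m.setZero(); return 0; }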
|  | @ -272,12 +272,12 @@ inline void* aligned_realloc(void *ptr, size_t new_size, size_t old_size) | |||
|   // The defined(_mm_free) is just here to verify that this MSVC version
 | ||||
|   // implements _mm_malloc/_mm_free based on the corresponding _aligned_
 | ||||
|   // functions. This may not always be the case and we just try to be safe.
 | ||||
|   #if defined(_MSC_VER) && defined(_mm_free) | ||||
|   #if defined(_MSC_VER) && (!defined(_WIN32_WCE)) && defined(_mm_free) | ||||
|     result = _aligned_realloc(ptr,new_size,16); | ||||
|   #else | ||||
|     result = generic_aligned_realloc(ptr,new_size,old_size); | ||||
|   #endif | ||||
| #elif defined(_MSC_VER) | ||||
| #elif defined(_MSC_VER) && (!defined(_WIN32_WCE)) | ||||
|   result = _aligned_realloc(ptr,new_size,16); | ||||
| #else | ||||
|   result = handmade_aligned_realloc(ptr,new_size,old_size); | ||||
|  | @ -630,6 +630,8 @@ template<typename T> class aligned_stack_memory_handler | |||
|       } \ | ||||
|       void operator delete(void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \ | ||||
|       void operator delete[](void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \ | ||||
|       void operator delete(void * ptr, std::size_t /* sz */) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \ | ||||
|       void operator delete[](void * ptr, std::size_t /* sz */) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \ | ||||
|       /* in-place new and delete. since (at least afaik) there is no actual   */ \ | ||||
|       /* memory allocated we can safely let the default implementation handle */ \ | ||||
|       /* this particular case. */ \ | ||||
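The two sized operator delete overloads above complete what EIGEN_MAKE_ALIGNED_OPERATOR_NEW provides, so C++14-style sized deallocation still routes through the aligned free. A minimal sketch of the macro's usual role; the Camera struct is illustrative:

    #include <Eigen/Dense>

    struct Camera {
      Eigen::Matrix4d pose;              // fixed-size, alignment-sensitive member
      double focal;
      EIGEN_MAKE_ALIGNED_OPERATOR_NEW    // new/delete (including the sized forms above)
    };                                   // now allocate suitably aligned storage

    int main() {
      Camera* c = new Camera();          // goes through the aligned operator new
      c->pose.setIdentity();
      delete c;                          // matched by the aligned operator delete
      return 0;
    }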
|  | @ -777,9 +779,9 @@ namespace internal { | |||
| 
 | ||||
| #ifdef EIGEN_CPUID | ||||
| 
 | ||||
| inline bool cpuid_is_vendor(int abcd[4], const char* vendor) | ||||
| inline bool cpuid_is_vendor(int abcd[4], const int vendor[3]) | ||||
| { | ||||
|   return abcd[1]==(reinterpret_cast<const int*>(vendor))[0] && abcd[3]==(reinterpret_cast<const int*>(vendor))[1] && abcd[2]==(reinterpret_cast<const int*>(vendor))[2]; | ||||
|   return abcd[1]==vendor[0] && abcd[3]==vendor[1] && abcd[2]==vendor[2]; | ||||
| } | ||||
| 
 | ||||
| inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3) | ||||
|  | @ -921,13 +923,16 @@ inline void queryCacheSizes(int& l1, int& l2, int& l3) | |||
| { | ||||
|   #ifdef EIGEN_CPUID | ||||
|   int abcd[4]; | ||||
|   const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e}; | ||||
|   const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163}; | ||||
|   const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574}; // "AMDisbetter!"
 | ||||
| 
 | ||||
|   // identify the CPU vendor
 | ||||
|   EIGEN_CPUID(abcd,0x0,0); | ||||
|   int max_std_funcs = abcd[1]; | ||||
|   if(cpuid_is_vendor(abcd,"GenuineIntel")) | ||||
|   if(cpuid_is_vendor(abcd,GenuineIntel)) | ||||
|     queryCacheSizes_intel(l1,l2,l3,max_std_funcs); | ||||
|   else if(cpuid_is_vendor(abcd,"AuthenticAMD") || cpuid_is_vendor(abcd,"AMDisbetter!")) | ||||
|   else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_)) | ||||
|     queryCacheSizes_amd(l1,l2,l3); | ||||
|   else | ||||
|     // by default let's use Intel's API
 | ||||
|  |  | |||
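Each integer constant above is simply four ASCII characters of the vendor id packed into a little-endian 32-bit word, which is how CPUID reports them in EBX/EDX/ECX. A small check of the first one, "Genu" -> 0x756E6547:

    // 'G','e','n','u' = 0x47,0x65,0x6E,0x75 read as a little-endian 32-bit integer.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      const char text[4] = {'G', 'e', 'n', 'u'};
      std::uint32_t word;
      std::memcpy(&word, text, 4);                          // same packing CPUID uses in EBX
      std::printf("0x%08X\n", static_cast<unsigned>(word)); // 0x756E6547 on little-endian CPUs
      return word == 0x756E6547u ? 0 : 1;
    }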
|  | @ -203,6 +203,8 @@ public: | |||
|   * \li \c Quaternionf for \c float | ||||
|   * \li \c Quaterniond for \c double | ||||
|   * | ||||
|   * \warning Operations interpreting the quaternion as rotation have undefined behavior if the quaternion is not normalized. | ||||
|   * | ||||
|   * \sa  class AngleAxis, class Transform | ||||
|   */ | ||||
| 
 | ||||
|  | @ -344,7 +346,7 @@ class Map<const Quaternion<_Scalar>, _Options > | |||
| 
 | ||||
|     /** Constructs a Mapped Quaternion object from the pointer \a coeffs
 | ||||
|       * | ||||
|       * The pointer \a coeffs must reference the four coeffecients of Quaternion in the following order: | ||||
|       * The pointer \a coeffs must reference the four coefficients of Quaternion in the following order: | ||||
|       * \code *coeffs == {x, y, z, w} \endcode | ||||
|       * | ||||
|       * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */ | ||||
|  | @ -464,7 +466,7 @@ QuaternionBase<Derived>::_transformVector(Vector3 v) const | |||
|     // Note that this algorithm comes from the optimization by hand
 | ||||
|     // of the conversion to a Matrix followed by a Matrix/Vector product.
 | ||||
|     // It appears to be much faster than the common algorithm found
 | ||||
|     // in the litterature (30 versus 39 flops). It also requires two
 | ||||
|     // in the literature (30 versus 39 flops). It also requires two
 | ||||
|     // Vector3 as temporaries.
 | ||||
|     Vector3 uv = this->vec().cross(v); | ||||
|     uv += uv; | ||||
|  | @ -584,7 +586,7 @@ inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Deri | |||
|   //    which yields a singular value problem
 | ||||
|   if (c < Scalar(-1)+NumTraits<Scalar>::dummy_precision()) | ||||
|   { | ||||
|     c = max<Scalar>(c,-1); | ||||
|     c = (max)(c,Scalar(-1)); | ||||
|     Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose(); | ||||
|     JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV); | ||||
|     Vector3 axis = svd.matrixV().col(2); | ||||
|  | @ -667,10 +669,10 @@ QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& oth | |||
| { | ||||
|   using std::acos; | ||||
|   using std::abs; | ||||
|   double d = abs(this->dot(other)); | ||||
|   if (d>=1.0) | ||||
|   Scalar d = abs(this->dot(other)); | ||||
|   if (d>=Scalar(1)) | ||||
|     return Scalar(0); | ||||
|   return static_cast<Scalar>(2 * acos(d)); | ||||
|   return Scalar(2) * acos(d); | ||||
| } | ||||
| 
 | ||||
|   | ||||
|  |  | |||
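A short sketch pulling together the new normalization warning and the corrected Map coefficient order; buf and its values are illustrative:

    #include <Eigen/Geometry>
    using namespace Eigen;

    int main() {
      // Operations that interpret a quaternion as a rotation expect it to be normalized.
      Quaterniond q(0.9, 0.1, 0.2, 0.3);        // constructor order is (w, x, y, z)
      q.normalize();
      Vector3d p = q * Vector3d::UnitX();       // rotate a vector

      // Map order is (x, y, z, w), as the corrected comment above spells out.
      double buf[4] = {0.0, 0.0, 0.0, 1.0};     // identity rotation
      Map<const Quaterniond> qm(buf);
      double ang = q.angularDistance(qm);       // now computed in the Scalar type
      return (p.allFinite() && ang >= 0.0) ? 0 : 1;
    }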
|  | @ -194,9 +194,9 @@ public: | |||
|   /** type of the matrix used to represent the linear part of the transformation */ | ||||
|   typedef Matrix<Scalar,Dim,Dim,Options> LinearMatrixType; | ||||
|   /** type of read/write reference to the linear part of the transformation */ | ||||
|   typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact)> LinearPart; | ||||
|   typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> LinearPart; | ||||
|   /** type of read reference to the linear part of the transformation */ | ||||
|   typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact)> ConstLinearPart; | ||||
|   typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> ConstLinearPart; | ||||
|   /** type of read/write reference to the affine part of the transformation */ | ||||
|   typedef typename internal::conditional<int(Mode)==int(AffineCompact), | ||||
|                               MatrixType&, | ||||
|  |  | |||
|  | @ -113,7 +113,7 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo | |||
|   const Index n = src.cols(); // number of measurements
 | ||||
| 
 | ||||
|   // required for demeaning ...
 | ||||
|   const RealScalar one_over_n = 1 / static_cast<RealScalar>(n); | ||||
|   const RealScalar one_over_n = RealScalar(1) / static_cast<RealScalar>(n); | ||||
| 
 | ||||
|   // computation of mean
 | ||||
|   const VectorType src_mean = src.rowwise().sum() * one_over_n; | ||||
|  | @ -136,16 +136,16 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo | |||
| 
 | ||||
|   // Eq. (39)
 | ||||
|   VectorType S = VectorType::Ones(m); | ||||
|   if (sigma.determinant()<0) S(m-1) = -1; | ||||
|   if (sigma.determinant()<Scalar(0)) S(m-1) = Scalar(-1); | ||||
| 
 | ||||
|   // Eq. (40) and (43)
 | ||||
|   const VectorType& d = svd.singularValues(); | ||||
|   Index rank = 0; for (Index i=0; i<m; ++i) if (!internal::isMuchSmallerThan(d.coeff(i),d.coeff(0))) ++rank; | ||||
|   if (rank == m-1) { | ||||
|     if ( svd.matrixU().determinant() * svd.matrixV().determinant() > 0 ) { | ||||
|     if ( svd.matrixU().determinant() * svd.matrixV().determinant() > Scalar(0) ) { | ||||
|       Rt.block(0,0,m,m).noalias() = svd.matrixU()*svd.matrixV().transpose(); | ||||
|     } else { | ||||
|       const Scalar s = S(m-1); S(m-1) = -1; | ||||
|       const Scalar s = S(m-1); S(m-1) = Scalar(-1); | ||||
|       Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose(); | ||||
|       S(m-1) = s; | ||||
|     } | ||||
|  | @ -156,7 +156,7 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo | |||
|   if (with_scaling) | ||||
|   { | ||||
|     // Eq. (42)
 | ||||
|     const Scalar c = 1/src_var * svd.singularValues().dot(S); | ||||
|     const Scalar c = Scalar(1)/src_var * svd.singularValues().dot(S); | ||||
| 
 | ||||
|     // Eq. (41)
 | ||||
|     Rt.col(m).head(m) = dst_mean; | ||||
|  |  | |||
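The casts above keep umeyama's arithmetic in the matrix scalar type. A typical call, with illustrative point clouds src and dst stored one point per column:

    #include <Eigen/Geometry>
    using namespace Eigen;

    int main() {
      Matrix<double, 3, Dynamic> src = Matrix<double, 3, Dynamic>::Random(3, 10);
      Matrix<double, 3, Dynamic> dst = src;   // dst == src here, so the fit is exact

      // 4x4 homogeneous similarity transform T minimizing ||dst - T*src|| in least squares.
      Matrix4d T = umeyama(src, dst, /*with_scaling=*/true);
      return T.isApprox(Matrix4d::Identity()) ? 0 : 1;
    }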
|  | @ -48,7 +48,7 @@ void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vec | |||
|   typedef typename MatrixType::Index Index; | ||||
|   enum { TFactorSize = MatrixType::ColsAtCompileTime }; | ||||
|   Index nbVecs = vectors.cols(); | ||||
|   Matrix<typename MatrixType::Scalar, TFactorSize, TFactorSize> T(nbVecs,nbVecs); | ||||
|   Matrix<typename MatrixType::Scalar, TFactorSize, TFactorSize, ColMajor> T(nbVecs,nbVecs); | ||||
|   make_block_householder_triangular_factor(T, vectors, hCoeffs); | ||||
| 
 | ||||
|   const TriangularView<const VectorsType, UnitLower>& V(vectors); | ||||
|  |  | |||
|  | @ -61,6 +61,7 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x, | |||
|   VectorType s(n), t(n); | ||||
| 
 | ||||
|   RealScalar tol2 = tol*tol; | ||||
|   RealScalar eps2 = NumTraits<Scalar>::epsilon()*NumTraits<Scalar>::epsilon(); | ||||
|   int i = 0; | ||||
|   int restarts = 0; | ||||
| 
 | ||||
|  | @ -69,7 +70,7 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x, | |||
|     Scalar rho_old = rho; | ||||
| 
 | ||||
|     rho = r0.dot(r); | ||||
|     if (internal::isMuchSmallerThan(rho,r0_sqnorm)) | ||||
|     if (abs(rho) < eps2*r0_sqnorm) | ||||
|     { | ||||
|       // The new residual vector became too orthogonal to the arbitrarily chosen direction r0
 | ||||
|       // Let's restart with a new r0:
 | ||||
|  |  | |||
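The new breakdown test restarts only when rho underflows relative to r0 (below eps^2 * |r0|^2) instead of using isMuchSmallerThan. A minimal solver sketch under an intentionally trivial system; sizes and tolerance are illustrative:

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>
    using namespace Eigen;

    int main() {
      SparseMatrix<double> A(100, 100);
      A.setIdentity();                           // trivially well-conditioned system
      VectorXd b = VectorXd::Ones(100);

      BiCGSTAB<SparseMatrix<double> > solver;
      solver.setTolerance(1e-10);
      solver.compute(A);
      VectorXd x = solver.solve(b);
      return solver.info() == Success ? 0 : 1;
    }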
|  | @ -20,10 +20,11 @@ namespace Eigen { | |||
|   * | ||||
|   * \param MatrixType the type of the matrix of which we are computing the LU decomposition | ||||
|   * | ||||
|   * This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A | ||||
|   * is decomposed as A = PLUQ where L is unit-lower-triangular, U is upper-triangular, and P and Q | ||||
|   * are permutation matrices. This is a rank-revealing LU decomposition. The eigenvalues (diagonal | ||||
|   * coefficients) of U are sorted in such a way that any zeros are at the end. | ||||
|   * This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A is | ||||
|   * decomposed as \f$ A = P^{-1} L U Q^{-1} \f$ where L is unit-lower-triangular, U is | ||||
|   * upper-triangular, and P and Q are permutation matrices. This is a rank-revealing LU | ||||
|   * decomposition. The eigenvalues (diagonal coefficients) of U are sorted in such a way that any | ||||
|   * zeros are at the end. | ||||
|   * | ||||
|   * This decomposition provides the generic approach to solving systems of linear equations, computing | ||||
|   * the rank, invertibility, inverse, kernel, and determinant. | ||||
|  | @ -511,8 +512,8 @@ typename internal::traits<MatrixType>::Scalar FullPivLU<MatrixType>::determinant | |||
| } | ||||
| 
 | ||||
| /** \returns the matrix represented by the decomposition,
 | ||||
|  * i.e., it returns the product: P^{-1} L U Q^{-1}. | ||||
|  * This function is provided for debug purpose. */ | ||||
|  * i.e., it returns the product: \f$ P^{-1} L U Q^{-1} \f$. | ||||
|  * This function is provided for debug purposes. */ | ||||
| template<typename MatrixType> | ||||
| MatrixType FullPivLU<MatrixType>::reconstructedMatrix() const | ||||
| { | ||||
|  |  | |||
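The corrected formula A = P^{-1} L U Q^{-1} can be verified directly through reconstructedMatrix(); a minimal sketch:

    #include <Eigen/Dense>
    using namespace Eigen;

    int main() {
      MatrixXd A = MatrixXd::Random(5, 5);
      FullPivLU<MatrixXd> lu(A);

      // reconstructedMatrix() re-assembles P^{-1} L U Q^{-1}; it should reproduce A.
      MatrixXd Arec = lu.reconstructedMatrix();
      return A.isApprox(Arec) ? 0 : 1;
    }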
|  | @ -109,7 +109,7 @@ class NaturalOrdering | |||
|   * \class COLAMDOrdering | ||||
|   * | ||||
|   * Functor computing the \em column \em approximate \em minimum \em degree ordering  | ||||
|   * The matrix should be in column-major format | ||||
|   * The matrix should be in column-major and \b compressed format (see SparseMatrix::makeCompressed()). | ||||
|   */ | ||||
| template<typename Index> | ||||
| class COLAMDOrdering | ||||
|  | @ -118,10 +118,14 @@ class COLAMDOrdering | |||
|     typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;  | ||||
|     typedef Matrix<Index, Dynamic, 1> IndexVector; | ||||
|      | ||||
|     /** Compute the permutation vector form a sparse matrix */ | ||||
|     /** Compute the permutation vector \a perm from the sparse matrix \a mat
 | ||||
|       * \warning The input sparse matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). | ||||
|       */ | ||||
|     template <typename MatrixType> | ||||
|     void operator() (const MatrixType& mat, PermutationType& perm) | ||||
|     { | ||||
|       eigen_assert(mat.isCompressed() && "COLAMDOrdering requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to COLAMDOrdering"); | ||||
|        | ||||
|       Index m = mat.rows(); | ||||
|       Index n = mat.cols(); | ||||
|       Index nnz = mat.nonZeros(); | ||||
|  | @ -132,12 +136,12 @@ class COLAMDOrdering | |||
|       Index stats [COLAMD_STATS]; | ||||
|       internal::colamd_set_defaults(knobs); | ||||
|        | ||||
|       Index info; | ||||
|       IndexVector p(n+1), A(Alen);  | ||||
|       for(Index i=0; i <= n; i++)   p(i) = mat.outerIndexPtr()[i]; | ||||
|       for(Index i=0; i < nnz; i++)  A(i) = mat.innerIndexPtr()[i]; | ||||
|       // Call Colamd routine to compute the ordering 
 | ||||
|       info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats);  | ||||
|       Index info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats);  | ||||
|       EIGEN_UNUSED_VARIABLE(info); | ||||
|       eigen_assert( info && "COLAMD failed " ); | ||||
|        | ||||
|       perm.resize(n); | ||||
|  |  | |||
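The added assertion makes the compressed-mode precondition explicit. A sketch of the expected call sequence; the matrix contents and sizes are illustrative:

    #include <Eigen/Sparse>
    #include <Eigen/OrderingMethods>
    using namespace Eigen;

    int main() {
      SparseMatrix<double> A(10, 10);
      A.reserve(VectorXi::Constant(10, 1));                // room for one entry per column
      for (int i = 0; i < 10; ++i) A.insert(i, i) = 1.0;   // leaves A in uncompressed mode
      A.makeCompressed();                                  // required before COLAMDOrdering

      COLAMDOrdering<int> ord;
      PermutationMatrix<Dynamic, Dynamic, int> perm;
      ord(A, perm);                                        // fill-reducing column ordering
      return perm.indices().size() == 10 ? 0 : 1;
    }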
|  | @ -76,7 +76,8 @@ template<typename _MatrixType> class ColPivHouseholderQR | |||
|         m_colsTranspositions(), | ||||
|         m_temp(), | ||||
|         m_colSqNorms(), | ||||
|         m_isInitialized(false) {} | ||||
|         m_isInitialized(false), | ||||
|         m_usePrescribedThreshold(false) {} | ||||
| 
 | ||||
|     /** \brief Default Constructor with memory preallocation
 | ||||
|       * | ||||
|  |  | |||
|  | @ -375,18 +375,20 @@ struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true> | |||
|     Scalar z; | ||||
|     JacobiRotation<Scalar> rot; | ||||
|     RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p))); | ||||
|      | ||||
|     if(n==0) | ||||
|     { | ||||
|       z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); | ||||
|       work_matrix.row(p) *= z; | ||||
|       if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z); | ||||
|       if(work_matrix.coeff(q,q)!=Scalar(0)) | ||||
|       { | ||||
|         z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); | ||||
|       else | ||||
|         z = Scalar(0); | ||||
|         work_matrix.row(q) *= z; | ||||
|         if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); | ||||
|       } | ||||
|       // otherwise the second row is already zero, so we have nothing to do.
 | ||||
|     } | ||||
|     else | ||||
|     { | ||||
|       rot.c() = conj(work_matrix.coeff(p,p)) / n; | ||||
|  | @ -415,6 +417,7 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q, | |||
|                             JacobiRotation<RealScalar> *j_right) | ||||
| { | ||||
|   using std::sqrt; | ||||
|   using std::abs; | ||||
|   Matrix<RealScalar,2,2> m; | ||||
|   m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)), | ||||
|        numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q)); | ||||
|  | @ -428,9 +431,11 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q, | |||
|   } | ||||
|   else | ||||
|   { | ||||
|     RealScalar u = d / t; | ||||
|     rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + numext::abs2(u)); | ||||
|     rot1.s() = rot1.c() * u; | ||||
|     RealScalar t2d2 = numext::hypot(t,d); | ||||
|     rot1.c() = abs(t)/t2d2; | ||||
|     rot1.s() = d/t2d2; | ||||
|     if(t<RealScalar(0)) | ||||
|       rot1.s() = -rot1.s(); | ||||
|   } | ||||
|   m.applyOnTheLeft(0,1,rot1); | ||||
|   j_right->makeJacobi(m,0,1); | ||||
|  | @ -531,8 +536,9 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD | |||
|     JacobiSVD() | ||||
|       : m_isInitialized(false), | ||||
|         m_isAllocated(false), | ||||
|         m_usePrescribedThreshold(false), | ||||
|         m_computationOptions(0), | ||||
|         m_rows(-1), m_cols(-1) | ||||
|         m_rows(-1), m_cols(-1), m_diagSize(0) | ||||
|     {} | ||||
| 
 | ||||
| 
 | ||||
|  | @ -545,6 +551,7 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD | |||
|     JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0) | ||||
|       : m_isInitialized(false), | ||||
|         m_isAllocated(false), | ||||
|         m_usePrescribedThreshold(false), | ||||
|         m_computationOptions(0), | ||||
|         m_rows(-1), m_cols(-1) | ||||
|     { | ||||
|  | @ -564,6 +571,7 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD | |||
|     JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0) | ||||
|       : m_isInitialized(false), | ||||
|         m_isAllocated(false), | ||||
|         m_usePrescribedThreshold(false), | ||||
|         m_computationOptions(0), | ||||
|         m_rows(-1), m_cols(-1) | ||||
|     { | ||||
|  | @ -666,6 +674,69 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD | |||
|       return m_nonzeroSingularValues; | ||||
|     } | ||||
|      | ||||
|     /** \returns the rank of the matrix of which \c *this is the SVD.
 | ||||
|       * | ||||
|       * \note This method has to determine which singular values should be considered nonzero. | ||||
|       *       For that, it uses the threshold value that you can control by calling | ||||
|       *       setThreshold(const RealScalar&). | ||||
|       */ | ||||
|     inline Index rank() const | ||||
|     { | ||||
|       using std::abs; | ||||
|       eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); | ||||
|       if(m_singularValues.size()==0) return 0; | ||||
|       RealScalar premultiplied_threshold = m_singularValues.coeff(0) * threshold(); | ||||
|       Index i = m_nonzeroSingularValues-1; | ||||
|       while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i; | ||||
|       return i+1; | ||||
|     } | ||||
|      | ||||
|     /** Allows to prescribe a threshold to be used by certain methods, such as rank() and solve(),
 | ||||
|       * which need to determine when singular values are to be considered nonzero. | ||||
|       * This is not used for the SVD decomposition itself. | ||||
|       * | ||||
|       * When it needs to get the threshold value, Eigen calls threshold(). | ||||
|       * The default is \c NumTraits<Scalar>::epsilon() | ||||
|       * | ||||
|       * \param threshold The new value to use as the threshold. | ||||
|       * | ||||
|       * A singular value will be considered nonzero if it is strictly greater than | ||||
|       *  \f$ threshold \times \vert max singular value \vert \f$. | ||||
|       * | ||||
|       * If you want to come back to the default behavior, call setThreshold(Default_t) | ||||
|       */ | ||||
|     JacobiSVD& setThreshold(const RealScalar& threshold) | ||||
|     { | ||||
|       m_usePrescribedThreshold = true; | ||||
|       m_prescribedThreshold = threshold; | ||||
|       return *this; | ||||
|     } | ||||
| 
 | ||||
|     /** Allows to come back to the default behavior, letting Eigen use its default formula for
 | ||||
|       * determining the threshold. | ||||
|       * | ||||
|       * You should pass the special object Eigen::Default as parameter here. | ||||
|       * \code svd.setThreshold(Eigen::Default); \endcode | ||||
|       * | ||||
|       * See the documentation of setThreshold(const RealScalar&). | ||||
|       */ | ||||
|     JacobiSVD& setThreshold(Default_t) | ||||
|     { | ||||
|       m_usePrescribedThreshold = false; | ||||
|       return *this; | ||||
|     } | ||||
| 
 | ||||
|     /** Returns the threshold that will be used by certain methods such as rank().
 | ||||
|       * | ||||
|       * See the documentation of setThreshold(const RealScalar&). | ||||
|       */ | ||||
|     RealScalar threshold() const | ||||
|     { | ||||
|       eigen_assert(m_isInitialized || m_usePrescribedThreshold); | ||||
|       return m_usePrescribedThreshold ? m_prescribedThreshold | ||||
|                                       : (std::max<Index>)(1,m_diagSize)*NumTraits<Scalar>::epsilon(); | ||||
|     } | ||||
| 
 | ||||
|     inline Index rows() const { return m_rows; } | ||||
|     inline Index cols() const { return m_cols; } | ||||
| 
 | ||||
|  | @ -677,11 +748,12 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD | |||
|     MatrixVType m_matrixV; | ||||
|     SingularValuesType m_singularValues; | ||||
|     WorkMatrixType m_workMatrix; | ||||
|     bool m_isInitialized, m_isAllocated; | ||||
|     bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold; | ||||
|     bool m_computeFullU, m_computeThinU; | ||||
|     bool m_computeFullV, m_computeThinV; | ||||
|     unsigned int m_computationOptions; | ||||
|     Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize; | ||||
|     RealScalar m_prescribedThreshold; | ||||
| 
 | ||||
|     template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex> | ||||
|     friend struct internal::svd_precondition_2x2_block_to_be_real; | ||||
|  | @ -765,6 +837,11 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig | |||
|     if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize); | ||||
|   } | ||||
|    | ||||
|   // Scaling factor to reduce over/under-flows
 | ||||
|   RealScalar scale = m_workMatrix.cwiseAbs().maxCoeff(); | ||||
|   if(scale==RealScalar(0)) scale = RealScalar(1); | ||||
|   m_workMatrix /= scale; | ||||
| 
 | ||||
|   /*** step 2. The main Jacobi SVD iteration. ***/ | ||||
| 
 | ||||
|   bool finished = false; | ||||
|  | @ -834,6 +911,8 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig | |||
|     } | ||||
|   } | ||||
|    | ||||
|   m_singularValues *= scale; | ||||
| 
 | ||||
|   m_isInitialized = true; | ||||
|   return *this; | ||||
| } | ||||
|  | @ -854,11 +933,11 @@ struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs> | |||
|     // So A^{-1} = V S^{-1} U^*
 | ||||
| 
 | ||||
|     Matrix<Scalar, Dynamic, Rhs::ColsAtCompileTime, 0, _MatrixType::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime> tmp; | ||||
|     Index nonzeroSingVals = dec().nonzeroSingularValues(); | ||||
|     Index rank = dec().rank(); | ||||
|      | ||||
|     tmp.noalias() = dec().matrixU().leftCols(nonzeroSingVals).adjoint() * rhs(); | ||||
|     tmp = dec().singularValues().head(nonzeroSingVals).asDiagonal().inverse() * tmp; | ||||
|     dst = dec().matrixV().leftCols(nonzeroSingVals) * tmp; | ||||
|     tmp.noalias() = dec().matrixU().leftCols(rank).adjoint() * rhs(); | ||||
|     tmp = dec().singularValues().head(rank).asDiagonal().inverse() * tmp; | ||||
|     dst = dec().matrixV().leftCols(rank) * tmp; | ||||
|   } | ||||
| }; | ||||
| } // end namespace internal
 | ||||
|  |  | |||
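The new rank()/setThreshold() API and the rank-truncated solve in use; the 4x3 matrix below is deliberately rank 2 (its third column is the sum of the first two):

    #include <Eigen/Dense>
    using namespace Eigen;

    int main() {
      MatrixXd A(4, 3);
      A << 1, 0, 1,
           0, 1, 1,
           1, 1, 2,
           2, 1, 3;
      VectorXd b = VectorXd::Ones(4);

      JacobiSVD<MatrixXd> svd(A, ComputeThinU | ComputeThinV);
      svd.setThreshold(1e-8);        // values below 1e-8 * largest singular value count as zero
      VectorXd x = svd.solve(b);     // least-squares solve truncated to rank() terms
      return (svd.rank() == 2) ? 0 : 1;
    }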
|  | @ -37,6 +37,7 @@ class SimplicialCholeskyBase : internal::noncopyable | |||
| { | ||||
|   public: | ||||
|     typedef typename internal::traits<Derived>::MatrixType MatrixType; | ||||
|     typedef typename internal::traits<Derived>::OrderingType OrderingType; | ||||
|     enum { UpLo = internal::traits<Derived>::UpLo }; | ||||
|     typedef typename MatrixType::Scalar Scalar; | ||||
|     typedef typename MatrixType::RealScalar RealScalar; | ||||
|  | @ -240,15 +241,16 @@ class SimplicialCholeskyBase : internal::noncopyable | |||
|     RealScalar m_shiftScale; | ||||
| }; | ||||
| 
 | ||||
| template<typename _MatrixType, int _UpLo = Lower> class SimplicialLLT; | ||||
| template<typename _MatrixType, int _UpLo = Lower> class SimplicialLDLT; | ||||
| template<typename _MatrixType, int _UpLo = Lower> class SimplicialCholesky; | ||||
| template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialLLT; | ||||
| template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialLDLT; | ||||
| template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialCholesky; | ||||
| 
 | ||||
| namespace internal { | ||||
| 
 | ||||
| template<typename _MatrixType, int _UpLo> struct traits<SimplicialLLT<_MatrixType,_UpLo> > | ||||
| template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<SimplicialLLT<_MatrixType,_UpLo,_Ordering> > | ||||
| { | ||||
|   typedef _MatrixType MatrixType; | ||||
|   typedef _Ordering OrderingType; | ||||
|   enum { UpLo = _UpLo }; | ||||
|   typedef typename MatrixType::Scalar                         Scalar; | ||||
|   typedef typename MatrixType::Index                          Index; | ||||
|  | @ -259,9 +261,10 @@ template<typename _MatrixType, int _UpLo> struct traits<SimplicialLLT<_MatrixTyp | |||
|   static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); } | ||||
| }; | ||||
| 
 | ||||
| template<typename _MatrixType,int _UpLo> struct traits<SimplicialLDLT<_MatrixType,_UpLo> > | ||||
| template<typename _MatrixType,int _UpLo, typename _Ordering> struct traits<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> > | ||||
| { | ||||
|   typedef _MatrixType MatrixType; | ||||
|   typedef _Ordering OrderingType; | ||||
|   enum { UpLo = _UpLo }; | ||||
|   typedef typename MatrixType::Scalar                             Scalar; | ||||
|   typedef typename MatrixType::Index                              Index; | ||||
|  | @ -272,9 +275,10 @@ template<typename _MatrixType,int _UpLo> struct traits<SimplicialLDLT<_MatrixTyp | |||
|   static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); } | ||||
| }; | ||||
| 
 | ||||
| template<typename _MatrixType, int _UpLo> struct traits<SimplicialCholesky<_MatrixType,_UpLo> > | ||||
| template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> > | ||||
| { | ||||
|   typedef _MatrixType MatrixType; | ||||
|   typedef _Ordering OrderingType; | ||||
|   enum { UpLo = _UpLo }; | ||||
| }; | ||||
| 
 | ||||
|  | @ -294,11 +298,12 @@ template<typename _MatrixType, int _UpLo> struct traits<SimplicialCholesky<_Matr | |||
|   * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> | ||||
|   * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower | ||||
|   *               or Upper. Default is Lower. | ||||
|   * \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<> | ||||
|   * | ||||
|   * \sa class SimplicialLDLT | ||||
|   * \sa class SimplicialLDLT, class AMDOrdering, class NaturalOrdering | ||||
|   */ | ||||
| template<typename _MatrixType, int _UpLo> | ||||
|     class SimplicialLLT : public SimplicialCholeskyBase<SimplicialLLT<_MatrixType,_UpLo> > | ||||
| template<typename _MatrixType, int _UpLo, typename _Ordering> | ||||
|     class SimplicialLLT : public SimplicialCholeskyBase<SimplicialLLT<_MatrixType,_UpLo,_Ordering> > | ||||
| { | ||||
| public: | ||||
|     typedef _MatrixType MatrixType; | ||||
|  | @ -382,11 +387,12 @@ public: | |||
|   * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> | ||||
|   * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower | ||||
|   *               or Upper. Default is Lower. | ||||
|   * \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<> | ||||
|   * | ||||
|   * \sa class SimplicialLLT | ||||
|   * \sa class SimplicialLLT, class AMDOrdering, class NaturalOrdering | ||||
|   */ | ||||
| template<typename _MatrixType, int _UpLo> | ||||
|     class SimplicialLDLT : public SimplicialCholeskyBase<SimplicialLDLT<_MatrixType,_UpLo> > | ||||
| template<typename _MatrixType, int _UpLo, typename _Ordering> | ||||
|     class SimplicialLDLT : public SimplicialCholeskyBase<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> > | ||||
| { | ||||
| public: | ||||
|     typedef _MatrixType MatrixType; | ||||
|  | @ -467,8 +473,8 @@ public: | |||
|   * | ||||
|   * \sa class SimplicialLDLT, class SimplicialLLT | ||||
|   */ | ||||
| template<typename _MatrixType, int _UpLo> | ||||
|     class SimplicialCholesky : public SimplicialCholeskyBase<SimplicialCholesky<_MatrixType,_UpLo> > | ||||
| template<typename _MatrixType, int _UpLo, typename _Ordering> | ||||
|     class SimplicialCholesky : public SimplicialCholeskyBase<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> > | ||||
| { | ||||
| public: | ||||
|     typedef _MatrixType MatrixType; | ||||
|  | @ -612,15 +618,13 @@ void SimplicialCholeskyBase<Derived>::ordering(const MatrixType& a, CholMatrixTy | |||
| { | ||||
|   eigen_assert(a.rows()==a.cols()); | ||||
|   const Index size = a.rows(); | ||||
|   // TODO allows to configure the permutation
 | ||||
|   // Note that amd compute the inverse permutation
 | ||||
|   { | ||||
|     CholMatrixType C; | ||||
|     C = a.template selfadjointView<UpLo>(); | ||||
|     // remove diagonal entries:
 | ||||
|     // seems not to be needed
 | ||||
|     // C.prune(keep_diag());
 | ||||
|     internal::minimum_degree_ordering(C, m_Pinv); | ||||
|      | ||||
|     OrderingType ordering; | ||||
|     ordering(C,m_Pinv); | ||||
|   } | ||||
| 
 | ||||
|   if(m_Pinv.size()>0) | ||||
|  |  | |||
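The new third template parameter in use: AMDOrdering remains the default, and NaturalOrdering keeps the natural ordering. Both solves below should agree; the small SPD matrix is illustrative:

    #include <Eigen/Sparse>
    using namespace Eigen;

    int main() {
      SparseMatrix<double> A(3, 3);
      A.reserve(VectorXi::Constant(3, 2));
      A.insert(0, 0) = 4; A.insert(1, 1) = 4; A.insert(2, 2) = 4;
      A.insert(1, 0) = 1; A.insert(2, 1) = 1;    // lower triangle of an SPD matrix
      A.makeCompressed();
      VectorXd b = VectorXd::Ones(3);

      SimplicialLDLT<SparseMatrix<double> > ldlt1(A);                              // default AMD
      SimplicialLDLT<SparseMatrix<double>, Lower, NaturalOrdering<int> > ldlt2(A); // no reordering
      VectorXd x1 = ldlt1.solve(b), x2 = ldlt2.solve(b);
      return x1.isApprox(x2) ? 0 : 1;
    }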
|  | @ -51,8 +51,8 @@ class CompressedStorage | |||
|     CompressedStorage& operator=(const CompressedStorage& other) | ||||
|     { | ||||
|       resize(other.size()); | ||||
|       memcpy(m_values, other.m_values, m_size * sizeof(Scalar)); | ||||
|       memcpy(m_indices, other.m_indices, m_size * sizeof(Index)); | ||||
|       internal::smart_copy(other.m_values,  other.m_values  + m_size, m_values); | ||||
|       internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices); | ||||
|       return *this; | ||||
|     } | ||||
| 
 | ||||
|  | @ -83,10 +83,10 @@ class CompressedStorage | |||
|         reallocate(m_size); | ||||
|     } | ||||
| 
 | ||||
|     void resize(size_t size, float reserveSizeFactor = 0) | ||||
|     void resize(size_t size, double reserveSizeFactor = 0) | ||||
|     { | ||||
|       if (m_allocatedSize<size) | ||||
|         reallocate(size + size_t(reserveSizeFactor*size)); | ||||
|         reallocate(size + size_t(reserveSizeFactor*double(size))); | ||||
|       m_size = size; | ||||
|     } | ||||
| 
 | ||||
|  |  | |||
|  | @ -73,7 +73,8 @@ class CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator | |||
|     typedef internal::sparse_cwise_binary_op_inner_iterator_selector< | ||||
|       BinaryOp,Lhs,Rhs, InnerIterator> Base; | ||||
| 
 | ||||
|     EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, Index outer) | ||||
|     // NOTE: we have to prefix Index by "typename Lhs::" to avoid an ICE with VC11
 | ||||
|     EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, typename Lhs::Index outer) | ||||
|       : Base(binOp.derived(),outer) | ||||
|     {} | ||||
| }; | ||||
|  |  | |||
|  | @ -19,7 +19,10 @@ template<typename Lhs, typename Rhs, int InnerSize> struct SparseDenseProductRet | |||
| 
 | ||||
| template<typename Lhs, typename Rhs> struct SparseDenseProductReturnType<Lhs,Rhs,1> | ||||
| { | ||||
|   typedef SparseDenseOuterProduct<Lhs,Rhs,false> Type; | ||||
|   typedef typename internal::conditional< | ||||
|     Lhs::IsRowMajor, | ||||
|     SparseDenseOuterProduct<Rhs,Lhs,true>, | ||||
|     SparseDenseOuterProduct<Lhs,Rhs,false> >::type Type; | ||||
| }; | ||||
| 
 | ||||
| template<typename Lhs, typename Rhs, int InnerSize> struct DenseSparseProductReturnType | ||||
|  | @ -29,7 +32,10 @@ template<typename Lhs, typename Rhs, int InnerSize> struct DenseSparseProductRet | |||
| 
 | ||||
| template<typename Lhs, typename Rhs> struct DenseSparseProductReturnType<Lhs,Rhs,1> | ||||
| { | ||||
|   typedef SparseDenseOuterProduct<Rhs,Lhs,true> Type; | ||||
|   typedef typename internal::conditional< | ||||
|     Rhs::IsRowMajor, | ||||
|     SparseDenseOuterProduct<Rhs,Lhs,true>, | ||||
|     SparseDenseOuterProduct<Lhs,Rhs,false> >::type Type; | ||||
| }; | ||||
| 
 | ||||
| namespace internal { | ||||
|  | @ -114,17 +120,30 @@ class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNes | |||
|     typedef typename SparseDenseOuterProduct::Index Index; | ||||
|   public: | ||||
|     EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer) | ||||
|       : Base(prod.lhs(), 0), m_outer(outer), m_factor(prod.rhs().coeff(outer)) | ||||
|     { | ||||
|     } | ||||
|       : Base(prod.lhs(), 0), m_outer(outer), m_factor(get(prod.rhs(), outer, typename internal::traits<Rhs>::StorageKind() )) | ||||
|     { } | ||||
| 
 | ||||
|     inline Index outer() const { return m_outer; } | ||||
|     inline Index row() const { return Transpose ? Base::row() : m_outer; } | ||||
|     inline Index col() const { return Transpose ? m_outer : Base::row(); } | ||||
|     inline Index row() const { return Transpose ? m_outer : Base::index(); } | ||||
|     inline Index col() const { return Transpose ? Base::index() : m_outer; } | ||||
| 
 | ||||
|     inline Scalar value() const { return Base::value() * m_factor; } | ||||
| 
 | ||||
|   protected: | ||||
|     static Scalar get(const _RhsNested &rhs, Index outer, Dense = Dense()) | ||||
|     { | ||||
|       return rhs.coeff(outer); | ||||
|     } | ||||
|      | ||||
|     static Scalar get(const _RhsNested &rhs, Index outer, Sparse = Sparse()) | ||||
|     { | ||||
|       typename Traits::_RhsNested::InnerIterator it(rhs, outer); | ||||
|       if (it && it.index()==0) | ||||
|         return it.value(); | ||||
|        | ||||
|       return Scalar(0); | ||||
|     } | ||||
|      | ||||
|     Index m_outer; | ||||
|     Scalar m_factor; | ||||
| }; | ||||
|  |  | |||
|  | @ -940,7 +940,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa | |||
|   enum { IsRowMajor = SparseMatrixType::IsRowMajor }; | ||||
|   typedef typename SparseMatrixType::Scalar Scalar; | ||||
|   typedef typename SparseMatrixType::Index Index; | ||||
|   SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols()); | ||||
|   SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,Index> trMat(mat.rows(),mat.cols()); | ||||
| 
 | ||||
|   if(begin!=end) | ||||
|   { | ||||
|  | @ -1178,7 +1178,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse | |||
|   size_t p = m_outerIndex[outer+1]; | ||||
|   ++m_outerIndex[outer+1]; | ||||
| 
 | ||||
|   float reallocRatio = 1; | ||||
|   double reallocRatio = 1; | ||||
|   if (m_data.allocatedSize()<=m_data.size()) | ||||
|   { | ||||
|     // if there is no preallocated memory, let's reserve a minimum of 32 elements
 | ||||
|  | @ -1190,13 +1190,13 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse | |||
|     { | ||||
|       // we need to reallocate the data, to reduce multiple reallocations
 | ||||
|       // we use a smart resize algorithm based on the current filling ratio
 | ||||
|       // in addition, we use float to avoid integers overflows
 | ||||
|       float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1); | ||||
|       reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size()); | ||||
|       // in addition, we use double to avoid integers overflows
 | ||||
|       double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1); | ||||
|       reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size()); | ||||
|       // furthermore we bound the realloc ratio to:
 | ||||
|       //   1) reduce multiple minor realloc when the matrix is almost filled
 | ||||
|       //   2) avoid to allocate too much memory when the matrix is almost empty
 | ||||
|       reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f); | ||||
|       reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.); | ||||
|     } | ||||
|   } | ||||
|   m_data.resize(m_data.size()+1,reallocRatio); | ||||
|  |  | |||
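A typical setFromTriplets call, which the change above now routes through an intermediate matrix using the destination's own index type; duplicate triplets are summed:

    #include <Eigen/Sparse>
    #include <vector>
    using namespace Eigen;

    int main() {
      std::vector<Triplet<double> > trips;
      trips.push_back(Triplet<double>(0, 0, 1.0));
      trips.push_back(Triplet<double>(1, 2, 2.0));
      trips.push_back(Triplet<double>(1, 2, 3.0));   // duplicate entry: summed to 5.0

      SparseMatrix<double> A(3, 3);
      A.setFromTriplets(trips.begin(), trips.end());
      return A.coeff(1, 2) == 5.0 ? 0 : 1;
    }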
|  | @ -26,7 +26,7 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse> | |||
|     inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); } | ||||
| }; | ||||
| 
 | ||||
| // NOTE: VC10 trigger an ICE if don't put typename TransposeImpl<MatrixType,Sparse>:: in front of Index,
 | ||||
| // NOTE: VC10 and VC11 trigger an ICE if don't put typename TransposeImpl<MatrixType,Sparse>:: in front of Index,
 | ||||
| // a typedef typename TransposeImpl<MatrixType,Sparse>::Index Index;
 | ||||
| // does not fix the issue.
 | ||||
| // An alternative is to define the nested class in the parent class itself.
 | ||||
|  | @ -40,8 +40,8 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::InnerItera | |||
|     EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl<MatrixType,Sparse>::Index outer) | ||||
|       : Base(trans.derived().nestedExpression(), outer) | ||||
|     {} | ||||
|     Index row() const { return Base::col(); } | ||||
|     Index col() const { return Base::row(); } | ||||
|     typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); } | ||||
|     typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); } | ||||
| }; | ||||
| 
 | ||||
| template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator | ||||
|  | @ -54,8 +54,8 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInn | |||
|     EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl<MatrixType,Sparse>::Index outer) | ||||
|       : Base(xpr.derived().nestedExpression(), outer) | ||||
|     {} | ||||
|     Index row() const { return Base::col(); } | ||||
|     Index col() const { return Base::row(); } | ||||
|     typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); } | ||||
|     typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); } | ||||
| }; | ||||
| 
 | ||||
| } // end namespace Eigen
 | ||||
|  |  | |||
|  | @ -84,8 +84,10 @@ template<typename Lhs, typename Rhs>        class DenseTimeSparseProduct; | |||
| template<typename Lhs, typename Rhs, bool Transpose> class SparseDenseOuterProduct; | ||||
| 
 | ||||
| template<typename Lhs, typename Rhs> struct SparseSparseProductReturnType; | ||||
| template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct DenseSparseProductReturnType; | ||||
| template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct SparseDenseProductReturnType; | ||||
| template<typename Lhs, typename Rhs, | ||||
|          int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct DenseSparseProductReturnType;          | ||||
| template<typename Lhs, typename Rhs, | ||||
|          int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct SparseDenseProductReturnType; | ||||
| template<typename MatrixType,int UpLo> class SparseSymmetricPermutationProduct; | ||||
| 
 | ||||
| namespace internal { | ||||
|  |  | |||
|  | @ -2,7 +2,7 @@ | |||
| // for linear algebra.
 | ||||
| //
 | ||||
| // Copyright (C) 2012-2013 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
 | ||||
| // Copyright (C) 2012-2013 Gael Guennebaud <gael.guennebaud@inria.fr>
 | ||||
| // Copyright (C) 2012-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
 | ||||
| //
 | ||||
| // This Source Code Form is subject to the terms of the Mozilla
 | ||||
| // Public License v. 2.0. If a copy of the MPL was not distributed
 | ||||
|  | @ -58,6 +58,7 @@ namespace internal { | |||
|   * \tparam _OrderingType The fill-reducing ordering method. See the \link OrderingMethods_Module  | ||||
|   *  OrderingMethods \endlink module for the list of built-in and external ordering methods. | ||||
|   *  | ||||
|   * \warning The input sparse matrix A must be in compressed mode (see SparseMatrix::makeCompressed()). | ||||
|   *  | ||||
|   */ | ||||
| template<typename _MatrixType, typename _OrderingType> | ||||
|  | @ -77,10 +78,23 @@ class SparseQR | |||
|     SparseQR () : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false) | ||||
|     { } | ||||
|      | ||||
|     /** Construct a QR factorization of the matrix \a mat.
 | ||||
|       *  | ||||
|       * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). | ||||
|       *  | ||||
|       * \sa compute() | ||||
|       */ | ||||
|     SparseQR(const MatrixType& mat) : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false) | ||||
|     { | ||||
|       compute(mat); | ||||
|     } | ||||
|      | ||||
|     /** Computes the QR factorization of the sparse matrix \a mat.
 | ||||
|       *  | ||||
|       * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). | ||||
|       *  | ||||
|       * \sa analyzePattern(), factorize() | ||||
|       */ | ||||
|     void compute(const MatrixType& mat) | ||||
|     { | ||||
|       analyzePattern(mat); | ||||
|  | @ -166,7 +180,7 @@ class SparseQR | |||
|       y.bottomRows(y.rows()-rank).setZero(); | ||||
| 
 | ||||
|       // Apply the column permutation
 | ||||
|       if (m_perm_c.size())  dest.topRows(cols()) = colsPermutation() * y.topRows(cols()); | ||||
|       if (m_perm_c.size())  dest = colsPermutation() * y.topRows(cols()); | ||||
|       else                  dest = y.topRows(cols()); | ||||
|        | ||||
|       m_info = Success; | ||||
|  | @ -206,7 +220,7 @@ class SparseQR | |||
|      | ||||
|     /** \brief Reports whether previous computation was successful.
 | ||||
|       * | ||||
|       * \returns \c Success if computation was succesful, | ||||
|       * \returns \c Success if computation was successful, | ||||
|       *          \c NumericalIssue if the QR factorization reports a numerical problem | ||||
|       *          \c InvalidInput if the input matrix is invalid | ||||
|       * | ||||
|  | @ -255,20 +269,24 @@ class SparseQR | |||
| }; | ||||
| 
 | ||||
| /** \brief Preprocessing step of a QR factorization 
 | ||||
|   *  | ||||
|   * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). | ||||
|   *  | ||||
|   * In this step, the fill-reducing permutation is computed and applied to the columns of A | ||||
|   * and the column elimination tree is computed as well. Only the sparcity pattern of \a mat is exploited. | ||||
|   * and the column elimination tree is computed as well. Only the sparsity pattern of \a mat is exploited. | ||||
|   *  | ||||
|   * \note In this step it is assumed that there is no empty row in the matrix \a mat. | ||||
|   */ | ||||
| template <typename MatrixType, typename OrderingType> | ||||
| void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat) | ||||
| { | ||||
|   eigen_assert(mat.isCompressed() && "SparseQR requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to SparseQR"); | ||||
|   // Compute the column fill reducing ordering
 | ||||
|   OrderingType ord;  | ||||
|   ord(mat, m_perm_c);  | ||||
|   Index n = mat.cols(); | ||||
|   Index m = mat.rows(); | ||||
|   Index diagSize = (std::min)(m,n); | ||||
|    | ||||
|   if (!m_perm_c.size()) | ||||
|   { | ||||
|  | @ -280,20 +298,20 @@ void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat) | |||
|   m_outputPerm_c = m_perm_c.inverse(); | ||||
|   internal::coletree(mat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data()); | ||||
|    | ||||
|   m_R.resize(n, n); | ||||
|   m_Q.resize(m, n); | ||||
|   m_R.resize(m, n); | ||||
|   m_Q.resize(m, diagSize); | ||||
|    | ||||
|   // Allocate space for nonzero elements : rough estimation
 | ||||
|   m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree
 | ||||
|   m_Q.reserve(2*mat.nonZeros()); | ||||
|   m_hcoeffs.resize(n); | ||||
|   m_hcoeffs.resize(diagSize); | ||||
|   m_analysisIsok = true; | ||||
| } | ||||
| 
 | ||||
| /** \brief Performs the numerical QR factorization of the input matrix
 | ||||
|   *  | ||||
|   * The function SparseQR::analyzePattern(const MatrixType&) must have been called beforehand with | ||||
|   * a matrix having the same sparcity pattern than \a mat. | ||||
|   * a matrix having the same sparsity pattern than \a mat. | ||||
|   *  | ||||
|   * \param mat The sparse column-major matrix | ||||
|   */ | ||||
|  | @ -306,11 +324,12 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat) | |||
|   eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step"); | ||||
|   Index m = mat.rows(); | ||||
|   Index n = mat.cols(); | ||||
|   IndexVector mark(m); mark.setConstant(-1);  // Record the visited nodes
 | ||||
|   Index diagSize = (std::min)(m,n); | ||||
|   IndexVector mark((std::max)(m,n)); mark.setConstant(-1);  // Record the visited nodes
 | ||||
|   IndexVector Ridx(n), Qidx(m);                             // Store temporarily the row indexes for the current column of R and Q
 | ||||
|   Index nzcolR, nzcolQ;                                     // Number of nonzero for the current column of R and Q
 | ||||
|   ScalarVector tval(m);                                     // The dense vector used to compute the current column
 | ||||
|   bool found_diag; | ||||
|   RealScalar pivotThreshold = m_threshold; | ||||
|      | ||||
|   m_pmat = mat; | ||||
|   m_pmat.uncompress(); // To have the innerNonZeroPtr allocated
 | ||||
|  | @ -322,7 +341,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat) | |||
|     m_pmat.innerNonZeroPtr()[p] = mat.outerIndexPtr()[i+1] - mat.outerIndexPtr()[i];  | ||||
|   } | ||||
|    | ||||
|   /* Compute the default threshold, see : 
 | ||||
|   /* Compute the default threshold as in MatLab, see:
 | ||||
|    * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing | ||||
|    * Sparse QR Factorization, ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3  | ||||
|    */ | ||||
|  | @ -330,24 +349,24 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat) | |||
|   { | ||||
|     RealScalar max2Norm = 0.0; | ||||
|     for (int j = 0; j < n; j++) max2Norm = (max)(max2Norm, m_pmat.col(j).norm()); | ||||
|     m_threshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon(); | ||||
|     pivotThreshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon(); | ||||
|   } | ||||
|    | ||||
|   // Initialize the numerical permutation
 | ||||
|   m_pivotperm.setIdentity(n); | ||||
|    | ||||
|   Index nonzeroCol = 0; // Record the number of valid pivots
 | ||||
|   m_Q.startVec(0); | ||||
|    | ||||
|   // Left looking rank-revealing QR factorization: compute a column of R and Q at a time
 | ||||
|   for (Index col = 0; col < (std::min)(n,m); ++col) | ||||
|   for (Index col = 0; col < n; ++col) | ||||
|   { | ||||
|     mark.setConstant(-1); | ||||
|     m_R.startVec(col); | ||||
|     m_Q.startVec(col); | ||||
|     mark(nonzeroCol) = col; | ||||
|     Qidx(0) = nonzeroCol; | ||||
|     nzcolR = 0; nzcolQ = 1; | ||||
|     found_diag = col>=m; | ||||
|     bool found_diag = nonzeroCol>=m; | ||||
|     tval.setZero();  | ||||
|      | ||||
|     // Symbolic factorization: find the nonzero locations of the column k of the factors R and Q, i.e.,
 | ||||
|  | @ -398,7 +417,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat) | |||
|     // Browse all the indexes of R(:,col) in reverse order
 | ||||
|     for (Index i = nzcolR-1; i >= 0; i--) | ||||
|     { | ||||
|       Index curIdx = m_pivotperm.indices()(Ridx(i)); | ||||
|       Index curIdx = Ridx(i); | ||||
|        | ||||
|       // Apply the curIdx-th householder vector to the current column (temporarily stored into tval)
 | ||||
|       Scalar tdot(0); | ||||
|  | @ -428,16 +447,18 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat) | |||
|       } | ||||
|     } // End update current column
 | ||||
|      | ||||
|     Scalar tau; | ||||
|     RealScalar beta = 0; | ||||
|      | ||||
|     if(nonzeroCol < diagSize) | ||||
|     { | ||||
|       // Compute the Householder reflection that eliminate the current column
 | ||||
|       // FIXME this step should call the Householder module.
 | ||||
|     Scalar tau; | ||||
|     RealScalar beta; | ||||
|       Scalar c0 = nzcolQ ? tval(Qidx(0)) : Scalar(0); | ||||
|        | ||||
|       // First, the squared norm of Q((col+1):m, col)
 | ||||
|       RealScalar sqrNorm = 0.; | ||||
|       for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq))); | ||||
|      | ||||
|       if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0)) | ||||
|       { | ||||
|         tau = RealScalar(0); | ||||
|  | @ -446,7 +467,8 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat) | |||
|       } | ||||
|       else | ||||
|       { | ||||
|       beta = std::sqrt(numext::abs2(c0) + sqrNorm); | ||||
|         using std::sqrt; | ||||
|         beta = sqrt(numext::abs2(c0) + sqrNorm); | ||||
|         if(numext::real(c0) >= RealScalar(0)) | ||||
|           beta = -beta; | ||||
|         tval(Qidx(0)) = 1; | ||||
|  | @ -455,6 +477,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat) | |||
|         tau = numext::conj((beta-c0) / beta); | ||||
|            | ||||
|       } | ||||
|     } | ||||
| 
 | ||||
|     // Insert values in R
 | ||||
|     for (Index  i = nzcolR-1; i >= 0; i--) | ||||
|  | @ -467,24 +490,25 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat) | |||
|       } | ||||
|     } | ||||
| 
 | ||||
|     if(abs(beta) >= m_threshold) | ||||
|     if(nonzeroCol < diagSize && abs(beta) >= pivotThreshold) | ||||
|     { | ||||
|       m_R.insertBackByOuterInner(col, nonzeroCol) = beta; | ||||
|       nonzeroCol++; | ||||
|       // The householder coefficient
 | ||||
|       m_hcoeffs(col) = tau; | ||||
|       m_hcoeffs(nonzeroCol) = tau; | ||||
|       // Record the householder reflections
 | ||||
|       for (Index itq = 0; itq < nzcolQ; ++itq) | ||||
|       { | ||||
|         Index iQ = Qidx(itq); | ||||
|         m_Q.insertBackByOuterInnerUnordered(col,iQ) = tval(iQ); | ||||
|         m_Q.insertBackByOuterInnerUnordered(nonzeroCol,iQ) = tval(iQ); | ||||
|         tval(iQ) = Scalar(0.); | ||||
|       } | ||||
|       nonzeroCol++; | ||||
|       if(nonzeroCol<diagSize) | ||||
|         m_Q.startVec(nonzeroCol); | ||||
|     } | ||||
|     else | ||||
|     { | ||||
|       // Zero pivot found: move implicitly this column to the end
 | ||||
|       m_hcoeffs(col) = Scalar(0); | ||||
|       for (Index j = nonzeroCol; j < n-1; j++)  | ||||
|         std::swap(m_pivotperm.indices()(j), m_pivotperm.indices()[j+1]); | ||||
|        | ||||
|  | @ -493,6 +517,8 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat) | |||
|     } | ||||
|   } | ||||
|    | ||||
|   m_hcoeffs.tail(diagSize-nonzeroCol).setZero(); | ||||
|    | ||||
|   // Finalize the column pointers of the sparse matrices R and Q
 | ||||
|   m_Q.finalize(); | ||||
|   m_Q.makeCompressed(); | ||||
|  | @ -561,14 +587,16 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived | |||
|   template<typename DesType> | ||||
|   void evalTo(DesType& res) const | ||||
|   { | ||||
|     Index m = m_qr.rows(); | ||||
|     Index n = m_qr.cols(); | ||||
|     Index diagSize = (std::min)(m,n); | ||||
|     res = m_other; | ||||
|     if (m_transpose) | ||||
|     { | ||||
|       eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes"); | ||||
|       //Compute res = Q' * other column by column
 | ||||
|       for(Index j = 0; j < res.cols(); j++){ | ||||
|         for (Index k = 0; k < n; k++) | ||||
|         for (Index k = 0; k < diagSize; k++) | ||||
|         { | ||||
|           Scalar tau = Scalar(0); | ||||
|           tau = m_qr.m_Q.col(k).dot(res.col(j)); | ||||
|  | @ -581,10 +609,10 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived | |||
|     else | ||||
|     { | ||||
|       eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes"); | ||||
|       // Compute res = Q' * other column by column
 | ||||
|       // Compute res = Q * other column by column
 | ||||
|       for(Index j = 0; j < res.cols(); j++) | ||||
|       { | ||||
|         for (Index k = n-1; k >=0; k--) | ||||
|         for (Index k = diagSize-1; k >=0; k--) | ||||
|         { | ||||
|           Scalar tau = Scalar(0); | ||||
|           tau = m_qr.m_Q.col(k).dot(res.col(j)); | ||||
|  | @ -618,7 +646,7 @@ struct SparseQRMatrixQReturnType : public EigenBase<SparseQRMatrixQReturnType<Sp | |||
|     return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr); | ||||
|   } | ||||
|   inline Index rows() const { return m_qr.rows(); } | ||||
|   inline Index cols() const { return m_qr.cols(); } | ||||
|   inline Index cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); } | ||||
|   // To use for operations with the transpose of Q
 | ||||
|   SparseQRMatrixQTransposeReturnType<SparseQRType> transpose() const | ||||
|   { | ||||
|  |  | |||
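The compressed-mode requirement stated repeatedly above, together with a minimal factor-and-solve; the 4x3 system is illustrative and has full column rank:

    #include <Eigen/Sparse>
    #include <Eigen/SparseQR>
    #include <Eigen/OrderingMethods>
    using namespace Eigen;

    int main() {
      SparseMatrix<double> A(4, 3);
      A.reserve(VectorXi::Constant(3, 2));
      A.insert(0, 0) = 2; A.insert(1, 1) = 3; A.insert(2, 2) = 4; A.insert(3, 0) = 1;
      A.makeCompressed();                                   // SparseQR now asserts this
      VectorXd b(4); b << 2, 3, 4, 1;                       // chosen so x = (1, 1, 1)

      SparseQR<SparseMatrix<double>, COLAMDOrdering<int> > qr(A);
      VectorXd x = qr.solve(b);                             // least-squares solution
      return qr.info() == Success ? 0 : 1;
    }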
|  | @ -11,7 +11,7 @@ | |||
| #ifndef EIGEN_STDDEQUE_H | ||||
| #define EIGEN_STDDEQUE_H | ||||
| 
 | ||||
| #include "Eigen/src/StlSupport/details.h" | ||||
| #include "details.h" | ||||
| 
 | ||||
| // Define the explicit instantiation (e.g. necessary for the Intel compiler)
 | ||||
| #if defined(__INTEL_COMPILER) || defined(__GNUC__) | ||||
|  |  | |||
|  | @ -10,7 +10,7 @@ | |||
| #ifndef EIGEN_STDLIST_H | ||||
| #define EIGEN_STDLIST_H | ||||
| 
 | ||||
| #include "Eigen/src/StlSupport/details.h" | ||||
| #include "details.h" | ||||
| 
 | ||||
| // Define the explicit instantiation (e.g. necessary for the Intel compiler)
 | ||||
| #if defined(__INTEL_COMPILER) || defined(__GNUC__) | ||||
|  |  | |||
|  | @ -11,7 +11,7 @@ | |||
| #ifndef EIGEN_STDVECTOR_H | ||||
| #define EIGEN_STDVECTOR_H | ||||
| 
 | ||||
| #include "Eigen/src/StlSupport/details.h" | ||||
| #include "details.h" | ||||
| 
 | ||||
| /**
 | ||||
|  * This section contains a convenience MACRO which allows an easy specialization of | ||||
|  |  | |||
|  | @ -1,9 +1,6 @@ | |||
| 
 | ||||
| This directory contains a BLAS library built on top of Eigen. | ||||
| 
 | ||||
| This is currently a work in progress and far from being ready for use, | ||||
| but feel free to contribute to it if you wish. | ||||
| 
 | ||||
| This module is not built by default. In order to compile it, you need to | ||||
| type 'make blas' from within your build dir. | ||||
| 
 | ||||
|  |  | |||
|  | @ -41,7 +41,7 @@ endif() | |||
| 
 | ||||
| # copy ctest properties, which currently | ||||
| # o raise the warning levels | ||||
| configure_file(${CMAKE_BINARY_DIR}/DartConfiguration.tcl ${CMAKE_BINARY_DIR}/DartConfiguration.tcl) | ||||
| configure_file(${CMAKE_CURRENT_BINARY_DIR}/DartConfiguration.tcl ${CMAKE_BINARY_DIR}/DartConfiguration.tcl) | ||||
| 
 | ||||
| # restore default CMAKE_MAKE_PROGRAM | ||||
| set(CMAKE_MAKE_PROGRAM ${CMAKE_MAKE_PROGRAM_SAVE}) | ||||
|  | @ -50,7 +50,7 @@ set(CMAKE_MAKE_PROGRAM ${CMAKE_MAKE_PROGRAM_SAVE}) | |||
| set(CMAKE_MAKE_PROGRAM_SAVE)  | ||||
| set(EIGEN_MAKECOMMAND_PLACEHOLDER) | ||||
| 
 | ||||
| configure_file(${CMAKE_SOURCE_DIR}/CTestCustom.cmake.in ${CMAKE_BINARY_DIR}/CTestCustom.cmake) | ||||
| configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CTestCustom.cmake.in ${CMAKE_BINARY_DIR}/CTestCustom.cmake) | ||||
| 
 | ||||
| # some documentation of this function would be nice | ||||
| ei_init_testing() | ||||
|  |  | |||
|  | @ -41,8 +41,8 @@ MatrixXd::Ones(rows,cols)           // ones(rows,cols) | |||
| C.setOnes(rows,cols)                // C = ones(rows,cols) | ||||
| MatrixXd::Random(rows,cols)         // rand(rows,cols)*2-1        // MatrixXd::Random returns uniform random numbers in (-1, 1). | ||||
| C.setRandom(rows,cols)              // C = rand(rows,cols)*2-1 | ||||
| VectorXd::LinSpace(size,low,high)   // linspace(low,high,size)' | ||||
| v.setLinSpace(size,low,high)        // v = linspace(low,high,size)' | ||||
| VectorXd::LinSpaced(size,low,high)   // linspace(low,high,size)' | ||||
| v.setLinSpaced(size,low,high)        // v = linspace(low,high,size)' | ||||
| 
 | ||||
| 
 | ||||
| // Matrix slicing and blocks. All expressions listed here are read/write. | ||||
|  | @ -91,6 +91,8 @@ R.adjoint()                        // R' | |||
| R.transpose()                      // R.' or conj(R') | ||||
| R.diagonal()                       // diag(R) | ||||
| x.asDiagonal()                     // diag(x) | ||||
| R.transpose().colwise().reverse(); // rot90(R) | ||||
| R.conjugate()                      // conj(R) | ||||
| 
 | ||||
| // All the same as Matlab, but matlab doesn't have *= style operators. | ||||
| // Matrix-vector.  Matrix-matrix.   Matrix-scalar. | ||||
|  | @ -167,6 +169,8 @@ x.cross(y)                // cross(x, y) Requires #include <Eigen/Geometry> | |||
| A.cast<double>();                  // double(A) | ||||
| A.cast<float>();                   // single(A) | ||||
| A.cast<int>();                     // int32(A) | ||||
| A.real();                          // real(A) | ||||
| A.imag();                          // imag(A) | ||||
| // if the original type equals destination type, no work is done | ||||
| 
 | ||||
| // Note that for most operations Eigen requires all operands to have the same type: | ||||
|  |  | |||
|  | @ -1,27 +0,0 @@ | |||
| namespace Eigen { | ||||
| 
 | ||||
| /** \eigenManualPage LinearLeastSquares Solving linear least squares problems | ||||
| 
 | ||||
| lede | ||||
| 
 | ||||
| \eigenAutoToc | ||||
| 
 | ||||
| \section LinearLeastSquaresCopied Copied | ||||
| 
 | ||||
| The best way to do least squares solving is with a SVD decomposition. Eigen provides one as the JacobiSVD class, and its solve() | ||||
| is doing least-squares solving. | ||||
| 
 | ||||
| Here is an example: | ||||
| <table class="example"> | ||||
| <tr><th>Example:</th><th>Output:</th></tr> | ||||
| <tr> | ||||
|   <td>\include TutorialLinAlgSVDSolve.cpp </td> | ||||
|   <td>\verbinclude TutorialLinAlgSVDSolve.out </td> | ||||
| </tr> | ||||
| </table> | ||||
| 
 | ||||
| For more information, including faster but less reliable methods, read our page concentrating on \ref LinearLeastSquares "linear least squares problems". | ||||
| 
 | ||||
| */ | ||||
| 
 | ||||
| } | ||||
|  | @ -62,6 +62,8 @@ run time. However, these assertions do cost time and can thus be turned off. | |||
|    expect that any objects passed to it are aligned. This will turn off vectorization. Not defined by default. | ||||
|  - \b EIGEN_DONT_ALIGN_STATICALLY - disables alignment of arrays on the stack. Not defined by default, unless | ||||
|    \c EIGEN_DONT_ALIGN is defined. | ||||
|  - \b EIGEN_DONT_PARALLELIZE - if defined, this disables multi-threading. This is only relevant if you enabled OpenMP. | ||||
|    See \ref TopicMultiThreading for details. | ||||
|  - \b EIGEN_DONT_VECTORIZE - disables explicit vectorization when defined. Not defined by default, unless  | ||||
|    alignment is disabled by %Eigen's platform test or the user defining \c EIGEN_DONT_ALIGN. | ||||
|  - \b EIGEN_FAST_MATH - enables some optimizations which might affect the accuracy of the result. This currently | ||||
|  | @ -70,6 +72,9 @@ run time. However, these assertions do cost time and can thus be turned off. | |||
|  - \b EIGEN_UNROLLING_LIMIT - defines the size of a loop to enable meta unrolling. Set it to zero to disable | ||||
|    unrolling. The size of a loop here is expressed in %Eigen's own notion of "number of FLOPS"; it does not | ||||
|    correspond to the number of iterations or the number of instructions. The default value is 100. | ||||
|  - \b EIGEN_STACK_ALLOCATION_LIMIT - defines the maximum size in bytes of a buffer allocated on the stack. For internal | ||||
|    temporary buffers, dynamic memory allocation is used as a fallback. For fixed-size matrices or arrays, exceeding | ||||
|    this threshold raises a compile-time assertion. Use 0 to set no limit. Default is 128 KB (see the sketch below). | ||||
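
A minimal usage sketch (the macro values are illustrative only, not recommendations): these macros are read at compile
time, so they must be defined before the first %Eigen header is included.

\code
// Sketch: configuration macros must precede the first Eigen include.
#define EIGEN_DONT_PARALLELIZE                  // keep Eigen single-threaded even if OpenMP is enabled
#define EIGEN_STACK_ALLOCATION_LIMIT (16*1024)  // cap internal stack buffers at 16 KB (default is 128 KB)
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(512, 512);
  Eigen::MatrixXd B = A * A.transpose();        // evaluated without Eigen-level parallelization
  return 0;
}
\endcode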
| 
 | ||||
| 
 | ||||
| \section TopicPreprocessorDirectivesPlugins Plugins | ||||
|  |  | |||
|  | @ -253,12 +253,15 @@ SparseMatrix<double> A, B; | |||
| B = SparseMatrix<double>(A.transpose()) + A; | ||||
| \endcode | ||||
| 
 | ||||
| Binary coefficient wise operators can also mix sparse and dense expressions: | ||||
| Some binary coefficient-wise operators can also mix sparse and dense expressions: | ||||
| \code | ||||
| sm2 = sm1.cwiseProduct(dm1); | ||||
| dm2 = sm1 + dm1; | ||||
| dm1 += sm1; | ||||
| \endcode | ||||
| 
 | ||||
| However, it is not yet possible to add a sparse and a dense matrix as in <tt>dm2 = sm1 + dm1</tt>. | ||||
| Please write this as the equivalent <tt>dm2 = dm1; dm2 += sm1</tt> (we plan to lift this restriction | ||||
| in the next release of %Eigen). | ||||
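
A minimal sketch of that workaround, assuming \c sm1 is a SparseMatrix<double> and \c dm1 a dense MatrixXd of
matching size (the names are illustrative):

\code
SparseMatrix<double> sm1(100,100);   // assumed to be filled elsewhere
MatrixXd dm1 = MatrixXd::Random(100,100);

// dm2 = sm1 + dm1;                  // not supported yet, see the restriction above
MatrixXd dm2 = dm1;                  // copy the dense operand first...
dm2 += sm1;                          // ...then accumulate the sparse one
\endcode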
| 
 | ||||
| %Sparse expressions also support transposition: | ||||
| \code | ||||
|  |  | |||
|  | @ -10,6 +10,26 @@ | |||
| #define EIGEN_NO_STATIC_ASSERT // otherwise we fail at compile time on unused paths
 | ||||
| #include "main.h" | ||||
| 
 | ||||
| template<typename MatrixType, typename Index, typename Scalar> | ||||
| typename Eigen::internal::enable_if<!NumTraits<typename MatrixType::Scalar>::IsComplex,typename MatrixType::Scalar>::type | ||||
| block_real_only(const MatrixType &m1, Index r1, Index r2, Index c1, Index c2, const Scalar& s1) { | ||||
|   // check cwise-Functions:
 | ||||
|   VERIFY_IS_APPROX(m1.row(r1).cwiseMax(s1), m1.cwiseMax(s1).row(r1)); | ||||
|   VERIFY_IS_APPROX(m1.col(c1).cwiseMin(s1), m1.cwiseMin(s1).col(c1)); | ||||
| 
 | ||||
|   VERIFY_IS_APPROX(m1.block(r1,c1,r2-r1+1,c2-c1+1).cwiseMin(s1), m1.cwiseMin(s1).block(r1,c1,r2-r1+1,c2-c1+1)); | ||||
|   VERIFY_IS_APPROX(m1.block(r1,c1,r2-r1+1,c2-c1+1).cwiseMax(s1), m1.cwiseMax(s1).block(r1,c1,r2-r1+1,c2-c1+1)); | ||||
|    | ||||
|   return Scalar(0); | ||||
| } | ||||
| 
 | ||||
| template<typename MatrixType, typename Index, typename Scalar> | ||||
| typename Eigen::internal::enable_if<NumTraits<typename MatrixType::Scalar>::IsComplex,typename MatrixType::Scalar>::type | ||||
| block_real_only(const MatrixType &, Index, Index, Index, Index, const Scalar&) { | ||||
|   return Scalar(0); | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| template<typename MatrixType> void block(const MatrixType& m) | ||||
| { | ||||
|   typedef typename MatrixType::Index Index; | ||||
|  | @ -37,6 +57,8 @@ template<typename MatrixType> void block(const MatrixType& m) | |||
|   Index c1 = internal::random<Index>(0,cols-1); | ||||
|   Index c2 = internal::random<Index>(c1,cols-1); | ||||
| 
 | ||||
|   block_real_only(m1, r1, r2, c1, c1, s1); | ||||
| 
 | ||||
|   //check row() and col()
 | ||||
|   VERIFY_IS_EQUAL(m1.col(c1).transpose(), m1.transpose().row(c1)); | ||||
|   //check operator(), both constant and non-constant, on row() and col()
 | ||||
|  | @ -52,6 +74,7 @@ template<typename MatrixType> void block(const MatrixType& m) | |||
|   m1.col(c1).col(0) += s1 * m1_copy.col(c2); | ||||
|   VERIFY_IS_APPROX(m1.col(c1), m1_copy.col(c1) + Scalar(2) * s1 * m1_copy.col(c2)); | ||||
|    | ||||
|    | ||||
|   //check block()
 | ||||
|   Matrix<Scalar,Dynamic,Dynamic> b1(1,1); b1(0,0) = m1(r1,c1); | ||||
| 
 | ||||
|  |  | |||
|  | @ -68,6 +68,7 @@ template<typename MatrixType> void cholesky(const MatrixType& m) | |||
|   Index cols = m.cols(); | ||||
| 
 | ||||
|   typedef typename MatrixType::Scalar Scalar; | ||||
|   typedef typename NumTraits<Scalar>::Real RealScalar; | ||||
|   typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> SquareMatrixType; | ||||
|   typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType; | ||||
| 
 | ||||
|  | @ -179,6 +180,57 @@ template<typename MatrixType> void cholesky(const MatrixType& m) | |||
|     // restore
 | ||||
|     if(sign == -1) | ||||
|       symm = -symm; | ||||
| 
 | ||||
|     // check matrices coming from linear constraints with Lagrange multipliers
 | ||||
|     if(rows>=3) | ||||
|     { | ||||
|       SquareMatrixType A = symm; | ||||
|       int c = internal::random<int>(0,rows-2); | ||||
|       A.bottomRightCorner(c,c).setZero(); | ||||
|       // Make sure a solution exists:
 | ||||
|       vecX.setRandom(); | ||||
|       vecB = A * vecX; | ||||
|       vecX.setZero(); | ||||
|       ldltlo.compute(A); | ||||
|       VERIFY_IS_APPROX(A, ldltlo.reconstructedMatrix()); | ||||
|       vecX = ldltlo.solve(vecB); | ||||
|       VERIFY_IS_APPROX(A * vecX, vecB); | ||||
|     } | ||||
|      | ||||
|     // check non-full rank matrices
 | ||||
|     if(rows>=3) | ||||
|     { | ||||
|       int r = internal::random<int>(1,rows-1); | ||||
|       Matrix<Scalar,Dynamic,Dynamic> a = Matrix<Scalar,Dynamic,Dynamic>::Random(rows,r); | ||||
|       SquareMatrixType A = a * a.adjoint(); | ||||
|       // Make sure a solution exists:
 | ||||
|       vecX.setRandom(); | ||||
|       vecB = A * vecX; | ||||
|       vecX.setZero(); | ||||
|       ldltlo.compute(A); | ||||
|       VERIFY_IS_APPROX(A, ldltlo.reconstructedMatrix()); | ||||
|       vecX = ldltlo.solve(vecB); | ||||
|       VERIFY_IS_APPROX(A * vecX, vecB); | ||||
|     } | ||||
|      | ||||
|     // check matrices with a wide spectrum
 | ||||
|     if(rows>=3) | ||||
|     { | ||||
|       RealScalar s = (std::min)(16,std::numeric_limits<RealScalar>::max_exponent10/8); | ||||
|       Matrix<Scalar,Dynamic,Dynamic> a = Matrix<Scalar,Dynamic,Dynamic>::Random(rows,rows); | ||||
|       Matrix<RealScalar,Dynamic,1> d =  Matrix<RealScalar,Dynamic,1>::Random(rows); | ||||
|       for(int k=0; k<rows; ++k) | ||||
|         d(k) = d(k)*std::pow(RealScalar(10),internal::random<RealScalar>(-s,s)); | ||||
|       SquareMatrixType A = a * d.asDiagonal() * a.adjoint(); | ||||
|       // Make sure a solution exists:
 | ||||
|       vecX.setRandom(); | ||||
|       vecB = A * vecX; | ||||
|       vecX.setZero(); | ||||
|       ldltlo.compute(A); | ||||
|       VERIFY_IS_APPROX(A, ldltlo.reconstructedMatrix()); | ||||
|       vecX = ldltlo.solve(vecB); | ||||
|       VERIFY_IS_APPROX(A * vecX, vecB); | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   // update/downdate
 | ||||
|  |  | |||
|  | @ -53,7 +53,7 @@ void check_aligned_new() | |||
| 
 | ||||
| void check_aligned_stack_alloc() | ||||
| { | ||||
|   for(int i = 1; i < 1000; i++) | ||||
|   for(int i = 1; i < 400; i++) | ||||
|   { | ||||
|     ei_declare_aligned_stack_constructed_variable(float,p,i,0); | ||||
|     VERIFY(size_t(p)%ALIGNMENT==0); | ||||
|  | @ -87,6 +87,32 @@ template<typename T> void check_dynaligned() | |||
|   delete obj; | ||||
| } | ||||
| 
 | ||||
| template<typename T> void check_custom_new_delete() | ||||
| { | ||||
|   { | ||||
|     T* t = new T; | ||||
|     delete t; | ||||
|   } | ||||
|    | ||||
|   { | ||||
|     std::size_t N = internal::random<std::size_t>(1,10); | ||||
|     T* t = new T[N]; | ||||
|     delete[] t; | ||||
|   } | ||||
|    | ||||
| #ifdef EIGEN_ALIGN | ||||
|   { | ||||
|     T* t = static_cast<T *>((T::operator new)(sizeof(T))); | ||||
|     (T::operator delete)(t, sizeof(T)); | ||||
|   } | ||||
|    | ||||
|   { | ||||
|     T* t = static_cast<T *>((T::operator new)(sizeof(T))); | ||||
|     (T::operator delete)(t); | ||||
|   } | ||||
| #endif | ||||
| } | ||||
| 
 | ||||
| void test_dynalloc() | ||||
| { | ||||
|   // low level dynamic memory allocation
 | ||||
|  | @ -102,6 +128,12 @@ void test_dynalloc() | |||
|     CALL_SUBTEST(check_dynaligned<Matrix4f>() ); | ||||
|     CALL_SUBTEST(check_dynaligned<Vector4d>() ); | ||||
|     CALL_SUBTEST(check_dynaligned<Vector4i>() ); | ||||
|     CALL_SUBTEST(check_dynaligned<Vector8f>() ); | ||||
| 
 | ||||
|     CALL_SUBTEST( check_custom_new_delete<Vector4f>() ); | ||||
|     CALL_SUBTEST( check_custom_new_delete<Vector2f>() ); | ||||
|     CALL_SUBTEST( check_custom_new_delete<Matrix4f>() ); | ||||
|     CALL_SUBTEST( check_custom_new_delete<MatrixXi>() ); | ||||
|   } | ||||
|    | ||||
|   // check static allocation, who knows ?
 | ||||
|  |  | |||
|  | @ -67,6 +67,7 @@ template<typename MatrixType, int QRPreconditioner> | |||
| void jacobisvd_solve(const MatrixType& m, unsigned int computationOptions) | ||||
| { | ||||
|   typedef typename MatrixType::Scalar Scalar; | ||||
|   typedef typename MatrixType::RealScalar RealScalar; | ||||
|   typedef typename MatrixType::Index Index; | ||||
|   Index rows = m.rows(); | ||||
|   Index cols = m.cols(); | ||||
|  | @ -81,20 +82,100 @@ void jacobisvd_solve(const MatrixType& m, unsigned int computationOptions) | |||
| 
 | ||||
|   RhsType rhs = RhsType::Random(rows, internal::random<Index>(1, cols)); | ||||
|   JacobiSVD<MatrixType, QRPreconditioner> svd(m, computationOptions); | ||||
| 
 | ||||
|        if(internal::is_same<RealScalar,double>::value) svd.setThreshold(1e-8); | ||||
|   else if(internal::is_same<RealScalar,float>::value)  svd.setThreshold(1e-4); | ||||
|    | ||||
|   SolutionType x = svd.solve(rhs); | ||||
|    | ||||
|   RealScalar residual = (m*x-rhs).norm(); | ||||
|   // Check that there is no significantly better solution in the neighborhood of x
 | ||||
|   if(!test_isMuchSmallerThan(residual,rhs.norm())) | ||||
|   { | ||||
|     // If the residual is very small, then we have an exact solution, so we are already good.
 | ||||
|     for(int k=0;k<x.rows();++k) | ||||
|     { | ||||
|       SolutionType y(x); | ||||
|       y.row(k).array() += 2*NumTraits<RealScalar>::epsilon(); | ||||
|       RealScalar residual_y = (m*y-rhs).norm(); | ||||
|       VERIFY( test_isApprox(residual_y,residual) || residual < residual_y ); | ||||
|        | ||||
|       y.row(k) = x.row(k).array() - 2*NumTraits<RealScalar>::epsilon(); | ||||
|       residual_y = (m*y-rhs).norm(); | ||||
|       VERIFY( test_isApprox(residual_y,residual) || residual < residual_y ); | ||||
|     } | ||||
|   } | ||||
|    | ||||
|   // evaluate normal equation which works also for least-squares solutions
 | ||||
|   if(internal::is_same<RealScalar,double>::value) | ||||
|   { | ||||
|     // This test is not stable with single precision.
 | ||||
|     // This is probably because squaring m significantly affects the precision.
 | ||||
|     VERIFY_IS_APPROX(m.adjoint()*m*x,m.adjoint()*rhs); | ||||
|   } | ||||
|    | ||||
|   // check minimal norm solutions
 | ||||
|   { | ||||
|     // generate a full-rank m x n problem with m<n
 | ||||
|     enum { | ||||
|       RankAtCompileTime2 = ColsAtCompileTime==Dynamic ? Dynamic : (ColsAtCompileTime)/2+1, | ||||
|       RowsAtCompileTime3 = ColsAtCompileTime==Dynamic ? Dynamic : ColsAtCompileTime+1 | ||||
|     }; | ||||
|     typedef Matrix<Scalar, RankAtCompileTime2, ColsAtCompileTime> MatrixType2; | ||||
|     typedef Matrix<Scalar, RankAtCompileTime2, 1> RhsType2; | ||||
|     typedef Matrix<Scalar, ColsAtCompileTime, RankAtCompileTime2> MatrixType2T; | ||||
|     Index rank = RankAtCompileTime2==Dynamic ? internal::random<Index>(1,cols) : Index(RankAtCompileTime2); | ||||
|     MatrixType2 m2(rank,cols); | ||||
|     int guard = 0; | ||||
|     do { | ||||
|       m2.setRandom(); | ||||
|     } while(m2.jacobiSvd().setThreshold(test_precision<Scalar>()).rank()!=rank && (++guard)<10); | ||||
|     VERIFY(guard<10); | ||||
|     RhsType2 rhs2 = RhsType2::Random(rank); | ||||
|     // use QR to find a reference minimal norm solution
 | ||||
|     HouseholderQR<MatrixType2T> qr(m2.adjoint()); | ||||
|     Matrix<Scalar,Dynamic,1> tmp = qr.matrixQR().topLeftCorner(rank,rank).template triangularView<Upper>().adjoint().solve(rhs2); | ||||
|     tmp.conservativeResize(cols); | ||||
|     tmp.tail(cols-rank).setZero(); | ||||
|     SolutionType x21 = qr.householderQ() * tmp; | ||||
|     // now check with SVD
 | ||||
|     JacobiSVD<MatrixType2, ColPivHouseholderQRPreconditioner> svd2(m2, computationOptions); | ||||
|     SolutionType x22 = svd2.solve(rhs2); | ||||
|     VERIFY_IS_APPROX(m2*x21, rhs2); | ||||
|     VERIFY_IS_APPROX(m2*x22, rhs2); | ||||
|     VERIFY_IS_APPROX(x21, x22); | ||||
|      | ||||
|     // Now check with a rank deficient matrix
 | ||||
|     typedef Matrix<Scalar, RowsAtCompileTime3, ColsAtCompileTime> MatrixType3; | ||||
|     typedef Matrix<Scalar, RowsAtCompileTime3, 1> RhsType3; | ||||
|     Index rows3 = RowsAtCompileTime3==Dynamic ? internal::random<Index>(rank+1,2*cols) : Index(RowsAtCompileTime3); | ||||
|     Matrix<Scalar,RowsAtCompileTime3,Dynamic> C = Matrix<Scalar,RowsAtCompileTime3,Dynamic>::Random(rows3,rank); | ||||
|     MatrixType3 m3 = C * m2; | ||||
|     RhsType3 rhs3 = C * rhs2; | ||||
|     JacobiSVD<MatrixType3, ColPivHouseholderQRPreconditioner> svd3(m3, computationOptions); | ||||
|     SolutionType x3 = svd3.solve(rhs3); | ||||
|     if(svd3.rank()!=rank) { | ||||
|       std::cout << m3 << "\n\n"; | ||||
|       std::cout << svd3.singularValues().transpose() << "\n"; | ||||
|       std::cout << svd3.rank() << " == " << rank << "\n"; | ||||
|       std::cout << x21.norm() << " == " << x3.norm() << "\n"; | ||||
|     } | ||||
| //     VERIFY_IS_APPROX(m3*x3, rhs3);
 | ||||
|     VERIFY_IS_APPROX(m3*x21, rhs3); | ||||
|     VERIFY_IS_APPROX(m2*x3, rhs2); | ||||
|      | ||||
|     VERIFY_IS_APPROX(x21, x3); | ||||
|   } | ||||
| } | ||||
| 
 | ||||
| template<typename MatrixType, int QRPreconditioner> | ||||
| void jacobisvd_test_all_computation_options(const MatrixType& m) | ||||
| { | ||||
|   if (QRPreconditioner == NoQRPreconditioner && m.rows() != m.cols()) | ||||
|     return; | ||||
|   JacobiSVD<MatrixType, QRPreconditioner> fullSvd(m, ComputeFullU|ComputeFullV); | ||||
| 
 | ||||
|   jacobisvd_check_full(m, fullSvd); | ||||
|   jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeFullU | ComputeFullV); | ||||
|   CALL_SUBTEST(( jacobisvd_check_full(m, fullSvd) )); | ||||
|   CALL_SUBTEST(( jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeFullU | ComputeFullV) )); | ||||
|    | ||||
|   #if defined __INTEL_COMPILER | ||||
|   // remark #111: statement is unreachable
 | ||||
|  | @ -103,20 +184,20 @@ void jacobisvd_test_all_computation_options(const MatrixType& m) | |||
|   if(QRPreconditioner == FullPivHouseholderQRPreconditioner) | ||||
|     return; | ||||
| 
 | ||||
|   jacobisvd_compare_to_full(m, ComputeFullU, fullSvd); | ||||
|   jacobisvd_compare_to_full(m, ComputeFullV, fullSvd); | ||||
|   jacobisvd_compare_to_full(m, 0, fullSvd); | ||||
|   CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeFullU, fullSvd) )); | ||||
|   CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeFullV, fullSvd) )); | ||||
|   CALL_SUBTEST(( jacobisvd_compare_to_full(m, 0, fullSvd) )); | ||||
| 
 | ||||
|   if (MatrixType::ColsAtCompileTime == Dynamic) { | ||||
|     // thin U/V are only available with dynamic number of columns
 | ||||
|     jacobisvd_compare_to_full(m, ComputeFullU|ComputeThinV, fullSvd); | ||||
|     jacobisvd_compare_to_full(m,              ComputeThinV, fullSvd); | ||||
|     jacobisvd_compare_to_full(m, ComputeThinU|ComputeFullV, fullSvd); | ||||
|     jacobisvd_compare_to_full(m, ComputeThinU             , fullSvd); | ||||
|     jacobisvd_compare_to_full(m, ComputeThinU|ComputeThinV, fullSvd); | ||||
|     jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeFullU | ComputeThinV); | ||||
|     jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeThinU | ComputeFullV); | ||||
|     jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeThinU | ComputeThinV); | ||||
|     CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeFullU|ComputeThinV, fullSvd) )); | ||||
|     CALL_SUBTEST(( jacobisvd_compare_to_full(m,              ComputeThinV, fullSvd) )); | ||||
|     CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinU|ComputeFullV, fullSvd) )); | ||||
|     CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinU             , fullSvd) )); | ||||
|     CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinU|ComputeThinV, fullSvd) )); | ||||
|     CALL_SUBTEST(( jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeFullU | ComputeThinV) )); | ||||
|     CALL_SUBTEST(( jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeThinU | ComputeFullV) )); | ||||
|     CALL_SUBTEST(( jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeThinU | ComputeThinV) )); | ||||
| 
 | ||||
|     // test reconstruction
 | ||||
|     typedef typename MatrixType::Index Index; | ||||
|  | @ -129,12 +210,29 @@ void jacobisvd_test_all_computation_options(const MatrixType& m) | |||
| template<typename MatrixType> | ||||
| void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true) | ||||
| { | ||||
|   MatrixType m = pickrandom ? MatrixType::Random(a.rows(), a.cols()) : a; | ||||
|   MatrixType m = a; | ||||
|   if(pickrandom) | ||||
|   { | ||||
|     typedef typename MatrixType::Scalar Scalar; | ||||
|     typedef typename MatrixType::RealScalar RealScalar; | ||||
|     typedef typename MatrixType::Index Index; | ||||
|     Index diagSize = (std::min)(a.rows(), a.cols()); | ||||
|     RealScalar s = std::numeric_limits<RealScalar>::max_exponent10/4; | ||||
|     s = internal::random<RealScalar>(1,s); | ||||
|     Matrix<RealScalar,Dynamic,1> d =  Matrix<RealScalar,Dynamic,1>::Random(diagSize); | ||||
|     for(Index k=0; k<diagSize; ++k) | ||||
|       d(k) = d(k)*std::pow(RealScalar(10),internal::random<RealScalar>(-s,s)); | ||||
|     m = Matrix<Scalar,Dynamic,Dynamic>::Random(a.rows(),diagSize) * d.asDiagonal() * Matrix<Scalar,Dynamic,Dynamic>::Random(diagSize,a.cols()); | ||||
|     // cancel some coeffs
 | ||||
|     Index n  = internal::random<Index>(0,m.size()-1); | ||||
|     for(Index i=0; i<n; ++i) | ||||
|       m(internal::random<Index>(0,m.rows()-1), internal::random<Index>(0,m.cols()-1)) = Scalar(0); | ||||
|   } | ||||
| 
 | ||||
|   jacobisvd_test_all_computation_options<MatrixType, FullPivHouseholderQRPreconditioner>(m); | ||||
|   jacobisvd_test_all_computation_options<MatrixType, ColPivHouseholderQRPreconditioner>(m); | ||||
|   jacobisvd_test_all_computation_options<MatrixType, HouseholderQRPreconditioner>(m); | ||||
|   jacobisvd_test_all_computation_options<MatrixType, NoQRPreconditioner>(m); | ||||
|   CALL_SUBTEST(( jacobisvd_test_all_computation_options<MatrixType, FullPivHouseholderQRPreconditioner>(m) )); | ||||
|   CALL_SUBTEST(( jacobisvd_test_all_computation_options<MatrixType, ColPivHouseholderQRPreconditioner>(m) )); | ||||
|   CALL_SUBTEST(( jacobisvd_test_all_computation_options<MatrixType, HouseholderQRPreconditioner>(m) )); | ||||
|   CALL_SUBTEST(( jacobisvd_test_all_computation_options<MatrixType, NoQRPreconditioner>(m) )); | ||||
| } | ||||
| 
 | ||||
| template<typename MatrixType> void jacobisvd_verify_assert(const MatrixType& m) | ||||
|  | @ -328,6 +426,7 @@ void test_jacobisvd() | |||
|     TEST_SET_BUT_UNUSED_VARIABLE(r) | ||||
|     TEST_SET_BUT_UNUSED_VARIABLE(c) | ||||
|      | ||||
|     CALL_SUBTEST_10(( jacobisvd<MatrixXd>(MatrixXd(r,c)) )); | ||||
|     CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(r,c)) )); | ||||
|     CALL_SUBTEST_8(( jacobisvd<MatrixXcd>(MatrixXcd(r,c)) )); | ||||
|     (void) r; | ||||
|  |  | |||
|  | @ -154,59 +154,79 @@ template<typename PlainObjectType> void check_const_correctness(const PlainObjec | |||
|   VERIFY( !(Ref<ConstPlainObjectType, Aligned>::Flags & LvalueBit) ); | ||||
| } | ||||
| 
 | ||||
| EIGEN_DONT_INLINE void call_ref_1(Ref<VectorXf> ) { } | ||||
| EIGEN_DONT_INLINE void call_ref_2(const Ref<const VectorXf>& ) { } | ||||
| EIGEN_DONT_INLINE void call_ref_3(Ref<VectorXf,0,InnerStride<> > ) { } | ||||
| EIGEN_DONT_INLINE void call_ref_4(const Ref<const VectorXf,0,InnerStride<> >& ) { } | ||||
| EIGEN_DONT_INLINE void call_ref_5(Ref<MatrixXf,0,OuterStride<> > ) { } | ||||
| EIGEN_DONT_INLINE void call_ref_6(const Ref<const MatrixXf,0,OuterStride<> >& ) { } | ||||
| template<typename B> | ||||
| EIGEN_DONT_INLINE void call_ref_1(Ref<VectorXf> a, const B &b) { VERIFY_IS_EQUAL(a,b); } | ||||
| template<typename B> | ||||
| EIGEN_DONT_INLINE void call_ref_2(const Ref<const VectorXf>& a, const B &b) { VERIFY_IS_EQUAL(a,b); } | ||||
| template<typename B> | ||||
| EIGEN_DONT_INLINE void call_ref_3(Ref<VectorXf,0,InnerStride<> > a, const B &b) { VERIFY_IS_EQUAL(a,b); } | ||||
| template<typename B> | ||||
| EIGEN_DONT_INLINE void call_ref_4(const Ref<const VectorXf,0,InnerStride<> >& a, const B &b) { VERIFY_IS_EQUAL(a,b); } | ||||
| template<typename B> | ||||
| EIGEN_DONT_INLINE void call_ref_5(Ref<MatrixXf,0,OuterStride<> > a, const B &b) { VERIFY_IS_EQUAL(a,b); } | ||||
| template<typename B> | ||||
| EIGEN_DONT_INLINE void call_ref_6(const Ref<const MatrixXf,0,OuterStride<> >& a, const B &b) { VERIFY_IS_EQUAL(a,b); } | ||||
| template<typename B> | ||||
| EIGEN_DONT_INLINE void call_ref_7(Ref<Matrix<float,Dynamic,3> > a, const B &b) { VERIFY_IS_EQUAL(a,b); } | ||||
| 
 | ||||
| void call_ref() | ||||
| { | ||||
|   VectorXcf ca(10); | ||||
|   VectorXf a(10); | ||||
|   VectorXcf ca  = VectorXcf::Random(10); | ||||
|   VectorXf a    = VectorXf::Random(10); | ||||
|   RowVectorXf b = RowVectorXf::Random(10); | ||||
|   MatrixXf A    = MatrixXf::Random(10,10); | ||||
|   RowVector3f c = RowVector3f::Random(); | ||||
|   const VectorXf& ac(a); | ||||
|   VectorBlock<VectorXf> ab(a,0,3); | ||||
|   MatrixXf A(10,10); | ||||
|   const VectorBlock<VectorXf> abc(a,0,3); | ||||
|    | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_1(a), 0); | ||||
| 
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_1(a,a), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_1(b,b.transpose()), 0); | ||||
| //   call_ref_1(ac);           // does not compile because ac is const
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_1(ab), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_1(a.head(4)), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_1(abc), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_1(A.col(3)), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_1(ab,ab), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_1(a.head(4),a.head(4)), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_1(abc,abc), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_1(A.col(3),A.col(3)), 0); | ||||
| //   call_ref_1(A.row(3));    // does not compile because innerstride!=1
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_3(A.row(3)), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_4(A.row(3)), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_3(A.row(3),A.row(3).transpose()), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_4(A.row(3),A.row(3).transpose()), 0); | ||||
| //   call_ref_1(a+a);          // does not compile for obvious reason
 | ||||
| 
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_2(A*A.col(1)), 1);     // evaluated into a temp
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_2(ac.head(5)), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_2(ac), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_2(a), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_2(ab), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_2(a.head(4)), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_2(a+a), 1);            // evaluated into a temp
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_2(ca.imag()), 1);      // evaluated into a temp
 | ||||
|   MatrixXf tmp = A*A.col(1); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_2(A*A.col(1), tmp), 1);     // evaluated into a temp
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_2(ac.head(5),ac.head(5)), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_2(ac,ac), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_2(a,a), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_2(ab,ab), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_2(a.head(4),a.head(4)), 0); | ||||
|   tmp = a+a; | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_2(a+a,tmp), 1);            // evaluated into a temp
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_2(ca.imag(),ca.imag()), 1);      // evaluated into a temp
 | ||||
| 
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_4(ac.head(5)), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_4(a+a), 1);           // evaluated into a temp
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_4(ca.imag()), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_4(ac.head(5),ac.head(5)), 0); | ||||
|   tmp = a+a; | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_4(a+a,tmp), 1);           // evaluated into a temp
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_4(ca.imag(),ca.imag()), 0); | ||||
| 
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_5(a), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_5(a.head(3)), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_5(A), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_5(a,a), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_5(a.head(3),a.head(3)), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_5(A,A), 0); | ||||
| //   call_ref_5(A.transpose());   // does not compile
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_5(A.block(1,1,2,2)), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_5(A.block(1,1,2,2),A.block(1,1,2,2)), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_5(b,b), 0);             // storage orders do not match, but this is a degenerate case that should work
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_5(a.row(3),a.row(3)), 0); | ||||
| 
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_6(a), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_6(a.head(3)), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_6(A.row(3)), 1);           // evaluated into a temp though it could be avoided by viewing it as a 1xn matrix
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_6(A+A), 1);                // evaluated into a temp
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_6(A), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_6(A.transpose()), 1);      // evaluated into a temp because the storage orders do not match
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_6(A.block(1,1,2,2)), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_6(a,a), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_6(a.head(3),a.head(3)), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_6(A.row(3),A.row(3)), 1);           // evaluated into a temp though it could be avoided by viewing it as a 1xn matrix
 | ||||
|   tmp = A+A; | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_6(A+A,tmp), 1);                // evaluated into a temp
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_6(A,A), 0); | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_6(A.transpose(),A.transpose()), 1);      // evaluated into a temp because the storage orders do not match
 | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_6(A.block(1,1,2,2),A.block(1,1,2,2)), 0); | ||||
|    | ||||
|   VERIFY_EVALUATION_COUNT( call_ref_7(c,c), 0); | ||||
| } | ||||
| 
 | ||||
| void test_ref() | ||||
|  |  | |||
|  | @ -11,26 +11,31 @@ | |||
| 
 | ||||
| template<typename T> void test_simplicial_cholesky_T() | ||||
| { | ||||
|   SimplicialCholesky<SparseMatrix<T>, Lower> chol_colmajor_lower; | ||||
|   SimplicialCholesky<SparseMatrix<T>, Upper> chol_colmajor_upper; | ||||
|   SimplicialLLT<SparseMatrix<T>, Lower> llt_colmajor_lower; | ||||
|   SimplicialLDLT<SparseMatrix<T>, Upper> llt_colmajor_upper; | ||||
|   SimplicialLDLT<SparseMatrix<T>, Lower> ldlt_colmajor_lower; | ||||
|   SimplicialLDLT<SparseMatrix<T>, Upper> ldlt_colmajor_upper; | ||||
|   SimplicialCholesky<SparseMatrix<T>, Lower> chol_colmajor_lower_amd; | ||||
|   SimplicialCholesky<SparseMatrix<T>, Upper> chol_colmajor_upper_amd; | ||||
|   SimplicialLLT<SparseMatrix<T>, Lower> llt_colmajor_lower_amd; | ||||
|   SimplicialLLT<SparseMatrix<T>, Upper> llt_colmajor_upper_amd; | ||||
|   SimplicialLDLT<SparseMatrix<T>, Lower> ldlt_colmajor_lower_amd; | ||||
|   SimplicialLDLT<SparseMatrix<T>, Upper> ldlt_colmajor_upper_amd; | ||||
|   SimplicialLDLT<SparseMatrix<T>, Lower, NaturalOrdering<int> > ldlt_colmajor_lower_nat; | ||||
|   SimplicialLDLT<SparseMatrix<T>, Upper, NaturalOrdering<int> > ldlt_colmajor_upper_nat; | ||||
| 
 | ||||
|   check_sparse_spd_solving(chol_colmajor_lower); | ||||
|   check_sparse_spd_solving(chol_colmajor_upper); | ||||
|   check_sparse_spd_solving(llt_colmajor_lower); | ||||
|   check_sparse_spd_solving(llt_colmajor_upper); | ||||
|   check_sparse_spd_solving(ldlt_colmajor_lower); | ||||
|   check_sparse_spd_solving(ldlt_colmajor_upper); | ||||
|   check_sparse_spd_solving(chol_colmajor_lower_amd); | ||||
|   check_sparse_spd_solving(chol_colmajor_upper_amd); | ||||
|   check_sparse_spd_solving(llt_colmajor_lower_amd); | ||||
|   check_sparse_spd_solving(llt_colmajor_upper_amd); | ||||
|   check_sparse_spd_solving(ldlt_colmajor_lower_amd); | ||||
|   check_sparse_spd_solving(ldlt_colmajor_upper_amd); | ||||
|    | ||||
|   check_sparse_spd_determinant(chol_colmajor_lower); | ||||
|   check_sparse_spd_determinant(chol_colmajor_upper); | ||||
|   check_sparse_spd_determinant(llt_colmajor_lower); | ||||
|   check_sparse_spd_determinant(llt_colmajor_upper); | ||||
|   check_sparse_spd_determinant(ldlt_colmajor_lower); | ||||
|   check_sparse_spd_determinant(ldlt_colmajor_upper); | ||||
|   check_sparse_spd_determinant(chol_colmajor_lower_amd); | ||||
|   check_sparse_spd_determinant(chol_colmajor_upper_amd); | ||||
|   check_sparse_spd_determinant(llt_colmajor_lower_amd); | ||||
|   check_sparse_spd_determinant(llt_colmajor_upper_amd); | ||||
|   check_sparse_spd_determinant(ldlt_colmajor_lower_amd); | ||||
|   check_sparse_spd_determinant(ldlt_colmajor_upper_amd); | ||||
|    | ||||
|   check_sparse_spd_solving(ldlt_colmajor_lower_nat); | ||||
|   check_sparse_spd_solving(ldlt_colmajor_upper_nat); | ||||
| } | ||||
| 
 | ||||
| void test_simplicial_cholesky() | ||||
|  |  | |||
|  | @ -2,24 +2,24 @@ | |||
| // for linear algebra.
 | ||||
| //
 | ||||
| // Copyright (C) 2012 Desire Nuentsa Wakam <desire.nuentsa_wakam@inria.fr>
 | ||||
| // Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
 | ||||
| //
 | ||||
| // This Source Code Form is subject to the terms of the Mozilla
 | ||||
| // Public License v. 2.0. If a copy of the MPL was not distributed
 | ||||
| #include "sparse.h" | ||||
| #include <Eigen/SparseQR> | ||||
| 
 | ||||
| 
 | ||||
| template<typename MatrixType,typename DenseMat> | ||||
| int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows = 300, int maxCols = 300) | ||||
| int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows = 300, int maxCols = 150) | ||||
| { | ||||
|   eigen_assert(maxRows >= maxCols); | ||||
|   typedef typename MatrixType::Scalar Scalar; | ||||
|   int rows = internal::random<int>(1,maxRows); | ||||
|   int cols = internal::random<int>(1,rows); | ||||
|   int cols = internal::random<int>(1,maxCols); | ||||
|   double density = (std::max)(8./(rows*cols), 0.01); | ||||
|    | ||||
|   A.resize(rows,rows); | ||||
|   dA.resize(rows,rows); | ||||
|   A.resize(rows,cols); | ||||
|   dA.resize(rows,cols); | ||||
|   initSparse<Scalar>(density, dA, A,ForceNonZeroDiag); | ||||
|   A.makeCompressed(); | ||||
|   int nop = internal::random<int>(0, internal::random<double>(0,1) > 0.5 ? cols/2 : 0); | ||||
|  | @ -31,6 +31,13 @@ int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows | |||
|     A.col(j0)  = s * A.col(j1); | ||||
|     dA.col(j0) = s * dA.col(j1); | ||||
|   } | ||||
|    | ||||
| //   if(rows<cols) {
 | ||||
| //     A.conservativeResize(cols,cols);
 | ||||
| //     dA.conservativeResize(cols,cols);
 | ||||
| //     dA.bottomRows(cols-rows).setZero();
 | ||||
| //   }
 | ||||
|    | ||||
|   return rows; | ||||
| } | ||||
| 
 | ||||
|  | @ -42,11 +49,10 @@ template<typename Scalar> void test_sparseqr_scalar() | |||
|   MatrixType A; | ||||
|   DenseMat dA; | ||||
|   DenseVector refX,x,b;  | ||||
|   SparseQR<MatrixType, AMDOrdering<int> > solver;  | ||||
|   SparseQR<MatrixType, COLAMDOrdering<int> > solver;  | ||||
|   generate_sparse_rectangular_problem(A,dA); | ||||
|    | ||||
|   int n = A.cols(); | ||||
|   b = DenseVector::Random(n); | ||||
|   b = dA * DenseVector::Random(A.cols()); | ||||
|   solver.compute(A); | ||||
|   if (solver.info() != Success) | ||||
|   { | ||||
|  | @ -61,16 +67,18 @@ template<typename Scalar> void test_sparseqr_scalar() | |||
|     exit(0); | ||||
|     return; | ||||
|   } | ||||
|    | ||||
|   VERIFY_IS_APPROX(A * x, b); | ||||
|    | ||||
|   //Compare with a dense QR solver
 | ||||
|   ColPivHouseholderQR<DenseMat> dqr(dA); | ||||
|   refX = dqr.solve(b); | ||||
|    | ||||
|   VERIFY_IS_EQUAL(dqr.rank(), solver.rank()); | ||||
|    | ||||
|   if(solver.rank()<A.cols()) | ||||
|     VERIFY((dA * refX - b).norm() * 2 > (A * x - b).norm() ); | ||||
|   else | ||||
|   if(solver.rank()==A.cols()) // full rank
 | ||||
|     VERIFY_IS_APPROX(x, refX); | ||||
| //   else
 | ||||
| //     VERIFY((dA * refX - b).norm() * 2 > (A * x - b).norm() );
 | ||||
| 
 | ||||
|   // Compute explicitly the matrix Q
 | ||||
|   MatrixType Q, QtQ, idM; | ||||
|  | @ -88,3 +96,4 @@ void test_sparseqr() | |||
|     CALL_SUBTEST_2(test_sparseqr_scalar<std::complex<double> >()); | ||||
|   } | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -2,7 +2,7 @@ | |||
| // for linear algebra.
 | ||||
| //
 | ||||
| // Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
 | ||||
| // Copyright (C) 2012 Kolja Brix <brix@igpm.rwth-aaachen.de>
 | ||||
| // Copyright (C) 2012, 2014 Kolja Brix <brix@igpm.rwth-aaachen.de>
 | ||||
| //
 | ||||
| // This Source Code Form is subject to the terms of the Mozilla
 | ||||
| // Public License v. 2.0. If a copy of the MPL was not distributed
 | ||||
|  | @ -72,16 +72,20 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition | |||
| 
 | ||||
| 	VectorType p0 = rhs - mat*x; | ||||
| 	VectorType r0 = precond.solve(p0); | ||||
| // 	RealScalar r0_sqnorm = r0.squaredNorm();
 | ||||
|   | ||||
| 	// is initial guess already good enough?
 | ||||
| 	if(abs(r0.norm()) < tol) { | ||||
| 		return true;  | ||||
| 	} | ||||
| 
 | ||||
| 	VectorType w = VectorType::Zero(restart + 1); | ||||
| 
 | ||||
| 	FMatrixType H = FMatrixType::Zero(m, restart + 1); | ||||
| 	FMatrixType H = FMatrixType::Zero(m, restart + 1); // Hessenberg matrix
 | ||||
| 	VectorType tau = VectorType::Zero(restart + 1); | ||||
| 	std::vector < JacobiRotation < Scalar > > G(restart); | ||||
| 
 | ||||
| 	// generate first Householder vector
 | ||||
| 	VectorType e; | ||||
| 	VectorType e(m-1); | ||||
| 	RealScalar beta; | ||||
| 	r0.makeHouseholder(e, tau.coeffRef(0), beta); | ||||
| 	w(0)=(Scalar) beta; | ||||
|  |  | |||
|  | @ -127,46 +127,47 @@ template<typename Func> void forward_jacobian(const Func& f) | |||
|     VERIFY_IS_APPROX(j, jref); | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| // TODO also check actual derivatives!
 | ||||
| void test_autodiff_scalar() | ||||
| { | ||||
|   std::cerr << foo<float>(1,2) << "\n"; | ||||
|   Vector2f p = Vector2f::Random(); | ||||
|   typedef AutoDiffScalar<Vector2f> AD; | ||||
|   AD ax(1,Vector2f::UnitX()); | ||||
|   AD ay(2,Vector2f::UnitY()); | ||||
|   AD ax(p.x(),Vector2f::UnitX()); | ||||
|   AD ay(p.y(),Vector2f::UnitY()); | ||||
|   AD res = foo<AD>(ax,ay); | ||||
|   std::cerr << res.value() << " <> " | ||||
|             << res.derivatives().transpose() << "\n\n"; | ||||
|   VERIFY_IS_APPROX(res.value(), foo(p.x(),p.y())); | ||||
| } | ||||
| 
 | ||||
| // TODO also check actual derivatives!
 | ||||
| void test_autodiff_vector() | ||||
| { | ||||
|   std::cerr << foo<Vector2f>(Vector2f(1,2)) << "\n"; | ||||
|   Vector2f p = Vector2f::Random(); | ||||
|   typedef AutoDiffScalar<Vector2f> AD; | ||||
|   typedef Matrix<AD,2,1> VectorAD; | ||||
|   VectorAD p(AD(1),AD(-1)); | ||||
|   p.x().derivatives() = Vector2f::UnitX(); | ||||
|   p.y().derivatives() = Vector2f::UnitY(); | ||||
|   VectorAD ap = p.cast<AD>(); | ||||
|   ap.x().derivatives() = Vector2f::UnitX(); | ||||
|   ap.y().derivatives() = Vector2f::UnitY(); | ||||
|    | ||||
|   AD res = foo<VectorAD>(p); | ||||
|   std::cerr << res.value() << " <> " | ||||
|             << res.derivatives().transpose() << "\n\n"; | ||||
|   AD res = foo<VectorAD>(ap); | ||||
|   VERIFY_IS_APPROX(res.value(), foo(p)); | ||||
| } | ||||
| 
 | ||||
| void test_autodiff_jacobian() | ||||
| { | ||||
|   for(int i = 0; i < g_repeat; i++) { | ||||
|   CALL_SUBTEST(( forward_jacobian(TestFunc1<double,2,2>()) )); | ||||
|   CALL_SUBTEST(( forward_jacobian(TestFunc1<double,2,3>()) )); | ||||
|   CALL_SUBTEST(( forward_jacobian(TestFunc1<double,3,2>()) )); | ||||
|   CALL_SUBTEST(( forward_jacobian(TestFunc1<double,3,3>()) )); | ||||
|   CALL_SUBTEST(( forward_jacobian(TestFunc1<double>(3,3)) )); | ||||
| } | ||||
| } | ||||
| 
 | ||||
| void test_autodiff() | ||||
| { | ||||
|     test_autodiff_scalar(); | ||||
|     test_autodiff_vector(); | ||||
| //     test_autodiff_jacobian();
 | ||||
|   for(int i = 0; i < g_repeat; i++) { | ||||
|     CALL_SUBTEST_1( test_autodiff_scalar() ); | ||||
|     CALL_SUBTEST_2( test_autodiff_vector() ); | ||||
|     CALL_SUBTEST_3( test_autodiff_jacobian() ); | ||||
|   } | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -17,6 +17,10 @@ | |||
| 
 | ||||
| #pragma once | ||||
| 
 | ||||
| #ifndef MKL_BLAS | ||||
| #define MKL_BLAS MKL_DOMAIN_BLAS | ||||
| #endif | ||||
| 
 | ||||
| #cmakedefine EIGEN_USE_MKL_ALL // This is also defined in config.h
 | ||||
| #include <@GTSAM_EIGEN_INCLUDE_PREFIX@Eigen/Dense> | ||||
| #include <@GTSAM_EIGEN_INCLUDE_PREFIX@Eigen/QR> | ||||
|  |  | |||
|  | @ -9,7 +9,9 @@ if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") | |||
|   endif() | ||||
| endif() | ||||
| 
 | ||||
| add_definitions(-Wno-unknown-pragmas) | ||||
| if(NOT ("${CMAKE_C_COMPILER_ID}" MATCHES "MSVC" OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "MSVC")) | ||||
|   #add_definitions(-Wno-unknown-pragmas) | ||||
| endif() | ||||
| 
 | ||||
| if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") | ||||
|   if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.6 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.6) | ||||
|  |  | |||
|  | @ -59,9 +59,10 @@ typedef ptrdiff_t ssize_t; | |||
| #endif | ||||
| 
 | ||||
| #ifdef __MSC__ | ||||
| #if(_MSC_VER < 1700) | ||||
| /* MSC does not have rint() function */ | ||||
| #define rint(x) ((int)((x)+0.5))   | ||||
| 
 | ||||
| #endif | ||||
| /* MSC does not have INFINITY defined */ | ||||
| #ifndef INFINITY | ||||
| #define INFINITY FLT_MAX | ||||
|  |  | |||
|  | @ -16,8 +16,9 @@ | |||
|  */ | ||||
| 
 | ||||
| #pragma once | ||||
| #include <boost/make_shared.hpp> | ||||
| 
 | ||||
| #include <gtsam/base/Value.h> | ||||
| #include <boost/make_shared.hpp> | ||||
| 
 | ||||
| //////////////////
 | ||||
| // The following includes windows.h in some MSVC versions, so we undef min, max, and ERROR
 | ||||
|  |  | |||
|  | @ -19,9 +19,9 @@ | |||
| 
 | ||||
| #include <cstdarg> | ||||
| 
 | ||||
| #include <gtsam/base/DerivedValue.h> | ||||
| #include <gtsam/base/Lie.h> | ||||
| #include <gtsam/base/Matrix.h> | ||||
| #include <gtsam/base/DerivedValue.h> | ||||
| #include <boost/serialization/nvp.hpp> | ||||
| 
 | ||||
| namespace gtsam { | ||||
|  | @ -40,9 +40,12 @@ struct LieMatrix : public Matrix, public DerivedValue<LieMatrix> { | |||
|   /** initialize from a normal matrix */ | ||||
|   LieMatrix(const Matrix& v) : Matrix(v) {} | ||||
| 
 | ||||
| // Currently TMP constructor causes ICE on MSVS 2013
 | ||||
| #if (_MSC_VER < 1800) | ||||
|   /** initialize from a fixed size normal vector */ | ||||
|   template<int M, int N> | ||||
|   LieMatrix(const Eigen::Matrix<double, M, N>& v) : Matrix(v) {} | ||||
| #endif | ||||
| 
 | ||||
|   /** constructor with size and initial data, row order ! */ | ||||
|   LieMatrix(size_t m, size_t n, const double* const data) : | ||||
|  | @ -82,6 +85,7 @@ struct LieMatrix : public Matrix, public DerivedValue<LieMatrix> { | |||
|   inline LieMatrix retract(const Vector& v) const { | ||||
|     if(v.size() != this->size()) | ||||
|       throw std::invalid_argument("LieMatrix::retract called with Vector of incorrect size"); | ||||
| 
 | ||||
|     return LieMatrix(*this + | ||||
|       Eigen::Map<const Eigen::Matrix<double,Eigen::Dynamic,Eigen::Dynamic,Eigen::RowMajor> >( | ||||
|       &v(0), this->rows(), this->cols())); | ||||
|  |  | |||
|  | @ -34,9 +34,12 @@ struct LieVector : public Vector, public DerivedValue<LieVector> { | |||
|   /** initialize from a normal vector */ | ||||
|   LieVector(const Vector& v) : Vector(v) {} | ||||
|    | ||||
| // Currently TMP constructor causes ICE on MSVS 2013
 | ||||
| #if (_MSC_VER < 1800) | ||||
|   /** initialize from a fixed size normal vector */ | ||||
|   template<int N> | ||||
|   LieVector(const Eigen::Matrix<double, N, 1>& v) : Vector(v) {} | ||||
| #endif | ||||
| 
 | ||||
|   /** wrap a double */ | ||||
|   LieVector(double d) : Vector((Vector(1) << d)) {} | ||||
|  |  | |||
|  | @ -467,7 +467,7 @@ GTSAM_EXPORT Matrix Cayley(const Matrix& A); | |||
| /// Implementation of Cayley transform using fixed size matrices to let
 | ||||
| /// Eigen do more optimization
 | ||||
| template<int N> | ||||
| Eigen::Matrix<double, N, N> Cayley(const Eigen::Matrix<double, N, N>& A) { | ||||
| Eigen::Matrix<double, N, N> CayleyFixed(const Eigen::Matrix<double, N, N>& A) { | ||||
|   typedef Eigen::Matrix<double, N, N> FMat; | ||||
|   return (FMat::Identity() - A)*(FMat::Identity() + A).inverse(); | ||||
| } | ||||
|  |  | |||
|  | @ -36,18 +36,19 @@ namespace gtsam { | |||
|    * Values can operate generically on Value objects, retracting or computing | ||||
|    * local coordinates for many Value objects of different types. | ||||
|    * | ||||
|    * When you implement retract_(), localCoordinates_(), and equals_(), we | ||||
|    * suggest first implementing versions of these functions that work directly | ||||
|    * with derived objects, then using the provided helper functions to | ||||
|    * implement the generic Value versions.  This makes your implementation | ||||
|    * easier, and also improves performance in situations where the derived type | ||||
|    * is in fact known, such as in most implementations of \c evaluateError() in | ||||
|    * classes derived from NonlinearFactor. | ||||
|    * Inheriting from the DerivedValue class template provides a generic implementation of | ||||
|    * the pure virtual functions retract_(), localCoordinates_(), and equals_(), eliminating | ||||
|    * the need to implement these functions in your class. Note that you must inherit from | ||||
|    * DerivedValue templated on the class you are defining. For example, you cannot define | ||||
|    * the following: | ||||
|    * \code | ||||
|    * class Rot3 : public DerivedValue<Point3>{ \\classdef } | ||||
|    * \endcode | ||||
|    * | ||||
|    * Using the above practice, here is an example of implementing a typical | ||||
|    * class derived from Value: | ||||
|    * \code | ||||
|      class Rot3 : public Value { | ||||
|      class GTSAM_EXPORT Rot3 : public DerivedValue<Rot3> { | ||||
|      public: | ||||
|        // Constructor, there is never a need to call the Value base class constructor.
 | ||||
|        Rot3() { ... } | ||||
|  | @ -74,27 +75,6 @@ namespace gtsam { | |||
|          // Math to implement 3D rotation localCoordinates, e.g. logarithm map
 | ||||
|          return Vector(result); | ||||
|        } | ||||
| 
 | ||||
|        // Equals implementing the generic Value interface (virtual, implements Value::equals_())
 | ||||
|        virtual bool equals_(const Value& other, double tol = 1e-9) const { | ||||
|          // Call our provided helper function to call your Rot3-specific
 | ||||
|          // equals with appropriate casting.
 | ||||
|          return CallDerivedEquals(this, other, tol); | ||||
|        } | ||||
| 
 | ||||
|        // retract implementing the generic Value interface (virtual, implements Value::retract_())
 | ||||
|        virtual std::auto_ptr<Value> retract_(const Vector& delta) const { | ||||
|          // Call our provided helper function to call your Rot3-specific
 | ||||
|          // retract and do the appropriate casting and allocation.
 | ||||
|          return CallDerivedRetract(this, delta); | ||||
|        } | ||||
| 
 | ||||
|        // localCoordinates implementing the generic Value interface (virtual, implements Value::localCoordinates_())
 | ||||
|        virtual Vector localCoordinates_(const Value& value) const { | ||||
|          // Call our provided helper function to call your Rot3-specific
 | ||||
|          // localCoordinates and do the appropriate casting.
 | ||||
|          return CallDerivedLocalCoordinates(this, value); | ||||
|        } | ||||
|      }; | ||||
|      \endcode | ||||
|    */ | ||||
|  |  | |||
|  | @ -30,55 +30,11 @@ | |||
| 
 | ||||
| #include <gtsam/base/Vector.h> | ||||
| 
 | ||||
| //#ifdef WIN32
 | ||||
| //#include <Windows.h>
 | ||||
| //#endif
 | ||||
| 
 | ||||
| using namespace std; | ||||
| 
 | ||||
| namespace gtsam { | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| void odprintf_(const char *format, ostream& stream, ...) { | ||||
|   char buf[4096], *p = buf; | ||||
| 
 | ||||
|   va_list args; | ||||
|   va_start(args, stream); | ||||
| #ifdef WIN32 | ||||
|   _vsnprintf(p, sizeof buf - 3, format, args); // buf-3 is room for CR/LF/NUL
 | ||||
| #else | ||||
|   vsnprintf(p, sizeof buf - 3, format, args); // buf-3 is room for CR/LF/NUL
 | ||||
| #endif | ||||
|   va_end(args); | ||||
| 
 | ||||
| //#ifdef WIN32
 | ||||
| //  OutputDebugString(buf);
 | ||||
| //#else
 | ||||
|   stream << buf; | ||||
| //#endif
 | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| 
 | ||||
| void odprintf(const char *format, ...) { | ||||
|   char buf[4096], *p = buf; | ||||
| 
 | ||||
|   va_list args; | ||||
|   va_start(args, format); | ||||
| #ifdef WIN32 | ||||
|   _vsnprintf(p, sizeof buf - 3, format, args); // buf-3 is room for CR/LF/NUL
 | ||||
| #else | ||||
|   vsnprintf(p, sizeof buf - 3, format, args); // buf-3 is room for CR/LF/NUL
 | ||||
| #endif | ||||
|   va_end(args); | ||||
| 
 | ||||
| //#ifdef WIN32
 | ||||
| //  OutputDebugString(buf);
 | ||||
| //#else
 | ||||
|   cout << buf; | ||||
| //#endif
 | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| bool zero(const Vector& v) { | ||||
|   bool result = true; | ||||
|  | @ -101,10 +57,12 @@ Vector delta(size_t n, size_t i, double value) { | |||
| /* ************************************************************************* */ | ||||
| void print(const Vector& v, const string& s, ostream& stream) { | ||||
|   size_t n = v.size(); | ||||
|   odprintf_("%s [", stream, s.c_str()); | ||||
|   for(size_t i=0; i<n; i++) | ||||
|     odprintf_("%g%s", stream, v[i], (i<n-1 ? "; " : "")); | ||||
|   odprintf_("];\n", stream); | ||||
| 
 | ||||
|   stream << s << "["; | ||||
|   for(size_t i=0; i<n; i++) { | ||||
|     stream << setprecision(9) << v(i) << (i<n-1 ? "; " : ""); | ||||
|   } | ||||
|   stream << "];" << endl; | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
|  |  | |||
|  | @ -41,11 +41,6 @@ typedef Eigen::Matrix<double, 6, 1> Vector6; | |||
| typedef Eigen::VectorBlock<Vector> SubVector; | ||||
| typedef Eigen::VectorBlock<const Vector> ConstSubVector; | ||||
| 
 | ||||
| /**
 | ||||
|  * An auxiliary function to printf for Win32 compatibility, added by Kai | ||||
|  */ | ||||
| GTSAM_EXPORT void odprintf(const char *format, ...); | ||||
| 
 | ||||
| /**
 | ||||
|  * Create vector initialized to a constant value | ||||
|  * @param n is the size of the vector | ||||
|  |  | |||
|  | @ -1127,6 +1127,12 @@ TEST( matrix, svd2 ) | |||
| 
 | ||||
|   svd(sampleA, U, s, V); | ||||
| 
 | ||||
|   // take care of sign ambiguity
 | ||||
|   if (U(0, 1) > 0) { | ||||
|     U = -U; | ||||
|     V = -V; | ||||
|   } | ||||
| 
 | ||||
|   EXPECT(assert_equal(expectedU,U)); | ||||
|   EXPECT(assert_equal(expected_s,s,1e-9)); | ||||
|   EXPECT(assert_equal(expectedV,V)); | ||||
|  | @ -1143,6 +1149,13 @@ TEST( matrix, svd3 ) | |||
|   Matrix expectedV = (Matrix(3, 2) << 0.,1.,0.,0.,-1.,0.); | ||||
| 
 | ||||
|   svd(sampleAt, U, s, V); | ||||
| 
 | ||||
|   // take care of sign ambiguity
 | ||||
|   if (U(0, 0) > 0) { | ||||
|     U = -U; | ||||
|     V = -V; | ||||
|   } | ||||
| 
 | ||||
|   Matrix S = diag(s); | ||||
|   Matrix t = U * S; | ||||
|   Matrix Vt = trans(V); | ||||
|  | @ -1176,6 +1189,17 @@ TEST( matrix, svd4 ) | |||
|      0.6723,   0.7403); | ||||
| 
 | ||||
|   svd(A, U, s, V); | ||||
| 
 | ||||
|   // take care of sign ambiguity
 | ||||
|   if (U(0, 0) < 0) { | ||||
|     U.col(0) = -U.col(0); | ||||
|     V.col(0) = -V.col(0); | ||||
|   } | ||||
|   if (U(0, 1) < 0) { | ||||
|     U.col(1) = -U.col(1); | ||||
|     V.col(1) = -V.col(1); | ||||
|   } | ||||
| 
 | ||||
|   Matrix reconstructed = U * diag(s) * trans(V); | ||||
| 
 | ||||
|   EXPECT(assert_equal(A, reconstructed, 1e-4)); | ||||
|  |  | |||
|  | @ -165,6 +165,16 @@ public: | |||
|    */ | ||||
|   Vector3 calibrate(const Vector3& p) const; | ||||
| 
 | ||||
|   /// "Between", subtracts calibrations. between(p,q) == compose(inverse(p),q)
 | ||||
|   inline Cal3_S2 between(const Cal3_S2& q, | ||||
|       boost::optional<Matrix&> H1=boost::none, | ||||
|       boost::optional<Matrix&> H2=boost::none) const { | ||||
|     if(H1) *H1 = -eye(5); | ||||
|     if(H2) *H2 = eye(5); | ||||
|     return Cal3_S2(q.fx_-fx_, q.fy_-fy_, q.s_-s_, q.u0_-u0_, q.v0_-v0_); | ||||
|   } | ||||
| 
 | ||||
| 
 | ||||
|   /// @}
 | ||||
|   /// @name Manifold
 | ||||
|   /// @{
 | ||||
|  |  | |||
|  | @ -240,7 +240,7 @@ Rot3 Rot3::retract(const Vector& omega, Rot3::CoordinatesMode mode) const { | |||
|     return retractCayley(omega); | ||||
|   } else if(mode == Rot3::SLOW_CAYLEY) { | ||||
|     Matrix Omega = skewSymmetric(omega); | ||||
|     return (*this)*Cayley<3>(-Omega/2); | ||||
|     return (*this)*CayleyFixed<3>(-Omega/2); | ||||
|   } else { | ||||
|     assert(false); | ||||
|     exit(1); | ||||
|  | @ -269,7 +269,7 @@ Vector3 Rot3::localCoordinates(const Rot3& T, Rot3::CoordinatesMode mode) const | |||
|     // Create a fixed-size matrix
 | ||||
|     Eigen::Matrix3d A(between(T).matrix()); | ||||
|     // using templated version of Cayley
 | ||||
|     Eigen::Matrix3d Omega = Cayley<3>(A); | ||||
|     Eigen::Matrix3d Omega = CayleyFixed<3>(A); | ||||
|     return -2*Vector3(Omega(2,1),Omega(0,2),Omega(1,0)); | ||||
|   } else { | ||||
|     assert(false); | ||||
|  |  | |||
|  | @ -93,6 +93,17 @@ TEST( Cal3_S2, retract) | |||
|   CHECK(assert_equal(d,K.localCoordinates(actual),1e-7)); | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST(Cal3_S2, between) { | ||||
|   Cal3_S2 k1(5, 5, 5, 5, 5), k2(5, 6, 7, 8, 9); | ||||
|   Matrix H1, H2; | ||||
| 
 | ||||
|   EXPECT(assert_equal(Cal3_S2(0,1,2,3,4), k1.between(k2, H1, H2))); | ||||
|   EXPECT(assert_equal(-eye(5), H1)); | ||||
|   EXPECT(assert_equal(eye(5), H2)); | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| int main() { | ||||
|   TestResult tr; | ||||
|  |  | |||
|  | @ -173,6 +173,12 @@ TEST (EssentialMatrix, epipoles) { | |||
|   Vector S; | ||||
|   gtsam::svd(E.matrix(), U, S, V); | ||||
| 
 | ||||
|   // take care of SVD sign ambiguity
 | ||||
|   if (U(0, 2) > 0) { | ||||
|     U = -U; | ||||
|     V = -V; | ||||
|   } | ||||
| 
 | ||||
|   // check rank 2 constraint
 | ||||
|   CHECK(fabs(S(2))<1e-10); | ||||
| 
 | ||||
|  | @ -182,8 +188,15 @@ TEST (EssentialMatrix, epipoles) { | |||
|   // Check epipoles
 | ||||
| 
 | ||||
|   // Epipole in image 1 is just E.direction()
 | ||||
|   Unit3 e1(U(0, 2), U(1, 2), U(2, 2)); | ||||
|   EXPECT(assert_equal(e1, E.epipole_a())); | ||||
|   Unit3 e1(-U(0, 2), -U(1, 2), -U(2, 2)); | ||||
|   Unit3 actual = E.epipole_a(); | ||||
|   EXPECT(assert_equal(e1, actual)); | ||||
| 
 | ||||
|   // take care of SVD sign ambiguity
 | ||||
|   if (V(0, 2) < 0) { | ||||
|     U = -U; | ||||
|     V = -V; | ||||
|   } | ||||
| 
 | ||||
|   // Epipole in image 2 is E.rotation().unrotate(E.direction())
 | ||||
|   Unit3 e2(V(0, 2), V(1, 2), V(2, 2)); | ||||
|  |  | |||
|  | @ -268,7 +268,7 @@ TEST( triangulation, TriangulationFactor ) { | |||
|   Key pointKey(1); | ||||
|   SharedNoiseModel model; | ||||
|   typedef TriangulationFactor<> Factor; | ||||
|   Factor factor(camera1, z1, model, pointKey, sharedCal); | ||||
|   Factor factor(camera1, z1, model, pointKey); | ||||
| 
 | ||||
|   // Use the factor to calculate the Jacobians
 | ||||
|   Matrix HActual; | ||||
|  |  | |||
|  | @ -38,7 +38,7 @@ Errors::Errors(const VectorValues& V) { | |||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| void Errors::print(const std::string& s) const { | ||||
|   odprintf("%s:\n", s.c_str()); | ||||
|   cout << s << endl; | ||||
|   BOOST_FOREACH(const Vector& v, *this) | ||||
|     gtsam::print(v); | ||||
| } | ||||
|  |  | |||
|  | @ -19,10 +19,6 @@ | |||
| #pragma once | ||||
| 
 | ||||
| #include <gtsam/linear/linearExceptions.h> | ||||
| #include <boost/assign/list_of.hpp> | ||||
| #include <boost/range/adaptor/transformed.hpp> | ||||
| #include <boost/range/algorithm/for_each.hpp> | ||||
| #include <boost/foreach.hpp> | ||||
| 
 | ||||
| namespace gtsam { | ||||
| 
 | ||||
|  | @ -59,20 +55,10 @@ namespace gtsam { | |||
|     model_ = model; | ||||
|   } | ||||
| 
 | ||||
|   /* ************************************************************************* */ | ||||
|   namespace internal { | ||||
|     static inline DenseIndex getColsJF(const std::pair<Key,Matrix>& p) { | ||||
|       return p.second.cols(); | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   /* ************************************************************************* */ | ||||
|   template<typename TERMS> | ||||
|   void JacobianFactor::fillTerms(const TERMS& terms, const Vector& b, const SharedDiagonal& noiseModel) | ||||
|   { | ||||
|     using boost::adaptors::transformed; | ||||
|     namespace br = boost::range; | ||||
| 
 | ||||
|     // Check noise model dimension
 | ||||
|     if(noiseModel && (DenseIndex)noiseModel->dim() != b.size()) | ||||
|       throw InvalidNoiseModel(b.size(), noiseModel->dim()); | ||||
|  | @ -80,25 +66,32 @@ namespace gtsam { | |||
|     // Resize base class key vector
 | ||||
|     Base::keys_.resize(terms.size()); | ||||
| 
 | ||||
|     // Construct block matrix - this uses the boost::range 'transformed' construct to apply
 | ||||
|     // internal::getColsJF (defined just above here in this file) to each term.  This
 | ||||
|     // transforms the list of terms into a list of variable dimensions, by extracting the
 | ||||
|     // number of columns in each matrix.  This is done to avoid separately allocating an
 | ||||
|     // array of dimensions before constructing the VerticalBlockMatrix.
 | ||||
|     Ab_ = VerticalBlockMatrix(terms | transformed(&internal::getColsJF), b.size(), true); | ||||
|     // Get dimensions of matrices
 | ||||
|     std::vector<size_t> dimensions; | ||||
|     dimensions.reserve(terms.size()); | ||||
|     for(typename TERMS::const_iterator it = terms.begin(); it != terms.end(); ++it) { | ||||
|       const std::pair<Key, Matrix>& term = *it; | ||||
|       const Matrix& Ai = term.second; | ||||
|       dimensions.push_back(Ai.cols()); | ||||
|     } | ||||
| 
 | ||||
|     // Construct block matrix
 | ||||
|     Ab_ = VerticalBlockMatrix(dimensions, b.size(), true); | ||||
| 
 | ||||
|     // Check and add terms
 | ||||
|     DenseIndex i = 0; // For block index
 | ||||
|     for(typename TERMS::const_iterator termIt = terms.begin(); termIt != terms.end(); ++termIt) { | ||||
|       const std::pair<Key, Matrix>& term = *termIt; | ||||
|     for(typename TERMS::const_iterator it = terms.begin(); it != terms.end(); ++it) { | ||||
|       const std::pair<Key, Matrix>& term = *it; | ||||
|       Key key = term.first; | ||||
|       const Matrix& Ai = term.second; | ||||
| 
 | ||||
|       // Check block rows
 | ||||
|       if(term.second.rows() != Ab_.rows()) | ||||
|         throw InvalidMatrixBlock(Ab_.rows(), term.second.rows()); | ||||
|       if(Ai.rows() != Ab_.rows()) | ||||
|         throw InvalidMatrixBlock(Ab_.rows(), Ai.rows()); | ||||
| 
 | ||||
|       // Assign key and matrix
 | ||||
|       Base::keys_[i] = term.first; | ||||
|       Ab_(i) = term.second; | ||||
|       Base::keys_[i] = key; | ||||
|       Ab_(i) = Ai; | ||||
| 
 | ||||
|       // Increment block index
 | ||||
|       ++ i; | ||||
|  |  | |||
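The rewritten fillTerms() collects the column count of each term's matrix, sizes the VerticalBlockMatrix from those dimensions, and then copies the blocks in order. A short sketch of a constructor call that goes through this code path (assumes the 3.x-era API; the keys, matrices, and sigmas are arbitrary):

    #include <gtsam/linear/JacobianFactor.h>
    #include <gtsam/linear/NoiseModel.h>
    using namespace gtsam;

    int main() {
      Matrix A1 = (Matrix(2, 3) << 1, 0, 0,  0, 1, 0); // 2x3 block for key 1
      Matrix A2 = (Matrix(2, 2) << 1, 0,  0, 1);       // 2x2 block for key 2
      Vector b = (Vector(2) << 0.1, -0.2);
      Vector sigmas = (Vector(2) << 0.5, 0.5);
      SharedDiagonal model = noiseModel::Diagonal::Sigmas(sigmas);
      // Internally fillTerms() builds the dimension list {3, 2} and allocates [A1 | A2 | b].
      JacobianFactor factor(1, A1, 2, A2, b, model);
      factor.print("two-block factor: ");
      return 0;
    }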
|  | @ -15,6 +15,17 @@ | |||
|  * @author  Frank Dellaert | ||||
|  */ | ||||
| 
 | ||||
| #include <gtsam/linear/GaussianBayesNet.h> | ||||
| #include <gtsam/linear/JacobianFactor.h> | ||||
| #include <gtsam/linear/GaussianFactorGraph.h> | ||||
| #include <gtsam/base/Testable.h> | ||||
| #include <gtsam/base/LieVector.h> | ||||
| #include <gtsam/base/numericalDerivative.h> | ||||
| 
 | ||||
| #include <boost/assign/list_of.hpp> | ||||
| #include <boost/assign/std/list.hpp> // for operator += | ||||
| using namespace boost::assign; | ||||
| 
 | ||||
| // STL/C++
 | ||||
| #include <iostream> | ||||
| #include <sstream> | ||||
|  | @ -22,16 +33,6 @@ | |||
| #include <boost/tuple/tuple.hpp> | ||||
| #include <boost/foreach.hpp> | ||||
| 
 | ||||
| #include <boost/assign/std/list.hpp> // for operator += | ||||
| using namespace boost::assign; | ||||
| 
 | ||||
| #include <gtsam/base/Testable.h> | ||||
| #include <gtsam/base/LieVector.h> | ||||
| #include <gtsam/base/numericalDerivative.h> | ||||
| #include <gtsam/linear/GaussianBayesNet.h> | ||||
| #include <gtsam/linear/JacobianFactor.h> | ||||
| #include <gtsam/linear/GaussianFactorGraph.h> | ||||
| 
 | ||||
| using namespace std; | ||||
| using namespace gtsam; | ||||
| 
 | ||||
|  |  | |||
|  | @ -25,13 +25,15 @@ | |||
| #include <gtsam/linear/GaussianConditional.h> | ||||
| #include <gtsam/linear/GaussianBayesNet.h> | ||||
| 
 | ||||
| #include <iostream> | ||||
| #include <sstream> | ||||
| #include <vector> | ||||
| #include <boost/assign/std/list.hpp> | ||||
| #include <boost/assign/std/vector.hpp> | ||||
| #include <boost/assign/list_inserter.hpp> | ||||
| #include <boost/make_shared.hpp> | ||||
| #include <boost/assign/list_of.hpp> | ||||
| 
 | ||||
| #include <iostream> | ||||
| #include <sstream> | ||||
| #include <vector> | ||||
| 
 | ||||
| using namespace gtsam; | ||||
| using namespace std; | ||||
|  |  | |||
|  | @ -18,20 +18,21 @@ | |||
|  *  @author Richard Roberts | ||||
|  **/ | ||||
| 
 | ||||
| #include <gtsam/linear/GaussianFactorGraph.h> | ||||
| #include <gtsam/linear/GaussianConditional.h> | ||||
| #include <gtsam/linear/GaussianBayesNet.h> | ||||
| #include <gtsam/inference/VariableSlots.h> | ||||
| #include <gtsam/inference/VariableIndex.h> | ||||
| #include <gtsam/base/debug.h> | ||||
| #include <gtsam/base/VerticalBlockMatrix.h> | ||||
| 
 | ||||
| #include <boost/assign/list_of.hpp> | ||||
| #include <boost/assign/std/list.hpp> // for operator += | ||||
| using namespace boost::assign; | ||||
| 
 | ||||
| #include <gtsam/base/TestableAssertions.h> | ||||
| #include <CppUnitLite/TestHarness.h> | ||||
| 
 | ||||
| #include <gtsam/base/debug.h> | ||||
| #include <gtsam/base/VerticalBlockMatrix.h> | ||||
| #include <gtsam/inference/VariableSlots.h> | ||||
| #include <gtsam/inference/VariableIndex.h> | ||||
| #include <gtsam/linear/GaussianFactorGraph.h> | ||||
| #include <gtsam/linear/GaussianConditional.h> | ||||
| #include <gtsam/linear/GaussianBayesNet.h> | ||||
| 
 | ||||
| using namespace std; | ||||
| using namespace gtsam; | ||||
| 
 | ||||
|  |  | |||
|  | @ -15,24 +15,25 @@ | |||
|  * @date    Dec 15, 2010 | ||||
|  */ | ||||
| 
 | ||||
| #include <vector> | ||||
| #include <utility> | ||||
| 
 | ||||
| #include <boost/assign/std/vector.hpp> | ||||
| #include <boost/assign/std/map.hpp> | ||||
| 
 | ||||
| #include <gtsam/base/debug.h> | ||||
| #include <gtsam/linear/HessianFactor.h> | ||||
| #include <gtsam/linear/JacobianFactor.h> | ||||
| #include <gtsam/linear/GaussianFactorGraph.h> | ||||
| #include <gtsam/linear/GaussianConditional.h> | ||||
| #include <gtsam/linear/VectorValues.h> | ||||
| 
 | ||||
| #include <gtsam/base/debug.h> | ||||
| #include <gtsam/base/TestableAssertions.h> | ||||
| 
 | ||||
| #include <CppUnitLite/TestHarness.h> | ||||
| 
 | ||||
| using namespace std; | ||||
| #include <boost/assign/list_of.hpp> | ||||
| #include <boost/assign/std/vector.hpp> | ||||
| #include <boost/assign/std/map.hpp> | ||||
| using namespace boost::assign; | ||||
| 
 | ||||
| #include <vector> | ||||
| #include <utility> | ||||
| 
 | ||||
| using namespace std; | ||||
| using namespace gtsam; | ||||
| 
 | ||||
| const double tol = 1e-5; | ||||
|  |  | |||
|  | @ -105,6 +105,24 @@ TEST(JacobianFactor, constructors_and_accessors) | |||
|     EXPECT(noise == expected.get_model()); | ||||
|     EXPECT(noise == actual.get_model()); | ||||
|   } | ||||
|   { | ||||
|     // Test three-term constructor with std::map
 | ||||
|     JacobianFactor expected( | ||||
|       boost::make_iterator_range(terms.begin(), terms.begin() + 3), b, noise); | ||||
|     map<Key,Matrix> mapTerms; | ||||
|     // note order of insertion plays no role: order will be determined by keys
 | ||||
|     mapTerms.insert(terms[2]); | ||||
|     mapTerms.insert(terms[1]); | ||||
|     mapTerms.insert(terms[0]); | ||||
|     JacobianFactor actual(mapTerms, b, noise); | ||||
|     EXPECT(assert_equal(expected, actual)); | ||||
|     LONGS_EQUAL((long)terms[2].first, (long)actual.keys().back()); | ||||
|     EXPECT(assert_equal(terms[2].second, actual.getA(actual.end() - 1))); | ||||
|     EXPECT(assert_equal(b, expected.getb())); | ||||
|     EXPECT(assert_equal(b, actual.getb())); | ||||
|     EXPECT(noise == expected.get_model()); | ||||
|     EXPECT(noise == actual.get_model()); | ||||
|   } | ||||
|   { | ||||
|     // VerticalBlockMatrix constructor
 | ||||
|     JacobianFactor expected( | ||||
|  |  | |||
|  | @ -37,7 +37,15 @@ class MagFactor: public NoiseModelFactor1<Rot2> { | |||
| 
 | ||||
| public: | ||||
| 
 | ||||
|   /** Constructor */ | ||||
|   /**
 | ||||
|    * Constructor of factor that estimates nav to body rotation bRn | ||||
|    * @param key of the unknown rotation bRn in the factor graph | ||||
|    * @param measured magnetometer reading, a 3-vector | ||||
|    * @param scale by which a unit vector is scaled to yield a magnetometer reading | ||||
|    * @param direction of the local magnetic field, see e.g. http://www.ngdc.noaa.gov/geomag-web/#igrfwmm
 | ||||
|    * @param bias of the magnetometer, modeled as purely additive (after scaling) | ||||
|    * @param model of the additive Gaussian noise that is assumed | ||||
|    */ | ||||
|   MagFactor(Key key, const Point3& measured, double scale, | ||||
|       const Unit3& direction, const Point3& bias, | ||||
|       const SharedNoiseModel& model) : | ||||
|  |  | |||
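For context on the parameters documented above: the reading this factor explains is, roughly, the local field (scale times the unit direction) expressed in the body frame, plus the additive bias. A hypothetical sketch of that relation (the factor itself estimates only a Rot2 yaw; a full Rot3 is used here purely for illustration, and this is not the factor's own code):

    #include <gtsam/geometry/Rot3.h>
    #include <gtsam/geometry/Unit3.h>
    #include <gtsam/geometry/Point3.h>

    // Simulate a magnetometer reading given the nav-to-body rotation bRn, the
    // field strength 'scale', the nav-frame field 'direction', and an additive 'bias'.
    gtsam::Point3 simulateReading(const gtsam::Rot3& bRn, double scale,
                                  const gtsam::Unit3& direction,
                                  const gtsam::Point3& bias) {
      gtsam::Point3 nM = direction.point3() * scale; // field in the nav frame
      return bRn.rotate(nM) + bias;                  // express in the body frame, add bias
    }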
|  | @ -117,7 +117,7 @@ public: | |||
| 
 | ||||
|   // casting syntactic sugar
 | ||||
| 
 | ||||
|   inline bool hasLinearizationPoint() const { return linearizationPoint_; } | ||||
|   inline bool hasLinearizationPoint() const { return linearizationPoint_.is_initialized(); } | ||||
| 
 | ||||
|   /**
 | ||||
|    * Simple checks whether this is a Jacobian or Hessian factor | ||||
|  |  | |||
|  | @ -76,9 +76,6 @@ void NonlinearOptimizer::defaultOptimize() { | |||
|       !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, | ||||
|             params.errorTol, currentError, this->error(), params.verbosity)); | ||||
| 
 | ||||
|   if (params.verbosity >= NonlinearOptimizerParams::TERMINATION) | ||||
|     cout << "Number of iterations: " << this->iterations() << endl; | ||||
| 
 | ||||
|   // Printing if verbose
 | ||||
|   if (params.verbosity >= NonlinearOptimizerParams::TERMINATION && | ||||
|       this->iterations() >= params.maxIterations) | ||||
|  |  | |||
|  | @ -572,7 +572,7 @@ public: | |||
| 
 | ||||
|     FastMap<Key,size_t> KeySlotMap; | ||||
|     for (size_t slot=0; slot < allKeys.size(); slot++) | ||||
|       KeySlotMap.insert(std::make_pair<Key,size_t>(allKeys[slot],slot)); | ||||
|       KeySlotMap.insert(std::make_pair(allKeys[slot],slot)); | ||||
| 
 | ||||
|     // a single point is observed in numKeys cameras
 | ||||
|     size_t numKeys = this->keys_.size(); // cameras observing current point
 | ||||
|  |  | |||
|  | @ -177,7 +177,7 @@ GraphAndValues load2D(const string& filename, SharedNoiseModel model, int maxID, | |||
|   string tag; | ||||
| 
 | ||||
|   // load the poses
 | ||||
|   while (is) { | ||||
|   while (!is.eof()) { | ||||
|     if (!(is >> tag)) | ||||
|       break; | ||||
| 
 | ||||
|  | @ -213,7 +213,7 @@ GraphAndValues load2D(const string& filename, SharedNoiseModel model, int maxID, | |||
|   // Parse the pose constraints
 | ||||
|   int id1, id2; | ||||
|   bool haveLandmark = false; | ||||
|   while (is) { | ||||
|   while (!is.eof()) { | ||||
|     if (!(is >> tag)) | ||||
|       break; | ||||
| 
 | ||||
|  | @ -461,12 +461,12 @@ GraphAndValues load3D(const string& filename) { | |||
| 
 | ||||
|   ifstream is(filename.c_str()); | ||||
|   if (!is) | ||||
|     throw invalid_argument("load2D: can not find file " + filename); | ||||
|     throw invalid_argument("load3D: can not find file " + filename); | ||||
| 
 | ||||
|   Values::shared_ptr initial(new Values); | ||||
|   NonlinearFactorGraph::shared_ptr graph(new NonlinearFactorGraph); | ||||
| 
 | ||||
|   while (is) { | ||||
|   while (!is.eof()) { | ||||
|     char buf[LINESIZE]; | ||||
|     is.getline(buf, LINESIZE); | ||||
|     istringstream ls(buf); | ||||
|  | @ -493,7 +493,7 @@ GraphAndValues load3D(const string& filename) { | |||
|   is.clear(); /* clears the end-of-file and error flags */ | ||||
|   is.seekg(0, ios::beg); | ||||
| 
 | ||||
|   while (is) { | ||||
|   while (!is.eof()) { | ||||
|     char buf[LINESIZE]; | ||||
|     is.getline(buf, LINESIZE); | ||||
|     istringstream ls(buf); | ||||
|  |  | |||
|  | @ -251,7 +251,7 @@ TEST( BayesTree, shortcutCheck ) | |||
|   // Check if all the cached shortcuts are cleared
 | ||||
|   rootClique->deleteCachedShortcuts(); | ||||
|   BOOST_FOREACH(SymbolicBayesTree::sharedClique& clique, allCliques) { | ||||
|     bool notCleared = clique->cachedSeparatorMarginal(); | ||||
|     bool notCleared = clique->cachedSeparatorMarginal().is_initialized(); | ||||
|     CHECK( notCleared == false); | ||||
|   } | ||||
|   EXPECT_LONGS_EQUAL(0, (long)rootClique->numCachedSeparatorMarginals()); | ||||
|  |  | |||
|  | @ -359,6 +359,11 @@ virtual class TransformBtwRobotsUnaryFactorEM : gtsam::NonlinearFactor { | |||
|   Vector calcIndicatorProb(const gtsam::Values& x); | ||||
|   void setValAValB(const gtsam::Values valA, const gtsam::Values valB); | ||||
| 
 | ||||
|   void updateNoiseModels(const gtsam::Values& values, const gtsam::NonlinearFactorGraph& graph); | ||||
|   void updateNoiseModels_givenCovs(const gtsam::Values& values, Matrix cov1, Matrix cov2, Matrix cov12); | ||||
|   Matrix get_model_inlier_cov(); | ||||
|   Matrix get_model_outlier_cov(); | ||||
| 
 | ||||
|   void serializable() const; // enabling serialization functionality
 | ||||
| }; | ||||
| 
 | ||||
|  |  | |||
|  | @ -0,0 +1,105 @@ | |||
| /* ----------------------------------------------------------------------------
 | ||||
| 
 | ||||
|  * GTSAM Copyright 2010, Georgia Tech Research Corporation,  | ||||
|  * Atlanta, Georgia 30332-0415 | ||||
|  * All Rights Reserved | ||||
|  * Authors: Frank Dellaert, et al. (see THANKS for the full author list) | ||||
| 
 | ||||
|  * See LICENSE for the license information | ||||
| 
 | ||||
|  * -------------------------------------------------------------------------- */ | ||||
| 
 | ||||
| /**
 | ||||
|  *  @file  BiasedGPSFactor.h | ||||
|  *  @author Luca Carlone | ||||
|  **/ | ||||
| #pragma once | ||||
| 
 | ||||
| #include <ostream> | ||||
| 
 | ||||
| #include <gtsam/geometry/Pose3.h> | ||||
| #include <gtsam/nonlinear/NonlinearFactor.h> | ||||
| 
 | ||||
| namespace gtsam { | ||||
| 
 | ||||
|   /**
 | ||||
|    * A class to model GPS measurements, including a bias term that models | ||||
|    * common-mode errors and can be partially corrected when other sensors are available | ||||
|    * @addtogroup SLAM | ||||
|    */ | ||||
|   class BiasedGPSFactor: public NoiseModelFactor2<Pose3, Point3> { | ||||
| 
 | ||||
|   private: | ||||
| 
 | ||||
|     typedef BiasedGPSFactor This; | ||||
|     typedef NoiseModelFactor2<Pose3, Point3> Base; | ||||
| 
 | ||||
|     Point3 measured_; /** The measurement */ | ||||
| 
 | ||||
|   public: | ||||
| 
 | ||||
|     // shorthand for a smart pointer to a factor
 | ||||
|     typedef typename boost::shared_ptr<BiasedGPSFactor> shared_ptr; | ||||
| 
 | ||||
|     /** default constructor - only use for serialization */ | ||||
|     BiasedGPSFactor() {} | ||||
| 
 | ||||
|     /** Constructor */ | ||||
|     BiasedGPSFactor(Key posekey, Key biaskey, const Point3 measured, | ||||
|         const SharedNoiseModel& model) : | ||||
|       Base(model, posekey, biaskey), measured_(measured) { | ||||
|     } | ||||
| 
 | ||||
|     virtual ~BiasedGPSFactor() {} | ||||
| 
 | ||||
|     /** implement functions needed for Testable */ | ||||
| 
 | ||||
|     /** print */ | ||||
|     virtual void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { | ||||
|       std::cout << s << "BiasedGPSFactor(" | ||||
|           << keyFormatter(this->key1()) << "," | ||||
|           << keyFormatter(this->key2()) << ")\n"; | ||||
|       measured_.print("  measured: "); | ||||
|       this->noiseModel_->print("  noise model: "); | ||||
|     } | ||||
| 
 | ||||
|     /** equals */ | ||||
|     virtual bool equals(const NonlinearFactor& expected, double tol=1e-9) const { | ||||
|       const This *e =  dynamic_cast<const This*> (&expected); | ||||
|       return e != NULL && Base::equals(*e, tol) && this->measured_.equals(e->measured_, tol); | ||||
|     } | ||||
| 
 | ||||
|     /** implement functions needed to derive from Factor */ | ||||
| 
 | ||||
|     /** vector of errors */ | ||||
|     Vector evaluateError(const Pose3& pose, const Point3& bias, | ||||
|         boost::optional<Matrix&> H1 = boost::none, boost::optional<Matrix&> H2 = | ||||
|             boost::none) const { | ||||
| 
 | ||||
|       if (H1) { // jacobian wrt pose, only if requested | ||||
|         H1->resize(3,6); | ||||
|         (*H1) << Matrix3::Zero(),  pose.rotation().matrix(); | ||||
|       } | ||||
|       if (H2) { // jacobian wrt bias, only if requested | ||||
|         H2->resize(3,3); | ||||
|         (*H2) << Matrix3::Identity(); | ||||
|       } | ||||
|       return pose.translation().vector() + bias.vector() - measured_.vector(); | ||||
|     } | ||||
| 
 | ||||
|     /** return the measured */ | ||||
|     const Point3 measured() const { | ||||
|       return measured_; | ||||
|     } | ||||
| 
 | ||||
|   private: | ||||
| 
 | ||||
|     /** Serialization function */ | ||||
|     friend class boost::serialization::access; | ||||
|     template<class ARCHIVE> | ||||
|     void serialize(ARCHIVE & ar, const unsigned int version) { | ||||
|       ar & boost::serialization::make_nvp("NoiseModelFactor2", | ||||
|           boost::serialization::base_object<Base>(*this)); | ||||
|       ar & BOOST_SERIALIZATION_NVP(measured_); | ||||
|     } | ||||
|   }; // \class BiasedGPSFactor
 | ||||
| 
 | ||||
| } /// namespace gtsam
 | ||||
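BiasedGPSFactor's error above is simply predicted = pose translation + bias, compared to the reading. A minimal usage sketch (assumed 3.x-era API; the keys, reading, and sigma are invented, and the bias key would typically also be constrained over time, e.g. by a BetweenFactor<Point3> random walk):

    #include <gtsam/nonlinear/NonlinearFactorGraph.h>
    #include <gtsam/inference/Symbol.h>
    #include <gtsam_unstable/slam/BiasedGPSFactor.h>
    using namespace gtsam;

    int main() {
      NonlinearFactorGraph graph;
      Point3 gpsReading(10.0, 2.0, -0.5); // measured position
      SharedNoiseModel gpsNoise = noiseModel::Isotropic::Sigma(3, 0.5);
      // X(1): Pose3 state, B(1): slowly varying additive GPS bias (Point3)
      graph.add(BiasedGPSFactor(symbol_shorthand::X(1), symbol_shorthand::B(1),
                                gpsReading, gpsNoise));
      graph.print("graph with one biased GPS factor:\n");
      return 0;
    }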
|  | @ -0,0 +1,135 @@ | |||
| /* ----------------------------------------------------------------------------
 | ||||
| 
 | ||||
|  * GTSAM Copyright 2010, Georgia Tech Research Corporation, | ||||
|  * Atlanta, Georgia 30332-0415 | ||||
|  * All Rights Reserved | ||||
|  * Authors: Frank Dellaert, et al. (see THANKS for the full author list) | ||||
| 
 | ||||
|  * See LICENSE for the license information | ||||
| 
 | ||||
|  * -------------------------------------------------------------------------- */ | ||||
| 
 | ||||
| /**
 | ||||
|  *  @file   GaussMarkov1stOrderFactor.h | ||||
|  *  @author Vadim Indelman, Stephen Williams, Luca Carlone | ||||
|  *  @date   Jan 17, 2012 | ||||
|  **/ | ||||
| #pragma once | ||||
| 
 | ||||
| #include <ostream> | ||||
| 
 | ||||
| #include <gtsam/base/Testable.h> | ||||
| #include <gtsam/base/Lie.h> | ||||
| #include <gtsam/nonlinear/NonlinearFactor.h> | ||||
| #include <gtsam/linear/GaussianFactor.h> | ||||
| #include <gtsam/linear/NoiseModel.h> | ||||
| 
 | ||||
| namespace gtsam { | ||||
| 
 | ||||
| /*
 | ||||
|  * - The 1st order GaussMarkov factor relates two keys of the same type. This relation is given via | ||||
|  *      		key_2 = exp(-1/tau*delta_t) * key1 + w_d | ||||
|  * 		where tau is the time constant and delta_t is the time difference between the two keys. | ||||
|  * 		w_d is the equivalent discrete noise, whose covariance is calculated from the continuous noise model and delta_t. | ||||
|  * - w_d is approximated as a Gaussian noise. | ||||
|  * - In the multi-dimensional case, tau is a vector, and the above equation is applied on each element | ||||
|  *      in the state (represented by keys), using the appropriate time constant in the vector tau. | ||||
|  */ | ||||
| 
 | ||||
| /*
 | ||||
|  * A class for a measurement predicted by "GaussMarkov1stOrderFactor(config[key1],config[key2])" | ||||
|  * VALUE is the Lie group type shared by the two related keys | ||||
|  */ | ||||
| template<class VALUE> | ||||
| class GaussMarkov1stOrderFactor: public NoiseModelFactor2<VALUE, VALUE> { | ||||
| 
 | ||||
| private: | ||||
| 
 | ||||
|   typedef GaussMarkov1stOrderFactor<VALUE> This; | ||||
|   typedef NoiseModelFactor2<VALUE, VALUE> Base; | ||||
| 
 | ||||
|   double dt_; | ||||
|   Vector tau_; | ||||
| 
 | ||||
| public: | ||||
| 
 | ||||
|   // shorthand for a smart pointer to a factor
 | ||||
|   typedef typename boost::shared_ptr<GaussMarkov1stOrderFactor> shared_ptr; | ||||
| 
 | ||||
|   /** default constructor - only use for serialization */ | ||||
|   GaussMarkov1stOrderFactor() {} | ||||
| 
 | ||||
|   /** Constructor */ | ||||
|   GaussMarkov1stOrderFactor(const Key& key1, const Key& key2, double delta_t, Vector tau, | ||||
|       const SharedGaussian& model) : | ||||
|         Base(calcDiscreteNoiseModel(model, delta_t), key1, key2), dt_(delta_t), tau_(tau) { | ||||
|   } | ||||
| 
 | ||||
|   virtual ~GaussMarkov1stOrderFactor() {} | ||||
| 
 | ||||
|   /** implement functions needed for Testable */ | ||||
| 
 | ||||
|   /** print */ | ||||
|   virtual void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { | ||||
|     std::cout << s << "GaussMarkov1stOrderFactor(" | ||||
|         << keyFormatter(this->key1()) << "," | ||||
|         << keyFormatter(this->key2()) << ")\n"; | ||||
|     this->noiseModel_->print("  noise model"); | ||||
|   } | ||||
| 
 | ||||
|   /** equals */ | ||||
|   virtual bool equals(const NonlinearFactor& expected, double tol=1e-9) const { | ||||
|     const This *e =	dynamic_cast<const This*> (&expected); | ||||
|     return e != NULL && Base::equals(*e, tol); | ||||
|   } | ||||
| 
 | ||||
|   /** implement functions needed to derive from Factor */ | ||||
| 
 | ||||
|   /** vector of errors */ | ||||
|   Vector evaluateError(const VALUE& p1, const VALUE& p2, | ||||
|       boost::optional<Matrix&> H1 = boost::none, | ||||
|       boost::optional<Matrix&> H2 = boost::none) const { | ||||
| 
 | ||||
|     Vector v1( VALUE::Logmap(p1) ); | ||||
|     Vector v2( VALUE::Logmap(p2) ); | ||||
| 
 | ||||
|     Vector alpha(tau_.size()); | ||||
|     Vector alpha_v1(tau_.size()); | ||||
|     for(int i=0; i<tau_.size(); i++){ | ||||
|       alpha(i) = exp(- 1/tau_(i)*dt_ ); | ||||
|       alpha_v1(i) = alpha(i) * v1(i); | ||||
|     } | ||||
| 
 | ||||
|     Vector hx(v2 - alpha_v1); | ||||
| 
 | ||||
|     if(H1) *H1 = - diag(alpha); | ||||
|     if(H2) *H2 = eye(v2.size()); | ||||
| 
 | ||||
|     return hx; | ||||
|   } | ||||
| 
 | ||||
| private: | ||||
| 
 | ||||
|   /** Serialization function */ | ||||
|   friend class boost::serialization::access; | ||||
|   template<class ARCHIVE> | ||||
|   void serialize(ARCHIVE & ar, const unsigned int version) { | ||||
|     ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base); | ||||
|     ar & BOOST_SERIALIZATION_NVP(dt_); | ||||
|     ar & BOOST_SERIALIZATION_NVP(tau_); | ||||
|   } | ||||
| 
 | ||||
|   SharedGaussian calcDiscreteNoiseModel(const SharedGaussian& model, double delta_t){ | ||||
|     /* Q_d (approx)= Q * delta_t */ | ||||
|     /* In practice, square root of the information matrix is represented, so that:
 | ||||
|      *  R_d (approx)= R / sqrt(delta_t) | ||||
|      * */ | ||||
|     noiseModel::Gaussian::shared_ptr gaussian_model = boost::dynamic_pointer_cast<noiseModel::Gaussian>(model); | ||||
|     SharedGaussian model_d(noiseModel::Gaussian::SqrtInformation(gaussian_model->R()/sqrt(delta_t))); | ||||
|     return model_d; | ||||
|   } | ||||
| 
 | ||||
| }; // \class GaussMarkov1stOrderFactor
 | ||||
| 
 | ||||
| } /// namespace gtsam
 | ||||
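The 1/sqrt(delta_t) scaling in calcDiscreteNoiseModel follows from the square-root-information convention used by the noise models: with continuous-time covariance Q = (RᵀR)⁻¹ and the first-order discretization Q_d ≈ Q·Δt,

$$
Q_d \approx Q\,\Delta t = (R^\top R)^{-1}\,\Delta t
= \Big[\big(R/\sqrt{\Delta t}\big)^{\!\top}\big(R/\sqrt{\Delta t}\big)\Big]^{-1}
\quad\Longrightarrow\quad R_d = R/\sqrt{\Delta t}.
$$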
|  | @ -21,6 +21,8 @@ | |||
| #include <gtsam/base/Testable.h> | ||||
| #include <gtsam/base/Lie.h> | ||||
| #include <gtsam/nonlinear/NonlinearFactor.h> | ||||
| #include <gtsam/nonlinear/NonlinearFactorGraph.h> | ||||
| #include <gtsam/nonlinear/Marginals.h> | ||||
| #include <gtsam/linear/GaussianFactor.h> | ||||
| #include <gtsam/slam/BetweenFactor.h> | ||||
| 
 | ||||
|  | @ -302,6 +304,101 @@ namespace gtsam { | |||
|       return currA_T_currB_msr.localCoordinates(currA_T_currB_pred); | ||||
|     } | ||||
| 
 | ||||
|     /* ************************************************************************* */ | ||||
|     SharedGaussian get_model_inlier() const { | ||||
|       return model_inlier_; | ||||
|     } | ||||
| 
 | ||||
|     /* ************************************************************************* */ | ||||
|     SharedGaussian get_model_outlier() const { | ||||
|       return model_outlier_; | ||||
|     } | ||||
| 
 | ||||
|     /* ************************************************************************* */ | ||||
|     Matrix get_model_inlier_cov() const { | ||||
|       return (model_inlier_->R().transpose()*model_inlier_->R()).inverse(); | ||||
|     } | ||||
| 
 | ||||
|     /* ************************************************************************* */ | ||||
|     Matrix get_model_outlier_cov() const { | ||||
|       return (model_outlier_->R().transpose()*model_outlier_->R()).inverse(); | ||||
|     } | ||||
| 
 | ||||
|     /* ************************************************************************* */ | ||||
|     void updateNoiseModels(const gtsam::Values& values, const gtsam::Marginals& marginals) { | ||||
|       /* given-marginals version: avoids recomputing the joint marginal when updating many factors */ | ||||
| 
 | ||||
|       std::vector<gtsam::Key> Keys; | ||||
|       Keys.push_back(keyA_); | ||||
|       Keys.push_back(keyB_); | ||||
|       JointMarginal joint_marginal12 = marginals.jointMarginalCovariance(Keys); | ||||
|       Matrix cov1 = joint_marginal12(keyA_, keyA_); | ||||
|       Matrix cov2 = joint_marginal12(keyB_, keyB_); | ||||
|       Matrix cov12 = joint_marginal12(keyA_, keyB_); | ||||
| 
 | ||||
|       updateNoiseModels_givenCovs(values, cov1, cov2, cov12); | ||||
|     } | ||||
| 
 | ||||
|     /* ************************************************************************* */ | ||||
|     void updateNoiseModels(const gtsam::Values& values, const gtsam::NonlinearFactorGraph& graph){ | ||||
|       /* Update model_inlier_ and model_outlier_ to account for uncertainty in robot trajectories
 | ||||
|        * (note these are given in the E step, where indicator probabilities are calculated). | ||||
|        * | ||||
|        * Principle: R += [H1 H2] * joint_cov12 * [H1 H2]', where H1, H2 are Jacobians of the | ||||
|        * unwhitened error w.r.t. states, and R is the measurement covariance (inlier or outlier modes). | ||||
|        * | ||||
|        * TODO: improve efficiency (info form) | ||||
|        */ | ||||
| 
 | ||||
|        // get joint covariance of the involved states
 | ||||
| 
 | ||||
|        Marginals marginals(graph, values, Marginals::QR); | ||||
| 
 | ||||
|        this->updateNoiseModels(values, marginals); | ||||
|     } | ||||
| 
 | ||||
|     /* ************************************************************************* */ | ||||
|     void updateNoiseModels_givenCovs(const gtsam::Values& values, const Matrix& cov1, const Matrix& cov2, const Matrix& cov12){ | ||||
|       /* Update model_inlier_ and model_outlier_ to account for uncertainty in robot trajectories
 | ||||
|        * (note these are given in the E step, where indicator probabilities are calculated). | ||||
|        * | ||||
|        * Principle: R += [H1 H2] * joint_cov12 * [H1 H2]', where H1, H2 are Jacobians of the | ||||
|        * unwhitened error w.r.t. states, and R is the measurement covariance (inlier or outlier modes). | ||||
|        * | ||||
|        * TODO: improve efficiency (info form) | ||||
|        */ | ||||
| 
 | ||||
|       const T& p1 = values.at<T>(keyA_); | ||||
|       const T& p2 = values.at<T>(keyB_); | ||||
| 
 | ||||
|       Matrix H1, H2; | ||||
|       T hx = p1.between(p2, H1, H2); // h(x)
 | ||||
| 
 | ||||
|       Matrix H; | ||||
|       H.resize(H1.rows(), H1.cols()+H2.cols()); | ||||
|       H << H1, H2; // H = [H1 H2]
 | ||||
| 
 | ||||
|       Matrix joint_cov; | ||||
|       joint_cov.resize(cov1.rows()+cov2.rows(), cov1.cols()+cov2.cols()); | ||||
|       joint_cov << cov1, cov12, | ||||
|           cov12.transpose(), cov2; | ||||
| 
 | ||||
|       Matrix cov_state = H*joint_cov*H.transpose(); | ||||
| 
 | ||||
|       //       model_inlier_->print("before:");
 | ||||
| 
 | ||||
|       // update inlier and outlier noise models
 | ||||
|       Matrix covRinlier = (model_inlier_->R().transpose()*model_inlier_->R()).inverse(); | ||||
|       model_inlier_ = gtsam::noiseModel::Gaussian::Covariance(covRinlier + cov_state); | ||||
| 
 | ||||
|       Matrix covRoutlier = (model_outlier_->R().transpose()*model_outlier_->R()).inverse(); | ||||
|       model_outlier_ = gtsam::noiseModel::Gaussian::Covariance(covRoutlier + cov_state); | ||||
| 
 | ||||
|       //       model_inlier_->print("after:");
 | ||||
|       //       std::cout<<"covRinlier + cov_state: "<<covRinlier + cov_state<<std::endl;
 | ||||
|     } | ||||
| 
 | ||||
| 
 | ||||
|     /* ************************************************************************* */ | ||||
| 
 | ||||
|     /** number of variables attached to this factor */ | ||||
|  |  | |||
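The updateNoiseModels_givenCovs logic above is a first-order propagation of the joint trajectory uncertainty through h(x_A, x_B) = between(x_A, x_B), added onto the measurement covariance of each mode. Written out with the same symbols as the comments, where each covariance is recovered from its square-root information matrix as (RᵀR)⁻¹:

$$
\Sigma_{\text{inlier/outlier}} \;\leftarrow\; \Sigma_{\text{inlier/outlier}}
\;+\; \begin{bmatrix} H_1 & H_2 \end{bmatrix}
\begin{bmatrix} \Sigma_{11} & \Sigma_{12} \\ \Sigma_{12}^\top & \Sigma_{22} \end{bmatrix}
\begin{bmatrix} H_1 & H_2 \end{bmatrix}^{\!\top}.
$$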
|  | @ -0,0 +1,85 @@ | |||
| /**
 | ||||
|  * @file    testBiasedGPSFactor.cpp | ||||
|  * @brief   Unit tests for the BiasedGPSFactor | ||||
|  * @author Luca Carlone | ||||
|  * @date   July 30, 2014 | ||||
|  */ | ||||
| 
 | ||||
| #include <gtsam/base/numericalDerivative.h> | ||||
| #include <gtsam/geometry/Pose3.h> | ||||
| #include <gtsam/inference/Symbol.h> | ||||
| #include <gtsam_unstable/slam/BiasedGPSFactor.h> | ||||
| #include <CppUnitLite/TestHarness.h> | ||||
| 
 | ||||
| using namespace gtsam; | ||||
| using namespace gtsam::symbol_shorthand; | ||||
| using namespace gtsam::noiseModel; | ||||
| // Convenience for named keys
 | ||||
| 
 | ||||
| using symbol_shorthand::X; | ||||
| using symbol_shorthand::B; | ||||
| 
 | ||||
| TEST(BiasedGPSFactor, errorNoiseless) { | ||||
| 
 | ||||
|   Rot3 R = Rot3::rodriguez(0.1, 0.2, 0.3); | ||||
|   Point3 t(1.0, 0.5, 0.2); | ||||
|   Pose3 pose(R,t); | ||||
|   Point3 bias(0.0,0.0,0.0); | ||||
|   Point3 noise(0.0,0.0,0.0); | ||||
|   Point3 measured = t + noise; | ||||
| 
 | ||||
|   BiasedGPSFactor factor(X(1), B(1), measured, Isotropic::Sigma(3, 0.05)); | ||||
|   Vector expectedError = (Vector(3) << 0.0, 0.0, 0.0 ); | ||||
|   Vector actualError = factor.evaluateError(pose,bias); | ||||
|   EXPECT(assert_equal(expectedError,actualError, 1E-5)); | ||||
| } | ||||
| 
 | ||||
| TEST(BiasedGPSFactor, errorNoisy) { | ||||
| 
 | ||||
|   Rot3 R = Rot3::rodriguez(0.1, 0.2, 0.3); | ||||
|   Point3 t(1.0, 0.5, 0.2); | ||||
|   Pose3 pose(R,t); | ||||
|   Point3 bias(0.0,0.0,0.0); | ||||
|   Point3 noise(1.0,2.0,3.0); | ||||
|   Point3 measured = t - noise; | ||||
| 
 | ||||
|   BiasedGPSFactor factor(X(1), B(1), measured, Isotropic::Sigma(3, 0.05)); | ||||
|   Vector expectedError = (Vector(3) << 1.0, 2.0, 3.0 ); | ||||
|   Vector actualError = factor.evaluateError(pose,bias); | ||||
|   EXPECT(assert_equal(expectedError,actualError, 1E-5)); | ||||
| } | ||||
| 
 | ||||
| TEST(BiasedGPSFactor, jacobian) { | ||||
| 
 | ||||
|   Rot3 R = Rot3::rodriguez(0.1, 0.2, 0.3); | ||||
|   Point3 t(1.0, 0.5, 0.2); | ||||
|   Pose3 pose(R,t); | ||||
|   Point3 bias(0.0,0.0,0.0); | ||||
| 
 | ||||
|   Point3 noise(0.0,0.0,0.0); | ||||
|   Point3 measured = t + noise; | ||||
| 
 | ||||
|   BiasedGPSFactor factor(X(1), B(1), measured, Isotropic::Sigma(3, 0.05)); | ||||
| 
 | ||||
|   Matrix actualH1, actualH2; | ||||
|   factor.evaluateError(pose,bias, actualH1, actualH2); | ||||
| 
 | ||||
|   Matrix numericalH1 = numericalDerivative21( | ||||
|       boost::function<Vector(const Pose3&, const Point3&)>(boost::bind( | ||||
|           &BiasedGPSFactor::evaluateError, factor, _1, _2, boost::none, | ||||
|           boost::none)), pose, bias, 1e-5); | ||||
|   EXPECT(assert_equal(numericalH1,actualH1, 1E-5)); | ||||
| 
 | ||||
|   Matrix numericalH2 = numericalDerivative22( | ||||
|       boost::function<Vector(const Pose3&, const Point3&)>(boost::bind( | ||||
|           &BiasedGPSFactor::evaluateError, factor, _1, _2, boost::none, | ||||
|           boost::none)), pose, bias, 1e-5); | ||||
|   EXPECT(assert_equal(numericalH2,actualH2, 1E-5)); | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| int main() { | ||||
|   TestResult tr; | ||||
|   return TestRegistry::runAllTests(tr); | ||||
| } | ||||
| /* ************************************************************************* */ | ||||
|  | @ -0,0 +1,122 @@ | |||
| /* ----------------------------------------------------------------------------
 | ||||
| 
 | ||||
|  * GTSAM Copyright 2010, Georgia Tech Research Corporation, | ||||
|  * Atlanta, Georgia 30332-0415 | ||||
|  * All Rights Reserved | ||||
|  * Authors: Frank Dellaert, et al. (see THANKS for the full author list) | ||||
| 
 | ||||
|  * See LICENSE for the license information | ||||
| 
 | ||||
|  * -------------------------------------------------------------------------- */ | ||||
| 
 | ||||
| /**
 | ||||
|  * @file    testGaussMarkov1stOrderFactor.cpp | ||||
|  * @brief   Unit tests for the GaussMarkov1stOrder factor | ||||
|  * @author  Vadim Indelman | ||||
|  * @date    Jan 17, 2012 | ||||
|  */ | ||||
| 
 | ||||
| #include <CppUnitLite/TestHarness.h> | ||||
| #include <gtsam_unstable/slam/GaussMarkov1stOrderFactor.h> | ||||
| #include <gtsam/nonlinear/Values.h> | ||||
| #include <gtsam/inference/Key.h> | ||||
| #include <gtsam/base/numericalDerivative.h> | ||||
| 
 | ||||
| using namespace std; | ||||
| using namespace gtsam; | ||||
| 
 | ||||
| //! Factors
 | ||||
| typedef GaussMarkov1stOrderFactor<gtsam::LieVector> GaussMarkovFactor; | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| gtsam::LieVector predictionError(const gtsam::LieVector& v1, const gtsam::LieVector& v2, const GaussMarkovFactor factor) { | ||||
|   return factor.evaluateError(v1, v2); | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( GaussMarkovFactor, equals ) | ||||
| { | ||||
|   // Create two identical factors and make sure they're equal
 | ||||
|   gtsam::Key x1(1); | ||||
|   gtsam::Key x2(2); | ||||
|   double delta_t = 0.10; | ||||
|   Vector tau = (Vector(3) << 100.0, 150.0, 10.0); | ||||
|   gtsam::SharedGaussian model = gtsam::noiseModel::Isotropic::Sigma(3, 1.0); | ||||
| 
 | ||||
|   GaussMarkovFactor factor1(x1, x2, delta_t, tau, model); | ||||
|   GaussMarkovFactor factor2(x1, x2, delta_t, tau, model); | ||||
| 
 | ||||
|   CHECK(gtsam::assert_equal(factor1, factor2)); | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( GaussMarkovFactor, error ) | ||||
| { | ||||
|   gtsam::Values linPoint; | ||||
|   gtsam::Key x1(1); | ||||
|   gtsam::Key x2(2); | ||||
|   double delta_t = 0.10; | ||||
|   Vector tau = (Vector(3) << 100.0, 150.0, 10.0); | ||||
|   gtsam::SharedGaussian model = gtsam::noiseModel::Isotropic::Sigma(3, 1.0); | ||||
| 
 | ||||
|   gtsam::LieVector v1 = gtsam::LieVector((gtsam::Vector(3) << 10.0, 12.0, 13.0)); | ||||
|   gtsam::LieVector v2 = gtsam::LieVector((gtsam::Vector(3) << 10.0, 15.0, 14.0)); | ||||
| 
 | ||||
|   // Create two nodes
 | ||||
|   linPoint.insert(x1, v1); | ||||
|   linPoint.insert(x2, v2); | ||||
| 
 | ||||
|   GaussMarkovFactor factor(x1, x2, delta_t, tau, model); | ||||
|   gtsam::Vector Err1( factor.evaluateError(v1, v2) ); | ||||
| 
 | ||||
|   // Manually calculate the error
 | ||||
|   Vector alpha(tau.size()); | ||||
|   Vector alpha_v1(tau.size()); | ||||
|   for(int i=0; i<tau.size(); i++){ | ||||
|     alpha(i) = exp(- 1/tau(i)*delta_t ); | ||||
|     alpha_v1(i) = alpha(i) * v1(i); | ||||
|   } | ||||
|   gtsam::Vector Err2( v2 - alpha_v1 ); | ||||
| 
 | ||||
|   CHECK(gtsam::assert_equal(Err1, Err2, 1e-9)); | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST (GaussMarkovFactor, jacobian ) { | ||||
| 
 | ||||
|   gtsam::Values linPoint; | ||||
|   gtsam::Key x1(1); | ||||
|   gtsam::Key x2(2); | ||||
|   double delta_t = 0.10; | ||||
|   Vector tau = (Vector(3) << 100.0, 150.0, 10.0); | ||||
|   gtsam::SharedGaussian model = gtsam::noiseModel::Isotropic::Sigma(3, 1.0); | ||||
| 
 | ||||
|   GaussMarkovFactor factor(x1, x2, delta_t, tau, model); | ||||
| 
 | ||||
|   // Update the linearization point
 | ||||
|   gtsam::LieVector v1_upd = gtsam::LieVector((gtsam::Vector(3) << 0.5, -0.7, 0.3)); | ||||
|   gtsam::LieVector v2_upd = gtsam::LieVector((gtsam::Vector(3) << -0.7, 0.4, 0.9)); | ||||
| 
 | ||||
|   // Calculate the Jacobian matrix using the factor
 | ||||
|   Matrix computed_H1, computed_H2; | ||||
|   factor.evaluateError(v1_upd, v2_upd, computed_H1, computed_H2); | ||||
| 
 | ||||
|   // Calculate the Jacobian matrices H1 and H2 using the numerical derivative function
 | ||||
|   gtsam::Matrix numerical_H1, numerical_H2; | ||||
|   numerical_H1 = gtsam::numericalDerivative21<gtsam::LieVector, gtsam::LieVector, | ||||
|       gtsam::LieVector>(boost::bind(&predictionError, _1, _2, factor), v1_upd, v2_upd); | ||||
|   numerical_H2 = gtsam::numericalDerivative22<gtsam::LieVector, gtsam::LieVector, | ||||
|       gtsam::LieVector>(boost::bind(&predictionError, _1, _2, factor), v1_upd, v2_upd); | ||||
| 
 | ||||
|   // Verify they are equal for this choice of state
 | ||||
|   CHECK( gtsam::assert_equal(numerical_H1, computed_H1, 1e-9)); | ||||
|   CHECK( gtsam::assert_equal(numerical_H2, computed_H2, 1e-9)); | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| int main() | ||||
| { | ||||
|   TestResult tr; return TestRegistry::runAllTests(tr); | ||||
| } | ||||
| /* ************************************************************************* */ | ||||
| 
 | ||||
|  | @ -49,12 +49,12 @@ using symbol_shorthand::T; | |||
| typedef ProjectionFactorPPP<Pose3, Point3> TestProjectionFactor; | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( ProjectionFactor, nonStandard ) { | ||||
| TEST( ProjectionFactorPPP, nonStandard ) { | ||||
|   ProjectionFactorPPP<Pose3, Point3, Cal3DS2> f; | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( ProjectionFactor, Constructor) { | ||||
| TEST( ProjectionFactorPPP, Constructor) { | ||||
|   Key poseKey(X(1)); | ||||
|   Key transformKey(T(1)); | ||||
|   Key pointKey(L(1)); | ||||
|  | @ -65,7 +65,7 @@ TEST( ProjectionFactor, Constructor) { | |||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( ProjectionFactor, ConstructorWithTransform) { | ||||
| TEST( ProjectionFactorPPP, ConstructorWithTransform) { | ||||
|   Key poseKey(X(1)); | ||||
|   Key transformKey(T(1)); | ||||
|   Key pointKey(L(1)); | ||||
|  | @ -75,7 +75,7 @@ TEST( ProjectionFactor, ConstructorWithTransform) { | |||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( ProjectionFactor, Equals ) { | ||||
| TEST( ProjectionFactorPPP, Equals ) { | ||||
|   // Create two identical factors and make sure they're equal
 | ||||
|   Point2 measurement(323.0, 240.0); | ||||
| 
 | ||||
|  | @ -86,7 +86,7 @@ TEST( ProjectionFactor, Equals ) { | |||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( ProjectionFactor, EqualsWithTransform ) { | ||||
| TEST( ProjectionFactorPPP, EqualsWithTransform ) { | ||||
|   // Create two identical factors and make sure they're equal
 | ||||
|   Point2 measurement(323.0, 240.0); | ||||
|   Pose3 body_P_sensor(Rot3::RzRyRx(-M_PI_2, 0.0, -M_PI_2), Point3(0.25, -0.10, 1.0)); | ||||
|  | @ -98,7 +98,7 @@ TEST( ProjectionFactor, EqualsWithTransform ) { | |||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( ProjectionFactor, Error ) { | ||||
| TEST( ProjectionFactorPPP, Error ) { | ||||
|   // Create the factor with a measurement that is 3 pixels off in x
 | ||||
|   Key poseKey(X(1)); | ||||
|   Key transformKey(T(1)); | ||||
|  | @ -121,7 +121,7 @@ TEST( ProjectionFactor, Error ) { | |||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( ProjectionFactor, ErrorWithTransform ) { | ||||
| TEST( ProjectionFactorPPP, ErrorWithTransform ) { | ||||
|   // Create the factor with a measurement that is 3 pixels off in x
 | ||||
|   Key poseKey(X(1)); | ||||
|   Key transformKey(T(1)); | ||||
|  | @ -145,7 +145,7 @@ TEST( ProjectionFactor, ErrorWithTransform ) { | |||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( ProjectionFactor, Jacobian ) { | ||||
| TEST( ProjectionFactorPPP, Jacobian ) { | ||||
|   // Create the factor with a measurement that is 3 pixels off in x
 | ||||
|   Key poseKey(X(1)); | ||||
|   Key transformKey(T(1)); | ||||
|  | @ -179,7 +179,7 @@ TEST( ProjectionFactor, Jacobian ) { | |||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( ProjectionFactor, JacobianWithTransform ) { | ||||
| TEST( ProjectionFactorPPP, JacobianWithTransform ) { | ||||
|   // Create the factor with a measurement that is 3 pixels off in x
 | ||||
|   Key poseKey(X(1)); | ||||
|   Key transformKey(T(1)); | ||||
|  |  | |||
|  | @ -50,19 +50,19 @@ using symbol_shorthand::K; | |||
| typedef ProjectionFactorPPPC<Pose3, Point3, Cal3_S2> TestProjectionFactor; | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( ProjectionFactor, nonStandard ) { | ||||
| TEST( ProjectionFactorPPPC, nonStandard ) { | ||||
|   ProjectionFactorPPPC<Pose3, Point3, Cal3DS2> f; | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( ProjectionFactor, Constructor) { | ||||
| TEST( ProjectionFactorPPPC, Constructor) { | ||||
|   Point2 measurement(323.0, 240.0); | ||||
|   TestProjectionFactor factor(measurement, model, X(1), T(1), L(1), K(1)); | ||||
|   // TODO: Actually check something
 | ||||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( ProjectionFactor, Equals ) { | ||||
| TEST( ProjectionFactorPPPC, Equals ) { | ||||
|   // Create two identical factors and make sure they're equal
 | ||||
|   Point2 measurement(323.0, 240.0); | ||||
| 
 | ||||
|  | @ -73,7 +73,7 @@ TEST( ProjectionFactor, Equals ) { | |||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( ProjectionFactor, Error ) { | ||||
| TEST( ProjectionFactorPPPC, Error ) { | ||||
|   // Create the factor with a measurement that is 3 pixels off in x
 | ||||
|   Point2 measurement(323.0, 240.0); | ||||
|   TestProjectionFactor factor(measurement, model, X(1), T(1), L(1), K(1)); | ||||
|  | @ -93,7 +93,7 @@ TEST( ProjectionFactor, Error ) { | |||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( ProjectionFactor, ErrorWithTransform ) { | ||||
| TEST( ProjectionFactorPPPC, ErrorWithTransform ) { | ||||
|   // Create the factor with a measurement that is 3 pixels off in x
 | ||||
|   Point2 measurement(323.0, 240.0); | ||||
|   Pose3 transform(Rot3::RzRyRx(-M_PI_2, 0.0, -M_PI_2), Point3(0.25, -0.10, 1.0)); | ||||
|  | @ -114,7 +114,7 @@ TEST( ProjectionFactor, ErrorWithTransform ) { | |||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( ProjectionFactor, Jacobian ) { | ||||
| TEST( ProjectionFactorPPPC, Jacobian ) { | ||||
|   // Create the factor with a measurement that is 3 pixels off in x
 | ||||
|   Point2 measurement(323.0, 240.0); | ||||
|   TestProjectionFactor factor(measurement, model, X(1), T(1), L(1), K(1)); | ||||
|  | @ -149,7 +149,7 @@ TEST( ProjectionFactor, Jacobian ) { | |||
| } | ||||
| 
 | ||||
| /* ************************************************************************* */ | ||||
| TEST( ProjectionFactor, JacobianWithTransform ) { | ||||
| TEST( ProjectionFactorPPPC, JacobianWithTransform ) { | ||||
|   // Create the factor with a measurement that is 3 pixels off in x
 | ||||
|   Point2 measurement(323.0, 240.0); | ||||
|   Pose3 body_P_sensor(Rot3::RzRyRx(-M_PI_2, 0.0, -M_PI_2), Point3(0.25, -0.10, 1.0)); | ||||
|  |  | |||
|  | @ -0,0 +1,16 @@ | |||
| <?xml version="1.0"?> | ||||
| <package format="2"> | ||||
|   <name>gtsam</name> | ||||
|   <version>3.1.0</version> | ||||
|   <description>gtsam</description> | ||||
| 
 | ||||
|   <maintainer email="gtsam@lists.gatech.edu">Frank Dellaert</maintainer> | ||||
|   <license>BSD</license> | ||||
| 
 | ||||
|   <buildtool_depend>cmake</buildtool_depend> | ||||
|   <export> | ||||
|     <!-- Specify that this is not really a Catkin package--> | ||||
|     <build_type>cmake</build_type> | ||||
|   </export> | ||||
| </package> | ||||
| 
 | ||||