Merge remote-tracking branch 'origin/develop' into feature/TemplatedSmartFactors

release/4.3a0
cbeall3 2014-11-14 14:35:25 -05:00
commit d24b799988
389 changed files with 164878 additions and 7819 deletions

.cproject

@@ -518,6 +518,22 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="SFMExampleExpressions.run" path="build/gtsam_unstable/examples" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>SFMExampleExpressions.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="Pose2SLAMExampleExpressions.run" path="build/gtsam_unstable/examples" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>Pose2SLAMExampleExpressions.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testInvDepthCamera3.run" path="build/gtsam_unstable/geometry/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="testInvDepthCamera3.run" path="build/gtsam_unstable/geometry/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments> <buildArguments>-j5</buildArguments>
@ -584,7 +600,6 @@
</target> </target>
<target name="tests/testBayesTree.run" path="inference" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="tests/testBayesTree.run" path="inference" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments/>
<buildTarget>tests/testBayesTree.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -592,7 +607,6 @@
</target>
<target name="testBinaryBayesNet.run" path="inference" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments/>
<buildTarget>testBinaryBayesNet.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -640,7 +654,6 @@
</target>
<target name="testSymbolicBayesNet.run" path="inference" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments/>
<buildTarget>testSymbolicBayesNet.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -648,7 +661,6 @@
</target>
<target name="tests/testSymbolicFactor.run" path="inference" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments/>
<buildTarget>tests/testSymbolicFactor.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -656,7 +668,6 @@
</target>
<target name="testSymbolicFactorGraph.run" path="inference" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments/>
<buildTarget>testSymbolicFactorGraph.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -672,7 +683,6 @@
</target>
<target name="tests/testBayesTree" path="inference" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments/>
<buildTarget>tests/testBayesTree</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -726,14 +736,6 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testImuFactor.run" path="build/gtsam_unstable/slam/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testImuFactor.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testInvDepthFactor3.run" path="build/gtsam_unstable/slam/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="testInvDepthFactor3.run" path="build/gtsam_unstable/slam/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments> <buildArguments>-j5</buildArguments>
@ -790,7 +792,71 @@
<useDefaultCommand>true</useDefaultCommand> <useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders> <runAllBuilders>true</runAllBuilders>
</target> </target>
<target name="testGaussianFactorGraphUnordered.run" path="build/gtsam/linear/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="testInertialNavFactor_GlobalVelocity.run" path="build/gtsam_unstable/slam/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testInertialNavFactor_GlobalVelocity.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testInvDepthFactorVariant3.run" path="build/gtsam_unstable/slam/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testInvDepthFactorVariant3.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testInvDepthFactorVariant1.run" path="build/gtsam_unstable/slam/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testInvDepthFactorVariant1.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testEquivInertialNavFactor_GlobalVel.run" path="build/gtsam_unstable/slam/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testEquivInertialNavFactor_GlobalVel.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testInvDepthFactorVariant2.run" path="build/gtsam_unstable/slam/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testInvDepthFactorVariant2.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testRelativeElevationFactor.run" path="build/gtsam_unstable/slam/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testRelativeElevationFactor.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testPoseBetweenFactor.run" path="build/gtsam_unstable/slam/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testPoseBetweenFactor.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testGaussMarkov1stOrderFactor.run" path="build/gtsam_unstable/slam/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testGaussMarkov1stOrderFactor.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testGaussianFactorGraph.run" path="build/gtsam/linear/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testGaussianFactorGraphUnordered.run</buildTarget>
@@ -886,6 +952,46 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testGaussianBayesTree.run" path="build/gtsam/linear/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testGaussianBayesTree.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="timeCameraExpression.run" path="build/gtsam_unstable/timing" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>timeCameraExpression.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="timeOneCameraExpression.run" path="build/gtsam_unstable/timing" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>timeOneCameraExpression.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="timeSFMExpressions.run" path="build/gtsam_unstable/timing" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>timeSFMExpressions.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="timeAdaptAutoDiff.run" path="build/gtsam_unstable/timing" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>timeAdaptAutoDiff.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testCombinedImuFactor.run" path="build/gtsam/navigation/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="testCombinedImuFactor.run" path="build/gtsam/navigation/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments> <buildArguments>-j5</buildArguments>
@ -1008,7 +1114,6 @@
</target> </target>
<target name="testErrors.run" path="linear" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="testErrors.run" path="linear" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments/>
<buildTarget>testErrors.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -1238,46 +1343,6 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testBTree.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testBTree.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testDSF.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testDSF.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testDSFMap.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testDSFMap.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testDSFVector.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testDSFVector.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testFixedVector.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testFixedVector.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="all" path="slam" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="all" path="slam" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments>-j2</buildArguments> <buildArguments>-j2</buildArguments>
@ -1360,6 +1425,7 @@
</target> </target>
<target name="testSimulated2DOriented.run" path="slam" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="testSimulated2DOriented.run" path="slam" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments/>
<buildTarget>testSimulated2DOriented.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -1399,6 +1465,7 @@
</target>
<target name="testSimulated2D.run" path="slam" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments/>
<buildTarget>testSimulated2D.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -1406,6 +1473,7 @@
</target>
<target name="testSimulated3D.run" path="slam" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments/>
<buildTarget>testSimulated3D.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -1419,6 +1487,46 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testBTree.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testBTree.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testDSF.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testDSF.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testDSFMap.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testDSFMap.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testDSFVector.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testDSFVector.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testFixedVector.run" path="build/gtsam_unstable/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testFixedVector.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testEliminationTree.run" path="build/gtsam/inference/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="testEliminationTree.run" path="build/gtsam/inference/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments> <buildArguments>-j5</buildArguments>
@ -1676,7 +1784,6 @@
</target> </target>
<target name="Generate DEB Package" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="Generate DEB Package" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>cpack</buildCommand> <buildCommand>cpack</buildCommand>
<buildArguments/>
<buildTarget>-G DEB</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -1684,7 +1791,6 @@
</target>
<target name="Generate RPM Package" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>cpack</buildCommand>
<buildArguments/>
<buildTarget>-G RPM</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -1692,7 +1798,6 @@
</target>
<target name="Generate TGZ Package" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>cpack</buildCommand>
<buildArguments/>
<buildTarget>-G TGZ</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -1700,7 +1805,6 @@
</target>
<target name="Generate TGZ Source Package" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>cpack</buildCommand>
<buildArguments/>
<buildTarget>--config CPackSourceConfig.cmake</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -2033,6 +2137,30 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testRot2.run" path="build/gtsam/geometry/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testRot2.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testRot3Q.run" path="build/gtsam/geometry/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testRot3Q.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testRot3.run" path="build/gtsam/geometry/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testRot3.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="all" path="release" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="all" path="release" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments>-j2</buildArguments> <buildArguments>-j2</buildArguments>
@ -2129,6 +2257,22 @@
<useDefaultCommand>true</useDefaultCommand> <useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders> <runAllBuilders>true</runAllBuilders>
</target> </target>
<target name="testVelocityConstraint.run" path="build/gtsam_unstable/dynamics/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testVelocityConstraint.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testVelocityConstraint3.run" path="build/gtsam_unstable/dynamics/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testVelocityConstraint3.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testDiscreteBayesTree.run" path="build/gtsam/discrete/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="testDiscreteBayesTree.run" path="build/gtsam/discrete/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments>-j1</buildArguments> <buildArguments>-j1</buildArguments>
@ -2209,6 +2353,38 @@
<useDefaultCommand>true</useDefaultCommand> <useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders> <runAllBuilders>true</runAllBuilders>
</target> </target>
<target name="testSpirit.run" path="build/wrap/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testSpirit.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="check.wrap" path="build/wrap/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>check.wrap</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testMethod.run" path="build/wrap/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testMethod.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testClass.run" path="build/wrap/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testClass.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="schedulingExample.run" path="build/gtsam_unstable/discrete/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="schedulingExample.run" path="build/gtsam_unstable/discrete/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments> <buildArguments>-j5</buildArguments>
@ -2289,6 +2465,22 @@
<useDefaultCommand>true</useDefaultCommand> <useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders> <runAllBuilders>true</runAllBuilders>
</target> </target>
<target name="testNumericalDerivative.run" path="build/gtsam/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testNumericalDerivative.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testVerticalBlockMatrix.run" path="build/gtsam/base/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testVerticalBlockMatrix.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="check.tests" path="build/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="check.tests" path="build/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments> <buildArguments>-j5</buildArguments>
@ -2427,7 +2619,6 @@
</target> </target>
<target name="testGraph.run" path="build/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="testGraph.run" path="build/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments/>
<buildTarget>testGraph.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -2435,7 +2626,6 @@
</target>
<target name="testJunctionTree.run" path="build/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments/>
<buildTarget>testJunctionTree.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -2443,7 +2633,6 @@
</target>
<target name="testSymbolicBayesNetB.run" path="build/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments/>
<buildTarget>testSymbolicBayesNetB.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -2505,6 +2694,14 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testManifold.run" path="build/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testManifold.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testParticleFactor.run" path="build/gtsam_unstable/nonlinear/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="testParticleFactor.run" path="build/gtsam_unstable/nonlinear/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments> <buildArguments>-j5</buildArguments>
@ -2513,6 +2710,38 @@
<useDefaultCommand>true</useDefaultCommand> <useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders> <runAllBuilders>true</runAllBuilders>
</target> </target>
<target name="testExpressionFactor.run" path="build/gtsam_unstable/nonlinear/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testExpressionFactor.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testExpression.run" path="build/gtsam_unstable/nonlinear/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testExpression.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testExpressionMeta.run" path="build/gtsam_unstable/nonlinear/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testExpressionMeta.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testAdaptAutoDiff.run" path="build/gtsam_unstable/nonlinear/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testAdaptAutoDiff.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testGaussianFactor.run" path="build/linear/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="testGaussianFactor.run" path="build/linear/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments>-j2</buildArguments> <buildArguments>-j2</buildArguments>
@ -2577,10 +2806,10 @@
<useDefaultCommand>true</useDefaultCommand> <useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders> <runAllBuilders>true</runAllBuilders>
</target> </target>
<target name="testBetweenFactor.run" path="build/gtsam/slam/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="testPriorFactor.run" path="build/gtsam/slam/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments> <buildArguments>-j5</buildArguments>
<buildTarget>testBetweenFactor.run</buildTarget> <buildTarget>testPriorFactor.run</buildTarget>
<stopOnError>true</stopOnError> <stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand> <useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders> <runAllBuilders>true</runAllBuilders>
@ -2633,6 +2862,22 @@
<useDefaultCommand>true</useDefaultCommand> <useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders> <runAllBuilders>true</runAllBuilders>
</target> </target>
<target name="testPoseRotationPrior.run" path="build/gtsam/slam/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testPoseRotationPrior.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testImplicitSchurFactor.run" path="build/gtsam/slam/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testImplicitSchurFactor.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="SimpleRotation.run" path="build/examples" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="SimpleRotation.run" path="build/examples" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments>-j2</buildArguments> <buildArguments>-j2</buildArguments>
@ -2907,6 +3152,7 @@
</target> </target>
<target name="tests/testGaussianISAM2" path="build/slam" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="tests/testGaussianISAM2" path="build/slam" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments/>
<buildTarget>tests/testGaussianISAM2</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
@@ -3008,22 +3254,6 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testSpirit.run" path="build/wrap" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testSpirit.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="check.wrap" path="build/wrap" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>check.wrap</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="wrap" path="build/wrap" targetID="org.eclipse.cdt.build.MakeTargetBuilder"> <target name="wrap" path="build/wrap" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand> <buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments> <buildArguments>-j5</buildArguments>

.gitignore

@@ -3,4 +3,5 @@
*.pyc
*.DS_Store
/examples/Data/dubrovnik-3-7-pre-rewritten.txt
/examples/Data/pose2example-rewritten.txt
/examples/Data/pose3example-rewritten.txt

CMakeLists.txt

@@ -2,9 +2,15 @@
project(GTSAM CXX C)
cmake_minimum_required(VERSION 2.6)
# new feature to Cmake Version > 2.8.12
# Mac ONLY. Define Relative Path on Mac OS
if(NOT DEFINED CMAKE_MACOSX_RPATH)
set(CMAKE_MACOSX_RPATH 0)
endif()
# Set the version number for the library
set (GTSAM_VERSION_MAJOR 3)
set (GTSAM_VERSION_MAJOR 4)
set (GTSAM_VERSION_MINOR 1)
set (GTSAM_VERSION_MINOR 0)
set (GTSAM_VERSION_PATCH 0)
math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}")
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
@@ -123,6 +129,11 @@ else()
endif()
if(${Boost_VERSION} EQUAL 105600)
message("Ignoring Boost restriction on optional lvalue assignment from rvalues")
add_definitions(-DBOOST_OPTIONAL_ALLOW_BINDING_TO_RVALUES)
endif()
###############################################################################
# Find TBB
find_package(TBB)
@@ -169,9 +180,9 @@ endif()
###############################################################################
# Find OpenMP (if we're also using MKL)
if(GTSAM_WITH_EIGEN_MKL AND GTSAM_USE_EIGEN_MKL_OPENMP AND GTSAM_USE_EIGEN_MKL)
find_package(OpenMP)
find_package(OpenMP) # do this here to generate correct message if disabled
if(GTSAM_WITH_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP AND GTSAM_USE_EIGEN_MKL)
if(OPENMP_FOUND AND GTSAM_USE_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP)
set(GTSAM_USE_EIGEN_MKL_OPENMP 1) # This will go into config.h
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")

FindMKL.cmake

@@ -58,6 +58,7 @@ FIND_PATH(MKL_ROOT_DIR
/opt/intel/mkl/*/
/opt/intel/cmkl/
/opt/intel/cmkl/*/
/opt/intel/*/mkl/
/Library/Frameworks/Intel_MKL.framework/Versions/Current/lib/universal
"C:/Program Files (x86)/Intel/ComposerXE-2011/mkl"
"C:/Program Files (x86)/Intel/Composer XE 2013/mkl"
@@ -136,13 +137,16 @@ ELSE() # UNIX and macOS
${MKL_ROOT_DIR}/lib/${MKL_ARCH_DIR}
${MKL_ROOT_DIR}/lib/
)
FIND_LIBRARY(MKL_GNUTHREAD_LIBRARY
mkl_gnu_thread
PATHS
${MKL_ROOT_DIR}/lib/${MKL_ARCH_DIR}
${MKL_ROOT_DIR}/lib/
)
# MKL on Mac OS doesn't ship with GNU thread versions, only Intel versions (see above)
IF(NOT APPLE)
FIND_LIBRARY(MKL_GNUTHREAD_LIBRARY
mkl_gnu_thread
PATHS
${MKL_ROOT_DIR}/lib/${MKL_ARCH_DIR}
${MKL_ROOT_DIR}/lib/
)
ENDIF()
# Intel Libraries
IF("${MKL_ARCH_DIR}" STREQUAL "32")
@@ -226,7 +230,12 @@ ELSE() # UNIX and macOS
endforeach()
endforeach()
SET(MKL_LIBRARIES ${MKL_LP_GNUTHREAD_LIBRARIES})
IF(APPLE)
SET(MKL_LIBRARIES ${MKL_LP_INTELTHREAD_LIBRARIES})
ELSE()
SET(MKL_LIBRARIES ${MKL_LP_GNUTHREAD_LIBRARIES})
ENDIF()
MARK_AS_ADVANCED(MKL_CORE_LIBRARY MKL_LP_LIBRARY MKL_ILP_LIBRARY
MKL_SEQUENTIAL_LIBRARY MKL_INTELTHREAD_LIBRARY MKL_GNUTHREAD_LIBRARY)
ENDIF()


@@ -48,7 +48,7 @@ public:
virtual Vector evaluateError(const Pose3& pose, boost::optional<Matrix&> H =
boost::none) const {
SimpleCamera camera(pose, *K_);
Point2 reprojectionError(camera.project(P_, H) - p_);
Point2 reprojectionError(camera.project(P_, H, boost::none, boost::none) - p_);
return reprojectionError.vector();
}
};


@@ -0,0 +1 @@
718.856 718.856 0.0 607.1928 185.2157 0.5371657189


@@ -0,0 +1 @@
718.856 718.856 0.0 607.1928 185.2157 0.5371657189


@@ -0,0 +1,135 @@
0 1 0 0 0 0 1 0 0 -0 0 1 0 0 0 0 1
1 0.99999 -0.00268679 -0.00354618 6.43221e-05 0.00267957 0.999994 -0.00204036 -0.0073023 0.00355164 0.00203084 0.999992 0.676456 0 0 0 1
2 0.999969 -0.00120771 -0.00772489 -0.0100328 0.00117985 0.999993 -0.003611 -0.0111185 0.00772919 0.00360178 0.999964 1.37125 0 0 0 1
3 0.999931 -0.00128098 -0.0117006 -0.0237327 0.00122052 0.999986 -0.00517227 -0.0136538 0.0117071 0.00515763 0.999918 2.08563 0 0 0 1
4 0.99986 5.79321e-05 -0.0167106 -0.0402272 -0.000155312 0.999983 -0.00582618 -0.0194327 0.01671 0.00582796 0.999843 2.81528 0 0 0 1
5 0.999772 -0.00118366 -0.0213077 -0.0572378 0.0010545 0.999981 -0.00607208 -0.0278191 0.0213145 0.00604822 0.999755 3.56204 0 0 0 1
6 0.999662 0.000544425 -0.0259946 -0.081545 -0.000735472 0.999973 -0.00734051 -0.0358844 0.0259899 0.00735714 0.999635 4.32265 0 0 0 1
7 0.999513 0.0032602 -0.0310324 -0.112137 -0.0035101 0.999962 -0.00800188 -0.0447209 0.0310051 0.00810691 0.999486 5.09668 0 0 0 1
8 0.999361 0.00349173 -0.0355658 -0.143594 -0.00372162 0.999973 -0.00639979 -0.0532611 0.0355425 0.00652807 0.999347 5.88701 0 0 0 1
9 0.999185 0.00268131 -0.040271 -0.176401 -0.0028332 0.999989 -0.00371493 -0.0632884 0.0402606 0.003826 0.999182 6.6897 0 0 0 1
10 0.99903 0.00226305 -0.0439747 -0.211687 -0.00231163 0.999997 -0.00105382 -0.072362 0.0439722 0.00115445 0.999032 7.50361 0 0 0 1
11 0.998896 0.00366482 -0.0468376 -0.254125 -0.00374515 0.999992 -0.00162734 -0.0820263 0.0468312 0.00180096 0.998901 8.32333 0 0 0 1
12 0.998775 0.00304285 -0.0493866 -0.295424 -0.00313866 0.999993 -0.00186268 -0.0885739 0.0493806 0.00201541 0.998778 9.15211 0 0 0 1
13 0.998682 7.09894e-05 -0.0513155 -0.334647 -0.000203775 0.999997 -0.00258241 -0.0938889 0.0513152 0.00258946 0.998679 9.98839 0 0 0 1
14 0.998565 -8.82523e-05 -0.0535542 -0.380835 -9.36659e-06 0.999998 -0.00182255 -0.10173 0.0535542 0.00182044 0.998563 10.832 0 0 0 1
15 0.998481 -0.00146793 -0.0550718 -0.429135 0.0013525 0.999997 -0.00213307 -0.111427 0.0550748 0.00205535 0.99848 11.687 0 0 0 1
16 0.998373 0.000738731 -0.0570218 -0.483426 -0.000993083 0.99999 -0.00443241 -0.122139 0.0570179 0.00448183 0.998363 12.5483 0 0 0 1
17 0.998285 0.00120595 -0.0585258 -0.540056 -0.00162301 0.999974 -0.00707907 -0.132598 0.0585158 0.00716191 0.998261 13.4179 0 0 0 1
18 0.998165 0.00516151 -0.060337 -0.6023 -0.00570195 0.999945 -0.00878826 -0.143753 0.0602883 0.00911617 0.998139 14.2952 0 0 0 1
19 0.998101 0.00610094 -0.0612993 -0.66308 -0.00663017 0.999942 -0.00843386 -0.157854 0.0612443 0.00882427 0.998084 15.1802 0 0 0 1
20 0.998014 0.0052997 -0.0627662 -0.722045 -0.00574767 0.999959 -0.0069587 -0.172847 0.0627268 0.00730564 0.998004 16.074 0 0 0 1
21 0.99792 0.00591748 -0.0641975 -0.78346 -0.00627924 0.999966 -0.00543487 -0.186221 0.0641631 0.00582667 0.997922 16.9738 0 0 0 1
22 0.997857 0.00547694 -0.0651993 -0.845347 -0.00584101 0.999968 -0.00539455 -0.199741 0.0651677 0.00576382 0.997858 17.8786 0 0 0 1
23 0.997737 0.00536917 -0.0670282 -0.908218 -0.00579979 0.999964 -0.0062316 -0.212775 0.0669924 0.00660624 0.997732 18.7877 0 0 0 1
24 0.997663 0.00386695 -0.0682185 -0.971291 -0.00435203 0.999966 -0.00696344 -0.226442 0.0681893 0.00724406 0.997646 19.7046 0 0 0 1
25 0.997629 0.00410637 -0.0687004 -1.03663 -0.00448288 0.999976 -0.00532714 -0.239555 0.0686769 0.00562249 0.997623 20.6257 0 0 0 1
26 0.997617 0.00588773 -0.0687501 -1.10557 -0.0060349 0.99998 -0.00193325 -0.254273 0.0687373 0.00234355 0.997632 21.55 0 0 0 1
27 0.997662 0.00693766 -0.0679906 -1.17297 -0.00682806 0.999975 0.0018442 -0.26563 0.0680017 -0.00137565 0.997684 22.4875 0 0 0 1
28 0.997774 0.00579785 -0.0664343 -1.23728 -0.00550265 0.999974 0.00462554 -0.271962 0.0664594 -0.00424968 0.99778 23.4285 0 0 0 1
29 0.997872 0.00589563 -0.0649408 -1.30214 -0.00556012 0.99997 0.00534586 -0.277922 0.0649704 -0.0049734 0.997875 24.3732 0 0 0 1
30 0.997958 0.00627024 -0.0635595 -1.36462 -0.00612984 0.999978 0.00240374 -0.285335 0.0635732 -0.00200922 0.997975 25.314 0 0 0 1
31 0.998004 0.00714074 -0.0627411 -1.42783 -0.00731158 0.99997 -0.00249375 -0.293171 0.0627215 0.00294751 0.998027 26.2605 0 0 0 1
32 0.99808 0.0063692 -0.0616159 -1.48954 -0.00671918 0.999962 -0.00547459 -0.302321 0.0615787 0.00587809 0.998085 27.2168 0 0 0 1
33 0.99813 0.00376787 -0.0610159 -1.54654 -0.00404632 0.999982 -0.0044408 -0.313516 0.0609981 0.00467938 0.998127 28.1829 0 0 0 1
34 0.998113 0.00193972 -0.0613743 -1.60668 -0.00191171 0.999998 0.000515183 -0.324411 0.0613752 -0.000396881 0.998115 29.1626 0 0 0 1
35 0.99806 -0.0017885 -0.062228 -1.66532 0.00203402 0.99999 0.00388232 -0.335656 0.0622204 -0.00400136 0.998054 30.1428 0 0 0 1
36 0.997945 -0.00917543 -0.0634115 -1.72059 0.00939451 0.999951 0.00315749 -0.343316 0.0633794 -0.00374672 0.997982 31.1244 0 0 0 1
37 0.997825 -0.0112684 -0.0649459 -1.78049 0.011242 0.999937 -0.000771312 -0.350864 0.0649504 3.95099e-05 0.997888 32.1064 0 0 0 1
38 0.997739 -0.0110126 -0.0662983 -1.85007 0.0107254 0.999932 -0.00468596 -0.361068 0.0663454 0.00396429 0.997789 33.0886 0 0 0 1
39 0.997597 -0.00959503 -0.0686163 -1.92119 0.00924037 0.999942 -0.00548426 -0.373466 0.0686649 0.00483704 0.997628 34.0774 0 0 0 1
40 0.99755 -0.0095802 -0.0693031 -1.99331 0.00931271 0.999948 -0.00418184 -0.387047 0.0693396 0.00352619 0.997587 35.0736 0 0 0 1
41 0.997473 -0.00634387 -0.0707596 -2.0707 0.00626661 0.99998 -0.0013139 -0.403858 0.0707665 0.00086716 0.997493 36.0721 0 0 0 1
42 0.99739 -0.00624366 -0.0719343 -2.14553 0.00625582 0.99998 -5.62375e-05 -0.416888 0.0719332 -0.000393917 0.997409 37.0728 0 0 0 1
43 0.997312 -0.00473093 -0.0731254 -2.21909 0.00492848 0.999985 0.00252135 -0.428625 0.0731123 -0.00287497 0.99732 38.0643 0 0 0 1
44 0.997318 -0.00467696 -0.0730348 -2.29215 0.00509473 0.999972 0.00553481 -0.440023 0.0730068 -0.00589206 0.997314 39.0618 0 0 0 1
45 0.997274 0.00138304 -0.0737801 -2.37574 -0.000811217 0.999969 0.00777971 -0.447869 0.0737886 -0.00769865 0.997244 40.0548 0 0 0 1
46 0.997262 0.00149131 -0.0739326 -2.45529 -0.000969511 0.999974 0.00709318 -0.454763 0.0739413 -0.00700208 0.997238 41.0557 0 0 0 1
47 0.997266 0.00175929 -0.0738699 -2.53081 -0.00136899 0.999985 0.00533379 -0.460519 0.0738782 -0.00521809 0.997254 42.0518 0 0 0 1
48 0.997253 0.00408494 -0.0739555 -2.61212 -0.00386552 0.999988 0.00310988 -0.469863 0.0739673 -0.00281546 0.997257 43.0493 0 0 0 1
49 0.997185 0.00365371 -0.0748884 -2.68799 -0.00342799 0.999989 0.00314243 -0.47951 0.0748991 -0.00287687 0.997187 44.0473 0 0 0 1
50 0.997077 0.00181435 -0.0763845 -2.76071 -0.00149292 0.99999 0.00426495 -0.487845 0.0763915 -0.00413845 0.997069 45.0403 0 0 0 1
51 0.997018 0.00246727 -0.0771352 -2.84117 -0.00206285 0.999984 0.00532227 -0.499132 0.0771471 -0.00514727 0.997006 46.0244 0 0 0 1
52 0.996991 0.00504805 -0.0773507 -2.92304 -0.00493379 0.999986 0.00166824 -0.510863 0.0773581 -0.00128158 0.997003 46.994 0 0 0 1
53 0.996911 0.00581773 -0.0783264 -3.00373 -0.00604061 0.999978 -0.00260888 -0.521193 0.0783095 0.00307396 0.996924 47.9551 0 0 0 1
54 0.996846 0.00678413 -0.0790757 -3.08343 -0.00711636 0.999967 -0.00392044 -0.534186 0.0790465 0.00447081 0.996861 48.9236 0 0 0 1
55 0.996843 0.00557268 -0.0792034 -3.16262 -0.00562268 0.999984 -0.000408328 -0.54901 0.0791999 0.000852374 0.996858 49.9005 0 0 0 1
56 0.996831 0.00375007 -0.0794568 -3.23868 -0.00354655 0.99999 0.00270227 -0.563036 0.0794661 -0.0024119 0.996835 50.8752 0 0 0 1
57 0.996805 0.00190455 -0.0798474 -3.31582 -0.00164885 0.999993 0.00326822 -0.574113 0.0798531 -0.00312612 0.996802 51.8394 0 0 0 1
58 0.996782 -0.00124932 -0.0801505 -3.39153 0.00141878 0.999997 0.0020573 -0.586659 0.0801477 -0.00216439 0.996781 52.8005 0 0 0 1
59 0.996745 -0.0038025 -0.0805262 -3.4676 0.0038689 0.999992 0.000668539 -0.59892 0.080523 -0.00097791 0.996752 53.7575 0 0 0 1
60 0.996643 -0.00519016 -0.0817059 -3.54489 0.00535256 0.999984 0.00176869 -0.60864 0.0816955 -0.00220009 0.996655 54.708 0 0 0 1
61 0.996534 -0.0079249 -0.0828082 -3.62139 0.00842977 0.999948 0.00574894 -0.618858 0.0827583 -0.00642707 0.996549 55.6588 0 0 0 1
62 0.996473 -0.00854289 -0.0834829 -3.69959 0.00945654 0.9999 0.0105549 -0.624401 0.0833844 -0.0113071 0.996453 56.6119 0 0 0 1
63 0.996447 -0.00664747 -0.083957 -3.78502 0.00773966 0.99989 0.0126902 -0.629769 0.0838633 -0.0132949 0.996389 57.5607 0 0 0 1
64 0.996335 -0.00522633 -0.0853755 -3.8689 0.00597793 0.999946 0.00855017 -0.636709 0.0853262 -0.0090292 0.996312 58.4941 0 0 0 1
65 0.996221 -0.00343661 -0.0867892 -3.95276 0.00350579 0.999994 0.000644619 -0.644008 0.0867865 -0.000946448 0.996226 59.4131 0 0 0 1
66 0.996144 -0.00149623 -0.0877201 -4.03806 0.00112725 0.99999 -0.00425562 -0.655271 0.0877256 0.00414033 0.996136 60.3236 0 0 0 1
67 0.996055 0.00375138 -0.0886573 -4.12895 -0.00406723 0.999986 -0.00338223 -0.671324 0.0886434 0.00372948 0.996056 61.2274 0 0 0 1
68 0.995922 0.00719305 -0.0899263 -4.21985 -0.0073202 0.999973 -0.00108421 -0.691307 0.089916 0.00173807 0.995948 62.125 0 0 0 1
69 0.99582 0.00967277 -0.0908194 -4.30702 -0.00966905 0.999953 0.000481019 -0.708494 0.0908198 0.000399128 0.995867 63.0134 0 0 0 1
70 0.995713 0.0102896 -0.0919182 -4.39131 -0.0103098 0.999947 0.000255248 -0.721276 0.091916 0.000693502 0.995767 63.8776 0 0 0 1
71 0.99554 0.0119225 -0.0935844 -4.477 -0.0118725 0.999929 0.00109156 -0.734766 0.0935908 2.43836e-05 0.995611 64.7307 0 0 0 1
72 0.995397 0.0126524 -0.0950024 -4.56121 -0.0125521 0.99992 0.00165348 -0.749039 0.0950157 -0.000453392 0.995476 65.5703 0 0 0 1
73 0.995256 0.0126635 -0.0964665 -4.64297 -0.0125254 0.999919 0.00203772 -0.761909 0.0964846 -0.00081977 0.995334 66.3938 0 0 0 1
74 0.995133 0.0127023 -0.0977168 -4.72623 -0.0124698 0.999918 0.00298947 -0.7711 0.0977468 -0.00175641 0.99521 67.2017 0 0 0 1
75 0.994948 0.015548 -0.0991814 -4.81287 -0.0150604 0.999871 0.00566291 -0.780301 0.0992566 -0.0041406 0.995053 67.9995 0 0 0 1
76 0.994794 0.0171065 -0.100462 -4.90076 -0.0162095 0.999821 0.009738 -0.788037 0.100611 -0.00805885 0.994893 68.7922 0 0 0 1
77 0.994568 0.0201446 -0.102122 -4.98771 -0.0190681 0.999752 0.0115061 -0.794955 0.102328 -0.00949629 0.994705 69.5653 0 0 0 1
78 0.99437 0.0229894 -0.10344 -5.07707 -0.0223435 0.999723 0.00739861 -0.804841 0.103581 -0.00504575 0.994608 70.3178 0 0 0 1
79 0.99423 0.0228747 -0.1048 -5.16223 -0.0229899 0.999736 0.000108822 -0.81531 0.104774 0.00230114 0.994493 71.0475 0 0 0 1
80 0.994077 0.0219938 -0.106431 -5.24094 -0.0227304 0.999725 -0.00571252 -0.825441 0.106276 0.00809789 0.994304 71.7584 0 0 0 1
81 0.994023 0.0228054 -0.106762 -5.32437 -0.0236929 0.999694 -0.00705161 -0.831667 0.106569 0.00953897 0.99426 72.4548 0 0 0 1
82 0.99386 0.0255543 -0.107653 -5.40648 -0.0260808 0.999654 -0.00348505 -0.846106 0.107527 0.00627133 0.994182 73.1337 0 0 0 1
83 0.993702 0.0257681 -0.109048 -5.48436 -0.02605 0.99966 -0.00116096 -0.865059 0.108981 0.00399435 0.994036 73.7942 0 0 0 1
84 0.99367 0.0225468 -0.110051 -5.5561 -0.0231761 0.999722 -0.00444219 -0.879535 0.10992 0.00696462 0.993916 74.4324 0 0 0 1
85 0.993802 0.0143509 -0.110234 -5.61528 -0.0155198 0.999832 -0.00975281 -0.89282 0.110075 0.0114032 0.993858 75.0504 0 0 0 1
86 0.993949 0.0102 -0.10937 -5.67796 -0.0118817 0.999821 -0.0147354 -0.904058 0.1092 0.0159457 0.993892 75.6535 0 0 0 1
87 0.994244 0.0126451 -0.106395 -5.74524 -0.014328 0.999784 -0.0150673 -0.916949 0.106181 0.016505 0.99421 76.2455 0 0 0 1
88 0.994592 0.0175824 -0.102356 -5.81375 -0.0189231 0.999747 -0.0121417 -0.930972 0.102117 0.0140129 0.994674 76.8222 0 0 0 1
89 0.995077 0.0159295 -0.0978149 -5.86699 -0.0169123 0.999814 -0.00922648 -0.942909 0.0976498 0.0108353 0.995162 77.3862 0 0 0 1
90 0.995665 0.0122046 -0.0922106 -5.90659 -0.0127358 0.999906 -0.00517412 -0.954487 0.0921387 0.00632606 0.995726 77.9355 0 0 0 1
91 0.996426 0.00781104 -0.084105 -5.94257 -0.00798227 0.999967 -0.0016998 -0.970792 0.0840889 0.00236507 0.996455 78.4692 0 0 0 1
92 0.997233 0.0114593 -0.0734453 -5.98049 -0.0116369 0.99993 -0.00198965 -0.981019 0.0734174 0.00283882 0.997297 78.9883 0 0 0 1
93 0.998165 0.0165636 -0.0582361 -6.0154 -0.0167456 0.999856 -0.0026387 -0.99003 0.058184 0.00360906 0.998299 79.4952 0 0 0 1
95 0.999635 0.0200255 -0.0181151 -6.02981 -0.0200623 0.999797 -0.00185529 -1.00502 0.0180742 0.00221804 0.999834 80.4727 0 0 0 1
97 0.999162 0.015548 0.037857 -5.96801 -0.0155684 0.999879 0.000243918 -1.02389 -0.0378486 -0.000833085 0.999283 81.4025 0 0 0 1
99 0.993959 0.0151454 0.108698 -5.84553 -0.0154328 0.999879 0.0018028 -1.04109 -0.108657 -0.00346942 0.994073 82.2952 0 0 0 1
101 0.980499 0.0151504 0.195937 -5.64466 -0.0157106 0.999876 0.00130478 -1.05761 -0.195893 -0.00435763 0.980616 83.1489 0 0 0 1
103 0.954186 0.0182833 0.298656 -5.36588 -0.0179595 0.999831 -0.00382887 -1.08348 -0.298675 -0.00171027 0.954353 83.9397 0 0 0 1
105 0.910736 0.0194893 0.412529 -4.99648 -0.0175815 0.99981 -0.00842014 -1.10057 -0.412615 0.000415655 0.910905 84.6633 0 0 0 1
107 0.848724 0.0183908 0.528517 -4.54701 -0.0135972 0.999824 -0.0129557 -1.12003 -0.528662 0.00380946 0.848824 85.2983 0 0 0 1
109 0.772259 0.0170098 0.63508 -4.0183 -0.0106749 0.999848 -0.0137989 -1.14244 -0.635218 0.00387688 0.772323 85.8601 0 0 0 1
111 0.684256 0.0156411 0.729074 -3.42903 -0.0102231 0.999877 -0.0118561 -1.16079 -0.72917 0.000659179 0.684332 86.3474 0 0 0 1
113 0.590745 0.011826 0.806772 -2.77931 -0.000173089 0.999894 -0.0145301 -1.17886 -0.806858 0.00844396 0.590684 86.7443 0 0 0 1
115 0.496173 0.0169181 0.868059 -2.10955 -0.00504039 0.999849 -0.0166057 -1.20339 -0.868209 0.00386392 0.496183 87.0847 0 0 0 1
117 0.408192 0.0165355 0.912746 -1.40862 0.00231553 0.999814 -0.0191484 -1.2249 -0.912893 0.00992974 0.408078 87.3396 0 0 0 1
119 0.333443 0.00543386 0.942754 -0.671521 0.0223493 0.999657 -0.0136666 -1.23662 -0.942505 0.025627 0.333208 87.522 0 0 0 1
121 0.269054 0.0173163 0.962969 0.0638526 0.00961829 0.99974 -0.0206648 -1.26307 -0.963077 0.0148221 0.268818 87.7199 0 0 0 1
123 0.214897 0.0233915 0.976357 0.843046 0.00677025 0.999653 -0.0254398 -1.30009 -0.976613 0.0120771 0.214664 87.8763 0 0 0 1
125 0.171479 0.031054 0.984698 1.66216 0.0212619 0.999154 -0.0352125 -1.3179 -0.984958 0.0269747 0.170674 87.9743 0 0 0 1
127 0.134011 0.0386308 0.990227 2.52547 0.0207141 0.998912 -0.041773 -1.34147 -0.990763 0.0261097 0.133065 88.0809 0 0 0 1
129 0.10418 0.0310179 0.994075 3.44652 0.0195614 0.999256 -0.0332297 -1.39013 -0.994366 0.0229074 0.103496 88.1692 0 0 0 1
131 0.0794366 0.027788 0.996453 4.42556 0.0261822 0.999208 -0.0299521 -1.42776 -0.996496 0.0284686 0.0786462 88.2295 0 0 0 1
132 0.0693462 0.028443 0.997187 4.93885 0.0294969 0.999098 -0.0305488 -1.44582 -0.997156 0.0315324 0.0684447 88.2553 0 0 0 1
133 0.0615414 0.0290168 0.997683 5.46907 0.0316982 0.999016 -0.0310108 -1.46406 -0.997601 0.0335332 0.0605611 88.2814 0 0 0 1
134 0.0559347 0.029371 0.998002 6.0151 0.0334765 0.99895 -0.0312751 -1.48373 -0.997873 0.035159 0.0548927 88.307 0 0 0 1
135 0.0504312 0.0304374 0.998264 6.58025 0.0349281 0.99887 -0.0322204 -1.50267 -0.998117 0.0364923 0.0493112 88.3306 0 0 0 1
136 0.0445067 0.0311103 0.998525 7.16082 0.0355578 0.998832 -0.0327048 -1.52353 -0.998376 0.0369609 0.0433485 88.3531 0 0 0 1
137 0.040243 0.0311989 0.998703 7.76375 0.0381603 0.998735 -0.0327376 -1.54487 -0.998461 0.0394283 0.0390016 88.3716 0 0 0 1
138 0.0373982 0.0312027 0.998813 8.38568 0.0397152 0.998676 -0.0326855 -1.56772 -0.998511 0.0408905 0.0361095 88.3901 0 0 0 1
139 0.0343726 0.0307634 0.998936 9.02449 0.0406913 0.998654 -0.0321549 -1.59059 -0.99858 0.0417533 0.0330745 88.4092 0 0 0 1
140 0.0320861 0.0302694 0.999027 9.68038 0.0427798 0.998584 -0.03163 -1.61442 -0.998569 0.043753 0.0307457 88.4263 0 0 0 1
141 0.0316452 0.0299561 0.99905 10.3542 0.0473602 0.998383 -0.0314363 -1.63856 -0.998376 0.04831 0.0301753 88.4381 0 0 0 1
142 0.0327723 0.029714 0.999021 11.0457 0.0507142 0.998221 -0.0313539 -1.66282 -0.998175 0.0516921 0.031207 88.4556 0 0 0 1
143 0.0353027 0.0297602 0.998933 11.7546 0.0522842 0.998133 -0.0315841 -1.68678 -0.998008 0.0533435 0.0336808 88.4781 0 0 0 1
144 0.0392372 0.0297502 0.998787 12.4771 0.0547241 0.997993 -0.0318763 -1.71289 -0.99773 0.0559084 0.0375304 88.5062 0 0 0 1
145 0.0437096 0.0293188 0.998614 13.219 0.0550685 0.997979 -0.0317105 -1.73922 -0.997525 0.0563782 0.0420067 88.5387 0 0 0 1
146 0.0477725 0.0278103 0.998471 13.9764 0.0564652 0.997939 -0.0304971 -1.76499 -0.997261 0.0578358 0.0461037 88.5751 0 0 0 1
147 0.0518486 0.0263145 0.998308 14.7472 0.0562418 0.997989 -0.0292271 -1.79222 -0.99707 0.057662 0.0502644 88.6178 0 0 0 1
148 0.0560658 0.0242863 0.998132 15.5313 0.0531494 0.998214 -0.0272738 -1.82056 -0.997011 0.0545792 0.0546748 88.6693 0 0 0 1
149 0.0600218 0.0233355 0.997924 16.3271 0.0522059 0.998285 -0.0264839 -1.84733 -0.996831 0.0536871 0.0587006 88.7243 0 0 0 1
150 0.0641513 0.0243795 0.997642 17.1258 0.0492204 0.998408 -0.0275632 -1.87761 -0.996726 0.0508726 0.0628492 88.7821 0 0 0 1
151 0.0672583 0.028483 0.997329 17.929 0.0470717 0.998389 -0.0316877 -1.91204 -0.996625 0.0490772 0.0658092 88.842 0 0 0 1
152 0.0688453 0.0337446 0.997056 18.7357 0.0413971 0.99847 -0.0366509 -1.9468 -0.996768 0.0437985 0.067343 88.9041 0 0 0 1
153 0.0686545 0.0370247 0.996953 19.5482 0.0387033 0.99846 -0.0397459 -1.98038 -0.996889 0.0413142 0.0671158 88.9665 0 0 0 1
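
Each line in the trajectory file above appears to be a frame index followed by the 16 entries of a row-major 4x4 homogeneous transform (the trailing "0 0 0 1" is the constant last row), and some frame indices are skipped. A minimal reading sketch under that assumption follows; the function name loadTrajectory and the use of Eigen are illustrative only, not part of this commit.
// Minimal sketch of a reader for the pose files above, assuming each line is
// "<frame index>" followed by 16 values of a row-major 4x4 homogeneous matrix.
// loadTrajectory and the use of Eigen here are illustrative only.
#include <Eigen/Dense>
#include <fstream>
#include <map>
#include <string>
std::map<int, Eigen::Matrix4d> loadTrajectory(const std::string& filename) {
  std::map<int, Eigen::Matrix4d> poses;   // keyed by frame index (indices may skip)
  std::ifstream in(filename.c_str());
  int index;
  while (in >> index) {
    Eigen::Matrix4d T;
    for (int r = 0; r < 4; ++r)
      for (int c = 0; c < 4; ++c)
        in >> T(r, c);                    // values are stored row by row
    poses[index] = T;                     // last row should read 0 0 0 1
  }
  return poses;
}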

View File

@ -0,0 +1,77 @@
0 1 0 0 0 0 1 0 0 -0 0 1 0 0 0 0 1
1 0.99999 -0.00268679 -0.00354618 6.43221e-05 0.00267957 0.999994 -0.00204036 -0.0073023 0.00355164 0.00203084 0.999992 0.676456 0 0 0 1
2 0.999969 -0.00120771 -0.00772489 -0.0100328 0.00117985 0.999993 -0.003611 -0.0111185 0.00772919 0.00360178 0.999964 1.37125 0 0 0 1
3 0.999931 -0.00128098 -0.0117006 -0.0237327 0.00122052 0.999986 -0.00517227 -0.0136538 0.0117071 0.00515763 0.999918 2.08563 0 0 0 1
4 0.99986 5.79321e-05 -0.0167106 -0.0402272 -0.000155312 0.999983 -0.00582618 -0.0194327 0.01671 0.00582796 0.999843 2.81528 0 0 0 1
5 0.999772 -0.00118366 -0.0213077 -0.0572378 0.0010545 0.999981 -0.00607208 -0.0278191 0.0213145 0.00604822 0.999755 3.56204 0 0 0 1
6 0.999662 0.000544425 -0.0259946 -0.081545 -0.000735472 0.999973 -0.00734051 -0.0358844 0.0259899 0.00735714 0.999635 4.32265 0 0 0 1
7 0.999513 0.0032602 -0.0310324 -0.112137 -0.0035101 0.999962 -0.00800188 -0.0447209 0.0310051 0.00810691 0.999486 5.09668 0 0 0 1
8 0.999361 0.00349173 -0.0355658 -0.143594 -0.00372162 0.999973 -0.00639979 -0.0532611 0.0355425 0.00652807 0.999347 5.88701 0 0 0 1
9 0.999185 0.00268131 -0.040271 -0.176401 -0.0028332 0.999989 -0.00371493 -0.0632884 0.0402606 0.003826 0.999182 6.6897 0 0 0 1
10 0.99903 0.00226305 -0.0439747 -0.211687 -0.00231163 0.999997 -0.00105382 -0.072362 0.0439722 0.00115445 0.999032 7.50361 0 0 0 1
11 0.998896 0.00366482 -0.0468376 -0.254125 -0.00374515 0.999992 -0.00162734 -0.0820263 0.0468312 0.00180096 0.998901 8.32333 0 0 0 1
12 0.998775 0.00304285 -0.0493866 -0.295424 -0.00313866 0.999993 -0.00186268 -0.0885739 0.0493806 0.00201541 0.998778 9.15211 0 0 0 1
13 0.998682 7.09894e-05 -0.0513155 -0.334647 -0.000203775 0.999997 -0.00258241 -0.0938889 0.0513152 0.00258946 0.998679 9.98839 0 0 0 1
14 0.998565 -8.82523e-05 -0.0535542 -0.380835 -9.36659e-06 0.999998 -0.00182255 -0.10173 0.0535542 0.00182044 0.998563 10.832 0 0 0 1
15 0.998481 -0.00146793 -0.0550718 -0.429135 0.0013525 0.999997 -0.00213307 -0.111427 0.0550748 0.00205535 0.99848 11.687 0 0 0 1
16 0.998373 0.000738731 -0.0570218 -0.483426 -0.000993083 0.99999 -0.00443241 -0.122139 0.0570179 0.00448183 0.998363 12.5483 0 0 0 1
17 0.998285 0.00120595 -0.0585258 -0.540056 -0.00162301 0.999974 -0.00707907 -0.132598 0.0585158 0.00716191 0.998261 13.4179 0 0 0 1
18 0.998165 0.00516151 -0.060337 -0.6023 -0.00570195 0.999945 -0.00878826 -0.143753 0.0602883 0.00911617 0.998139 14.2952 0 0 0 1
19 0.998101 0.00610094 -0.0612993 -0.66308 -0.00663017 0.999942 -0.00843386 -0.157854 0.0612443 0.00882427 0.998084 15.1802 0 0 0 1
20 0.998014 0.0052997 -0.0627662 -0.722045 -0.00574767 0.999959 -0.0069587 -0.172847 0.0627268 0.00730564 0.998004 16.074 0 0 0 1
21 0.99792 0.00591748 -0.0641975 -0.78346 -0.00627924 0.999966 -0.00543487 -0.186221 0.0641631 0.00582667 0.997922 16.9738 0 0 0 1
22 0.997857 0.00547694 -0.0651993 -0.845347 -0.00584101 0.999968 -0.00539455 -0.199741 0.0651677 0.00576382 0.997858 17.8786 0 0 0 1
23 0.997737 0.00536917 -0.0670282 -0.908218 -0.00579979 0.999964 -0.0062316 -0.212775 0.0669924 0.00660624 0.997732 18.7877 0 0 0 1
24 0.997663 0.00386695 -0.0682185 -0.971291 -0.00435203 0.999966 -0.00696344 -0.226442 0.0681893 0.00724406 0.997646 19.7046 0 0 0 1
25 0.997629 0.00410637 -0.0687004 -1.03663 -0.00448288 0.999976 -0.00532714 -0.239555 0.0686769 0.00562249 0.997623 20.6257 0 0 0 1
26 0.997617 0.00588773 -0.0687501 -1.10557 -0.0060349 0.99998 -0.00193325 -0.254273 0.0687373 0.00234355 0.997632 21.55 0 0 0 1
27 0.997662 0.00693766 -0.0679906 -1.17297 -0.00682806 0.999975 0.0018442 -0.26563 0.0680017 -0.00137565 0.997684 22.4875 0 0 0 1
28 0.997774 0.00579785 -0.0664343 -1.23728 -0.00550265 0.999974 0.00462554 -0.271962 0.0664594 -0.00424968 0.99778 23.4285 0 0 0 1
29 0.997872 0.00589563 -0.0649408 -1.30214 -0.00556012 0.99997 0.00534586 -0.277922 0.0649704 -0.0049734 0.997875 24.3732 0 0 0 1
30 0.997958 0.00627024 -0.0635595 -1.36462 -0.00612984 0.999978 0.00240374 -0.285335 0.0635732 -0.00200922 0.997975 25.314 0 0 0 1
31 0.998004 0.00714074 -0.0627411 -1.42783 -0.00731158 0.99997 -0.00249375 -0.293171 0.0627215 0.00294751 0.998027 26.2605 0 0 0 1
32 0.99808 0.0063692 -0.0616159 -1.48954 -0.00671918 0.999962 -0.00547459 -0.302321 0.0615787 0.00587809 0.998085 27.2168 0 0 0 1
33 0.99813 0.00376787 -0.0610159 -1.54654 -0.00404632 0.999982 -0.0044408 -0.313516 0.0609981 0.00467938 0.998127 28.1829 0 0 0 1
34 0.998113 0.00193972 -0.0613743 -1.60668 -0.00191171 0.999998 0.000515183 -0.324411 0.0613752 -0.000396881 0.998115 29.1626 0 0 0 1
35 0.99806 -0.0017885 -0.062228 -1.66532 0.00203402 0.99999 0.00388232 -0.335656 0.0622204 -0.00400136 0.998054 30.1428 0 0 0 1
36 0.997945 -0.00917543 -0.0634115 -1.72059 0.00939451 0.999951 0.00315749 -0.343316 0.0633794 -0.00374672 0.997982 31.1244 0 0 0 1
37 0.997825 -0.0112684 -0.0649459 -1.78049 0.011242 0.999937 -0.000771312 -0.350864 0.0649504 3.95099e-05 0.997888 32.1064 0 0 0 1
38 0.997739 -0.0110126 -0.0662983 -1.85007 0.0107254 0.999932 -0.00468596 -0.361068 0.0663454 0.00396429 0.997789 33.0886 0 0 0 1
39 0.997597 -0.00959503 -0.0686163 -1.92119 0.00924037 0.999942 -0.00548426 -0.373466 0.0686649 0.00483704 0.997628 34.0774 0 0 0 1
40 0.99755 -0.0095802 -0.0693031 -1.99331 0.00931271 0.999948 -0.00418184 -0.387047 0.0693396 0.00352619 0.997587 35.0736 0 0 0 1
41 0.997473 -0.00634387 -0.0707596 -2.0707 0.00626661 0.99998 -0.0013139 -0.403858 0.0707665 0.00086716 0.997493 36.0721 0 0 0 1
42 0.99739 -0.00624366 -0.0719343 -2.14553 0.00625582 0.99998 -5.62375e-05 -0.416888 0.0719332 -0.000393917 0.997409 37.0728 0 0 0 1
43 0.997312 -0.00473093 -0.0731254 -2.21909 0.00492848 0.999985 0.00252135 -0.428625 0.0731123 -0.00287497 0.99732 38.0643 0 0 0 1
44 0.997318 -0.00467696 -0.0730348 -2.29215 0.00509473 0.999972 0.00553481 -0.440023 0.0730068 -0.00589206 0.997314 39.0618 0 0 0 1
45 0.997274 0.00138304 -0.0737801 -2.37574 -0.000811217 0.999969 0.00777971 -0.447869 0.0737886 -0.00769865 0.997244 40.0548 0 0 0 1
46 0.997262 0.00149131 -0.0739326 -2.45529 -0.000969511 0.999974 0.00709318 -0.454763 0.0739413 -0.00700208 0.997238 41.0557 0 0 0 1
47 0.997266 0.00175929 -0.0738699 -2.53081 -0.00136899 0.999985 0.00533379 -0.460519 0.0738782 -0.00521809 0.997254 42.0518 0 0 0 1
48 0.997253 0.00408494 -0.0739555 -2.61212 -0.00386552 0.999988 0.00310988 -0.469863 0.0739673 -0.00281546 0.997257 43.0493 0 0 0 1
49 0.997185 0.00365371 -0.0748884 -2.68799 -0.00342799 0.999989 0.00314243 -0.47951 0.0748991 -0.00287687 0.997187 44.0473 0 0 0 1
50 0.997077 0.00181435 -0.0763845 -2.76071 -0.00149292 0.99999 0.00426495 -0.487845 0.0763915 -0.00413845 0.997069 45.0403 0 0 0 1
51 0.997018 0.00246727 -0.0771352 -2.84117 -0.00206285 0.999984 0.00532227 -0.499132 0.0771471 -0.00514727 0.997006 46.0244 0 0 0 1
52 0.996991 0.00504805 -0.0773507 -2.92304 -0.00493379 0.999986 0.00166824 -0.510863 0.0773581 -0.00128158 0.997003 46.994 0 0 0 1
53 0.996911 0.00581773 -0.0783264 -3.00373 -0.00604061 0.999978 -0.00260888 -0.521193 0.0783095 0.00307396 0.996924 47.9551 0 0 0 1
54 0.996846 0.00678413 -0.0790757 -3.08343 -0.00711636 0.999967 -0.00392044 -0.534186 0.0790465 0.00447081 0.996861 48.9236 0 0 0 1
55 0.996843 0.00557268 -0.0792034 -3.16262 -0.00562268 0.999984 -0.000408328 -0.54901 0.0791999 0.000852374 0.996858 49.9005 0 0 0 1
56 0.996831 0.00375007 -0.0794568 -3.23868 -0.00354655 0.99999 0.00270227 -0.563036 0.0794661 -0.0024119 0.996835 50.8752 0 0 0 1
57 0.996805 0.00190455 -0.0798474 -3.31582 -0.00164885 0.999993 0.00326822 -0.574113 0.0798531 -0.00312612 0.996802 51.8394 0 0 0 1
58 0.996782 -0.00124932 -0.0801505 -3.39153 0.00141878 0.999997 0.0020573 -0.586659 0.0801477 -0.00216439 0.996781 52.8005 0 0 0 1
59 0.996745 -0.0038025 -0.0805262 -3.4676 0.0038689 0.999992 0.000668539 -0.59892 0.080523 -0.00097791 0.996752 53.7575 0 0 0 1
60 0.996643 -0.00519016 -0.0817059 -3.54489 0.00535256 0.999984 0.00176869 -0.60864 0.0816955 -0.00220009 0.996655 54.708 0 0 0 1
61 0.996534 -0.0079249 -0.0828082 -3.62139 0.00842977 0.999948 0.00574894 -0.618858 0.0827583 -0.00642707 0.996549 55.6588 0 0 0 1
62 0.996473 -0.00854289 -0.0834829 -3.69959 0.00945654 0.9999 0.0105549 -0.624401 0.0833844 -0.0113071 0.996453 56.6119 0 0 0 1
63 0.996447 -0.00664747 -0.083957 -3.78502 0.00773966 0.99989 0.0126902 -0.629769 0.0838633 -0.0132949 0.996389 57.5607 0 0 0 1
64 0.996335 -0.00522633 -0.0853755 -3.8689 0.00597793 0.999946 0.00855017 -0.636709 0.0853262 -0.0090292 0.996312 58.4941 0 0 0 1
65 0.996221 -0.00343661 -0.0867892 -3.95276 0.00350579 0.999994 0.000644619 -0.644008 0.0867865 -0.000946448 0.996226 59.4131 0 0 0 1
66 0.996144 -0.00149623 -0.0877201 -4.03806 0.00112725 0.99999 -0.00425562 -0.655271 0.0877256 0.00414033 0.996136 60.3236 0 0 0 1
67 0.996055 0.00375138 -0.0886573 -4.12895 -0.00406723 0.999986 -0.00338223 -0.671324 0.0886434 0.00372948 0.996056 61.2274 0 0 0 1
68 0.995922 0.00719305 -0.0899263 -4.21985 -0.0073202 0.999973 -0.00108421 -0.691307 0.089916 0.00173807 0.995948 62.125 0 0 0 1
69 0.99582 0.00967277 -0.0908194 -4.30702 -0.00966905 0.999953 0.000481019 -0.708494 0.0908198 0.000399128 0.995867 63.0134 0 0 0 1
70 0.995713 0.0102896 -0.0919182 -4.39131 -0.0103098 0.999947 0.000255248 -0.721276 0.091916 0.000693502 0.995767 63.8776 0 0 0 1
71 0.99554 0.0119225 -0.0935844 -4.477 -0.0118725 0.999929 0.00109156 -0.734766 0.0935908 2.43836e-05 0.995611 64.7307 0 0 0 1
72 0.995397 0.0126524 -0.0950024 -4.56121 -0.0125521 0.99992 0.00165348 -0.749039 0.0950157 -0.000453392 0.995476 65.5703 0 0 0 1
73 0.995256 0.0126635 -0.0964665 -4.64297 -0.0125254 0.999919 0.00203772 -0.761909 0.0964846 -0.00081977 0.995334 66.3938 0 0 0 1
74 0.995133 0.0127023 -0.0977168 -4.72623 -0.0124698 0.999918 0.00298947 -0.7711 0.0977468 -0.00175641 0.99521 67.2017 0 0 0 1
75 0.994948 0.015548 -0.0991814 -4.81287 -0.0150604 0.999871 0.00566291 -0.780301 0.0992566 -0.0041406 0.995053 67.9995 0 0 0 1
76 0.994794 0.0171065 -0.100462 -4.90076 -0.0162095 0.999821 0.009738 -0.788037 0.100611 -0.00805885 0.994893 68.7922 0 0 0 1

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,71 @@
VERTEX_SE3:QUAT 0 1.63791e-12 7.56548e-14 -3.02811e-12 5.35657e-13 2.43616e-13 9.71152e-14 1
VERTEX_SE3:QUAT 1 1.01609 0.00274307 -0.0351514 -0.499545 0.247735 0.723569 -0.406854
VERTEX_SE3:QUAT 2 1.99996 0.0304956 -0.040662 0.403501 -0.294714 -0.4254 0.754563
VERTEX_SE3:QUAT 3 1.94371 1.06535 0.0118614 -0.0471731 -0.541615 0.820893 0.17482
VERTEX_SE3:QUAT 4 0.962753 0.999477 0.0211017 -0.19663 -0.66009 0.470743 0.551379
VERTEX_SE3:QUAT 5 -0.00956768 0.965396 -0.021854 -0.320221 -0.518368 0.47521 0.634766
VERTEX_SE3:QUAT 6 -0.0863793 1.97682 0.000531117 -0.0173439 -0.573793 -0.450627 0.683663
VERTEX_SE3:QUAT 7 0.918905 2.01556 -0.0139773 0.56169 -0.440513 0.199057 0.671438
VERTEX_SE3:QUAT 8 1.92094 2.05524 0.0469884 0.0073084 -0.372357 -0.467582 0.801663
VERTEX_SE3:QUAT 9 1.86182 2.05449 1.09237 0.0131731 -0.05784 0.0335652 0.997674
VERTEX_SE3:QUAT 10 0.880176 2.02406 1.00997 -0.39342 -0.287909 0.757918 0.433462
VERTEX_SE3:QUAT 11 -0.0960463 1.98653 0.995791 0.434103 -0.199044 0.585176 0.655367
VERTEX_SE3:QUAT 12 -0.0911401 0.997117 0.988217 -0.0925477 0.572872 0.537294 0.612019
VERTEX_SE3:QUAT 13 0.948316 1.02239 0.991745 0.142484 0.560062 0.750078 0.321578
VERTEX_SE3:QUAT 14 1.92631 1.08945 1.06749 0.23878 0.380837 0.796564 -0.404269
VERTEX_SE3:QUAT 15 1.95398 0.0777667 0.982353 -0.384392 0.58733 0.685207 -0.194366
VERTEX_SE3:QUAT 16 0.946032 0.0482667 0.952308 -0.218979 0.186315 -0.494185 0.820437
VERTEX_SE3:QUAT 17 -0.0625076 -0.034424 0.942171 0.514725 -0.185043 -0.44771 0.707371
VERTEX_SE3:QUAT 18 -0.083807 -0.0106666 1.9853 0.00792651 1.98919e-05 -0.00128106 0.999968
VERTEX_SE3:QUAT 19 0.918067 -0.000897795 1.92157 -0.342141 0.241241 -0.726975 0.544288
VERTEX_SE3:QUAT 20 1.90041 0.0323631 2.00636 0.412572 -0.0930131 -0.133075 0.896339
VERTEX_SE3:QUAT 21 1.84895 1.05013 2.0738 -0.580757 0.35427 0.729393 -0.0721062
VERTEX_SE3:QUAT 22 0.880221 1.00671 1.99021 0.147752 0.355662 0.917953 0.095058
VERTEX_SE3:QUAT 23 -0.0950872 1.00374 1.95013 -0.29909 -0.0578461 0.857019 0.415594
VERTEX_SE3:QUAT 24 -0.111581 1.97979 1.98762 0.565153 0.214463 -0.523058 0.600848
VERTEX_SE3:QUAT 25 0.837568 2.01589 2.03075 -0.284756 0.369992 0.875484 -0.124692
VERTEX_SE3:QUAT 26 1.82708 2.05081 2.07052 0.254696 0.250865 0.653216 0.667462
EDGE_SE3:QUAT 0 1 1.00497 0.002077 -0.015539 -0.508004 0.250433 0.711222 -0.416386 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 1 2 -0.200593 0.339956 -0.908079 -0.093598 0.151993 0.42829 0.885836 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 2 3 -0.922791 0.330629 -0.292682 0.365657 -0.051986 0.924849 -0.090813 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 3 4 0.893075 0.246476 0.331154 -0.285927 0.341221 -0.267609 0.854517 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 4 5 0.280674 0.244242 0.923726 0.035064 0.21101 0.083834 0.973251 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 5 6 0.955621 0.355669 -0.025152 -0.306713 0.131221 -0.781587 0.527096 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 6 7 -0.076631 0.636081 -0.771439 0.702021 0.326514 0.122181 0.620988 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 7 8 0.582761 -0.721177 -0.376875 -0.733841 -0.170725 -0.256653 0.605359 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 8 9 0.600312 0.298765 0.767014 0.057612 0.332574 0.486324 0.805956 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 9 10 -0.986649 0.03008 -0.008766 -0.362177 -0.253215 0.763748 0.470531 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 10 11 0.275109 0.534769 0.823463 0.450708 -0.472399 -0.432689 0.621677 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 11 12 -0.61882 0.024878 0.773748 0.0927029 0.786162 -0.21122 0.573359 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 12 13 -0.175537 -0.730832 0.634529 -0.018628 0.006375 0.428306 0.903419 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 13 14 -0.700208 -0.245198 0.637353 -0.035865 0.273394 0.645363 0.712374 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 14 15 0.373495 0.373768 -0.846199 0.400323 0.310362 -0.422222 0.751762 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 15 16 0.648588 0.157829 0.72252 0.781502 -0.210141 -0.501005 -0.30674 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 16 17 -0.390339 -0.702656 -0.572321 0.765815 0.055816 0.032478 0.63981 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 17 18 -0.261114 0.908685 0.421318 -0.501833 0.166567 0.448468 0.720622 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 18 19 1.00815 0.012634 -0.029822 -0.347007 0.205082 -0.740641 0.537569 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 19 20 -0.162376 0.581623 0.810804 0.628338 0.075411 0.650639 0.41973 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 20 21 -0.358942 0.627689 -0.704045 -0.469133 0.542456 0.530583 -0.451816 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 21 22 0.362417 0.298352 0.854822 0.004058 -0.696926 0.140345 0.703265 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 22 23 0.934942 0.020321 -0.358044 -0.445461 0.260916 -0.379862 0.767589 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 23 24 0.741887 -0.657659 0.215293 -0.584859 0.196138 0.688031 0.38221 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 24 25 0.300145 0.82011 -0.39974 0.46538 -0.593595 -0.202131 0.624668 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 25 26 -0.85591 0.022701 -0.510794 0.12929 -0.685192 -0.503707 0.509978 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 0 5 0.026721 0.990497 -0.007651 -0.317476 -0.510239 0.467341 0.648427 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 3 8 0.390516 -0.401461 -0.830724 0.503106 -0.367814 0.780584 0.047806 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 4 1 -0.813838 -0.446181 0.319175 0.224903 -0.031827 0.97265 0.048561 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 4 13 0.571273 -0.805401 0.077339 0.892031 0.329761 0.275468 0.140201 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 5 12 0.389794 -0.882655 0.268063 0.712423 0.550662 0.275339 0.33677 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 6 11 0.800298 0.505022 0.361738 0.739335 0.419366 0.443817 0.283801 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 10 13 -0.912531 0.430955 -0.018942 0.830493 -0.093519 0.272041 0.477001 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 12 23 -0.797606 0.437737 0.311476 -0.657137 -0.196625 0.136652 0.714728 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 13 22 -0.116836 0.952032 0.269398 -0.216437 0.086571 0.260965 0.936781 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 14 21 0.749295 0.373389 0.581641 0.253048 0.511007 -0.537262 0.621439 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 16 1 0.160985 0.555966 -0.811911 0.748057 0.122381 -0.369631 0.537407 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 18 23 0.028909 1.02689 -0.00265 -0.294167 -0.071607 0.850901 0.429308 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 19 16 -0.230711 0.750637 -0.607511 0.14647 -0.102538 0.297899 0.937704 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 20 15 -0.031986 -0.741129 -0.728721 -0.278926 0.731172 0.404675 -0.473103 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 22 19 -0.332601 0.704401 -0.687251 -0.372165 -0.054346 0.713024 0.591725 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 22 25 0.347067 -0.634646 0.657147 0.018567 0.476762 0.040939 0.877882 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 25 10 0.388971 -0.723981 -0.559653 -0.373459 -0.014654 -0.696123 0.612965 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400
EDGE_SE3:QUAT 26 21 -0.979482 -0.024822 0.043763 -0.326753 0.819942 0.292615 0.367837 2500 0 0 0 0 0 2500 0 0 0 0 2500 0 0 0 400 0 0 400 0 400

View File

@ -0,0 +1,3 @@
VERTEX_SE3:QUAT 0 0 0 0 0 0 0 1
VERTEX_SE3:QUAT 1 1.00137 0.01539 0.004948 0.190253 0.283162 -0.392318 0.85423
EDGE_SE3:QUAT 0 1 1.00137 0.01539 0.004948 0.190253 0.283162 -0.392318 0.85423 10000 1 1 1 1 1 10000 2 2 2 2 10000 3 3 3 10000 4 4 10000 5 10000
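
For reference, the g2o text files added above use the SE(3) quaternion convention: a VERTEX_SE3:QUAT line is an id followed by x y z qx qy qz qw, and an EDGE_SE3:QUAT line is two ids, the relative pose in the same form, and the 21 upper-triangular entries of the 6x6 information matrix. Below is a minimal sketch of loading such a file with the readG2o call used by the examples later in this commit; the printed fields are illustrative.
// Minimal loading sketch, mirroring the readG2o usage in the examples below.
#include <gtsam/slam/dataset.h>
#include <boost/tuple/tuple.hpp>
#include <iostream>
using namespace gtsam;
int main() {
  NonlinearFactorGraph::shared_ptr graph;
  Values::shared_ptr initial;
  bool is3D = true;  // VERTEX_SE3:QUAT / EDGE_SE3:QUAT describe 3D poses
  boost::tie(graph, initial) = readG2o(findExampleDataFile("pose3example.txt"), is3D);
  std::cout << "factors: " << graph->size()
            << "  initial values: " << initial->size() << std::endl;
  return 0;
}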

View File

@ -0,0 +1,3 @@
VERTEX_SE3:QUAT 0 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 1.000000
VERTEX_SE3:QUAT 1 1.001367 0.015390 0.004948 0.190253 0.283162 -0.392318 0.854230
EDGE_SE3:QUAT 0 1 1.001367 0.015390 0.004948 0.190253 0.283162 -0.392318 0.854230 10000.000000 1.000000 1.000000 1.000000 1.000000 1.000000 10000.000000 2.000000 2.000000 2.000000 2.000000 10000.000000 3.000000 3.000000 3.000000 10000.000000 4.000000 4.000000 10000.000000 5.000000 10000.0000

View File

@ -0,0 +1,11 @@
VERTEX_SE3:QUAT 0 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 1.000000
VERTEX_SE3:QUAT 1 1.001367 0.015390 0.004948 0.190253 0.283162 -0.392318 0.854230
VERTEX_SE3:QUAT 2 1.993500 0.023275 0.003793 -0.351729 -0.597838 0.584174 0.421446
VERTEX_SE3:QUAT 3 2.004291 1.024305 0.018047 0.331798 -0.200659 0.919323 0.067024
VERTEX_SE3:QUAT 4 0.999908 1.055073 0.020212 -0.035697 -0.462490 0.445933 0.765488
EDGE_SE3:QUAT 0 1 1.001367 0.015390 0.004948 0.190253 0.283162 -0.392318 0.854230 10000.000000 0.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 10000.000000 0.000000 10000.000000
EDGE_SE3:QUAT 1 2 0.523923 0.776654 0.326659 0.311512 0.656877 -0.678505 0.105373 10000.000000 0.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 10000.000000 0.000000 10000.000000
EDGE_SE3:QUAT 2 3 0.910927 0.055169 -0.411761 0.595795 -0.561677 0.079353 0.568551 10000.000000 0.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 10000.000000 0.000000 10000.000000
EDGE_SE3:QUAT 3 4 0.775288 0.228798 -0.596923 -0.592077 0.303380 -0.513226 0.542221 10000.000000 0.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 10000.000000 0.000000 10000.000000
EDGE_SE3:QUAT 1 4 -0.577841 0.628016 -0.543592 -0.125250 -0.534379 0.769122 0.327419 10000.000000 0.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 10000.000000 0.000000 10000.000000
EDGE_SE3:QUAT 3 0 -0.623267 0.086928 0.773222 0.104639 0.627755 0.766795 0.083672 10000.000000 0.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 10000.000000 0.000000 10000.000000

View File

@ -0,0 +1,11 @@
VERTEX_SE3:QUAT 0 0.000000 0.000000 0.000000 0.0008187 0.0011723 0.0895466 0.9959816
VERTEX_SE3:QUAT 1 0.000000 -0.000000 0.000000 0.0010673 0.0015636 0.1606931 0.9870026
VERTEX_SE3:QUAT 2 -0.388822 0.632954 0.001223 0.0029920 0.0014066 0.0258235 0.9996610
VERTEX_SE3:QUAT 3 -1.143204 0.050638 0.006026 -0.0012800 -0.0002767 -0.2850291 0.9585180
VERTEX_SE3:QUAT 4 -0.512416 0.486441 0.005171 0.0002681 0.0023574 0.0171476 0.9998502
EDGE_SE3:QUAT 1 2 1.000000 2.000000 0.000000 0.0000000 0.0000000 0.7071068 0.7071068 100.000000 0.000000 0.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 100.000000 0.000000 100.000000
EDGE_SE3:QUAT 2 3 -0.000000 1.000000 0.000000 0.0000000 0.0000000 0.7071068 0.7071068 100.000000 0.000000 0.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 100.000000 0.000000 100.000000
EDGE_SE3:QUAT 3 4 1.000000 1.000000 0.000000 0.0000000 0.0000000 0.7071068 0.7071068 100.000000 0.000000 0.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 100.000000 0.000000 100.000000
EDGE_SE3:QUAT 3 1 0.000001 2.000000 0.000000 0.0000000 0.0000000 1.0000000 0.0000002 100.000000 0.000000 0.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 100.000000 0.000000 100.000000
EDGE_SE3:QUAT 1 4 -1.000000 1.000000 0.000000 0.0000000 0.0000000 -0.7071068 0.7071068 100.000000 0.000000 0.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 100.000000 0.000000 100.000000
EDGE_SE3:QUAT 0 1 0.000000 0.000000 0.000000 0.0000000 0.0000000 0.0000000 1.0000000 100.000000 0.000000 0.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 0.000000 100.000000 0.000000 0.000000 100.000000 0.000000 100.000000

View File

@ -120,15 +120,15 @@ int main(int argc, char** argv) {
// For simplicity, we will use the same noise model for each odometry factor // For simplicity, we will use the same noise model for each odometry factor
noiseModel::Diagonal::shared_ptr odometryNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.2, 0.2, 0.1)); noiseModel::Diagonal::shared_ptr odometryNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.2, 0.2, 0.1));
// Create odometry (Between) factors between consecutive poses // Create odometry (Between) factors between consecutive poses
graph.push_back(BetweenFactor<Pose2>(1, 2, Pose2(2.0, 0.0, 0.0), odometryNoise)); graph.add(BetweenFactor<Pose2>(1, 2, Pose2(2.0, 0.0, 0.0), odometryNoise));
graph.push_back(BetweenFactor<Pose2>(2, 3, Pose2(2.0, 0.0, 0.0), odometryNoise)); graph.add(BetweenFactor<Pose2>(2, 3, Pose2(2.0, 0.0, 0.0), odometryNoise));
// 2b. Add "GPS-like" measurements // 2b. Add "GPS-like" measurements
// We will use our custom UnaryFactor for this. // We will use our custom UnaryFactor for this.
noiseModel::Diagonal::shared_ptr unaryNoise = noiseModel::Diagonal::Sigmas((Vector(2) << 0.1, 0.1)); // 10cm std on x,y noiseModel::Diagonal::shared_ptr unaryNoise = noiseModel::Diagonal::Sigmas((Vector(2) << 0.1, 0.1)); // 10cm std on x,y
graph.push_back(boost::make_shared<UnaryFactor>(1, 0.0, 0.0, unaryNoise)); graph.add(boost::make_shared<UnaryFactor>(1, 0.0, 0.0, unaryNoise));
graph.push_back(boost::make_shared<UnaryFactor>(2, 2.0, 0.0, unaryNoise)); graph.add(boost::make_shared<UnaryFactor>(2, 2.0, 0.0, unaryNoise));
graph.push_back(boost::make_shared<UnaryFactor>(3, 4.0, 0.0, unaryNoise)); graph.add(boost::make_shared<UnaryFactor>(3, 4.0, 0.0, unaryNoise));
graph.print("\nFactor Graph:\n"); // print graph.print("\nFactor Graph:\n"); // print
// 3. Create the data structure to hold the initialEstimate estimate to the solution // 3. Create the data structure to hold the initialEstimate estimate to the solution
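
The localization example above relies on a custom UnaryFactor whose class definition lies outside this excerpt. For orientation, here is a sketch of what such a "GPS-like" unary factor on a Pose2 typically looks like, following GTSAM's NoiseModelFactor1 pattern; the member names mx_, my_ and the exact Jacobian are assumptions, not the committed code.
// Sketch only: a unary factor that pulls the (x, y) of a Pose2 toward a
// measured position. mx_, my_ and the Jacobian layout are assumptions.
#include <gtsam/nonlinear/NonlinearFactor.h>
#include <gtsam/geometry/Pose2.h>
class UnaryFactor : public gtsam::NoiseModelFactor1<gtsam::Pose2> {
  double mx_, my_;  // measured (x, y)
public:
  UnaryFactor(gtsam::Key key, double x, double y, const gtsam::SharedNoiseModel& model)
      : gtsam::NoiseModelFactor1<gtsam::Pose2>(model, key), mx_(x), my_(y) {}
  gtsam::Vector evaluateError(const gtsam::Pose2& q,
      boost::optional<gtsam::Matrix&> H = boost::none) const {
    if (H) {
      // Derivative of the world-frame translation w.r.t. the local Pose2 update
      double c = q.rotation().c(), s = q.rotation().s();
      gtsam::Matrix J(2, 3);
      J << c, -s, 0.0,
           s,  c, 0.0;
      *H = J;
    }
    gtsam::Vector e(2);
    e << q.x() - mx_, q.y() - my_;
    return e;
  }
};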

View File

@ -65,15 +65,15 @@ int main(int argc, char** argv) {
// A prior factor consists of a mean and a noise model (covariance matrix) // A prior factor consists of a mean and a noise model (covariance matrix)
Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin
noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.3, 0.3, 0.1)); noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.3, 0.3, 0.1));
graph.push_back(PriorFactor<Pose2>(1, priorMean, priorNoise)); graph.add(PriorFactor<Pose2>(1, priorMean, priorNoise));
// Add odometry factors // Add odometry factors
Pose2 odometry(2.0, 0.0, 0.0); Pose2 odometry(2.0, 0.0, 0.0);
// For simplicity, we will use the same noise model for each odometry factor // For simplicity, we will use the same noise model for each odometry factor
noiseModel::Diagonal::shared_ptr odometryNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.2, 0.2, 0.1)); noiseModel::Diagonal::shared_ptr odometryNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.2, 0.2, 0.1));
// Create odometry (Between) factors between consecutive poses // Create odometry (Between) factors between consecutive poses
graph.push_back(BetweenFactor<Pose2>(1, 2, odometry, odometryNoise)); graph.add(BetweenFactor<Pose2>(1, 2, odometry, odometryNoise));
graph.push_back(BetweenFactor<Pose2>(2, 3, odometry, odometryNoise)); graph.add(BetweenFactor<Pose2>(2, 3, odometry, odometryNoise));
graph.print("\nFactor Graph:\n"); // print graph.print("\nFactor Graph:\n"); // print
// Create the data structure to hold the initialEstimate estimate to the solution // Create the data structure to hold the initialEstimate estimate to the solution

View File

@ -81,13 +81,13 @@ int main(int argc, char** argv) {
// Add a prior on pose x1 at the origin. A prior factor consists of a mean and a noise model (covariance matrix) // Add a prior on pose x1 at the origin. A prior factor consists of a mean and a noise model (covariance matrix)
Pose2 prior(0.0, 0.0, 0.0); // prior mean is at origin Pose2 prior(0.0, 0.0, 0.0); // prior mean is at origin
noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.3, 0.3, 0.1)); // 30cm std on x,y, 0.1 rad on theta noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.3, 0.3, 0.1)); // 30cm std on x,y, 0.1 rad on theta
graph.push_back(PriorFactor<Pose2>(x1, prior, priorNoise)); // add directly to graph graph.add(PriorFactor<Pose2>(x1, prior, priorNoise)); // add directly to graph
// Add two odometry factors // Add two odometry factors
Pose2 odometry(2.0, 0.0, 0.0); // create a measurement for both factors (the same in this case) Pose2 odometry(2.0, 0.0, 0.0); // create a measurement for both factors (the same in this case)
noiseModel::Diagonal::shared_ptr odometryNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.2, 0.2, 0.1)); // 20cm std on x,y, 0.1 rad on theta noiseModel::Diagonal::shared_ptr odometryNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.2, 0.2, 0.1)); // 20cm std on x,y, 0.1 rad on theta
graph.push_back(BetweenFactor<Pose2>(x1, x2, odometry, odometryNoise)); graph.add(BetweenFactor<Pose2>(x1, x2, odometry, odometryNoise));
graph.push_back(BetweenFactor<Pose2>(x2, x3, odometry, odometryNoise)); graph.add(BetweenFactor<Pose2>(x2, x3, odometry, odometryNoise));
// Add Range-Bearing measurements to two different landmarks // Add Range-Bearing measurements to two different landmarks
// create a noise model for the landmark measurements // create a noise model for the landmark measurements
@ -101,9 +101,9 @@ int main(int argc, char** argv) {
range32 = 2.0; range32 = 2.0;
// Add Bearing-Range factors // Add Bearing-Range factors
graph.push_back(BearingRangeFactor<Pose2, Point2>(x1, l1, bearing11, range11, measurementNoise)); graph.add(BearingRangeFactor<Pose2, Point2>(x1, l1, bearing11, range11, measurementNoise));
graph.push_back(BearingRangeFactor<Pose2, Point2>(x2, l1, bearing21, range21, measurementNoise)); graph.add(BearingRangeFactor<Pose2, Point2>(x2, l1, bearing21, range21, measurementNoise));
graph.push_back(BearingRangeFactor<Pose2, Point2>(x3, l2, bearing32, range32, measurementNoise)); graph.add(BearingRangeFactor<Pose2, Point2>(x3, l2, bearing32, range32, measurementNoise));
// Print // Print
graph.print("Factor Graph:\n"); graph.print("Factor Graph:\n");

View File

@ -72,23 +72,23 @@ int main(int argc, char** argv) {
// 2a. Add a prior on the first pose, setting it to the origin // 2a. Add a prior on the first pose, setting it to the origin
// A prior factor consists of a mean and a noise model (covariance matrix) // A prior factor consists of a mean and a noise model (covariance matrix)
noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.3, 0.3, 0.1)); noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.3, 0.3, 0.1));
graph.push_back(PriorFactor<Pose2>(1, Pose2(0, 0, 0), priorNoise)); graph.add(PriorFactor<Pose2>(1, Pose2(0, 0, 0), priorNoise));
// For simplicity, we will use the same noise model for odometry and loop closures // For simplicity, we will use the same noise model for odometry and loop closures
noiseModel::Diagonal::shared_ptr model = noiseModel::Diagonal::Sigmas((Vector(3) << 0.2, 0.2, 0.1)); noiseModel::Diagonal::shared_ptr model = noiseModel::Diagonal::Sigmas((Vector(3) << 0.2, 0.2, 0.1));
// 2b. Add odometry factors // 2b. Add odometry factors
// Create odometry (Between) factors between consecutive poses // Create odometry (Between) factors between consecutive poses
graph.push_back(BetweenFactor<Pose2>(1, 2, Pose2(2, 0, 0 ), model)); graph.add(BetweenFactor<Pose2>(1, 2, Pose2(2, 0, 0 ), model));
graph.push_back(BetweenFactor<Pose2>(2, 3, Pose2(2, 0, M_PI_2), model)); graph.add(BetweenFactor<Pose2>(2, 3, Pose2(2, 0, M_PI_2), model));
graph.push_back(BetweenFactor<Pose2>(3, 4, Pose2(2, 0, M_PI_2), model)); graph.add(BetweenFactor<Pose2>(3, 4, Pose2(2, 0, M_PI_2), model));
graph.push_back(BetweenFactor<Pose2>(4, 5, Pose2(2, 0, M_PI_2), model)); graph.add(BetweenFactor<Pose2>(4, 5, Pose2(2, 0, M_PI_2), model));
// 2c. Add the loop closure constraint // 2c. Add the loop closure constraint
// This factor encodes the fact that we have returned to the same pose. In real systems, // This factor encodes the fact that we have returned to the same pose. In real systems,
// these constraints may be identified in many ways, such as appearance-based techniques // these constraints may be identified in many ways, such as appearance-based techniques
// with camera images. We will use another Between Factor to enforce this constraint: // with camera images. We will use another Between Factor to enforce this constraint:
graph.push_back(BetweenFactor<Pose2>(5, 2, Pose2(2, 0, M_PI_2), model)); graph.add(BetweenFactor<Pose2>(5, 2, Pose2(2, 0, M_PI_2), model));
graph.print("\nFactor Graph:\n"); // print graph.print("\nFactor Graph:\n"); // print
// 3. Create the data structure to hold the initialEstimate estimate to the solution // 3. Create the data structure to hold the initialEstimate estimate to the solution

View File

@ -26,36 +26,72 @@
using namespace std; using namespace std;
using namespace gtsam; using namespace gtsam;
// HOWTO: ./Pose2SLAMExample_g2o inputFile outputFile (maxIterations) (tukey/huber)
int main(const int argc, const char *argv[]) { int main(const int argc, const char *argv[]) {
// Read graph from file string kernelType = "none";
string g2oFile; int maxIterations = 100; // default
if (argc < 2) string g2oFile = findExampleDataFile("noisyToyGraph.txt"); // default
g2oFile = findExampleDataFile("noisyToyGraph.txt");
else
g2oFile = argv[1];
// Parse user's inputs
if (argc > 1){
g2oFile = argv[1]; // input dataset filename
// outputFile = g2oFile = argv[2]; // done later
}
if (argc > 3){
maxIterations = atoi(argv[3]); // user can specify the maximum number of iterations
}
if (argc > 4){
kernelType = argv[4]; // user can specify either tukey or huber
}
// reading file and creating factor graph
NonlinearFactorGraph::shared_ptr graph; NonlinearFactorGraph::shared_ptr graph;
Values::shared_ptr initial; Values::shared_ptr initial;
boost::tie(graph, initial) = readG2o(g2oFile); bool is3D = false;
if(kernelType.compare("none") == 0){
boost::tie(graph, initial) = readG2o(g2oFile,is3D);
}
if(kernelType.compare("huber") == 0){
std::cout << "Using robust kernel: huber " << std::endl;
boost::tie(graph, initial) = readG2o(g2oFile,is3D, KernelFunctionTypeHUBER);
}
if(kernelType.compare("tukey") == 0){
std::cout << "Using robust kernel: tukey " << std::endl;
boost::tie(graph, initial) = readG2o(g2oFile,is3D, KernelFunctionTypeTUKEY);
}
// Add prior on the pose having index (key) = 0 // Add prior on the pose having index (key) = 0
NonlinearFactorGraph graphWithPrior = *graph; NonlinearFactorGraph graphWithPrior = *graph;
noiseModel::Diagonal::shared_ptr priorModel = // noiseModel::Diagonal::shared_ptr priorModel = //
noiseModel::Diagonal::Variances((Vector(3) << 1e-6, 1e-6, 1e-8)); noiseModel::Diagonal::Variances((Vector(3) << 1e-6, 1e-6, 1e-8));
graphWithPrior.add(PriorFactor<Pose2>(0, Pose2(), priorModel)); graphWithPrior.add(PriorFactor<Pose2>(0, Pose2(), priorModel));
std::cout << "Adding prior on pose 0 " << std::endl;
GaussNewtonParams params;
params.setVerbosity("TERMINATION");
if (argc > 3) {
params.maxIterations = maxIterations;
std::cout << "User required to perform maximum " << params.maxIterations << " iterations "<< std::endl;
}
std::cout << "Optimizing the factor graph" << std::endl; std::cout << "Optimizing the factor graph" << std::endl;
GaussNewtonOptimizer optimizer(graphWithPrior, *initial); GaussNewtonOptimizer optimizer(graphWithPrior, *initial, params);
Values result = optimizer.optimize(); Values result = optimizer.optimize();
std::cout << "Optimization complete" << std::endl; std::cout << "Optimization complete" << std::endl;
std::cout << "initial error=" <<graph->error(*initial)<< std::endl;
std::cout << "final error=" <<graph->error(result)<< std::endl;
if (argc < 3) { if (argc < 3) {
result.print("result"); result.print("result");
} else { } else {
const string outputFile = argv[2]; const string outputFile = argv[2];
std::cout << "Writing results to file: " << outputFile << std::endl; std::cout << "Writing results to file: " << outputFile << std::endl;
writeG2o(*graph, result, outputFile); NonlinearFactorGraph::shared_ptr graphNoKernel;
Values::shared_ptr initial2;
boost::tie(graphNoKernel, initial2) = readG2o(g2oFile);
writeG2o(*graphNoKernel, result, outputFile);
std::cout << "done! " << std::endl; std::cout << "done! " << std::endl;
} }
return 0; return 0;
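
Because the side-by-side rendering above is hard to follow, here is a consolidated sketch of the new Pose2SLAMExample_g2o flow (default dataset, optional robust kernel and iteration cap, optional g2o output), assembled only from calls that appear in the diff; it is a reading aid, not the verbatim committed file.
// Consolidated reading aid for the Pose2SLAMExample_g2o changes above.
#include <gtsam/slam/dataset.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <boost/tuple/tuple.hpp>
#include <iostream>
using namespace std;
using namespace gtsam;
int main(const int argc, const char* argv[]) {
  string kernelType = "none";
  int maxIterations = 100;                                    // default
  string g2oFile = findExampleDataFile("noisyToyGraph.txt");  // default
  if (argc > 1) g2oFile = argv[1];
  if (argc > 3) maxIterations = atoi(argv[3]);
  if (argc > 4) kernelType = argv[4];                         // huber or tukey
  NonlinearFactorGraph::shared_ptr graph;
  Values::shared_ptr initial;
  bool is3D = false;
  if (kernelType == "huber")
    boost::tie(graph, initial) = readG2o(g2oFile, is3D, KernelFunctionTypeHUBER);
  else if (kernelType == "tukey")
    boost::tie(graph, initial) = readG2o(g2oFile, is3D, KernelFunctionTypeTUKEY);
  else
    boost::tie(graph, initial) = readG2o(g2oFile, is3D);
  // Anchor pose 0 with a tight prior to fix the gauge freedom.
  NonlinearFactorGraph graphWithPrior = *graph;
  noiseModel::Diagonal::shared_ptr priorModel =
      noiseModel::Diagonal::Variances((Vector(3) << 1e-6, 1e-6, 1e-8));
  graphWithPrior.add(PriorFactor<Pose2>(0, Pose2(), priorModel));
  GaussNewtonParams params;
  params.setVerbosity("TERMINATION");
  if (argc > 3) params.maxIterations = maxIterations;
  GaussNewtonOptimizer optimizer(graphWithPrior, *initial, params);
  Values result = optimizer.optimize();
  cout << "final error = " << graph->error(result) << endl;
  if (argc > 2) {
    // Write the optimized values against the kernel-free graph, as in the diff.
    NonlinearFactorGraph::shared_ptr graphNoKernel;
    Values::shared_ptr initial2;
    boost::tie(graphNoKernel, initial2) = readG2o(g2oFile);
    writeG2o(*graphNoKernel, result, argv[2]);
  } else {
    result.print("result");
  }
  return 0;
}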

View File

@ -0,0 +1,89 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file Pose3SLAMExample_changeKeys.cpp
* @brief A 3D Pose SLAM example that reads a pose graph from g2o and rewrites it with all keys shifted by a fixed offset
* Syntax for the script is ./Pose3SLAMExample_changeKeys input.g2o rewritten.g2o
* @date Aug 25, 2014
* @author Luca Carlone
*/
#include <gtsam/slam/dataset.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <fstream>
using namespace std;
using namespace gtsam;
int main(const int argc, const char *argv[]) {
// Read graph from file
string g2oFile;
if (argc < 2)
g2oFile = findExampleDataFile("pose3example.txt");
else
g2oFile = argv[1];
NonlinearFactorGraph::shared_ptr graph;
Values::shared_ptr initial;
bool is3D = true;
boost::tie(graph, initial) = readG2o(g2oFile, is3D);
bool add = false;
Key firstKey = 8646911284551352320;
std::cout << "Using reference key: " << firstKey << std::endl;
if(add)
std::cout << "adding key " << std::endl;
else
std::cout << "subtracting key " << std::endl;
if (argc < 3) {
std::cout << "Please provide output file to write " << std::endl;
} else {
const string inputFileRewritten = argv[2];
std::cout << "Rewriting input to file: " << inputFileRewritten << std::endl;
// Additional: rewrite input with simplified keys 0,1,...
Values simpleInitial;
BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, *initial) {
Key key;
if(add)
key = key_value.key + firstKey;
else
key = key_value.key - firstKey;
simpleInitial.insert(key, initial->at(key_value.key));
}
NonlinearFactorGraph simpleGraph;
BOOST_FOREACH(const boost::shared_ptr<NonlinearFactor>& factor, *graph) {
boost::shared_ptr<BetweenFactor<Pose3> > pose3Between =
boost::dynamic_pointer_cast<BetweenFactor<Pose3> >(factor);
if (pose3Between){
Key key1, key2;
if(add){
key1 = pose3Between->key1() + firstKey;
key2 = pose3Between->key2() + firstKey;
}else{
key1 = pose3Between->key1() - firstKey;
key2 = pose3Between->key2() - firstKey;
}
NonlinearFactor::shared_ptr simpleFactor(
new BetweenFactor<Pose3>(key1, key2, pose3Between->measured(), pose3Between->get_noiseModel()));
simpleGraph.add(simpleFactor);
}
}
writeG2o(simpleGraph, simpleInitial, inputFileRewritten);
}
return 0;
}
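
A note on the hard-coded reference key in the example above: 8646911284551352320 corresponds to gtsam::Symbol('x', 0), since Symbol packs the character into the top 8 bits of the 64-bit key (0x78 << 56). A quick check, using only the Symbol class that the other examples in this commit already rely on:
// Sketch: confirm the reference key is Symbol('x', 0).
#include <gtsam/inference/Symbol.h>
#include <iostream>
int main() {
  gtsam::Key k = gtsam::Symbol('x', 0);   // same Symbol('x', i) keys used by the examples
  std::cout << k << std::endl;            // expected to print 8646911284551352320
  return 0;
}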

View File

@ -0,0 +1,74 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file Pose3SLAMExample_initializePose3.cpp
* @brief A 3D Pose SLAM example that reads input from g2o and optimizes the pose graph with a Gauss-Newton solver
* Syntax for the script is ./Pose3SLAMExample_initializePose3 input.g2o output.g2o
* @date Aug 25, 2014
* @author Luca Carlone
*/
#include <gtsam/slam/dataset.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <fstream>
using namespace std;
using namespace gtsam;
int main(const int argc, const char *argv[]) {
// Read graph from file
string g2oFile;
if (argc < 2)
g2oFile = findExampleDataFile("pose3example.txt");
else
g2oFile = argv[1];
NonlinearFactorGraph::shared_ptr graph;
Values::shared_ptr initial;
bool is3D = true;
boost::tie(graph, initial) = readG2o(g2oFile, is3D);
// Add prior on the first key
NonlinearFactorGraph graphWithPrior = *graph;
noiseModel::Diagonal::shared_ptr priorModel = //
noiseModel::Diagonal::Variances((Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4));
Key firstKey = 0;
BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, *initial) {
std::cout << "Adding prior to g2o file " << std::endl;
firstKey = key_value.key;
graphWithPrior.add(PriorFactor<Pose3>(firstKey, Pose3(), priorModel));
break;
}
std::cout << "Optimizing the factor graph" << std::endl;
GaussNewtonParams params;
params.setVerbosity("TERMINATION"); // this will show info about stopping conditions
GaussNewtonOptimizer optimizer(graphWithPrior, *initial, params);
Values result = optimizer.optimize();
std::cout << "Optimization complete" << std::endl;
std::cout << "initial error=" <<graph->error(*initial)<< std::endl;
std::cout << "final error=" <<graph->error(result)<< std::endl;
if (argc < 3) {
result.print("result");
} else {
const string outputFile = argv[2];
std::cout << "Writing results to file: " << outputFile << std::endl;
writeG2o(*graph, result, outputFile);
std::cout << "done! " << std::endl;
}
return 0;
}

View File

@ -0,0 +1,68 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file Pose3SLAMExample_initializePose3.cpp
* @brief A 3D Pose SLAM example that reads input from g2o, and initializes the Pose3 using InitializePose3
* Syntax for the script is ./Pose3SLAMExample_initializePose3 input.g2o output.g2o
* @date Aug 25, 2014
* @author Luca Carlone
*/
#include <gtsam/slam/InitializePose3.h>
#include <gtsam/slam/dataset.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <fstream>
using namespace std;
using namespace gtsam;
int main(const int argc, const char *argv[]) {
// Read graph from file
string g2oFile;
if (argc < 2)
g2oFile = findExampleDataFile("pose3example.txt");
else
g2oFile = argv[1];
NonlinearFactorGraph::shared_ptr graph;
Values::shared_ptr initial;
bool is3D = true;
boost::tie(graph, initial) = readG2o(g2oFile, is3D);
// Add prior on the first key
NonlinearFactorGraph graphWithPrior = *graph;
noiseModel::Diagonal::shared_ptr priorModel = //
noiseModel::Diagonal::Variances((Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4));
Key firstKey = 0;
BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, *initial) {
std::cout << "Adding prior to g2o file " << std::endl;
firstKey = key_value.key;
graphWithPrior.add(PriorFactor<Pose3>(firstKey, Pose3(), priorModel));
break;
}
std::cout << "Initializing Pose3 - chordal relaxation" << std::endl;
Values initialization = InitializePose3::initialize(graphWithPrior);
std::cout << "done!" << std::endl;
if (argc < 3) {
initialization.print("initialization");
} else {
const string outputFile = argv[2];
std::cout << "Writing results to file: " << outputFile << std::endl;
writeG2o(*graph, initialization, outputFile);
std::cout << "done! " << std::endl;
}
return 0;
}

View File

@ -0,0 +1,72 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file Pose3SLAMExample_initializePose3.cpp
* @brief A 3D Pose SLAM example that reads input from g2o, and initializes the Pose3 using InitializePose3
* Syntax for the script is ./Pose3SLAMExample_initializePose3 input.g2o output.g2o
* @date Aug 25, 2014
* @author Luca Carlone
*/
#include <gtsam/slam/InitializePose3.h>
#include <gtsam/slam/dataset.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <fstream>
using namespace std;
using namespace gtsam;
int main(const int argc, const char *argv[]) {
// Read graph from file
string g2oFile;
if (argc < 2)
g2oFile = findExampleDataFile("pose3example.txt");
else
g2oFile = argv[1];
NonlinearFactorGraph::shared_ptr graph;
Values::shared_ptr initial;
bool is3D = true;
boost::tie(graph, initial) = readG2o(g2oFile, is3D);
// Add prior on the first key
NonlinearFactorGraph graphWithPrior = *graph;
noiseModel::Diagonal::shared_ptr priorModel = //
noiseModel::Diagonal::Variances((Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4));
Key firstKey = 0;
BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, *initial) {
std::cout << "Adding prior to g2o file " << std::endl;
firstKey = key_value.key;
graphWithPrior.add(PriorFactor<Pose3>(firstKey, Pose3(), priorModel));
break;
}
std::cout << "Initializing Pose3 - Riemannian gradient" << std::endl;
bool useGradient = true;
Values initialization = InitializePose3::initialize(graphWithPrior, *initial, useGradient);
std::cout << "done!" << std::endl;
std::cout << "initial error=" <<graph->error(*initial)<< std::endl;
std::cout << "initialization error=" <<graph->error(initialization)<< std::endl;
if (argc < 3) {
initialization.print("initialization");
} else {
const string outputFile = argv[2];
std::cout << "Writing results to file: " << outputFile << std::endl;
writeG2o(*graph, initialization, outputFile);
std::cout << "done! " << std::endl;
}
return 0;
}

View File

@ -85,8 +85,8 @@ int main(int argc, char* argv[]) {
// Simulated measurements from each camera pose, adding them to the factor graph // Simulated measurements from each camera pose, adding them to the factor graph
for (size_t i = 0; i < poses.size(); ++i) { for (size_t i = 0; i < poses.size(); ++i) {
SimpleCamera camera(poses[i], *K);
for (size_t j = 0; j < points.size(); ++j) { for (size_t j = 0; j < points.size(); ++j) {
SimpleCamera camera(poses[i], *K);
Point2 measurement = camera.project(points[j]); Point2 measurement = camera.project(points[j]);
graph.push_back(GenericProjectionFactor<Pose3, Point3, Cal3_S2>(measurement, measurementNoise, Symbol('x', i), Symbol('l', j), K)); graph.push_back(GenericProjectionFactor<Pose3, Point3, Cal3_S2>(measurement, measurementNoise, Symbol('x', i), Symbol('l', j), K));
} }
@ -111,6 +111,8 @@ int main(int argc, char* argv[]) {
/* Optimize the graph and print results */ /* Optimize the graph and print results */
Values result = DoglegOptimizer(graph, initialEstimate).optimize(); Values result = DoglegOptimizer(graph, initialEstimate).optimize();
result.print("Final results:\n"); result.print("Final results:\n");
cout << "initial error = " << graph.error(initialEstimate) << endl;
cout << "final error = " << graph.error(result) << endl;
return 0; return 0;
} }

View File

@ -13,6 +13,22 @@
* @brief Incremental and batch solving, timing, and accuracy comparisons * @brief Incremental and batch solving, timing, and accuracy comparisons
* @author Richard Roberts * @author Richard Roberts
* @date August, 2013 * @date August, 2013
*
* Here is an example. To run in batch mode below, we first generate an initialization in incremental mode.
*
* Solve in incremental and write to file w_inc:
* ./SolverComparer --incremental -d w10000 -o w_inc
*
* You can then perturb that initialization to give the batch solver something to optimize.
* Read in w_inc, perturb it with noise of stddev 0.6, and write to w_pert:
* ./SolverComparer --perturb 0.6 -i w_inc -o w_pert
*
* Then solve in batch mode: read in w_pert, optimize, and write the result to w_batch:
* ./SolverComparer --batch -d w10000 -i w_pert -o w_batch
*
* Finally, compare the solutions in w_inc and w_batch to check that batch converged to the global minimum:
* ./SolverComparer --compare w_inc w_batch
*
*/ */
#include <gtsam/base/timing.h> #include <gtsam/base/timing.h>

View File

@ -14,6 +14,7 @@
* @brief A visualSLAM example for the structure-from-motion problem on a simulated dataset * @brief A visualSLAM example for the structure-from-motion problem on a simulated dataset
* This version uses iSAM to solve the problem incrementally * This version uses iSAM to solve the problem incrementally
* @author Duy-Nguyen Ta * @author Duy-Nguyen Ta
* @author Frank Dellaert
*/ */
/** /**
@ -61,7 +62,8 @@ int main(int argc, char* argv[]) {
Cal3_S2::shared_ptr K(new Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)); Cal3_S2::shared_ptr K(new Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0));
// Define the camera observation noise model // Define the camera observation noise model
noiseModel::Isotropic::shared_ptr measurementNoise = noiseModel::Isotropic::Sigma(2, 1.0); // one pixel in u and v noiseModel::Isotropic::shared_ptr noise = //
noiseModel::Isotropic::Sigma(2, 1.0); // one pixel in u and v
// Create the set of ground-truth landmarks // Create the set of ground-truth landmarks
vector<Point3> points = createPoints(); vector<Point3> points = createPoints();
@ -69,7 +71,8 @@ int main(int argc, char* argv[]) {
// Create the set of ground-truth poses // Create the set of ground-truth poses
vector<Pose3> poses = createPoses(); vector<Pose3> poses = createPoses();
// Create a NonlinearISAM object which will relinearize and reorder the variables every "relinearizeInterval" updates // Create a NonlinearISAM object which will relinearize and reorder the variables
// every "relinearizeInterval" updates
int relinearizeInterval = 3; int relinearizeInterval = 3;
NonlinearISAM isam(relinearizeInterval); NonlinearISAM isam(relinearizeInterval);
@ -82,32 +85,44 @@ int main(int argc, char* argv[]) {
// Add factors for each landmark observation // Add factors for each landmark observation
for (size_t j = 0; j < points.size(); ++j) { for (size_t j = 0; j < points.size(); ++j) {
// Create ground truth measurement
SimpleCamera camera(poses[i], *K); SimpleCamera camera(poses[i], *K);
Point2 measurement = camera.project(points[j]); Point2 measurement = camera.project(points[j]);
graph.push_back(GenericProjectionFactor<Pose3, Point3, Cal3_S2>(measurement, measurementNoise, Symbol('x', i), Symbol('l', j), K)); // Add measurement
graph.add(
GenericProjectionFactor<Pose3, Point3, Cal3_S2>(measurement, noise,
Symbol('x', i), Symbol('l', j), K));
} }
// Add an initial guess for the current pose
// Intentionally initialize the variables off from the ground truth // Intentionally initialize the variables off from the ground truth
initialEstimate.insert(Symbol('x', i), poses[i].compose(Pose3(Rot3::rodriguez(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20)))); Pose3 noise(Rot3::rodriguez(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20));
Pose3 initial_xi = poses[i].compose(noise);
// Add an initial guess for the current pose
initialEstimate.insert(Symbol('x', i), initial_xi);
// If this is the first iteration, add a prior on the first pose to set the coordinate frame // If this is the first iteration, add a prior on the first pose to set the coordinate frame
// and a prior on the first landmark to set the scale // and a prior on the first landmark to set the scale
// Also, as iSAM solves incrementally, we must wait until each is observed at least twice before // Also, as iSAM solves incrementally, we must wait until each is observed at least twice before
// adding it to iSAM. // adding it to iSAM.
if( i == 0) { if (i == 0) {
// Add a prior on pose x0 // Add a prior on pose x0, with 30cm std on x,y,z 0.1 rad on roll,pitch,yaw
noiseModel::Diagonal::shared_ptr poseNoise = noiseModel::Diagonal::Sigmas((Vector(6) << Vector3::Constant(0.3), Vector3::Constant(0.1))); // 30cm std on x,y,z 0.1 rad on roll,pitch,yaw noiseModel::Diagonal::shared_ptr poseNoise = noiseModel::Diagonal::Sigmas(
graph.push_back(PriorFactor<Pose3>(Symbol('x', 0), poses[0], poseNoise)); (Vector(6) << Vector3::Constant(0.3), Vector3::Constant(0.1)));
graph.add(PriorFactor<Pose3>(Symbol('x', 0), poses[0], poseNoise));
// Add a prior on landmark l0 // Add a prior on landmark l0
noiseModel::Isotropic::shared_ptr pointNoise = noiseModel::Isotropic::Sigma(3, 0.1); noiseModel::Isotropic::shared_ptr pointNoise =
graph.push_back(PriorFactor<Point3>(Symbol('l', 0), points[0], pointNoise)); // add directly to graph noiseModel::Isotropic::Sigma(3, 0.1);
graph.add(PriorFactor<Point3>(Symbol('l', 0), points[0], pointNoise));
// Add initial guesses to all observed landmarks // Add initial guesses to all observed landmarks
// Intentionally initialize the variables off from the ground truth Point3 noise(-0.25, 0.20, 0.15);
for (size_t j = 0; j < points.size(); ++j) for (size_t j = 0; j < points.size(); ++j) {
initialEstimate.insert(Symbol('l', j), points[j].compose(Point3(-0.25, 0.20, 0.15))); // Intentionally initialize the variables off from the ground truth
Point3 initial_lj = points[j].compose(noise);
initialEstimate.insert(Symbol('l', j), initial_lj);
}
} else { } else {
// Update iSAM with the new factors // Update iSAM with the new factors

126
gtsam.h
View File

@ -156,8 +156,14 @@ virtual class Value {
size_t dim() const; size_t dim() const;
}; };
class Vector3 {
Vector3(Vector v);
};
class Vector6 {
Vector6(Vector v);
};
#include <gtsam/base/LieScalar.h>
- virtual class LieScalar : gtsam::Value {
+ class LieScalar {
  // Standard constructors
  LieScalar();
  LieScalar(double d);
@ -186,7 +192,7 @@ virtual class LieScalar : gtsam::Value {
};
#include <gtsam/base/LieVector.h>
- virtual class LieVector : gtsam::Value {
+ class LieVector {
  // Standard constructors
  LieVector();
  LieVector(Vector v);
@ -218,7 +224,7 @@ virtual class LieVector : gtsam::Value {
};
#include <gtsam/base/LieMatrix.h>
- virtual class LieMatrix : gtsam::Value {
+ class LieMatrix {
  // Standard constructors
  LieMatrix();
  LieMatrix(Matrix v);
@ -253,7 +259,7 @@ virtual class LieMatrix : gtsam::Value {
// geometry
//*************************************************************************
- virtual class Point2 : gtsam::Value {
+ class Point2 {
  // Standard Constructors
  Point2();
  Point2(double x, double y);
@ -290,7 +296,7 @@ virtual class Point2 : gtsam::Value {
  void serialize() const;
};
- virtual class StereoPoint2 : gtsam::Value {
+ class StereoPoint2 {
  // Standard Constructors
  StereoPoint2();
  StereoPoint2(double uL, double uR, double v);
@ -325,7 +331,7 @@ virtual class StereoPoint2 : gtsam::Value {
  void serialize() const;
};
- virtual class Point3 : gtsam::Value {
+ class Point3 {
  // Standard Constructors
  Point3();
  Point3(double x, double y, double z);
@ -361,7 +367,7 @@ virtual class Point3 : gtsam::Value {
  void serialize() const;
};
- virtual class Rot2 : gtsam::Value {
+ class Rot2 {
  // Standard Constructors and Named Constructors
  Rot2();
  Rot2(double theta);
@ -406,7 +412,7 @@ virtual class Rot2 : gtsam::Value {
  void serialize() const;
};
- virtual class Rot3 : gtsam::Value {
+ class Rot3 {
  // Standard Constructors and Named Constructors
  Rot3();
  Rot3(Matrix R);
@ -462,7 +468,7 @@ virtual class Rot3 : gtsam::Value {
  void serialize() const;
};
- virtual class Pose2 : gtsam::Value {
+ class Pose2 {
  // Standard Constructor
  Pose2();
  Pose2(const gtsam::Pose2& pose);
@ -512,7 +518,7 @@ virtual class Pose2 : gtsam::Value {
  void serialize() const;
};
- virtual class Pose3 : gtsam::Value {
+ class Pose3 {
  // Standard Constructors
  Pose3();
  Pose3(const gtsam::Pose3& pose);
@ -564,7 +570,7 @@ virtual class Pose3 : gtsam::Value {
};
#include <gtsam/geometry/Unit3.h>
- virtual class Unit3 : gtsam::Value {
+ class Unit3 {
  // Standard Constructors
  Unit3();
  Unit3(const gtsam::Point3& pose);
@ -585,7 +591,7 @@ virtual class Unit3 : gtsam::Value {
};
#include <gtsam/geometry/EssentialMatrix.h>
- virtual class EssentialMatrix : gtsam::Value {
+ class EssentialMatrix {
  // Standard Constructors
  EssentialMatrix(const gtsam::Rot3& aRb, const gtsam::Unit3& aTb);
@ -606,7 +612,7 @@ virtual class EssentialMatrix : gtsam::Value {
  double error(Vector vA, Vector vB);
};
- virtual class Cal3_S2 : gtsam::Value {
+ class Cal3_S2 {
  // Standard Constructors
  Cal3_S2();
  Cal3_S2(double fx, double fy, double s, double u0, double v0);
@ -643,7 +649,7 @@ virtual class Cal3_S2 : gtsam::Value {
};
#include <gtsam/geometry/Cal3DS2.h>
- virtual class Cal3DS2 : gtsam::Value {
+ class Cal3DS2 {
  // Standard Constructors
  Cal3DS2();
  Cal3DS2(double fx, double fy, double s, double u0, double v0, double k1, double k2, double k3, double k4);
@ -699,7 +705,43 @@ class Cal3_S2Stereo {
  double baseline() const;
};
- virtual class CalibratedCamera : gtsam::Value {
+ #include <gtsam/geometry/Cal3Bundler.h>
class Cal3Bundler {
// Standard Constructors
Cal3Bundler();
Cal3Bundler(double fx, double k1, double k2, double u0, double v0);
// Testable
void print(string s) const;
bool equals(const gtsam::Cal3Bundler& rhs, double tol) const;
// Manifold
static size_t Dim();
size_t dim() const;
gtsam::Cal3Bundler retract(Vector v) const;
Vector localCoordinates(const gtsam::Cal3Bundler& c) const;
// Action on Point2
gtsam::Point2 calibrate(const gtsam::Point2& p, double tol) const;
gtsam::Point2 calibrate(const gtsam::Point2& p) const;
gtsam::Point2 uncalibrate(const gtsam::Point2& p) const;
// Standard Interface
double fx() const;
double fy() const;
double k1() const;
double k2() const;
double u0() const;
double v0() const;
Vector vector() const;
Vector k() const;
//Matrix K() const; //FIXME: Uppercase
// enabling serialization functionality
void serialize() const;
};
+ class CalibratedCamera {
  // Standard Constructors and Named Constructors
  CalibratedCamera();
  CalibratedCamera(const gtsam::Pose3& pose);
@ -732,7 +774,7 @@ virtual class CalibratedCamera : gtsam::Value {
  void serialize() const;
};
- virtual class SimpleCamera : gtsam::Value {
+ class SimpleCamera {
  // Standard Constructors and Named Constructors
  SimpleCamera();
  SimpleCamera(const gtsam::Pose3& pose);
@ -771,7 +813,7 @@ virtual class SimpleCamera : gtsam::Value {
};
template<CALIBRATION = {gtsam::Cal3DS2}>
- virtual class PinholeCamera : gtsam::Value {
+ class PinholeCamera {
  // Standard Constructors and Named Constructors
  PinholeCamera();
  PinholeCamera(const gtsam::Pose3& pose);
@ -809,7 +851,7 @@ virtual class PinholeCamera : gtsam::Value {
  void serialize() const;
};
- virtual class StereoCamera : gtsam::Value {
+ class StereoCamera {
  // Standard Constructors and Named Constructors
  StereoCamera();
  StereoCamera(const gtsam::Pose3& pose, const gtsam::Cal3_S2Stereo* K);
@ -862,7 +904,7 @@ virtual class SymbolicFactor {
};
#include <gtsam/symbolic/SymbolicFactorGraph.h>
- class SymbolicFactorGraph {
+ virtual class SymbolicFactorGraph {
  SymbolicFactorGraph();
  SymbolicFactorGraph(const gtsam::SymbolicBayesNet& bayesNet);
  SymbolicFactorGraph(const gtsam::SymbolicBayesTree& bayesTree);
@ -1664,15 +1706,12 @@ class Values {
  void print(string s) const;
  bool equals(const gtsam::Values& other, double tol) const;
- void insert(size_t j, const gtsam::Value& value);
  void insert(const gtsam::Values& values);
- void update(size_t j, const gtsam::Value& val);
  void update(const gtsam::Values& values);
  void erase(size_t j);
  void swap(gtsam::Values& values);
  bool exists(size_t j) const;
- gtsam::Value at(size_t j) const;
  gtsam::KeyList keys() const;
  gtsam::VectorValues zeroVectors() const;
@ -1682,6 +1721,37 @@ class Values {
  // enabling serialization functionality
  void serialize() const;
// New in 4.0, we have to specialize every insert/update/at to generate wrappers
// Instead of the old:
// void insert(size_t j, const gtsam::Value& value);
// void update(size_t j, const gtsam::Value& val);
// gtsam::Value at(size_t j) const;
void insert(size_t j, const gtsam::Point2& t);
void insert(size_t j, const gtsam::Point3& t);
void insert(size_t j, const gtsam::Rot2& t);
void insert(size_t j, const gtsam::Pose2& t);
void insert(size_t j, const gtsam::Rot3& t);
void insert(size_t j, const gtsam::Pose3& t);
void insert(size_t j, const gtsam::Cal3_S2& t);
void insert(size_t j, const gtsam::Cal3DS2& t);
void insert(size_t j, const gtsam::Cal3Bundler& t);
void insert(size_t j, const gtsam::EssentialMatrix& t);
void update(size_t j, const gtsam::Point2& t);
void update(size_t j, const gtsam::Point3& t);
void update(size_t j, const gtsam::Rot2& t);
void update(size_t j, const gtsam::Pose2& t);
void update(size_t j, const gtsam::Rot3& t);
void update(size_t j, const gtsam::Pose3& t);
void update(size_t j, const gtsam::Cal3_S2& t);
void update(size_t j, const gtsam::Cal3DS2& t);
void update(size_t j, const gtsam::Cal3Bundler& t);
void update(size_t j, const gtsam::EssentialMatrix& t);
template<T = {gtsam::Point2, gtsam::Point3, gtsam::Rot2, gtsam::Pose2,
gtsam::Rot3, gtsam::Pose3, gtsam::Cal3_S2, gtsam::Cal3DS2}>
T at(size_t j);
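Editor's note: a minimal usage sketch (not part of this diff) of the typed insert/update/at interface declared above, as it looks from C++; the key and variable names are illustrative only:

    #include <gtsam/nonlinear/Values.h>
    #include <gtsam/geometry/Pose2.h>
    using namespace gtsam;

    Values values;
    values.insert(1, Pose2(1.0, 2.0, 0.3));   // typed insert for key 1
    values.update(1, Pose2(1.1, 2.0, 0.3));   // typed update of an existing key
    Pose2 pose = values.at<Pose2>(1);         // templated at<T> retrieves the value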
};
// Actually a FastList<Key>
@ -2077,7 +2147,7 @@ class NonlinearISAM {
#include <gtsam/geometry/StereoPoint2.h>
#include <gtsam/slam/PriorFactor.h>
- template<T = {gtsam::LieScalar, gtsam::LieVector, gtsam::LieMatrix, gtsam::Point2, gtsam::StereoPoint2, gtsam::Point3, gtsam::Rot2, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::Cal3_S2, gtsam::CalibratedCamera, gtsam::SimpleCamera, gtsam::imuBias::ConstantBias}>
+ template<T = {gtsam::Point2, gtsam::StereoPoint2, gtsam::Point3, gtsam::Rot2, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::Cal3_S2, gtsam::CalibratedCamera, gtsam::SimpleCamera, gtsam::imuBias::ConstantBias}>
virtual class PriorFactor : gtsam::NoiseModelFactor {
  PriorFactor(size_t key, const T& prior, const gtsam::noiseModel::Base* noiseModel);
  T prior() const;
@ -2088,7 +2158,7 @@ virtual class PriorFactor : gtsam::NoiseModelFactor {
#include <gtsam/slam/BetweenFactor.h>
- template<T = {gtsam::LieScalar, gtsam::LieVector, gtsam::LieMatrix, gtsam::Point2, gtsam::Point3, gtsam::Rot2, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::imuBias::ConstantBias}>
+ template<T = {gtsam::Point2, gtsam::Point3, gtsam::Rot2, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::imuBias::ConstantBias}>
virtual class BetweenFactor : gtsam::NoiseModelFactor {
  BetweenFactor(size_t key1, size_t key2, const T& relativePose, const gtsam::noiseModel::Base* noiseModel);
  T measured() const;
@ -2099,7 +2169,7 @@ virtual class BetweenFactor : gtsam::NoiseModelFactor {
#include <gtsam/nonlinear/NonlinearEquality.h>
- template<T = {gtsam::LieScalar, gtsam::LieVector, gtsam::LieMatrix, gtsam::Point2, gtsam::StereoPoint2, gtsam::Point3, gtsam::Rot2, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::Cal3_S2, gtsam::CalibratedCamera, gtsam::SimpleCamera, gtsam::imuBias::ConstantBias}>
+ template<T = {gtsam::Point2, gtsam::StereoPoint2, gtsam::Point3, gtsam::Rot2, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::Cal3_S2, gtsam::CalibratedCamera, gtsam::SimpleCamera, gtsam::imuBias::ConstantBias}>
virtual class NonlinearEquality : gtsam::NoiseModelFactor {
  // Constructor - forces exact evaluation
  NonlinearEquality(size_t j, const T& feasible);
@ -2280,7 +2350,7 @@ void writeG2o(const gtsam::NonlinearFactorGraph& graph,
namespace imuBias {
#include <gtsam/navigation/ImuBias.h>
- virtual class ConstantBias : gtsam::Value {
+ class ConstantBias {
  // Standard Constructor
  ConstantBias();
  ConstantBias(Vector biasAcc, Vector biasGyro);
@ -2340,7 +2410,7 @@ virtual class ImuFactor : gtsam::NonlinearFactor {
  // Standard Interface
  gtsam::ImuFactorPreintegratedMeasurements preintegratedMeasurements() const;
- void Predict(const gtsam::Pose3& pose_i, const gtsam::LieVector& vel_i, gtsam::Pose3& pose_j, gtsam::LieVector& vel_j,
+ void Predict(const gtsam::Pose3& pose_i, const gtsam::Vector3& vel_i, gtsam::Pose3& pose_j, gtsam::Vector3& vel_j,
    const gtsam::imuBias::ConstantBias& bias,
    const gtsam::ImuFactorPreintegratedMeasurements& preintegratedMeasurements,
    Vector gravity, Vector omegaCoriolis) const;
@ -2383,7 +2453,7 @@ virtual class CombinedImuFactor : gtsam::NonlinearFactor {
  // Standard Interface
  gtsam::CombinedImuFactorPreintegratedMeasurements preintegratedMeasurements() const;
- void Predict(const gtsam::Pose3& pose_i, const gtsam::LieVector& vel_i, gtsam::Pose3& pose_j, gtsam::LieVector& vel_j,
+ void Predict(const gtsam::Pose3& pose_i, const gtsam::Vector3& vel_i, gtsam::Pose3& pose_j, gtsam::Vector3& vel_j,
    const gtsam::imuBias::ConstantBias& bias_i, const gtsam::imuBias::ConstantBias& bias_j,
    const gtsam::CombinedImuFactorPreintegratedMeasurements& preintegratedMeasurements,
    Vector gravity, Vector omegaCoriolis) const;
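Editor's note: a short usage sketch (not part of this diff) of the PriorFactor/BetweenFactor templates wrapped above, using Pose2 and illustrative noise sigmas; it mirrors the Sigmas pattern used in the example code earlier in this change:

    #include <gtsam/slam/PriorFactor.h>
    #include <gtsam/slam/BetweenFactor.h>
    #include <gtsam/nonlinear/NonlinearFactorGraph.h>
    using namespace gtsam;

    NonlinearFactorGraph graph;
    noiseModel::Diagonal::shared_ptr priorNoise =
        noiseModel::Diagonal::Sigmas((Vector(3) << 0.3, 0.3, 0.1));
    graph.add(PriorFactor<Pose2>(1, Pose2(0, 0, 0), priorNoise));
    noiseModel::Diagonal::shared_ptr odomNoise =
        noiseModel::Diagonal::Sigmas((Vector(3) << 0.2, 0.2, 0.1));
    graph.add(BetweenFactor<Pose2>(1, 2, Pose2(2, 0, 0), odomNoise));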

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -4,14 +4,10 @@
## # The following are required to uses Dart and the Cdash dashboard
## ENABLE_TESTING()
## INCLUDE(CTest)
- set(CTEST_PROJECT_NAME "Eigen")
+ set(CTEST_PROJECT_NAME "Eigen3.2")
set(CTEST_NIGHTLY_START_TIME "00:00:00 UTC")
set(CTEST_DROP_METHOD "http")
set(CTEST_DROP_SITE "manao.inria.fr")
- set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen")
+ set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen3.2")
set(CTEST_DROP_SITE_CDASH TRUE)
- set(CTEST_PROJECT_SUBPROJECTS
-   Official
-   Unsupported
- )

View File

@ -95,7 +95,7 @@
extern "C" {
  // In theory we should only include immintrin.h and not the other *mmintrin.h header files directly.
  // Doing so triggers some issues with ICC. However old gcc versions seems to not have this file, thus:
- #ifdef __INTEL_COMPILER
+ #if defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 1110
    #include <immintrin.h>
  #else
    #include <emmintrin.h>
@ -165,7 +165,7 @@
#endif
// required for __cpuid, needs to be included after cmath
- #if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64))
+ #if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64)) && (!defined(_WIN32_WCE))
  #include <intrin.h>
#endif

View File

@ -274,30 +274,13 @@ template<> struct ldlt_inplace<Lower>
    return true;
  }
- RealScalar cutoff(0), biggest_in_corner;
  for (Index k = 0; k < size; ++k)
  {
    // Find largest diagonal element
    Index index_of_biggest_in_corner;
-   biggest_in_corner = mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner);
+   mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner);
    index_of_biggest_in_corner += k;
-   if(k == 0)
-   {
-     // The biggest overall is the point of reference to which further diagonals
-     // are compared; if any diagonal is negligible compared
-     // to the largest overall, the algorithm bails.
-     cutoff = abs(NumTraits<Scalar>::epsilon() * biggest_in_corner);
-   }
-   // Finish early if the matrix is not full rank.
-   if(biggest_in_corner < cutoff)
-   {
-     for(Index i = k; i < size; i++) transpositions.coeffRef(i) = i;
-     break;
-   }
    transpositions.coeffRef(k) = index_of_biggest_in_corner;
    if(k != index_of_biggest_in_corner)
    {
@ -328,15 +311,20 @@ template<> struct ldlt_inplace<Lower>
    if(k>0)
    {
-     temp.head(k) = mat.diagonal().head(k).asDiagonal() * A10.adjoint();
+     temp.head(k) = mat.diagonal().real().head(k).asDiagonal() * A10.adjoint();
      mat.coeffRef(k,k) -= (A10 * temp.head(k)).value();
      if(rs>0)
        A21.noalias() -= A20 * temp.head(k);
    }
-   if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff))
-     A21 /= mat.coeffRef(k,k);
+   // In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot
+   // was smaller than the cutoff value. However, soince LDLT is not rank-revealing
+   // we should only make sure we do not introduce INF or NaN values.
+   // LAPACK also uses 0 as the cutoff value.
    RealScalar realAkk = numext::real(mat.coeffRef(k,k));
+   if((rs>0) && (abs(realAkk) > RealScalar(0)))
+     A21 /= realAkk;
    if (sign == PositiveSemiDef) {
      if (realAkk < 0) sign = Indefinite;
    } else if (sign == NegativeSemiDef) {
@ -516,14 +504,20 @@ struct solve_retval<LDLT<_MatrixType,_UpLo>, Rhs>
    typedef typename LDLTType::MatrixType MatrixType;
    typedef typename LDLTType::Scalar Scalar;
    typedef typename LDLTType::RealScalar RealScalar;
-   const Diagonal<const MatrixType> vectorD = dec().vectorD();
-   RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() * NumTraits<Scalar>::epsilon(),
-                                RealScalar(1) / NumTraits<RealScalar>::highest()); // motivated by LAPACK's xGELSS
+   const typename Diagonal<const MatrixType>::RealReturnType vectorD(dec().vectorD());
+   // In some previous versions, tolerance was set to the max of 1/highest and the maximal diagonal entry * epsilon
+   // as motivated by LAPACK's xGELSS:
+   // RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() *NumTraits<RealScalar>::epsilon(),RealScalar(1) / NumTraits<RealScalar>::highest());
+   // However, LDLT is not rank revealing, and so adjusting the tolerance wrt to the highest
+   // diagonal element is not well justified and to numerical issues in some cases.
+   // Moreover, Lapack's xSYTRS routines use 0 for the tolerance.
+   RealScalar tolerance = RealScalar(1) / NumTraits<RealScalar>::highest();
    for (Index i = 0; i < vectorD.size(); ++i) {
      if(abs(vectorD(i)) > tolerance)
        dst.row(i) /= vectorD(i);
      else
        dst.row(i).setZero();
    }
    // dst = L^-T (D^-1 L^-1 P b)
@ -576,7 +570,7 @@ MatrixType LDLT<MatrixType,_UpLo>::reconstructedMatrix() const
  // L^* P
  res = matrixU() * res;
  // D(L^*P)
- res = vectorD().asDiagonal() * res;
+ res = vectorD().real().asDiagonal() * res;
  // L(DL^*P)
  res = matrixL() * res;
  // P^T (LDL^*P)
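Editor's note: a short, illustrative usage sketch of the decomposition touched above (not part of this diff). With the new behavior the pivot is used whenever it is non-zero, and solve() zeroes the components associated with (near-)zero diagonal entries:

    #include <Eigen/Dense>
    using namespace Eigen;

    MatrixXd A = MatrixXd::Random(4, 4);
    A = A * A.transpose();              // make A symmetric positive semi-definite
    VectorXd b = VectorXd::Random(4);
    LDLT<MatrixXd> ldlt(A);             // robust Cholesky with diagonal pivoting
    VectorXd x = ldlt.solve(b);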

View File

@ -81,7 +81,7 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprTyp
    && (InnerStrideAtCompileTime == 1)
    ? PacketAccessBit : 0,
  MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16) == 0)) ? AlignedBit : 0,
- FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0,
+ FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (traits<XprType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
  FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,
  FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,
  Flags0 = traits<XprType>::Flags & ( (HereditaryBits & ~RowMajorBit) |

View File

@ -47,6 +47,17 @@ struct CommaInitializer :
  m_xpr.block(0, 0, other.rows(), other.cols()) = other;
}
/* Copy/Move constructor which transfers ownership. This is crucial in
* absence of return value optimization to avoid assertions during destruction. */
// FIXME in C++11 mode this could be replaced by a proper RValue constructor
inline CommaInitializer(const CommaInitializer& o)
: m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) {
// Mark original object as finished. In absence of R-value references we need to const_cast:
const_cast<CommaInitializer&>(o).m_row = m_xpr.rows();
const_cast<CommaInitializer&>(o).m_col = m_xpr.cols();
const_cast<CommaInitializer&>(o).m_currentBlockRows = 0;
}
/* inserts a scalar value in the target matrix */
CommaInitializer& operator,(const Scalar& s)
{

View File

@ -0,0 +1,154 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COMMAINITIALIZER_H
#define EIGEN_COMMAINITIALIZER_H
namespace Eigen {
/** \class CommaInitializer
* \ingroup Core_Module
*
* \brief Helper class used by the comma initializer operator
*
* This class is internally used to implement the comma initializer feature. It is
* the return type of MatrixBase::operator<<, and most of the time this is the only
* way it is used.
*
* \sa \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished()
*/
template<typename XprType>
struct CommaInitializer
{
typedef typename XprType::Scalar Scalar;
typedef typename XprType::Index Index;
inline CommaInitializer(XprType& xpr, const Scalar& s)
: m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1)
{
m_xpr.coeffRef(0,0) = s;
}
template<typename OtherDerived>
inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other)
: m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows())
{
m_xpr.block(0, 0, other.rows(), other.cols()) = other;
}
/* Copy/Move constructor which transfers ownership. This is crucial in
* absence of return value optimization to avoid assertions during destruction. */
// FIXME in C++11 mode this could be replaced by a proper RValue constructor
inline CommaInitializer(const CommaInitializer& o)
: m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) {
// Mark original object as finished. In absence of R-value references we need to const_cast:
const_cast<CommaInitializer&>(o).m_row = m_xpr.rows();
const_cast<CommaInitializer&>(o).m_col = m_xpr.cols();
const_cast<CommaInitializer&>(o).m_currentBlockRows = 0;
}
/* inserts a scalar value in the target matrix */
CommaInitializer& operator,(const Scalar& s)
{
if (m_col==m_xpr.cols())
{
m_row+=m_currentBlockRows;
m_col = 0;
m_currentBlockRows = 1;
eigen_assert(m_row<m_xpr.rows()
&& "Too many rows passed to comma initializer (operator<<)");
}
eigen_assert(m_col<m_xpr.cols()
&& "Too many coefficients passed to comma initializer (operator<<)");
eigen_assert(m_currentBlockRows==1);
m_xpr.coeffRef(m_row, m_col++) = s;
return *this;
}
/* inserts a matrix expression in the target matrix */
template<typename OtherDerived>
CommaInitializer& operator,(const DenseBase<OtherDerived>& other)
{
if(other.cols()==0 || other.rows()==0)
return *this;
if (m_col==m_xpr.cols())
{
m_row+=m_currentBlockRows;
m_col = 0;
m_currentBlockRows = other.rows();
eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows()
&& "Too many rows passed to comma initializer (operator<<)");
}
eigen_assert(m_col<m_xpr.cols()
&& "Too many coefficients passed to comma initializer (operator<<)");
eigen_assert(m_currentBlockRows==other.rows());
if (OtherDerived::SizeAtCompileTime != Dynamic)
m_xpr.template block<OtherDerived::RowsAtCompileTime != Dynamic ? OtherDerived::RowsAtCompileTime : 1,
OtherDerived::ColsAtCompileTime != Dynamic ? OtherDerived::ColsAtCompileTime : 1>
(m_row, m_col) = other;
else
m_xpr.block(m_row, m_col, other.rows(), other.cols()) = other;
m_col += other.cols();
return *this;
}
inline ~CommaInitializer()
{
eigen_assert((m_row+m_currentBlockRows) == m_xpr.rows()
&& m_col == m_xpr.cols()
&& "Too few coefficients passed to comma initializer (operator<<)");
}
/** \returns the built matrix once all its coefficients have been set.
* Calling finished is 100% optional. Its purpose is to write expressions
* like this:
* \code
* quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());
* \endcode
*/
inline XprType& finished() { return m_xpr; }
XprType& m_xpr; // target expression
Index m_row; // current row id
Index m_col; // current col id
Index m_currentBlockRows; // current block height
};
/** \anchor MatrixBaseCommaInitRef
* Convenient operator to set the coefficients of a matrix.
*
* The coefficients must be provided in a row major order and exactly match
* the size of the matrix. Otherwise an assertion is raised.
*
* Example: \include MatrixBase_set.cpp
* Output: \verbinclude MatrixBase_set.out
*
* \note According the c++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order.
*
* \sa CommaInitializer::finished(), class CommaInitializer
*/
template<typename Derived>
inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s)
{
return CommaInitializer<Derived>(*static_cast<Derived*>(this), s);
}
/** \sa operator<<(const Scalar&) */
template<typename Derived>
template<typename OtherDerived>
inline CommaInitializer<Derived>
DenseBase<Derived>::operator<<(const DenseBase<OtherDerived>& other)
{
return CommaInitializer<Derived>(*static_cast<Derived *>(this), other);
}
} // end namespace Eigen
#endif // EIGEN_COMMAINITIALIZER_H
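Editor's note: for reference, a small usage sketch (not part of this diff) of the comma-initializer API implemented by the file above:

    #include <Eigen/Dense>
    using namespace Eigen;

    Matrix3d m;
    m << 1, 2, 3,
         4, 5, 6,
         7, 8, 9;                        // fills m row by row via CommaInitializer
    Vector3d axis0(1, 0, 0), axis1(0, 1, 0), axis2(0, 0, 1);
    Matrix3d basis = (Matrix3d() << axis0, axis1, axis2).finished();  // finished() returns the built matrix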

View File

@ -24,6 +24,14 @@ namespace internal {
struct constructor_without_unaligned_array_assert {};
template<typename T, int Size> void check_static_allocation_size()
{
// if EIGEN_STACK_ALLOCATION_LIMIT is defined to 0, then no limit
#if EIGEN_STACK_ALLOCATION_LIMIT
EIGEN_STATIC_ASSERT(Size * sizeof(T) <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
#endif
}
/** \internal
  * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned:
  * to 16 bytes boundary if the total size is a multiple of 16 bytes.
@ -38,12 +46,12 @@ struct plain_array
  plain_array()
  {
-   EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+   check_static_allocation_size<T,Size>();
  }
  plain_array(constructor_without_unaligned_array_assert)
  {
-   EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+   check_static_allocation_size<T,Size>();
  }
};
@ -76,12 +84,12 @@ struct plain_array<T, Size, MatrixOrArrayOptions, 16>
  plain_array()
  {
    EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf);
-   EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+   check_static_allocation_size<T,Size>();
  }
  plain_array(constructor_without_unaligned_array_assert)
  {
-   EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+   check_static_allocation_size<T,Size>();
  }
};
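Editor's note: the new check_static_allocation_size() helper enforces EIGEN_STACK_ALLOCATION_LIMIT, whose default is raised to 128 KB further below in this change. A sketch (not part of this diff) of how a project can override the limit, assuming the macro is defined before any Eigen header is included:

    // must come before the first Eigen include
    #define EIGEN_STACK_ALLOCATION_LIMIT 0   // 0 disables the static size check entirely
    #include <Eigen/Dense>

    Eigen::Matrix<double, 200, 200> big;     // would otherwise trigger OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG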

View File

@ -589,7 +589,7 @@ struct linspaced_op_impl<Scalar,true>
  template<typename Index>
  EIGEN_STRONG_INLINE const Packet packetOp(Index i) const
- { return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1<Packet>(i),m_interPacket))); }
+ { return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1<Packet>(Scalar(i)),m_interPacket))); }
  const Scalar m_low;
  const Scalar m_step;
@ -609,7 +609,7 @@ template <typename Scalar, bool RandomAccess> struct functor_traits< linspaced_o
template <typename Scalar, bool RandomAccess> struct linspaced_op
{
  typedef typename packet_traits<Scalar>::type Packet;
- linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/(num_steps-1))) {}
+ linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1))) {}
  template<typename Index>
  EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); }

View File

@ -237,6 +237,8 @@ template<typename Derived> class MapBase<Derived, WriteAccessors>
  using Base::Base::operator=;
};
+ #undef EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS
} // end namespace Eigen
#endif // EIGEN_MAPBASE_H

View File

@ -101,7 +101,7 @@ struct traits<Ref<_PlainObjectType, _Options, _StrideType> >
template<typename Derived> struct match {
  enum {
    HasDirectAccess = internal::has_direct_access<Derived>::ret,
-   StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
+   StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
    InnerStrideMatch = int(StrideType::InnerStrideAtCompileTime)==int(Dynamic)
                    || int(StrideType::InnerStrideAtCompileTime)==int(Derived::InnerStrideAtCompileTime)
                    || (int(StrideType::InnerStrideAtCompileTime)==0 && int(Derived::InnerStrideAtCompileTime)==1),
@ -172,8 +172,12 @@ protected:
    }
    else
      ::new (static_cast<Base*>(this)) Base(expr.data(), expr.rows(), expr.cols());
-   ::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(),
-                                StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride());
+   if(Expression::IsVectorAtCompileTime && (!PlainObjectType::IsVectorAtCompileTime) && ((Expression::Flags&RowMajorBit)!=(PlainObjectType::Flags&RowMajorBit)))
+     ::new (&m_stride) StrideBase(expr.innerStride(), StrideType::InnerStrideAtCompileTime==0?0:1);
+   else
+     ::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(),
+                                  StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride());
  }
  StrideBase m_stride;
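Editor's note: a brief, illustrative sketch (not part of this diff) of Eigen::Ref, which the storage-order/stride logic above supports; the function and variable names are hypothetical:

    #include <Eigen/Dense>
    using namespace Eigen;

    // Accepts any compatible double vector expression without copying when strides allow it
    double sum(const Ref<const VectorXd>& v) { return v.sum(); }

    MatrixXd M = MatrixXd::Random(4, 4);
    double s1 = sum(M.col(2));              // a column binds directly, no copy
    double s2 = sum(VectorXd::Ones(10));    // temporaries bind to const Ref as well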

View File

@ -278,21 +278,21 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
/** Efficient triangular matrix times vector/matrix product */
template<typename OtherDerived>
- TriangularProduct<Mode,true,MatrixType,false,OtherDerived, OtherDerived::IsVectorAtCompileTime>
+ TriangularProduct<Mode, true, MatrixType, false, OtherDerived, OtherDerived::ColsAtCompileTime==1>
operator*(const MatrixBase<OtherDerived>& rhs) const
{
  return TriangularProduct
-   <Mode,true,MatrixType,false,OtherDerived,OtherDerived::IsVectorAtCompileTime>
+   <Mode, true, MatrixType, false, OtherDerived, OtherDerived::ColsAtCompileTime==1>
    (m_matrix, rhs.derived());
}
/** Efficient vector/matrix times triangular matrix product */
template<typename OtherDerived> friend
- TriangularProduct<Mode,false,OtherDerived,OtherDerived::IsVectorAtCompileTime,MatrixType,false>
+ TriangularProduct<Mode, false, OtherDerived, OtherDerived::RowsAtCompileTime==1, MatrixType, false>
operator*(const MatrixBase<OtherDerived>& lhs, const TriangularView& rhs)
{
  return TriangularProduct
-   <Mode,false,OtherDerived,OtherDerived::IsVectorAtCompileTime,MatrixType,false>
+   <Mode, false, OtherDerived, OtherDerived::RowsAtCompileTime==1, MatrixType, false>
    (lhs.derived(),rhs.m_matrix);
}
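Editor's note: a usage sketch (not part of this diff) for the triangular products whose template flags are adjusted above:

    #include <Eigen/Dense>
    using namespace Eigen;

    MatrixXd A = MatrixXd::Random(3, 3);
    VectorXd v = VectorXd::Random(3);
    VectorXd y = A.triangularView<Lower>() * v;                   // triangular matrix * vector
    RowVectorXd z = v.transpose() * A.triangularView<Upper>();    // vector * triangular matrix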

View File

@ -54,8 +54,25 @@
#endif
#if defined EIGEN_USE_MKL
# include <mkl.h>
/*Check IMKL version for compatibility: < 10.3 is not usable with Eigen*/
# ifndef INTEL_MKL_VERSION
# undef EIGEN_USE_MKL /* INTEL_MKL_VERSION is not even defined on older versions */
# elif INTEL_MKL_VERSION < 100305 /* the intel-mkl-103-release-notes say this was when the lapacke.h interface was added*/
# undef EIGEN_USE_MKL
# endif
# ifndef EIGEN_USE_MKL
/*If the MKL version is too old, undef everything*/
# undef EIGEN_USE_MKL_ALL
# undef EIGEN_USE_BLAS
# undef EIGEN_USE_LAPACKE
# undef EIGEN_USE_MKL_VML
# undef EIGEN_USE_LAPACKE_STRICT
# undef EIGEN_USE_LAPACKE
# endif
#endif
- #include <mkl.h>
+ #if defined EIGEN_USE_MKL
  #include <mkl_lapacke.h>
  #define EIGEN_MKL_VML_THRESHOLD 128

View File

@ -13,7 +13,7 @@
#define EIGEN_WORLD_VERSION 3
#define EIGEN_MAJOR_VERSION 2
- #define EIGEN_MINOR_VERSION 1
+ #define EIGEN_MINOR_VERSION 2
#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
                                       (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
@ -289,7 +289,8 @@ namespace Eigen {
#endif
#ifndef EIGEN_STACK_ALLOCATION_LIMIT
- #define EIGEN_STACK_ALLOCATION_LIMIT 20000
+ // 131072 == 128 KB
+ #define EIGEN_STACK_ALLOCATION_LIMIT 131072
#endif
#ifndef EIGEN_DEFAULT_IO_FORMAT

View File

@ -272,12 +272,12 @@ inline void* aligned_realloc(void *ptr, size_t new_size, size_t old_size)
// The defined(_mm_free) is just here to verify that this MSVC version
// implements _mm_malloc/_mm_free based on the corresponding _aligned_
// functions. This may not always be the case and we just try to be safe.
- #if defined(_MSC_VER) && defined(_mm_free)
+ #if defined(_MSC_VER) && (!defined(_WIN32_WCE)) && defined(_mm_free)
    result = _aligned_realloc(ptr,new_size,16);
  #else
    result = generic_aligned_realloc(ptr,new_size,old_size);
  #endif
- #elif defined(_MSC_VER)
+ #elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
    result = _aligned_realloc(ptr,new_size,16);
  #else
    result = handmade_aligned_realloc(ptr,new_size,old_size);
@ -630,6 +630,8 @@ template<typename T> class aligned_stack_memory_handler
  } \
  void operator delete(void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
  void operator delete[](void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
+ void operator delete(void * ptr, std::size_t /* sz */) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
+ void operator delete[](void * ptr, std::size_t /* sz */) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
  /* in-place new and delete. since (at least afaik) there is no actual */ \
  /* memory allocated we can safely let the default implementation handle */ \
  /* this particular case. */ \
@ -777,9 +779,9 @@ namespace internal {
#ifdef EIGEN_CPUID
- inline bool cpuid_is_vendor(int abcd[4], const char* vendor)
+ inline bool cpuid_is_vendor(int abcd[4], const int vendor[3])
  {
-   return abcd[1]==(reinterpret_cast<const int*>(vendor))[0] && abcd[3]==(reinterpret_cast<const int*>(vendor))[1] && abcd[2]==(reinterpret_cast<const int*>(vendor))[2];
+   return abcd[1]==vendor[0] && abcd[3]==vendor[1] && abcd[2]==vendor[2];
  }
  inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3)
@ -921,13 +923,16 @@ inline void queryCacheSizes(int& l1, int& l2, int& l3)
{
#ifdef EIGEN_CPUID
  int abcd[4];
const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e};
const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163};
const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574}; // "AMDisbetter!"
  // identify the CPU vendor
  EIGEN_CPUID(abcd,0x0,0);
  int max_std_funcs = abcd[1];
- if(cpuid_is_vendor(abcd,"GenuineIntel"))
+ if(cpuid_is_vendor(abcd,GenuineIntel))
    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
- else if(cpuid_is_vendor(abcd,"AuthenticAMD") || cpuid_is_vendor(abcd,"AMDisbetter!"))
+ else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_))
    queryCacheSizes_amd(l1,l2,l3);
  else
    // by default let's use Intel's API

View File

@ -203,6 +203,8 @@ public:
  * \li \c Quaternionf for \c float
  * \li \c Quaterniond for \c double
  *
+ * \warning Operations interpreting the quaternion as rotation have undefined behavior if the quaternion is not normalized.
+ *
  * \sa class AngleAxis, class Transform
  */
@ -344,7 +346,7 @@ class Map<const Quaternion<_Scalar>, _Options >
  /** Constructs a Mapped Quaternion object from the pointer \a coeffs
    *
-   * The pointer \a coeffs must reference the four coeffecients of Quaternion in the following order:
+   * The pointer \a coeffs must reference the four coefficients of Quaternion in the following order:
    * \code *coeffs == {x, y, z, w} \endcode
    *
    * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */
@ -464,7 +466,7 @@ QuaternionBase<Derived>::_transformVector(Vector3 v) const
  // Note that this algorithm comes from the optimization by hand
  // of the conversion to a Matrix followed by a Matrix/Vector product.
  // It appears to be much faster than the common algorithm found
- // in the litterature (30 versus 39 flops). It also requires two
+ // in the literature (30 versus 39 flops). It also requires two
  // Vector3 as temporaries.
  Vector3 uv = this->vec().cross(v);
  uv += uv;
@ -584,7 +586,7 @@ inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Deri
  // which yields a singular value problem
  if (c < Scalar(-1)+NumTraits<Scalar>::dummy_precision())
  {
-   c = max<Scalar>(c,-1);
+   c = (max)(c,Scalar(-1));
    Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
    JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
    Vector3 axis = svd.matrixV().col(2);
@ -667,10 +669,10 @@ QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& oth
{
  using std::acos;
  using std::abs;
- double d = abs(this->dot(other));
- if (d>=1.0)
+ Scalar d = abs(this->dot(other));
+ if (d>=Scalar(1))
    return Scalar(0);
- return static_cast<Scalar>(2 * acos(d));
+ return Scalar(2) * acos(d);
}
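Editor's note: per the \warning added above, a quaternion should be normalized before it is interpreted as a rotation; a minimal sketch (not part of this diff):

    #include <Eigen/Geometry>
    using namespace Eigen;

    Quaterniond q(2.0, 0.0, 1.0, -1.0);   // arbitrary coefficients, not unit length
    q.normalize();                        // required before using q as a rotation
    Vector3d v(1.0, 0.0, 0.0);
    Vector3d rotated = q * v;             // uses the optimized _transformVector path
    double dist = q.angularDistance(Quaterniond::Identity());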

View File

@ -194,9 +194,9 @@ public:
  /** type of the matrix used to represent the linear part of the transformation */
  typedef Matrix<Scalar,Dim,Dim,Options> LinearMatrixType;
  /** type of read/write reference to the linear part of the transformation */
- typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact)> LinearPart;
+ typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> LinearPart;
  /** type of read reference to the linear part of the transformation */
- typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact)> ConstLinearPart;
+ typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> ConstLinearPart;
  /** type of read/write reference to the affine part of the transformation */
  typedef typename internal::conditional<int(Mode)==int(AffineCompact),
                                         MatrixType&,

View File

@ -113,7 +113,7 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
  const Index n = src.cols(); // number of measurements
  // required for demeaning ...
- const RealScalar one_over_n = 1 / static_cast<RealScalar>(n);
+ const RealScalar one_over_n = RealScalar(1) / static_cast<RealScalar>(n);
  // computation of mean
  const VectorType src_mean = src.rowwise().sum() * one_over_n;
@ -136,16 +136,16 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
  // Eq. (39)
  VectorType S = VectorType::Ones(m);
- if (sigma.determinant()<0) S(m-1) = -1;
+ if (sigma.determinant()<Scalar(0)) S(m-1) = Scalar(-1);
  // Eq. (40) and (43)
  const VectorType& d = svd.singularValues();
  Index rank = 0; for (Index i=0; i<m; ++i) if (!internal::isMuchSmallerThan(d.coeff(i),d.coeff(0))) ++rank;
  if (rank == m-1) {
-   if ( svd.matrixU().determinant() * svd.matrixV().determinant() > 0 ) {
+   if ( svd.matrixU().determinant() * svd.matrixV().determinant() > Scalar(0) ) {
      Rt.block(0,0,m,m).noalias() = svd.matrixU()*svd.matrixV().transpose();
    } else {
-     const Scalar s = S(m-1); S(m-1) = -1;
+     const Scalar s = S(m-1); S(m-1) = Scalar(-1);
      Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();
      S(m-1) = s;
    }
@ -156,7 +156,7 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
  if (with_scaling)
  {
    // Eq. (42)
-   const Scalar c = 1/src_var * svd.singularValues().dot(S);
+   const Scalar c = Scalar(1)/src_var * svd.singularValues().dot(S);
    // Eq. (41)
    Rt.col(m).head(m) = dst_mean;
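Editor's note: a small sketch (not part of this diff) of calling the Umeyama estimator touched above, assuming two 3xN point sets stored column-wise; the rotation, translation and scale used to build the second set are illustrative:

    #include <Eigen/Geometry>
    using namespace Eigen;

    Matrix3Xd src = Matrix3Xd::Random(3, 20);
    Matrix3d R = AngleAxisd(0.5, Vector3d::UnitZ()).toRotationMatrix();
    Vector3d t(1, 2, 3);
    Matrix3Xd dst = (2.0 * R * src).colwise() + t;    // scaled, rotated, translated copy
    Matrix4d estimated = umeyama(src, dst, true);     // recovers scale, rotation and translation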

View File

@ -48,7 +48,7 @@ void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vec
  typedef typename MatrixType::Index Index;
  enum { TFactorSize = MatrixType::ColsAtCompileTime };
  Index nbVecs = vectors.cols();
- Matrix<typename MatrixType::Scalar, TFactorSize, TFactorSize> T(nbVecs,nbVecs);
+ Matrix<typename MatrixType::Scalar, TFactorSize, TFactorSize, ColMajor> T(nbVecs,nbVecs);
  make_block_householder_triangular_factor(T, vectors, hCoeffs);
  const TriangularView<const VectorsType, UnitLower>& V(vectors);

View File

@ -61,6 +61,7 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
  VectorType s(n), t(n);
  RealScalar tol2 = tol*tol;
+ RealScalar eps2 = NumTraits<Scalar>::epsilon()*NumTraits<Scalar>::epsilon();
  int i = 0;
  int restarts = 0;
@ -69,7 +70,7 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
  Scalar rho_old = rho;
  rho = r0.dot(r);
- if (internal::isMuchSmallerThan(rho,r0_sqnorm))
+ if (abs(rho) < eps2*r0_sqnorm)
  {
    // The new residual vector became too orthogonal to the arbitrarily choosen direction r0
    // Let's restart with a new r0:
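Editor's note: a usage sketch (not part of this diff) for the solver modified above; the matrix fill is a placeholder:

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>
    using namespace Eigen;

    SparseMatrix<double> A(100, 100);
    A.setIdentity();                        // placeholder fill; a real application assembles A
    VectorXd b = VectorXd::Random(100);
    BiCGSTAB<SparseMatrix<double> > solver;
    solver.compute(A);
    VectorXd x = solver.solve(b);
    // solver.info(), solver.iterations() and solver.error() report convergence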

View File

@ -20,10 +20,11 @@ namespace Eigen {
  *
  * \param MatrixType the type of the matrix of which we are computing the LU decomposition
  *
- * This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A
- * is decomposed as A = PLUQ where L is unit-lower-triangular, U is upper-triangular, and P and Q
- * are permutation matrices. This is a rank-revealing LU decomposition. The eigenvalues (diagonal
- * coefficients) of U are sorted in such a way that any zeros are at the end.
+ * This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A is
+ * decomposed as \f$ A = P^{-1} L U Q^{-1} \f$ where L is unit-lower-triangular, U is
+ * upper-triangular, and P and Q are permutation matrices. This is a rank-revealing LU
+ * decomposition. The eigenvalues (diagonal coefficients) of U are sorted in such a way that any
+ * zeros are at the end.
  *
  * This decomposition provides the generic approach to solving systems of linear equations, computing
  * the rank, invertibility, inverse, kernel, and determinant.
@ -511,8 +512,8 @@ typename internal::traits<MatrixType>::Scalar FullPivLU<MatrixType>::determinant
}
/** \returns the matrix represented by the decomposition,
-   * i.e., it returns the product: P^{-1} L U Q^{-1}.
-   * This function is provided for debug purpose. */
+   * i.e., it returns the product: \f$ P^{-1} L U Q^{-1} \f$.
+   * This function is provided for debug purposes. */
template<typename MatrixType>
MatrixType FullPivLU<MatrixType>::reconstructedMatrix() const
{
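Editor's note: a usage sketch (not part of this diff) matching the updated FullPivLU documentation above:

    #include <Eigen/Dense>
    using namespace Eigen;

    MatrixXd A = MatrixXd::Random(4, 4);
    FullPivLU<MatrixXd> lu(A);
    int r = lu.rank();                         // rank-revealing
    VectorXd x = lu.solve(VectorXd::Random(4));
    MatrixXd Arec = lu.reconstructedMatrix();  // P^{-1} L U Q^{-1}, for debugging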

View File

@ -109,7 +109,7 @@ class NaturalOrdering
  * \class COLAMDOrdering
  *
  * Functor computing the \em column \em approximate \em minimum \em degree ordering
- * The matrix should be in column-major format
+ * The matrix should be in column-major and \b compressed format (see SparseMatrix::makeCompressed()).
  */
template<typename Index>
class COLAMDOrdering
@ -118,10 +118,14 @@ class COLAMDOrdering
  typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
  typedef Matrix<Index, Dynamic, 1> IndexVector;
- /** Compute the permutation vector form a sparse matrix */
+ /** Compute the permutation vector \a perm form the sparse matrix \a mat
+   * \warning The input sparse matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
+   */
  template <typename MatrixType>
  void operator() (const MatrixType& mat, PermutationType& perm)
  {
+   eigen_assert(mat.isCompressed() && "COLAMDOrdering requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to COLAMDOrdering");
    Index m = mat.rows();
    Index n = mat.cols();
    Index nnz = mat.nonZeros();
@ -132,12 +136,12 @@ class COLAMDOrdering
    Index stats [COLAMD_STATS];
    internal::colamd_set_defaults(knobs);
-   Index info;
    IndexVector p(n+1), A(Alen);
    for(Index i=0; i <= n; i++) p(i) = mat.outerIndexPtr()[i];
    for(Index i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i];
    // Call Colamd routine to compute the ordering
-   info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats);
+   Index info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats);
+   EIGEN_UNUSED_VARIABLE(info);
    eigen_assert( info && "COLAMD failed " );
    perm.resize(n);
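Editor's note: per the new assertion, the input must be compressed before the ordering is applied; a minimal sketch (not part of this diff) using the ordering through SparseQR (the solver choice and the identity fill are illustrative):

    #include <Eigen/Sparse>
    #include <Eigen/SparseQR>
    #include <Eigen/OrderingMethods>
    using namespace Eigen;

    SparseMatrix<double> A(50, 50);
    A.setIdentity();                                       // placeholder fill
    A.makeCompressed();                                    // required before COLAMDOrdering is used
    SparseQR<SparseMatrix<double>, COLAMDOrdering<int> > qr(A);
    VectorXd x = qr.solve(VectorXd::Ones(50));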

View File

@ -76,7 +76,8 @@ template<typename _MatrixType> class ColPivHouseholderQR
    m_colsTranspositions(),
    m_temp(),
    m_colSqNorms(),
-   m_isInitialized(false) {}
+   m_isInitialized(false),
+   m_usePrescribedThreshold(false) {}
/** \brief Default Constructor with memory preallocation
  *

View File

@ -375,17 +375,19 @@ struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
  Scalar z;
  JacobiRotation<Scalar> rot;
  RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
  if(n==0)
  {
    z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
    work_matrix.row(p) *= z;
    if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
    if(work_matrix.coeff(q,q)!=Scalar(0))
+   {
      z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
-   else
-     z = Scalar(0);
      work_matrix.row(q) *= z;
      if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+   }
+   // otherwise the second row is already zero, so we have nothing to do.
  }
  else
  {
@ -415,6 +417,7 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
  JacobiRotation<RealScalar> *j_right)
{
  using std::sqrt;
+ using std::abs;
  Matrix<RealScalar,2,2> m;
  m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
       numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
@ -428,9 +431,11 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
  }
  else
  {
-   RealScalar u = d / t;
-   rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + numext::abs2(u));
-   rot1.s() = rot1.c() * u;
+   RealScalar t2d2 = numext::hypot(t,d);
+   rot1.c() = abs(t)/t2d2;
+   rot1.s() = d/t2d2;
+   if(t<RealScalar(0))
+     rot1.s() = -rot1.s();
  }
  m.applyOnTheLeft(0,1,rot1);
  j_right->makeJacobi(m,0,1);
@ -531,8 +536,9 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
  JacobiSVD()
    : m_isInitialized(false),
      m_isAllocated(false),
+     m_usePrescribedThreshold(false),
      m_computationOptions(0),
-     m_rows(-1), m_cols(-1)
+     m_rows(-1), m_cols(-1), m_diagSize(0)
  {}
@ -545,6 +551,7 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
  JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
    : m_isInitialized(false),
      m_isAllocated(false),
+     m_usePrescribedThreshold(false),
      m_computationOptions(0),
      m_rows(-1), m_cols(-1)
  {
@ -564,6 +571,7 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
  JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
    : m_isInitialized(false),
      m_isAllocated(false),
+     m_usePrescribedThreshold(false),
      m_computationOptions(0),
      m_rows(-1), m_cols(-1)
  {
@ -665,6 +673,69 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
    eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
    return m_nonzeroSingularValues;
  }
/** \returns the rank of the matrix of which \c *this is the SVD.
*
* \note This method has to determine which singular values should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline Index rank() const
{
using std::abs;
eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
if(m_singularValues.size()==0) return 0;
RealScalar premultiplied_threshold = m_singularValues.coeff(0) * threshold();
Index i = m_nonzeroSingularValues-1;
while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
return i+1;
}
/** Allows to prescribe a threshold to be used by certain methods, such as rank() and solve(),
* which need to determine when singular values are to be considered nonzero.
* This is not used for the SVD decomposition itself.
*
* When it needs to get the threshold value, Eigen calls threshold().
* The default is \c NumTraits<Scalar>::epsilon() times the diagonal size of the matrix (see threshold()).
*
* \param threshold The new value to use as the threshold.
*
* A singular value will be considered nonzero if its value is strictly greater than
*  \f$ threshold \times \vert max singular value \vert \f$.
*
* If you want to come back to the default behavior, call setThreshold(Default_t)
*/
JacobiSVD& setThreshold(const RealScalar& threshold)
{
m_usePrescribedThreshold = true;
m_prescribedThreshold = threshold;
return *this;
}
/** Allows to come back to the default behavior, letting Eigen use its default formula for
* determining the threshold.
*
* You should pass the special object Eigen::Default as parameter here.
* \code svd.setThreshold(Eigen::Default); \endcode
*
* See the documentation of setThreshold(const RealScalar&).
*/
JacobiSVD& setThreshold(Default_t)
{
m_usePrescribedThreshold = false;
return *this;
}
/** Returns the threshold that will be used by certain methods such as rank().
*
* See the documentation of setThreshold(const RealScalar&).
*/
RealScalar threshold() const
{
eigen_assert(m_isInitialized || m_usePrescribedThreshold);
return m_usePrescribedThreshold ? m_prescribedThreshold
: (std::max<Index>)(1,m_diagSize)*NumTraits<Scalar>::epsilon();
}
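As a usage sketch of the rank()/setThreshold() API added above (not part of the patch; the matrix construction and the 1e-8 value are illustrative assumptions):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  // Rank-deficient 4x3 matrix: the third column is the sum of the first two.
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 2);
  Eigen::MatrixXd M(4, 3);
  M << A.col(0), A.col(1), A.col(0) + A.col(1);

  Eigen::JacobiSVD<Eigen::MatrixXd> svd(M, Eigen::ComputeThinU | Eigen::ComputeThinV);
  svd.setThreshold(1e-8);                       // prescribe the rank-detection threshold
  std::cout << "rank: " << svd.rank() << "\n";  // expected: 2 for this construction
  svd.setThreshold(Eigen::Default);             // restore the default formula
  return 0;
}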
inline Index rows() const { return m_rows; } inline Index rows() const { return m_rows; }
inline Index cols() const { return m_cols; } inline Index cols() const { return m_cols; }
@ -677,11 +748,12 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
MatrixVType m_matrixV; MatrixVType m_matrixV;
SingularValuesType m_singularValues; SingularValuesType m_singularValues;
WorkMatrixType m_workMatrix; WorkMatrixType m_workMatrix;
bool m_isInitialized, m_isAllocated; bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold;
bool m_computeFullU, m_computeThinU; bool m_computeFullU, m_computeThinU;
bool m_computeFullV, m_computeThinV; bool m_computeFullV, m_computeThinV;
unsigned int m_computationOptions; unsigned int m_computationOptions;
Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize; Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
RealScalar m_prescribedThreshold;
template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex> template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
friend struct internal::svd_precondition_2x2_block_to_be_real; friend struct internal::svd_precondition_2x2_block_to_be_real;
@ -764,6 +836,11 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols); if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols);
if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize); if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize);
} }
// Scaling factor to reduce over/under-flows
RealScalar scale = m_workMatrix.cwiseAbs().maxCoeff();
if(scale==RealScalar(0)) scale = RealScalar(1);
m_workMatrix /= scale;
/*** step 2. The main Jacobi SVD iteration. ***/ /*** step 2. The main Jacobi SVD iteration. ***/
@ -833,6 +910,8 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i)); if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i));
} }
} }
m_singularValues *= scale;
m_isInitialized = true; m_isInitialized = true;
return *this; return *this;
@ -854,11 +933,11 @@ struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
// So A^{-1} = V S^{-1} U^* // So A^{-1} = V S^{-1} U^*
Matrix<Scalar, Dynamic, Rhs::ColsAtCompileTime, 0, _MatrixType::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime> tmp; Matrix<Scalar, Dynamic, Rhs::ColsAtCompileTime, 0, _MatrixType::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime> tmp;
Index nonzeroSingVals = dec().nonzeroSingularValues(); Index rank = dec().rank();
tmp.noalias() = dec().matrixU().leftCols(nonzeroSingVals).adjoint() * rhs(); tmp.noalias() = dec().matrixU().leftCols(rank).adjoint() * rhs();
tmp = dec().singularValues().head(nonzeroSingVals).asDiagonal().inverse() * tmp; tmp = dec().singularValues().head(rank).asDiagonal().inverse() * tmp;
dst = dec().matrixV().leftCols(nonzeroSingVals) * tmp; dst = dec().matrixV().leftCols(rank) * tmp;
} }
}; };
} // end namespace internal } // end namespace internal
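The user-facing effect of the solve_retval change above is that svd.solve() now applies A^{-1} = V S^{-1} U^* truncated at the numerical rank. A minimal sketch of the equivalent call site (function name and types are illustrative):

#include <Eigen/Dense>

// x minimizes |M*x - b|; columns beyond rank() are ignored, so for a
// rank-deficient M the returned solution is the minimal-norm minimizer.
Eigen::VectorXd leastSquares(const Eigen::MatrixXd& M, const Eigen::VectorXd& b)
{
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(M, Eigen::ComputeThinU | Eigen::ComputeThinV);
  return svd.solve(b);
}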
@ -37,6 +37,7 @@ class SimplicialCholeskyBase : internal::noncopyable
{ {
public: public:
typedef typename internal::traits<Derived>::MatrixType MatrixType; typedef typename internal::traits<Derived>::MatrixType MatrixType;
typedef typename internal::traits<Derived>::OrderingType OrderingType;
enum { UpLo = internal::traits<Derived>::UpLo }; enum { UpLo = internal::traits<Derived>::UpLo };
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::RealScalar RealScalar;
@ -240,15 +241,16 @@ class SimplicialCholeskyBase : internal::noncopyable
RealScalar m_shiftScale; RealScalar m_shiftScale;
}; };
template<typename _MatrixType, int _UpLo = Lower> class SimplicialLLT; template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialLLT;
template<typename _MatrixType, int _UpLo = Lower> class SimplicialLDLT; template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialLDLT;
template<typename _MatrixType, int _UpLo = Lower> class SimplicialCholesky; template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialCholesky;
namespace internal { namespace internal {
template<typename _MatrixType, int _UpLo> struct traits<SimplicialLLT<_MatrixType,_UpLo> > template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<SimplicialLLT<_MatrixType,_UpLo,_Ordering> >
{ {
typedef _MatrixType MatrixType; typedef _MatrixType MatrixType;
typedef _Ordering OrderingType;
enum { UpLo = _UpLo }; enum { UpLo = _UpLo };
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index; typedef typename MatrixType::Index Index;
@ -259,9 +261,10 @@ template<typename _MatrixType, int _UpLo> struct traits<SimplicialLLT<_MatrixTyp
static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); } static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); }
}; };
template<typename _MatrixType,int _UpLo> struct traits<SimplicialLDLT<_MatrixType,_UpLo> > template<typename _MatrixType,int _UpLo, typename _Ordering> struct traits<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> >
{ {
typedef _MatrixType MatrixType; typedef _MatrixType MatrixType;
typedef _Ordering OrderingType;
enum { UpLo = _UpLo }; enum { UpLo = _UpLo };
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index; typedef typename MatrixType::Index Index;
@ -272,9 +275,10 @@ template<typename _MatrixType,int _UpLo> struct traits<SimplicialLDLT<_MatrixTyp
static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); } static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); }
}; };
template<typename _MatrixType, int _UpLo> struct traits<SimplicialCholesky<_MatrixType,_UpLo> > template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> >
{ {
typedef _MatrixType MatrixType; typedef _MatrixType MatrixType;
typedef _Ordering OrderingType;
enum { UpLo = _UpLo }; enum { UpLo = _UpLo };
}; };
@ -294,11 +298,12 @@ template<typename _MatrixType, int _UpLo> struct traits<SimplicialCholesky<_Matr
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
* or Upper. Default is Lower. * or Upper. Default is Lower.
* \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<>
* *
* \sa class SimplicialLDLT * \sa class SimplicialLDLT, class AMDOrdering, class NaturalOrdering
*/ */
template<typename _MatrixType, int _UpLo> template<typename _MatrixType, int _UpLo, typename _Ordering>
class SimplicialLLT : public SimplicialCholeskyBase<SimplicialLLT<_MatrixType,_UpLo> > class SimplicialLLT : public SimplicialCholeskyBase<SimplicialLLT<_MatrixType,_UpLo,_Ordering> >
{ {
public: public:
typedef _MatrixType MatrixType; typedef _MatrixType MatrixType;
@ -382,11 +387,12 @@ public:
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
* or Upper. Default is Lower. * or Upper. Default is Lower.
* \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<>
* *
* \sa class SimplicialLLT * \sa class SimplicialLLT, class AMDOrdering, class NaturalOrdering
*/ */
template<typename _MatrixType, int _UpLo> template<typename _MatrixType, int _UpLo, typename _Ordering>
class SimplicialLDLT : public SimplicialCholeskyBase<SimplicialLDLT<_MatrixType,_UpLo> > class SimplicialLDLT : public SimplicialCholeskyBase<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> >
{ {
public: public:
typedef _MatrixType MatrixType; typedef _MatrixType MatrixType;
@ -467,8 +473,8 @@ public:
* *
* \sa class SimplicialLDLT, class SimplicialLLT * \sa class SimplicialLDLT, class SimplicialLLT
*/ */
template<typename _MatrixType, int _UpLo> template<typename _MatrixType, int _UpLo, typename _Ordering>
class SimplicialCholesky : public SimplicialCholeskyBase<SimplicialCholesky<_MatrixType,_UpLo> > class SimplicialCholesky : public SimplicialCholeskyBase<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> >
{ {
public: public:
typedef _MatrixType MatrixType; typedef _MatrixType MatrixType;
@ -612,15 +618,13 @@ void SimplicialCholeskyBase<Derived>::ordering(const MatrixType& a, CholMatrixTy
{ {
eigen_assert(a.rows()==a.cols()); eigen_assert(a.rows()==a.cols());
const Index size = a.rows(); const Index size = a.rows();
// TODO allows to configure the permutation
// Note that amd compute the inverse permutation // Note that amd compute the inverse permutation
{ {
CholMatrixType C; CholMatrixType C;
C = a.template selfadjointView<UpLo>(); C = a.template selfadjointView<UpLo>();
// remove diagonal entries:
// seems not to be needed OrderingType ordering;
// C.prune(keep_diag()); ordering(C,m_Pinv);
internal::minimum_degree_ordering(C, m_Pinv);
} }
if(m_Pinv.size()>0) if(m_Pinv.size()>0)
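A brief sketch of how the new _Ordering template parameter is selected from user code (the SpMat typedef and function name are illustrative, not from the patch):

#include <Eigen/SparseCholesky>
#include <Eigen/OrderingMethods>

typedef Eigen::SparseMatrix<double> SpMat;

void factorizeBoth(const SpMat& A)
{
  // Default behaviour: AMD fill-reducing ordering, as before this change.
  Eigen::SimplicialLDLT<SpMat> ldltAmd;
  ldltAmd.compute(A);

  // New: select the ordering via the third template parameter.
  Eigen::SimplicialLDLT<SpMat, Eigen::Lower, Eigen::NaturalOrdering<SpMat::Index> > ldltNatural;
  ldltNatural.compute(A);
}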
@ -51,8 +51,8 @@ class CompressedStorage
CompressedStorage& operator=(const CompressedStorage& other) CompressedStorage& operator=(const CompressedStorage& other)
{ {
resize(other.size()); resize(other.size());
memcpy(m_values, other.m_values, m_size * sizeof(Scalar)); internal::smart_copy(other.m_values, other.m_values + m_size, m_values);
memcpy(m_indices, other.m_indices, m_size * sizeof(Index)); internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices);
return *this; return *this;
} }
@ -83,10 +83,10 @@ class CompressedStorage
reallocate(m_size); reallocate(m_size);
} }
void resize(size_t size, float reserveSizeFactor = 0) void resize(size_t size, double reserveSizeFactor = 0)
{ {
if (m_allocatedSize<size) if (m_allocatedSize<size)
reallocate(size + size_t(reserveSizeFactor*size)); reallocate(size + size_t(reserveSizeFactor*double(size)));
m_size = size; m_size = size;
} }
@ -73,7 +73,8 @@ class CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator
typedef internal::sparse_cwise_binary_op_inner_iterator_selector< typedef internal::sparse_cwise_binary_op_inner_iterator_selector<
BinaryOp,Lhs,Rhs, InnerIterator> Base; BinaryOp,Lhs,Rhs, InnerIterator> Base;
EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, Index outer) // NOTE: we have to prefix Index by "typename Lhs::" to avoid an ICE with VC11
EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, typename Lhs::Index outer)
: Base(binOp.derived(),outer) : Base(binOp.derived(),outer)
{} {}
}; };
@ -19,7 +19,10 @@ template<typename Lhs, typename Rhs, int InnerSize> struct SparseDenseProductRet
template<typename Lhs, typename Rhs> struct SparseDenseProductReturnType<Lhs,Rhs,1> template<typename Lhs, typename Rhs> struct SparseDenseProductReturnType<Lhs,Rhs,1>
{ {
typedef SparseDenseOuterProduct<Lhs,Rhs,false> Type; typedef typename internal::conditional<
Lhs::IsRowMajor,
SparseDenseOuterProduct<Rhs,Lhs,true>,
SparseDenseOuterProduct<Lhs,Rhs,false> >::type Type;
}; };
template<typename Lhs, typename Rhs, int InnerSize> struct DenseSparseProductReturnType template<typename Lhs, typename Rhs, int InnerSize> struct DenseSparseProductReturnType
@ -29,7 +32,10 @@ template<typename Lhs, typename Rhs, int InnerSize> struct DenseSparseProductRet
template<typename Lhs, typename Rhs> struct DenseSparseProductReturnType<Lhs,Rhs,1> template<typename Lhs, typename Rhs> struct DenseSparseProductReturnType<Lhs,Rhs,1>
{ {
typedef SparseDenseOuterProduct<Rhs,Lhs,true> Type; typedef typename internal::conditional<
Rhs::IsRowMajor,
SparseDenseOuterProduct<Rhs,Lhs,true>,
SparseDenseOuterProduct<Lhs,Rhs,false> >::type Type;
}; };
namespace internal { namespace internal {
@ -114,17 +120,30 @@ class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNes
typedef typename SparseDenseOuterProduct::Index Index; typedef typename SparseDenseOuterProduct::Index Index;
public: public:
EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer) EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer)
: Base(prod.lhs(), 0), m_outer(outer), m_factor(prod.rhs().coeff(outer)) : Base(prod.lhs(), 0), m_outer(outer), m_factor(get(prod.rhs(), outer, typename internal::traits<Rhs>::StorageKind() ))
{ { }
}
inline Index outer() const { return m_outer; } inline Index outer() const { return m_outer; }
inline Index row() const { return Transpose ? Base::row() : m_outer; } inline Index row() const { return Transpose ? m_outer : Base::index(); }
inline Index col() const { return Transpose ? m_outer : Base::row(); } inline Index col() const { return Transpose ? Base::index() : m_outer; }
inline Scalar value() const { return Base::value() * m_factor; } inline Scalar value() const { return Base::value() * m_factor; }
protected: protected:
static Scalar get(const _RhsNested &rhs, Index outer, Dense = Dense())
{
return rhs.coeff(outer);
}
static Scalar get(const _RhsNested &rhs, Index outer, Sparse = Sparse())
{
typename Traits::_RhsNested::InnerIterator it(rhs, outer);
if (it && it.index()==0)
return it.value();
return Scalar(0);
}
Index m_outer; Index m_outer;
Scalar m_factor; Scalar m_factor;
}; };
@ -940,7 +940,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
enum { IsRowMajor = SparseMatrixType::IsRowMajor }; enum { IsRowMajor = SparseMatrixType::IsRowMajor };
typedef typename SparseMatrixType::Scalar Scalar; typedef typename SparseMatrixType::Scalar Scalar;
typedef typename SparseMatrixType::Index Index; typedef typename SparseMatrixType::Index Index;
SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols()); SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,Index> trMat(mat.rows(),mat.cols());
if(begin!=end) if(begin!=end)
{ {
@ -1178,7 +1178,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse
size_t p = m_outerIndex[outer+1]; size_t p = m_outerIndex[outer+1];
++m_outerIndex[outer+1]; ++m_outerIndex[outer+1];
float reallocRatio = 1; double reallocRatio = 1;
if (m_data.allocatedSize()<=m_data.size()) if (m_data.allocatedSize()<=m_data.size())
{ {
// if there is no preallocated memory, let's reserve a minimum of 32 elements // if there is no preallocated memory, let's reserve a minimum of 32 elements
@ -1190,13 +1190,13 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse
{ {
// we need to reallocate the data, to reduce multiple reallocations // we need to reallocate the data, to reduce multiple reallocations
// we use a smart resize algorithm based on the current filling ratio // we use a smart resize algorithm based on the current filling ratio
// in addition, we use float to avoid integers overflows // in addition, we use double to avoid integers overflows
float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1); double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size()); reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
// furthermore we bound the realloc ratio to: // furthermore we bound the realloc ratio to:
// 1) reduce multiple minor realloc when the matrix is almost filled // 1) reduce multiple minor realloc when the matrix is almost filled
// 2) avoid to allocate too much memory when the matrix is almost empty // 2) avoid to allocate too much memory when the matrix is almost empty
reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f); reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
} }
} }
m_data.resize(m_data.size()+1,reallocRatio); m_data.resize(m_data.size()+1,reallocRatio);
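The realloc heuristic above only triggers when an insertion overflows the reserved storage; pre-reserving a per-column estimate avoids that path entirely. A small sketch (the estimate of 6 nonzeros per column is an arbitrary assumption):

#include <Eigen/Sparse>
#include <algorithm>

Eigen::SparseMatrix<double> buildBanded(int rows, int cols)
{
  Eigen::SparseMatrix<double> m(rows, cols);
  m.reserve(Eigen::VectorXi::Constant(cols, 6));  // rough upper bound on nonzeros per column
  for (int j = 0; j < cols; ++j)
    for (int i = std::max(0, j - 2); i <= std::min(rows - 1, j + 2); ++i)
      m.insert(i, j) = 1.0;  // if the estimate is too small, the realloc heuristic above kicks in
  m.makeCompressed();
  return m;
}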
@ -26,7 +26,7 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); } inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); }
}; };
// NOTE: VC10 trigger an ICE if don't put typename TransposeImpl<MatrixType,Sparse>:: in front of Index, // NOTE: VC10 and VC11 trigger an ICE if don't put typename TransposeImpl<MatrixType,Sparse>:: in front of Index,
// a typedef typename TransposeImpl<MatrixType,Sparse>::Index Index; // a typedef typename TransposeImpl<MatrixType,Sparse>::Index Index;
// does not fix the issue. // does not fix the issue.
// An alternative is to define the nested class in the parent class itself. // An alternative is to define the nested class in the parent class itself.
@ -40,8 +40,8 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::InnerItera
EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl<MatrixType,Sparse>::Index outer) EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl<MatrixType,Sparse>::Index outer)
: Base(trans.derived().nestedExpression(), outer) : Base(trans.derived().nestedExpression(), outer)
{} {}
Index row() const { return Base::col(); } typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
Index col() const { return Base::row(); } typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
}; };
template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator
@ -54,8 +54,8 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInn
EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl<MatrixType,Sparse>::Index outer) EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl<MatrixType,Sparse>::Index outer)
: Base(xpr.derived().nestedExpression(), outer) : Base(xpr.derived().nestedExpression(), outer)
{} {}
Index row() const { return Base::col(); } typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
Index col() const { return Base::row(); } typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
}; };
} // end namespace Eigen } // end namespace Eigen
@ -84,8 +84,10 @@ template<typename Lhs, typename Rhs> class DenseTimeSparseProduct;
template<typename Lhs, typename Rhs, bool Transpose> class SparseDenseOuterProduct; template<typename Lhs, typename Rhs, bool Transpose> class SparseDenseOuterProduct;
template<typename Lhs, typename Rhs> struct SparseSparseProductReturnType; template<typename Lhs, typename Rhs> struct SparseSparseProductReturnType;
template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct DenseSparseProductReturnType; template<typename Lhs, typename Rhs,
template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct SparseDenseProductReturnType; int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct DenseSparseProductReturnType;
template<typename Lhs, typename Rhs,
int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct SparseDenseProductReturnType;
template<typename MatrixType,int UpLo> class SparseSymmetricPermutationProduct; template<typename MatrixType,int UpLo> class SparseSymmetricPermutationProduct;
namespace internal { namespace internal {
@ -2,7 +2,7 @@
// for linear algebra. // for linear algebra.
// //
// Copyright (C) 2012-2013 Desire Nuentsa <desire.nuentsa_wakam@inria.fr> // Copyright (C) 2012-2013 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
// Copyright (C) 2012-2013 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2012-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// //
// This Source Code Form is subject to the terms of the Mozilla // This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed // Public License v. 2.0. If a copy of the MPL was not distributed
@ -58,6 +58,7 @@ namespace internal {
* \tparam _OrderingType The fill-reducing ordering method. See the \link OrderingMethods_Module * \tparam _OrderingType The fill-reducing ordering method. See the \link OrderingMethods_Module
* OrderingMethods \endlink module for the list of built-in and external ordering methods. * OrderingMethods \endlink module for the list of built-in and external ordering methods.
* *
* \warning The input sparse matrix A must be in compressed mode (see SparseMatrix::makeCompressed()).
* *
*/ */
template<typename _MatrixType, typename _OrderingType> template<typename _MatrixType, typename _OrderingType>
@ -77,10 +78,23 @@ class SparseQR
SparseQR () : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false) SparseQR () : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false)
{ } { }
/** Construct a QR factorization of the matrix \a mat.
*
* \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
*
* \sa compute()
*/
SparseQR(const MatrixType& mat) : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false) SparseQR(const MatrixType& mat) : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false)
{ {
compute(mat); compute(mat);
} }
/** Computes the QR factorization of the sparse matrix \a mat.
*
* \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
*
* \sa analyzePattern(), factorize()
*/
void compute(const MatrixType& mat) void compute(const MatrixType& mat)
{ {
analyzePattern(mat); analyzePattern(mat);
@ -166,7 +180,7 @@ class SparseQR
y.bottomRows(y.rows()-rank).setZero(); y.bottomRows(y.rows()-rank).setZero();
// Apply the column permutation // Apply the column permutation
if (m_perm_c.size()) dest.topRows(cols()) = colsPermutation() * y.topRows(cols()); if (m_perm_c.size()) dest = colsPermutation() * y.topRows(cols());
else dest = y.topRows(cols()); else dest = y.topRows(cols());
m_info = Success; m_info = Success;
@ -206,7 +220,7 @@ class SparseQR
/** \brief Reports whether previous computation was successful. /** \brief Reports whether previous computation was successful.
* *
* \returns \c Success if computation was succesful, * \returns \c Success if computation was successful,
* \c NumericalIssue if the QR factorization reports a numerical problem * \c NumericalIssue if the QR factorization reports a numerical problem
* \c InvalidInput if the input matrix is invalid * \c InvalidInput if the input matrix is invalid
* *
@ -255,20 +269,24 @@ class SparseQR
}; };
/** \brief Preprocessing step of a QR factorization /** \brief Preprocessing step of a QR factorization
*
* \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
* *
* In this step, the fill-reducing permutation is computed and applied to the columns of A * In this step, the fill-reducing permutation is computed and applied to the columns of A
* and the column elimination tree is computed as well. Only the sparcity pattern of \a mat is exploited. * and the column elimination tree is computed as well. Only the sparsity pattern of \a mat is exploited.
* *
* \note In this step it is assumed that there is no empty row in the matrix \a mat. * \note In this step it is assumed that there is no empty row in the matrix \a mat.
*/ */
template <typename MatrixType, typename OrderingType> template <typename MatrixType, typename OrderingType>
void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat) void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
{ {
eigen_assert(mat.isCompressed() && "SparseQR requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to SparseQR");
// Compute the column fill reducing ordering // Compute the column fill reducing ordering
OrderingType ord; OrderingType ord;
ord(mat, m_perm_c); ord(mat, m_perm_c);
Index n = mat.cols(); Index n = mat.cols();
Index m = mat.rows(); Index m = mat.rows();
Index diagSize = (std::min)(m,n);
if (!m_perm_c.size()) if (!m_perm_c.size())
{ {
@ -280,20 +298,20 @@ void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
m_outputPerm_c = m_perm_c.inverse(); m_outputPerm_c = m_perm_c.inverse();
internal::coletree(mat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data()); internal::coletree(mat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
m_R.resize(n, n); m_R.resize(m, n);
m_Q.resize(m, n); m_Q.resize(m, diagSize);
// Allocate space for nonzero elements : rough estimation // Allocate space for nonzero elements : rough estimation
m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree
m_Q.reserve(2*mat.nonZeros()); m_Q.reserve(2*mat.nonZeros());
m_hcoeffs.resize(n); m_hcoeffs.resize(diagSize);
m_analysisIsok = true; m_analysisIsok = true;
} }
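A usage sketch of the compressed-mode requirement documented and asserted above (names are illustrative; COLAMDOrdering is just one valid choice of OrderingType):

#include <Eigen/SparseQR>
#include <Eigen/OrderingMethods>

typedef Eigen::SparseMatrix<double> SpMat;

Eigen::VectorXd solveSparseLeastSquares(SpMat A, const Eigen::VectorXd& b)
{
  A.makeCompressed();  // SparseQR now asserts that its input is in compressed mode
  Eigen::SparseQR<SpMat, Eigen::COLAMDOrdering<int> > qr(A);
  return qr.solve(b);
}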
/** \brief Performs the numerical QR factorization of the input matrix /** \brief Performs the numerical QR factorization of the input matrix
* *
* The function SparseQR::analyzePattern(const MatrixType&) must have been called beforehand with * The function SparseQR::analyzePattern(const MatrixType&) must have been called beforehand with
* a matrix having the same sparcity pattern than \a mat. * a matrix having the same sparsity pattern as \a mat.
* *
* \param mat The sparse column-major matrix * \param mat The sparse column-major matrix
*/ */
@ -306,11 +324,12 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step"); eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step");
Index m = mat.rows(); Index m = mat.rows();
Index n = mat.cols(); Index n = mat.cols();
IndexVector mark(m); mark.setConstant(-1); // Record the visited nodes Index diagSize = (std::min)(m,n);
IndexVector Ridx(n), Qidx(m); // Store temporarily the row indexes for the current column of R and Q IndexVector mark((std::max)(m,n)); mark.setConstant(-1); // Record the visited nodes
Index nzcolR, nzcolQ; // Number of nonzero for the current column of R and Q IndexVector Ridx(n), Qidx(m); // Store temporarily the row indexes for the current column of R and Q
ScalarVector tval(m); // The dense vector used to compute the current column Index nzcolR, nzcolQ; // Number of nonzero for the current column of R and Q
bool found_diag; ScalarVector tval(m); // The dense vector used to compute the current column
RealScalar pivotThreshold = m_threshold;
m_pmat = mat; m_pmat = mat;
m_pmat.uncompress(); // To have the innerNonZeroPtr allocated m_pmat.uncompress(); // To have the innerNonZeroPtr allocated
@ -322,7 +341,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
m_pmat.innerNonZeroPtr()[p] = mat.outerIndexPtr()[i+1] - mat.outerIndexPtr()[i]; m_pmat.innerNonZeroPtr()[p] = mat.outerIndexPtr()[i+1] - mat.outerIndexPtr()[i];
} }
/* Compute the default threshold, see : /* Compute the default threshold as in MatLab, see:
* Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
* Sparse QR Factorization, ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3 * Sparse QR Factorization, ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3
*/ */
@ -330,24 +349,24 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
{ {
RealScalar max2Norm = 0.0; RealScalar max2Norm = 0.0;
for (int j = 0; j < n; j++) max2Norm = (max)(max2Norm, m_pmat.col(j).norm()); for (int j = 0; j < n; j++) max2Norm = (max)(max2Norm, m_pmat.col(j).norm());
m_threshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon(); pivotThreshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon();
} }
// Initialize the numerical permutation // Initialize the numerical permutation
m_pivotperm.setIdentity(n); m_pivotperm.setIdentity(n);
Index nonzeroCol = 0; // Record the number of valid pivots Index nonzeroCol = 0; // Record the number of valid pivots
m_Q.startVec(0);
// Left looking rank-revealing QR factorization: compute a column of R and Q at a time // Left looking rank-revealing QR factorization: compute a column of R and Q at a time
for (Index col = 0; col < (std::min)(n,m); ++col) for (Index col = 0; col < n; ++col)
{ {
mark.setConstant(-1); mark.setConstant(-1);
m_R.startVec(col); m_R.startVec(col);
m_Q.startVec(col);
mark(nonzeroCol) = col; mark(nonzeroCol) = col;
Qidx(0) = nonzeroCol; Qidx(0) = nonzeroCol;
nzcolR = 0; nzcolQ = 1; nzcolR = 0; nzcolQ = 1;
found_diag = col>=m; bool found_diag = nonzeroCol>=m;
tval.setZero(); tval.setZero();
// Symbolic factorization: find the nonzero locations of the column k of the factors R and Q, i.e., // Symbolic factorization: find the nonzero locations of the column k of the factors R and Q, i.e.,
@ -356,7 +375,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
// thus the trick with found_diag that permits to do one more iteration on the diagonal element if this one has not been found. // thus the trick with found_diag that permits to do one more iteration on the diagonal element if this one has not been found.
for (typename MatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp) for (typename MatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp)
{ {
Index curIdx = nonzeroCol ; Index curIdx = nonzeroCol;
if(itp) curIdx = itp.row(); if(itp) curIdx = itp.row();
if(curIdx == nonzeroCol) found_diag = true; if(curIdx == nonzeroCol) found_diag = true;
@ -398,7 +417,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
// Browse all the indexes of R(:,col) in reverse order // Browse all the indexes of R(:,col) in reverse order
for (Index i = nzcolR-1; i >= 0; i--) for (Index i = nzcolR-1; i >= 0; i--)
{ {
Index curIdx = m_pivotperm.indices()(Ridx(i)); Index curIdx = Ridx(i);
// Apply the curIdx-th householder vector to the current column (temporarily stored into tval) // Apply the curIdx-th householder vector to the current column (temporarily stored into tval)
Scalar tdot(0); Scalar tdot(0);
@ -427,33 +446,37 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
} }
} }
} // End update current column } // End update current column
// Compute the Householder reflection that eliminate the current column
// FIXME this step should call the Householder module.
Scalar tau; Scalar tau;
RealScalar beta; RealScalar beta = 0;
Scalar c0 = nzcolQ ? tval(Qidx(0)) : Scalar(0);
// First, the squared norm of Q((col+1):m, col) if(nonzeroCol < diagSize)
RealScalar sqrNorm = 0.;
for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq)));
if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0))
{ {
tau = RealScalar(0); // Compute the Householder reflection that eliminate the current column
beta = numext::real(c0); // FIXME this step should call the Householder module.
tval(Qidx(0)) = 1; Scalar c0 = nzcolQ ? tval(Qidx(0)) : Scalar(0);
}
else // First, the squared norm of Q((col+1):m, col)
{ RealScalar sqrNorm = 0.;
beta = std::sqrt(numext::abs2(c0) + sqrNorm); for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq)));
if(numext::real(c0) >= RealScalar(0)) if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0))
beta = -beta; {
tval(Qidx(0)) = 1; tau = RealScalar(0);
for (Index itq = 1; itq < nzcolQ; ++itq) beta = numext::real(c0);
tval(Qidx(itq)) /= (c0 - beta); tval(Qidx(0)) = 1;
tau = numext::conj((beta-c0) / beta); }
else
{
using std::sqrt;
beta = sqrt(numext::abs2(c0) + sqrNorm);
if(numext::real(c0) >= RealScalar(0))
beta = -beta;
tval(Qidx(0)) = 1;
for (Index itq = 1; itq < nzcolQ; ++itq)
tval(Qidx(itq)) /= (c0 - beta);
tau = numext::conj((beta-c0) / beta);
}
} }
// Insert values in R // Insert values in R
@ -467,24 +490,25 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
} }
} }
if(abs(beta) >= m_threshold) if(nonzeroCol < diagSize && abs(beta) >= pivotThreshold)
{ {
m_R.insertBackByOuterInner(col, nonzeroCol) = beta; m_R.insertBackByOuterInner(col, nonzeroCol) = beta;
nonzeroCol++;
// The householder coefficient // The householder coefficient
m_hcoeffs(col) = tau; m_hcoeffs(nonzeroCol) = tau;
// Record the householder reflections // Record the householder reflections
for (Index itq = 0; itq < nzcolQ; ++itq) for (Index itq = 0; itq < nzcolQ; ++itq)
{ {
Index iQ = Qidx(itq); Index iQ = Qidx(itq);
m_Q.insertBackByOuterInnerUnordered(col,iQ) = tval(iQ); m_Q.insertBackByOuterInnerUnordered(nonzeroCol,iQ) = tval(iQ);
tval(iQ) = Scalar(0.); tval(iQ) = Scalar(0.);
} }
nonzeroCol++;
if(nonzeroCol<diagSize)
m_Q.startVec(nonzeroCol);
} }
else else
{ {
// Zero pivot found: move implicitly this column to the end // Zero pivot found: move implicitly this column to the end
m_hcoeffs(col) = Scalar(0);
for (Index j = nonzeroCol; j < n-1; j++) for (Index j = nonzeroCol; j < n-1; j++)
std::swap(m_pivotperm.indices()(j), m_pivotperm.indices()[j+1]); std::swap(m_pivotperm.indices()(j), m_pivotperm.indices()[j+1]);
@ -493,6 +517,8 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
} }
} }
m_hcoeffs.tail(diagSize-nonzeroCol).setZero();
// Finalize the column pointers of the sparse matrices R and Q // Finalize the column pointers of the sparse matrices R and Q
m_Q.finalize(); m_Q.finalize();
m_Q.makeCompressed(); m_Q.makeCompressed();
@ -561,14 +587,16 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
template<typename DesType> template<typename DesType>
void evalTo(DesType& res) const void evalTo(DesType& res) const
{ {
Index m = m_qr.rows();
Index n = m_qr.cols(); Index n = m_qr.cols();
Index diagSize = (std::min)(m,n);
res = m_other; res = m_other;
if (m_transpose) if (m_transpose)
{ {
eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes"); eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes");
//Compute res = Q' * other column by column //Compute res = Q' * other column by column
for(Index j = 0; j < res.cols(); j++){ for(Index j = 0; j < res.cols(); j++){
for (Index k = 0; k < n; k++) for (Index k = 0; k < diagSize; k++)
{ {
Scalar tau = Scalar(0); Scalar tau = Scalar(0);
tau = m_qr.m_Q.col(k).dot(res.col(j)); tau = m_qr.m_Q.col(k).dot(res.col(j));
@ -581,10 +609,10 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
else else
{ {
eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes"); eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes");
// Compute res = Q' * other column by column // Compute res = Q * other column by column
for(Index j = 0; j < res.cols(); j++) for(Index j = 0; j < res.cols(); j++)
{ {
for (Index k = n-1; k >=0; k--) for (Index k = diagSize-1; k >=0; k--)
{ {
Scalar tau = Scalar(0); Scalar tau = Scalar(0);
tau = m_qr.m_Q.col(k).dot(res.col(j)); tau = m_qr.m_Q.col(k).dot(res.col(j));
@ -618,7 +646,7 @@ struct SparseQRMatrixQReturnType : public EigenBase<SparseQRMatrixQReturnType<Sp
return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr); return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr);
} }
inline Index rows() const { return m_qr.rows(); } inline Index rows() const { return m_qr.rows(); }
inline Index cols() const { return m_qr.cols(); } inline Index cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); }
// To use for operations with the transpose of Q // To use for operations with the transpose of Q
SparseQRMatrixQTransposeReturnType<SparseQRType> transpose() const SparseQRMatrixQTransposeReturnType<SparseQRType> transpose() const
{ {
@ -11,7 +11,7 @@
#ifndef EIGEN_STDDEQUE_H #ifndef EIGEN_STDDEQUE_H
#define EIGEN_STDDEQUE_H #define EIGEN_STDDEQUE_H
#include "Eigen/src/StlSupport/details.h" #include "details.h"
// Define the explicit instantiation (e.g. necessary for the Intel compiler) // Define the explicit instantiation (e.g. necessary for the Intel compiler)
#if defined(__INTEL_COMPILER) || defined(__GNUC__) #if defined(__INTEL_COMPILER) || defined(__GNUC__)
@ -10,7 +10,7 @@
#ifndef EIGEN_STDLIST_H #ifndef EIGEN_STDLIST_H
#define EIGEN_STDLIST_H #define EIGEN_STDLIST_H
#include "Eigen/src/StlSupport/details.h" #include "details.h"
// Define the explicit instantiation (e.g. necessary for the Intel compiler) // Define the explicit instantiation (e.g. necessary for the Intel compiler)
#if defined(__INTEL_COMPILER) || defined(__GNUC__) #if defined(__INTEL_COMPILER) || defined(__GNUC__)
@ -11,7 +11,7 @@
#ifndef EIGEN_STDVECTOR_H #ifndef EIGEN_STDVECTOR_H
#define EIGEN_STDVECTOR_H #define EIGEN_STDVECTOR_H
#include "Eigen/src/StlSupport/details.h" #include "details.h"
/** /**
* This section contains a convenience MACRO which allows an easy specialization of * This section contains a convenience MACRO which allows an easy specialization of
@ -1,9 +1,6 @@
This directory contains a BLAS library built on top of Eigen. This directory contains a BLAS library built on top of Eigen.
This is currently a work in progress which is far to be ready for use,
but feel free to contribute to it if you wish.
This module is not built by default. In order to compile it, you need to This module is not built by default. In order to compile it, you need to
type 'make blas' from within your build dir. type 'make blas' from within your build dir.
@ -41,7 +41,7 @@ endif()
# copy ctest properties, which currently # copy ctest properties, which currently
# o raise the warning levels # o raise the warning levels
configure_file(${CMAKE_BINARY_DIR}/DartConfiguration.tcl ${CMAKE_BINARY_DIR}/DartConfiguration.tcl) configure_file(${CMAKE_CURRENT_BINARY_DIR}/DartConfiguration.tcl ${CMAKE_BINARY_DIR}/DartConfiguration.tcl)
# restore default CMAKE_MAKE_PROGRAM # restore default CMAKE_MAKE_PROGRAM
set(CMAKE_MAKE_PROGRAM ${CMAKE_MAKE_PROGRAM_SAVE}) set(CMAKE_MAKE_PROGRAM ${CMAKE_MAKE_PROGRAM_SAVE})
@ -50,7 +50,7 @@ set(CMAKE_MAKE_PROGRAM ${CMAKE_MAKE_PROGRAM_SAVE})
set(CMAKE_MAKE_PROGRAM_SAVE) set(CMAKE_MAKE_PROGRAM_SAVE)
set(EIGEN_MAKECOMMAND_PLACEHOLDER) set(EIGEN_MAKECOMMAND_PLACEHOLDER)
configure_file(${CMAKE_SOURCE_DIR}/CTestCustom.cmake.in ${CMAKE_BINARY_DIR}/CTestCustom.cmake) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CTestCustom.cmake.in ${CMAKE_BINARY_DIR}/CTestCustom.cmake)
# some documentation of this function would be nice # some documentation of this function would be nice
ei_init_testing() ei_init_testing()
@ -41,8 +41,8 @@ MatrixXd::Ones(rows,cols) // ones(rows,cols)
C.setOnes(rows,cols) // C = ones(rows,cols) C.setOnes(rows,cols) // C = ones(rows,cols)
MatrixXd::Random(rows,cols) // rand(rows,cols)*2-1 // MatrixXd::Random returns uniform random numbers in (-1, 1). MatrixXd::Random(rows,cols) // rand(rows,cols)*2-1 // MatrixXd::Random returns uniform random numbers in (-1, 1).
C.setRandom(rows,cols) // C = rand(rows,cols)*2-1 C.setRandom(rows,cols) // C = rand(rows,cols)*2-1
VectorXd::LinSpace(size,low,high) // linspace(low,high,size)' VectorXd::LinSpaced(size,low,high) // linspace(low,high,size)'
v.setLinSpace(size,low,high) // v = linspace(low,high,size)' v.setLinSpaced(size,low,high) // v = linspace(low,high,size)'
// Matrix slicing and blocks. All expressions listed here are read/write. // Matrix slicing and blocks. All expressions listed here are read/write.
@ -91,6 +91,8 @@ R.adjoint() // R'
R.transpose() // R.' or conj(R') R.transpose() // R.' or conj(R')
R.diagonal() // diag(R) R.diagonal() // diag(R)
x.asDiagonal() // diag(x) x.asDiagonal() // diag(x)
R.transpose().colwise().reverse(); // rot90(R)
R.conjugate() // conj(R)
// All the same as Matlab, but matlab doesn't have *= style operators. // All the same as Matlab, but matlab doesn't have *= style operators.
// Matrix-vector. Matrix-matrix. Matrix-scalar. // Matrix-vector. Matrix-matrix. Matrix-scalar.
@ -167,6 +169,8 @@ x.cross(y) // cross(x, y) Requires #include <Eigen/Geometry>
A.cast<double>(); // double(A) A.cast<double>(); // double(A)
A.cast<float>(); // single(A) A.cast<float>(); // single(A)
A.cast<int>(); // int32(A) A.cast<int>(); // int32(A)
A.real(); // real(A)
A.imag(); // imag(A)
// if the original type equals destination type, no work is done // if the original type equals destination type, no work is done
// Note that for most operations Eigen requires all operands to have the same type: // Note that for most operations Eigen requires all operands to have the same type:
@ -1,27 +0,0 @@
namespace Eigen {
/** \eigenManualPage LinearLeastSquares Solving linear least squares problems
lede
\eigenAutoToc
\section LinearLeastSquaresCopied Copied
The best way to do least squares solving is with a SVD decomposition. Eigen provides one as the JacobiSVD class, and its solve()
is doing least-squares solving.
Here is an example:
<table class="example">
<tr><th>Example:</th><th>Output:</th></tr>
<tr>
<td>\include TutorialLinAlgSVDSolve.cpp </td>
<td>\verbinclude TutorialLinAlgSVDSolve.out </td>
</tr>
</table>
For more information, including faster but less reliable methods, read our page concentrating on \ref LinearLeastSquares "linear least squares problems".
*/
}
@ -62,6 +62,8 @@ run time. However, these assertions do cost time and can thus be turned off.
expect that any objects passed to it are aligned. This will turn off vectorization. Not defined by default. expect that any objects passed to it are aligned. This will turn off vectorization. Not defined by default.
- \b EIGEN_DONT_ALIGN_STATICALLY - disables alignment of arrays on the stack. Not defined by default, unless - \b EIGEN_DONT_ALIGN_STATICALLY - disables alignment of arrays on the stack. Not defined by default, unless
\c EIGEN_DONT_ALIGN is defined. \c EIGEN_DONT_ALIGN is defined.
- \b EIGEN_DONT_PARALLELIZE - if defined, this disables multi-threading. This is only relevant if you enabled OpenMP.
See \ref TopicMultiThreading for details.
- \b EIGEN_DONT_VECTORIZE - disables explicit vectorization when defined. Not defined by default, unless - \b EIGEN_DONT_VECTORIZE - disables explicit vectorization when defined. Not defined by default, unless
alignment is disabled by %Eigen's platform test or the user defining \c EIGEN_DONT_ALIGN. alignment is disabled by %Eigen's platform test or the user defining \c EIGEN_DONT_ALIGN.
- \b EIGEN_FAST_MATH - enables some optimizations which might affect the accuracy of the result. This currently - \b EIGEN_FAST_MATH - enables some optimizations which might affect the accuracy of the result. This currently
@ -69,7 +71,10 @@ run time. However, these assertions do cost time and can thus be turned off.
Define it to 0 to disable. Define it to 0 to disable.
- \b EIGEN_UNROLLING_LIMIT - defines the size of a loop to enable meta unrolling. Set it to zero to disable - \b EIGEN_UNROLLING_LIMIT - defines the size of a loop to enable meta unrolling. Set it to zero to disable
unrolling. The size of a loop here is expressed in %Eigen's own notion of "number of FLOPS", it does not unrolling. The size of a loop here is expressed in %Eigen's own notion of "number of FLOPS", it does not
correspond to the number of iterations or the number of instructions. The default is value 100. correspond to the number of iterations or the number of instructions. The default is value 100.
- \b EIGEN_STACK_ALLOCATION_LIMIT - defines the maximum bytes for a buffer to be allocated on the stack. For internal
temporary buffers, dynamic memory allocation is employed as a fall back. For fixed-size matrices or arrays, exceeding
this threshold raises a compile time assertion. Use 0 to set no limit. Default is 128 KB.
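A minimal sketch of how the two tokens documented above are meant to be used; the 64 KB value is an arbitrary example, not the default:

// Both tokens must be defined before including any Eigen header.
#define EIGEN_DONT_PARALLELIZE               // turn off OpenMP parallelization
#define EIGEN_STACK_ALLOCATION_LIMIT 65536   // cap internal stack buffers at 64 KB
#include <Eigen/Dense>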
\section TopicPreprocessorDirectivesPlugins Plugins \section TopicPreprocessorDirectivesPlugins Plugins
@ -253,12 +253,15 @@ SparseMatrix<double> A, B;
B = SparseMatrix<double>(A.transpose()) + A; B = SparseMatrix<double>(A.transpose()) + A;
\endcode \endcode
Binary coefficient wise operators can also mix sparse and dense expressions: Some binary coefficient-wise operators can also mix sparse and dense expressions:
\code \code
sm2 = sm1.cwiseProduct(dm1); sm2 = sm1.cwiseProduct(dm1);
dm2 = sm1 + dm1; dm1 += sm1;
\endcode \endcode
However, it is not yet possible to add a sparse and a dense matrix as in <tt>dm2 = sm1 + dm1</tt>.
Please write this as the equivalent <tt>dm2 = dm1; dm2 += sm1</tt> (we plan to lift this restriction
in the next release of %Eigen).
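A self-contained illustration of the mixed sparse/dense operations and the workaround described above (function and matrix names are arbitrary):

#include <Eigen/Sparse>
#include <Eigen/Dense>

void mixSparseAndDense(const Eigen::SparseMatrix<double>& sm1, const Eigen::MatrixXd& dm1)
{
  Eigen::SparseMatrix<double> sm2 = sm1.cwiseProduct(dm1);  // coefficient-wise sparse * dense
  Eigen::MatrixXd dm2 = dm1;                                // workaround for dm2 = sm1 + dm1
  dm2 += sm1;                                               // dense += sparse is supported
}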
%Sparse expressions also support transposition: %Sparse expressions also support transposition:
\code \code
@ -10,6 +10,26 @@
#define EIGEN_NO_STATIC_ASSERT // otherwise we fail at compile time on unused paths #define EIGEN_NO_STATIC_ASSERT // otherwise we fail at compile time on unused paths
#include "main.h" #include "main.h"
template<typename MatrixType, typename Index, typename Scalar>
typename Eigen::internal::enable_if<!NumTraits<typename MatrixType::Scalar>::IsComplex,typename MatrixType::Scalar>::type
block_real_only(const MatrixType &m1, Index r1, Index r2, Index c1, Index c2, const Scalar& s1) {
// check cwise-Functions:
VERIFY_IS_APPROX(m1.row(r1).cwiseMax(s1), m1.cwiseMax(s1).row(r1));
VERIFY_IS_APPROX(m1.col(c1).cwiseMin(s1), m1.cwiseMin(s1).col(c1));
VERIFY_IS_APPROX(m1.block(r1,c1,r2-r1+1,c2-c1+1).cwiseMin(s1), m1.cwiseMin(s1).block(r1,c1,r2-r1+1,c2-c1+1));
VERIFY_IS_APPROX(m1.block(r1,c1,r2-r1+1,c2-c1+1).cwiseMax(s1), m1.cwiseMax(s1).block(r1,c1,r2-r1+1,c2-c1+1));
return Scalar(0);
}
template<typename MatrixType, typename Index, typename Scalar>
typename Eigen::internal::enable_if<NumTraits<typename MatrixType::Scalar>::IsComplex,typename MatrixType::Scalar>::type
block_real_only(const MatrixType &, Index, Index, Index, Index, const Scalar&) {
return Scalar(0);
}
template<typename MatrixType> void block(const MatrixType& m) template<typename MatrixType> void block(const MatrixType& m)
{ {
typedef typename MatrixType::Index Index; typedef typename MatrixType::Index Index;
@ -37,6 +57,8 @@ template<typename MatrixType> void block(const MatrixType& m)
Index c1 = internal::random<Index>(0,cols-1); Index c1 = internal::random<Index>(0,cols-1);
Index c2 = internal::random<Index>(c1,cols-1); Index c2 = internal::random<Index>(c1,cols-1);
block_real_only(m1, r1, r2, c1, c1, s1);
//check row() and col() //check row() and col()
VERIFY_IS_EQUAL(m1.col(c1).transpose(), m1.transpose().row(c1)); VERIFY_IS_EQUAL(m1.col(c1).transpose(), m1.transpose().row(c1));
//check operator(), both constant and non-constant, on row() and col() //check operator(), both constant and non-constant, on row() and col()
@ -51,7 +73,8 @@ template<typename MatrixType> void block(const MatrixType& m)
VERIFY_IS_APPROX(m1.col(c1), m1_copy.col(c1) + s1 * m1_copy.col(c2)); VERIFY_IS_APPROX(m1.col(c1), m1_copy.col(c1) + s1 * m1_copy.col(c2));
m1.col(c1).col(0) += s1 * m1_copy.col(c2); m1.col(c1).col(0) += s1 * m1_copy.col(c2);
VERIFY_IS_APPROX(m1.col(c1), m1_copy.col(c1) + Scalar(2) * s1 * m1_copy.col(c2)); VERIFY_IS_APPROX(m1.col(c1), m1_copy.col(c1) + Scalar(2) * s1 * m1_copy.col(c2));
//check block() //check block()
Matrix<Scalar,Dynamic,Dynamic> b1(1,1); b1(0,0) = m1(r1,c1); Matrix<Scalar,Dynamic,Dynamic> b1(1,1); b1(0,0) = m1(r1,c1);
@ -68,6 +68,7 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
Index cols = m.cols(); Index cols = m.cols();
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> SquareMatrixType; typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> SquareMatrixType;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType; typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
@ -179,6 +180,57 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
// restore // restore
if(sign == -1) if(sign == -1)
symm = -symm; symm = -symm;
// check matrices coming from linear constraints with Lagrange multipliers
if(rows>=3)
{
SquareMatrixType A = symm;
int c = internal::random<int>(0,rows-2);
A.bottomRightCorner(c,c).setZero();
// Make sure a solution exists:
vecX.setRandom();
vecB = A * vecX;
vecX.setZero();
ldltlo.compute(A);
VERIFY_IS_APPROX(A, ldltlo.reconstructedMatrix());
vecX = ldltlo.solve(vecB);
VERIFY_IS_APPROX(A * vecX, vecB);
}
// check non-full rank matrices
if(rows>=3)
{
int r = internal::random<int>(1,rows-1);
Matrix<Scalar,Dynamic,Dynamic> a = Matrix<Scalar,Dynamic,Dynamic>::Random(rows,r);
SquareMatrixType A = a * a.adjoint();
// Make sure a solution exists:
vecX.setRandom();
vecB = A * vecX;
vecX.setZero();
ldltlo.compute(A);
VERIFY_IS_APPROX(A, ldltlo.reconstructedMatrix());
vecX = ldltlo.solve(vecB);
VERIFY_IS_APPROX(A * vecX, vecB);
}
// check matrices with a wide spectrum
if(rows>=3)
{
RealScalar s = (std::min)(16,std::numeric_limits<RealScalar>::max_exponent10/8);
Matrix<Scalar,Dynamic,Dynamic> a = Matrix<Scalar,Dynamic,Dynamic>::Random(rows,rows);
Matrix<RealScalar,Dynamic,1> d = Matrix<RealScalar,Dynamic,1>::Random(rows);
for(int k=0; k<rows; ++k)
d(k) = d(k)*std::pow(RealScalar(10),internal::random<RealScalar>(-s,s));
SquareMatrixType A = a * d.asDiagonal() * a.adjoint();
// Make sure a solution exists:
vecX.setRandom();
vecB = A * vecX;
vecX.setZero();
ldltlo.compute(A);
VERIFY_IS_APPROX(A, ldltlo.reconstructedMatrix());
vecX = ldltlo.solve(vecB);
VERIFY_IS_APPROX(A * vecX, vecB);
}
} }
// update/downdate // update/downdate
@ -53,7 +53,7 @@ void check_aligned_new()
void check_aligned_stack_alloc() void check_aligned_stack_alloc()
{ {
for(int i = 1; i < 1000; i++) for(int i = 1; i < 400; i++)
{ {
ei_declare_aligned_stack_constructed_variable(float,p,i,0); ei_declare_aligned_stack_constructed_variable(float,p,i,0);
VERIFY(size_t(p)%ALIGNMENT==0); VERIFY(size_t(p)%ALIGNMENT==0);
@ -87,6 +87,32 @@ template<typename T> void check_dynaligned()
delete obj; delete obj;
} }
template<typename T> void check_custom_new_delete()
{
{
T* t = new T;
delete t;
}
{
std::size_t N = internal::random<std::size_t>(1,10);
T* t = new T[N];
delete[] t;
}
#ifdef EIGEN_ALIGN
{
T* t = static_cast<T *>((T::operator new)(sizeof(T)));
(T::operator delete)(t, sizeof(T));
}
{
T* t = static_cast<T *>((T::operator new)(sizeof(T)));
(T::operator delete)(t);
}
#endif
}
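For context, the aligned operator new/delete pair exercised by check_custom_new_delete is what user-defined types typically obtain through the standard Eigen macro; a hedged sketch (struct and function names are illustrative):

#include <Eigen/Dense>

struct MyNode
{
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW   // injects the aligned operator new/delete pair tested above
  Eigen::Vector4f position;         // fixed-size vectorizable member requiring alignment
  int id;
};

void exerciseMyNode()
{
  MyNode* n = new MyNode;           // routed through the aligned operator new
  delete n;
  MyNode* arr = new MyNode[3];
  delete[] arr;
}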
void test_dynalloc() void test_dynalloc()
{ {
// low level dynamic memory allocation // low level dynamic memory allocation
@ -102,6 +128,12 @@ void test_dynalloc()
CALL_SUBTEST(check_dynaligned<Matrix4f>() ); CALL_SUBTEST(check_dynaligned<Matrix4f>() );
CALL_SUBTEST(check_dynaligned<Vector4d>() ); CALL_SUBTEST(check_dynaligned<Vector4d>() );
CALL_SUBTEST(check_dynaligned<Vector4i>() ); CALL_SUBTEST(check_dynaligned<Vector4i>() );
CALL_SUBTEST(check_dynaligned<Vector8f>() );
CALL_SUBTEST( check_custom_new_delete<Vector4f>() );
CALL_SUBTEST( check_custom_new_delete<Vector2f>() );
CALL_SUBTEST( check_custom_new_delete<Matrix4f>() );
CALL_SUBTEST( check_custom_new_delete<MatrixXi>() );
} }
// check static allocation, who knows ? // check static allocation, who knows ?
@ -67,6 +67,7 @@ template<typename MatrixType, int QRPreconditioner>
void jacobisvd_solve(const MatrixType& m, unsigned int computationOptions) void jacobisvd_solve(const MatrixType& m, unsigned int computationOptions)
{ {
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index; typedef typename MatrixType::Index Index;
Index rows = m.rows(); Index rows = m.rows();
Index cols = m.cols(); Index cols = m.cols();
@ -81,9 +82,90 @@ void jacobisvd_solve(const MatrixType& m, unsigned int computationOptions)
RhsType rhs = RhsType::Random(rows, internal::random<Index>(1, cols)); RhsType rhs = RhsType::Random(rows, internal::random<Index>(1, cols));
JacobiSVD<MatrixType, QRPreconditioner> svd(m, computationOptions); JacobiSVD<MatrixType, QRPreconditioner> svd(m, computationOptions);
if(internal::is_same<RealScalar,double>::value) svd.setThreshold(1e-8);
else if(internal::is_same<RealScalar,float>::value) svd.setThreshold(1e-4);
SolutionType x = svd.solve(rhs); SolutionType x = svd.solve(rhs);
RealScalar residual = (m*x-rhs).norm();
// Check that there is no significantly better solution in the neighborhood of x
if(!test_isMuchSmallerThan(residual,rhs.norm()))
{
// If the residual is very small, then we have an exact solution, so we are already good.
for(int k=0;k<x.rows();++k)
{
SolutionType y(x);
y.row(k).array() += 2*NumTraits<RealScalar>::epsilon();
RealScalar residual_y = (m*y-rhs).norm();
VERIFY( test_isApprox(residual_y,residual) || residual < residual_y );
y.row(k) = x.row(k).array() - 2*NumTraits<RealScalar>::epsilon();
residual_y = (m*y-rhs).norm();
VERIFY( test_isApprox(residual_y,residual) || residual < residual_y );
}
}
// evaluate normal equation which works also for least-squares solutions // evaluate normal equation which works also for least-squares solutions
VERIFY_IS_APPROX(m.adjoint()*m*x,m.adjoint()*rhs); if(internal::is_same<RealScalar,double>::value)
{
// This test is not stable with single precision.
// This is probably because squaring m significantly affects the precision.
VERIFY_IS_APPROX(m.adjoint()*m*x,m.adjoint()*rhs);
}
// check minimal norm solutions
{
// generate a full-rank m x n problem with m<n
enum {
RankAtCompileTime2 = ColsAtCompileTime==Dynamic ? Dynamic : (ColsAtCompileTime)/2+1,
RowsAtCompileTime3 = ColsAtCompileTime==Dynamic ? Dynamic : ColsAtCompileTime+1
};
typedef Matrix<Scalar, RankAtCompileTime2, ColsAtCompileTime> MatrixType2;
typedef Matrix<Scalar, RankAtCompileTime2, 1> RhsType2;
typedef Matrix<Scalar, ColsAtCompileTime, RankAtCompileTime2> MatrixType2T;
Index rank = RankAtCompileTime2==Dynamic ? internal::random<Index>(1,cols) : Index(RankAtCompileTime2);
MatrixType2 m2(rank,cols);
int guard = 0;
do {
m2.setRandom();
} while(m2.jacobiSvd().setThreshold(test_precision<Scalar>()).rank()!=rank && (++guard)<10);
VERIFY(guard<10);
RhsType2 rhs2 = RhsType2::Random(rank);
// use QR to find a reference minimal norm solution
HouseholderQR<MatrixType2T> qr(m2.adjoint());
Matrix<Scalar,Dynamic,1> tmp = qr.matrixQR().topLeftCorner(rank,rank).template triangularView<Upper>().adjoint().solve(rhs2);
tmp.conservativeResize(cols);
tmp.tail(cols-rank).setZero();
SolutionType x21 = qr.householderQ() * tmp;
// now check with SVD
JacobiSVD<MatrixType2, ColPivHouseholderQRPreconditioner> svd2(m2, computationOptions);
SolutionType x22 = svd2.solve(rhs2);
VERIFY_IS_APPROX(m2*x21, rhs2);
VERIFY_IS_APPROX(m2*x22, rhs2);
VERIFY_IS_APPROX(x21, x22);
// Now check with a rank deficient matrix
typedef Matrix<Scalar, RowsAtCompileTime3, ColsAtCompileTime> MatrixType3;
typedef Matrix<Scalar, RowsAtCompileTime3, 1> RhsType3;
Index rows3 = RowsAtCompileTime3==Dynamic ? internal::random<Index>(rank+1,2*cols) : Index(RowsAtCompileTime3);
Matrix<Scalar,RowsAtCompileTime3,Dynamic> C = Matrix<Scalar,RowsAtCompileTime3,Dynamic>::Random(rows3,rank);
MatrixType3 m3 = C * m2;
RhsType3 rhs3 = C * rhs2;
JacobiSVD<MatrixType3, ColPivHouseholderQRPreconditioner> svd3(m3, computationOptions);
SolutionType x3 = svd3.solve(rhs3);
if(svd3.rank()!=rank) {
std::cout << m3 << "\n\n";
std::cout << svd3.singularValues().transpose() << "\n";
std::cout << svd3.rank() << " == " << rank << "\n";
std::cout << x21.norm() << " == " << x3.norm() << "\n";
}
// VERIFY_IS_APPROX(m3*x3, rhs3);
VERIFY_IS_APPROX(m3*x21, rhs3);
VERIFY_IS_APPROX(m2*x3, rhs2);
VERIFY_IS_APPROX(x21, x3);
}
} }
template<typename MatrixType, int QRPreconditioner> template<typename MatrixType, int QRPreconditioner>
@ -92,10 +174,9 @@ void jacobisvd_test_all_computation_options(const MatrixType& m)
if (QRPreconditioner == NoQRPreconditioner && m.rows() != m.cols()) if (QRPreconditioner == NoQRPreconditioner && m.rows() != m.cols())
return; return;
JacobiSVD<MatrixType, QRPreconditioner> fullSvd(m, ComputeFullU|ComputeFullV); JacobiSVD<MatrixType, QRPreconditioner> fullSvd(m, ComputeFullU|ComputeFullV);
CALL_SUBTEST(( jacobisvd_check_full(m, fullSvd) ));
jacobisvd_check_full(m, fullSvd); CALL_SUBTEST(( jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeFullU | ComputeFullV) ));
jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeFullU | ComputeFullV);
#if defined __INTEL_COMPILER #if defined __INTEL_COMPILER
// remark #111: statement is unreachable // remark #111: statement is unreachable
#pragma warning disable 111 #pragma warning disable 111
@ -103,20 +184,20 @@ void jacobisvd_test_all_computation_options(const MatrixType& m)
if(QRPreconditioner == FullPivHouseholderQRPreconditioner) if(QRPreconditioner == FullPivHouseholderQRPreconditioner)
return; return;
jacobisvd_compare_to_full(m, ComputeFullU, fullSvd); CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeFullU, fullSvd) ));
jacobisvd_compare_to_full(m, ComputeFullV, fullSvd); CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeFullV, fullSvd) ));
jacobisvd_compare_to_full(m, 0, fullSvd); CALL_SUBTEST(( jacobisvd_compare_to_full(m, 0, fullSvd) ));
if (MatrixType::ColsAtCompileTime == Dynamic) { if (MatrixType::ColsAtCompileTime == Dynamic) {
// thin U/V are only available with dynamic number of columns // thin U/V are only available with dynamic number of columns
jacobisvd_compare_to_full(m, ComputeFullU|ComputeThinV, fullSvd); CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeFullU|ComputeThinV, fullSvd) ));
jacobisvd_compare_to_full(m, ComputeThinV, fullSvd); CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinV, fullSvd) ));
jacobisvd_compare_to_full(m, ComputeThinU|ComputeFullV, fullSvd); CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinU|ComputeFullV, fullSvd) ));
jacobisvd_compare_to_full(m, ComputeThinU , fullSvd); CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinU , fullSvd) ));
jacobisvd_compare_to_full(m, ComputeThinU|ComputeThinV, fullSvd); CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinU|ComputeThinV, fullSvd) ));
jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeFullU | ComputeThinV); CALL_SUBTEST(( jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeFullU | ComputeThinV) ));
jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeThinU | ComputeFullV); CALL_SUBTEST(( jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeThinU | ComputeFullV) ));
jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeThinU | ComputeThinV); CALL_SUBTEST(( jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeThinU | ComputeThinV) ));
// test reconstruction // test reconstruction
typedef typename MatrixType::Index Index; typedef typename MatrixType::Index Index;
@ -129,12 +210,29 @@ void jacobisvd_test_all_computation_options(const MatrixType& m)
template<typename MatrixType> template<typename MatrixType>
void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true) void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true)
{ {
MatrixType m = pickrandom ? MatrixType::Random(a.rows(), a.cols()) : a; MatrixType m = a;
if(pickrandom)
{
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
Index diagSize = (std::min)(a.rows(), a.cols());
RealScalar s = std::numeric_limits<RealScalar>::max_exponent10/4;
s = internal::random<RealScalar>(1,s);
Matrix<RealScalar,Dynamic,1> d = Matrix<RealScalar,Dynamic,1>::Random(diagSize);
for(Index k=0; k<diagSize; ++k)
d(k) = d(k)*std::pow(RealScalar(10),internal::random<RealScalar>(-s,s));
m = Matrix<Scalar,Dynamic,Dynamic>::Random(a.rows(),diagSize) * d.asDiagonal() * Matrix<Scalar,Dynamic,Dynamic>::Random(diagSize,a.cols());
// cancel some coeffs
Index n = internal::random<Index>(0,m.size()-1);
for(Index i=0; i<n; ++i)
m(internal::random<Index>(0,m.rows()-1), internal::random<Index>(0,m.cols()-1)) = Scalar(0);
}
jacobisvd_test_all_computation_options<MatrixType, FullPivHouseholderQRPreconditioner>(m); CALL_SUBTEST(( jacobisvd_test_all_computation_options<MatrixType, FullPivHouseholderQRPreconditioner>(m) ));
jacobisvd_test_all_computation_options<MatrixType, ColPivHouseholderQRPreconditioner>(m); CALL_SUBTEST(( jacobisvd_test_all_computation_options<MatrixType, ColPivHouseholderQRPreconditioner>(m) ));
jacobisvd_test_all_computation_options<MatrixType, HouseholderQRPreconditioner>(m); CALL_SUBTEST(( jacobisvd_test_all_computation_options<MatrixType, HouseholderQRPreconditioner>(m) ));
jacobisvd_test_all_computation_options<MatrixType, NoQRPreconditioner>(m); CALL_SUBTEST(( jacobisvd_test_all_computation_options<MatrixType, NoQRPreconditioner>(m) ));
} }
template<typename MatrixType> void jacobisvd_verify_assert(const MatrixType& m) template<typename MatrixType> void jacobisvd_verify_assert(const MatrixType& m)
@ -328,6 +426,7 @@ void test_jacobisvd()
TEST_SET_BUT_UNUSED_VARIABLE(r) TEST_SET_BUT_UNUSED_VARIABLE(r)
TEST_SET_BUT_UNUSED_VARIABLE(c) TEST_SET_BUT_UNUSED_VARIABLE(c)
CALL_SUBTEST_10(( jacobisvd<MatrixXd>(MatrixXd(r,c)) ));
CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(r,c)) )); CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(r,c)) ));
CALL_SUBTEST_8(( jacobisvd<MatrixXcd>(MatrixXcd(r,c)) )); CALL_SUBTEST_8(( jacobisvd<MatrixXcd>(MatrixXcd(r,c)) ));
(void) r; (void) r;
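For context, a hedged sketch of the solve-with-threshold pattern the updated test targets; the matrix sizes and tolerance below are illustrative only:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);   // over-determined system
      Eigen::VectorXd b = Eigen::VectorXd::Random(6);
      Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
      svd.setThreshold(1e-8);                              // rank threshold, as set in the test
      Eigen::VectorXd x = svd.solve(b);                    // least-squares / minimal-norm solution
      std::cout << "residual = " << (A * x - b).norm() << std::endl;
      return 0;
    }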
View File
@ -154,59 +154,79 @@ template<typename PlainObjectType> void check_const_correctness(const PlainObjec
VERIFY( !(Ref<ConstPlainObjectType, Aligned>::Flags & LvalueBit) ); VERIFY( !(Ref<ConstPlainObjectType, Aligned>::Flags & LvalueBit) );
} }
EIGEN_DONT_INLINE void call_ref_1(Ref<VectorXf> ) { } template<typename B>
EIGEN_DONT_INLINE void call_ref_2(const Ref<const VectorXf>& ) { } EIGEN_DONT_INLINE void call_ref_1(Ref<VectorXf> a, const B &b) { VERIFY_IS_EQUAL(a,b); }
EIGEN_DONT_INLINE void call_ref_3(Ref<VectorXf,0,InnerStride<> > ) { } template<typename B>
EIGEN_DONT_INLINE void call_ref_4(const Ref<const VectorXf,0,InnerStride<> >& ) { } EIGEN_DONT_INLINE void call_ref_2(const Ref<const VectorXf>& a, const B &b) { VERIFY_IS_EQUAL(a,b); }
EIGEN_DONT_INLINE void call_ref_5(Ref<MatrixXf,0,OuterStride<> > ) { } template<typename B>
EIGEN_DONT_INLINE void call_ref_6(const Ref<const MatrixXf,0,OuterStride<> >& ) { } EIGEN_DONT_INLINE void call_ref_3(Ref<VectorXf,0,InnerStride<> > a, const B &b) { VERIFY_IS_EQUAL(a,b); }
template<typename B>
EIGEN_DONT_INLINE void call_ref_4(const Ref<const VectorXf,0,InnerStride<> >& a, const B &b) { VERIFY_IS_EQUAL(a,b); }
template<typename B>
EIGEN_DONT_INLINE void call_ref_5(Ref<MatrixXf,0,OuterStride<> > a, const B &b) { VERIFY_IS_EQUAL(a,b); }
template<typename B>
EIGEN_DONT_INLINE void call_ref_6(const Ref<const MatrixXf,0,OuterStride<> >& a, const B &b) { VERIFY_IS_EQUAL(a,b); }
template<typename B>
EIGEN_DONT_INLINE void call_ref_7(Ref<Matrix<float,Dynamic,3> > a, const B &b) { VERIFY_IS_EQUAL(a,b); }
void call_ref() void call_ref()
{ {
VectorXcf ca(10); VectorXcf ca = VectorXcf::Random(10);
VectorXf a(10); VectorXf a = VectorXf::Random(10);
RowVectorXf b = RowVectorXf::Random(10);
MatrixXf A = MatrixXf::Random(10,10);
RowVector3f c = RowVector3f::Random();
const VectorXf& ac(a); const VectorXf& ac(a);
VectorBlock<VectorXf> ab(a,0,3); VectorBlock<VectorXf> ab(a,0,3);
MatrixXf A(10,10);
const VectorBlock<VectorXf> abc(a,0,3); const VectorBlock<VectorXf> abc(a,0,3);
VERIFY_EVALUATION_COUNT( call_ref_1(a), 0); VERIFY_EVALUATION_COUNT( call_ref_1(a,a), 0);
//call_ref_1(ac); // does not compile because ac is const VERIFY_EVALUATION_COUNT( call_ref_1(b,b.transpose()), 0);
VERIFY_EVALUATION_COUNT( call_ref_1(ab), 0); // call_ref_1(ac); // does not compile because ac is const
VERIFY_EVALUATION_COUNT( call_ref_1(a.head(4)), 0); VERIFY_EVALUATION_COUNT( call_ref_1(ab,ab), 0);
VERIFY_EVALUATION_COUNT( call_ref_1(abc), 0); VERIFY_EVALUATION_COUNT( call_ref_1(a.head(4),a.head(4)), 0);
VERIFY_EVALUATION_COUNT( call_ref_1(A.col(3)), 0); VERIFY_EVALUATION_COUNT( call_ref_1(abc,abc), 0);
// call_ref_1(A.row(3)); // does not compile because innerstride!=1 VERIFY_EVALUATION_COUNT( call_ref_1(A.col(3),A.col(3)), 0);
VERIFY_EVALUATION_COUNT( call_ref_3(A.row(3)), 0); // call_ref_1(A.row(3)); // does not compile because innerstride!=1
VERIFY_EVALUATION_COUNT( call_ref_4(A.row(3)), 0); VERIFY_EVALUATION_COUNT( call_ref_3(A.row(3),A.row(3).transpose()), 0);
//call_ref_1(a+a); // does not compile for obvious reason VERIFY_EVALUATION_COUNT( call_ref_4(A.row(3),A.row(3).transpose()), 0);
// call_ref_1(a+a); // does not compile for obvious reason
VERIFY_EVALUATION_COUNT( call_ref_2(A*A.col(1)), 1); // evaluated into a temp MatrixXf tmp = A*A.col(1);
VERIFY_EVALUATION_COUNT( call_ref_2(ac.head(5)), 0); VERIFY_EVALUATION_COUNT( call_ref_2(A*A.col(1), tmp), 1); // evaluated into a temp
VERIFY_EVALUATION_COUNT( call_ref_2(ac), 0); VERIFY_EVALUATION_COUNT( call_ref_2(ac.head(5),ac.head(5)), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(a), 0); VERIFY_EVALUATION_COUNT( call_ref_2(ac,ac), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(ab), 0); VERIFY_EVALUATION_COUNT( call_ref_2(a,a), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(a.head(4)), 0); VERIFY_EVALUATION_COUNT( call_ref_2(ab,ab), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(a+a), 1); // evaluated into a temp VERIFY_EVALUATION_COUNT( call_ref_2(a.head(4),a.head(4)), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(ca.imag()), 1); // evaluated into a temp tmp = a+a;
VERIFY_EVALUATION_COUNT( call_ref_2(a+a,tmp), 1); // evaluated into a temp
VERIFY_EVALUATION_COUNT( call_ref_2(ca.imag(),ca.imag()), 1); // evaluated into a temp
VERIFY_EVALUATION_COUNT( call_ref_4(ac.head(5)), 0); VERIFY_EVALUATION_COUNT( call_ref_4(ac.head(5),ac.head(5)), 0);
VERIFY_EVALUATION_COUNT( call_ref_4(a+a), 1); // evaluated into a temp tmp = a+a;
VERIFY_EVALUATION_COUNT( call_ref_4(ca.imag()), 0); VERIFY_EVALUATION_COUNT( call_ref_4(a+a,tmp), 1); // evaluated into a temp
VERIFY_EVALUATION_COUNT( call_ref_4(ca.imag(),ca.imag()), 0);
VERIFY_EVALUATION_COUNT( call_ref_5(a), 0); VERIFY_EVALUATION_COUNT( call_ref_5(a,a), 0);
VERIFY_EVALUATION_COUNT( call_ref_5(a.head(3)), 0); VERIFY_EVALUATION_COUNT( call_ref_5(a.head(3),a.head(3)), 0);
VERIFY_EVALUATION_COUNT( call_ref_5(A), 0); VERIFY_EVALUATION_COUNT( call_ref_5(A,A), 0);
// call_ref_5(A.transpose()); // does not compile // call_ref_5(A.transpose()); // does not compile
VERIFY_EVALUATION_COUNT( call_ref_5(A.block(1,1,2,2)), 0); VERIFY_EVALUATION_COUNT( call_ref_5(A.block(1,1,2,2),A.block(1,1,2,2)), 0);
VERIFY_EVALUATION_COUNT( call_ref_5(b,b), 0); // storage order do not match, but this is a degenerate case that should work
VERIFY_EVALUATION_COUNT( call_ref_5(a.row(3),a.row(3)), 0);
VERIFY_EVALUATION_COUNT( call_ref_6(a), 0); VERIFY_EVALUATION_COUNT( call_ref_6(a,a), 0);
VERIFY_EVALUATION_COUNT( call_ref_6(a.head(3)), 0); VERIFY_EVALUATION_COUNT( call_ref_6(a.head(3),a.head(3)), 0);
VERIFY_EVALUATION_COUNT( call_ref_6(A.row(3)), 1); // evaluated into a temp though it could be avoided by viewing it as a 1xn matrix VERIFY_EVALUATION_COUNT( call_ref_6(A.row(3),A.row(3)), 1); // evaluated into a temp though it could be avoided by viewing it as a 1xn matrix
VERIFY_EVALUATION_COUNT( call_ref_6(A+A), 1); // evaluated into a temp tmp = A+A;
VERIFY_EVALUATION_COUNT( call_ref_6(A), 0); VERIFY_EVALUATION_COUNT( call_ref_6(A+A,tmp), 1); // evaluated into a temp
VERIFY_EVALUATION_COUNT( call_ref_6(A.transpose()), 1); // evaluated into a temp because the storage orders do not match VERIFY_EVALUATION_COUNT( call_ref_6(A,A), 0);
VERIFY_EVALUATION_COUNT( call_ref_6(A.block(1,1,2,2)), 0); VERIFY_EVALUATION_COUNT( call_ref_6(A.transpose(),A.transpose()), 1); // evaluated into a temp because the storage orders do not match
VERIFY_EVALUATION_COUNT( call_ref_6(A.block(1,1,2,2),A.block(1,1,2,2)), 0);
VERIFY_EVALUATION_COUNT( call_ref_7(c,c), 0);
} }
void test_ref() void test_ref()
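As a hedged aside, the calling pattern these evaluation counts verify looks like the following in user code (the function name is illustrative):

    #include <Eigen/Dense>

    // Accepts any contiguous float vector expression (blocks, columns, ...) without copying.
    float head_sum(const Eigen::Ref<const Eigen::VectorXf>& v) { return v.sum(); }

    int main() {
      Eigen::MatrixXf A = Eigen::MatrixXf::Random(10, 10);
      Eigen::VectorXf a = Eigen::VectorXf::Random(10);
      float s = head_sum(a.head(4)) + head_sum(A.col(3));  // no temporaries are created
      return s > -1e30f ? 0 : 1;
    }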
View File
@ -11,26 +11,31 @@
template<typename T> void test_simplicial_cholesky_T() template<typename T> void test_simplicial_cholesky_T()
{ {
SimplicialCholesky<SparseMatrix<T>, Lower> chol_colmajor_lower; SimplicialCholesky<SparseMatrix<T>, Lower> chol_colmajor_lower_amd;
SimplicialCholesky<SparseMatrix<T>, Upper> chol_colmajor_upper; SimplicialCholesky<SparseMatrix<T>, Upper> chol_colmajor_upper_amd;
SimplicialLLT<SparseMatrix<T>, Lower> llt_colmajor_lower; SimplicialLLT<SparseMatrix<T>, Lower> llt_colmajor_lower_amd;
SimplicialLDLT<SparseMatrix<T>, Upper> llt_colmajor_upper; SimplicialLLT<SparseMatrix<T>, Upper> llt_colmajor_upper_amd;
SimplicialLDLT<SparseMatrix<T>, Lower> ldlt_colmajor_lower; SimplicialLDLT<SparseMatrix<T>, Lower> ldlt_colmajor_lower_amd;
SimplicialLDLT<SparseMatrix<T>, Upper> ldlt_colmajor_upper; SimplicialLDLT<SparseMatrix<T>, Upper> ldlt_colmajor_upper_amd;
SimplicialLDLT<SparseMatrix<T>, Lower, NaturalOrdering<int> > ldlt_colmajor_lower_nat;
SimplicialLDLT<SparseMatrix<T>, Upper, NaturalOrdering<int> > ldlt_colmajor_upper_nat;
check_sparse_spd_solving(chol_colmajor_lower); check_sparse_spd_solving(chol_colmajor_lower_amd);
check_sparse_spd_solving(chol_colmajor_upper); check_sparse_spd_solving(chol_colmajor_upper_amd);
check_sparse_spd_solving(llt_colmajor_lower); check_sparse_spd_solving(llt_colmajor_lower_amd);
check_sparse_spd_solving(llt_colmajor_upper); check_sparse_spd_solving(llt_colmajor_upper_amd);
check_sparse_spd_solving(ldlt_colmajor_lower); check_sparse_spd_solving(ldlt_colmajor_lower_amd);
check_sparse_spd_solving(ldlt_colmajor_upper); check_sparse_spd_solving(ldlt_colmajor_upper_amd);
check_sparse_spd_determinant(chol_colmajor_lower); check_sparse_spd_determinant(chol_colmajor_lower_amd);
check_sparse_spd_determinant(chol_colmajor_upper); check_sparse_spd_determinant(chol_colmajor_upper_amd);
check_sparse_spd_determinant(llt_colmajor_lower); check_sparse_spd_determinant(llt_colmajor_lower_amd);
check_sparse_spd_determinant(llt_colmajor_upper); check_sparse_spd_determinant(llt_colmajor_upper_amd);
check_sparse_spd_determinant(ldlt_colmajor_lower); check_sparse_spd_determinant(ldlt_colmajor_lower_amd);
check_sparse_spd_determinant(ldlt_colmajor_upper); check_sparse_spd_determinant(ldlt_colmajor_upper_amd);
check_sparse_spd_solving(ldlt_colmajor_lower_nat);
check_sparse_spd_solving(ldlt_colmajor_upper_nat);
} }
void test_simplicial_cholesky() void test_simplicial_cholesky()
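For reference, a hedged sketch of the NaturalOrdering instantiation the test now covers, on a small illustrative SPD matrix (lower triangle only):

    #include <Eigen/Sparse>
    #include <vector>

    int main() {
      typedef Eigen::SparseMatrix<double> SpMat;
      const int n = 5;
      std::vector<Eigen::Triplet<double> > t;
      for (int i = 0; i < n; ++i) {
        t.push_back(Eigen::Triplet<double>(i, i, 4.0));                      // diagonal
        if (i + 1 < n) t.push_back(Eigen::Triplet<double>(i + 1, i, -1.0));  // lower triangle
      }
      SpMat A(n, n);
      A.setFromTriplets(t.begin(), t.end());
      Eigen::VectorXd b = Eigen::VectorXd::Ones(n);
      Eigen::SimplicialLDLT<SpMat, Eigen::Lower> ldlt_amd(A);                               // default AMD ordering
      Eigen::SimplicialLDLT<SpMat, Eigen::Lower, Eigen::NaturalOrdering<int> > ldlt_nat(A); // no permutation
      return ((ldlt_amd.solve(b) - ldlt_nat.solve(b)).norm() < 1e-12) ? 0 : 1;
    }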
View File
@ -2,24 +2,24 @@
// for linear algebra. // for linear algebra.
// //
// Copyright (C) 2012 Desire Nuentsa Wakam <desire.nuentsa_wakam@inria.fr> // Copyright (C) 2012 Desire Nuentsa Wakam <desire.nuentsa_wakam@inria.fr>
// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// //
// This Source Code Form is subject to the terms of the Mozilla // This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed // Public License v. 2.0. If a copy of the MPL was not distributed
#include "sparse.h" #include "sparse.h"
#include <Eigen/SparseQR> #include <Eigen/SparseQR>
template<typename MatrixType,typename DenseMat> template<typename MatrixType,typename DenseMat>
int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows = 300, int maxCols = 300) int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows = 300, int maxCols = 150)
{ {
eigen_assert(maxRows >= maxCols); eigen_assert(maxRows >= maxCols);
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
int rows = internal::random<int>(1,maxRows); int rows = internal::random<int>(1,maxRows);
int cols = internal::random<int>(1,rows); int cols = internal::random<int>(1,maxCols);
double density = (std::max)(8./(rows*cols), 0.01); double density = (std::max)(8./(rows*cols), 0.01);
A.resize(rows,rows); A.resize(rows,cols);
dA.resize(rows,rows); dA.resize(rows,cols);
initSparse<Scalar>(density, dA, A,ForceNonZeroDiag); initSparse<Scalar>(density, dA, A,ForceNonZeroDiag);
A.makeCompressed(); A.makeCompressed();
int nop = internal::random<int>(0, internal::random<double>(0,1) > 0.5 ? cols/2 : 0); int nop = internal::random<int>(0, internal::random<double>(0,1) > 0.5 ? cols/2 : 0);
@ -31,6 +31,13 @@ int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows
A.col(j0) = s * A.col(j1); A.col(j0) = s * A.col(j1);
dA.col(j0) = s * dA.col(j1); dA.col(j0) = s * dA.col(j1);
} }
// if(rows<cols) {
// A.conservativeResize(cols,cols);
// dA.conservativeResize(cols,cols);
// dA.bottomRows(cols-rows).setZero();
// }
return rows; return rows;
} }
@ -42,11 +49,10 @@ template<typename Scalar> void test_sparseqr_scalar()
MatrixType A; MatrixType A;
DenseMat dA; DenseMat dA;
DenseVector refX,x,b; DenseVector refX,x,b;
SparseQR<MatrixType, AMDOrdering<int> > solver; SparseQR<MatrixType, COLAMDOrdering<int> > solver;
generate_sparse_rectangular_problem(A,dA); generate_sparse_rectangular_problem(A,dA);
int n = A.cols(); b = dA * DenseVector::Random(A.cols());
b = DenseVector::Random(n);
solver.compute(A); solver.compute(A);
if (solver.info() != Success) if (solver.info() != Success)
{ {
@ -60,17 +66,19 @@ template<typename Scalar> void test_sparseqr_scalar()
std::cerr << "sparse QR factorization failed\n"; std::cerr << "sparse QR factorization failed\n";
exit(0); exit(0);
return; return;
} }
VERIFY_IS_APPROX(A * x, b);
//Compare with a dense QR solver //Compare with a dense QR solver
ColPivHouseholderQR<DenseMat> dqr(dA); ColPivHouseholderQR<DenseMat> dqr(dA);
refX = dqr.solve(b); refX = dqr.solve(b);
VERIFY_IS_EQUAL(dqr.rank(), solver.rank()); VERIFY_IS_EQUAL(dqr.rank(), solver.rank());
if(solver.rank()==A.cols()) // full rank
if(solver.rank()<A.cols())
VERIFY((dA * refX - b).norm() * 2 > (A * x - b).norm() );
else
VERIFY_IS_APPROX(x, refX); VERIFY_IS_APPROX(x, refX);
// else
// VERIFY((dA * refX - b).norm() * 2 > (A * x - b).norm() );
// Compute explicitly the matrix Q // Compute explicitly the matrix Q
MatrixType Q, QtQ, idM; MatrixType Q, QtQ, idM;
@ -88,3 +96,4 @@ void test_sparseqr()
CALL_SUBTEST_2(test_sparseqr_scalar<std::complex<double> >()); CALL_SUBTEST_2(test_sparseqr_scalar<std::complex<double> >());
} }
} }
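A hedged sketch of the COLAMD-ordered SparseQR configuration used above, on a toy rectangular system (the triplets are made up for illustration):

    #include <Eigen/Sparse>
    #include <vector>

    int main() {
      typedef Eigen::SparseMatrix<double> SpMat;
      std::vector<Eigen::Triplet<double> > t;
      for (int i = 0; i < 4; ++i) t.push_back(Eigen::Triplet<double>(i, i, 1.0 + i));
      t.push_back(Eigen::Triplet<double>(5, 2, 0.5));
      SpMat A(6, 4);                                       // rows >= cols, as the generator enforces
      A.setFromTriplets(t.begin(), t.end());
      A.makeCompressed();                                  // SparseQR expects compressed storage
      Eigen::VectorXd b = Eigen::VectorXd::Ones(6);
      Eigen::SparseQR<SpMat, Eigen::COLAMDOrdering<int> > solver;
      solver.compute(A);
      if (solver.info() != Eigen::Success) return 1;
      Eigen::VectorXd x = solver.solve(b);                 // least-squares solution
      return x.allFinite() ? 0 : 1;
    }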
View File
@ -2,7 +2,7 @@
// for linear algebra. // for linear algebra.
// //
// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2012 Kolja Brix <brix@igpm.rwth-aaachen.de> // Copyright (C) 2012, 2014 Kolja Brix <brix@igpm.rwth-aaachen.de>
// //
// This Source Code Form is subject to the terms of the Mozilla // This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed // Public License v. 2.0. If a copy of the MPL was not distributed
@ -72,16 +72,20 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
VectorType p0 = rhs - mat*x; VectorType p0 = rhs - mat*x;
VectorType r0 = precond.solve(p0); VectorType r0 = precond.solve(p0);
// RealScalar r0_sqnorm = r0.squaredNorm();
// is initial guess already good enough?
if(abs(r0.norm()) < tol) {
return true;
}
VectorType w = VectorType::Zero(restart + 1); VectorType w = VectorType::Zero(restart + 1);
FMatrixType H = FMatrixType::Zero(m, restart + 1); FMatrixType H = FMatrixType::Zero(m, restart + 1); // Hessenberg matrix
VectorType tau = VectorType::Zero(restart + 1); VectorType tau = VectorType::Zero(restart + 1);
std::vector < JacobiRotation < Scalar > > G(restart); std::vector < JacobiRotation < Scalar > > G(restart);
// generate first Householder vector // generate first Householder vector
VectorType e; VectorType e(m-1);
RealScalar beta; RealScalar beta;
r0.makeHouseholder(e, tau.coeffRef(0), beta); r0.makeHouseholder(e, tau.coeffRef(0), beta);
w(0)=(Scalar) beta; w(0)=(Scalar) beta;
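For orientation, a hedged sketch of driving this kernel through the unsupported GMRES solver class; the tridiagonal test matrix is illustrative:

    #include <unsupported/Eigen/IterativeSolvers>
    #include <Eigen/Sparse>
    #include <vector>

    int main() {
      typedef Eigen::SparseMatrix<double> SpMat;
      const int n = 100;
      std::vector<Eigen::Triplet<double> > t;
      for (int i = 0; i < n; ++i) {
        t.push_back(Eigen::Triplet<double>(i, i, 4.0));
        if (i + 1 < n) {
          t.push_back(Eigen::Triplet<double>(i, i + 1, -1.0));
          t.push_back(Eigen::Triplet<double>(i + 1, i, -1.0));
        }
      }
      SpMat A(n, n);
      A.setFromTriplets(t.begin(), t.end());
      Eigen::VectorXd b = Eigen::VectorXd::Ones(n);
      Eigen::GMRES<SpMat> gmres(A);
      gmres.set_restart(30);                // 'restart' is the Krylov subspace size used above
      Eigen::VectorXd x = gmres.solve(b);   // solveWithGuess() would exercise the new early-exit path
      return gmres.info() == Eigen::Success ? 0 : 1;
    }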
View File
@ -127,46 +127,47 @@ template<typename Func> void forward_jacobian(const Func& f)
VERIFY_IS_APPROX(j, jref); VERIFY_IS_APPROX(j, jref);
} }
// TODO also check actual derivatives!
void test_autodiff_scalar() void test_autodiff_scalar()
{ {
std::cerr << foo<float>(1,2) << "\n"; Vector2f p = Vector2f::Random();
typedef AutoDiffScalar<Vector2f> AD; typedef AutoDiffScalar<Vector2f> AD;
AD ax(1,Vector2f::UnitX()); AD ax(p.x(),Vector2f::UnitX());
AD ay(2,Vector2f::UnitY()); AD ay(p.y(),Vector2f::UnitY());
AD res = foo<AD>(ax,ay); AD res = foo<AD>(ax,ay);
std::cerr << res.value() << " <> " VERIFY_IS_APPROX(res.value(), foo(p.x(),p.y()));
<< res.derivatives().transpose() << "\n\n";
} }
// TODO also check actual derivatives!
void test_autodiff_vector() void test_autodiff_vector()
{ {
std::cerr << foo<Vector2f>(Vector2f(1,2)) << "\n"; Vector2f p = Vector2f::Random();
typedef AutoDiffScalar<Vector2f> AD; typedef AutoDiffScalar<Vector2f> AD;
typedef Matrix<AD,2,1> VectorAD; typedef Matrix<AD,2,1> VectorAD;
VectorAD p(AD(1),AD(-1)); VectorAD ap = p.cast<AD>();
p.x().derivatives() = Vector2f::UnitX(); ap.x().derivatives() = Vector2f::UnitX();
p.y().derivatives() = Vector2f::UnitY(); ap.y().derivatives() = Vector2f::UnitY();
AD res = foo<VectorAD>(p); AD res = foo<VectorAD>(ap);
std::cerr << res.value() << " <> " VERIFY_IS_APPROX(res.value(), foo(p));
<< res.derivatives().transpose() << "\n\n";
} }
void test_autodiff_jacobian() void test_autodiff_jacobian()
{ {
for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST(( forward_jacobian(TestFunc1<double,2,2>()) ));
CALL_SUBTEST(( forward_jacobian(TestFunc1<double,2,2>()) )); CALL_SUBTEST(( forward_jacobian(TestFunc1<double,2,3>()) ));
CALL_SUBTEST(( forward_jacobian(TestFunc1<double,2,3>()) )); CALL_SUBTEST(( forward_jacobian(TestFunc1<double,3,2>()) ));
CALL_SUBTEST(( forward_jacobian(TestFunc1<double,3,2>()) )); CALL_SUBTEST(( forward_jacobian(TestFunc1<double,3,3>()) ));
CALL_SUBTEST(( forward_jacobian(TestFunc1<double,3,3>()) )); CALL_SUBTEST(( forward_jacobian(TestFunc1<double>(3,3)) ));
CALL_SUBTEST(( forward_jacobian(TestFunc1<double>(3,3)) ));
}
} }
void test_autodiff() void test_autodiff()
{ {
test_autodiff_scalar(); for(int i = 0; i < g_repeat; i++) {
test_autodiff_vector(); CALL_SUBTEST_1( test_autodiff_scalar() );
// test_autodiff_jacobian(); CALL_SUBTEST_2( test_autodiff_vector() );
CALL_SUBTEST_3( test_autodiff_jacobian() );
}
} }
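As a hedged illustration of what the rewritten scalar test verifies, AutoDiffScalar carries a value and its gradient through ordinary expressions:

    #include <Eigen/Core>
    #include <unsupported/Eigen/AutoDiff>
    #include <iostream>

    int main() {
      typedef Eigen::AutoDiffScalar<Eigen::Vector2d> AD;
      AD x(1.0, 2, 0);           // value 1.0, two derivative slots, seeded as d/dx
      AD y(2.0, 2, 1);           // value 2.0, seeded as d/dy
      AD f = x * y + sin(x);     // derivatives propagate automatically
      std::cout << f.value() << "  grad: " << f.derivatives().transpose() << std::endl;
      // expected gradient: (y + cos(x), x) = (2 + cos(1), 1)
      return 0;
    }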
View File
@ -17,9 +17,17 @@
#pragma once #pragma once
#ifndef MKL_BLAS
#define MKL_BLAS MKL_DOMAIN_BLAS
#endif
#cmakedefine EIGEN_USE_MKL_ALL // This is also defined in config.h #cmakedefine EIGEN_USE_MKL_ALL // This is also defined in config.h
#include <@GTSAM_EIGEN_INCLUDE_PREFIX@Eigen/Dense> #include <@GTSAM_EIGEN_INCLUDE_PREFIX@Eigen/Dense>
#include <@GTSAM_EIGEN_INCLUDE_PREFIX@Eigen/QR> #include <@GTSAM_EIGEN_INCLUDE_PREFIX@Eigen/QR>
#include <@GTSAM_EIGEN_INCLUDE_PREFIX@Eigen/LU> #include <@GTSAM_EIGEN_INCLUDE_PREFIX@Eigen/LU>
#include <@GTSAM_EIGEN_INCLUDE_PREFIX@Eigen/SVD> #include <@GTSAM_EIGEN_INCLUDE_PREFIX@Eigen/SVD>
#include <@GTSAM_EIGEN_INCLUDE_PREFIX@Eigen/Geometry> #include <@GTSAM_EIGEN_INCLUDE_PREFIX@Eigen/Geometry>
View File
@ -9,7 +9,9 @@ if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
endif() endif()
endif() endif()
add_definitions(-Wno-unknown-pragmas) if(NOT ("${CMAKE_C_COMPILER_ID}" MATCHES "MSVC" OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "MSVC"))
#add_definitions(-Wno-unknown-pragmas)
endif()
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.6 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.6) if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.6 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.6)
View File
@ -59,9 +59,10 @@ typedef ptrdiff_t ssize_t;
#endif #endif
#ifdef __MSC__ #ifdef __MSC__
#if(_MSC_VER < 1700)
/* MSC does not have rint() function */ /* MSC does not have rint() function */
#define rint(x) ((int)((x)+0.5)) #define rint(x) ((int)((x)+0.5))
#endif
/* MSC does not have INFINITY defined */ /* MSC does not have INFINITY defined */
#ifndef INFINITY #ifndef INFINITY
#define INFINITY FLT_MAX #define INFINITY FLT_MAX
221
gtsam/base/ChartValue.h Normal file
View File
@ -0,0 +1,221 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/*
* @file ChartValue.h
* @brief ChartValue wraps a type T together with its Chart so T can play as a Value
* @date October, 2014
* @author Michael Bosse, Abel Gawel, Renaud Dube
* based on DerivedValue.h by Duy Nguyen Ta
*/
#pragma once
#include <gtsam/base/GenericValue.h>
#include <gtsam/base/Manifold.h>
#include <boost/make_shared.hpp>
//////////////////
// The following includes windows.h in some MSVC versions, so we undef min, max, and ERROR
#include <boost/pool/singleton_pool.hpp>
#ifdef min
#undef min
#endif
#ifdef max
#undef max
#endif
#ifdef ERROR
#undef ERROR
#endif
//////////////////
namespace gtsam {
/**
* ChartValue is derived from GenericValue<T> and Chart so that
* Chart can be zero sized (as in DefaultChart<T>);
* if the Chart were a member variable it could never be zero sized.
*/
template<class T, class Chart_ = DefaultChart<T> >
class ChartValue: public GenericValue<T>, public Chart_ {
BOOST_CONCEPT_ASSERT((ChartConcept<Chart_>));
public:
typedef T type;
typedef Chart_ Chart;
public:
/// Default Constructor. TODO might not make sense for some types
ChartValue() :
GenericValue<T>(T()) {
}
/// Construct from a value
ChartValue(const T& value) :
GenericValue<T>(value) {
}
/// Construct from a value and initialize the chart
template<typename C>
ChartValue(const T& value, C chart_initializer) :
GenericValue<T>(value), Chart(chart_initializer) {
}
/// Destructor
virtual ~ChartValue() {
}
/**
* Create a duplicate object returned as a pointer to the generic Value interface.
* For the sake of performance, this function uses a singleton pool allocator instead of the normal heap allocator.
* The result must be deleted with Value::deallocate_, not with the 'delete' operator.
*/
virtual Value* clone_() const {
void *place = boost::singleton_pool<PoolTag, sizeof(ChartValue)>::malloc();
ChartValue* ptr = new (place) ChartValue(*this); // calls copy constructor to fill in
return ptr;
}
/**
* Destroy and deallocate this object, only if it was originally allocated using clone_().
*/
virtual void deallocate_() const {
this->~ChartValue(); // Virtual destructor cleans up the derived object
boost::singleton_pool<PoolTag, sizeof(ChartValue)>::free((void*) this); // Release memory from pool
}
/**
* Clone this value (normal clone on the heap, delete with 'delete' operator)
*/
virtual boost::shared_ptr<Value> clone() const {
return boost::make_shared<ChartValue>(*this);
}
/// Chart Value interface version of retract
virtual Value* retract_(const Vector& delta) const {
// Call retract on the derived class using the retract trait function
const T retractResult = Chart::retract(GenericValue<T>::value(), delta);
// Create a Value pointer copy of the result
void* resultAsValuePlace =
boost::singleton_pool<PoolTag, sizeof(ChartValue)>::malloc();
Value* resultAsValue = new (resultAsValuePlace) ChartValue(retractResult,
static_cast<const Chart&>(*this));
// Return the pointer to the Value base class
return resultAsValue;
}
/// Generic Value interface version of localCoordinates
virtual Vector localCoordinates_(const Value& value2) const {
// Cast the base class Value pointer to a templated generic class pointer
const GenericValue<T>& genericValue2 =
static_cast<const GenericValue<T>&>(value2);
// Return the result of calling localCoordinates trait on the derived class
return Chart::local(GenericValue<T>::value(), genericValue2.value());
}
/// Non-virtual version of retract
ChartValue retract(const Vector& delta) const {
return ChartValue(Chart::retract(GenericValue<T>::value(), delta),
static_cast<const Chart&>(*this));
}
/// Non-virtual version of localCoordinates
Vector localCoordinates(const ChartValue& value2) const {
return localCoordinates_(value2);
}
/// Return run-time dimensionality
virtual size_t dim() const {
// need functional form here since the dimension may be dynamic
return Chart::getDimension(GenericValue<T>::value());
}
/// Assignment operator
virtual Value& operator=(const Value& rhs) {
// Cast the base class Value pointer to a derived class pointer
const ChartValue& derivedRhs = static_cast<const ChartValue&>(rhs);
// Do the assignment and return the result
*this = ChartValue(derivedRhs); // calls copy constructor
return *this;
}
protected:
// implicit assignment operator for (const ChartValue& rhs) works fine here
/// Assignment operator, protected because only the Value or DERIVED
/// assignment operators should be used.
// DerivedValue<DERIVED>& operator=(const DerivedValue<DERIVED>& rhs) {
// // Nothing to do, do not call base class assignment operator
// return *this;
// }
private:
/// Fake Tag struct for singleton pool allocator. In fact, it is never used!
struct PoolTag {
};
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int version) {
// ar & boost::serialization::make_nvp("value",);
// todo: implement a serialization for charts
ar
& boost::serialization::make_nvp("GenericValue",
boost::serialization::base_object<GenericValue<T> >(*this));
}
};
// Define
namespace traits {
/// The dimension of a ChartValue is the dimension of the chart
template<typename T, typename Chart>
struct dimension<ChartValue<T, Chart> > : public dimension<Chart> {
// TODO Frank thinks dimension is a property of type, chart should conform
};
} // \ traits
/// Get the chart from a Value
template<typename Chart>
const Chart& Value::getChart() const {
return dynamic_cast<const Chart&>(*this);
}
/// Convenience function that can be used to make an expression to convert a value to a chart
template<typename T>
ChartValue<T> convertToChartValue(const T& value,
boost::optional<
Eigen::Matrix<double, traits::dimension<T>::value,
traits::dimension<T>::value>&> H = boost::none) {
if (H) {
*H = Eigen::Matrix<double, traits::dimension<T>::value,
traits::dimension<T>::value>::Identity();
}
return ChartValue<T>(value);
}
} /* namespace gtsam */
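A hedged usage sketch (not part of the patch) of the class defined above, wrapping a dynamically sized gtsam::Vector and retracting it through its DefaultChart; it assumes the headers from this changeset are on the include path:

    #include <gtsam/base/ChartValue.h>
    #include <iostream>

    int main() {
      using namespace gtsam;
      Vector v = (Vector(2) << 1.0, 2.0).finished();
      ChartValue<Vector> value(v);                          // Chart defaults to DefaultChart<Vector>
      Vector delta = (Vector(2) << 0.1, -0.1).finished();
      ChartValue<Vector> moved = value.retract(delta);      // non-virtual retract via the chart
      std::cout << moved.value().transpose() << std::endl;  // 1.1 1.9
      std::cout << value.dim() << std::endl;                // 2, via Chart::getDimension
      return 0;
    }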
View File
@ -16,8 +16,9 @@
*/ */
#pragma once #pragma once
#include <boost/make_shared.hpp>
#include <gtsam/base/Value.h> #include <gtsam/base/Value.h>
#include <boost/make_shared.hpp>
////////////////// //////////////////
// The following includes windows.h in some MSVC versions, so we undef min, max, and ERROR // The following includes windows.h in some MSVC versions, so we undef min, max, and ERROR
168
gtsam/base/GenericValue.h Normal file
View File
@ -0,0 +1,168 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/*
* @file GenericValue.h
* @brief Wraps any type T so it can play as a Value
* @date October, 2014
* @author Michael Bosse, Abel Gawel, Renaud Dube
* based on DerivedValue.h by Duy Nguyen Ta
*/
#pragma once
#include <gtsam/base/Matrix.h>
#include <gtsam/base/Value.h>
#include <cmath>
#include <iostream>
namespace gtsam {
// To play as a GenericValue, we need the following traits
namespace traits {
// trait to wrap the default equals of types
template<typename T>
struct equals {
typedef T type;
typedef bool result_type;
bool operator()(const T& a, const T& b, double tol) {
return a.equals(b, tol);
}
};
// trait to wrap the default print of types
template<typename T>
struct print {
typedef T type;
typedef void result_type;
void operator()(const T& obj, const std::string& str) {
obj.print(str);
}
};
// equals for scalars
template<>
struct equals<double> {
typedef double type;
typedef bool result_type;
bool operator()(double a, double b, double tol) {
return std::abs(a - b) <= tol;
}
};
// print for scalars
template<>
struct print<double> {
typedef double type;
typedef void result_type;
void operator()(double a, const std::string& str) {
std::cout << str << ": " << a << std::endl;
}
};
// equals for Matrix types
template<int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
struct equals<Eigen::Matrix<double, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > {
typedef Eigen::Matrix<double, _Rows, _Cols, _Options, _MaxRows, _MaxCols> type;
typedef bool result_type;
bool operator()(const type& A, const type& B, double tol) {
return equal_with_abs_tol(A, B, tol);
}
};
// print for Matrix types
template<int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
struct print<Eigen::Matrix<double, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > {
typedef Eigen::Matrix<double, _Rows, _Cols, _Options, _MaxRows, _MaxCols> type;
typedef void result_type;
void operator()(const type& A, const std::string& str) {
std::cout << str << ": " << A << std::endl;
}
};
}
/**
* Wraps any type T so it can play as a Value
*/
template<class T>
class GenericValue: public Value {
public:
typedef T type;
protected:
T value_; ///< The wrapped value
public:
/// Construct from value
GenericValue(const T& value) :
value_(value) {
}
/// Return a constant value
const T& value() const {
return value_;
}
/// Return the value
T& value() {
return value_;
}
/// Destructor
virtual ~GenericValue() {
}
/// equals implementing generic Value interface
virtual bool equals_(const Value& p, double tol = 1e-9) const {
// Cast the base class Value pointer to a templated generic class pointer
const GenericValue& genericValue2 = static_cast<const GenericValue&>(p);
// Return the result of using the equals traits for the derived class
return traits::equals<T>()(this->value_, genericValue2.value_, tol);
}
/// non virtual equals function, uses traits
bool equals(const GenericValue &other, double tol = 1e-9) const {
return traits::equals<T>()(this->value(), other.value(), tol);
}
/// Virtual print function, uses traits
virtual void print(const std::string& str) const {
traits::print<T>()(value_, str);
}
// Serialization below:
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int version) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Value);
ar & BOOST_SERIALIZATION_NVP(value_);
}
protected:
// Assignment operator for this class not needed since GenericValue<T> is an abstract class
};
// define Value::cast here since now GenericValue has been declared
template<typename ValueType>
const ValueType& Value::cast() const {
return dynamic_cast<const GenericValue<ValueType>&>(*this).value();
}
} /* namespace gtsam */
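Since GenericValue<T> itself stays abstract, here is a hedged sketch of just the traits introduced above, which can be exercised on their own (again assuming this header is installed):

    #include <gtsam/base/GenericValue.h>

    int main() {
      // print<double> and equals<double> are the scalar specializations defined above
      gtsam::traits::print<double>()(2.5, "x");                             // prints "x: 2.5"
      bool same = gtsam::traits::equals<double>()(2.5, 2.5 + 1e-12, 1e-9);  // true within tolerance
      return same ? 0 : 1;
    }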
View File
@ -19,9 +19,9 @@
#include <cstdarg> #include <cstdarg>
#include <gtsam/base/DerivedValue.h>
#include <gtsam/base/Lie.h> #include <gtsam/base/Lie.h>
#include <gtsam/base/Matrix.h> #include <gtsam/base/Matrix.h>
#include <gtsam/base/DerivedValue.h>
#include <boost/serialization/nvp.hpp> #include <boost/serialization/nvp.hpp>
namespace gtsam { namespace gtsam {
@ -29,7 +29,7 @@ namespace gtsam {
/** /**
* LieVector is a wrapper around vector to allow it to be a Lie type * LieVector is a wrapper around vector to allow it to be a Lie type
*/ */
struct LieMatrix : public Matrix, public DerivedValue<LieMatrix> { struct LieMatrix : public Matrix {
/// @name Constructors /// @name Constructors
/// @{ /// @{
@ -40,9 +40,12 @@ struct LieMatrix : public Matrix, public DerivedValue<LieMatrix> {
/** initialize from a normal matrix */ /** initialize from a normal matrix */
LieMatrix(const Matrix& v) : Matrix(v) {} LieMatrix(const Matrix& v) : Matrix(v) {}
// Currently TMP constructor causes ICE on MSVS 2013
#if (_MSC_VER < 1800)
/** initialize from a fixed size normal vector */ /** initialize from a fixed size normal vector */
template<int M, int N> template<int M, int N>
LieMatrix(const Eigen::Matrix<double, M, N>& v) : Matrix(v) {} LieMatrix(const Eigen::Matrix<double, M, N>& v) : Matrix(v) {}
#endif
/** constructor with size and initial data, row order ! */ /** constructor with size and initial data, row order ! */
LieMatrix(size_t m, size_t n, const double* const data) : LieMatrix(size_t m, size_t n, const double* const data) :
@ -82,6 +85,7 @@ struct LieMatrix : public Matrix, public DerivedValue<LieMatrix> {
inline LieMatrix retract(const Vector& v) const { inline LieMatrix retract(const Vector& v) const {
if(v.size() != this->size()) if(v.size() != this->size())
throw std::invalid_argument("LieMatrix::retract called with Vector of incorrect size"); throw std::invalid_argument("LieMatrix::retract called with Vector of incorrect size");
return LieMatrix(*this + return LieMatrix(*this +
Eigen::Map<const Eigen::Matrix<double,Eigen::Dynamic,Eigen::Dynamic,Eigen::RowMajor> >( Eigen::Map<const Eigen::Matrix<double,Eigen::Dynamic,Eigen::Dynamic,Eigen::RowMajor> >(
&v(0), this->rows(), this->cols())); &v(0), this->rows(), this->cols()));
@ -153,7 +157,7 @@ struct LieMatrix : public Matrix, public DerivedValue<LieMatrix> {
result.data(), p.rows(), p.cols()) = p; result.data(), p.rows(), p.cols()) = p;
return result; return result;
} }
/// @} /// @}
private: private:
@ -162,12 +166,24 @@ private:
friend class boost::serialization::access; friend class boost::serialization::access;
template<class Archive> template<class Archive>
void serialize(Archive & ar, const unsigned int version) { void serialize(Archive & ar, const unsigned int version) {
ar & boost::serialization::make_nvp("LieMatrix",
boost::serialization::base_object<Value>(*this));
ar & boost::serialization::make_nvp("Matrix", ar & boost::serialization::make_nvp("Matrix",
boost::serialization::base_object<Matrix>(*this)); boost::serialization::base_object<Matrix>(*this));
} }
}; };
// Define GTSAM traits
namespace traits {
template<>
struct is_manifold<LieMatrix> : public boost::true_type {
};
template<>
struct dimension<LieMatrix> : public Dynamic {
};
}
} // \namespace gtsam } // \namespace gtsam
View File
@ -26,7 +26,7 @@ namespace gtsam {
/** /**
* LieScalar is a wrapper around double to allow it to be a Lie type * LieScalar is a wrapper around double to allow it to be a Lie type
*/ */
struct GTSAM_EXPORT LieScalar : public DerivedValue<LieScalar> { struct GTSAM_EXPORT LieScalar {
/** default constructor */ /** default constructor */
LieScalar() : d_(0.0) {} LieScalar() : d_(0.0) {}
@ -111,4 +111,22 @@ namespace gtsam {
private: private:
double d_; double d_;
}; };
// Define GTSAM traits
namespace traits {
template<>
struct is_group<LieScalar> : public boost::true_type {
};
template<>
struct is_manifold<LieScalar> : public boost::true_type {
};
template<>
struct dimension<LieScalar> : public boost::integral_constant<int, 1> {
};
}
} // \namespace gtsam } // \namespace gtsam
View File
@ -26,17 +26,20 @@ namespace gtsam {
/** /**
* LieVector is a wrapper around vector to allow it to be a Lie type * LieVector is a wrapper around vector to allow it to be a Lie type
*/ */
struct LieVector : public Vector, public DerivedValue<LieVector> { struct LieVector : public Vector {
/** default constructor - should be unnecessary */ /** default constructor - should be unnecessary */
LieVector() {} LieVector() {}
/** initialize from a normal vector */ /** initialize from a normal vector */
LieVector(const Vector& v) : Vector(v) {} LieVector(const Vector& v) : Vector(v) {}
// Currently TMP constructor causes ICE on MSVS 2013
#if (_MSC_VER < 1800)
/** initialize from a fixed size normal vector */ /** initialize from a fixed size normal vector */
template<int N> template<int N>
LieVector(const Eigen::Matrix<double, N, 1>& v) : Vector(v) {} LieVector(const Eigen::Matrix<double, N, 1>& v) : Vector(v) {}
#endif
/** wrap a double */ /** wrap a double */
LieVector(double d) : Vector((Vector(1) << d)) {} LieVector(double d) : Vector((Vector(1) << d)) {}
@ -120,11 +123,22 @@ private:
friend class boost::serialization::access; friend class boost::serialization::access;
template<class Archive> template<class Archive>
void serialize(Archive & ar, const unsigned int version) { void serialize(Archive & ar, const unsigned int version) {
ar & boost::serialization::make_nvp("LieVector",
boost::serialization::base_object<Value>(*this));
ar & boost::serialization::make_nvp("Vector", ar & boost::serialization::make_nvp("Vector",
boost::serialization::base_object<Vector>(*this)); boost::serialization::base_object<Vector>(*this));
} }
}; };
// Define GTSAM traits
namespace traits {
template<>
struct is_manifold<LieVector> : public boost::true_type {
};
template<>
struct dimension<LieVector> : public Dynamic {
};
}
} // \namespace gtsam } // \namespace gtsam
View File
@ -13,25 +13,19 @@
* @file Manifold.h * @file Manifold.h
* @brief Base class and basic functions for Manifold types * @brief Base class and basic functions for Manifold types
* @author Alex Cunningham * @author Alex Cunningham
* @author Frank Dellaert
*/ */
#pragma once #pragma once
#include <string>
#include <gtsam/base/Matrix.h> #include <gtsam/base/Matrix.h>
#include <boost/static_assert.hpp>
#include <boost/type_traits.hpp>
#include <string>
namespace gtsam { namespace gtsam {
/** /**
* Concept check class for Manifold types
* Requires a mapping between a linear tangent space and the underlying
* manifold, of which Lie is a specialization.
*
* The necessary functions to implement for Manifold are defined
* below with additional details as to the interface. The
* concept checking function in class Manifold will check whether or not
* the function exists and throw compile-time errors.
*
* A manifold defines a space in which there is a notion of a linear tangent space * A manifold defines a space in which there is a notion of a linear tangent space
* that can be centered around a given point on the manifold. These nonlinear * that can be centered around a given point on the manifold. These nonlinear
* spaces may have such properties as wrapping around (as is the case with rotations), * spaces may have such properties as wrapping around (as is the case with rotations),
@ -45,7 +39,256 @@ namespace gtsam {
* There may be multiple possible retractions for a given manifold, which can be chosen * There may be multiple possible retractions for a given manifold, which can be chosen
* between depending on the computational complexity. The important criteria for * between depending on the computational complexity. The important criteria for
* the creation for the retract and localCoordinates functions is that they be * the creation for the retract and localCoordinates functions is that they be
* inverse operations. * inverse operations. The new notion of a Chart guarantees that.
*
*/
// Traits, same style as Boost.TypeTraits
// All meta-functions below only ever declare a single type
// or a type/value/value_type
namespace traits {
// is group, by default this is false
template<typename T>
struct is_group: public boost::false_type {
};
// identity, no default provided, by default given by default constructor
template<typename T>
struct identity {
static T value() {
return T();
}
};
// is manifold, by default this is false
template<typename T>
struct is_manifold: public boost::false_type {
};
// dimension, can return Eigen::Dynamic (-1) if not known at compile time
// defaults to dynamic, TODO makes sense ?
typedef boost::integral_constant<int, Eigen::Dynamic> Dynamic;
template<typename T>
struct dimension: public Dynamic {
};
/**
* zero<T>::value is intended to be the origin of a canonical coordinate system
* with canonical(t) == DefaultChart<T>::local(zero<T>::value, t)
* Below we provide the group identity as zero *in case* it is a group
*/
template<typename T> struct zero: public identity<T> {
BOOST_STATIC_ASSERT(is_group<T>::value);
};
// double
template<>
struct is_group<double> : public boost::true_type {
};
template<>
struct is_manifold<double> : public boost::true_type {
};
template<>
struct dimension<double> : public boost::integral_constant<int, 1> {
};
template<>
struct zero<double> {
static double value() {
return 0;
}
};
// Fixed size Eigen::Matrix type
template<int M, int N, int Options>
struct is_group<Eigen::Matrix<double, M, N, Options> > : public boost::true_type {
};
template<int M, int N, int Options>
struct is_manifold<Eigen::Matrix<double, M, N, Options> > : public boost::true_type {
};
template<int M, int N, int Options>
struct dimension<Eigen::Matrix<double, M, N, Options> > : public boost::integral_constant<int,
M == Eigen::Dynamic ? Eigen::Dynamic : (N == Eigen::Dynamic ? Eigen::Dynamic : M * N)> {
//TODO after switch to c++11 : the above should should be extracted to a constexpr function
// for readability and to reduce code duplication
};
template<int M, int N, int Options>
struct zero<Eigen::Matrix<double, M, N, Options> > {
BOOST_STATIC_ASSERT_MSG((M!=Eigen::Dynamic && N!=Eigen::Dynamic),
"traits::zero is only supported for fixed-size matrices");
static Eigen::Matrix<double, M, N, Options> value() {
return Eigen::Matrix<double, M, N, Options>::Zero();
}
};
template<int M, int N, int Options>
struct identity<Eigen::Matrix<double, M, N, Options> > : public zero<Eigen::Matrix<double, M, N, Options> > {
};
template<typename T> struct is_chart: public boost::false_type {
};
} // \ namespace traits
// Chart is a map from T -> vector, retract is its inverse
template<typename T>
struct DefaultChart {
//BOOST_STATIC_ASSERT(traits::is_manifold<T>::value);
typedef T type;
typedef Eigen::Matrix<double, traits::dimension<T>::value, 1> vector;
static vector local(const T& origin, const T& other) {
return origin.localCoordinates(other);
}
static T retract(const T& origin, const vector& d) {
return origin.retract(d);
}
static int getDimension(const T& origin) {
return origin.dim();
}
};
namespace traits {
// populate default traits
template<typename T> struct is_chart<DefaultChart<T> > : public boost::true_type {
};
template<typename T> struct dimension<DefaultChart<T> > : public dimension<T> {
};
}
template<class C>
struct ChartConcept {
public:
typedef typename C::type type;
typedef typename C::vector vector;
BOOST_CONCEPT_USAGE(ChartConcept) {
// is_chart trait should be true
BOOST_STATIC_ASSERT((traits::is_chart<C>::value));
/**
* Returns Retraction update of val_
*/
type retract_ret = C::retract(val_, vec_);
/**
* Returns local coordinates of another object
*/
vec_ = C::local(val_, retract_ret);
// a way to get the dimension that is compatible with dynamically sized types
dim_ = C::getDimension(val_);
}
private:
type val_;
vector vec_;
int dim_;
};
/**
* CanonicalChart<Chart<T> > is a chart around zero<T>::value
* Canonical<T> is CanonicalChart<DefaultChart<T> >
* An example is Canonical<Rot3>
*/
template<typename C> struct CanonicalChart {
BOOST_CONCEPT_ASSERT((ChartConcept<C>));
typedef C Chart;
typedef typename Chart::type type;
typedef typename Chart::vector vector;
// Convert t of type T into canonical coordinates
vector local(const type& t) {
return Chart::local(traits::zero<type>::value(), t);
}
// Convert back from canonical coordinates to T
type retract(const vector& v) {
return Chart::retract(traits::zero<type>::value(), v);
}
};
template<typename T> struct Canonical: public CanonicalChart<DefaultChart<T> > {
};
// double
template<>
struct DefaultChart<double> {
typedef double type;
typedef Eigen::Matrix<double, 1, 1> vector;
static vector local(double origin, double other) {
vector d;
d << other - origin;
return d;
}
static double retract(double origin, const vector& d) {
return origin + d[0];
}
static const int getDimension(double) {
return 1;
}
};
// Fixed size Eigen::Matrix type
template<int M, int N, int Options>
struct DefaultChart<Eigen::Matrix<double, M, N, Options> > {
/**
* This chart for the vector space of M x N matrices (represented by Eigen matrices) uses the basis in which the coordinates are exactly the matrix entries as laid out in memory (as determined by Options).
* Computing coordinates for a matrix is then simply a reshape to the row vector of appropriate size.
*/
typedef Eigen::Matrix<double, M, N, Options> type;
typedef type T;
typedef Eigen::Matrix<double, traits::dimension<T>::value, 1> vector;
BOOST_STATIC_ASSERT_MSG((M!=Eigen::Dynamic && N!=Eigen::Dynamic),
"DefaultChart has not been implemented yet for dynamically sized matrices");
static vector local(const T& origin, const T& other) {
return reshape<vector::RowsAtCompileTime, 1, vector::Options>(other) - reshape<vector::RowsAtCompileTime, 1, vector::Options>(origin);
}
static T retract(const T& origin, const vector& d) {
return origin + reshape<M, N, Options>(d);
}
static int getDimension(const T&origin) {
return origin.rows() * origin.cols();
}
};
// Dynamically sized Vector
template<>
struct DefaultChart<Vector> {
typedef Vector T;
typedef T type;
typedef T vector;
static vector local(const T& origin, const T& other) {
return other - origin;
}
static T retract(const T& origin, const vector& d) {
return origin + d;
}
static int getDimension(const T& origin) {
return origin.size();
}
};
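A hedged sketch (illustrative, not from the patch) exercising the charts specialized above for plain doubles, with Canonical<double> mapping through the origin given by traits::zero:

    #include <gtsam/base/Manifold.h>
    #include <iostream>

    int main() {
      using namespace gtsam;
      // local and retract are inverse operations: retract(origin, local(origin, other)) == other
      DefaultChart<double>::vector d = DefaultChart<double>::local(1.0, 1.5);  // d == [0.5]
      double other = DefaultChart<double>::retract(1.0, d);                    // 1.5
      Canonical<double> canonical;                                             // chart around zero
      std::cout << other << "  " << canonical.local(2.0) << std::endl;         // 1.5  2
      return 0;
    }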
/**
* Old Concept check class for Manifold types
* Requires a mapping between a linear tangent space and the underlying
* manifold, of which Lie is a specialization.
*
* The necessary functions to implement for Manifold are defined
* below with additional details as to the interface. The
* concept checking function in class Manifold will check whether or not
* the function exists and throw compile-time errors.
* *
* Returns dimensionality of the tangent space, which may be smaller than the number * Returns dimensionality of the tangent space, which may be smaller than the number
* of nonlinear parameters. * of nonlinear parameters.
@ -61,7 +304,7 @@ namespace gtsam {
* By convention, we use capital letters to designate a static function * By convention, we use capital letters to designate a static function
* @tparam T is a Lie type, like Point2, Pose3, etc. * @tparam T is a Lie type, like Point2, Pose3, etc.
*/ */
template <class T> template<class T>
class ManifoldConcept { class ManifoldConcept {
private: private:
/** concept checking function - implement the functions this demands */ /** concept checking function - implement the functions this demands */
@ -89,7 +332,7 @@ private:
} }
}; };
} // namespace gtsam } // \ namespace gtsam
/** /**
* Macros for using the ManifoldConcept * Macros for using the ManifoldConcept
View File
@ -543,8 +543,7 @@ Matrix collect(size_t nrMatrices, ...)
void vector_scale_inplace(const Vector& v, Matrix& A, bool inf_mask) { void vector_scale_inplace(const Vector& v, Matrix& A, bool inf_mask) {
const DenseIndex m = A.rows(); const DenseIndex m = A.rows();
if (inf_mask) { if (inf_mask) {
// only scale the first v.size() rows of A to support augmented Matrix for (DenseIndex i=0; i<m; ++i) {
for (DenseIndex i=0; i<v.size(); ++i) {
const double& vi = v(i); const double& vi = v(i);
if (std::isfinite(vi)) if (std::isfinite(vi))
A.row(i) *= vi; A.row(i) *= vi;
@ -668,7 +667,7 @@ Matrix expm(const Matrix& A, size_t K) {
/* ************************************************************************* */ /* ************************************************************************* */
Matrix Cayley(const Matrix& A) { Matrix Cayley(const Matrix& A) {
size_t n = A.cols(); Matrix::Index n = A.cols();
assert(A.rows() == n); assert(A.rows() == n);
// original // original
View File
@ -37,10 +37,28 @@ namespace gtsam {
typedef Eigen::MatrixXd Matrix; typedef Eigen::MatrixXd Matrix;
typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> MatrixRowMajor; typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> MatrixRowMajor;
typedef Eigen::Matrix2d Matrix2;
typedef Eigen::Matrix3d Matrix3; typedef Eigen::Matrix3d Matrix3;
typedef Eigen::Matrix4d Matrix4; typedef Eigen::Matrix4d Matrix4;
typedef Eigen::Matrix<double,5,5> Matrix5;
typedef Eigen::Matrix<double,6,6> Matrix6; typedef Eigen::Matrix<double,6,6> Matrix6;
typedef Eigen::Matrix<double,2,3> Matrix23;
typedef Eigen::Matrix<double,2,4> Matrix24;
typedef Eigen::Matrix<double,2,5> Matrix25;
typedef Eigen::Matrix<double,2,6> Matrix26;
typedef Eigen::Matrix<double,2,7> Matrix27;
typedef Eigen::Matrix<double,2,8> Matrix28;
typedef Eigen::Matrix<double,2,9> Matrix29;
typedef Eigen::Matrix<double,3,2> Matrix32;
typedef Eigen::Matrix<double,3,4> Matrix34;
typedef Eigen::Matrix<double,3,5> Matrix35;
typedef Eigen::Matrix<double,3,6> Matrix36;
typedef Eigen::Matrix<double,3,7> Matrix37;
typedef Eigen::Matrix<double,3,8> Matrix38;
typedef Eigen::Matrix<double,3,9> Matrix39;
// Matrix expressions for accessing parts of matrices // Matrix expressions for accessing parts of matrices
typedef Eigen::Block<Matrix> SubMatrix; typedef Eigen::Block<Matrix> SubMatrix;
typedef Eigen::Block<const Matrix> ConstSubMatrix; typedef Eigen::Block<const Matrix> ConstSubMatrix;
@@ -275,6 +293,49 @@ void zeroBelowDiagonal(MATRIX& A, size_t cols=0) {
  */
 inline Matrix trans(const Matrix& A) { return A.transpose(); }

+/// Reshape functor
+template <int OutM, int OutN, int OutOptions, int InM, int InN, int InOptions>
+struct Reshape {
+  // TODO: replace this with Eigen's reshape function as soon as available.
+  // (There is a PR already pending: https://bitbucket.org/eigen/eigen/pull-request/41/reshape/diff)
+  typedef Eigen::Map<const Eigen::Matrix<double, OutM, OutN, OutOptions> > ReshapedType;
+  static inline ReshapedType reshape(const Eigen::Matrix<double, InM, InN, InOptions> & in) {
+    return in.data();
+  }
+};
+
+/// Reshape specialization that does nothing as shape stays the same
+/// (needed to not be ambiguous for square input equals square output)
+template <int M, int InOptions>
+struct Reshape<M, M, InOptions, M, M, InOptions> {
+  typedef const Eigen::Matrix<double, M, M, InOptions> & ReshapedType;
+  static inline ReshapedType reshape(const Eigen::Matrix<double, M, M, InOptions> & in) {
+    return in;
+  }
+};
+
+/// Reshape specialization that does nothing as shape stays the same
+template <int M, int N, int InOptions>
+struct Reshape<M, N, InOptions, M, N, InOptions> {
+  typedef const Eigen::Matrix<double, M, N, InOptions> & ReshapedType;
+  static inline ReshapedType reshape(const Eigen::Matrix<double, M, N, InOptions> & in) {
+    return in;
+  }
+};
+
+/// Reshape specialization that does transpose
+template <int M, int N, int InOptions>
+struct Reshape<N, M, InOptions, M, N, InOptions> {
+  typedef typename Eigen::Matrix<double, M, N, InOptions>::ConstTransposeReturnType ReshapedType;
+  static inline ReshapedType reshape(const Eigen::Matrix<double, M, N, InOptions> & in) {
+    return in.transpose();
+  }
+};
+
+template <int OutM, int OutN, int OutOptions, int InM, int InN, int InOptions>
+inline typename Reshape<OutM, OutN, OutOptions, InM, InN, InOptions>::ReshapedType reshape(const Eigen::Matrix<double, InM, InN, InOptions> & m) {
+  BOOST_STATIC_ASSERT(InM * InN == OutM * OutN);
+  return Reshape<OutM, OutN, OutOptions, InM, InN, InOptions>::reshape(m);
+}
+
 /**
  * solve AX=B via in-place Lu factorization and backsubstitution
  * After calling, A contains LU, B the solved RHS vectors
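
An illustrative usage sketch of the reshape helper introduced above (not part of the diff; it assumes the templates compile as shown and relies on Eigen's default column-major storage):

#include <gtsam/base/Matrix.h>
#include <iostream>

int main() {
  gtsam::Matrix23 M;
  M << 1, 2, 3,
       4, 5, 6;

  // Reinterpret the 2x3 (column-major) storage as a 6x1 column vector: 1, 4, 2, 5, 3, 6.
  Eigen::Matrix<double, 6, 1> v = gtsam::reshape<6, 1, Eigen::ColMajor>(M);
  std::cout << v.transpose() << std::endl;

  // With swapped dimensions and equal storage options, reshape() dispatches to the
  // transpose specialization above, so Mt == M.transpose().
  gtsam::Matrix32 Mt = gtsam::reshape<3, 2, Eigen::ColMajor>(M);
  std::cout << Mt << std::endl;
  return 0;
}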
@ -398,7 +459,6 @@ GTSAM_EXPORT Matrix collect(size_t nrMatrices, ...);
* Arguments (Matrix, Vector) scales the columns, * Arguments (Matrix, Vector) scales the columns,
* (Vector, Matrix) scales the rows * (Vector, Matrix) scales the rows
* @param inf_mask when true, will not scale with a NaN or inf value. * @param inf_mask when true, will not scale with a NaN or inf value.
* The inplace version also allows v.size()<A.rows() and only scales the first v.size() rows of A.
*/ */
GTSAM_EXPORT void vector_scale_inplace(const Vector& v, Matrix& A, bool inf_mask = false); // row GTSAM_EXPORT void vector_scale_inplace(const Vector& v, Matrix& A, bool inf_mask = false); // row
GTSAM_EXPORT Matrix vector_scale(const Vector& v, const Matrix& A, bool inf_mask = false); // row GTSAM_EXPORT Matrix vector_scale(const Vector& v, const Matrix& A, bool inf_mask = false); // row
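
An illustrative sketch of the row/column scaling convention documented above (not part of the diff; it assumes the (Matrix, Vector) column-scaling overload declared alongside these):

#include <gtsam/base/Matrix.h>
#include <iostream>

int main() {
  gtsam::Matrix A(2, 3);
  A << 1, 2, 3,
       4, 5, 6;

  // (Vector, Matrix): scale the rows. Row i of the result is v(i) * A.row(i).
  gtsam::Vector v(2);
  v << 10, 100;
  gtsam::Matrix rowScaled = gtsam::vector_scale(v, A);   // [10 20 30; 400 500 600]

  // (Matrix, Vector): scale the columns. Column j of the result is w(j) * A.col(j).
  gtsam::Vector w(3);
  w << 1, 10, 100;
  gtsam::Matrix colScaled = gtsam::vector_scale(A, w);   // [1 20 300; 4 50 600]

  std::cout << rowScaled << "\n\n" << colScaled << std::endl;
  return 0;
}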
@@ -467,7 +527,7 @@ GTSAM_EXPORT Matrix Cayley(const Matrix& A);
 /// Implementation of Cayley transform using fixed size matrices to let
 /// Eigen do more optimization
 template<int N>
-Eigen::Matrix<double, N, N> Cayley(const Eigen::Matrix<double, N, N>& A) {
+Eigen::Matrix<double, N, N> CayleyFixed(const Eigen::Matrix<double, N, N>& A) {
   typedef Eigen::Matrix<double, N, N> FMat;
   return (FMat::Identity() - A)*(FMat::Identity() + A).inverse();
 }
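
The Cayley transform maps a skew-symmetric matrix A to an orthogonal matrix via (I - A)(I + A)^{-1}. An illustrative sketch of the renamed fixed-size version (not part of the diff):

#include <gtsam/base/Matrix.h>
#include <iostream>

int main() {
  // A 3x3 skew-symmetric matrix; its Cayley transform (I - A)(I + A)^{-1} is a rotation.
  gtsam::Matrix3 A;
  A <<  0.0, -0.1,  0.2,
        0.1,  0.0, -0.3,
       -0.2,  0.3,  0.0;

  gtsam::Matrix3 R = gtsam::CayleyFixed<3>(A);

  // R should be orthogonal: R * R^T == I up to numerical precision.
  std::cout << (R * R.transpose() - gtsam::Matrix3::Identity()).norm() << std::endl;
  return 0;
}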

gtsam/base/Value.h

@@ -36,18 +36,19 @@ namespace gtsam {
  * Values can operate generically on Value objects, retracting or computing
  * local coordinates for many Value objects of different types.
  *
- * When you implement retract_(), localCoordinates_(), and equals_(), we
- * suggest first implementing versions of these functions that work directly
- * with derived objects, then using the provided helper functions to
- * implement the generic Value versions. This makes your implementation
- * easier, and also improves performance in situations where the derived type
- * is in fact known, such as in most implementations of \c evaluateError() in
- * classes derived from NonlinearFactor.
+ * Inheriting from the templated DerivedValue class provides a generic implementation of
+ * the pure virtual functions retract_(), localCoordinates_(), and equals_(), eliminating
+ * the need to implement these functions in your class. Note that you must inherit from
+ * DerivedValue templated on the class you are defining. For example, you cannot define
+ * the following:
+ * \code
+ * class Rot3 : public DerivedValue<Point3> { /* class definition */ };
+ * \endcode
  *
  * Using the above practice, here is an example of implementing a typical
  * class derived from Value:
  * \code
-class Rot3 : public Value {
+class GTSAM_EXPORT Rot3 : public DerivedValue<Rot3> {
 public:
   // Constructor, there is never a need to call the Value base class constructor.
   Rot3() { ... }
@@ -74,27 +75,6 @@
     // Math to implement 3D rotation localCoordinates, e.g. logarithm map
     return Vector(result);
   }
-
-  // Equals implementing the generic Value interface (virtual, implements Value::equals_())
-  virtual bool equals_(const Value& other, double tol = 1e-9) const {
-    // Call our provided helper function to call your Rot3-specific
-    // equals with appropriate casting.
-    return CallDerivedEquals(this, other, tol);
-  }
-
-  // retract implementing the generic Value interface (virtual, implements Value::retract_())
-  virtual std::auto_ptr<Value> retract_(const Vector& delta) const {
-    // Call our provided helper function to call your Rot3-specific
-    // retract and do the appropriate casting and allocation.
-    return CallDerivedRetract(this, delta);
-  }
-
-  // localCoordinates implementing the generic Value interface (virtual, implements Value::localCoordinates_())
-  virtual Vector localCoordinates_(const Value& value) const {
-    // Call our provided helper function to call your Rot3-specific
-    // localCoordinates and do the appropriate casting.
-    return CallDerivedLocalCoordinates(this, value);
-  }
 };
 \endcode
 */
@@ -140,7 +120,17 @@
     virtual Vector localCoordinates_(const Value& value) const = 0;

     /** Assignment operator */
-    virtual Value& operator=(const Value& rhs) = 0;
+    virtual Value& operator=(const Value& rhs) {
+      // needs an empty definition so recursion in implicit derived assignment operators works
+      return *this;
+    }
+
+    /** Cast to known ValueType */
+    template<typename ValueType>
+    const ValueType& cast() const;
+
+    template<typename Chart>
+    const Chart& getChart() const;

     /** Virtual destructor */
     virtual ~Value() {}
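
An illustrative sketch of the new cast<ValueType>() member on a type-erased Value (not part of the diff; it assumes Rot3 and Values behave as in the rest of GTSAM and that cast() fails loudly on a type mismatch):

#include <gtsam/geometry/Rot3.h>
#include <gtsam/nonlinear/Values.h>

using namespace gtsam;

int main() {
  Values values;
  values.insert(1, Rot3());            // stored type-erased behind the Value interface

  // Generic access returns the abstract base...
  const Value& v = values.at(1);

  // ...and cast<>() recovers the concrete type without the caller
  // knowing which Value subclass is actually stored.
  const Rot3& R = v.cast<Rot3>();
  R.print("R: ");
  return 0;
}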

gtsam/base/Vector.cpp

@@ -30,55 +30,11 @@
 #include <gtsam/base/Vector.h>

-//#ifdef WIN32
-//#include <Windows.h>
-//#endif
-
 using namespace std;

 namespace gtsam {

-/* ************************************************************************* */
-void odprintf_(const char *format, ostream& stream, ...) {
-  char buf[4096], *p = buf;
-  va_list args;
-
-  va_start(args, stream);
-#ifdef WIN32
-  _vsnprintf(p, sizeof buf - 3, format, args); // buf-3 is room for CR/LF/NUL
-#else
-  vsnprintf(p, sizeof buf - 3, format, args); // buf-3 is room for CR/LF/NUL
-#endif
-  va_end(args);
-
-//#ifdef WIN32
-//  OutputDebugString(buf);
-//#else
-  stream << buf;
-//#endif
-}
-
-/* ************************************************************************* */
-void odprintf(const char *format, ...) {
-  char buf[4096], *p = buf;
-  va_list args;
-
-  va_start(args, format);
-#ifdef WIN32
-  _vsnprintf(p, sizeof buf - 3, format, args); // buf-3 is room for CR/LF/NUL
-#else
-  vsnprintf(p, sizeof buf - 3, format, args); // buf-3 is room for CR/LF/NUL
-#endif
-  va_end(args);
-
-//#ifdef WIN32
-//  OutputDebugString(buf);
-//#else
-  cout << buf;
-//#endif
-}
-
 /* ************************************************************************* */
 bool zero(const Vector& v) {
   bool result = true;
@@ -101,10 +57,12 @@ Vector delta(size_t n, size_t i, double value) {
 /* ************************************************************************* */
 void print(const Vector& v, const string& s, ostream& stream) {
   size_t n = v.size();
-  odprintf_("%s [", stream, s.c_str());
-  for(size_t i=0; i<n; i++)
-    odprintf_("%g%s", stream, v[i], (i<n-1 ? "; " : ""));
-  odprintf_("];\n", stream);
+
+  stream << s << "[";
+  for(size_t i=0; i<n; i++) {
+    stream << setprecision(9) << v(i) << (i<n-1 ? "; " : "");
+  }
+  stream << "];" << endl;
 }

 /* ************************************************************************* */

gtsam/base/Vector.h

@@ -36,16 +36,16 @@ typedef Eigen::VectorXd Vector;
 // Commonly used fixed size vectors
 typedef Eigen::Vector2d Vector2;
 typedef Eigen::Vector3d Vector3;
+typedef Eigen::Matrix<double, 4, 1> Vector4;
+typedef Eigen::Matrix<double, 5, 1> Vector5;
 typedef Eigen::Matrix<double, 6, 1> Vector6;
+typedef Eigen::Matrix<double, 7, 1> Vector7;
+typedef Eigen::Matrix<double, 8, 1> Vector8;
+typedef Eigen::Matrix<double, 9, 1> Vector9;

 typedef Eigen::VectorBlock<Vector> SubVector;
 typedef Eigen::VectorBlock<const Vector> ConstSubVector;

-/**
- * An auxiliary function to printf for Win32 compatibility, added by Kai
- */
-GTSAM_EXPORT void odprintf(const char *format, ...);
-
 /**
  * Create vector initialized to a constant value
  * @param n is the size of the vector

gtsam/base/VerticalBlockMatrix.h

@@ -65,36 +65,39 @@ namespace gtsam {
     /** Construct from a container of the sizes of each vertical block. */
     template<typename CONTAINER>
-    VerticalBlockMatrix(const CONTAINER& dimensions, DenseIndex height, bool appendOneDimension = false) :
-      rowStart_(0), rowEnd_(height), blockStart_(0)
-    {
+    VerticalBlockMatrix(const CONTAINER& dimensions, DenseIndex height,
+        bool appendOneDimension = false) :
+        variableColOffsets_(dimensions.size() + (appendOneDimension ? 2 : 1)),
+        rowStart_(0), rowEnd_(height), blockStart_(0) {
       fillOffsets(dimensions.begin(), dimensions.end(), appendOneDimension);
       matrix_.resize(height, variableColOffsets_.back());
       assertInvariants();
     }

     /** Construct from a container of the sizes of each vertical block and a pre-prepared matrix. */
-    template<typename CONTAINER>
-    VerticalBlockMatrix(const CONTAINER& dimensions, const Matrix& matrix, bool appendOneDimension = false) :
-      matrix_(matrix), rowStart_(0), rowEnd_(matrix.rows()), blockStart_(0)
-    {
+    template<typename CONTAINER, typename DERIVED>
+    VerticalBlockMatrix(const CONTAINER& dimensions,
+        const Eigen::MatrixBase<DERIVED>& matrix, bool appendOneDimension = false) :
+        matrix_(matrix), variableColOffsets_(dimensions.size() + (appendOneDimension ? 2 : 1)),
+        rowStart_(0), rowEnd_(matrix.rows()), blockStart_(0) {
       fillOffsets(dimensions.begin(), dimensions.end(), appendOneDimension);
-      if(variableColOffsets_.back() != matrix_.cols())
-        throw std::invalid_argument("Requested to create a VerticalBlockMatrix with dimensions that do not sum to the total columns of the provided matrix.");
+      if (variableColOffsets_.back() != matrix_.cols())
+        throw std::invalid_argument(
+            "Requested to create a VerticalBlockMatrix with dimensions that do not sum to the total columns of the provided matrix.");
       assertInvariants();
     }

-    /**
-     * Construct from iterator over the sizes of each vertical block. */
+    /** Construct from iterator over the sizes of each vertical block. */
     template<typename ITERATOR>
-    VerticalBlockMatrix(ITERATOR firstBlockDim, ITERATOR lastBlockDim, DenseIndex height, bool appendOneDimension = false) :
-      rowStart_(0), rowEnd_(height), blockStart_(0)
-    {
+    VerticalBlockMatrix(ITERATOR firstBlockDim, ITERATOR lastBlockDim,
+        DenseIndex height, bool appendOneDimension = false) :
+        variableColOffsets_((lastBlockDim-firstBlockDim) + (appendOneDimension ? 2 : 1)),
+        rowStart_(0), rowEnd_(height), blockStart_(0) {
       fillOffsets(firstBlockDim, lastBlockDim, appendOneDimension);
       matrix_.resize(height, variableColOffsets_.back());
       assertInvariants();
     }

     /** Copy the block structure and resize the underlying matrix, but do not copy the matrix data.
      * If blockStart(), rowStart(), and/or rowEnd() have been modified, this copies the structure of
      * the corresponding matrix view. In the destination VerticalBlockView, blockStart() and
@@ -203,18 +206,12 @@ namespace gtsam {
     template<typename ITERATOR>
     void fillOffsets(ITERATOR firstBlockDim, ITERATOR lastBlockDim, bool appendOneDimension) {
-      variableColOffsets_.resize((lastBlockDim-firstBlockDim) + 1 + (appendOneDimension ? 1 : 0));
       variableColOffsets_[0] = 0;
       DenseIndex j=0;
-      for(ITERATOR dim=firstBlockDim; dim!=lastBlockDim; ++dim) {
+      for(ITERATOR dim=firstBlockDim; dim!=lastBlockDim; ++dim, ++j)
         variableColOffsets_[j+1] = variableColOffsets_[j] + *dim;
-        ++ j;
-      }
       if(appendOneDimension)
-      {
         variableColOffsets_[j+1] = variableColOffsets_[j] + 1;
-        ++ j;
-      }
     }

     friend class SymmetricBlockMatrix;
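
An illustrative construction sketch for the updated constructors (not part of the diff; it assumes the block accessor operator() and matrix() declared elsewhere in this header):

#include <gtsam/base/VerticalBlockMatrix.h>
#include <iostream>
#include <vector>

using namespace gtsam;

int main() {
  // Three vertical blocks of widths 3, 2 and 1, plus an appended unit-width
  // RHS block, all sharing 4 rows: the underlying matrix is 4 x 7.
  std::vector<size_t> dims;
  dims.push_back(3);
  dims.push_back(2);
  dims.push_back(1);

  VerticalBlockMatrix Ab(dims, 4, true);  // appendOneDimension = true
  Ab.matrix().setZero();

  Ab(0).setOnes();         // the 4x3 block
  Ab(3).setConstant(2.0);  // the appended 4x1 block

  std::cout << Ab.matrix() << std::endl;
  return 0;
}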

(File diff suppressed because it is too large.)

gtsam/base/tests/testMatrix.cpp

@@ -1127,6 +1127,12 @@ TEST( matrix, svd2 )
   svd(sampleA, U, s, V);

+  // take care of sign ambiguity
+  if (U(0, 1) > 0) {
+    U = -U;
+    V = -V;
+  }
+
   EXPECT(assert_equal(expectedU,U));
   EXPECT(assert_equal(expected_s,s,1e-9));
   EXPECT(assert_equal(expectedV,V));
@@ -1143,6 +1149,13 @@ TEST( matrix, svd3 )
   Matrix expectedV = (Matrix(3, 2) << 0.,1.,0.,0.,-1.,0.);

   svd(sampleAt, U, s, V);

+  // take care of sign ambiguity
+  if (U(0, 0) > 0) {
+    U = -U;
+    V = -V;
+  }
+
   Matrix S = diag(s);
   Matrix t = U * S;
   Matrix Vt = trans(V);
@@ -1176,6 +1189,17 @@ TEST( matrix, svd4 )
       0.6723, 0.7403);

   svd(A, U, s, V);

+  // take care of sign ambiguity
+  if (U(0, 0) < 0) {
+    U.col(0) = -U.col(0);
+    V.col(0) = -V.col(0);
+  }
+  if (U(0, 1) < 0) {
+    U.col(1) = -U.col(1);
+    V.col(1) = -V.col(1);
+  }
+
   Matrix reconstructed = U * diag(s) * trans(V);

   EXPECT(assert_equal(A, reconstructed, 1e-4));
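
The added fix-ups account for the SVD sign ambiguity: U*diag(s)*V^T is unchanged if corresponding columns of U and V are negated together. A standalone sketch of the same canonicalization in plain Eigen (not part of the diff):

#include <Eigen/Dense>
#include <iostream>

// Flip the sign of each singular pair so that the first entry of every
// column of U is non-negative; U * S * V^T is unchanged.
void canonicalizeSigns(Eigen::MatrixXd& U, Eigen::MatrixXd& V) {
  for (int j = 0; j < U.cols(); ++j) {
    if (U(0, j) < 0) {
      U.col(j) = -U.col(j);
      V.col(j) = -V.col(j);
    }
  }
}

int main() {
  Eigen::MatrixXd A(3, 2);
  A << 1, 2, 3, 4, 5, 6;

  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  Eigen::MatrixXd U = svd.matrixU(), V = svd.matrixV();
  canonicalizeSigns(U, V);

  // Reconstruction is unaffected by the sign convention.
  std::cout << (U * svd.singularValues().asDiagonal() * V.transpose() - A).norm() << std::endl;
  return 0;
}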

gtsam/base/tests/testNumericalDerivative.cpp

@@ -15,115 +15,123 @@
  * @date Apr 8, 2011
  */

+#include <gtsam/base/numericalDerivative.h>
 #include <CppUnitLite/TestHarness.h>
-#include <gtsam/base/numericalDerivative.h>

+using namespace std;
 using namespace gtsam;

 /* ************************************************************************* */
-double f(const LieVector& x) {
+double f(const Vector2& x) {
   assert(x.size() == 2);
   return sin(x(0)) + cos(x(1));
 }

 /* ************************************************************************* */
-TEST(testNumericalDerivative, numericalHessian) {
-  LieVector center = ones(2);
-
-  Matrix expected = (Matrix(2,2) <<
-      -sin(center(0)), 0.0,
-      0.0, -cos(center(1)));
-
-  Matrix actual = numericalHessian(f, center);
+//
+TEST(testNumericalDerivative, numericalGradient) {
+  Vector2 x(1, 1);
+
+  Vector expected(2);
+  expected << cos(x(1)), -sin(x(0));
+
+  Vector actual = numericalGradient<Vector2>(f, x);
+
+  EXPECT(assert_equal(expected, actual, 1e-5));
+}
+
+/* ************************************************************************* */
+TEST(testNumericalDerivative, numericalHessian) {
+  Vector2 x(1, 1);
+
+  Matrix expected(2, 2);
+  expected << -sin(x(0)), 0.0, 0.0, -cos(x(1));
+
+  Matrix actual = numericalHessian<Vector2>(f, x);

   EXPECT(assert_equal(expected, actual, 1e-5));
 }

 /* ************************************************************************* */
-double f2(const LieVector& x) {
+double f2(const Vector2& x) {
   assert(x.size() == 2);
   return sin(x(0)) * cos(x(1));
 }

 /* ************************************************************************* */
+//
 TEST(testNumericalDerivative, numericalHessian2) {
-  Vector v_center = (Vector(2) << 0.5, 1.0);
-  LieVector center(v_center);
-
-  Matrix expected = (Matrix(2,2) <<
-      -cos(center(1))*sin(center(0)), -sin(center(1))*cos(center(0)),
-      -cos(center(0))*sin(center(1)), -sin(center(0))*cos(center(1)));
-
-  Matrix actual = numericalHessian(f2, center);
+  Vector2 v(0.5, 1.0);
+  Vector2 x(v);
+
+  Matrix expected = (Matrix(2, 2) << -cos(x(1)) * sin(x(0)), -sin(x(1))
+      * cos(x(0)), -cos(x(0)) * sin(x(1)), -sin(x(0)) * cos(x(1)));
+
+  Matrix actual = numericalHessian(f2, x);

   EXPECT(assert_equal(expected, actual, 1e-5));
 }

 /* ************************************************************************* */
-double f3(const LieVector& x1, const LieVector& x2) {
-  assert(x1.size() == 1 && x2.size() == 1);
-  return sin(x1(0)) * cos(x2(0));
+double f3(double x1, double x2) {
+  return sin(x1) * cos(x2);
 }

 /* ************************************************************************* */
+//
 TEST(testNumericalDerivative, numericalHessian211) {
-  Vector v_center1 = (Vector(1) << 1.0);
-  Vector v_center2 = (Vector(1) << 5.0);
-  LieVector center1(v_center1), center2(v_center2);
-
-  Matrix expected11 = (Matrix(1, 1) << -sin(center1(0))*cos(center2(0)));
-  Matrix actual11 = numericalHessian211(f3, center1, center2);
+  double x1 = 1, x2 = 5;
+
+  Matrix expected11 = (Matrix(1, 1) << -sin(x1) * cos(x2));
+  Matrix actual11 = numericalHessian211<double, double>(f3, x1, x2);
   EXPECT(assert_equal(expected11, actual11, 1e-5));

-  Matrix expected12 = (Matrix(1, 1) << -cos(center1(0))*sin(center2(0)));
-  Matrix actual12 = numericalHessian212(f3, center1, center2);
+  Matrix expected12 = (Matrix(1, 1) << -cos(x1) * sin(x2));
+  Matrix actual12 = numericalHessian212<double, double>(f3, x1, x2);
   EXPECT(assert_equal(expected12, actual12, 1e-5));

-  Matrix expected22 = (Matrix(1, 1) << -sin(center1(0))*cos(center2(0)));
-  Matrix actual22 = numericalHessian222(f3, center1, center2);
+  Matrix expected22 = (Matrix(1, 1) << -sin(x1) * cos(x2));
+  Matrix actual22 = numericalHessian222<double, double>(f3, x1, x2);
   EXPECT(assert_equal(expected22, actual22, 1e-5));
 }

 /* ************************************************************************* */
-double f4(const LieVector& x, const LieVector& y, const LieVector& z) {
-  assert(x.size() == 1 && y.size() == 1 && z.size() == 1);
-  return sin(x(0)) * cos(y(0)) * z(0)*z(0);
+double f4(double x, double y, double z) {
+  return sin(x) * cos(y) * z * z;
 }

 /* ************************************************************************* */
+//
 TEST(testNumericalDerivative, numericalHessian311) {
-  Vector v_center1 = (Vector(1) << 1.0);
-  Vector v_center2 = (Vector(1) << 2.0);
-  Vector v_center3 = (Vector(1) << 3.0);
-  LieVector center1(v_center1), center2(v_center2), center3(v_center3);
-  double x = center1(0), y = center2(0), z = center3(0);
-
-  Matrix expected11 = (Matrix(1, 1) << -sin(x)*cos(y)*z*z);
-  Matrix actual11 = numericalHessian311(f4, center1, center2, center3);
+  double x = 1, y = 2, z = 3;
+  Matrix expected11 = (Matrix(1, 1) << -sin(x) * cos(y) * z * z);
+  Matrix actual11 = numericalHessian311<double, double, double>(f4, x, y, z);
   EXPECT(assert_equal(expected11, actual11, 1e-5));

-  Matrix expected12 = (Matrix(1, 1) << -cos(x)*sin(y)*z*z);
-  Matrix actual12 = numericalHessian312(f4, center1, center2, center3);
+  Matrix expected12 = (Matrix(1, 1) << -cos(x) * sin(y) * z * z);
+  Matrix actual12 = numericalHessian312<double, double, double>(f4, x, y, z);
   EXPECT(assert_equal(expected12, actual12, 1e-5));

-  Matrix expected13 = (Matrix(1, 1) << cos(x)*cos(y)*2*z);
-  Matrix actual13 = numericalHessian313(f4, center1, center2, center3);
+  Matrix expected13 = (Matrix(1, 1) << cos(x) * cos(y) * 2 * z);
+  Matrix actual13 = numericalHessian313<double, double, double>(f4, x, y, z);
   EXPECT(assert_equal(expected13, actual13, 1e-5));

-  Matrix expected22 = (Matrix(1, 1) << -sin(x)*cos(y)*z*z);
-  Matrix actual22 = numericalHessian322(f4, center1, center2, center3);
+  Matrix expected22 = (Matrix(1, 1) << -sin(x) * cos(y) * z * z);
+  Matrix actual22 = numericalHessian322<double, double, double>(f4, x, y, z);
   EXPECT(assert_equal(expected22, actual22, 1e-5));

-  Matrix expected23 = (Matrix(1, 1) << -sin(x)*sin(y)*2*z);
-  Matrix actual23 = numericalHessian323(f4, center1, center2, center3);
+  Matrix expected23 = (Matrix(1, 1) << -sin(x) * sin(y) * 2 * z);
+  Matrix actual23 = numericalHessian323<double, double, double>(f4, x, y, z);
  EXPECT(assert_equal(expected23, actual23, 1e-5));

-  Matrix expected33 = (Matrix(1, 1) << sin(x)*cos(y)*2);
-  Matrix actual33 = numericalHessian333(f4, center1, center2, center3);
+  Matrix expected33 = (Matrix(1, 1) << sin(x) * cos(y) * 2);
+  Matrix actual33 = numericalHessian333<double, double, double>(f4, x, y, z);
   EXPECT(assert_equal(expected33, actual33, 1e-5));
 }

 /* ************************************************************************* */
-int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
+int main() {
+  TestResult tr;
+  return TestRegistry::runAllTests(tr);
+}
 /* ************************************************************************* */
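
An illustrative sketch of checking an analytic gradient with the helpers exercised above (not part of the diff; it follows the numericalGradient<X>(f, x) pattern used in these tests and assumes Vector3 is handled like Vector2):

#include <gtsam/base/numericalDerivative.h>
#include <cmath>
#include <iostream>

using namespace gtsam;

// Scalar function of a 3-vector with a known analytic gradient.
double g(const Vector3& x) {
  return x(0) * x(0) + std::sin(x(1)) * x(2);
}

int main() {
  Vector3 x(1.0, 0.5, 2.0);

  // Analytic gradient: [2*x0, cos(x1)*x2, sin(x1)].
  Vector3 analytic(2.0 * x(0), std::cos(x(1)) * x(2), std::sin(x(1)));

  // Finite-difference gradient from gtsam/base/numericalDerivative.h.
  Vector numerical = numericalGradient<Vector3>(g, x);

  std::cout << (numerical - analytic).norm() << std::endl;  // prints a small finite-difference error
  return 0;
}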

Some files were not shown because too many files have changed in this diff.