Merge remote-tracking branch 'svn/trunk' into NLO

Conflicts:
	gtsam/nonlinear/NonlinearOptimization.h

commit da70164987
325	.cproject
(All .cproject hunks edit Eclipse CDT make-target entries inside <buildTargets>; each entry carries the usual <buildCommand>make</buildCommand>, <buildArguments>, <buildTarget>, <stopOnError>, <useDefaultCommand> and <runAllBuilders> children. The <target> entries appearing in each hunk are:)

@@ -311,14 +311,6 @@     testGaussianFactor.run (linear/tests); all (inference)
@@ -345,6 +337,7 @@      tests/testBayesTree.run (inference)
@@ -352,6 +345,7 @@      testBinaryBayesNet.run (inference)
@@ -399,6 +393,7 @@      testSymbolicBayesNet.run (inference)
@@ -406,6 +401,7 @@      tests/testSymbolicFactor.run (inference)
@@ -413,6 +409,7 @@      testSymbolicFactorGraph.run (inference)
@@ -428,11 +425,20 @@     tests/testBayesTree (inference); testGaussianFactor.run (linear/tests); check (tests)
@@ -459,7 +465,6 @@       testGraph.run (tests)
@@ -531,7 +536,6 @@       testInference.run (tests)
@@ -539,7 +543,6 @@       testGaussianFactor.run (tests)
@@ -547,7 +550,6 @@       testJunctionTree.run (tests)
@@ -555,7 +557,6 @@       testSymbolicBayesNet.run (tests)
@@ -563,7 +564,6 @@       testSymbolicFactorGraph.run (tests)
@@ -633,22 +633,6 @@      all (CppUnitLite); clean (CppUnitLite); tests/testPose2.run (build_retract/gtsam/geometry)
@@ -665,6 +649,22 @@      all (CppUnitLite); clean (CppUnitLite); all (spqr_mini)
@@ -689,18 +689,26 @@     check.nonlinear, timing.nonlinear (build/gtsam/nonlinear); all, check, clean (build_wrap)
@@ -753,26 +761,34 @@     all, check, clean (build_wrap); check.nonlinear, timing.nonlinear, nonlinear.testValues.run, nonlinear.testOrdering.run (build/gtsam/nonlinear)
@@ -1123,6 +1139,7 @@     testErrors.run (linear)
@@ -1602,7 +1619,6 @@     testSimulated2DOriented.run (slam)
@@ -1642,7 +1658,6 @@     testSimulated2D.run (slam)
@@ -1650,7 +1665,6 @@     testSimulated3D.run (slam)
@@ -1744,10 +1758,10 @@    tests/testNoiseModel.run -> linear.testNoiseModel.run (build/gtsam/linear), -j2 -> -j5
@@ -1824,6 +1838,14 @@    linear.testSerializationLinear.run (build/gtsam/linear); SimpleRotation.run (build/examples)
@@ -1914,7 +1936,6 @@     tests/testGaussianISAM2 (build/slam)
@@ -1936,6 +1957,93 @@     install, clean, check, all, cmake, gtsam-shared, gtsam-static, timing, examples, "verbose all", "verbose check" (top level); testRot3.run (geometry)
@@ -2032,23 +2140,7 @@     install, clean, check (top level); check (build)
@@ -2056,7 +2148,23 @@     all (top level); clean, install, all (build)
@@ -2064,46 +2172,13 @@    cmake, gtsam-shared, gtsam-static, timing, examples (top level); cmake (build); wrap.testSpirit.run (build/wrap)
@@ -2144,46 +2219,6 @@     check, clean, install, all, cmake (build)

</buildTargets>
</storageModule>
<storageModule moduleId="org.eclipse.cdt.internal.ui.text.commentOwnerProjectMappings"/>
100	CMakeLists.txt
@@ -2,37 +2,58 @@ project(GTSAM CXX C)
cmake_minimum_required(VERSION 2.6)

# Set the version number for the library
set (GTSAM_VERSION_MAJOR 0)
set (GTSAM_VERSION_MAJOR 1)
set (GTSAM_VERSION_MINOR 9)
set (GTSAM_VERSION_PATCH 3)
set (GTSAM_VERSION_PATCH 0)

# Set the default install path to home
set (CMAKE_INSTALL_PREFIX ${HOME} CACHE DOCSTRING "Install prefix for library")

# Use macros for creating tests/timing scripts
set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
include(GtsamTesting)
include(GtsamPrinting)

# guard against in-source builds
if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})
message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. ")
endif()

# guard against bad build-type strings
if (NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE "Debug")
# Default to Debug mode
if(NOT FIRST_PASS_DONE)
set(CMAKE_BUILD_TYPE "Debug" CACHE STRING
"Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel."
FORCE)
endif()

# Check build types
if(${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION} VERSION_GREATER 2.8 OR ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION} VERSION_EQUAL 2.8)
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS Debug Release Timing Profiling RelWithDebInfo MinSizeRel)
endif()
string(TOLOWER "${CMAKE_BUILD_TYPE}" cmake_build_type_tolower)
if( NOT cmake_build_type_tolower STREQUAL "debug"
AND NOT cmake_build_type_tolower STREQUAL "release"
AND NOT cmake_build_type_tolower STREQUAL "timing"
AND NOT cmake_build_type_tolower STREQUAL "profiling"
AND NOT cmake_build_type_tolower STREQUAL "relwithdebinfo")
message(FATAL_ERROR "Unknown build type \"${CMAKE_BUILD_TYPE}\". Allowed values are Debug, Release, RelWithDebInfo (case-insensitive).")
message(FATAL_ERROR "Unknown build type \"${CMAKE_BUILD_TYPE}\". Allowed values are Debug, Release, Timing, Profiling, RelWithDebInfo (case-insensitive).")
endif()

# Add debugging flags
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -fno-inline -Wall")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-inline -Wall")
set(CMAKE_C_FLAGS_RELWITHDEBINFO "-g -fno-inline -Wall -DNDEBUG -DEIGEN_NO_DEBUG")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-g -fno-inline -Wall -DNDEBUG -DEIGEN_NO_DEBUG")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -DNDEBUG -Wall -DEIGEN_NO_DEBUG")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -DNDEBUG -Wall -DEIGEN_NO_DEBUG")
# Add debugging flags but only on the first pass
if(NOT FIRST_PASS_DONE)
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -fno-inline -Wall" CACHE STRING "Flags used by the compiler during debug builds." FORCE)
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-inline -Wall" CACHE STRING "Flags used by the compiler during debug builds." FORCE)
set(CMAKE_C_FLAGS_RELWITHDEBINFO "-g -fno-inline -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during relwithdebinfo builds." FORCE)
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-g -fno-inline -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during relwithdebinfo builds." FORCE)
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Wall" CACHE STRING "Flags used by the compiler during release builds." FORCE)
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Wall" CACHE STRING "Flags used by the compiler during release builds." FORCE)
set(CMAKE_C_FLAGS_TIMING "${CMAKE_C_FLAGS_RELEASE} -DENABLE_TIMING" CACHE STRING "Flags used by the compiler during timing builds." FORCE)
set(CMAKE_CXX_FLAGS_TIMING "${CMAKE_CXX_FLAGS_RELEASE} -DENABLE_TIMING" CACHE STRING "Flags used by the compiler during timing builds." FORCE)
mark_as_advanced(CMAKE_C_FLAGS_TIMING CMAKE_CXX_FLAGS_TIMING)
set(CMAKE_C_FLAGS_PROFILING "-g -O2 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during profiling builds." FORCE)
set(CMAKE_CXX_FLAGS_PROFILING "-g -O2 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during profiling builds." FORCE)
mark_as_advanced(CMAKE_C_FLAGS_PROFILING CMAKE_CXX_FLAGS_PROFILING)
endif()

# Configurable Options
option(GTSAM_BUILD_TESTS "Enable/Disable building of tests" ON)

@@ -41,6 +62,10 @@ option(GTSAM_BUILD_EXAMPLES "Enable/Disable building of examples" ON)
option(GTSAM_BUILD_WRAP "Enable/Disable building of matlab wrap utility (necessary for matlab interface)" ON)
option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices" OFF)
option(GTSAM_BUILD_CONVENIENCE_LIBRARIES "Enable/Disable use of convenience libraries for faster development rebuilds, but slower install" ON)
option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" ON)
option(GTSAM_INSTALL_MATLAB_EXAMPLES "Enable/Disable installation of matlab examples" ON)
option(GTSAM_INSTALL_MATLAB_TESTS "Enable/Disable installation of matlab tests" ON)
option(GTSAM_INSTALL_WRAP "Enable/Disable installation of wrap utility" ON)

# Add the Quaternion Build Flag if requested
if (GTSAM_USE_QUATERNIONS)

@@ -59,10 +84,6 @@ if (GTSAM_BUILD_TESTS)
include(CTest)
endif()

# Use macros for creating tests/timing scripts
set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
include(GtsamTesting)

# Enable make check (http://www.cmake.org/Wiki/CMakeEmulateMakeCheck)
add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND})
add_custom_target(timing)

@@ -97,3 +118,48 @@ endif(GTSAM_BUILD_WRAP)
if (GTSAM_BUILD_EXAMPLES)
add_subdirectory(examples)
endif(GTSAM_BUILD_EXAMPLES)

# Mark that first pass is done
set(FIRST_PASS_DONE true CACHE BOOL "Internally used to mark whether cmake has been run multiple times")
mark_as_advanced(FIRST_PASS_DONE)

# print configuration variables
message(STATUS "===============================================================")
message(STATUS "================ Configuration Options ======================")
message(STATUS "Build flags ")
print_config_flag(${GTSAM_BUILD_TIMING} "Build Timing scripts ")
print_config_flag(${GTSAM_BUILD_EXAMPLES} "Build Examples ")
print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ")
print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ")
print_config_flag(${GTSAM_BUILD_CONVENIENCE_LIBRARIES} "Build Convenience Libraries")
string(TOUPPER "${CMAKE_BUILD_TYPE}" cmake_build_type_toupper)
message(STATUS " Build type : ${CMAKE_BUILD_TYPE}")
message(STATUS " C compilation flags : ${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${cmake_build_type_toupper}}")
message(STATUS " C++ compilation flags : ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${cmake_build_type_toupper}}")

message(STATUS "GTSAM flags ")
print_config_flag(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3")

message(STATUS "MATLAB toolbox flags ")
print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install matlab toolbox ")
print_config_flag(${GTSAM_INSTALL_MATLAB_EXAMPLES} "Install matlab examples ")
print_config_flag(${GTSAM_INSTALL_MATLAB_TESTS} "Install matlab tests ")
print_config_flag(${GTSAM_INSTALL_WRAP} "Install wrap utility ")
message(STATUS "===============================================================")

# Set up CPack
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "GTSAM")
set(CPACK_PACKAGE_VENDOR "Frank Dellaert, Georgia Institute of Technology")
set(CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README")
set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE")
set(CPACK_PACKAGE_VERSION_MAJOR ${GTSAM_VERSION_MAJOR})
set(CPACK_PACKAGE_VERSION_MINOR ${GTSAM_VERSION_MINOR})
set(CPACK_PACKAGE_VERSION_PATCH ${GTSAM_VERSION_PATCH})
set(CPACK_PACKAGE_INSTALL_DIRECTORY "CMake ${CMake_VERSION_MAJOR}.${CMake_VERSION_MINOR}")
set(CPACK_INSTALLED_DIRECTORIES "doc" ".") # Include doc directory
set(CPACK_SOURCE_IGNORE_FILES "/build;/\\\\.;/makedoc.sh$")
set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
#set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-aspn${GTSAM_VERSION_PATCH}") # Used for creating ASPN tarballs
set(CPACK_SOURCE_GENERATOR "TGZ")
set(CPACK_GENERATOR "TGZ")
include(CPack)
|
|
|||
|
|
@ -3,22 +3,19 @@
|
|||
# The following variables will be defined:
|
||||
#
|
||||
# CppUnitLite_FOUND : TRUE if the package has been successfully found
|
||||
# CppUnitLite_INCLUDE_DIRS : paths to CppUnitLite's INCLUDE directories
|
||||
# CppUnitLite_INCLUDE_DIR : paths to CppUnitLite's INCLUDE directories
|
||||
# CppUnitLite_LIBS : paths to CppUnitLite's libraries
|
||||
|
||||
|
||||
# Find include dirs
|
||||
find_path(_CppUnitLite_INCLUDE_DIR CppUnitLite/Test.h
|
||||
PATHS ${GTSAM_ROOT} ${CMAKE_INSTALL_PREFIX}/include ${HOME}/include /usr/local/include /usr/include )
|
||||
PATHS ${CMAKE_INSTALL_PREFIX}/include "$ENV{HOME}/include" /usr/local/include /usr/include )
|
||||
|
||||
# Find libraries
|
||||
find_library(_CppUnitLite_LIB NAMES CppUnitLite
|
||||
HINTS ${_CppUnitLite_INCLUDE_DIR}/build/CppUnitLite ${_CppUnitLite_INCLUDE_DIR}/CppUnitLite)
|
||||
|
||||
set (CppUnitLite_INCLUDE_DIRS ${_CppUnitLite_INCLUDE_DIR})
|
||||
set (CppUnitLite_LIBS ${_CppUnitLite_LIB})
|
||||
|
||||
HINTS ${CMAKE_INSTALL_PREFIX}/lib "$ENV{HOME}/lib" /usr/local/lib /usr/lib)
|
||||
|
||||
set (CppUnitLite_INCLUDE_DIR ${_CppUnitLite_INCLUDE_DIR} CACHE STRING "CppUnitLite INCLUDE directories")
|
||||
set (CppUnitLite_LIBS ${_CppUnitLite_LIB} CACHE STRING "CppUnitLite libraries")
|
||||
|
||||
# handle the QUIETLY and REQUIRED arguments and set LIBXML2_FOUND to TRUE
|
||||
# if all listed variables are TRUE
|
||||
|
|
|
|||
|
|
@ -3,23 +3,19 @@
|
|||
# The following variables will be defined:
|
||||
#
|
||||
# GTSAM_FOUND : TRUE if the package has been successfully found
|
||||
# GTSAM_INCLUDE_DIRS : paths to GTSAM's INCLUDE directories
|
||||
# GTSAM_INCLUDE_DIR : paths to GTSAM's INCLUDE directories
|
||||
# GTSAM_LIBS : paths to GTSAM's libraries
|
||||
|
||||
|
||||
# Find include dirs
|
||||
find_path(_gtsam_INCLUDE_DIR gtsam/inference/FactorGraph.h
|
||||
PATHS ${GTSAM_ROOT} ${CMAKE_INSTALL_PREFIX}/include ${HOME}/include /usr/local/include /usr/include )
|
||||
PATHS ${CMAKE_INSTALL_PREFIX}/include "$ENV{HOME}/include" /usr/local/include /usr/include )
|
||||
|
||||
# Find libraries
|
||||
find_library(_gtsam_LIB NAMES gtsam
|
||||
HINTS ${_gtsam_INCLUDE_DIR}/build-debug/gtsam/.libs ${_gtsam_INCLUDE_DIR}/build/gtsam/.libs ${_gtsam_INCLUDE_DIR}/gtsam/.libs
|
||||
NO_DEFAULT_PATH)
|
||||
|
||||
set (GTSAM_INCLUDE_DIRS ${_gtsam_INCLUDE_DIR} CACHE STRING "GTSAM INCLUDE directories")
|
||||
set (GTSAM_LIBS ${_gtsam_LIB} CACHE STRING "GTSAM libraries")
|
||||
|
||||
HINTS ${CMAKE_INSTALL_PREFIX}/lib "$ENV{HOME}/lib" /usr/local/lib /usr/lib)
|
||||
|
||||
set (GTSAM_INCLUDE_DIR ${_gtsam_INCLUDE_DIR} CACHE STRING "GTSAM INCLUDE directories")
|
||||
set (GTSAM_LIBS ${_gtsam_LIB} CACHE STRING "GTSAM libraries")
|
||||
|
||||
# handle the QUIETLY and REQUIRED arguments and set LIBXML2_FOUND to TRUE
|
||||
# if all listed variables are TRUE
|
||||
|
|
|
|||
|
|
@ -0,0 +1,30 @@
|
|||
# This is FindWrap.cmake
|
||||
# CMake module to locate the Wrap tool and header after installation package
|
||||
# The following variables will be defined:
|
||||
#
|
||||
# Wrap_FOUND : TRUE if the package has been successfully found
|
||||
# Wrap_CMD : command for executing wrap
|
||||
# Wrap_INCLUDE_DIR : paths to Wrap's INCLUDE directories
|
||||
|
||||
# Find include dir
|
||||
find_path(_Wrap_INCLUDE_DIR wrap/matlab.h
|
||||
PATHS ${CMAKE_INSTALL_PREFIX}/include "$ENV{HOME}/include" /usr/local/include /usr/include )
|
||||
|
||||
# Find the installed executable
|
||||
find_program(_Wrap_CMD NAMES wrap
|
||||
PATHS ${CMAKE_INSTALL_PREFIX}/bin "$ENV{HOME}/bin" /usr/local/bin /usr/bin )
|
||||
|
||||
set (Wrap_INCLUDE_DIR ${_Wrap_INCLUDE_DIR} CACHE STRING "Wrap INCLUDE directories")
|
||||
set (Wrap_CMD ${_Wrap_CMD} CACHE STRING "Wrap executable location")
|
||||
|
||||
# handle the QUIETLY and REQUIRED arguments and set LIBXML2_FOUND to TRUE
|
||||
# if all listed variables are TRUE
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(Wrap DEFAULT_MSG
|
||||
_Wrap_INCLUDE_DIR _Wrap_CMD)
|
||||
|
||||
mark_as_advanced(_Wrap_INCLUDE_DIR _Wrap_CMD )
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
# Macros for using wrap functionality
|
||||
macro(find_mexextension)
|
||||
## Determine the mex extension
|
||||
# Apple Macintosh (64-bit) mexmaci64
|
||||
# Linux (32-bit) mexglx
|
||||
# Linux (64-bit) mexa64
|
||||
# Microsoft Windows (32-bit) mexw32
|
||||
# Windows (64-bit) mexw64
|
||||
|
||||
# only support 64-bit apple
|
||||
if(CMAKE_HOST_APPLE)
|
||||
set(GTSAM_MEX_BIN_EXTENSION_default mexmaci64)
|
||||
endif(CMAKE_HOST_APPLE)
|
||||
|
||||
if(NOT CMAKE_HOST_APPLE)
|
||||
# check 64 bit
|
||||
if( ${CMAKE_SIZEOF_VOID_P} EQUAL 4 )
|
||||
set( HAVE_64_BIT 0 )
|
||||
endif( ${CMAKE_SIZEOF_VOID_P} EQUAL 4 )
|
||||
|
||||
if( ${CMAKE_SIZEOF_VOID_P} EQUAL 8 )
|
||||
set( HAVE_64_BIT 1 )
|
||||
endif( ${CMAKE_SIZEOF_VOID_P} EQUAL 8 )
|
||||
|
||||
# Check for linux machines
|
||||
if (CMAKE_HOST_UNIX)
|
||||
if (HAVE_64_BIT)
|
||||
set(GTSAM_MEX_BIN_EXTENSION_default mexa64)
|
||||
else (HAVE_64_BIT)
|
||||
set(GTSAM_MEX_BIN_EXTENSION_default mexglx)
|
||||
endif (HAVE_64_BIT)
|
||||
endif(CMAKE_HOST_UNIX)
|
||||
|
||||
# Check for windows machines
|
||||
if (CMAKE_HOST_WIN32)
|
||||
if (HAVE_64_BIT)
|
||||
set(GTSAM_MEX_BIN_EXTENSION_default mexw64)
|
||||
else (HAVE_64_BIT)
|
||||
set(GTSAM_MEX_BIN_EXTENSION_default mexw32)
|
||||
endif (HAVE_64_BIT)
|
||||
endif(CMAKE_HOST_WIN32)
|
||||
endif(NOT CMAKE_HOST_APPLE)
|
||||
|
||||
# Allow for setting mex extension manually
|
||||
set(GTSAM_MEX_BIN_EXTENSION ${GTSAM_MEX_BIN_EXTENSION_default} CACHE DOCSTRING "Extension for matlab mex files")
|
||||
message(STATUS "Detected Matlab mex extension: ${GTSAM_MEX_BIN_EXTENSION_default}")
|
||||
message(STATUS "Current Matlab mex extension: ${GTSAM_MEX_BIN_EXTENSION}")
|
||||
endmacro(find_mexextension)
|
||||
|
|
@@ -0,0 +1,10 @@
# print configuration variables
# Usage:
#print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ")
macro(print_config_flag flag msg)
if ("${flag}" STREQUAL "ON")
message(STATUS " ${msg}: Enabled")
else ("${flag}" STREQUAL "ON")
message(STATUS " ${msg}: Disabled")
endif ("${flag}" STREQUAL "ON")
endmacro(print_config_flag)
|
@@ -6,7 +6,7 @@ macro(gtsam_add_tests subdir libs)
file(GLOB tests_srcs "tests/test*.cpp")
foreach(test_src ${tests_srcs})
get_filename_component(test_base ${test_src} NAME_WE)
set( test_bin ${subdir}.${test_base} )
set( test_bin ${test_base} )
message(STATUS "Adding Test ${test_bin}")
add_executable(${test_bin} ${test_src})
add_dependencies(check.${subdir} ${test_bin})

@@ -30,7 +30,7 @@ macro(gtsam_add_external_tests subdir libs)
file(GLOB tests_srcs "tests/test*.cpp")
foreach(test_src ${tests_srcs})
get_filename_component(test_base ${test_src} NAME_WE)
set( test_bin ${subdir}.${test_base} )
set( test_bin ${test_base} )
message(STATUS "Adding Test ${test_bin}")
add_executable(${test_bin} ${test_src})
add_dependencies(check.${subdir} ${test_bin})

@@ -48,7 +48,7 @@ macro(gtsam_add_timing subdir libs)
file(GLOB base_timing_srcs "tests/time*.cpp")
foreach(time_src ${base_timing_srcs})
get_filename_component(time_base ${time_src} NAME_WE)
set( time_bin ${subdir}.${time_base} )
set( time_bin ${time_base} )
message(STATUS "Adding Timing Benchmark ${time_bin}")
add_executable(${time_bin} ${time_src})
add_dependencies(timing.${subdir} ${time_bin})
|
@@ -9,6 +9,7 @@ foreach(example_src ${example_srcs} )
add_dependencies(examples ${example_bin})
add_executable(${example_bin} ${example_src})
target_link_libraries(${example_bin} gtsam-static)
add_custom_target(${example_bin}.run ${EXECUTABLE_OUTPUT_PATH}${example_bin} ${ARGN})
endforeach(example_src)

add_subdirectory(vSLAMexample)
|
@@ -39,13 +39,13 @@
#include <gtsam/linear/GaussianSequentialSolver.h>
#include <gtsam/linear/GaussianMultifrontalSolver.h>

// Main typedefs
typedef gtsam::NonlinearOptimizer<gtsam::NonlinearFactorGraph,gtsam::GaussianFactorGraph,gtsam::GaussianSequentialSolver> OptimizerSeqential; // optimization engine for this domain
typedef gtsam::NonlinearOptimizer<gtsam::NonlinearFactorGraph,gtsam::GaussianFactorGraph,gtsam::GaussianMultifrontalSolver> OptimizerMultifrontal; // optimization engine for this domain

using namespace std;
using namespace gtsam;

// Main typedefs
typedef NonlinearOptimizer<NonlinearFactorGraph,GaussianFactorGraph,GaussianSequentialSolver> OptimizerSeqential; // optimization engine for this domain
typedef NonlinearOptimizer<NonlinearFactorGraph,GaussianFactorGraph,GaussianMultifrontalSolver> OptimizerMultifrontal; // optimization engine for this domain

/**
* In this version of the system we make the following assumptions:
* - All values are axis aligned
|
@@ -39,7 +39,7 @@ graph.addOdometry(x2, x3, odom_measurement, odom_model);

%% Add measurements
% general noisemodel for measurements
meas_model = SharedNoiseModel_sharedSigmas([0.1; 0.2]);
meas_model = gtsamSharedNoiseModel_Sigmas([0.1; 0.2]);

% print
graph.print('full graph');
4	gtsam.h
|
@@ -214,7 +214,7 @@ class CalibratedCamera {
CalibratedCamera(const Vector& v);

void print(string s) const;
bool equals(const gtsam::Pose3& pose, double tol) const;
bool equals(const gtsam::CalibratedCamera& camera, double tol) const;

gtsam::Pose3 pose() const;

@@ -404,7 +404,7 @@ class Ordering {
Ordering();
void print(string s) const;
bool equals(const gtsam::Ordering& ord, double tol) const;
void push_back(string key);
void push_back(size_t key);
};

class NonlinearOptimizationParameters {
|
@@ -17,6 +17,7 @@

#pragma once

#include <gtsam/base/DerivedValue.h>
#include <gtsam/base/Lie.h>

namespace gtsam {

@@ -24,7 +25,7 @@ namespace gtsam {
/**
* LieScalar is a wrapper around double to allow it to be a Lie type
*/
struct LieScalar {
struct LieScalar : public DerivedValue<LieScalar> {

/** default constructor */
LieScalar() : d_(0.0) {}
|
@ -70,6 +70,14 @@ bool assert_equal(const V& expected, const boost::optional<V>& actual, double to
|
|||
return assert_equal(expected, *actual, tol);
|
||||
}
|
||||
|
||||
template<class V>
|
||||
bool assert_equal(const V& expected, const boost::optional<const V&>& actual, double tol = 1e-9) {
|
||||
if (!actual) {
|
||||
std::cout << "actual is boost::none" << std::endl;
|
||||
return false;
|
||||
}
|
||||
return assert_equal(expected, *actual, tol);
|
||||
}
|
||||
|
||||
/**
|
||||
* Version of assert_equals to work with vectors
|
||||
|
|
@ -247,6 +255,45 @@ bool assert_container_equal(const V& expected, const V& actual, double tol = 1e-
|
|||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Function for comparing maps of size_t->testable
|
||||
* Types are assumed to have operator ==
|
||||
*/
|
||||
template<class V2>
|
||||
bool assert_container_equality(const std::map<size_t,V2>& expected, const std::map<size_t,V2>& actual) {
|
||||
typedef typename std::map<size_t,V2> Map;
|
||||
bool match = true;
|
||||
if (expected.size() != actual.size())
|
||||
match = false;
|
||||
typename Map::const_iterator
|
||||
itExp = expected.begin(),
|
||||
itAct = actual.begin();
|
||||
if(match) {
|
||||
for (; itExp!=expected.end() && itAct!=actual.end(); ++itExp, ++itAct) {
|
||||
if (itExp->first != itAct->first || itExp->second != itAct->second) {
|
||||
match = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if(!match) {
|
||||
std::cout << "expected: " << std::endl;
|
||||
BOOST_FOREACH(const typename Map::value_type& a, expected) {
|
||||
std::cout << "Key: " << a.first << std::endl;
|
||||
std::cout << "Value: " << a.second << std::endl;
|
||||
}
|
||||
std::cout << "\nactual: " << std::endl;
|
||||
BOOST_FOREACH(const typename Map::value_type& a, actual) {
|
||||
std::cout << "Key: " << a.first << std::endl;
|
||||
std::cout << "Value: " << a.second << std::endl;
|
||||
}
|
||||
std::cout << std::endl;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* General function for comparing containers of objects with operator==
|
||||
*/
|
||||
|
|
|
|||
|
|
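The assert_container_equality overload added above compares two std::map<size_t, V2> containers by size and element-wise equality, printing both maps on mismatch. A minimal usage sketch follows; the gtsam:: qualification and the include path are assumptions, and the plain bool return is shown without any test harness:

    #include <map>
    #include <gtsam/base/TestableAssertions.h>   // header assumed to declare assert_container_equality

    bool maps_match() {
      std::map<size_t, int> expected, actual;
      expected[0] = 1; expected[1] = 2;
      actual[0] = 1; actual[1] = 2;
      // true here; on mismatch the function prints both maps key-by-key and returns false
      return gtsam::assert_container_equality(expected, actual);
    }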
@@ -74,6 +74,12 @@ public:
/** Default constructor as an empty BayesNet */
BayesNet() {};

/** convert from a derived type */
template<class DERIVEDCONDITIONAL>
BayesNet(const BayesNet<DERIVEDCONDITIONAL>& bn) {
conditionals_.assign(bn.begin(), bn.end());
}

/** BayesNet with 1 conditional */
BayesNet(const sharedConditional& conditional) { push_back(conditional); }
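The converting constructor above forwards to assign(), which works because each boost::shared_ptr<Derived> converts implicitly to boost::shared_ptr<Base>. A standalone sketch of just that mechanism (Base/Derived are illustrative types, not GTSAM classes):

    #include <vector>
    #include <boost/shared_ptr.hpp>

    struct Base { virtual ~Base() {} };
    struct Derived : public Base {};

    int main() {
      std::vector<boost::shared_ptr<Derived> > derived(3, boost::shared_ptr<Derived>(new Derived));
      std::vector<boost::shared_ptr<Base> > base;
      base.assign(derived.begin(), derived.end());   // each shared_ptr<Derived> upcasts to shared_ptr<Base>
      return base.size() == 3 ? 0 : 1;
    }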
|
|||
|
|
@ -234,15 +234,56 @@ namespace gtsam {
|
|||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/* ************************************************************************* */
|
||||
template<class CONDITIONAL, class CLIQUE>
|
||||
BayesTree<CONDITIONAL,CLIQUE>::BayesTree() {
|
||||
void BayesTree<CONDITIONAL,CLIQUE>::recursiveTreeBuild(const boost::shared_ptr<BayesTreeClique<IndexConditional> >& symbolic,
|
||||
const std::vector<boost::shared_ptr<CONDITIONAL> >& conditionals,
|
||||
const typename BayesTree<CONDITIONAL,CLIQUE>::sharedClique& parent) {
|
||||
|
||||
// Helper function to build a non-symbolic tree (e.g. Gaussian) using a
|
||||
// symbolic tree, used in the BT(BN) constructor.
|
||||
|
||||
// Build the current clique
|
||||
FastList<typename CONDITIONAL::shared_ptr> cliqueConditionals;
|
||||
BOOST_FOREACH(Index j, symbolic->conditional()->frontals()) {
|
||||
cliqueConditionals.push_back(conditionals[j]); }
|
||||
typename BayesTree<CONDITIONAL,CLIQUE>::sharedClique thisClique(new CLIQUE(CONDITIONAL::Combine(cliqueConditionals.begin(), cliqueConditionals.end())));
|
||||
|
||||
// Add the new clique with the current parent
|
||||
this->addClique(thisClique, parent);
|
||||
|
||||
// Build the children, whose parent is the new clique
|
||||
BOOST_FOREACH(const BayesTree<IndexConditional>::sharedClique& child, symbolic->children()) {
|
||||
this->recursiveTreeBuild(child, conditionals, thisClique); }
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class CONDITIONAL, class CLIQUE>
|
||||
BayesTree<CONDITIONAL,CLIQUE>::BayesTree(const BayesNet<CONDITIONAL>& bayesNet) {
|
||||
// First generate symbolic BT to determine clique structure
|
||||
BayesTree<IndexConditional> sbt(bayesNet);
|
||||
|
||||
// Build index of variables to conditionals
|
||||
std::vector<boost::shared_ptr<CONDITIONAL> > conditionals(sbt.root()->conditional()->frontals().back() + 1);
|
||||
BOOST_FOREACH(const boost::shared_ptr<CONDITIONAL>& c, bayesNet) {
|
||||
if(c->nrFrontals() != 1)
|
||||
throw std::invalid_argument("BayesTree constructor from BayesNet only supports single frontal variable conditionals");
|
||||
if(c->firstFrontalKey() >= conditionals.size())
|
||||
throw std::invalid_argument("An inconsistent BayesNet was passed into the BayesTree constructor!");
|
||||
if(conditionals[c->firstFrontalKey()])
|
||||
throw std::invalid_argument("An inconsistent BayesNet with duplicate frontal variables was passed into the BayesTree constructor!");
|
||||
|
||||
conditionals[c->firstFrontalKey()] = c;
|
||||
}
|
||||
|
||||
// Build the new tree
|
||||
this->recursiveTreeBuild(sbt.root(), conditionals, sharedClique());
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class CONDITIONAL, class CLIQUE>
|
||||
BayesTree<CONDITIONAL,CLIQUE>::BayesTree(const BayesNet<CONDITIONAL>& bayesNet) {
|
||||
typename BayesNet<CONDITIONAL>::const_reverse_iterator rit;
|
||||
template<>
|
||||
inline BayesTree<IndexConditional>::BayesTree(const BayesNet<IndexConditional>& bayesNet) {
|
||||
BayesNet<IndexConditional>::const_reverse_iterator rit;
|
||||
for ( rit=bayesNet.rbegin(); rit != bayesNet.rend(); ++rit )
|
||||
insert(*this, *rit);
|
||||
}
|
||||
|
|
@ -311,19 +352,6 @@ namespace gtsam {
|
|||
typename CONTAINER::const_iterator lowestOrderedParent = min_element(parents.begin(), parents.end());
|
||||
assert(lowestOrderedParent != parents.end());
|
||||
return *lowestOrderedParent;
|
||||
|
||||
// boost::optional<Index> parentCliqueRepresentative;
|
||||
// boost::optional<size_t> lowest;
|
||||
// BOOST_FOREACH(Index p, parents) {
|
||||
// size_t i = index(p);
|
||||
// if (!lowest || i<*lowest) {
|
||||
// lowest.reset(i);
|
||||
// parentCliqueRepresentative.reset(p);
|
||||
// }
|
||||
// }
|
||||
// if (!lowest) throw
|
||||
// invalid_argument("BayesTree::findParentClique: no parents given or key not present in index");
|
||||
// return *parentCliqueRepresentative;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
|||
|
|
@@ -31,6 +31,7 @@
#include <gtsam/inference/FactorGraph.h>
#include <gtsam/inference/BayesNet.h>
#include <gtsam/inference/BayesTreeCliqueBase.h>
#include <gtsam/inference/IndexConditional.h>
#include <gtsam/linear/VectorValues.h>

namespace gtsam {

@@ -127,15 +128,22 @@ namespace gtsam {
/** Fill the nodes index for a subtree */
void fillNodesIndex(const sharedClique& subtree);

/** Helper function to build a non-symbolic tree (e.g. Gaussian) using a
* symbolic tree, used in the BT(BN) constructor.
*/
void recursiveTreeBuild(const boost::shared_ptr<BayesTreeClique<IndexConditional> >& symbolic,
const std::vector<boost::shared_ptr<CONDITIONAL> >& conditionals,
const typename BayesTree<CONDITIONAL,CLIQUE>::sharedClique& parent);

public:

/// @name Standard Constructors
/// @{

/** Create an empty Bayes Tree */
BayesTree();
BayesTree() {}

/** Create a Bayes Tree from a Bayes Net */
/** Create a Bayes Tree from a Bayes Net (requires CONDITIONAL is IndexConditional *or* CONDITIONAL::Combine) */
BayesTree(const BayesNet<CONDITIONAL>& bayesNet);

/// @}
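A minimal usage sketch for the constructor declared above, in the symbolic case that the implementation specializes; it assumes an existing BayesNet<IndexConditional> named bn whose conditionals each have a single frontal variable, and the include paths are the usual gtsam/inference ones (illustrative only):

    #include <gtsam/inference/BayesTree.h>
    #include <gtsam/inference/IndexConditional.h>

    void buildTree(const gtsam::BayesNet<gtsam::IndexConditional>& bn) {
      // Symbolic case: conditionals are inserted in reverse order by the specialization above.
      gtsam::BayesTree<gtsam::IndexConditional> tree(bn);
    }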
|
@@ -19,6 +19,7 @@

#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>
#include <boost/weak_ptr.hpp>

#include <gtsam/base/types.h>
#include <gtsam/inference/FactorGraph.h>
|
@@ -45,12 +45,12 @@ namespace gtsam {
void Factor<KEY>::assertInvariants() const {
#ifndef NDEBUG
// Check that keys are all unique
std::multiset < KeyType > nonunique(keys_.begin(), keys_.end());
std::set < KeyType > unique(keys_.begin(), keys_.end());
std::multiset<KeyType> nonunique(keys_.begin(), keys_.end());
std::set<KeyType> unique(keys_.begin(), keys_.end());
bool correct = (nonunique.size() == unique.size())
&& std::equal(nonunique.begin(), nonunique.end(), unique.begin());
if (!correct) throw std::logic_error(
"Factor::assertInvariants: detected inconsistency");
"Factor::assertInvariants: Factor contains duplicate keys");
#endif
}
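The rewritten check above uses a multiset/set size comparison to detect duplicate keys. The same idiom in a self-contained form, with plain size_t keys standing in for a GTSAM factor's key vector:

    #include <cassert>
    #include <set>
    #include <vector>

    int main() {
      std::vector<size_t> keys;                // stand-in for Factor::keys_
      keys.push_back(1); keys.push_back(2); keys.push_back(2);
      std::multiset<size_t> nonunique(keys.begin(), keys.end());  // keeps the repeated 2
      std::set<size_t> unique(keys.begin(), keys.end());          // collapses it
      bool correct = (nonunique.size() == unique.size());
      assert(!correct);                        // duplicate detected, as assertInvariants() would report
      return 0;
    }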
|
@@ -117,6 +117,14 @@ public:
Factor(KeyType key1, KeyType key2, KeyType key3, KeyType key4) : keys_(4) {
keys_[0] = key1; keys_[1] = key2; keys_[2] = key3; keys_[3] = key4; assertInvariants(); }

/** Construct 5-way factor */
Factor(KeyType key1, KeyType key2, KeyType key3, KeyType key4, KeyType key5) : keys_(5) {
keys_[0] = key1; keys_[1] = key2; keys_[2] = key3; keys_[3] = key4; keys_[4] = key5; assertInvariants(); }

/** Construct 6-way factor */
Factor(KeyType key1, KeyType key2, KeyType key3, KeyType key4, KeyType key5, KeyType key6) : keys_(6) {
keys_[0] = key1; keys_[1] = key2; keys_[2] = key3; keys_[3] = key4; keys_[4] = key5; keys_[5] = key6; assertInvariants(); }

/// @}
/// @name Advanced Constructors
/// @{
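A usage sketch for the 5- and 6-way constructors added above. The Factor<gtsam::Index> instantiation and the include paths are assumptions, and the key values are arbitrary; keys must be distinct or assertInvariants() reports duplicate keys in debug builds:

    #include <gtsam/base/types.h>
    #include <gtsam/inference/Factor.h>

    void makeFactors() {
      gtsam::Factor<gtsam::Index> five(0, 1, 2, 3, 4);      // 5-way factor over variables 0..4
      gtsam::Factor<gtsam::Index> six(0, 1, 2, 3, 4, 5);    // 6-way factor over variables 0..5
    }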
|
@@ -43,6 +43,7 @@ template<class CONDITIONAL, class CLIQUE> class BayesTree;
class FactorGraph {
public:
typedef FACTOR FactorType;
typedef typename FACTOR::KeyType KeyType;
typedef boost::shared_ptr<FactorGraph<FACTOR> > shared_ptr;
typedef typename boost::shared_ptr<FACTOR> sharedFactor;
typedef typename std::vector<sharedFactor>::iterator iterator;

@@ -56,6 +57,11 @@ template<class CONDITIONAL, class CLIQUE> class BayesTree;
/** typedef for an eliminate subroutine */
typedef boost::function<EliminationResult(const FactorGraph<FACTOR>&, size_t)> Eliminate;

/** Typedef for the result of factorization */
typedef std::pair<
boost::shared_ptr<typename FACTOR::ConditionalType>,
FactorGraph<FACTOR> > FactorizationResult;

protected:

/** concept check */

@@ -87,7 +93,7 @@ template<class CONDITIONAL, class CLIQUE> class BayesTree;
/** convert from a derived type */
template<class DERIVEDFACTOR>
FactorGraph(const FactorGraph<DERIVEDFACTOR>& factors) {
factors_.insert(end(), factors.begin(), factors.end());
factors_.assign(factors.begin(), factors.end());
}

/// @}
|
@@ -89,7 +89,7 @@ namespace gtsam {
    const std::vector<Index>& js, Eliminate function) const {

  // Compute a COLAMD permutation with the marginal variable constrained to the end.
  Permutation::shared_ptr permutation(Inference::PermutationCOLAMD(*structure_, js));
  Permutation::shared_ptr permutation(inference::PermutationCOLAMD(*structure_, js));
  Permutation::shared_ptr permutationInverse(permutation->inverse());

  // Permute the factors - NOTE that this permutes the original factors, not
@@ -31,12 +31,12 @@ namespace gtsam {
    // Checks for uniqueness of keys
    Base::assertInvariants();
#ifndef NDEBUG
    // Check that separator keys are sorted
    FastSet<Index> uniquesorted(beginFrontals(), endFrontals());
    assert(uniquesorted.size() == nrFrontals() && std::equal(uniquesorted.begin(), uniquesorted.end(), beginFrontals()));
    // Check that separator keys are less than parent keys
    //BOOST_FOREACH(Index j, frontals()) {
    //  assert(find_if(beginParents(), endParents(), _1 < j) == endParents()); }
    // Check that frontal keys are sorted
    //FastSet<Index> uniquesorted(beginFrontals(), endFrontals());
    //assert(uniquesorted.size() == nrFrontals() && std::equal(uniquesorted.begin(), uniquesorted.end(), beginFrontals()));
    //// Check that separator keys are less than parent keys
    ////BOOST_FOREACH(Index j, frontals()) {
    ////  assert(find_if(beginParents(), endParents(), _1 < j) == endParents()); }
#endif
  }

@@ -60,13 +60,13 @@ namespace gtsam {
  /* ************************************************************************* */
  void IndexConditional::permuteWithInverse(const Permutation& inversePermutation) {
    // The permutation may not move the separators into the frontals
#ifndef NDEBUG
    BOOST_FOREACH(const KeyType frontal, this->frontals()) {
      BOOST_FOREACH(const KeyType separator, this->parents()) {
        assert(inversePermutation[frontal] < inversePermutation[separator]);
      }
    }
#endif
//  #ifndef NDEBUG
//    BOOST_FOREACH(const KeyType frontal, this->frontals()) {
//      BOOST_FOREACH(const KeyType separator, this->parents()) {
//        assert(inversePermutation[frontal] < inversePermutation[separator]);
//      }
//    }
//  #endif
    BOOST_FOREACH(Index& key, keys())
      key = inversePermutation[key];
    assertInvariants();
@@ -26,8 +26,6 @@

namespace gtsam {

class Inference;

/**
 * A permutation reorders variables, for example to reduce fill-in during
 * elimination. To save computation, the permutation can be applied to

@@ -162,8 +160,6 @@ protected:
  void check(Index variable) const { assert(variable < rangeIndices_.size()); }

  /// @}

  friend class Inference;
};

@@ -26,8 +26,6 @@

namespace gtsam {

class Inference;

/**
 * The VariableIndex class computes and stores the block column structure of a
 * factor graph. The factor graph stores a collection of factors, each of
@@ -0,0 +1,120 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file inference-inl.h
 * @brief
 * @author Richard Roberts
 * @date Mar 3, 2012
 */

#pragma once

#include <algorithm>

// Only for Eclipse parser, inference-inl.h (this file) is included at the bottom of inference.h
#include <gtsam/inference/inference.h>

#include <gtsam/base/FastSet.h>

namespace gtsam {

namespace inference {

/* ************************************************************************* */
template<typename CONSTRAINED>
Permutation::shared_ptr PermutationCOLAMD(const VariableIndex& variableIndex, const CONSTRAINED& constrainLast) {

  std::vector<int> cmember(variableIndex.size(), 0);

  // If at least some variables are not constrained to be last, constrain the
  // ones that should be constrained.
  if(constrainLast.size() < variableIndex.size()) {
    BOOST_FOREACH(Index var, constrainLast) {
      assert(var < variableIndex.size());
      cmember[var] = 1;
    }
  }

  return PermutationCOLAMD_(variableIndex, cmember);
}

/* ************************************************************************* */
inline Permutation::shared_ptr PermutationCOLAMD(const VariableIndex& variableIndex) {
  std::vector<int> cmember(variableIndex.size(), 0);
  return PermutationCOLAMD_(variableIndex, cmember);
}

/* ************************************************************************* */
template<class Graph>
typename Graph::FactorizationResult eliminate(const Graph& factorGraph, const std::vector<typename Graph::KeyType>& variables,
    const typename Graph::Eliminate& eliminateFcn, boost::optional<const VariableIndex&> variableIndex_) {

  const VariableIndex& variableIndex = variableIndex_ ? *variableIndex_ : VariableIndex(factorGraph);

  // First find the involved factors
  Graph involvedFactors;
  Index highestInvolvedVariable = 0; // Largest index of the variables in the involved factors

  // First get the indices of the involved factors, but uniquely in a set
  FastSet<size_t> involvedFactorIndices;
  BOOST_FOREACH(Index variable, variables) {
    involvedFactorIndices.insert(variableIndex[variable].begin(), variableIndex[variable].end()); }

  // Add the factors themselves to involvedFactors and update largest index
  involvedFactors.reserve(involvedFactorIndices.size());
  BOOST_FOREACH(size_t factorIndex, involvedFactorIndices) {
    const typename Graph::sharedFactor factor = factorGraph[factorIndex];
    involvedFactors.push_back(factor); // Add involved factor
    highestInvolvedVariable = std::max( // Updated largest index
        highestInvolvedVariable,
        *std::max_element(factor->begin(), factor->end()));
  }

  // Now permute the variables to be eliminated to the front of the ordering
  Permutation toFront = Permutation::PullToFront(variables, highestInvolvedVariable+1);
  Permutation toFrontInverse = *toFront.inverse();
  involvedFactors.permuteWithInverse(toFrontInverse);

  // Eliminate into conditional and remaining factor
  typename Graph::EliminationResult eliminated = eliminateFcn(involvedFactors, variables.size());
  boost::shared_ptr<typename Graph::FactorType::ConditionalType> conditional = eliminated.first;
  typename Graph::sharedFactor remainingFactor = eliminated.second;

  // Undo the permutation
  conditional->permuteWithInverse(toFront);
  remainingFactor->permuteWithInverse(toFront);

  // Build the remaining graph, without the removed factors
  Graph remainingGraph;
  remainingGraph.reserve(factorGraph.size() - involvedFactors.size() + 1);
  FastSet<size_t>::const_iterator involvedFactorIndexIt = involvedFactorIndices.begin();
  for(size_t i = 0; i < factorGraph.size(); ++i) {
    if(involvedFactorIndexIt != involvedFactorIndices.end() && *involvedFactorIndexIt == i)
      ++ involvedFactorIndexIt;
    else
      remainingGraph.push_back(factorGraph[i]);
  }

  // Add the remaining factor if it is not empty.
  if(remainingFactor->size() != 0)
    remainingGraph.push_back(remainingFactor);

  return typename Graph::FactorizationResult(conditional, remainingGraph);

}

}

}
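The new inference::eliminate above factors the chosen variables out of a graph, returning the dense conditional on those variables together with the remaining graph. A minimal usage sketch via the eliminateOne wrapper, assuming the SymbolicFactorGraph::push_factor helpers and the EliminateSymbolic functor available elsewhere in this codebase (names outside this diff are assumptions, not verified against this exact revision):

#include <gtsam/inference/SymbolicFactorGraph.h>
#include <gtsam/inference/inference.h>

using namespace gtsam;

void exampleEliminateOne() {
  // Chain x0 - x1 - x2 (plain variable indices)
  SymbolicFactorGraph sfg;
  sfg.push_factor(0, 1);
  sfg.push_factor(1, 2);

  // Factor out variable 0: produces p(0 | separator) and the remaining graph.
  // EliminateSymbolic is assumed to match the Graph::Eliminate signature.
  SymbolicFactorGraph::FactorizationResult result =
      inference::eliminateOne(sfg, 0, EliminateSymbolic);

  result.first->print("conditional on 0");   // the eliminated conditional
  result.second.print("remaining factors");  // factors over the separator
}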
@@ -29,7 +29,9 @@ using namespace std;

namespace gtsam {

Permutation::shared_ptr Inference::PermutationCOLAMD_(const VariableIndex& variableIndex, std::vector<int>& cmember) {
namespace inference {

Permutation::shared_ptr PermutationCOLAMD_(const VariableIndex& variableIndex, std::vector<int>& cmember) {
  size_t nEntries = variableIndex.nEntries(), nFactors = variableIndex.nFactors(), nVars = variableIndex.size();
  // Convert to compressed column major format colamd wants it in (== MATLAB format!)
  int Alen = ccolamd_recommended(nEntries, nFactors, nVars); /* colamd arg 3: size of the array A */

@@ -79,10 +81,10 @@ Permutation::shared_ptr Inference::PermutationCOLAMD_(const VariableIndex& varia
  // Convert elimination ordering in p to an ordering
  Permutation::shared_ptr permutation(new Permutation(nVars));
  for (Index j = 0; j < nVars; j++) {
//    if(p[j] == -1)
//      permutation->operator[](j) = j;
//    else
    permutation->operator[](j) = p[j];
//    if(p[j] == -1)
//      permutation->operator[](j) = j;
//    else
    permutation->operator[](j) = p[j];
    if(debug) cout << "COLAMD: " << j << "->" << p[j] << endl;
  }
  if(debug) cout << "COLAMD: p[" << nVars << "] = " << p[nVars] << endl;

@@ -91,3 +93,5 @@ Permutation::shared_ptr Inference::PermutationCOLAMD_(const VariableIndex& varia
}

}

}
@@ -22,58 +22,59 @@
#include <gtsam/inference/Permutation.h>

#include <boost/foreach.hpp>
#include <boost/optional.hpp>

#include <deque>

namespace gtsam {

class Inference {
private:
  /* Static members only, private constructor */
  Inference() {}
namespace inference {

public:
  /**
   * Compute a permutation (variable ordering) using colamd
   */
  Permutation::shared_ptr PermutationCOLAMD(const VariableIndex& variableIndex);

  /**
   * Compute a permutation (variable ordering) using colamd
   */
  static Permutation::shared_ptr PermutationCOLAMD(const VariableIndex& variableIndex);
  /**
   * Compute a permutation (variable ordering) using constrained colamd
   */
  template<typename CONSTRAINED>
  Permutation::shared_ptr PermutationCOLAMD(const VariableIndex& variableIndex, const CONSTRAINED& constrainLast);

  /**
   * Compute a permutation (variable ordering) using constrained colamd
   */
  template<typename CONSTRAINED>
  static Permutation::shared_ptr PermutationCOLAMD(const VariableIndex& variableIndex, const CONSTRAINED& constrainLast);
  /**
   * Compute a CCOLAMD permutation using the constraint groups in cmember.
   */
  Permutation::shared_ptr PermutationCOLAMD_(const VariableIndex& variableIndex, std::vector<int>& cmember);

  /**
   * Compute a CCOLAMD permutation using the constraint groups in cmember.
   */
  static Permutation::shared_ptr PermutationCOLAMD_(const VariableIndex& variableIndex, std::vector<int>& cmember);
  /** Factor the factor graph into a conditional and a remaining factor graph.
   * Given the factor graph \f$ f(X) \f$, and \c variables to factorize out
   * \f$ V \f$, this function factorizes into \f$ f(X) = f(V;Y)f(Y) \f$, where
   * \f$ Y := X\V \f$ are the remaining variables. If \f$ f(X) = p(X) \f$ is
   * a probability density or likelihood, the factorization produces a
   * conditional probability density and a marginal \f$ p(X) = p(V|Y)p(Y) \f$.
   *
   * For efficiency, this function treats the variables to eliminate
   * \c variables as fully-connected, so produces a dense (fully-connected)
   * conditional on all of the variables in \c variables, instead of a sparse
   * BayesNet. If the variables are not fully-connected, it is more efficient
   * to sequentially factorize multiple times.
   */
  template<class Graph>
  typename Graph::FactorizationResult eliminate(const Graph& factorGraph, const std::vector<typename Graph::KeyType>& variables,
      const typename Graph::Eliminate& eliminateFcn, boost::optional<const VariableIndex&> variableIndex = boost::none);

};
  /** Eliminate a single variable, by calling
   * eliminate(const Graph&, const std::vector<typename Graph::KeyType>&, const typename Graph::Eliminate&, boost::optional<const VariableIndex&>)
   */
  template<class Graph>
  typename Graph::FactorizationResult eliminateOne(const Graph& factorGraph, typename Graph::KeyType variable,
      const typename Graph::Eliminate& eliminateFcn, boost::optional<const VariableIndex&> variableIndex = boost::none) {
    std::vector<size_t> variables(1, variable);
    return eliminate(factorGraph, variables, eliminateFcn, variableIndex);
  }

/* ************************************************************************* */
template<typename CONSTRAINED>
Permutation::shared_ptr Inference::PermutationCOLAMD(const VariableIndex& variableIndex, const CONSTRAINED& constrainLast) {

  std::vector<int> cmember(variableIndex.size(), 0);

  // If at least some variables are not constrained to be last, constrain the
  // ones that should be constrained.
  if(constrainLast.size() < variableIndex.size()) {
    BOOST_FOREACH(Index var, constrainLast) {
      assert(var < variableIndex.size());
      cmember[var] = 1;
    }
  }

  return PermutationCOLAMD_(variableIndex, cmember);
}

/* ************************************************************************* */
inline Permutation::shared_ptr Inference::PermutationCOLAMD(const VariableIndex& variableIndex) {
  std::vector<int> cmember(variableIndex.size(), 0);
  return PermutationCOLAMD_(variableIndex, cmember);
}
}

} // namespace gtsam

#include <gtsam/inference/inference-inl.h>
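The constrained overload above lets a caller force selected variables into the last CCOLAMD constraint group so they are ordered last, which is what the marginal computation earlier in this diff relies on. A hedged sketch of a call site, reusing VariableIndex and SymbolicFactorGraph as used elsewhere in this change set (helper function and factor layout are illustrative only):

#include <gtsam/inference/SymbolicFactorGraph.h>
#include <gtsam/inference/VariableIndex.h>
#include <gtsam/inference/inference.h>

using namespace gtsam;

Permutation::shared_ptr exampleConstrainedColamd() {
  // Small chain 0 - 1 - 2 - 3
  SymbolicFactorGraph sfg;
  sfg.push_factor(0, 1);
  sfg.push_factor(1, 2);
  sfg.push_factor(2, 3);

  VariableIndex variableIndex(sfg);

  // Ask CCOLAMD to place variable 3 in constraint group 1, i.e. order it last
  std::vector<Index> constrainLast(1, 3);
  return inference::PermutationCOLAMD(variableIndex, constrainLast);
}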
@@ -25,7 +25,7 @@
using namespace gtsam;

/* ************************************************************************* */
TEST(Inference, UnobservedVariables) {
TEST(inference, UnobservedVariables) {
  SymbolicFactorGraph sfg;

  // Create a factor graph that skips some variables

@@ -35,7 +35,7 @@ TEST(Inference, UnobservedVariables) {

  VariableIndex variableIndex(sfg);

  Permutation::shared_ptr colamd(Inference::PermutationCOLAMD(variableIndex));
  Permutation::shared_ptr colamd(inference::PermutationCOLAMD(variableIndex));
}

/* ************************************************************************* */
@@ -83,33 +83,14 @@ boost::shared_ptr<VectorValues> allocateVectorValues(const GaussianBayesNet& bn)

/* ************************************************************************* */
VectorValues optimize(const GaussianBayesNet& bn) {
  return *optimize_(bn);
}

/* ************************************************************************* */
boost::shared_ptr<VectorValues> optimize_(const GaussianBayesNet& bn)
{
  // get the RHS as a VectorValues to initialize system
  boost::shared_ptr<VectorValues> result(new VectorValues(rhs(bn)));

  /** solve each node in turn in topological sort order (parents first)*/
  BOOST_REVERSE_FOREACH(GaussianConditional::shared_ptr cg, bn)
    cg->solveInPlace(*result); // solve and store solution in same step

  return result;
}

/* ************************************************************************* */
VectorValues backSubstitute(const GaussianBayesNet& bn, const VectorValues& y) {
  VectorValues x(y);
  backSubstituteInPlace(bn,x);
  VectorValues x = *allocateVectorValues(bn);
  optimizeInPlace(bn, x);
  return x;
}

/* ************************************************************************* */
// (R*x)./sigmas = y by solving x=inv(R)*(y.*sigmas)
void backSubstituteInPlace(const GaussianBayesNet& bn, VectorValues& y) {
  VectorValues& x = y;
void optimizeInPlace(const GaussianBayesNet& bn, VectorValues& x) {
  /** solve each node in turn in topological sort order (parents first)*/
  BOOST_REVERSE_FOREACH(const boost::shared_ptr<const GaussianConditional> cg, bn) {
    // i^th part of R*x=y, x=inv(R)*y

@@ -141,6 +122,42 @@ VectorValues backSubstituteTranspose(const GaussianBayesNet& bn,
  return gy;
}

/* ************************************************************************* */
VectorValues optimizeGradientSearch(const GaussianBayesNet& Rd) {
  tic(0, "Allocate VectorValues");
  VectorValues grad = *allocateVectorValues(Rd);
  toc(0, "Allocate VectorValues");

  optimizeGradientSearchInPlace(Rd, grad);

  return grad;
}

/* ************************************************************************* */
void optimizeGradientSearchInPlace(const GaussianBayesNet& Rd, VectorValues& grad) {
  tic(1, "Compute Gradient");
  // Compute gradient (call gradientAtZero function, which is defined for various linear systems)
  gradientAtZero(Rd, grad);
  double gradientSqNorm = grad.dot(grad);
  toc(1, "Compute Gradient");

  tic(2, "Compute R*g");
  // Compute R * g
  FactorGraph<JacobianFactor> Rd_jfg(Rd);
  Errors Rg = Rd_jfg * grad;
  toc(2, "Compute R*g");

  tic(3, "Compute minimizing step size");
  // Compute minimizing step size
  double step = -gradientSqNorm / dot(Rg, Rg);
  toc(3, "Compute minimizing step size");

  tic(4, "Compute point");
  // Compute steepest descent point
  scal(step, grad);
  toc(4, "Compute point");
}

/* ************************************************************************* */
pair<Matrix,Vector> matrix(const GaussianBayesNet& bn) {

@@ -194,15 +211,6 @@ pair<Matrix,Vector> matrix(const GaussianBayesNet& bn) {
  return make_pair(R,d);
}

/* ************************************************************************* */
VectorValues rhs(const GaussianBayesNet& bn) {
  boost::shared_ptr<VectorValues> result(allocateVectorValues(bn));
  BOOST_FOREACH(boost::shared_ptr<const GaussianConditional> cg,bn)
    cg->rhs(*result);

  return *result;
}

/* ************************************************************************* */
double determinant(const GaussianBayesNet& bayesNet) {
  double logDet = 0.0;
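After this change the solution is computed directly from (R, d) by back-substitution instead of first seeding the VectorValues with the right-hand side. A minimal sketch of the two equivalent call paths, using only functions declared in this diff:

#include <gtsam/linear/GaussianBayesNet.h>

using namespace gtsam;

// Given an already-eliminated Bayes net (R, d), solve it two ways.
VectorValues solveTwoWays(const GaussianBayesNet& bn) {
  // 1. Convenience version: allocates internally and back-substitutes.
  VectorValues x1 = optimize(bn);

  // 2. Pre-allocated version: reuse the storage across repeated solves.
  VectorValues x2 = *allocateVectorValues(bn);
  optimizeInPlace(bn, x2);

  return x1;  // x1 and x2 hold the same solution
}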
@@ -55,26 +55,56 @@ namespace gtsam {
  boost::shared_ptr<VectorValues> allocateVectorValues(const GaussianBayesNet& bn);

  /**
   * optimize, i.e. return x = inv(R)*d
   * Solve the GaussianBayesNet, i.e. return \f$ x = R^{-1}*d \f$, computed by
   * back-substitution.
   */
  VectorValues optimize(const GaussianBayesNet&);
  VectorValues optimize(const GaussianBayesNet& bn);

  /**
   * shared pointer version
   */
  boost::shared_ptr<VectorValues> optimize_(const GaussianBayesNet& bn);
  /**
   * Solve the GaussianBayesNet, i.e. return \f$ x = R^{-1}*d \f$, computed by
   * back-substitution, writes the solution \f$ x \f$ into a pre-allocated
   * VectorValues. You can use allocateVectorValues(const GaussianBayesNet&)
   * allocate it. See also optimize(const GaussianBayesNet&), which does not
   * require pre-allocation.
   */
  void optimizeInPlace(const GaussianBayesNet& bn, VectorValues& x);

  /**
   * Backsubstitute
   * (R*x)./sigmas = y by solving x=inv(R)*(y.*sigmas)
   * @param y is the RHS of the system
   */
  VectorValues backSubstitute(const GaussianBayesNet& bn, const VectorValues& y);
  /**
   * Optimize along the gradient direction, with a closed-form computation to
   * perform the line search. The gradient is computed about \f$ \delta x=0 \f$.
   *
   * This function returns \f$ \delta x \f$ that minimizes a reparametrized
   * problem. The error function of a GaussianBayesNet is
   *
   * \f[ f(\delta x) = \frac{1}{2} |R \delta x - d|^2 = \frac{1}{2}d^T d - d^T R \delta x + \frac{1}{2} \delta x^T R^T R \delta x \f]
   *
   * with gradient and Hessian
   *
   * \f[ g(\delta x) = R^T(R\delta x - d), \qquad G(\delta x) = R^T R. \f]
   *
   * This function performs the line search in the direction of the
   * gradient evaluated at \f$ g = g(\delta x = 0) \f$ with step size
   * \f$ \alpha \f$ that minimizes \f$ f(\delta x = \alpha g) \f$:
   *
   * \f[ f(\alpha) = \frac{1}{2} d^T d + g^T \delta x + \frac{1}{2} \alpha^2 g^T G g \f]
   *
   * Optimizing by setting the derivative to zero yields
   * \f$ \hat \alpha = (-g^T g) / (g^T G g) \f$. For efficiency, this function
   * evaluates the denominator without computing the Hessian \f$ G \f$, returning
   *
   * \f[ \delta x = \hat\alpha g = \frac{-g^T g}{(R g)^T(R g)} \f]
   *
   * @param bn The GaussianBayesNet on which to perform this computation
   * @return The resulting \f$ \delta x \f$ as described above
   */
  VectorValues optimizeGradientSearch(const GaussianBayesNet& bn);

  /**
   * Backsubstitute in place, y starts as RHS and is replaced with solution
   */
  void backSubstituteInPlace(const GaussianBayesNet& bn, VectorValues& y);
  /** In-place version of optimizeGradientSearch(const GaussianBayesNet&) requiring pre-allocated VectorValues \c grad
   *
   * @param bn The GaussianBayesNet on which to perform this computation
   * @param [out] grad The resulting \f$ \delta x \f$ as described in optimizeGradientSearch(const GaussianBayesNet&)
   * */
  void optimizeGradientSearchInPlace(const GaussianBayesNet& bn, VectorValues& grad);

  /**
   * Transpose Backsubstitute

@@ -91,12 +121,6 @@ namespace gtsam {
   */
  std::pair<Matrix, Vector> matrix(const GaussianBayesNet&);

  /**
   * Return RHS d as a VectorValues
   * Such that backSubstitute(bn,d) = optimize(bn)
   */
  VectorValues rhs(const GaussianBayesNet&);

  /**
   * Computes the determinant of a GassianBayesNet
   * A GaussianBayesNet is an upper triangular matrix and for an upper triangular matrix
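The doxygen above derives the closed-form step size \f$ \hat\alpha = -(g^T g)/((Rg)^T(Rg)) \f$. A small self-contained dense-Eigen illustration of that computation (plain Eigen only; not the GTSAM VectorValues API):

#include <Eigen/Dense>
#include <iostream>

// Dense illustration of the optimizeGradientSearch step: delta_x = alpha * g,
// with g = R'(R*0 - d) = -R'd and alpha = -(g'g) / ((Rg)'(Rg)).
int main() {
  Eigen::Matrix2d R;
  R << 2.0, 1.0,
       0.0, 3.0;                      // upper-triangular system R x = d
  Eigen::Vector2d d(1.0, 2.0);

  Eigen::Vector2d g = -R.transpose() * d;   // gradient at delta_x = 0
  Eigen::Vector2d Rg = R * g;
  double alpha = -g.dot(g) / Rg.dot(Rg);    // minimizing step size
  Eigen::Vector2d deltaX = alpha * g;       // steepest-descent point

  std::cout << "alpha = " << alpha
            << "\ndeltaX = " << deltaX.transpose() << "\n";
  return 0;
}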
@@ -0,0 +1,28 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file GaussianBayesTree.cpp
 * @brief Gaussian Bayes Tree, the result of eliminating a GaussianJunctionTree
 * @brief GaussianBayesTree
 * @author Frank Dellaert
 * @author Richard Roberts
 */

#pragma once

#include <boost/foreach.hpp>

#include <gtsam/linear/GaussianBayesTree.h> // Only to help Eclipse

namespace gtsam {

}
@@ -0,0 +1,99 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file GaussianBayesTree.cpp
 * @brief Gaussian Bayes Tree, the result of eliminating a GaussianJunctionTree
 * @brief GaussianBayesTree
 * @author Frank Dellaert
 * @author Richard Roberts
 */

#include <gtsam/linear/GaussianBayesTree.h>
#include <gtsam/linear/JacobianFactor.h>

namespace gtsam {

/* ************************************************************************* */
namespace internal {
void optimizeInPlace(const boost::shared_ptr<BayesTreeClique<GaussianConditional> >& clique, VectorValues& result) {
  // parents are assumed to already be solved and available in result
  clique->conditional()->solveInPlace(result);

  // starting from the root, call optimize on each conditional
  BOOST_FOREACH(const boost::shared_ptr<BayesTreeClique<GaussianConditional> >& child, clique->children_)
    optimizeInPlace(child, result);
}
}

/* ************************************************************************* */
VectorValues optimize(const GaussianBayesTree& bayesTree) {
  VectorValues result = *allocateVectorValues(bayesTree);
  internal::optimizeInPlace(bayesTree.root(), result);
  return result;
}

/* ************************************************************************* */
VectorValues optimizeGradientSearch(const GaussianBayesTree& Rd) {
  tic(0, "Allocate VectorValues");
  VectorValues grad = *allocateVectorValues(Rd);
  toc(0, "Allocate VectorValues");

  optimizeGradientSearchInPlace(Rd, grad);

  return grad;
}

/* ************************************************************************* */
void optimizeGradientSearchInPlace(const GaussianBayesTree& Rd, VectorValues& grad) {
  tic(1, "Compute Gradient");
  // Compute gradient (call gradientAtZero function, which is defined for various linear systems)
  gradientAtZero(Rd, grad);
  double gradientSqNorm = grad.dot(grad);
  toc(1, "Compute Gradient");

  tic(2, "Compute R*g");
  // Compute R * g
  FactorGraph<JacobianFactor> Rd_jfg(Rd);
  Errors Rg = Rd_jfg * grad;
  toc(2, "Compute R*g");

  tic(3, "Compute minimizing step size");
  // Compute minimizing step size
  double step = -gradientSqNorm / dot(Rg, Rg);
  toc(3, "Compute minimizing step size");

  tic(4, "Compute point");
  // Compute steepest descent point
  scal(step, grad);
  toc(4, "Compute point");
}

/* ************************************************************************* */
void optimizeInPlace(const GaussianBayesTree& bayesTree, VectorValues& result) {
  internal::optimizeInPlace(bayesTree.root(), result);
}

/* ************************************************************************* */
VectorValues gradient(const GaussianBayesTree& bayesTree, const VectorValues& x0) {
  return gradient(FactorGraph<JacobianFactor>(bayesTree), x0);
}

/* ************************************************************************* */
void gradientAtZero(const GaussianBayesTree& bayesTree, VectorValues& g) {
  gradientAtZero(FactorGraph<JacobianFactor>(bayesTree), g);
}

}
@@ -0,0 +1,95 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file GaussianBayesTree.h
 * @brief Gaussian Bayes Tree, the result of eliminating a GaussianJunctionTree
 * @brief GaussianBayesTree
 * @author Frank Dellaert
 * @author Richard Roberts
 */

#pragma once

#include <gtsam/inference/BayesTree.h>
#include <gtsam/linear/GaussianConditional.h>
#include <gtsam/linear/GaussianFactor.h>

namespace gtsam {

typedef BayesTree<GaussianConditional> GaussianBayesTree;

/// optimize the BayesTree, starting from the root
VectorValues optimize(const GaussianBayesTree& bayesTree);

/// recursively optimize this conditional and all subtrees
void optimizeInPlace(const GaussianBayesTree& clique, VectorValues& result);

namespace internal {
void optimizeInPlace(const boost::shared_ptr<BayesTreeClique<GaussianConditional> >& clique, VectorValues& result);
}

/**
 * Optimize along the gradient direction, with a closed-form computation to
 * perform the line search. The gradient is computed about \f$ \delta x=0 \f$.
 *
 * This function returns \f$ \delta x \f$ that minimizes a reparametrized
 * problem. The error function of a GaussianBayesNet is
 *
 * \f[ f(\delta x) = \frac{1}{2} |R \delta x - d|^2 = \frac{1}{2}d^T d - d^T R \delta x + \frac{1}{2} \delta x^T R^T R \delta x \f]
 *
 * with gradient and Hessian
 *
 * \f[ g(\delta x) = R^T(R\delta x - d), \qquad G(\delta x) = R^T R. \f]
 *
 * This function performs the line search in the direction of the
 * gradient evaluated at \f$ g = g(\delta x = 0) \f$ with step size
 * \f$ \alpha \f$ that minimizes \f$ f(\delta x = \alpha g) \f$:
 *
 * \f[ f(\alpha) = \frac{1}{2} d^T d + g^T \delta x + \frac{1}{2} \alpha^2 g^T G g \f]
 *
 * Optimizing by setting the derivative to zero yields
 * \f$ \hat \alpha = (-g^T g) / (g^T G g) \f$. For efficiency, this function
 * evaluates the denominator without computing the Hessian \f$ G \f$, returning
 *
 * \f[ \delta x = \hat\alpha g = \frac{-g^T g}{(R g)^T(R g)} \f]
 */
VectorValues optimizeGradientSearch(const GaussianBayesTree& bn);

/** In-place version of optimizeGradientSearch requiring pre-allocated VectorValues \c x */
void optimizeGradientSearchInPlace(const GaussianBayesTree& bn, VectorValues& grad);

/**
 * Compute the gradient of the energy function,
 * \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
 * centered around \f$ x = x_0 \f$.
 * The gradient is \f$ R^T(Rx-d) \f$.
 * @param bayesTree The Gaussian Bayes Tree $(R,d)$
 * @param x0 The center about which to compute the gradient
 * @return The gradient as a VectorValues
 */
VectorValues gradient(const GaussianBayesTree& bayesTree, const VectorValues& x0);

/**
 * Compute the gradient of the energy function,
 * \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
 * centered around zero.
 * The gradient about zero is \f$ -R^T d \f$. See also gradient(const GaussianBayesNet&, const VectorValues&).
 * @param bayesTree The Gaussian Bayes Tree $(R,d)$
 * @param [output] g A VectorValues to store the gradient, which must be preallocated, see allocateVectorValues
 * @return The gradient as a VectorValues
 */
void gradientAtZero(const GaussianBayesTree& bayesTree, VectorValues& g);

}

#include <gtsam/linear/GaussianBayesTree-inl.h>
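A hedged usage sketch of the new free functions once a GaussianBayesTree has been eliminated elsewhere; the allocateVectorValues overload for a Bayes tree is assumed from its use in GaussianBayesTree.cpp above and is not part of the header hunks shown here:

#include <gtsam/linear/GaussianBayesTree.h>

using namespace gtsam;

// Solve an already-eliminated Bayes tree (R, d) and inspect the gradient
// of the quadratic error about zero.
VectorValues solveAndGradient(const GaussianBayesTree& bayesTree) {
  // Full back-substitution, root clique first, then children recursively.
  VectorValues x = optimize(bayesTree);

  // Gradient -R'd written into pre-allocated storage.
  VectorValues g = *allocateVectorValues(bayesTree);
  gradientAtZero(bayesTree, g);

  return x;
}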
@@ -31,19 +31,19 @@ namespace gtsam {
// Helper function used only in this file - extracts vectors with variable indices
// in the first and last iterators, and concatenates them in that order into the
// output.
template<typename ITERATOR>
static Vector extractVectorValuesSlices(const VectorValues& values, ITERATOR first, ITERATOR last) {
template<class VALUES, typename ITERATOR>
static Vector extractVectorValuesSlices(const VALUES& values, ITERATOR first, ITERATOR last) {
  // Find total dimensionality
  int dim = 0;
  for(ITERATOR j = first; j != last; ++j)
    dim += values.dim(*j);
    dim += values[*j].rows();

  // Copy vectors
  Vector ret(dim);
  int varStart = 0;
  for(ITERATOR j = first; j != last; ++j) {
    ret.segment(varStart, values.dim(*j)) = values[*j];
    varStart += values.dim(*j);
    ret.segment(varStart, values[*j].rows()) = values[*j];
    varStart += values[*j].rows();
  }
  return ret;
}

@@ -52,15 +52,15 @@ static Vector extractVectorValuesSlices(const VectorValues& values, ITERATOR fir
// Helper function used only in this file - writes to the variables in values
// with indices iterated over by first and last, interpreting vector as the
// concatenated vectors to write.
template<class VECTOR, typename ITERATOR>
static void writeVectorValuesSlices(const VECTOR& vector, VectorValues& values, ITERATOR first, ITERATOR last) {
template<class VECTOR, class VALUES, typename ITERATOR>
static void writeVectorValuesSlices(const VECTOR& vector, VALUES& values, ITERATOR first, ITERATOR last) {
  // Copy vectors
  int varStart = 0;
  for(ITERATOR j = first; j != last; ++j) {
    values[*j] = vector.segment(varStart, values.dim(*j));
    varStart += values.dim(*j);
    values[*j] = vector.segment(varStart, values[*j].rows());
    varStart += values[*j].rows();
  }
  assert(varStart = vector.rows());
  assert(varStart == vector.rows());
}

/* ************************************************************************* */
@@ -221,78 +221,34 @@ JacobianFactor::shared_ptr GaussianConditional::toFactor() const {
}

/* ************************************************************************* */
void GaussianConditional::rhs(VectorValues& x) const {
  writeVectorValuesSlices(get_d(), x, beginFrontals(), endFrontals());
}
template<class VALUES>
inline static void doSolveInPlace(const GaussianConditional& conditional, VALUES& x) {

/* ************************************************************************* */
void GaussianConditional::rhs(Permuted<VectorValues>& x) const {
  // Copy the rhs into x, accounting for the permutation
  Vector d = get_d();
  size_t rhsPosition = 0; // We walk through the rhs by variable
  for(const_iterator j = beginFrontals(); j != endFrontals(); ++j) {
    // Get the segment of the rhs for this variable
    x[*j] = d.segment(rhsPosition, this->dim(j));
    // Increment the position
    rhsPosition += this->dim(j);
  // Helper function to solve-in-place on a VectorValues or Permuted<VectorValues>,
  // called by GaussianConditional::solveInPlace(VectorValues&) and by
  // GaussianConditional::solveInPlace(Permuted<VectorValues>&).

  static const bool debug = false;
  if(debug) conditional.print("Solving conditional in place");
  Vector xS = extractVectorValuesSlices(x, conditional.beginParents(), conditional.endParents());
  xS = conditional.get_d() - conditional.get_S() * xS;
  Vector soln = conditional.permutation().transpose() *
      conditional.get_R().triangularView<Eigen::Upper>().solve(xS);
  if(debug) {
    gtsam::print(Matrix(conditional.get_R()), "Calling backSubstituteUpper on ");
    gtsam::print(soln, "full back-substitution solution: ");
  }
  writeVectorValuesSlices(soln, x, conditional.beginFrontals(), conditional.endFrontals());
}

/* ************************************************************************* */
void GaussianConditional::solveInPlace(VectorValues& x) const {
  static const bool debug = false;
  if(debug) print("Solving conditional in place");
  Vector rhs = extractVectorValuesSlices(x, beginFrontals(), endFrontals());
  for (const_iterator parent = beginParents(); parent != endParents(); ++parent) {
    rhs += -get_S(parent) * x[*parent];
  }
  Vector soln = permutation_.transpose() * get_R().triangularView<Eigen::Upper>().solve(rhs);
  if(debug) {
    gtsam::print(Matrix(get_R()), "Calling backSubstituteUpper on ");
    gtsam::print(rhs, "rhs: ");
    gtsam::print(soln, "full back-substitution solution: ");
  }
  writeVectorValuesSlices(soln, x, beginFrontals(), endFrontals());
  doSolveInPlace(*this, x); // Call helper version above
}

/* ************************************************************************* */
void GaussianConditional::solveInPlace(Permuted<VectorValues>& x) const {
  static const bool debug = false;
  if(debug) print("Solving conditional in place (permuted)");
  // Extract RHS from values - inlined from VectorValues
  size_t s = 0;
  for (const_iterator it=beginFrontals(); it!=endFrontals(); ++it)
    s += x[*it].size();
  Vector rhs(s); size_t start = 0;
  for (const_iterator it=beginFrontals(); it!=endFrontals(); ++it) {
    SubVector v = x[*it];
    const size_t d = v.size();
    rhs.segment(start, d) = v;
    start += d;
  }

  // apply parents to rhs
  for (const_iterator parent = beginParents(); parent != endParents(); ++parent) {
    rhs += -get_S(parent) * x[*parent];
  }

  // solve system - backsubstitution
  Vector soln = permutation_.transpose() * get_R().triangularView<Eigen::Upper>().solve(rhs);

  // apply solution: inlined manually due to permutation
  size_t solnStart = 0;
  for (const_iterator frontal = beginFrontals(); frontal != endFrontals(); ++frontal) {
    const size_t d = this->dim(frontal);
    x[*frontal] = soln.segment(solnStart, d);
    solnStart += d;
  }
}

/* ************************************************************************* */
VectorValues GaussianConditional::solve(const VectorValues& x) const {
  VectorValues result = x;
  solveInPlace(result);
  return result;
  doSolveInPlace(*this, x); // Call helper version above
}

/* ************************************************************************* */
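The shared doSolveInPlace helper above reduces every per-conditional solve to one upper-triangular back-substitution. A plain-Eigen illustration of that single step (the LDL column permutation P is omitted here; this is a sketch, not the GTSAM API):

#include <Eigen/Dense>

// For one conditional with frontal block R, parent block S and rhs d,
// compute x_f = R^{-1} (d - S * x_s) given the already-solved parents x_s.
Eigen::VectorXd solveConditional(const Eigen::MatrixXd& R,   // square, upper-triangular
                                 const Eigen::MatrixXd& S,   // parent (separator) block
                                 const Eigen::VectorXd& d,   // right-hand side
                                 const Eigen::VectorXd& xs)  // solved parent values
{
  Eigen::VectorXd rhs = d - S * xs;                       // subtract parent contribution
  return R.triangularView<Eigen::Upper>().solve(rhs);    // back-substitution
}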
@@ -43,7 +43,7 @@ class JacobianFactor;
/**
 * A conditional Gaussian functions as the node in a Bayes network
 * It has a set of parents y,z, etc. and implements a probability density on x.
 * The negative log-probability is given by \f$ |Rx - (d - Sy - Tz - ...)|^2 \f$
 * The negative log-probability is given by \f$ \frac{1}{2} |Rx - (d - Sy - Tz - ...)|^2 \f$
 */
class GaussianConditional : public IndexConditional {

@@ -137,6 +137,15 @@ public:
  /** Copy constructor */
  GaussianConditional(const GaussianConditional& rhs);

  /** Combine several GaussianConditional into a single dense GC. The
   * conditionals enumerated by \c first and \c last must be in increasing
   * order, meaning that the parents of any conditional may not include a
   * conditional coming before it.
   * @param firstConditional Iterator to the first conditional to combine, must dereference to a shared_ptr<GaussianConditional>.
   * @param lastConditional Iterator to after the last conditional to combine, must dereference to a shared_ptr<GaussianConditional>. */
  template<typename ITERATOR>
  static shared_ptr Combine(ITERATOR firstConditional, ITERATOR lastConditional);

  /** Assignment operator */
  GaussianConditional& operator=(const GaussianConditional& rhs);
@@ -188,46 +197,39 @@ public:
   */
  boost::shared_ptr<JacobianFactor> toFactor() const;

  /**
   * Adds the RHS to a given VectorValues for use in solve() functions.
   * @param x is the values to be updated, assumed allocated
   */
  void rhs(VectorValues& x) const;

  /**
   * Adds the RHS to a given VectorValues for use in solve() functions.
   * @param x is the values to be updated, assumed allocated
   */
  void rhs(Permuted<VectorValues>& x) const;

  /**
   * solves a conditional Gaussian and stores the result in x
   * Solves a conditional Gaussian and writes the solution into the entries of
   * \c x for each frontal variable of the conditional. The parents are
   * assumed to have already been solved in and their values are read from \c x.
   * This function works for multiple frontal variables.
   * NOTE: assumes that the RHS for the frontals is stored in x, and
   * then replaces the RHS with the partial result for this conditional,
   * assuming that parents have been solved already.
   *
   * @param x values structure with solved parents, and the RHS for this conditional
   * @return solution \f$ x = R \ (d - Sy - Tz - ...) \f$ for each frontal variable
   * Given the Gaussian conditional with log likelihood \f$ |R x_f - (d - S x_s)|^2,
   * where \f$ f \f$ are the frontal variables and \f$ s \f$ are the separator
   * variables of this conditional, this solve function computes
   * \f$ x_f = R^{-1} (d - S x_s) \f$ using back-substitution.
   *
   * @param x VectorValues structure with solved parents \f$ x_s \f$, and into which the
   * solution \f$ x_f \f$ will be written.
   */
  void solveInPlace(VectorValues& x) const;

  /**
   * solves a conditional Gaussian and stores the result in x
   * Identical to solveInPlace() above, with a permuted x
   * Solves a conditional Gaussian and writes the solution into the entries of
   * \c x for each frontal variable of the conditional (version for permuted
   * VectorValues). The parents are assumed to have already been solved in
   * and their values are read from \c x. This function works for multiple
   * frontal variables.
   *
   * Given the Gaussian conditional with log likelihood \f$ |R x_f - (d - S x_s)|^2,
   * where \f$ f \f$ are the frontal variables and \f$ s \f$ are the separator
   * variables of this conditional, this solve function computes
   * \f$ x_f = R^{-1} (d - S x_s) \f$ using back-substitution.
   *
   * @param x VectorValues structure with solved parents \f$ x_s \f$, and into which the
   * solution \f$ x_f \f$ will be written.
   */
  void solveInPlace(Permuted<VectorValues>& x) const;

  /**
   * Solves a conditional Gaussian and returns a new VectorValues
   * This function works for multiple frontal variables, but should
   * only be used for testing as it copies the input vector values
   *
   * Assumes, as in solveInPlace, that the RHS has been stored in x
   * for all frontal variables
   */
  VectorValues solve(const VectorValues& x) const;

  // functions for transpose backsubstitution

  /**
@@ -274,5 +276,49 @@ GaussianConditional::GaussianConditional(ITERATOR firstKey, ITERATOR lastKey,
}

/* ************************************************************************* */
template<typename ITERATOR>
GaussianConditional::shared_ptr GaussianConditional::Combine(ITERATOR firstConditional, ITERATOR lastConditional) {

  // TODO: check for being a clique

  // Get dimensions from first conditional
  std::vector<size_t> dims; dims.reserve((*firstConditional)->size() + 1);
  for(const_iterator j = (*firstConditional)->begin(); j != (*firstConditional)->end(); ++j)
    dims.push_back((*firstConditional)->dim(j));
  dims.push_back(1);

  // We assume the conditionals form clique, so the first n variables will be
  // frontal variables in the new conditional.
  size_t nFrontals = 0;
  size_t nRows = 0;
  for(ITERATOR c = firstConditional; c != lastConditional; ++c) {
    nRows += dims[nFrontals];
    ++ nFrontals;
  }

  // Allocate combined conditional, has same keys as firstConditional
  Matrix tempCombined;
  VerticalBlockView<Matrix> tempBlockView(tempCombined, dims.begin(), dims.end(), 0);
  GaussianConditional::shared_ptr combinedConditional(new GaussianConditional((*firstConditional)->begin(), (*firstConditional)->end(), nFrontals, tempBlockView, zero(nRows)));

  // Resize to correct number of rows
  combinedConditional->matrix_.resize(nRows, combinedConditional->matrix_.cols());
  combinedConditional->rsd_.rowEnd() = combinedConditional->matrix_.rows();

  // Copy matrix and sigmas
  const size_t totalDims = combinedConditional->matrix_.cols();
  size_t currentSlot = 0;
  for(ITERATOR c = firstConditional; c != lastConditional; ++c) {
    const size_t startRow = combinedConditional->rsd_.offset(currentSlot); // Start row is same as start column
    combinedConditional->rsd_.range(0, currentSlot).block(startRow, 0, dims[currentSlot], combinedConditional->rsd_.offset(currentSlot)).operator=(
        Matrix::Zero(dims[currentSlot], combinedConditional->rsd_.offset(currentSlot)));
    combinedConditional->rsd_.range(currentSlot, dims.size()).block(startRow, 0, dims[currentSlot], totalDims - startRow).operator=(
        (*c)->matrix_);
    combinedConditional->sigmas_.segment(startRow, dims[currentSlot]) = (*c)->sigmas_;
    ++ currentSlot;
  }

  return combinedConditional;
}

} // gtsam
@@ -42,7 +42,6 @@ namespace gtsam {
  Index k = firstFrontalKey();
  // a VectorValues that only has a value for k: cannot be printed if k<>0
  x.insert(k, Vector(sigmas_.size()));
  rhs(x);
  solveInPlace(x);
  return x[k];
}
@@ -17,6 +17,7 @@

#include <gtsam/3rdparty/Eigen/Eigen/Dense>
#include <gtsam/linear/GaussianISAM.h>
#include <gtsam/linear/GaussianBayesTree.h>

#include <gtsam/inference/ISAM-inl.h>

@@ -50,50 +51,14 @@ Matrix GaussianISAM::marginalCovariance(Index j) const {
  return Super::jointBayesNet(key1, key2, &EliminateQR);
}

/* ************************************************************************* */
void optimize(const BayesTree<GaussianConditional>::sharedClique& clique, VectorValues& result) {
  // parents are assumed to already be solved and available in result
  // RHS for current conditional should already be in place in result
  clique->conditional()->solveInPlace(result);

  BOOST_FOREACH(const BayesTree<GaussianConditional>::sharedClique& child, clique->children_)
    optimize(child, result);
}

/* ************************************************************************* */
void treeRHS(const BayesTree<GaussianConditional>::sharedClique& clique, VectorValues& result) {
  clique->conditional()->rhs(result);
  BOOST_FOREACH(const BayesTree<GaussianConditional>::sharedClique& child, clique->children_)
    treeRHS(child, result);
}

/* ************************************************************************* */
VectorValues rhs(const BayesTree<GaussianConditional>& bayesTree, boost::optional<const GaussianISAM::Dims&> dims) {
  VectorValues result;
  if(dims)
    result = VectorValues(*dims);
  else
    result = *allocateVectorValues(bayesTree); // allocate
  treeRHS(bayesTree.root(), result); // recursively fill
  return result;
}

/* ************************************************************************* */
VectorValues optimize(const GaussianISAM& isam) {
  VectorValues result = rhs(isam, isam.dims_);
  // starting from the root, call optimize on each conditional
  optimize(isam.root(), result);
  VectorValues result(isam.dims_);
  // Call optimize for BayesTree
  optimizeInPlace((const BayesTree<GaussianConditional>&)isam, result);
  return result;
}

/* ************************************************************************* */
VectorValues optimize(const BayesTree<GaussianConditional>& bayesTree) {
  VectorValues result = rhs(bayesTree);
  // starting from the root, call optimize on each conditional
  optimize(bayesTree.root(), result);
  return result;
}

/* ************************************************************************* */
BayesNet<GaussianConditional> GaussianISAM::shortcut(sharedClique clique, sharedClique root) {
  return clique->shortcut(root,&EliminateQR);
@@ -90,19 +90,7 @@ public:

}; // \class GaussianISAM

/** load a VectorValues with the RHS of the system for backsubstitution */
VectorValues rhs(const BayesTree<GaussianConditional>& bayesTree, boost::optional<const GaussianISAM::Dims&> dims = boost::none);

/** recursively load RHS for system */
void treeRHS(const BayesTree<GaussianConditional>::sharedClique& clique, VectorValues& result);

// recursively optimize this conditional and all subtrees
void optimize(const BayesTree<GaussianConditional>::sharedClique& clique, VectorValues& result);

// optimize the BayesTree, starting from the root
VectorValues optimize(const GaussianISAM& isam);

// optimize the BayesTree, starting from the root
VectorValues optimize(const BayesTree<GaussianConditional>& bayesTree);

} // \namespace gtsam
@@ -20,6 +20,7 @@
#include <gtsam/inference/ClusterTree.h>
#include <gtsam/inference/JunctionTree.h>
#include <gtsam/linear/GaussianJunctionTree.h>
#include <gtsam/linear/GaussianBayesTree.h>

#include <vector>

@@ -33,32 +34,6 @@ namespace gtsam {

  using namespace std;

  /* ************************************************************************* */
  void GaussianJunctionTree::btreeBackSubstitute(const BTClique::shared_ptr& current, VectorValues& config) const {
    // solve the bayes net in the current node
    current->conditional()->solveInPlace(config);

//    GaussianBayesNet::const_reverse_iterator it = current->rbegin();
//    for (; it!=current->rend(); ++it) {
//      (*it)->solveInPlace(config); // solve and store result
//
////      Vector x = (*it)->solve(config); // Solve for that variable
////      config[(*it)->key()] = x;   // store result in partial solution
//    }

    // solve the bayes nets in the child nodes
    BOOST_FOREACH(const BTClique::shared_ptr& child, current->children()) {
      btreeBackSubstitute(child, config);
    }
  }

  /* ************************************************************************* */
  void GaussianJunctionTree::btreeRHS(const BTClique::shared_ptr& current, VectorValues& config) const {
    current->conditional()->rhs(config);
    BOOST_FOREACH(const BTClique::shared_ptr& child, current->children())
      btreeRHS(child, config);
  }

  /* ************************************************************************* */
  VectorValues GaussianJunctionTree::optimize(Eliminate function) const {
    tic(1, "GJT eliminate");

@@ -71,12 +46,11 @@ namespace gtsam {
    vector<size_t> dims(rootClique->conditional()->back()+1, 0);
    countDims(rootClique, dims);
    VectorValues result(dims);
    btreeRHS(rootClique, result);
    toc(2, "allocate VectorValues");

    // back-substitution
    tic(3, "back-substitute");
    btreeBackSubstitute(rootClique, result);
    internal::optimizeInPlace(rootClique, result);
    toc(3, "back-substitute");
    return result;
  }
@@ -45,13 +45,6 @@ namespace gtsam {
    typedef Base::sharedClique sharedClique;
    typedef GaussianFactorGraph::Eliminate Eliminate;

  protected:
    // back-substitute in topological sort order (parents first)
    void btreeBackSubstitute(const BTClique::shared_ptr& current, VectorValues& config) const;

    // find the RHS for the system in order to perform backsubstitution
    void btreeRHS(const BTClique::shared_ptr& current, VectorValues& config) const;

  public :

    /** Default constructor */
@@ -78,7 +78,8 @@ VectorValues::shared_ptr GaussianSequentialSolver::optimize() const {

  tic(2,"optimize");
  // Back-substitute
  VectorValues::shared_ptr solution(gtsam::optimize_(*bayesNet));
  VectorValues::shared_ptr solution(
      new VectorValues(gtsam::optimize(*bayesNet)));
  toc(2,"optimize");

  if(debug) solution->print("GaussianSequentialSolver, solution ");
@@ -71,7 +71,7 @@ namespace gtsam {
   * and the information vector @f$ \eta = P^{-1} \mu = \Lambda \mu @f$
   * to arrive at the canonical form of the Gaussian:
   * @f[
   * E(x) = 0.5 x^T \Lambda x - x^T \eta + 0.5 \mu^T \Lambda \mu + C
   * E(x) = 0.5 x^T \Lambda x - x^T \eta + 0.5 \mu^T \Lambda \mu
   * @f]
   *
   * This factor is one of the factors that can be in a GaussianFactorGraph.
@@ -642,8 +642,9 @@ namespace gtsam {
    r.vector() = Vector::Zero(r.dim());
    Index i = 0;
    BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, fg) {
      SubVector &y = r[i];
      for(JacobianFactor::const_iterator j = factor->begin(); j != factor->end(); ++j) {
        r[i] += factor->getA(j) * x[*j];
        y += factor->getA(j) * x[*j];
      }
      ++i;
    }
@@ -82,7 +82,7 @@ namespace gtsam {
  /* ************************************************************************* */
  // take a step, return true if converged
  bool step(const S& Ab, V& x) {
    if ((++k) >= parameters_.maxIterations()) return true;
    if ((++k) >= ((int)parameters_.maxIterations())) return true;

    //---------------------------------->
    double alpha = takeOptimalStep(x);
@@ -144,48 +144,6 @@ TEST( GaussianConditional, equals )
  EXPECT( expected.equals(actual) );
}

/* ************************************************************************* */
TEST( GaussianConditional, rhs_permuted )
{
  // Test filling the rhs when the VectorValues is permuted

  // Create a VectorValues
  VectorValues unpermuted(5, 2);
  unpermuted[0] << 1, 2;
  unpermuted[1] << 3, 4;
  unpermuted[2] << 5, 6;
  unpermuted[3] << 7, 8;
  unpermuted[4] << 9, 10;

  // Create a permutation
  Permutation permutation(5);
  permutation[0] = 4;
  permutation[1] = 3;
  permutation[2] = 2;
  permutation[3] = 1;
  permutation[4] = 0;

  // Permuted VectorValues
  Permuted<VectorValues> permuted(permutation, unpermuted);

  // Expected VectorValues
  VectorValues expected(5, 2);
  expected[0] << 1, 2;
  expected[1] << 3, 4;
  expected[2] << 5, 6;
  expected[3] << 7, 8;
  expected[4] << 11, 12;

  // GaussianConditional
  Vector d(2); d << 11, 12;
  GaussianConditional conditional(0, d, Matrix::Identity(2,2), Vector::Ones(2));

  // Fill rhs, conditional is on index 0, which should fill slot 4 of the values
  conditional.rhs(permuted);

  EXPECT(assert_equal(expected, unpermuted));
}

/* ************************************************************************* */
TEST( GaussianConditional, solve )
{

@@ -208,8 +166,7 @@ TEST( GaussianConditional, solve )

  Vector tau = ones(2);

  // RHS is different than the one in the solution vector
  GaussianConditional cg(_x_,ones(2), R, _x1_, A1, _l1_, A2, tau);
  GaussianConditional cg(_x_, d, R, _x1_, A1, _l1_, A2, tau);

  Vector sx1(2);
  sx1(0) = 1.0; sx1(1) = 1.0;

@@ -218,21 +175,16 @@ TEST( GaussianConditional, solve )
  sl1(0) = 1.0; sl1(1) = 1.0;

  VectorValues solution(vector<size_t>(3, 2));
  solution[_x_] = d;   // RHS
  solution[_x_] = d;
  solution[_x1_] = sx1; // parents
  solution[_l1_] = sl1;

  // NOTE: the solve functions assume the RHS is passed as the initialization of
  // the solution.
  VectorValues expected(vector<size_t>(3, 2));
  expected[_x_] = expectedX;
  expected[_x1_] = sx1;
  expected[_l1_] = sl1;

  VectorValues copy_result = cg.solve(solution);
  cg.solveInPlace(solution);

  EXPECT(assert_equal(expected, copy_result, 0.0001));
  EXPECT(assert_equal(expected, solution, 0.0001));
}

@@ -240,12 +192,11 @@ TEST( GaussianConditional, solve )
TEST( GaussianConditional, solve_simple )
{
  // no pivoting from LDL, so R matrix is not permuted
  // RHS is deliberately not the same as below
  Matrix full_matrix = Matrix_(4, 7,
      1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.0,
      0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.0,
      0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0,
      0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0);
      1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.1,
      0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.2,
      0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.3,
      0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.4);

  // solve system as a non-multifrontal version first
  // 2 variables, frontal has dim=4

@@ -261,7 +212,6 @@ TEST( GaussianConditional, solve_simple )
  // elimination order; _x_, _x1_
  vector<size_t> vdim; vdim += 4, 2;
  VectorValues actual(vdim);
  actual[_x_] = Vector_(4, 0.1, 0.2, 0.3, 0.4); // d
  actual[_x1_] = sx1; // parent

  VectorValues expected(vdim);

@@ -283,10 +233,10 @@ TEST( GaussianConditional, solve_multifrontal )
  // create full system, 3 variables, 2 frontals, all 2 dim
|
||||
// no pivoting from LDL, so R matrix is not permuted
|
||||
Matrix full_matrix = Matrix_(4, 7,
|
||||
1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.5,
|
||||
0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.6,
|
||||
0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.7,
|
||||
0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.8);
|
||||
1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.1,
|
||||
0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.2,
|
||||
0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.3,
|
||||
0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.4);
|
||||
|
||||
// 3 variables, all dim=2
|
||||
vector<size_t> dims; dims += 2, 2, 2, 1;
|
||||
|
|
@ -295,15 +245,13 @@ TEST( GaussianConditional, solve_multifrontal )
|
|||
vector<size_t> cgdims; cgdims += _x_, _x1_, _l1_;
|
||||
GaussianConditional cg(cgdims.begin(), cgdims.end(), 2, matrices, sigmas);
|
||||
|
||||
EXPECT(assert_equal(Vector_(4, 0.5, 0.6, 0.7, 0.8), cg.get_d()));
|
||||
EXPECT(assert_equal(Vector_(4, 0.1, 0.2, 0.3, 0.4), cg.get_d()));
|
||||
|
||||
// partial solution
|
||||
Vector sl1 = Vector_(2, 9.0, 10.0);
|
||||
|
||||
// elimination order; _x_, _x1_, _l1_
|
||||
VectorValues actual(vector<size_t>(3, 2));
|
||||
actual[_x_] = Vector_(2, 0.1, 0.2); // rhs
|
||||
actual[_x1_] = Vector_(2, 0.3, 0.4); // rhs
|
||||
actual[_l1_] = sl1; // parent
|
||||
|
||||
VectorValues expected(vector<size_t>(3, 2));
|
||||
|
|
@ -327,10 +275,10 @@ TEST( GaussianConditional, solve_multifrontal_permuted )
|
|||
// create full system, 3 variables, 2 frontals, all 2 dim
|
||||
// no pivoting from LDL, so R matrix is not permuted
|
||||
Matrix full_matrix = Matrix_(4, 7,
|
||||
1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.5,
|
||||
0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.6,
|
||||
0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.7,
|
||||
0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.8);
|
||||
1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.1,
|
||||
0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.2,
|
||||
0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.3,
|
||||
0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.4);
|
||||
|
||||
// 3 variables, all dim=2
|
||||
vector<size_t> dims; dims += 2, 2, 2, 1;
|
||||
|
|
@ -339,7 +287,7 @@ TEST( GaussianConditional, solve_multifrontal_permuted )
|
|||
vector<size_t> cgdims; cgdims += _x_, _x1_, _l1_;
|
||||
GaussianConditional cg(cgdims.begin(), cgdims.end(), 2, matrices, sigmas);
|
||||
|
||||
EXPECT(assert_equal(Vector_(4, 0.5, 0.6, 0.7, 0.8), cg.get_d()));
|
||||
EXPECT(assert_equal(Vector_(4, 0.1, 0.2, 0.3, 0.4), cg.get_d()));
|
||||
|
||||
// partial solution
|
||||
Vector sl1 = Vector_(2, 9.0, 10.0);
|
||||
|
|
|
|||
|
|
@ -103,6 +103,21 @@ TEST( GaussianJunctionTree, eliminate )
|
|||
EXPECT(assert_equal(*(bayesTree_expected.root()->children().front()), *(rootClique->children().front())));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST_UNSAFE( GaussianJunctionTree, GBNConstructor )
|
||||
{
|
||||
GaussianFactorGraph fg = createChain();
|
||||
GaussianJunctionTree jt(fg);
|
||||
BayesTree<GaussianConditional>::sharedClique root = jt.eliminate(&EliminateQR);
|
||||
BayesTree<GaussianConditional> expected;
|
||||
expected.insert(root);
|
||||
|
||||
GaussianBayesNet bn(*GaussianSequentialSolver(fg).eliminate());
|
||||
BayesTree<GaussianConditional> actual(bn);
|
||||
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( GaussianJunctionTree, optimizeMultiFrontal )
|
||||
{
|
||||
|
|
|
|||
|
|
@ -115,29 +115,6 @@ struct DoglegOptimizerImpl {
|
|||
*/
|
||||
static VectorValues ComputeDoglegPoint(double Delta, const VectorValues& dx_u, const VectorValues& dx_n, const bool verbose=false);
|
||||
|
||||
/** Compute the minimizer \f$ \delta x_u \f$ of the line search along the gradient direction \f$ g \f$ of
|
||||
* the function
|
||||
* \f[
|
||||
* M(\delta x) = (R \delta x - d)^T (R \delta x - d)
|
||||
* \f]
|
||||
* where \f$ R \f$ is an upper-triangular matrix and \f$ d \f$ is a vector.
|
||||
* Together \f$ (R,d) \f$ are either a Bayes' net or a Bayes' tree.
|
||||
*
|
||||
* The same quadratic error function written as a Taylor expansion of the original
|
||||
* non-linear error function is
|
||||
* \f[
|
||||
* M(\delta x) = f(x_0) + g(x_0)^T \delta x + \frac{1}{2} \delta x^T G(x_0) \delta x,
|
||||
* \f]
|
||||
* @tparam M The type of the Bayes' net or tree, currently
|
||||
* either BayesNet<GaussianConditional> (or GaussianBayesNet) or BayesTree<GaussianConditional>.
|
||||
* @param Rd The Bayes' net or tree \f$ (R,d) \f$ as described above, currently
|
||||
* this must be of type BayesNet<GaussianConditional> (or GaussianBayesNet) or
|
||||
* BayesTree<GaussianConditional>.
|
||||
* @return The minimizer \f$ \delta x_u \f$ along the gradient descent direction.
|
||||
*/
|
||||
template<class M>
|
||||
static VectorValues ComputeSteepestDescentPoint(const M& Rd);
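For reference, the closed-form step size this line search ends up using follows from substituting \f$ \delta x = \alpha g \f$ into the quadratic model above (same notation as the comment; the same result appears in the optimizeGradientSearch documentation later in this diff):

\[
  M(\alpha g) = M(0) + \alpha\, g^T g + \tfrac{1}{2}\alpha^2\, g^T G g,
  \qquad
  \frac{dM}{d\alpha} = g^T g + \alpha\, g^T G g = 0
  \;\Rightarrow\;
  \hat\alpha = -\frac{g^T g}{g^T G g} = -\frac{g^T g}{(R g)^T (R g)},
  \qquad
  \delta x_u = \hat\alpha\, g .
\]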
|
||||
|
||||
/** Compute the point on the line between the steepest descent point and the
|
||||
* Newton's method point intersecting the trust region boundary.
|
||||
* Mathematically, computes \f$ \tau \f$ such that \f$ 0<\tau<1 \f$ and
|
||||
|
|
@ -159,7 +136,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
|
|||
|
||||
// Compute steepest descent and Newton's method points
|
||||
tic(0, "Steepest Descent");
|
||||
VectorValues dx_u = ComputeSteepestDescentPoint(Rd);
|
||||
VectorValues dx_u = optimizeGradientSearch(Rd);
|
||||
toc(0, "Steepest Descent");
|
||||
tic(1, "optimize");
|
||||
VectorValues dx_n = optimize(Rd);
|
||||
|
|
@ -173,7 +150,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
|
|||
IterationResult result;
|
||||
|
||||
bool stay = true;
|
||||
enum { NONE, INCREASED_DELTA, DECREASED_DELTA } lastAction; // Used to prevent alternating between increasing and decreasing in one iteration
|
||||
enum { NONE, INCREASED_DELTA, DECREASED_DELTA } lastAction = NONE; // Used to prevent alternating between increasing and decreasing in one iteration
|
||||
while(stay) {
|
||||
tic(3, "Dog leg point");
|
||||
// Compute dog leg point
|
||||
|
|
@ -274,33 +251,4 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
|
|||
return result;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class M>
|
||||
VectorValues DoglegOptimizerImpl::ComputeSteepestDescentPoint(const M& Rd) {
|
||||
|
||||
tic(0, "Compute Gradient");
|
||||
// Compute gradient (call gradientAtZero function, which is defined for various linear systems)
|
||||
VectorValues grad = *allocateVectorValues(Rd);
|
||||
gradientAtZero(Rd, grad);
|
||||
double gradientSqNorm = grad.dot(grad);
|
||||
toc(0, "Compute Gradient");
|
||||
|
||||
tic(1, "Compute R*g");
|
||||
// Compute R * g
|
||||
FactorGraph<JacobianFactor> Rd_jfg(Rd);
|
||||
Errors Rg = Rd_jfg * grad;
|
||||
toc(1, "Compute R*g");
|
||||
|
||||
tic(2, "Compute minimizing step size");
|
||||
// Compute minimizing step size
|
||||
double step = -gradientSqNorm / dot(Rg, Rg);
|
||||
toc(2, "Compute minimizing step size");
|
||||
|
||||
tic(3, "Compute point");
|
||||
// Compute steepest descent point
|
||||
scal(step, grad);
|
||||
toc(3, "Compute point");
|
||||
return grad;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,16 +18,16 @@
|
|||
#include <gtsam/inference/FactorGraph.h>
|
||||
#include <gtsam/linear/JacobianFactor.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
#include <boost/bind.hpp>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
using namespace std;
|
||||
|
||||
/* ************************************************************************* */
|
||||
namespace internal {
|
||||
template<class CLIQUE>
|
||||
void optimize2(const boost::shared_ptr<CLIQUE>& clique, double threshold,
|
||||
void optimizeWildfire(const boost::shared_ptr<CLIQUE>& clique, double threshold,
|
||||
vector<bool>& changed, const vector<bool>& replaced, Permuted<VectorValues>& delta, int& count) {
|
||||
// if none of the variables in this clique (frontal and separator!) changed
|
||||
// significantly, then by the running intersection property, none of the
|
||||
|
|
@ -64,7 +64,6 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
// Back-substitute
|
||||
(*clique)->rhs(delta);
|
||||
(*clique)->solveInPlace(delta);
|
||||
count += (*clique)->nrFrontals();
|
||||
|
||||
|
|
@ -98,45 +97,61 @@ namespace gtsam {
|
|||
|
||||
// Recurse to children
|
||||
BOOST_FOREACH(const typename CLIQUE::shared_ptr& child, clique->children_) {
|
||||
optimize2(child, threshold, changed, replaced, delta, count);
|
||||
optimizeWildfire(child, threshold, changed, replaced, delta, count);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
// fast full version without threshold
template<class CLIQUE>
void optimize2(const boost::shared_ptr<CLIQUE>& clique, VectorValues& delta) {

// parents are assumed to already be solved and available in result
(*clique)->rhs(delta);
(*clique)->solveInPlace(delta);

// Solve children recursively
BOOST_FOREACH(const typename CLIQUE::shared_ptr& child, clique->children_) {
optimize2(child, delta);
}
}
|
||||
|
||||
///* ************************************************************************* */
|
||||
//boost::shared_ptr<VectorValues> optimize2(const GaussianISAM2::sharedClique& root) {
|
||||
// boost::shared_ptr<VectorValues> delta(new VectorValues());
|
||||
// set<Key> changed;
|
||||
// // starting from the root, call optimize on each conditional
|
||||
// optimize2(root, delta);
|
||||
// return delta;
|
||||
//}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class CLIQUE>
|
||||
int optimize2(const boost::shared_ptr<CLIQUE>& root, double threshold, const vector<bool>& keys, Permuted<VectorValues>& delta) {
|
||||
int optimizeWildfire(const boost::shared_ptr<CLIQUE>& root, double threshold, const vector<bool>& keys, Permuted<VectorValues>& delta) {
|
||||
vector<bool> changed(keys.size(), false);
|
||||
int count = 0;
|
||||
// starting from the root, call optimize on each conditional
|
||||
optimize2(root, threshold, changed, keys, delta, count);
|
||||
if(root)
|
||||
internal::optimizeWildfire(root, threshold, changed, keys, delta, count);
|
||||
return count;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class GRAPH>
|
||||
VectorValues optimizeGradientSearch(const ISAM2<GaussianConditional, GRAPH>& isam) {
|
||||
tic(0, "Allocate VectorValues");
|
||||
VectorValues grad = *allocateVectorValues(isam);
|
||||
toc(0, "Allocate VectorValues");
|
||||
|
||||
optimizeGradientSearchInPlace(isam, grad);
|
||||
|
||||
return grad;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class GRAPH>
|
||||
void optimizeGradientSearchInPlace(const ISAM2<GaussianConditional, GRAPH>& Rd, VectorValues& grad) {
|
||||
tic(1, "Compute Gradient");
|
||||
// Compute gradient (call gradientAtZero function, which is defined for various linear systems)
|
||||
gradientAtZero(Rd, grad);
|
||||
double gradientSqNorm = grad.dot(grad);
|
||||
toc(1, "Compute Gradient");
|
||||
|
||||
tic(2, "Compute R*g");
|
||||
// Compute R * g
|
||||
FactorGraph<JacobianFactor> Rd_jfg(Rd);
|
||||
Errors Rg = Rd_jfg * grad;
|
||||
toc(2, "Compute R*g");
|
||||
|
||||
tic(3, "Compute minimizing step size");
|
||||
// Compute minimizing step size
|
||||
double step = -gradientSqNorm / dot(Rg, Rg);
|
||||
toc(3, "Compute minimizing step size");
|
||||
|
||||
tic(4, "Compute point");
|
||||
// Compute steepest descent point
|
||||
scal(step, grad);
|
||||
toc(4, "Compute point");
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class CLIQUE>
|
||||
void nnz_internal(const boost::shared_ptr<CLIQUE>& clique, int& result) {
|
||||
|
|
|
|||
|
|
@ -26,16 +26,6 @@ using namespace gtsam;
|
|||
|
||||
namespace gtsam {
|
||||
|
||||
/* ************************************************************************* */
|
||||
VectorValues gradient(const BayesTree<GaussianConditional>& bayesTree, const VectorValues& x0) {
|
||||
return gradient(FactorGraph<JacobianFactor>(bayesTree), x0);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
void gradientAtZero(const BayesTree<GaussianConditional>& bayesTree, VectorValues& g) {
|
||||
gradientAtZero(FactorGraph<JacobianFactor>(bayesTree), g);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
VectorValues gradient(const BayesTree<GaussianConditional, ISAM2Clique<GaussianConditional> >& bayesTree, const VectorValues& x0) {
|
||||
return gradient(FactorGraph<JacobianFactor>(bayesTree), x0);
|
||||
|
|
|
|||
|
|
@ -63,48 +63,65 @@ public:
|
|||
|
||||
};
|
||||
|
||||
/** optimize the BayesTree, starting from the root */
template<class CLIQUE>
void optimize2(const boost::shared_ptr<CLIQUE>& root, VectorValues& delta);
/** Get the linear delta for the ISAM2 object, unpermuting the delta returned by ISAM2::getDelta() */
template<class GRAPH>
VectorValues optimize(const ISAM2<GaussianConditional, GRAPH>& isam) {
VectorValues delta = *allocateVectorValues(isam);
internal::optimizeInPlace(isam.root(), delta);
return delta;
}

/// optimize the BayesTree, starting from the root; "replaced" needs to contain
/// Optimize the BayesTree, starting from the root.
/// @param replaced Needs to contain
/// all variables that are contained in the top of the Bayes tree that has been
/// redone; "delta" is the current solution, an offset from the linearization
/// point; "threshold" is the maximum change against the PREVIOUS delta for
/// redone.
/// @param delta The current solution, an offset from the linearization
/// point.
/// @param threshold The maximum change against the PREVIOUS delta for
/// non-replaced variables that can be ignored, i.e. the old delta entry is kept
/// and recursive backsubstitution might eventually stop if none of the changed
/// variables are contained in the subtree.
/// returns the number of variables that were solved for
/// @return The number of variables that were solved for
template<class CLIQUE>
int optimize2(const boost::shared_ptr<CLIQUE>& root,
int optimizeWildfire(const boost::shared_ptr<CLIQUE>& root,
double threshold, const std::vector<bool>& replaced, Permuted<VectorValues>& delta);
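A much-simplified sketch of this thresholded "wildfire" traversal is below. It is not the templated GTSAM implementation: it assumes scalar variables, compares against an explicitly stored previous solution instead of the changed/replaced bookkeeping used above, and all names are hypothetical.

#include <cmath>
#include <map>
#include <memory>
#include <vector>

struct WildfireClique {
  std::vector<int> vars;                                   // frontal + separator indices
  std::vector<std::shared_ptr<WildfireClique>> children;
  void solveFrontals(std::map<int, double>& x) const {}    // placeholder for the clique solve
};

int optimizeWildfireSketch(const std::shared_ptr<WildfireClique>& clique, double threshold,
                           const std::vector<bool>& replaced,
                           const std::map<int, double>& previous, std::map<int, double>& x) {
  if (!clique) return 0;
  // If no variable of this clique was replaced or moved by more than the
  // threshold, the running intersection property lets us skip the whole subtree.
  bool recompute = false;
  for (int v : clique->vars)
    if (replaced[v] || std::fabs(x[v] - previous.at(v)) > threshold) { recompute = true; break; }
  if (!recompute) return 0;

  clique->solveFrontals(x);                                // parents are already up to date
  int count = static_cast<int>(clique->vars.size());       // counting all clique variables for simplicity
  for (const auto& child : clique->children)
    count += optimizeWildfireSketch(child, threshold, replaced, previous, x);
  return count;
}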
|
||||
|
||||
/**
|
||||
* Optimize along the gradient direction, with a closed-form computation to
|
||||
* perform the line search. The gradient is computed about \f$ \delta x=0 \f$.
|
||||
*
|
||||
* This function returns \f$ \delta x \f$ that minimizes a reparametrized
|
||||
* problem. The error function of a GaussianBayesNet is
|
||||
*
|
||||
* \f[ f(\delta x) = \frac{1}{2} |R \delta x - d|^2 = \frac{1}{2}d^T d - d^T R \delta x + \frac{1}{2} \delta x^T R^T R \delta x \f]
|
||||
*
|
||||
* with gradient and Hessian
|
||||
*
|
||||
* \f[ g(\delta x) = R^T(R\delta x - d), \qquad G(\delta x) = R^T R. \f]
|
||||
*
|
||||
* This function performs the line search in the direction of the
|
||||
* gradient evaluated at \f$ g = g(\delta x = 0) \f$ with step size
|
||||
* \f$ \alpha \f$ that minimizes \f$ f(\delta x = \alpha g) \f$:
|
||||
*
|
||||
* \f[ f(\alpha) = \frac{1}{2} d^T d + g^T \delta x + \frac{1}{2} \alpha^2 g^T G g \f]
|
||||
*
|
||||
* Optimizing by setting the derivative to zero yields
|
||||
* \f$ \hat \alpha = (-g^T g) / (g^T G g) \f$. For efficiency, this function
|
||||
* evaluates the denominator without computing the Hessian \f$ G \f$, returning
|
||||
*
|
||||
* \f[ \delta x = \hat\alpha g = \frac{-g^T g}{(R g)^T(R g)} \f]
|
||||
*/
|
||||
template<class GRAPH>
|
||||
VectorValues optimizeGradientSearch(const ISAM2<GaussianConditional, GRAPH>& isam);
|
||||
|
||||
/** In-place version of optimizeGradientSearch requiring pre-allocated VectorValues \c x */
|
||||
template<class GRAPH>
|
||||
void optimizeGradientSearchInPlace(const ISAM2<GaussianConditional, GRAPH>& isam, VectorValues& grad);
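Since the formula above never forms the Hessian explicitly, a compact dense illustration may help. This is a plain-Eigen sketch assuming R is square upper-triangular with right-hand side d; it is not a drop-in for the VectorValues-based functions declared here.

#include <Eigen/Dense>

Eigen::VectorXd gradientSearchStep(const Eigen::MatrixXd& R, const Eigen::VectorXd& d) {
  // Gradient of 0.5*|R*x - d|^2 at x = 0 is g = -R^T d.
  Eigen::VectorXd g = -R.transpose() * d;
  // Optimal step size alpha = -(g^T g) / ((R g)^T (R g)); the Hessian R^T R
  // never has to be formed explicitly.
  const Eigen::VectorXd Rg = R * g;
  const double alpha = -g.squaredNorm() / Rg.squaredNorm();
  return alpha * g;  // delta x_u = alpha * g
}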
|
||||
|
||||
/// calculate the number of non-zero entries for the tree starting at clique (use root for complete matrix)
|
||||
template<class CLIQUE>
|
||||
int calculate_nnz(const boost::shared_ptr<CLIQUE>& clique);
|
||||
|
||||
/**
|
||||
* Compute the gradient of the energy function,
|
||||
* \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
|
||||
* centered around \f$ x = x_0 \f$.
|
||||
* The gradient is \f$ R^T(Rx-d) \f$.
|
||||
* @param bayesTree The Gaussian Bayes Tree $(R,d)$
|
||||
* @param x0 The center about which to compute the gradient
|
||||
* @return The gradient as a VectorValues
|
||||
*/
|
||||
VectorValues gradient(const BayesTree<GaussianConditional>& bayesTree, const VectorValues& x0);
|
||||
|
||||
/**
|
||||
* Compute the gradient of the energy function,
|
||||
* \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
|
||||
* centered around zero.
|
||||
* The gradient about zero is \f$ -R^T d \f$. See also gradient(const GaussianBayesNet&, const VectorValues&).
|
||||
* @param bayesTree The Gaussian Bayes Tree $(R,d)$
|
||||
* @param [output] g A VectorValues to store the gradient, which must be preallocated, see allocateVectorValues
|
||||
* @return The gradient as a VectorValues
|
||||
*/
|
||||
void gradientAtZero(const BayesTree<GaussianConditional>& bayesTree, VectorValues& g);
|
||||
|
||||
/**
|
||||
* Compute the gradient of the energy function,
|
||||
* \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
|
||||
|
|
|
|||
|
|
@ -15,6 +15,8 @@
|
|||
* @author Michael Kaess, Richard Roberts
|
||||
*/
|
||||
|
||||
#include <gtsam/linear/GaussianBayesTree.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
using namespace std;
|
||||
|
|
@ -46,8 +48,8 @@ struct ISAM2<CONDITIONAL, GRAPH>::Impl {
|
|||
* @param nodes Current BayesTree::Nodes index to be augmented with slots for new variables
|
||||
* @param keyFormatter Formatter for printing nonlinear keys during debugging
|
||||
*/
|
||||
static void AddVariables(const Values& newTheta, Values& theta, Permuted<VectorValues>& delta, Ordering& ordering,
|
||||
typename Base::Nodes& nodes, const KeyFormatter& keyFormatter = DefaultKeyFormatter);
|
||||
static void AddVariables(const Values& newTheta, Values& theta, Permuted<VectorValues>& delta, vector<bool>& replacedKeys,
|
||||
Ordering& ordering, typename Base::Nodes& nodes, const KeyFormatter& keyFormatter = DefaultKeyFormatter);
|
||||
|
||||
/**
|
||||
* Extract the set of variable indices from a NonlinearFactorGraph. For each Symbol
|
||||
|
|
@ -121,12 +123,15 @@ struct ISAM2<CONDITIONAL, GRAPH>::Impl {
|
|||
static PartialSolveResult PartialSolve(GaussianFactorGraph& factors, const FastSet<Index>& keys,
|
||||
const ReorderingMode& reorderingMode);
|
||||
|
||||
static size_t UpdateDelta(const boost::shared_ptr<ISAM2Clique<CONDITIONAL> >& root, std::vector<bool>& replacedKeys, Permuted<VectorValues>& delta, double wildfireThreshold);
|
||||
|
||||
};
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class CONDITIONAL, class GRAPH>
|
||||
void ISAM2<CONDITIONAL,GRAPH>::Impl::AddVariables(
|
||||
const Values& newTheta, Values& theta, Permuted<VectorValues>& delta, Ordering& ordering, typename Base::Nodes& nodes, const KeyFormatter& keyFormatter) {
|
||||
const Values& newTheta, Values& theta, Permuted<VectorValues>& delta, vector<bool>& replacedKeys,
|
||||
Ordering& ordering,typename Base::Nodes& nodes, const KeyFormatter& keyFormatter) {
|
||||
const bool debug = ISDEBUG("ISAM2 AddVariables");
|
||||
|
||||
theta.insert(newTheta);
|
||||
|
|
@ -153,6 +158,7 @@ void ISAM2<CONDITIONAL,GRAPH>::Impl::AddVariables(
|
|||
assert(ordering.size() == delta.size());
|
||||
}
|
||||
assert(ordering.nVars() >= nodes.size());
|
||||
replacedKeys.resize(ordering.nVars(), false);
|
||||
nodes.resize(ordering.nVars());
|
||||
}
|
||||
|
||||
|
|
@ -229,7 +235,8 @@ void ISAM2<CONDITIONAL, GRAPH>::Impl::ExpmapMasked(Values& values, const Permute
|
|||
invalidateIfDebug = boost::optional<Permuted<VectorValues>&>();
|
||||
#endif
|
||||
|
||||
assert(values.size() == ordering.size());
|
||||
assert(values.size() == ordering.nVars());
|
||||
assert(delta.size() == ordering.nVars());
|
||||
Values::iterator key_value;
|
||||
Ordering::const_iterator key_index;
|
||||
for(key_value = values.begin(), key_index = ordering.begin();
|
||||
|
|
@ -303,7 +310,7 @@ ISAM2<CONDITIONAL, GRAPH>::Impl::PartialSolve(GaussianFactorGraph& factors,
|
|||
}
|
||||
}
|
||||
}
|
||||
Permutation::shared_ptr affectedColamd(Inference::PermutationCOLAMD_(affectedFactorsIndex, cmember));
|
||||
Permutation::shared_ptr affectedColamd(inference::PermutationCOLAMD_(affectedFactorsIndex, cmember));
|
||||
toc(3,"ccolamd");
|
||||
tic(4,"ccolamd permutations");
|
||||
Permutation::shared_ptr affectedColamdInverse(affectedColamd->inverse());
|
||||
|
|
@ -354,4 +361,55 @@ ISAM2<CONDITIONAL, GRAPH>::Impl::PartialSolve(GaussianFactorGraph& factors,
|
|||
return result;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
namespace internal {
|
||||
inline static void optimizeInPlace(const boost::shared_ptr<ISAM2Clique<GaussianConditional> >& clique, VectorValues& result) {
|
||||
// parents are assumed to already be solved and available in result
|
||||
clique->conditional()->solveInPlace(result);
|
||||
|
||||
// starting from the root, call optimize on each conditional
|
||||
BOOST_FOREACH(const boost::shared_ptr<ISAM2Clique<GaussianConditional> >& child, clique->children_)
|
||||
optimizeInPlace(child, result);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class CONDITIONAL, class GRAPH>
|
||||
size_t ISAM2<CONDITIONAL,GRAPH>::Impl::UpdateDelta(const boost::shared_ptr<ISAM2Clique<CONDITIONAL> >& root, std::vector<bool>& replacedKeys, Permuted<VectorValues>& delta, double wildfireThreshold) {
|
||||
|
||||
size_t lastBacksubVariableCount;
|
||||
|
||||
if (wildfireThreshold <= 0.0) {
|
||||
// Threshold is zero or less, so do a full recalculation
|
||||
// Collect dimensions and allocate new VectorValues
|
||||
vector<size_t> dims(delta.size());
|
||||
for(size_t j=0; j<delta.size(); ++j)
|
||||
dims[j] = delta->dim(j);
|
||||
VectorValues newDelta(dims);
|
||||
|
||||
// Optimize full solution delta
|
||||
internal::optimizeInPlace(root, newDelta);
|
||||
|
||||
// Copy solution into delta
|
||||
delta.permutation() = Permutation::Identity(delta.size());
|
||||
delta.container() = newDelta;
|
||||
|
||||
lastBacksubVariableCount = delta.size();
|
||||
|
||||
} else {
|
||||
// Optimize with wildfire
|
||||
lastBacksubVariableCount = optimizeWildfire(root, wildfireThreshold, replacedKeys, delta); // modifies delta_
|
||||
|
||||
#ifndef NDEBUG
|
||||
for(size_t j=0; j<delta.container().size(); ++j)
|
||||
assert(delta.container()[j].unaryExpr(&isfinite<double>).all());
|
||||
#endif
|
||||
}
|
||||
|
||||
// Clear replacedKeys
|
||||
replacedKeys.assign(replacedKeys.size(), false);
|
||||
|
||||
return lastBacksubVariableCount;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -41,7 +41,7 @@ static const double batchThreshold = 0.65;
|
|||
/* ************************************************************************* */
|
||||
template<class CONDITIONAL, class GRAPH>
|
||||
ISAM2<CONDITIONAL, GRAPH>::ISAM2(const ISAM2Params& params):
|
||||
delta_(Permutation(), deltaUnpermuted_), params_(params) {
|
||||
delta_(Permutation(), deltaUnpermuted_), deltaUptodate_(true), params_(params) {
|
||||
// See note in gtsam/base/boost_variant_with_workaround.h
|
||||
if(params_.optimizationParams.type() == typeid(ISAM2DoglegParams))
|
||||
doglegDelta_ = boost::get<ISAM2DoglegParams>(params_.optimizationParams).initialDelta;
|
||||
|
|
@ -50,7 +50,7 @@ ISAM2<CONDITIONAL, GRAPH>::ISAM2(const ISAM2Params& params):
|
|||
/* ************************************************************************* */
|
||||
template<class CONDITIONAL, class GRAPH>
|
||||
ISAM2<CONDITIONAL, GRAPH>::ISAM2():
|
||||
delta_(Permutation(), deltaUnpermuted_) {
|
||||
delta_(Permutation(), deltaUnpermuted_), deltaUptodate_(true) {
|
||||
// See note in gtsam/base/boost_variant_with_workaround.h
|
||||
if(params_.optimizationParams.type() == typeid(ISAM2DoglegParams))
|
||||
doglegDelta_ = boost::get<ISAM2DoglegParams>(params_.optimizationParams).initialDelta;
|
||||
|
|
@ -238,7 +238,7 @@ boost::shared_ptr<FastSet<Index> > ISAM2<CONDITIONAL, GRAPH>::recalculate(
|
|||
if(theta_.size() > constrainedKeysSet.size()) {
|
||||
BOOST_FOREACH(Index var, constrainedKeysSet) { cmember[var] = 1; }
|
||||
}
|
||||
Permutation::shared_ptr colamd(Inference::PermutationCOLAMD_(variableIndex_, cmember));
|
||||
Permutation::shared_ptr colamd(inference::PermutationCOLAMD_(variableIndex_, cmember));
|
||||
Permutation::shared_ptr colamdInverse(colamd->inverse());
|
||||
toc(1,"CCOLAMD");
|
||||
|
||||
|
|
@ -413,13 +413,21 @@ ISAM2Result ISAM2<CONDITIONAL, GRAPH>::update(
|
|||
lastBacksubVariableCount = 0;
|
||||
lastNnzTop = 0;
|
||||
ISAM2Result result;
|
||||
const bool relinearizeThisStep = force_relinearize || (params_.enableRelinearization && count % params_.relinearizeSkip == 0);
|
||||
|
||||
if(verbose) {
|
||||
cout << "ISAM2::update\n";
|
||||
this->print("ISAM2: ");
|
||||
}
|
||||
|
||||
tic(0,"push_back factors");
|
||||
// Update delta if we need it to check relinearization later
|
||||
if(relinearizeThisStep) {
|
||||
tic(0, "updateDelta");
|
||||
updateDelta(disableReordering);
|
||||
toc(0, "updateDelta");
|
||||
}
|
||||
|
||||
tic(1,"push_back factors");
|
||||
// Add the new factor indices to the result struct
|
||||
result.newFactorsIndices.resize(newFactors.size());
|
||||
for(size_t i=0; i<newFactors.size(); ++i)
|
||||
|
|
@ -438,19 +446,19 @@ ISAM2Result ISAM2<CONDITIONAL, GRAPH>::update(
|
|||
|
||||
// Remove removed factors from the variable index so we do not attempt to relinearize them
|
||||
variableIndex_.remove(removeFactorIndices, *removeFactors.symbolic(ordering_));
|
||||
toc(0,"push_back factors");
|
||||
toc(1,"push_back factors");
|
||||
|
||||
tic(1,"add new variables");
|
||||
tic(2,"add new variables");
|
||||
// 2. Initialize any new variables \Theta_{new} and add \Theta:=\Theta\cup\Theta_{new}.
|
||||
Impl::AddVariables(newTheta, theta_, delta_, ordering_, Base::nodes_);
|
||||
toc(1,"add new variables");
|
||||
Impl::AddVariables(newTheta, theta_, delta_, deltaReplacedMask_, ordering_, Base::nodes_);
|
||||
toc(2,"add new variables");
|
||||
|
||||
tic(2,"evaluate error before");
|
||||
tic(3,"evaluate error before");
|
||||
if(params_.evaluateNonlinearError)
|
||||
result.errorBefore.reset(nonlinearFactors_.error(calculateEstimate()));
|
||||
toc(2,"evaluate error before");
|
||||
toc(3,"evaluate error before");
|
||||
|
||||
tic(3,"gather involved keys");
|
||||
tic(4,"gather involved keys");
|
||||
// 3. Mark linear update
|
||||
FastSet<Index> markedKeys = Impl::IndicesFromFactors(ordering_, newFactors); // Get keys from new factors
|
||||
// Also mark keys involved in removed factors
|
||||
|
|
@ -462,11 +470,11 @@ ISAM2Result ISAM2<CONDITIONAL, GRAPH>::update(
|
|||
// is a vector of size_t, so the constructor unintentionally resolves to
|
||||
// vector(size_t count, Index value) instead of the iterator constructor.
|
||||
FastVector<Index> newKeys; newKeys.assign(markedKeys.begin(), markedKeys.end()); // Make a copy of these, as we'll soon add to them
|
||||
toc(3,"gather involved keys");
|
||||
toc(4,"gather involved keys");
|
||||
|
||||
// Check relinearization if we're at the nth step, or we are using a looser loop relin threshold
|
||||
if (force_relinearize || (params_.enableRelinearization && count % params_.relinearizeSkip == 0)) { // todo: every n steps
|
||||
tic(4,"gather relinearize keys");
|
||||
if (relinearizeThisStep) {
|
||||
tic(5,"gather relinearize keys");
|
||||
vector<bool> markedRelinMask(ordering_.nVars(), false);
|
||||
// 4. Mark keys in \Delta above threshold \beta: J=\{\Delta_{j}\in\Delta|\Delta_{j}\geq\beta\}.
|
||||
FastSet<Index> relinKeys = Impl::CheckRelinearization(delta_, ordering_, params_.relinearizeThreshold);
|
||||
|
|
@ -475,19 +483,19 @@ ISAM2Result ISAM2<CONDITIONAL, GRAPH>::update(
|
|||
// Add the variables being relinearized to the marked keys
|
||||
BOOST_FOREACH(const Index j, relinKeys) { markedRelinMask[j] = true; }
|
||||
markedKeys.insert(relinKeys.begin(), relinKeys.end());
|
||||
toc(4,"gather relinearize keys");
|
||||
toc(5,"gather relinearize keys");
|
||||
|
||||
tic(5,"fluid find_all");
|
||||
tic(6,"fluid find_all");
|
||||
// 5. Mark all cliques that involve marked variables \Theta_{J} and all their ancestors.
|
||||
if (!relinKeys.empty() && this->root())
|
||||
Impl::FindAll(this->root(), markedKeys, markedRelinMask); // add other cliques that have the marked ones in the separator
|
||||
toc(5,"fluid find_all");
|
||||
toc(6,"fluid find_all");
|
||||
|
||||
tic(6,"expmap");
|
||||
tic(7,"expmap");
|
||||
// 6. Update linearization point for marked variables: \Theta_{J}:=\Theta_{J}+\Delta_{J}.
|
||||
if (!relinKeys.empty())
|
||||
Impl::ExpmapMasked(theta_, delta_, ordering_, markedRelinMask, delta_);
|
||||
toc(6,"expmap");
|
||||
toc(7,"expmap");
|
||||
|
||||
result.variablesRelinearized = markedKeys.size();
|
||||
|
||||
|
|
@ -501,7 +509,7 @@ ISAM2Result ISAM2<CONDITIONAL, GRAPH>::update(
|
|||
#endif
|
||||
}
|
||||
|
||||
tic(7,"linearize new");
|
||||
tic(8,"linearize new");
|
||||
tic(1,"linearize");
|
||||
// 7. Linearize new factors
|
||||
FactorGraph<GaussianFactor>::shared_ptr linearFactors = newFactors.linearize(theta_, ordering_);
|
||||
|
|
@ -511,9 +519,9 @@ ISAM2Result ISAM2<CONDITIONAL, GRAPH>::update(
|
|||
// Augment the variable index with the new factors
|
||||
variableIndex_.augment(*linearFactors);
|
||||
toc(2,"augment VI");
|
||||
toc(7,"linearize new");
|
||||
toc(8,"linearize new");
|
||||
|
||||
tic(8,"recalculate");
|
||||
tic(9,"recalculate");
|
||||
// 8. Redo top of Bayes tree
|
||||
// Convert constrained symbols to indices
|
||||
boost::optional<FastSet<Index> > constrainedIndices;
|
||||
|
|
@ -526,49 +534,17 @@ ISAM2Result ISAM2<CONDITIONAL, GRAPH>::update(
|
|||
boost::shared_ptr<FastSet<Index> > replacedKeys;
|
||||
if(!markedKeys.empty() || !newKeys.empty())
|
||||
replacedKeys = recalculate(markedKeys, newKeys, linearFactors, constrainedIndices, result);
|
||||
toc(8,"recalculate");
|
||||
|
||||
tic(9,"solve");
|
||||
// Update replaced keys mask (accumulates until back-substitution takes place)
|
||||
if(replacedKeys) {
|
||||
BOOST_FOREACH(const Index var, *replacedKeys) {
|
||||
deltaReplacedMask_[var] = true; } }
|
||||
toc(9,"recalculate");
|
||||
|
||||
//tic(9,"solve");
|
||||
// 9. Solve
|
||||
if(params_.optimizationParams.type() == typeid(ISAM2GaussNewtonParams)) {
|
||||
// See note in gtsam/base/boost_variant_with_workaround.h
|
||||
const ISAM2GaussNewtonParams& gaussNewtonParams =
|
||||
boost::get<ISAM2GaussNewtonParams>(params_.optimizationParams);
|
||||
if (gaussNewtonParams.wildfireThreshold <= 0.0 || disableReordering) {
|
||||
VectorValues newDelta(theta_.dims(ordering_));
|
||||
optimize2(this->root(), newDelta);
|
||||
if(debug) newDelta.print("newDelta: ");
|
||||
assert(newDelta.size() == delta_.size());
|
||||
delta_.permutation() = Permutation::Identity(delta_.size());
|
||||
delta_.container() = newDelta;
|
||||
lastBacksubVariableCount = theta_.size();
|
||||
} else {
|
||||
vector<bool> replacedKeysMask(variableIndex_.size(), false);
|
||||
if(replacedKeys) {
|
||||
BOOST_FOREACH(const Index var, *replacedKeys) {
|
||||
replacedKeysMask[var] = true; } }
|
||||
lastBacksubVariableCount = optimize2(this->root(), gaussNewtonParams.wildfireThreshold, replacedKeysMask, delta_); // modifies delta_
|
||||
|
||||
#ifndef NDEBUG
|
||||
for(size_t j=0; j<delta_.container().size(); ++j)
|
||||
assert(delta_.container()[j].unaryExpr(&isfinite<double>).all());
|
||||
#endif
|
||||
}
|
||||
} else if(params_.optimizationParams.type() == typeid(ISAM2DoglegParams)) {
|
||||
// See note in gtsam/base/boost_variant_with_workaround.h
|
||||
const ISAM2DoglegParams& doglegParams =
|
||||
boost::get<ISAM2DoglegParams>(params_.optimizationParams);
|
||||
// Do one Dogleg iteration
|
||||
tic(1, "Dogleg Iterate");
|
||||
DoglegOptimizerImpl::IterationResult doglegResult = DoglegOptimizerImpl::Iterate(
|
||||
*doglegDelta_, doglegParams.adaptationMode, *this, nonlinearFactors_, theta_, ordering_, nonlinearFactors_.error(theta_), doglegParams.verbose);
|
||||
toc(1, "Dogleg Iterate");
|
||||
// Update Delta and linear step
|
||||
doglegDelta_ = doglegResult.Delta;
|
||||
delta_.permutation() = Permutation::Identity(delta_.size()); // Dogleg solves for the full delta so there is no permutation
|
||||
delta_.container() = doglegResult.dx_d; // Copy the VectorValues containing the linear solution
|
||||
}
|
||||
toc(9,"solve");
|
||||
if(debug) delta_.print("delta_: ");
|
||||
//toc(9,"solve");
|
||||
|
||||
tic(10,"evaluate error after");
|
||||
if(params_.evaluateNonlinearError)
|
||||
|
|
@ -576,10 +552,47 @@ ISAM2Result ISAM2<CONDITIONAL, GRAPH>::update(
|
|||
toc(10,"evaluate error after");
|
||||
|
||||
result.cliques = this->nodes().size();
|
||||
deltaUptodate_ = false;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class CONDITIONAL, class GRAPH>
|
||||
void ISAM2<CONDITIONAL, GRAPH>::updateDelta(bool forceFullSolve) const {
|
||||
|
||||
if(params_.optimizationParams.type() == typeid(ISAM2GaussNewtonParams)) {
|
||||
// If using Gauss-Newton, update with wildfireThreshold
|
||||
const ISAM2GaussNewtonParams& gaussNewtonParams =
|
||||
boost::get<ISAM2GaussNewtonParams>(params_.optimizationParams);
|
||||
const double effectiveWildfireThreshold = forceFullSolve ? 0.0 : gaussNewtonParams.wildfireThreshold;
|
||||
tic(0, "Wildfire update");
|
||||
lastBacksubVariableCount = Impl::UpdateDelta(this->root(), deltaReplacedMask_, delta_, effectiveWildfireThreshold);
|
||||
toc(0, "Wildfire update");
|
||||
|
||||
} else if(params_.optimizationParams.type() == typeid(ISAM2DoglegParams)) {
|
||||
// If using Dogleg, do a Dogleg step
|
||||
const ISAM2DoglegParams& doglegParams =
|
||||
boost::get<ISAM2DoglegParams>(params_.optimizationParams);
|
||||
|
||||
// Do one Dogleg iteration
|
||||
tic(1, "Dogleg Iterate");
|
||||
DoglegOptimizerImpl::IterationResult doglegResult = DoglegOptimizerImpl::Iterate(
|
||||
*doglegDelta_, doglegParams.adaptationMode, *this, nonlinearFactors_, theta_, ordering_, nonlinearFactors_.error(theta_), doglegParams.verbose);
|
||||
toc(1, "Dogleg Iterate");
|
||||
|
||||
// Update Delta and linear step
|
||||
doglegDelta_ = doglegResult.Delta;
|
||||
delta_.permutation() = Permutation::Identity(delta_.size()); // Dogleg solves for the full delta so there is no permutation
|
||||
delta_.container() = doglegResult.dx_d; // Copy the VectorValues containing the linear solution
|
||||
|
||||
// Clear replaced mask
|
||||
deltaReplacedMask_.assign(deltaReplacedMask_.size(), false);
|
||||
}
|
||||
|
||||
deltaUptodate_ = true;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class CONDITIONAL, class GRAPH>
|
||||
Values ISAM2<CONDITIONAL, GRAPH>::calculateEstimate() const {
|
||||
|
|
@ -587,17 +600,17 @@ Values ISAM2<CONDITIONAL, GRAPH>::calculateEstimate() const {
|
|||
// handles Permuted<VectorValues>
|
||||
Values ret(theta_);
|
||||
vector<bool> mask(ordering_.nVars(), true);
|
||||
Impl::ExpmapMasked(ret, delta_, ordering_, mask);
|
||||
Impl::ExpmapMasked(ret, getDelta(), ordering_, mask);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class CONDITIONAL, class GRAPH>
|
||||
template<class KEY>
|
||||
typename KEY::Value ISAM2<CONDITIONAL, GRAPH>::calculateEstimate(const KEY& key) const {
|
||||
template<class VALUE>
|
||||
VALUE ISAM2<CONDITIONAL, GRAPH>::calculateEstimate(Key key) const {
|
||||
const Index index = getOrdering()[key];
|
||||
const SubVector delta = getDelta()[index];
|
||||
return getLinearizationPoint()[key].retract(delta);
|
||||
return theta_.at<VALUE>(key).retract(delta);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -610,10 +623,10 @@ Values ISAM2<CONDITIONAL, GRAPH>::calculateBestEstimate() const {
|
|||
|
||||
/* ************************************************************************* */
|
||||
template<class CONDITIONAL, class GRAPH>
|
||||
VectorValues optimize(const ISAM2<CONDITIONAL, GRAPH>& isam) {
|
||||
VectorValues delta = *allocateVectorValues(isam);
|
||||
optimize2(isam.root(), delta);
|
||||
return delta;
|
||||
const Permuted<VectorValues>& ISAM2<CONDITIONAL, GRAPH>::getDelta() const {
|
||||
if(!deltaUptodate_)
|
||||
updateDelta();
|
||||
return delta_;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -283,19 +283,42 @@ protected:
|
|||
/** The linear delta from the last linear solution, an update to the estimate in theta */
|
||||
VectorValues deltaUnpermuted_;
|
||||
|
||||
/** @brief The permutation through which the deltaUnpermuted_ is
|
||||
/** The permutation through which the deltaUnpermuted_ is
|
||||
* referenced.
|
||||
*
|
||||
* Permuting Vector entries would be slow, so for performance we
|
||||
* instead maintain this permutation through which we access the linear delta
|
||||
* indirectly
|
||||
*
|
||||
* This is \c mutable because it is a "cached" variable - it is not updated
|
||||
* until either requested with getDelta() or calculateEstimate(), or needed
|
||||
* during update() to evaluate whether to relinearize variables.
|
||||
*/
|
||||
Permuted<VectorValues> delta_;
|
||||
mutable Permuted<VectorValues> delta_;
|
||||
|
||||
/** Indicates whether the current delta is up-to-date, only used
|
||||
* internally - delta will always be updated if necessary when it is
|
||||
* requested with getDelta() or calculateEstimate().
|
||||
*
|
||||
* This is \c mutable because it is used internally to not update delta_
|
||||
* until it is needed.
|
||||
*/
|
||||
mutable bool deltaUptodate_;
|
||||
|
||||
/** A cumulative mask for the variables that were replaced and have not yet
|
||||
* been updated in the linear solution delta_, this is only used internally,
|
||||
* delta will always be updated if necessary when requested with getDelta()
|
||||
* or calculateEstimate().
|
||||
*
|
||||
* This is \c mutable because it is used internally to not update delta_
|
||||
* until it is needed.
|
||||
*/
|
||||
mutable std::vector<bool> deltaReplacedMask_;
|
||||
|
||||
/** All original nonlinear factors are stored here to use during relinearization */
|
||||
GRAPH nonlinearFactors_;
|
||||
|
||||
/** @brief The current elimination ordering Symbols to Index (integer) keys.
|
||||
/** The current elimination ordering Symbols to Index (integer) keys.
|
||||
*
|
||||
* We keep it up to date as we add and reorder variables.
|
||||
*/
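The cached-delta scheme documented above (mutable storage plus an up-to-date flag, refreshed inside const accessors) is a standard lazy-evaluation pattern. A stripped-down illustration, with entirely hypothetical names and no GTSAM types, is:

#include <vector>

class CachedSolver {
 public:
  void update(/* new factors */) {
    // ... incorporate new information ...
    deltaUptodate_ = false;               // invalidate, but do not recompute yet
  }
  const std::vector<double>& getDelta() const {
    if (!deltaUptodate_) {
      delta_ = recompute();               // allowed in a const method: members are mutable
      deltaUptodate_ = true;
    }
    return delta_;
  }
 private:
  std::vector<double> recompute() const { return std::vector<double>(3, 0.0); }
  mutable std::vector<double> delta_;     // cached linear solution
  mutable bool deltaUptodate_ = false;    // dirty flag
};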
|
||||
|
|
@ -305,7 +328,7 @@ protected:
|
|||
ISAM2Params params_;
|
||||
|
||||
/** The current Dogleg Delta (trust region radius) */
|
||||
boost::optional<double> doglegDelta_;
|
||||
mutable boost::optional<double> doglegDelta_;
|
||||
|
||||
private:
|
||||
#ifndef NDEBUG
|
||||
|
|
@ -337,6 +360,8 @@ public:
|
|||
newISAM2->variableIndex_ = variableIndex_;
|
||||
newISAM2->deltaUnpermuted_ = deltaUnpermuted_;
|
||||
newISAM2->delta_ = delta_;
|
||||
newISAM2->deltaUptodate_ = deltaUptodate_;
|
||||
newISAM2->deltaReplacedMask_ = deltaReplacedMask_;
|
||||
newISAM2->nonlinearFactors_ = nonlinearFactors_;
|
||||
newISAM2->ordering_ = ordering_;
|
||||
newISAM2->params_ = params_;
|
||||
|
|
@ -390,8 +415,8 @@ public:
|
|||
* @param key
|
||||
* @return
|
||||
*/
|
||||
template<class KEY>
|
||||
typename KEY::Value calculateEstimate(const KEY& key) const;
|
||||
template<class VALUE>
|
||||
VALUE calculateEstimate(Key key) const;
|
||||
|
||||
/// @name Public members for non-typical usage
|
||||
//@{
|
||||
|
|
@ -404,7 +429,7 @@ public:
|
|||
Values calculateBestEstimate() const;
|
||||
|
||||
/** Access the current delta, computed during the last call to update */
|
||||
const Permuted<VectorValues>& getDelta() const { return delta_; }
|
||||
const Permuted<VectorValues>& getDelta() const;
|
||||
|
||||
/** Access the set of nonlinear factors */
|
||||
const GRAPH& getFactorsUnsafe() const { return nonlinearFactors_; }
|
||||
|
|
@ -416,7 +441,7 @@ public:
|
|||
size_t lastAffectedFactorCount;
|
||||
size_t lastAffectedCliqueCount;
|
||||
size_t lastAffectedMarkedCount;
|
||||
size_t lastBacksubVariableCount;
|
||||
mutable size_t lastBacksubVariableCount;
|
||||
size_t lastNnzTop;
|
||||
|
||||
ISAM2Params params() const { return params_; }
|
||||
|
|
@ -433,13 +458,10 @@ private:
|
|||
const FastVector<Index>& newKeys, const FactorGraph<GaussianFactor>::shared_ptr newFactors,
|
||||
const boost::optional<FastSet<size_t> >& constrainKeys, ISAM2Result& result);
|
||||
// void linear_update(const GaussianFactorGraph& newFactors);
|
||||
void updateDelta(bool forceFullSolve = false) const;
|
||||
|
||||
}; // ISAM2
|
||||
|
||||
/** Get the linear delta for the ISAM2 object, unpermuting the delta returned by ISAM2::getDelta() */
|
||||
template<class CONDITIONAL, class GRAPH>
|
||||
VectorValues optimize(const ISAM2<CONDITIONAL, GRAPH>& isam);
|
||||
|
||||
} /// namespace gtsam
|
||||
|
||||
#include <gtsam/nonlinear/ISAM2-inl.h>
|
||||
|
|
|
|||
|
|
@ -66,13 +66,24 @@ public:
|
|||
}
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
* @param keys The variables involved in this factor
|
||||
* Constructor from a vector of the keys involved in this factor
|
||||
*/
|
||||
NonlinearFactor(const std::vector<size_t>& keys) :
|
||||
Base(keys) {}
|
||||
|
||||
/**
|
||||
* Constructor from iterators over the keys involved in this factor
|
||||
*/
|
||||
template<class ITERATOR>
|
||||
NonlinearFactor(ITERATOR beginKeys, ITERATOR endKeys) {
|
||||
this->keys_.insert(this->keys_.end(), beginKeys, endKeys);
|
||||
}
|
||||
NonlinearFactor(ITERATOR beginKeys, ITERATOR endKeys) :
|
||||
Base(beginKeys, endKeys) {}
|
||||
|
||||
NonlinearFactor(Key key) : Base(key) {} ///< Convenience constructor for 1 key
|
||||
NonlinearFactor(Key key1, Key key2) : Base(key1, key2) {} ///< Convenience constructor for 2 keys
|
||||
NonlinearFactor(Key key1, Key key2, Key key3) : Base(key1, key2, key3) {} ///< Convenience constructor for 3 keys
|
||||
NonlinearFactor(Key key1, Key key2, Key key3, Key key4) : Base(key1, key2, key3, key4) {} ///< Convenience constructor for 4 keys
|
||||
NonlinearFactor(Key key1, Key key2, Key key3, Key key4, Key key5) : Base(key1, key2, key3, key4, key5) {} ///< Convenience constructor for 5 keys
|
||||
NonlinearFactor(Key key1, Key key2, Key key3, Key key4, Key key5, Key key6) : Base(key1, key2, key3, key4, key5, key6) {} ///< Convenience constructor for 6 keys
|
||||
|
||||
/// @}
|
||||
/// @name Testable
|
||||
|
|
@ -178,6 +189,13 @@ public:
|
|||
: Base(beginKeys, endKeys), noiseModel_(noiseModel) {
|
||||
}
|
||||
|
||||
NoiseModelFactor(const SharedNoiseModel& noiseModel, Key key) : Base(key), noiseModel_(noiseModel) {} ///< Convenience constructor for 1 key
|
||||
NoiseModelFactor(const SharedNoiseModel& noiseModel, Key key1, Key key2) : Base(key1, key2), noiseModel_(noiseModel) {} ///< Convenience constructor for 2 keys
|
||||
NoiseModelFactor(const SharedNoiseModel& noiseModel, Key key1, Key key2, Key key3) : Base(key1, key2, key3), noiseModel_(noiseModel) {} ///< Convenience constructor for 3 keys
|
||||
NoiseModelFactor(const SharedNoiseModel& noiseModel, Key key1, Key key2, Key key3, Key key4) : Base(key1, key2, key3, key4), noiseModel_(noiseModel) {} ///< Convenience constructor for 4 keys
|
||||
NoiseModelFactor(const SharedNoiseModel& noiseModel, Key key1, Key key2, Key key3, Key key4, Key key5) : Base(key1, key2, key3, key4, key5), noiseModel_(noiseModel) {} ///< Convenience constructor for 5 keys
|
||||
NoiseModelFactor(const SharedNoiseModel& noiseModel, Key key1, Key key2, Key key3, Key key4, Key key5, Key key6) : Base(key1, key2, key3, key4, key5, key6), noiseModel_(noiseModel) {} ///< Convenience constructor for 6 keys
|
||||
|
||||
protected:
|
||||
|
||||
/**
|
||||
|
|
@ -319,10 +337,7 @@ public:
|
|||
* @param key by which to look up X value in Values
|
||||
*/
|
||||
NoiseModelFactor1(const SharedNoiseModel& noiseModel, Key key1) :
|
||||
Base(noiseModel) {
|
||||
keys_.resize(1);
|
||||
keys_[0] = key1;
|
||||
}
|
||||
Base(noiseModel, key1) {}
|
||||
|
||||
/** Calls the 1-key specific version of evaluateError, which is pure virtual
|
||||
* so must be implemented in the derived class. */
|
||||
|
|
@ -389,11 +404,7 @@ public:
|
|||
* @param j2 key of the second variable
|
||||
*/
|
||||
NoiseModelFactor2(const SharedNoiseModel& noiseModel, Key j1, Key j2) :
|
||||
Base(noiseModel) {
|
||||
keys_.resize(2);
|
||||
keys_[0] = j1;
|
||||
keys_[1] = j2;
|
||||
}
|
||||
Base(noiseModel, j1, j2) {}
|
||||
|
||||
virtual ~NoiseModelFactor2() {}
|
||||
|
||||
|
|
@ -469,12 +480,7 @@ public:
|
|||
* @param j3 key of the third variable
|
||||
*/
|
||||
NoiseModelFactor3(const SharedNoiseModel& noiseModel, Key j1, Key j2, Key j3) :
|
||||
Base(noiseModel) {
|
||||
keys_.resize(3);
|
||||
keys_[0] = j1;
|
||||
keys_[1] = j2;
|
||||
keys_[2] = j3;
|
||||
}
|
||||
Base(noiseModel, j1, j2, j3) {}
|
||||
|
||||
virtual ~NoiseModelFactor3() {}
|
||||
|
||||
|
|
@ -552,13 +558,7 @@ public:
|
|||
* @param j4 key of the fourth variable
|
||||
*/
|
||||
NoiseModelFactor4(const SharedNoiseModel& noiseModel, Key j1, Key j2, Key j3, Key j4) :
|
||||
Base(noiseModel) {
|
||||
keys_.resize(4);
|
||||
keys_[0] = j1;
|
||||
keys_[1] = j2;
|
||||
keys_[2] = j3;
|
||||
keys_[3] = j4;
|
||||
}
|
||||
Base(noiseModel, j1, j2, j3, j4) {}
|
||||
|
||||
virtual ~NoiseModelFactor4() {}
|
||||
|
||||
|
|
@ -640,14 +640,7 @@ public:
|
|||
* @param j5 key of the fifth variable
|
||||
*/
|
||||
NoiseModelFactor5(const SharedNoiseModel& noiseModel, Key j1, Key j2, Key j3, Key j4, Key j5) :
|
||||
Base(noiseModel) {
|
||||
keys_.resize(5);
|
||||
keys_[0] = j1;
|
||||
keys_[1] = j2;
|
||||
keys_[2] = j3;
|
||||
keys_[3] = j4;
|
||||
keys_[4] = j5;
|
||||
}
|
||||
Base(noiseModel, j1, j2, j3, j4, j5) {}
|
||||
|
||||
virtual ~NoiseModelFactor5() {}
|
||||
|
||||
|
|
@ -733,15 +726,7 @@ public:
|
|||
* @param j6 key of the sixth variable
|
||||
*/
|
||||
NoiseModelFactor6(const SharedNoiseModel& noiseModel, Key j1, Key j2, Key j3, Key j4, Key j5, Key j6) :
|
||||
Base(noiseModel) {
|
||||
keys_.resize(6);
|
||||
keys_[0] = j1;
|
||||
keys_[1] = j2;
|
||||
keys_[2] = j3;
|
||||
keys_[3] = j4;
|
||||
keys_[4] = j5;
|
||||
keys_[5] = j6;
|
||||
}
|
||||
Base(noiseModel, j1, j2, j3, j4, j5, j6) {}
|
||||
|
||||
virtual ~NoiseModelFactor6() {}
|
||||
|
||||
|
|
|
|||
|
|
@ -78,7 +78,7 @@ namespace gtsam {
|
|||
"orderingCOLAMD: some variables in the graph are not constrained!");
|
||||
|
||||
// Compute a fill-reducing ordering with COLAMD
|
||||
Permutation::shared_ptr colamdPerm(Inference::PermutationCOLAMD(
|
||||
Permutation::shared_ptr colamdPerm(inference::PermutationCOLAMD(
|
||||
variableIndex));
|
||||
|
||||
// Permute the Ordering with the COLAMD ordering
|
||||
|
|
|
|||
|
|
@ -85,6 +85,14 @@ Index Ordering::pop_back(Key key) {
|
|||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Ordering::InvertedMap Ordering::invert() const {
|
||||
InvertedMap result;
|
||||
BOOST_FOREACH(const value_type& p, *this)
|
||||
result.insert(make_pair(p.second, p.first));
|
||||
return result;
|
||||
}
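What invert() does above is just a map reversal so linear results indexed by Index can be decoded back to Keys. A plain-STL equivalent, with a hypothetical helper name, would be:

#include <map>
#include <utility>

template <typename K, typename V>
std::map<V, K> invertMap(const std::map<K, V>& forward) {
  std::map<V, K> inverse;
  for (typename std::map<K, V>::const_iterator it = forward.begin(); it != forward.end(); ++it)
    inverse.insert(std::make_pair(it->second, it->first));   // flip (key, index) to (index, key)
  return inverse;
}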
|
||||
|
||||
/* ************************************************************************* */
|
||||
void Unordered::print(const string& s) const {
|
||||
cout << s << " (" << size() << "):";
|
||||
|
|
|
|||
|
|
@ -40,6 +40,7 @@ protected:
|
|||
|
||||
public:
|
||||
|
||||
typedef std::map<Index, Key> InvertedMap;
|
||||
typedef boost::shared_ptr<Ordering> shared_ptr;
|
||||
|
||||
typedef std::pair<const Key, Index> value_type;
|
||||
|
|
@ -203,6 +204,12 @@ public:
|
|||
/// Synonym for operator[](Key)
|
||||
Index& at(Key key) { return operator[](key); }
|
||||
|
||||
/**
|
||||
* Create an inverse mapping from Index->Key, useful for decoding linear systems
|
||||
* @return inverse mapping structure
|
||||
*/
|
||||
InvertedMap invert() const;
|
||||
|
||||
/// @}
|
||||
/// @name Testable
|
||||
/// @{
|
||||
|
|
|
|||
|
|
@ -113,9 +113,15 @@ public:
|
|||
bool operator==(const Symbol& comp) const {
|
||||
return comp.c_ == c_ && comp.j_ == j_;
|
||||
}
|
||||
bool operator==(Key comp) const {
|
||||
return comp == (Key)(*this);
|
||||
}
|
||||
bool operator!=(const Symbol& comp) const {
|
||||
return comp.c_ != c_ || comp.j_ != j_;
|
||||
}
|
||||
bool operator!=(Key comp) const {
|
||||
return comp != (Key)(*this);
|
||||
}
|
||||
|
||||
/** Return a filter function that returns true when evaluated on a Key whose
|
||||
* character (when converted to a Symbol) matches \c c. Use this with the
|
||||
|
|
@ -138,5 +144,34 @@ private:
|
|||
}
|
||||
};
|
||||
|
||||
namespace symbol_shorthand {
|
||||
inline Key A(size_t j) { return Symbol('a', j); }
|
||||
inline Key B(size_t j) { return Symbol('b', j); }
|
||||
inline Key C(size_t j) { return Symbol('c', j); }
|
||||
inline Key D(size_t j) { return Symbol('d', j); }
|
||||
inline Key E(size_t j) { return Symbol('e', j); }
|
||||
inline Key F(size_t j) { return Symbol('f', j); }
|
||||
inline Key G(size_t j) { return Symbol('g', j); }
|
||||
inline Key H(size_t j) { return Symbol('h', j); }
|
||||
inline Key I(size_t j) { return Symbol('i', j); }
|
||||
inline Key J(size_t j) { return Symbol('j', j); }
|
||||
inline Key K(size_t j) { return Symbol('k', j); }
|
||||
inline Key L(size_t j) { return Symbol('l', j); }
|
||||
inline Key M(size_t j) { return Symbol('m', j); }
|
||||
inline Key N(size_t j) { return Symbol('n', j); }
|
||||
inline Key O(size_t j) { return Symbol('o', j); }
|
||||
inline Key P(size_t j) { return Symbol('p', j); }
|
||||
inline Key Q(size_t j) { return Symbol('q', j); }
|
||||
inline Key R(size_t j) { return Symbol('r', j); }
|
||||
inline Key S(size_t j) { return Symbol('s', j); }
|
||||
inline Key T(size_t j) { return Symbol('t', j); }
|
||||
inline Key U(size_t j) { return Symbol('u', j); }
|
||||
inline Key V(size_t j) { return Symbol('v', j); }
|
||||
inline Key W(size_t j) { return Symbol('w', j); }
|
||||
inline Key X(size_t j) { return Symbol('x', j); }
|
||||
inline Key Y(size_t j) { return Symbol('y', j); }
|
||||
inline Key Z(size_t j) { return Symbol('z', j); }
|
||||
}
|
||||
|
||||
} // namespace gtsam
|
||||
|
||||
|
|
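A small usage note for the shorthand added above (assuming the surrounding Symbol/Key header is included): each function simply wraps Symbol construction and the conversion to Key.

using namespace gtsam;
using symbol_shorthand::X;
using symbol_shorthand::L;

void shorthandExample() {
  Key pose0 = X(0);       // equivalent to (Key)Symbol('x', 0)
  Key landmark3 = L(3);   // equivalent to (Key)Symbol('l', 3)
  (void)pose0; (void)landmark3;
}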
|
|||
|
|
@ -22,9 +22,9 @@
|
|||
* which is also a manifold element, and hence supports operations dim, retract, and localCoordinates.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <utility>
|
||||
#include <boost/type_traits/conditional.hpp>
|
||||
#include <boost/type_traits/is_base_of.hpp>
|
||||
|
||||
#include <gtsam/base/DerivedValue.h>
|
||||
#include <gtsam/nonlinear/Values.h> // Only so Eclipse finds class definition
|
||||
|
|
|
|||
|
|
@ -106,6 +106,17 @@ namespace gtsam {
|
|||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
const Value& Values::at(Key j) const {
|
||||
// Find the item
|
||||
KeyValueMap::const_iterator item = values_.find(j);
|
||||
|
||||
// Throw exception if it does not exist
|
||||
if(item == values_.end())
|
||||
throw ValuesKeyDoesNotExist("retrieve", j);
|
||||
return *item->second;
|
||||
}
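A hypothetical caller-side sketch of the at(Key) overload above; the exception type is the one thrown in the snippet, and the enclosing function name is made up for illustration.

void lookupExample(const gtsam::Values& values, gtsam::Key j) {
  try {
    const gtsam::Value& v = values.at(j);   // generic base-class access
    (void)v;
  } catch (const gtsam::ValuesKeyDoesNotExist& /*e*/) {
    // the key was never inserted into this Values container
  }
}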
/* ************************************************************************* */
void Values::insert(Key j, const Value& val) {
Key key = j; // Non-const duplicate to deal with non-const insert argument

@@ -34,9 +34,7 @@
#include <boost/function.hpp>
#include <boost/bind.hpp>
#include <boost/ptr_container/serialize_ptr_map.hpp>
#include <boost/range/iterator_range.hpp>
#include <boost/range/adaptor/filtered.hpp>
#include <boost/range/adaptor/transformed.hpp>
#include <boost/iterator_adaptors.hpp>
#include <boost/lambda/lambda.hpp>

#include <gtsam/base/Value.h>

@@ -124,63 +122,125 @@ namespace gtsam {
struct _KeyValuePair {
const Key key; ///< The key
ValueType& value; ///< The value
const Key& first; ///< For std::pair compatibility, the key
ValueType& second; ///< For std::pair compatibility, the value

_KeyValuePair(Key _key, ValueType& _value) : key(_key), value(_value), first(key), second(value) {}
_KeyValuePair(Key _key, ValueType& _value) : key(_key), value(_value) {}
};

template<class ValueType>
struct _ConstKeyValuePair {
const Key key; ///< The key
const ValueType& value; ///< The value
const Key& first; ///< For std::pair compatibility, the key
const ValueType& second; ///< For std::pair compatibility, the value

_ConstKeyValuePair(Key _key, const Value& _value) : key(_key), value(_value), first(key), second(value) {}
_ConstKeyValuePair(Key _key, const ValueType& _value) : key(_key), value(_value) {}
_ConstKeyValuePair(const _KeyValuePair<ValueType>& rhs) : key(rhs.key), value(rhs.value) {}
};

public:

template<class ValueType = Value>
class Filtered : public boost::transformed_range<
_KeyValuePair<ValueType>(*)(Values::KeyValuePair key_value),
const boost::filtered_range<
boost::function<bool(const ConstKeyValuePair&)>,
const boost::iterator_range<iterator> > > {
class Filtered {
public:
/** A key-value pair, with the value a specific derived Value type. */
typedef _KeyValuePair<ValueType> KeyValuePair;
typedef _ConstKeyValuePair<ValueType> ConstKeyValuePair;

typedef
boost::transform_iterator<
KeyValuePair(*)(Values::KeyValuePair),
boost::filter_iterator<
boost::function<bool(const Values::ConstKeyValuePair&)>,
Values::iterator> >
iterator;

typedef iterator const_iterator;

typedef
boost::transform_iterator<
ConstKeyValuePair(*)(Values::ConstKeyValuePair),
boost::filter_iterator<
boost::function<bool(const Values::ConstKeyValuePair&)>,
Values::const_iterator> >
const_const_iterator;

iterator begin() { return begin_; }
iterator end() { return end_; }
const_iterator begin() const { return begin_; }
const_iterator end() const { return end_; }
const_const_iterator beginConst() const { return constBegin_; }
const_const_iterator endConst() const { return constEnd_; }

/** Returns the number of values in this view */
size_t size() const {
size_t i = 0;
for (const_const_iterator it = beginConst(); it != endConst(); ++it)
++i;
return i;
}

private:
typedef boost::transformed_range<
KeyValuePair(*)(Values::KeyValuePair key_value),
const boost::filtered_range<
boost::function<bool(const Values::ConstKeyValuePair&)>,
const boost::iterator_range<iterator> > > Base;

Filtered(const Base& base) : Base(base) {}
Filtered(const boost::function<bool(const Values::ConstKeyValuePair&)>& filter, Values& values) :
begin_(boost::make_transform_iterator(
boost::make_filter_iterator(
filter, values.begin(), values.end()),
&castHelper<ValueType, KeyValuePair, Values::KeyValuePair>)),
end_(boost::make_transform_iterator(
boost::make_filter_iterator(
filter, values.end(), values.end()),
&castHelper<ValueType, KeyValuePair, Values::KeyValuePair>)),
constBegin_(boost::make_transform_iterator(
boost::make_filter_iterator(
filter, ((const Values&)values).begin(), ((const Values&)values).end()),
&castHelper<const ValueType, ConstKeyValuePair, Values::ConstKeyValuePair>)),
constEnd_(boost::make_transform_iterator(
boost::make_filter_iterator(
filter, ((const Values&)values).end(), ((const Values&)values).end()),
&castHelper<const ValueType, ConstKeyValuePair, Values::ConstKeyValuePair>)) {}

friend class Values;
iterator begin_;
iterator end_;
const_const_iterator constBegin_;
const_const_iterator constEnd_;
};

template<class ValueType = Value>
class ConstFiltered : public boost::transformed_range<
_ConstKeyValuePair<ValueType>(*)(Values::ConstKeyValuePair key_value),
const boost::filtered_range<
boost::function<bool(const ConstKeyValuePair&)>,
const boost::iterator_range<const_iterator> > > {
class ConstFiltered {
public:
/** A const key-value pair, with the value a specific derived Value type. */
typedef _ConstKeyValuePair<ValueType> KeyValuePair;

typedef typename Filtered<ValueType>::const_const_iterator iterator;
typedef typename Filtered<ValueType>::const_const_iterator const_iterator;

/** Conversion from Filtered to ConstFiltered */
ConstFiltered(const Filtered<ValueType>& rhs) :
begin_(rhs.beginConst()),
end_(rhs.endConst()) {}

iterator begin() { return begin_; }
iterator end() { return end_; }
const_iterator begin() const { return begin_; }
const_iterator end() const { return end_; }

/** Returns the number of values in this view */
size_t size() const {
size_t i = 0;
for (const_iterator it = begin(); it != end(); ++it)
++i;
return i;
}

private:
typedef boost::transformed_range<
KeyValuePair(*)(Values::ConstKeyValuePair key_value),
const boost::filtered_range<
boost::function<bool(const Values::ConstKeyValuePair&)>,
const boost::iterator_range<const_iterator> > > Base;

ConstFiltered(const Base& base) : Base(base) {}

friend class Values;
const_iterator begin_;
const_iterator end_;
ConstFiltered(const boost::function<bool(const Values::ConstKeyValuePair&)>& filter, const Values& values) {
// We remove the const from values to create a non-const Filtered
// view, then pull the const_iterators out of it.
const Filtered<ValueType> filtered(filter, const_cast<Values&>(values));
begin_ = filtered.beginConst();
end_ = filtered.endConst();
}
};

/** Default constructor creates an empty Values class */

@@ -189,6 +249,24 @@ namespace gtsam {
/** Copy constructor duplicates all keys and values */
Values(const Values& other);

/** Constructor from a Filtered view copies out all values */
template<class ValueType>
Values(const Filtered<ValueType>& view) {
BOOST_FOREACH(const typename Filtered<ValueType>::KeyValuePair& key_value, view) {
Key key = key_value.key;
insert(key, key_value.value);
}
}

/** Constructor from Const Filtered view */
template<class ValueType>
Values(const ConstFiltered<ValueType>& view) {
BOOST_FOREACH(const typename ConstFiltered<ValueType>::KeyValuePair& key_value, view) {
Key key = key_value.key;
insert(key, key_value.value);
}
}

/// @name Testable
/// @{

@@ -210,6 +288,13 @@ namespace gtsam {
template<typename ValueType>
const ValueType& at(Key j) const;

/** Retrieve a variable by key \c j. This version returns a reference
 * to the base Value class, and needs to be casted before use.
 * @param j Retrieve the value associated with this key
 * @return A const reference to the stored value
 */
const Value& at(Key j) const;

#if 0
/** Retrieve a variable by key \c j. This non-templated version returns a
 * special ValueAutomaticCasting object that may be assigned to the proper

@@ -358,12 +443,7 @@ namespace gtsam {
template<class ValueType>
Filtered<ValueType>
filter(const boost::function<bool(Key)>& filterFcn = (boost::lambda::_1, true)) {
return boost::adaptors::transform(
boost::adaptors::filter(
boost::make_iterator_range(begin(), end()),
boost::function<bool(const ConstKeyValuePair&)>(
boost::bind(&filterHelper<ValueType>, filterFcn, _1))),
&castHelper<ValueType, _KeyValuePair<ValueType>, KeyValuePair>);
return Filtered<ValueType>(boost::bind(&filterHelper<ValueType>, filterFcn, _1), *this);
}

/**

@@ -405,12 +485,7 @@ namespace gtsam {
template<class ValueType>
ConstFiltered<ValueType>
filter(const boost::function<bool(Key)>& filterFcn = (boost::lambda::_1, true)) const {
return boost::adaptors::transform(
boost::adaptors::filter(
boost::make_iterator_range(begin(), end()),
boost::function<bool(const ConstKeyValuePair&)>(
boost::bind(&filterHelper<ValueType>, filterFcn, _1))),
&castHelper<const ValueType, _ConstKeyValuePair<ValueType>, ConstKeyValuePair>);
return ConstFiltered<ValueType>(boost::bind(&filterHelper<ValueType>, filterFcn, _1), *this);
}
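
A usage sketch of the reworked filter() API, matching how the Values unit test later in this commit exercises it (Pose3 and the free function are illustrative assumptions):

// Sketch: filter a Values by stored type and copy the view into a new container.
#include <boost/foreach.hpp>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/geometry/Pose3.h>

using namespace gtsam;

void collectPoses(const Values& values) {
  Values::ConstFiltered<Pose3> poses = values.filter<Pose3>();  // lazy view, no copies yet
  BOOST_FOREACH(const Values::ConstFiltered<Pose3>::KeyValuePair& key_value, poses) {
    key_value.value.print("pose: ");  // value is already typed as const Pose3&
  }
  Values posesOnly(poses);  // the new converting constructor copies the view out
}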

private:

@@ -44,17 +44,17 @@ TEST(Key, KeySymbolEncoding) {
Key key = 0x6100000000000005;
string str = "a5";

LONGS_EQUAL(key, (Key)symbol);
assert_equal(str, DefaultKeyFormatter(symbol));
assert_equal(symbol, Symbol(key));
EXPECT_LONGS_EQUAL(key, (Key)symbol);
EXPECT(assert_equal(str, DefaultKeyFormatter(symbol)));
EXPECT(assert_equal(symbol, Symbol(key)));
} else if(sizeof(Key) == 4) {
Symbol symbol(0x61, 5);
Key key = 0x61000005;
string str = "a5";

LONGS_EQUAL(key, (Key)symbol);
assert_equal(str, DefaultKeyFormatter(symbol));
assert_equal(symbol, Symbol(key));
EXPECT_LONGS_EQUAL(key, (Key)symbol);
EXPECT(assert_equal(str, DefaultKeyFormatter(symbol)));
EXPECT(assert_equal(symbol, Symbol(key)));
}
}
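
The constants in this test are consistent with packing the symbol character into the most significant byte of the integer Key and the index into the remaining bytes. A hand-rolled check of that layout, derived only from the values above and not from Symbol's implementation:

// Sketch: the 8-byte encoding implied by 'a' (0x61) with index 5 == 0x6100000000000005.
#include <cassert>
#include <stdint.h>

uint64_t packKey64(unsigned char c, uint64_t j) {
  return (static_cast<uint64_t>(c) << 56) | (j & 0x00FFFFFFFFFFFFFFull);
}

int main() {
  assert(packKey64('a', 5) == 0x6100000000000005ull);
  return 0;
}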

@@ -18,6 +18,7 @@
#include <gtsam/base/TestableAssertions.h>
#include <gtsam/nonlinear/Ordering.h>

using namespace std;
using namespace gtsam;

/* ************************************************************************* */

@@ -51,6 +52,25 @@ TEST( testOrdering, simple_modifications ) {
EXPECT(assert_equal(expectedFinal, ordering));
}

/* ************************************************************************* */
TEST( testOrdering, invert ) {
// creates a map with the opposite mapping: Index->Key
Ordering ordering;

// create an ordering
Symbol x1('x', 1), x2('x', 2), x3('x', 3), x4('x', 4);
ordering += x1, x2, x3, x4;

Ordering::InvertedMap actual = ordering.invert();
Ordering::InvertedMap expected;
expected.insert(make_pair(0, x1));
expected.insert(make_pair(1, x2));
expected.insert(make_pair(2, x3));
expected.insert(make_pair(3, x4));

EXPECT(assert_container_equality(expected, actual));
}

/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
/* ************************************************************************* */

@@ -274,6 +274,7 @@ TEST(Values, filter) {
// Filter by key
int i = 0;
Values::Filtered<Value> filtered = values.filter(boost::bind(std::greater_equal<Key>(), _1, 2));
EXPECT_LONGS_EQUAL(2, filtered.size());
BOOST_FOREACH(const Values::Filtered<>::KeyValuePair& key_value, filtered) {
if(i == 0) {
LONGS_EQUAL(2, key_value.key);

@@ -288,23 +289,39 @@ TEST(Values, filter) {
}
++ i;
}
LONGS_EQUAL(2, i);
EXPECT_LONGS_EQUAL(2, i);

// construct a values with the view
Values actualSubValues1(filtered);
Values expectedSubValues1;
expectedSubValues1.insert(2, pose2);
expectedSubValues1.insert(3, pose3);
EXPECT(assert_equal(expectedSubValues1, actualSubValues1));

// Filter by type
i = 0;
BOOST_FOREACH(const Values::Filtered<Pose3>::KeyValuePair& key_value, values.filter<Pose3>()) {
Values::ConstFiltered<Pose3> pose_filtered = values.filter<Pose3>();
EXPECT_LONGS_EQUAL(2, pose_filtered.size());
BOOST_FOREACH(const Values::ConstFiltered<Pose3>::KeyValuePair& key_value, pose_filtered) {
if(i == 0) {
LONGS_EQUAL(1, key_value.key);
EXPECT_LONGS_EQUAL(1, key_value.key);
EXPECT(assert_equal(pose1, key_value.value));
} else if(i == 1) {
LONGS_EQUAL(3, key_value.key);
EXPECT_LONGS_EQUAL(3, key_value.key);
EXPECT(assert_equal(pose3, key_value.value));
} else {
EXPECT(false);
}
++ i;
}
LONGS_EQUAL(2, i);
EXPECT_LONGS_EQUAL(2, i);

// construct a values with the view
Values actualSubValues2(pose_filtered);
Values expectedSubValues2;
expectedSubValues2.insert(1, pose1);
expectedSubValues2.insert(3, pose3);
EXPECT(assert_equal(expectedSubValues2, actualSubValues2));
}

/* ************************************************************************* */

@@ -63,10 +63,13 @@ namespace gtsam {
 * @param K shared pointer to the constant calibration
 */
GenericProjectionFactor(const Point2& measured, const SharedNoiseModel& model,
const Symbol poseKey, Key pointKey, const shared_ptrK& K) :
const Key poseKey, Key pointKey, const shared_ptrK& K) :
Base(model, poseKey, pointKey), measured_(measured), K_(K) {
}

/** Virtual destructor */
virtual ~GenericProjectionFactor() {}

/**
 * print
 * @param s optional string naming the factor

@@ -54,7 +54,7 @@ public:
Base(model, poseKey, landmarkKey), measured_(measured), K_(K) {
}

~GenericStereoFactor() {} ///< destructor
virtual ~GenericStereoFactor() {} ///< Virtual destructor

/**
 * print

@@ -126,6 +126,13 @@ TEST( planarSLAM, BearingRangeFactor_equals )
EXPECT(assert_inequal(factor1, factor2, 1e-5));
}

/* ************************************************************************* */
TEST( planarSLAM, BearingRangeFactor_poses )
{
typedef BearingRangeFactor<Pose2,Pose2> PoseBearingRange;
PoseBearingRange actual(PoseKey(2), PoseKey(3), Rot2::fromDegrees(60.0), 12.3, sigma2);
}

/* ************************************************************************* */
TEST( planarSLAM, PoseConstraint_equals )
{

@@ -25,7 +25,7 @@ if (GTSAM_BUILD_TESTS)
list(REMOVE_ITEM tests_srcs ${tests_exclude})
foreach(test_src ${tests_srcs})
get_filename_component(test_base ${test_src} NAME_WE)
set( test_bin tests.${test_base} )
set( test_bin ${test_base} )
message(STATUS "Adding Test ${test_bin}")
add_executable(${test_bin} ${test_src})
add_dependencies(check.tests ${test_bin})

@@ -43,7 +43,7 @@ if (GTSAM_BUILD_TIMING)
list(REMOVE_ITEM timing_srcs ${tests_exclude})
foreach(time_src ${timing_srcs})
get_filename_component(time_base ${time_src} NAME_WE)
set( time_bin tests.${time_base} )
set( time_bin ${time_base} )
message(STATUS "Adding Timing Benchmark ${time_bin}")
add_executable(${time_bin} ${time_src})
add_dependencies(timing.tests ${time_bin})

@@ -20,6 +20,7 @@
#include <gtsam/inference/BayesTree-inl.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/GaussianSequentialSolver.h>
#include <gtsam/linear/GaussianBayesTree.h>
#include <gtsam/nonlinear/DoglegOptimizerImpl.h>
#include <gtsam/slam/pose2SLAM.h>
#include <gtsam/slam/smallExample.h>
@@ -102,7 +103,7 @@ TEST(DoglegOptimizer, ComputeSteepestDescentPoint) {
VectorValues expected = gradientValues; scal(step, expected);

// Compute the steepest descent point with the dogleg function
VectorValues actual = DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn);
VectorValues actual = optimizeGradientSearch(gbn);

// Check that points agree
EXPECT(assert_equal(expected, actual, 1e-5));

@@ -290,7 +291,7 @@ TEST(DoglegOptimizer, ComputeSteepestDescentPointBT) {
expectedFromBN[4] = Vector_(2, 0.300134, 0.423233);

// Compute the steepest descent point with the dogleg function
VectorValues actual = DoglegOptimizerImpl::ComputeSteepestDescentPoint(bt);
VectorValues actual = optimizeGradientSearch(bt);

// Check that points agree
EXPECT(assert_equal(expected, actual, 1e-5));

@@ -324,7 +325,7 @@ TEST(DoglegOptimizer, ComputeBlend) {
4, Vector_(2, 49.0,50.0), Matrix_(2,2, 51.0,52.0,0.0,54.0), ones(2)));

// Compute steepest descent point
VectorValues xu = DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn);
VectorValues xu = optimizeGradientSearch(gbn);

// Compute Newton's method point
VectorValues xn = optimize(gbn);

@@ -362,18 +363,18 @@ TEST(DoglegOptimizer, ComputeDoglegPoint) {
// Compute dogleg point for different deltas

double Delta1 = 0.5; // Less than steepest descent
VectorValues actual1 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta1, DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn), optimize(gbn));
VectorValues actual1 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta1, optimizeGradientSearch(gbn), optimize(gbn));
DOUBLES_EQUAL(Delta1, actual1.vector().norm(), 1e-5);

double Delta2 = 1.5; // Between steepest descent and Newton's method
VectorValues expected2 = DoglegOptimizerImpl::ComputeBlend(Delta2, DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn), optimize(gbn));
VectorValues actual2 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta2, DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn), optimize(gbn));
VectorValues expected2 = DoglegOptimizerImpl::ComputeBlend(Delta2, optimizeGradientSearch(gbn), optimize(gbn));
VectorValues actual2 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta2, optimizeGradientSearch(gbn), optimize(gbn));
DOUBLES_EQUAL(Delta2, actual2.vector().norm(), 1e-5);
EXPECT(assert_equal(expected2, actual2));

double Delta3 = 5.0; // Larger than Newton's method point
VectorValues expected3 = optimize(gbn);
VectorValues actual3 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta3, DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn), optimize(gbn));
VectorValues actual3 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta3, optimizeGradientSearch(gbn), optimize(gbn));
EXPECT(assert_equal(expected3, actual3));
}
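
The three Delta cases above exercise the standard dogleg selection rule: truncate the gradient step when the trust region is small, take the full Newton step when it fits, and otherwise blend along the segment between the two. A self-contained schematic of that rule with plain vectors (this illustrates the textbook rule the test checks, not GTSAM's own implementation):

// Sketch: dogleg point selection on std::vector<double>.
#include <cmath>
#include <vector>

static double norm(const std::vector<double>& v) {
  double s = 0.0;
  for (size_t i = 0; i < v.size(); ++i) s += v[i] * v[i];
  return std::sqrt(s);
}

static std::vector<double> scale(std::vector<double> v, double a) {
  for (size_t i = 0; i < v.size(); ++i) v[i] *= a;
  return v;
}

// Point on the segment from xu to xn whose norm equals Delta, i.e. the role
// ComputeBlend plays above: solve ||xu + t (xn - xu)|| = Delta for t in [0,1].
static std::vector<double> blend(double Delta, const std::vector<double>& xu,
                                 const std::vector<double>& xn) {
  double a = 0.0, b = 0.0, c = -Delta * Delta;
  for (size_t i = 0; i < xu.size(); ++i) {
    double d = xn[i] - xu[i];
    a += d * d;
    b += 2.0 * xu[i] * d;
    c += xu[i] * xu[i];
  }
  double t = (-b + std::sqrt(b * b - 4.0 * a * c)) / (2.0 * a);
  std::vector<double> out(xu.size());
  for (size_t i = 0; i < out.size(); ++i) out[i] = xu[i] + t * (xn[i] - xu[i]);
  return out;
}

// xu: gradient-search (steepest descent) point, xn: Newton's method point.
std::vector<double> doglegPoint(double Delta, const std::vector<double>& xu,
                                const std::vector<double>& xn) {
  if (Delta <= norm(xu)) return scale(xu, Delta / norm(xu)); // Delta1 case: result norm equals Delta
  if (Delta >= norm(xn)) return xn;                          // Delta3 case: full Newton step
  return blend(Delta, xu, xn);                               // Delta2 case: blended point
}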

@@ -362,8 +362,8 @@ public:
TEST( ExtendedKalmanFilter, nonlinear ) {

// Create the set of expected output TestValues (generated using Matlab Kalman Filter)
Point2 expected_predict[11];
Point2 expected_update[11];
Point2 expected_predict[10];
Point2 expected_update[10];
expected_predict[0] = Point2(0.81,0.99);
expected_update[0] = Point2(0.824926197027,0.29509808);
expected_predict[1] = Point2(0.680503230541,0.24343413);

@@ -409,11 +409,11 @@ TEST( ExtendedKalmanFilter, nonlinear ) {
Point2 x_predict, x_update;
for(unsigned int i = 0; i < 10; ++i){
// Create motion factor
NonlinearMotionModel motionFactor(Symbol('x',i-1), Symbol('x',i));
NonlinearMotionModel motionFactor(Symbol('x',i), Symbol('x',i+1));
x_predict = ekf.predict(motionFactor);

// Create a measurement factor
NonlinearMeasurementModel measurementFactor(Symbol('x',i), Vector_(1, z[i]));
NonlinearMeasurementModel measurementFactor(Symbol('x',i+1), Vector_(1, z[i]));
x_update = ekf.update(measurementFactor);

EXPECT(assert_equal(expected_predict[i],x_predict, 1e-6));

@@ -119,64 +119,25 @@ TEST( GaussianBayesNet, optimize2 )
}

/* ************************************************************************* */
TEST( GaussianBayesNet, backSubstitute )
TEST( GaussianBayesNet, optimize3 )
{
// y=R*x, x=inv(R)*y
// 2 = 1 1 -1
// 3 1 3
// 9 = 1 1 4
// 5 1 5
// NOTE: we are supplying a new RHS here
GaussianBayesNet cbn = createSmallGaussianBayesNet();

VectorValues y(vector<size_t>(2,1)), x(vector<size_t>(2,1));
y[_x_] = Vector_(1,2.);
y[_y_] = Vector_(1,3.);
x[_x_] = Vector_(1,-1.);
x[_y_] = Vector_(1, 3.);
VectorValues expected(vector<size_t>(2,1)), x(vector<size_t>(2,1));
expected[_x_] = Vector_(1, 4.);
expected[_y_] = Vector_(1, 5.);

// test functional version
VectorValues actual = backSubstitute(cbn,y);
EXPECT(assert_equal(x,actual));
VectorValues actual = optimize(cbn);
EXPECT(assert_equal(expected,actual));

// test imperative version
backSubstituteInPlace(cbn,y);
EXPECT(assert_equal(x,y));
}

/* ************************************************************************* */
TEST( GaussianBayesNet, rhs )
{
// y=R*x, x=inv(R)*y
// 2 = 1 1 -1
// 3 1 3
GaussianBayesNet cbn = createSmallGaussianBayesNet();
VectorValues expected = gtsam::optimize(cbn);
VectorValues d = rhs(cbn);
VectorValues actual = backSubstitute(cbn, d);
EXPECT(assert_equal(expected, actual));
}

/* ************************************************************************* */
TEST( GaussianBayesNet, rhs_with_sigmas )
{
Matrix R11 = Matrix_(1, 1, 1.0), S12 = Matrix_(1, 1, 1.0);
Matrix R22 = Matrix_(1, 1, 1.0);
Vector d1(1), d2(1);
d1(0) = 9;
d2(0) = 5;
Vector tau(1);
tau(0) = 0.25;

// define nodes and specify in reverse topological sort (i.e. parents last)
GaussianConditional::shared_ptr Px_y(new GaussianConditional(_x_, d1, R11,
_y_, S12, tau)), Py(new GaussianConditional(_y_, d2, R22, tau));
GaussianBayesNet cbn;
cbn.push_back(Px_y);
cbn.push_back(Py);

VectorValues expected = gtsam::optimize(cbn);
VectorValues d = rhs(cbn);
VectorValues actual = backSubstitute(cbn, d);
EXPECT(assert_equal(expected, actual));
optimizeInPlace(cbn,x);
EXPECT(assert_equal(expected,x));
}
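
For reference, the new optimize3 expectation can be checked by hand: createSmallGaussianBayesNet encodes the upper-triangular system R x = d with R = [1 1; 0 1] and d = [9; 5] (the commented matrix above), so back-substitution gives x_y = 5 and x_x = 9 - 1*5 = 4, matching the expected VectorValues. A tiny stand-alone check of that arithmetic (plain doubles, not the GTSAM types):

// Sketch: back-substitution for the 2x2 system used by the test.
#include <cassert>

int main() {
  double d1 = 9.0, d2 = 5.0; // right-hand side
  double r12 = 1.0;          // off-diagonal entry of R (unit diagonal)
  double xy = d2;            // solve the last variable first
  double xx = d1 - r12 * xy; // substitute upward
  assert(xx == 4.0 && xy == 5.0);
  return 0;
}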

/* ************************************************************************* */

@@ -10,7 +10,7 @@
 * -------------------------------------------------------------------------- */

/**
 * @file testGaussianFactorGraph.cpp
 * @file testGaussianFactorGraphB.cpp
 * @brief Unit tests for Linear Factor Graph
 * @author Christian Potthast
 **/

@@ -192,96 +192,116 @@ TEST( GaussianFactorGraph, equals ) {
// EXPECT(assert_equal(expected,*actual));
//}

///* ************************************************************************* */
//TEST( GaussianFactorGraph, eliminateOne_x1 )
//{
// Ordering ordering; ordering += kx(1),kl(1),kx(2);
// GaussianFactorGraph fg = createGaussianFactorGraph(ordering);
// GaussianConditional::shared_ptr actual = GaussianSequentialSolver::EliminateUntil(fg, 1);
//
// // create expected Conditional Gaussian
// Matrix I = 15*eye(2), R11 = I, S12 = -0.111111*I, S13 = -0.444444*I;
// Vector d = Vector_(2, -0.133333, -0.0222222), sigma = ones(2);
// GaussianConditional expected(ordering[kx(1)],15*d,R11,ordering[kl(1)],S12,ordering[kx(2)],S13,sigma);
//
// EXPECT(assert_equal(expected,*actual,tol));
//}
//
///* ************************************************************************* */
//TEST( GaussianFactorGraph, eliminateOne_x2 )
//{
// Ordering ordering; ordering += kx(2),kl(1),kx(1);
// GaussianFactorGraph fg = createGaussianFactorGraph(ordering);
// GaussianConditional::shared_ptr actual = GaussianSequentialSolver::EliminateUntil(fg, 1);
//
// // create expected Conditional Gaussian
// double sig = 0.0894427;
// Matrix I = eye(2)/sig, R11 = I, S12 = -0.2*I, S13 = -0.8*I;
// Vector d = Vector_(2, 0.2, -0.14)/sig, sigma = ones(2);
// GaussianConditional expected(ordering[kx(2)],d,R11,ordering[kl(1)],S12,ordering[kx(1)],S13,sigma);
//
// EXPECT(assert_equal(expected,*actual,tol));
//}
//
///* ************************************************************************* */
//TEST( GaussianFactorGraph, eliminateOne_l1 )
//{
// Ordering ordering; ordering += kl(1),kx(1),kx(2);
// GaussianFactorGraph fg = createGaussianFactorGraph(ordering);
// GaussianConditional::shared_ptr actual = GaussianSequentialSolver::EliminateUntil(fg, 1);
//
// // create expected Conditional Gaussian
// double sig = sqrt(2)/10.;
// Matrix I = eye(2)/sig, R11 = I, S12 = -0.5*I, S13 = -0.5*I;
// Vector d = Vector_(2, -0.1, 0.25)/sig, sigma = ones(2);
// GaussianConditional expected(ordering[kl(1)],d,R11,ordering[kx(1)],S12,ordering[kx(2)],S13,sigma);
//
// EXPECT(assert_equal(expected,*actual,tol));
//}
/* ************************************************************************* */
TEST( GaussianFactorGraph, eliminateOne_x1 )
{
Ordering ordering; ordering += kx(1),kl(1),kx(2);
GaussianFactorGraph fg = createGaussianFactorGraph(ordering);

///* ************************************************************************* */
//TEST( GaussianFactorGraph, eliminateOne_x1_fast )
//{
// GaussianFactorGraph fg = createGaussianFactorGraph();
// GaussianConditional::shared_ptr actual = fg.eliminateOne(kx(1), false);
//
// // create expected Conditional Gaussian
// Matrix I = 15*eye(2), R11 = I, S12 = -0.111111*I, S13 = -0.444444*I;
// Vector d = Vector_(2, -0.133333, -0.0222222), sigma = ones(2);
// GaussianConditional expected(kx(1),15*d,R11,kl(1),S12,kx(2),S13,sigma);
//
// EXPECT(assert_equal(expected,*actual,tol));
//}
//
///* ************************************************************************* */
//TEST( GaussianFactorGraph, eliminateOne_x2_fast )
//{
// GaussianFactorGraph fg = createGaussianFactorGraph();
// GaussianConditional::shared_ptr actual = fg.eliminateOne(kx(2), false);
//
// // create expected Conditional Gaussian
// double sig = 0.0894427;
// Matrix I = eye(2)/sig, R11 = I, S12 = -0.2*I, S13 = -0.8*I;
// Vector d = Vector_(2, 0.2, -0.14)/sig, sigma = ones(2);
// GaussianConditional expected(kx(2),d,R11,kl(1),S12,kx(1),S13,sigma);
//
// EXPECT(assert_equal(expected,*actual,tol));
//}
//
///* ************************************************************************* */
//TEST( GaussianFactorGraph, eliminateOne_l1_fast )
//{
// GaussianFactorGraph fg = createGaussianFactorGraph();
// GaussianConditional::shared_ptr actual = fg.eliminateOne(kl(1), false);
//
// // create expected Conditional Gaussian
// double sig = sqrt(2)/10.;
// Matrix I = eye(2)/sig, R11 = I, S12 = -0.5*I, S13 = -0.5*I;
// Vector d = Vector_(2, -0.1, 0.25)/sig, sigma = ones(2);
// GaussianConditional expected(kl(1),d,R11,kx(1),S12,kx(2),S13,sigma);
//
// EXPECT(assert_equal(expected,*actual,tol));
//}
GaussianFactorGraph::FactorizationResult result = inference::eliminateOne(fg, 0, EliminateQR);

// create expected Conditional Gaussian
Matrix I = 15*eye(2), R11 = I, S12 = -0.111111*I, S13 = -0.444444*I;
Vector d = Vector_(2, -0.133333, -0.0222222), sigma = ones(2);
GaussianConditional expected(ordering[kx(1)],15*d,R11,ordering[kl(1)],S12,ordering[kx(2)],S13,sigma);

EXPECT(assert_equal(expected,*result.first,tol));
}

/* ************************************************************************* */
TEST( GaussianFactorGraph, eliminateOne_x2 )
{
Ordering ordering; ordering += kx(2),kl(1),kx(1);
GaussianFactorGraph fg = createGaussianFactorGraph(ordering);
GaussianConditional::shared_ptr actual = inference::eliminateOne(fg, 0, EliminateQR).first;

// create expected Conditional Gaussian
double sig = 0.0894427;
Matrix I = eye(2)/sig, R11 = I, S12 = -0.2*I, S13 = -0.8*I;
Vector d = Vector_(2, 0.2, -0.14)/sig, sigma = ones(2);
GaussianConditional expected(ordering[kx(2)],d,R11,ordering[kl(1)],S12,ordering[kx(1)],S13,sigma);

EXPECT(assert_equal(expected,*actual,tol));
}

/* ************************************************************************* */
TEST( GaussianFactorGraph, eliminateOne_l1 )
{
Ordering ordering; ordering += kl(1),kx(1),kx(2);
GaussianFactorGraph fg = createGaussianFactorGraph(ordering);
GaussianConditional::shared_ptr actual = inference::eliminateOne(fg, 0, EliminateQR).first;

// create expected Conditional Gaussian
double sig = sqrt(2)/10.;
Matrix I = eye(2)/sig, R11 = I, S12 = -0.5*I, S13 = -0.5*I;
Vector d = Vector_(2, -0.1, 0.25)/sig, sigma = ones(2);
GaussianConditional expected(ordering[kl(1)],d,R11,ordering[kx(1)],S12,ordering[kx(2)],S13,sigma);

EXPECT(assert_equal(expected,*actual,tol));
}

/* ************************************************************************* */
TEST( GaussianFactorGraph, eliminateOne_x1_fast )
{
Ordering ordering; ordering += kx(1),kl(1),kx(2);
GaussianFactorGraph fg = createGaussianFactorGraph(ordering);
GaussianFactorGraph::FactorizationResult result = inference::eliminateOne(fg, ordering[kx(1)], EliminateQR);
GaussianConditional::shared_ptr conditional = result.first;
GaussianFactorGraph remaining = result.second;

// create expected Conditional Gaussian
Matrix I = 15*eye(2), R11 = I, S12 = -0.111111*I, S13 = -0.444444*I;
Vector d = Vector_(2, -0.133333, -0.0222222), sigma = ones(2);
GaussianConditional expected(ordering[kx(1)],15*d,R11,ordering[kl(1)],S12,ordering[kx(2)],S13,sigma);

// Create expected remaining new factor
JacobianFactor expectedFactor(1, Matrix_(4,2,
4.714045207910318, 0.,
0., 4.714045207910318,
0., 0.,
0., 0.),
2, Matrix_(4,2,
-2.357022603955159, 0.,
0., -2.357022603955159,
7.071067811865475, 0.,
0., 7.071067811865475),
Vector_(4, -0.707106781186547, 0.942809041582063, 0.707106781186547, -1.414213562373094), sharedUnit(4));

EXPECT(assert_equal(expected,*conditional,tol));
EXPECT(assert_equal((const GaussianFactor&)expectedFactor,*remaining.back(),tol));
}

/* ************************************************************************* */
TEST( GaussianFactorGraph, eliminateOne_x2_fast )
{
Ordering ordering; ordering += kx(1),kl(1),kx(2);
GaussianFactorGraph fg = createGaussianFactorGraph(ordering);
GaussianConditional::shared_ptr actual = inference::eliminateOne(fg, ordering[kx(2)], EliminateQR).first;

// create expected Conditional Gaussian
double sig = 0.0894427;
Matrix I = eye(2)/sig, R11 = I, S12 = -0.2*I, S13 = -0.8*I;
Vector d = Vector_(2, 0.2, -0.14)/sig, sigma = ones(2);
GaussianConditional expected(ordering[kx(2)],d,R11,ordering[kx(1)],S13,ordering[kl(1)],S12,sigma);

EXPECT(assert_equal(expected,*actual,tol));
}

/* ************************************************************************* */
TEST( GaussianFactorGraph, eliminateOne_l1_fast )
{
Ordering ordering; ordering += kx(1),kl(1),kx(2);
GaussianFactorGraph fg = createGaussianFactorGraph(ordering);
GaussianConditional::shared_ptr actual = inference::eliminateOne(fg, ordering[kl(1)], EliminateQR).first;

// create expected Conditional Gaussian
double sig = sqrt(2)/10.;
Matrix I = eye(2)/sig, R11 = I, S12 = -0.5*I, S13 = -0.5*I;
Vector d = Vector_(2, -0.1, 0.25)/sig, sigma = ones(2);
GaussianConditional expected(ordering[kl(1)],d,R11,ordering[kx(1)],S12,ordering[kx(2)],S13,sigma);

EXPECT(assert_equal(expected,*actual,tol));
}
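
The re-enabled tests above all follow the same pattern with the new inference::eliminateOne API, which returns the produced conditional paired with the factor graph that remains. A condensed sketch of that pattern (the header locations are assumptions; the call itself mirrors the test code):

// Sketch: eliminate one variable from a GaussianFactorGraph with QR.
#include <gtsam/inference/inference.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/GaussianConditional.h>
#include <gtsam/linear/JacobianFactor.h>

using namespace gtsam;

void eliminateFirstVariable(GaussianFactorGraph& fg) {
  GaussianFactorGraph::FactorizationResult result = inference::eliminateOne(fg, 0, EliminateQR);
  GaussianConditional::shared_ptr conditional = result.first;  // p(x0 | separator)
  GaussianFactorGraph remaining = result.second;               // factors left after elimination
  conditional->print("eliminated conditional");
  remaining.print("remaining factors");
}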

/* ************************************************************************* */
TEST( GaussianFactorGraph, eliminateAll )

@@ -439,7 +459,7 @@ TEST( GaussianFactorGraph, getOrdering)
{
Ordering original; original += kl(1),kx(1),kx(2);
FactorGraph<IndexFactor> symbolic(createGaussianFactorGraph(original));
Permutation perm(*Inference::PermutationCOLAMD(VariableIndex(symbolic)));
Permutation perm(*inference::PermutationCOLAMD(VariableIndex(symbolic)));
Ordering actual = original; actual.permuteWithInverse((*perm.inverse()));
Ordering expected; expected += kl(1),kx(2),kx(1);
EXPECT(assert_equal(expected,actual));

@@ -85,7 +85,7 @@ TEST_UNSAFE( ISAM, iSAM_smoother )
// Ordering ord; ord += kx(4),kx(3),kx(2),kx(1);
// GaussianFactorGraph factors1;
// for (int i=0;i<7;i++) factors1.push_back(smoother[i]);
// GaussianISAM actual(*Inference::Eliminate(factors1));
// GaussianISAM actual(*inference::Eliminate(factors1));
//
// // run iSAM with remaining factors
// GaussianFactorGraph factors2;

@@ -298,7 +298,7 @@ TEST_UNSAFE( BayesTree, balanced_smoother_shortcuts )
// varIndex.permute(toFront);
// BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, marginal) {
// factor->permuteWithInverse(toFrontInverse); }
// GaussianBayesNet actual = *Inference::EliminateUntil(marginal, C3->keys().size(), varIndex);
// GaussianBayesNet actual = *inference::EliminateUntil(marginal, C3->keys().size(), varIndex);
// actual.permuteWithInverse(toFront);
// EXPECT(assert_equal(expected,actual,tol));
//}

@@ -16,6 +16,7 @@ using namespace boost::assign;
#include <gtsam/nonlinear/Ordering.h>
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/GaussianSequentialSolver.h>
#include <gtsam/linear/GaussianBayesTree.h>
#include <gtsam/nonlinear/GaussianISAM2.h>
#include <gtsam/slam/smallExample.h>
#include <gtsam/slam/planarSLAM.h>

@@ -47,6 +48,8 @@ TEST(ISAM2, AddVariables) {

Permuted<VectorValues> delta(permutation, deltaUnpermuted);

vector<bool> replacedKeys(2, false);

Ordering ordering; ordering += planarSLAM::PointKey(0), planarSLAM::PoseKey(0);

GaussianISAM2<>::Nodes nodes(2);

@@ -75,17 +78,20 @@ TEST(ISAM2, AddVariables) {

Permuted<VectorValues> deltaExpected(permutationExpected, deltaUnpermutedExpected);

vector<bool> replacedKeysExpected(3, false);

Ordering orderingExpected; orderingExpected += planarSLAM::PointKey(0), planarSLAM::PoseKey(0), planarSLAM::PoseKey(1);

GaussianISAM2<>::Nodes nodesExpected(
3, GaussianISAM2<>::sharedClique());

// Expand initial state
GaussianISAM2<>::Impl::AddVariables(newTheta, theta, delta, ordering, nodes);
GaussianISAM2<>::Impl::AddVariables(newTheta, theta, delta, replacedKeys, ordering, nodes);

EXPECT(assert_equal(thetaExpected, theta));
EXPECT(assert_equal(deltaUnpermutedExpected, deltaUnpermuted));
EXPECT(assert_equal(deltaExpected.permutation(), delta.permutation()));
EXPECT(assert_container_equality(replacedKeysExpected, replacedKeys));
EXPECT(assert_equal(orderingExpected, ordering));
}

@@ -162,15 +168,13 @@ TEST(ISAM2, optimize2) {

// Expected vector
VectorValues expected(1, 3);
conditional->rhs(expected);
conditional->solveInPlace(expected);

// Clique
GaussianISAM2<>::sharedClique clique(
GaussianISAM2<>::Clique::Create(make_pair(conditional,GaussianFactor::shared_ptr())));
VectorValues actual(theta.dims(ordering));
conditional->rhs(actual);
optimize2(clique, actual);
internal::optimizeInPlace(clique, actual);

// expected.print("expected: ");
// actual.print("actual: ");

@@ -10,7 +10,7 @@
 * -------------------------------------------------------------------------- */

/**
 * @file testGaussianJunctionTree.cpp
 * @file testGaussianJunctionTreeB.cpp
 * @date Jul 8, 2010
 * @author nikai
 */

@@ -10,7 +10,7 @@
 * -------------------------------------------------------------------------- */

/**
 * @file testInference.cpp
 * @file testInferenceB.cpp
 * @brief Unit tests for functionality declared in inference.h
 * @author Frank Dellaert
 */

@@ -31,7 +31,7 @@ using namespace gtsam;
/* ************************************************************************* */

/* ************************************************************************* */
TEST( Inference, marginals )
TEST( inference, marginals )
{
using namespace example;
// create and marginalize a small Bayes net on "x"

@@ -46,7 +46,7 @@ TEST( Inference, marginals )
}

/* ************************************************************************* */
TEST( Inference, marginals2)
TEST( inference, marginals2)
{
planarSLAM::Graph fg;
SharedDiagonal poseModel(sharedSigma(3, 0.1));

@@ -10,7 +10,7 @@
 * -------------------------------------------------------------------------- */

/**
 * @file testSymbolicBayesNet.cpp
 * @file testSymbolicBayesNetB.cpp
 * @brief Unit tests for a symbolic Bayes chain
 * @author Frank Dellaert
 */

@@ -34,11 +34,6 @@ using namespace example;
Key kx(size_t i) { return Symbol('x',i); }
Key kl(size_t i) { return Symbol('l',i); }

//Symbol _B_('B', 0), _L_('L', 0);
//IndexConditional::shared_ptr
// B(new IndexConditional(_B_)),
// L(new IndexConditional(_L_, _B_));

/* ************************************************************************* */
TEST( SymbolicBayesNet, constructor )
{

@@ -64,6 +59,18 @@ TEST( SymbolicBayesNet, constructor )
CHECK(assert_equal(expected, actual));
}

/* ************************************************************************* */
TEST( SymbolicBayesNet, FromGaussian) {
SymbolicBayesNet expected;
expected.push_back(IndexConditional::shared_ptr(new IndexConditional(0, 1)));
expected.push_back(IndexConditional::shared_ptr(new IndexConditional(1)));

GaussianBayesNet gbn = createSmallGaussianBayesNet();
SymbolicBayesNet actual(gbn);

EXPECT(assert_equal(expected, actual));
}

/* ************************************************************************* */
int main() {
TestResult tr;
@@ -10,7 +10,7 @@
 * -------------------------------------------------------------------------- */

/**
 * @file testSymbolicFactorGraph.cpp
 * @file testSymbolicFactorGraphB.cpp
 * @brief Unit tests for a symbolic Factor Graph
 * @author Frank Dellaert
 */

@@ -8,7 +8,6 @@ add_executable(wrap wrap.cpp)
target_link_libraries(wrap wrap_lib)

# Install wrap binary
option(GTSAM_INSTALL_WRAP "Enable/Disable installation of wrap utility" ON)
if (GTSAM_INSTALL_WRAP)
install(TARGETS wrap DESTINATION ${CMAKE_INSTALL_PREFIX}/bin)
endif(GTSAM_INSTALL_WRAP)

@@ -34,61 +33,14 @@ set(mexFlags "-I${Boost_INCLUDE_DIR} -I${CMAKE_INSTALL_PREFIX}/include -I${CMAKE
set(toolbox_path ${CMAKE_BINARY_DIR}/wrap/gtsam)
set(moduleName gtsam)

## Determine the mex extension
# Apple Macintosh (64-bit) mexmaci64
# Linux (32-bit) mexglx
# Linux (64-bit) mexa64
# Microsoft Windows (32-bit) mexw32
# Windows (64-bit) mexw64
include(GtsamMatlabWrap)
find_mexextension()

# only support 64-bit apple
if(CMAKE_HOST_APPLE)
set(GTSAM_MEX_BIN_EXTENSION_default mexmaci64)
endif(CMAKE_HOST_APPLE)

if(NOT CMAKE_HOST_APPLE)
# check 64 bit
if( ${CMAKE_SIZEOF_VOID_P} EQUAL 4 )
set( HAVE_64_BIT 0 )
endif( ${CMAKE_SIZEOF_VOID_P} EQUAL 4 )

if( ${CMAKE_SIZEOF_VOID_P} EQUAL 8 )
set( HAVE_64_BIT 1 )
endif( ${CMAKE_SIZEOF_VOID_P} EQUAL 8 )

# Check for linux machines
if (CMAKE_HOST_UNIX)
if (HAVE_64_BIT)
set(GTSAM_MEX_BIN_EXTENSION_default mexa64)
else (HAVE_64_BIT)
set(GTSAM_MEX_BIN_EXTENSION_default mexglx)
endif (HAVE_64_BIT)
endif(CMAKE_HOST_UNIX)

# Check for windows machines
if (CMAKE_HOST_WIN32)
if (HAVE_64_BIT)
set(GTSAM_MEX_BIN_EXTENSION_default mexw64)
else (HAVE_64_BIT)
set(GTSAM_MEX_BIN_EXTENSION_default mexw32)
endif (HAVE_64_BIT)
endif(CMAKE_HOST_WIN32)
endif(NOT CMAKE_HOST_APPLE)

# Allow for setting mex extension manually
set(GTSAM_MEX_BIN_EXTENSION ${GTSAM_MEX_BIN_EXTENSION_default} CACHE DOCSTRING "Extension for matlab mex files")
message(STATUS "Detected Matlab mex extension: ${GTSAM_MEX_BIN_EXTENSION_default}")
message(STATUS "Current Matlab mex extension: ${GTSAM_MEX_BIN_EXTENSION}")

# Actual build commands - separated by OS
# Code generation command
add_custom_target(wrap_gtsam ALL COMMAND
./wrap ${GTSAM_MEX_BIN_EXTENSION} ${CMAKE_SOURCE_DIR} ${moduleName} ${toolbox_path} "${mexFlags}"
DEPENDS wrap)

option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" ON)
option(GTSAM_INSTALL_MATLAB_EXAMPLES "Enable/Disable installation of matlab examples" ON)
option(GTSAM_INSTALL_MATLAB_TESTS "Enable/Disable installation of matlab tests" ON)

set(GTSAM_TOOLBOX_INSTALL_PATH ${CMAKE_INSTALL_PREFIX}/borg/toolbox CACHE DOCSTRING "Path to install matlab toolbox")

if (GTSAM_INSTALL_MATLAB_TOOLBOX)