Making more things in gtsam_unstable compile
parent 883c870dff
commit 4a4e16485c
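
Most of the edits below apply one recurring migration: the updated core GTSAM API drops the explicit Ordering argument from linearization and retraction, and indexes linear quantities directly by Key instead of by an Index looked up in an Ordering. A minimal before/after sketch of that pattern, using names that appear in the diff (theta, delta, factors, ordering, key stand for the usual GTSAM Values, VectorValues, NonlinearFactorGraph, Ordering, and Key objects); this is an illustration, not code taken verbatim from any one file:

    // Old API: the Ordering is threaded through every call,
    // and per-variable vectors are looked up by integer Index.
    Values evalpoint = theta.retract(delta, ordering);
    GaussianFactorGraph linear = *factors.linearize(theta, ordering);
    Vector d = delta.at(ordering.at(key));

    // New API: the Ordering argument disappears,
    // and per-variable vectors are looked up directly by Key.
    Values evalpoint = theta.retract(delta);
    GaussianFactorGraph linear = *factors.linearize(theta);
    Vector d = delta.at(key);
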
@@ -2452,6 +2452,14 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="check.nonlinear_unstable" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j6 -j8</buildArguments>
<buildTarget>check.nonlinear_unstable</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testSpirit.run" path="build/wrap" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>

@@ -7,7 +7,7 @@ set (gtsam_unstable_subdirs
linear
dynamics
nonlinear
#slam
slam
)

set(GTSAM_UNSTABLE_BOOST_LIBRARIES ${GTSAM_BOOST_LIBRARIES})

@@ -18,27 +18,18 @@ add_custom_target(check.unstable COMMAND ${CMAKE_CTEST_COMMAND} --output-on-fail
# Add the full name to this list, as in the following example
# Sources to remove from builds
set (excluded_sources # "")
"${CMAKE_CURRENT_SOURCE_DIR}/slam/serialization.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/BatchFixedLagSmoother.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/ConcurrentBatchFilter.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/ConcurrentBatchSmoother.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/ConcurrentFilteringAndSmoothing.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/FixedLagSmoother.cpp"
# "${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/ConcurrentFilteringAndSmoothing.cpp"
# "${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/FixedLagSmoother.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/IncrementalFixedLagSmoother.cpp"
# "${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/LinearizedFactor.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/slam/serialization.cpp"
)

set (excluded_headers # "")
"${CMAKE_CURRENT_SOURCE_DIR}/slam/serialization.h"
"${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/BatchFixedLagSmoother.h"
"${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/ConcurrentBatchFilter.h"
"${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/ConcurrentBatchSmoother.h"
"${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/ConcurrentFilteringAndSmoothing.h"
"${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/FixedLagSmoother.h"
"${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/IncrementalFixedLagSmoother.h"
# "${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/LinearizedFactor.h"
)

# assemble core libaries

@@ -88,7 +88,10 @@ GaussianFactorGraph liquefy(const typename BAYESTREE::sharedClique& root, bool s
*/
template <class BAYESTREE>
GaussianFactorGraph liquefy(const BAYESTREE& bayesTree, bool splitConditionals = false) {
return liquefy<BAYESTREE>(bayesTree.root(), splitConditionals);
GaussianFactorGraph result;
BOOST_FOREACH(const typename BAYESTREE::sharedClique& root, bayesTree.roots())
result.push_back(liquefy<BAYESTREE>(root, splitConditionals));
return result;
}

} // \namespace gtsam

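The liquefy change above reflects the same migration: the Bayes tree is now treated as a forest that may hold several roots, so the factor graph is rebuilt by iterating over bayesTree.roots() instead of assuming a single bayesTree.root(). A brief usage sketch (how the Bayes tree was obtained is assumed; only liquefy itself appears in the diff):

    // Collapse a whole Bayes tree (possibly a forest) back into a Gaussian factor graph.
    GaussianFactorGraph graph = liquefy(bayesTree);        // template argument deduced
    GaussianFactorGraph split = liquefy(bayesTree, true);  // optional splitConditionals flag
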
@@ -22,7 +22,7 @@
#include <gtsam/linear/GaussianJunctionTree.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/GaussianFactor.h>
#include <gtsam/inference/inference.h>
//#include <gtsam/inference/inference.h>
#include <gtsam/base/debug.h>

namespace gtsam {

@@ -55,7 +55,7 @@ public:
* a single variable is needed, it is faster to call calculateEstimate(const KEY&).
*/
Values calculateEstimate() const {
return theta_.retract(delta_, ordering_);
return theta_.retract(delta_);
}

/** Compute an estimate for a single variable using its incomplete linear delta computed

@@ -66,8 +66,7 @@ public:
*/
template<class VALUE>
VALUE calculateEstimate(Key key) const {
const Index index = ordering_.at(key);
const Vector delta = delta_.at(index);
const Vector delta = delta_.at(key);
return theta_.at<VALUE>(key).retract(delta);
}

@@ -20,11 +20,10 @@ set (nonlinear_full_libs

# Exclude tests that don't work
set (nonlinear_excluded_tests #"")
"${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/test/testBatchFixedLagSmoother.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/test/testConcurrentBatchFilter.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/test/testConcurrentBatchSmoother.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/test/testIncrementalFixedLagSmoother.cpp"
#"${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/test/testLinearizedFactor.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/testBatchFixedLagSmoother.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/testConcurrentBatchFilter.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/testConcurrentBatchSmoother.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/testIncrementalFixedLagSmoother.cpp"
)

@@ -128,8 +128,8 @@ void ConcurrentBatchFilter::synchronize(const NonlinearFactorGraph& summarizedFa
// Perform an optional optimization on the to-be-sent-to-the-smoother factors
if(relin_) {
// Create ordering and delta
Ordering ordering = *graph.orderingCOLAMD(values);
VectorValues delta = values.zeroVectors(ordering);
Ordering ordering = graph.orderingCOLAMD();
VectorValues delta = values.zeroVectors();
// Optimize this graph using a modified version of L-M
optimize(graph, values, ordering, delta, separatorValues, parameters_);
// Update filter theta and delta

@@ -162,8 +162,8 @@ void ConcurrentBatchFilter::synchronize(const NonlinearFactorGraph& summarizedFa
// Generate separate orderings that place the filter keys or the smoother keys first
// TODO: This is convenient, but it recalculates the variable index each time
Ordering filterOrdering = *graph.orderingCOLAMDConstrained(values, filterConstraints);
Ordering smootherOrdering = *graph.orderingCOLAMDConstrained(values, smootherConstraints);
Ordering filterOrdering = graph.orderingCOLAMDConstrained(filterConstraints);
Ordering smootherOrdering = graph.orderingCOLAMDConstrained(smootherConstraints);

// Extract the set of filter keys and smoother keys
std::set<Key> filterKeys;

@@ -339,7 +339,7 @@ ConcurrentBatchFilter::Result ConcurrentBatchFilter::optimize(const NonlinearFac
double errorTol = parameters.errorTol;

// Create a Values that holds the current evaluation point
Values evalpoint = theta.retract(delta, ordering);
Values evalpoint = theta.retract(delta);
result.error = factors.error(evalpoint);

// Use a custom optimization loop so the linearization points can be controlled

@@ -352,7 +352,7 @@ ConcurrentBatchFilter::Result ConcurrentBatchFilter::optimize(const NonlinearFac
gttic(optimizer_iteration);
{
// Linearize graph around the linearization point
GaussianFactorGraph linearFactorGraph = *factors.linearize(theta, ordering);
GaussianFactorGraph linearFactorGraph = *factors.linearize(theta);

// Keep increasing lambda until we make make progress
while(true) {

@@ -377,7 +377,7 @@ ConcurrentBatchFilter::Result ConcurrentBatchFilter::optimize(const NonlinearFac
// Solve Damped Gaussian Factor Graph
newDelta = GaussianJunctionTree(dampedFactorGraph).optimize(parameters.getEliminationFunction());
// update the evalpoint with the new delta
evalpoint = theta.retract(newDelta, ordering);
evalpoint = theta.retract(newDelta);
gttoc(solve);

// Evaluate the new nonlinear error

@@ -442,7 +442,7 @@ void ConcurrentBatchFilter::marginalize(const FastList<Key>& keysToMove) {
// Note: It is assumed the ordering already has these keys first

// Create the linear factor graph
GaussianFactorGraph linearFactorGraph = *factors_.linearize(theta_, ordering_);
GaussianFactorGraph linearFactorGraph = *factors_.linearize(theta_);

// Calculate the variable index
VariableIndex variableIndex(linearFactorGraph, ordering_.size());

@@ -474,7 +474,7 @@ void ConcurrentBatchFilter::marginalize(const FastList<Key>& keysToMove) {
BOOST_FOREACH(Index index, indicesToEliminate) {
GaussianFactor::shared_ptr gaussianFactor = forest.at(index)->eliminateRecursive(parameters_.getEliminationFunction());
if(gaussianFactor->size() > 0) {
LinearContainerFactor::shared_ptr marginalFactor(new LinearContainerFactor(gaussianFactor, ordering_, theta_));
LinearContainerFactor::shared_ptr marginalFactor(new LinearContainerFactor(gaussianFactor, theta_));
marginalFactors.push_back(marginalFactor);
// Add the keys associated with the marginal factor to the separator values
BOOST_FOREACH(Key key, *marginalFactor) {

@@ -551,7 +551,7 @@ NonlinearFactorGraph ConcurrentBatchFilter::marginalize(const NonlinearFactorGra
// Note: It is assumed the ordering already has these keys first

// Create the linear factor graph
GaussianFactorGraph linearFactorGraph = *graph.linearize(values, ordering);
GaussianFactorGraph linearFactorGraph = *graph.linearize(values);

// Construct a variable index
VariableIndex variableIndex(linearFactorGraph, ordering.size());

@@ -576,7 +576,7 @@ NonlinearFactorGraph ConcurrentBatchFilter::marginalize(const NonlinearFactorGra
BOOST_FOREACH(Index index, indicesToEliminate) {
GaussianFactor::shared_ptr gaussianFactor = forest.at(index)->eliminateRecursive(function);
if(gaussianFactor->size() > 0) {
LinearContainerFactor::shared_ptr marginalFactor(new LinearContainerFactor(gaussianFactor, ordering, values));
LinearContainerFactor::shared_ptr marginalFactor(new LinearContainerFactor(gaussianFactor, values));
marginalFactors.push_back(marginalFactor);
}
}

@@ -604,13 +604,13 @@ void ConcurrentBatchFilter::PrintNonlinearFactor(const NonlinearFactor::shared_p
}

/* ************************************************************************* */
void ConcurrentBatchFilter::PrintLinearFactor(const GaussianFactor::shared_ptr& factor, const Ordering& ordering,
void ConcurrentBatchFilter::PrintLinearFactor(const GaussianFactor::shared_ptr& factor,
const std::string& indent, const KeyFormatter& keyFormatter) {
std::cout << indent;
if(factor) {
std::cout << "g( ";
BOOST_FOREACH(Index index, *factor) {
std::cout << keyFormatter(ordering.key(index)) << " ";
BOOST_FOREACH(Index key, *factor) {
std::cout << keyFormatter(key) << " ";
}
std::cout << ")" << std::endl;
} else {

@@ -89,7 +89,7 @@ public:
* If only a single variable is needed, it may be faster to call calculateEstimate(const KEY&).
*/
Values calculateEstimate() const {
return theta_.retract(delta_, ordering_);
return theta_.retract(delta_);
}

/** Compute the current best estimate of a single variable. This is generally faster than

@@ -99,8 +99,7 @@ public:
*/
template<class VALUE>
VALUE calculateEstimate(Key key) const {
const Index index = ordering_.at(key);
const Vector delta = delta_.at(index);
const Vector delta = delta_.at(key);
return theta_.at<VALUE>(key).retract(delta);
}

@@ -213,7 +212,7 @@ private:
const std::string& indent = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter);

/** Print just the nonlinear keys in a linear factor */
static void PrintLinearFactor(const GaussianFactor::shared_ptr& factor, const Ordering& ordering,
static void PrintLinearFactor(const GaussianFactor::shared_ptr& factor,
const std::string& indent = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter);

// A custom elimination tree that supports forests and partial elimination

@@ -89,7 +89,7 @@ public:
* If only a single variable is needed, it may be faster to call calculateEstimate(const KEY&).
*/
Values calculateEstimate() const {
return theta_.retract(delta_, ordering_);
return theta_.retract(delta_);
}

/** Compute the current best estimate of a single variable. This is generally faster than

@@ -33,9 +33,6 @@ TEST( LinearizedFactor, equals_jacobian )
// Create a Between Factor from a Point3. This is actually a linear factor.
Key x1(1);
Key x2(2);
Ordering ordering;
ordering.push_back(x1);
ordering.push_back(x2);
Values values;
values.insert(x1, Point3(-22.4, +8.5, +2.4));
values.insert(x2, Point3(-21.0, +5.0, +21.0));

@@ -46,9 +43,9 @@ TEST( LinearizedFactor, equals_jacobian )
// Create two identical factors and make sure they're equal
JacobianFactor::shared_ptr jf = boost::static_pointer_cast<JacobianFactor>(betweenFactor.linearize(values, ordering));
LinearizedJacobianFactor jacobian1(jf, ordering, values);
LinearizedJacobianFactor jacobian2(jf, ordering, values);
JacobianFactor::shared_ptr jf = boost::static_pointer_cast<JacobianFactor>(betweenFactor.linearize(values));
LinearizedJacobianFactor jacobian1(jf, values);
LinearizedJacobianFactor jacobian2(jf, values);

CHECK(assert_equal(jacobian1, jacobian2));
}

@@ -59,9 +56,6 @@ TEST( LinearizedFactor, clone_jacobian )
// Create a Between Factor from a Point3. This is actually a linear factor.
Key x1(1);
Key x2(2);
Ordering ordering;
ordering.push_back(x1);
ordering.push_back(x2);
Values values;
values.insert(x1, Point3(-22.4, +8.5, +2.4));
values.insert(x2, Point3(-21.0, +5.0, +21.0));

@@ -71,13 +65,13 @@ TEST( LinearizedFactor, clone_jacobian )
BetweenFactor<Point3> betweenFactor(x1, x2, measured, model);

// Create one factor that is a clone of the other and make sure they're equal
JacobianFactor::shared_ptr jf = boost::static_pointer_cast<JacobianFactor>(betweenFactor.linearize(values, ordering));
LinearizedJacobianFactor jacobian1(jf, ordering, values);
JacobianFactor::shared_ptr jf = boost::static_pointer_cast<JacobianFactor>(betweenFactor.linearize(values));
LinearizedJacobianFactor jacobian1(jf, values);
LinearizedJacobianFactor::shared_ptr jacobian2 = boost::static_pointer_cast<LinearizedJacobianFactor>(jacobian1.clone());
CHECK(assert_equal(jacobian1, *jacobian2));

JacobianFactor::shared_ptr jf1 = boost::static_pointer_cast<JacobianFactor>(jacobian1.linearize(values, ordering));
JacobianFactor::shared_ptr jf2 = boost::static_pointer_cast<JacobianFactor>(jacobian2->linearize(values, ordering));
JacobianFactor::shared_ptr jf1 = boost::static_pointer_cast<JacobianFactor>(jacobian1.linearize(values));
JacobianFactor::shared_ptr jf2 = boost::static_pointer_cast<JacobianFactor>(jacobian2->linearize(values));
CHECK(assert_equal(*jf1, *jf2));

Matrix information1 = jf1->augmentedInformation();

@@ -91,9 +85,6 @@ TEST( LinearizedFactor, add_jacobian )
// Create a Between Factor from a Point3. This is actually a linear factor.
Key x1(1);
Key x2(2);
Ordering ordering;
ordering.push_back(x1);
ordering.push_back(x2);
Values values;
values.insert(x1, Point3(-22.4, +8.5, +2.4));
values.insert(x2, Point3(-21.0, +5.0, +21.0));

@@ -103,10 +94,10 @@ TEST( LinearizedFactor, add_jacobian )
BetweenFactor<Point3> betweenFactor(x1, x2, measured, model);

// Create two factor graphs, one using 'push_back' and one using 'add' and make sure they're equal
JacobianFactor::shared_ptr jf = boost::static_pointer_cast<JacobianFactor>(betweenFactor.linearize(values, ordering));
LinearizedJacobianFactor::shared_ptr jacobian(new LinearizedJacobianFactor(jf, ordering, values));
JacobianFactor::shared_ptr jf = boost::static_pointer_cast<JacobianFactor>(betweenFactor.linearize(values));
LinearizedJacobianFactor::shared_ptr jacobian(new LinearizedJacobianFactor(jf, values));
NonlinearFactorGraph graph1; graph1.push_back(jacobian);
NonlinearFactorGraph graph2; graph2.add(*jacobian);
NonlinearFactorGraph graph2; graph2.push_back(*jacobian);

// TODO: When creating a Jacobian from a cached factor, I experienced a problem in the 'add' version
// However, I am currently unable to reproduce the error in this unit test.

@@ -133,8 +124,8 @@ TEST( LinearizedFactor, add_jacobian )
//
//
// // Create a linearized jacobian factors
// JacobianFactor::shared_ptr jf = boost::static_pointer_cast<JacobianFactor>(betweenFactor.linearize(values, ordering));
// LinearizedJacobianFactor jacobian(jf, ordering, values);
// JacobianFactor::shared_ptr jf = boost::static_pointer_cast<JacobianFactor>(betweenFactor.linearize(values));
// LinearizedJacobianFactor jacobian(jf, values);
//
//
// for(double x1 = -10; x1 < 10; x1 += 2.0) {

@@ -156,8 +147,8 @@ TEST( LinearizedFactor, add_jacobian )
// EXPECT_DOUBLES_EQUAL(expected_error, actual_error, 1e-9 );
//
// // Check that the linearized factors are identical
// GaussianFactor::shared_ptr expected_factor = betweenFactor.linearize(linpoint, ordering);
// GaussianFactor::shared_ptr actual_factor = jacobian.linearize(linpoint, ordering);
// GaussianFactor::shared_ptr expected_factor = betweenFactor.linearize(linpoint);
// GaussianFactor::shared_ptr actual_factor = jacobian.linearize(linpoint);
// CHECK(assert_equal(*expected_factor, *actual_factor));
// }
// }

@@ -175,9 +166,6 @@ TEST( LinearizedFactor, equals_hessian )
// Create a Between Factor from a Point3. This is actually a linear factor.
Key x1(1);
Key x2(2);
Ordering ordering;
ordering.push_back(x1);
ordering.push_back(x2);
Values values;
values.insert(x1, Point3(-22.4, +8.5, +2.4));
values.insert(x2, Point3(-21.0, +5.0, +21.0));

@@ -188,10 +176,10 @@ TEST( LinearizedFactor, equals_hessian )
// Create two identical factors and make sure they're equal
JacobianFactor::shared_ptr jf = boost::static_pointer_cast<JacobianFactor>(betweenFactor.linearize(values, ordering));
JacobianFactor::shared_ptr jf = boost::static_pointer_cast<JacobianFactor>(betweenFactor.linearize(values));
HessianFactor::shared_ptr hf(new HessianFactor(*jf));
LinearizedHessianFactor hessian1(hf, ordering, values);
LinearizedHessianFactor hessian2(hf, ordering, values);
LinearizedHessianFactor hessian1(hf, values);
LinearizedHessianFactor hessian2(hf, values);

CHECK(assert_equal(hessian1, hessian2));
}

@@ -202,9 +190,6 @@ TEST( LinearizedFactor, clone_hessian )
// Create a Between Factor from a Point3. This is actually a linear factor.
Key x1(1);
Key x2(2);
Ordering ordering;
ordering.push_back(x1);
ordering.push_back(x2);
Values values;
values.insert(x1, Point3(-22.4, +8.5, +2.4));
values.insert(x2, Point3(-21.0, +5.0, +21.0));

@@ -215,9 +200,9 @@ TEST( LinearizedFactor, clone_hessian )
// Create two identical factors and make sure they're equal
JacobianFactor::shared_ptr jf = boost::static_pointer_cast<JacobianFactor>(betweenFactor.linearize(values, ordering));
JacobianFactor::shared_ptr jf = boost::static_pointer_cast<JacobianFactor>(betweenFactor.linearize(values));
HessianFactor::shared_ptr hf(new HessianFactor(*jf));
LinearizedHessianFactor hessian1(hf, ordering, values);
LinearizedHessianFactor hessian1(hf, values);
LinearizedHessianFactor::shared_ptr hessian2 = boost::static_pointer_cast<LinearizedHessianFactor>(hessian1.clone());

CHECK(assert_equal(hessian1, *hessian2));

@@ -229,9 +214,6 @@ TEST( LinearizedFactor, add_hessian )
// Create a Between Factor from a Point3. This is actually a linear factor.
Key x1(1);
Key x2(2);
Ordering ordering;
ordering.push_back(x1);
ordering.push_back(x2);
Values values;
values.insert(x1, Point3(-22.4, +8.5, +2.4));
values.insert(x2, Point3(-21.0, +5.0, +21.0));

@@ -242,11 +224,11 @@ TEST( LinearizedFactor, add_hessian )
// Create two identical factors and make sure they're equal
JacobianFactor::shared_ptr jf = boost::static_pointer_cast<JacobianFactor>(betweenFactor.linearize(values, ordering));
JacobianFactor::shared_ptr jf = boost::static_pointer_cast<JacobianFactor>(betweenFactor.linearize(values));
HessianFactor::shared_ptr hf(new HessianFactor(*jf));
LinearizedHessianFactor::shared_ptr hessian(new LinearizedHessianFactor(hf, ordering, values));
LinearizedHessianFactor::shared_ptr hessian(new LinearizedHessianFactor(hf, values));
NonlinearFactorGraph graph1; graph1.push_back(hessian);
NonlinearFactorGraph graph2; graph2.add(*hessian);
NonlinearFactorGraph graph2; graph2.push_back(*hessian);

CHECK(assert_equal(graph1, graph2));
}

@@ -270,9 +252,9 @@ TEST( LinearizedFactor, add_hessian )
//
//
// // Create a linearized hessian factor
// JacobianFactor::shared_ptr jf = boost::static_pointer_cast<JacobianFactor>(betweenFactor.linearize(values, ordering));
// JacobianFactor::shared_ptr jf = boost::static_pointer_cast<JacobianFactor>(betweenFactor.linearize(values));
// HessianFactor::shared_ptr hf(new HessianFactor(*jf));
// LinearizedHessianFactor hessian(hf, ordering, values);
// LinearizedHessianFactor hessian(hf, values);
//
//
// for(double x1 = -10; x1 < 10; x1 += 2.0) {

@@ -294,8 +276,8 @@ TEST( LinearizedFactor, add_hessian )
// EXPECT_DOUBLES_EQUAL(expected_error, actual_error, 1e-9 );
//
// // Check that the linearized factors are identical
// GaussianFactor::shared_ptr expected_factor = HessianFactor::shared_ptr(new HessianFactor(*betweenFactor.linearize(linpoint, ordering)));
// GaussianFactor::shared_ptr actual_factor = hessian.linearize(linpoint, ordering);
// GaussianFactor::shared_ptr expected_factor = HessianFactor::shared_ptr(new HessianFactor(*betweenFactor.linearize(linpoint)));
// GaussianFactor::shared_ptr actual_factor = hessian.linearize(linpoint);
// CHECK(assert_equal(*expected_factor, *actual_factor));
// }
// }

@@ -25,7 +25,15 @@ set (slam_full_libs

# Exclude tests that don't work
set (slam_excluded_tests
"${CMAKE_CURRENT_SOURCE_DIR}/tests/testSerialization.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/testAHRS.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/testCombinedImuFactor.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/testImuFactor.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/testInvDepthFactor3.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/testInvDepthFactorVariant1.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/testInvDepthFactorVariant2.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/testInvDepthFactorVariant3.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/testMultiProjectionFactor.cpp"
"${CMAKE_CURRENT_SOURCE_DIR}/tests/testSerialization.cpp"
# "" # Add to this list, with full path, to exclude
)
# Add all tests

@@ -7,13 +7,16 @@

#include <gtsam_unstable/slam/DummyFactor.h>

#include <boost/assign/list_of.hpp>
#include <boost/foreach.hpp>

using namespace boost::assign;

namespace gtsam {

/* ************************************************************************* */
DummyFactor::DummyFactor(const Key& key1, size_t dim1, const Key& key2, size_t dim2)
: NonlinearFactor(key1, key2)
: NonlinearFactor(cref_list_of<2>(key1)(key2))
{
dims_.push_back(dim1);
dims_.push_back(dim2);

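The constructor change above switches from a two-key NonlinearFactor constructor to passing a key container built with boost::assign's cref_list_of, which the file now pulls in via boost/assign/list_of.hpp. A small standalone sketch of what cref_list_of<2>(a)(b) produces (variable names are hypothetical; gtsam::Key is an unsigned integer type, so plain size_t stands in for it here):

    #include <boost/assign/list_of.hpp>
    #include <cstddef>
    #include <vector>

    int main() {
      // cref_list_of<N> builds a fixed-size list of const references that can be
      // converted into a standard container, avoiding a chain of push_back calls.
      std::size_t key1 = 1, key2 = 2;   // stand-ins for gtsam::Key values
      std::vector<std::size_t> keys = boost::assign::cref_list_of<2>(key1)(key2);
      return keys.size() == 2 ? 0 : 1;  // keys now holds {1, 2}
    }
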
@@ -38,7 +41,7 @@ bool DummyFactor::equals(const NonlinearFactor& f, double tol) const {

/* ************************************************************************* */
boost::shared_ptr<GaussianFactor>
DummyFactor::linearize(const Values& c, const Ordering& ordering) const {
DummyFactor::linearize(const Values& c) const {
// Only linearize if the factor is active
if (!this->active(c))
return boost::shared_ptr<JacobianFactor>();

@@ -46,7 +49,7 @@ DummyFactor::linearize(const Values& c, const Ordering& ordering) const {
// Fill in terms with zero matrices
std::vector<std::pair<Index, Matrix> > terms(this->size());
for(size_t j=0; j<this->size(); ++j) {
terms[j].first = ordering[this->keys()[j]];
terms[j].first = keys()[j];
terms[j].second = zeros(rowDim_, dims_[j]);
}

@@ -54,8 +54,7 @@ public:
virtual size_t dim() const { return rowDim_; }

/** linearize to a GaussianFactor */
virtual boost::shared_ptr<GaussianFactor>
linearize(const Values& c, const Ordering& ordering) const;
virtual boost::shared_ptr<GaussianFactor> linearize(const Values& c) const;

/**
* Creates a shared_ptr clone of the factor - needs to be specialized to allow

@@ -147,7 +147,7 @@ namespace gtsam {
}

/// linearize returns a Hessianfactor that is an approximation of error(p)
virtual boost::shared_ptr<GaussianFactor> linearize(const Values& values, const Ordering& ordering) const {
virtual boost::shared_ptr<GaussianFactor> linearize(const Values& values) const {

// std::cout.precision(20);

@@ -171,11 +171,6 @@ namespace gtsam {
std::vector<Matrix> Gs(keys_.size()*(keys_.size()+1)/2);
std::vector<Vector> gs(keys_.size());
double f = 0;
// fill in the keys
std::vector<Index> js;
BOOST_FOREACH(const Key& k, keys_) {
js += ordering[k];
}

bool blockwise = false;

@@ -336,7 +331,7 @@ namespace gtsam {

// ==========================================================================================================
return HessianFactor::shared_ptr(new HessianFactor(js, Gs, gs, f));
return HessianFactor::shared_ptr(new HessianFactor(keys_, Gs, gs, f));
}

/**

@@ -13,6 +13,7 @@
#include <gtsam/nonlinear/NonlinearFactor.h>
#include <gtsam/inference/Key.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/base/timing.h>
#include <boost/foreach.hpp>
#include <map>

@@ -10,10 +10,11 @@

#include <CppUnitLite/TestHarness.h>

#include <gtsam/geometry/Point3.h>

#include <gtsam_unstable/slam/DummyFactor.h>

#include <gtsam/geometry/Point3.h>
#include <gtsam/base/TestableAssertions.h>

using namespace gtsam;

const double tol = 1e-9;

@@ -41,15 +42,12 @@ TEST( testDummyFactor, basics ) {
// errors - all zeros
DOUBLES_EQUAL(0.0, dummyfactor.error(c), tol);

Ordering ordering;
ordering += key1, key2;

// linearization: all zeros
GaussianFactor::shared_ptr actLinearization = dummyfactor.linearize(c, ordering);
GaussianFactor::shared_ptr actLinearization = dummyfactor.linearize(c);
CHECK(actLinearization);
noiseModel::Diagonal::shared_ptr model3 = noiseModel::Unit::Create(3);
GaussianFactor::shared_ptr expLinearization(new JacobianFactor(
ordering[key1], zeros(3,3), ordering[key2], zeros(3,3), zero(3), model3));
key1, zeros(3,3), key2, zeros(3,3), zero(3), model3));
EXPECT(assert_equal(*expLinearization, *actLinearization, tol));
}

@@ -25,11 +25,8 @@
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/LinearContainerFactor.h>
#include <gtsam/nonlinear/Ordering.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/nonlinear/Symbol.h>
#include <gtsam/nonlinear/Key.h>
#include <gtsam/linear/GaussianSequentialSolver.h>
#include <gtsam/inference/JunctionTree.h>
#include <gtsam_unstable/geometry/triangulation.h>
#include <gtsam/geometry/Pose3.h>

@@ -40,8 +37,10 @@
#include <gtsam/geometry/SimpleCamera.h>
#include <CppUnitLite/TestHarness.h>

#include <boost/assign/std/vector.hpp>

using namespace std;
using namespace boost::assign;
using namespace gtsam;

// make a realistic calibration matrix

@@ -223,8 +222,8 @@ TEST( MultiProjectionFactor, 3poses ){
graph.push_back(smartFactor1);
graph.push_back(smartFactor2);
graph.push_back(smartFactor3);
graph.add(PriorFactor<Pose3>(x1, pose1, noisePrior));
graph.add(PriorFactor<Pose3>(x2, pose2, noisePrior));
graph.push_back(PriorFactor<Pose3>(x1, pose1, noisePrior));
graph.push_back(PriorFactor<Pose3>(x2, pose2, noisePrior));

// smartFactor1->print("smartFactor1");

@@ -113,11 +113,11 @@ TEST( SmartRangeFactor, optimization ) {

// Create Factor graph
NonlinearFactorGraph graph;
graph.add(f);
graph.push_back(f);
const noiseModel::Base::shared_ptr //
priorNoise = noiseModel::Diagonal::Sigmas(Vector3(1, 1, M_PI));
graph.add(PriorFactor<Pose2>(1, pose1, priorNoise));
graph.add(PriorFactor<Pose2>(2, pose2, priorNoise));
graph.push_back(PriorFactor<Pose2>(1, pose1, priorNoise));
graph.push_back(PriorFactor<Pose2>(2, pose2, priorNoise));

// Try optimizing
LevenbergMarquardtParams params;