Merge branch 'feature/LevenbergMarquardt'

release/4.3a0
Luca 2014-03-12 14:52:08 -04:00
commit 4bb6402ff2
44 changed files with 1602 additions and 624 deletions

View File

@ -1,19 +1,17 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?fileVersion 4.0.0?>
<cproject storage_type_id="org.eclipse.cdt.core.XmlProjectDescriptionStorage">
<?fileVersion 4.0.0?><cproject storage_type_id="org.eclipse.cdt.core.XmlProjectDescriptionStorage">
<storageModule moduleId="org.eclipse.cdt.core.settings">
<cconfiguration id="cdt.managedbuild.toolchain.gnu.macosx.base.1359703544">
<storageModule buildSystemId="org.eclipse.cdt.managedbuilder.core.configurationDataProvider" id="cdt.managedbuild.toolchain.gnu.macosx.base.1359703544" moduleId="org.eclipse.cdt.core.settings" name="MacOSX GCC">
<externalSettings/>
<extensions>
<extension id="org.eclipse.cdt.core.ELF" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.MachO64" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.GASErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GLDErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GCCErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GmakeErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.CWDLocator" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.ELF" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.MachO64" point="org.eclipse.cdt.core.BinaryParser"/>
</extensions>
</storageModule>
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
@ -62,13 +60,13 @@
<storageModule buildSystemId="org.eclipse.cdt.managedbuilder.core.configurationDataProvider" id="cdt.managedbuild.toolchain.gnu.macosx.base.1359703544.1441575890" moduleId="org.eclipse.cdt.core.settings" name="Timing">
<externalSettings/>
<extensions>
<extension id="org.eclipse.cdt.core.ELF" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.MachO64" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.GASErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GLDErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GCCErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GmakeErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.CWDLocator" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.ELF" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.MachO64" point="org.eclipse.cdt.core.BinaryParser"/>
</extensions>
</storageModule>
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
@ -118,13 +116,13 @@
<storageModule buildSystemId="org.eclipse.cdt.managedbuilder.core.configurationDataProvider" id="cdt.managedbuild.toolchain.gnu.macosx.base.1359703544.127261216" moduleId="org.eclipse.cdt.core.settings" name="fast">
<externalSettings/>
<extensions>
<extension id="org.eclipse.cdt.core.ELF" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.MachO64" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.GASErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GLDErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GCCErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.GmakeErrorParser" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.CWDLocator" point="org.eclipse.cdt.core.ErrorParser"/>
<extension id="org.eclipse.cdt.core.ELF" point="org.eclipse.cdt.core.BinaryParser"/>
<extension id="org.eclipse.cdt.core.MachO64" point="org.eclipse.cdt.core.BinaryParser"/>
</extensions>
</storageModule>
<storageModule moduleId="cdtBuildSystem" version="4.0.0">
@ -1932,6 +1930,22 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testImuFactor.run" path="build/gtsam/navigation" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testImuFactor.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testCombinedImuFactor.run" path="build/gtsam/navigation" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>testCombinedImuFactor.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="all" path="slam" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j2</buildArguments>
@ -2855,6 +2869,14 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="check.tests" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>check.tests</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testSpirit.run" path="build/wrap" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
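Note (not part of the diff): the new Eclipse make targets added in this file (testImuFactor.run, testCombinedImuFactor.run, check.tests) only point the IDE at existing build targets; outside Eclipse they amount to running something like make -j5 testImuFactor.run from the listed path (e.g. build/gtsam/navigation), assuming the usual CMake-generated GTSAM test targets.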

View File

@ -0,0 +1,17 @@
1 1 1
0 0 -3.859900e+02 3.871200e+02
-1.6943983532198115e-02
1.1171804676513932e-02
2.4643508831711991e-03
7.3030995682610689e-01
-2.6490818471043420e-01
-1.7127892627337182e+00
1.4300319432711681e+03
-7.5572758535864072e-08
3.2377569465570913e-14
-1.2055995050700867e+01
1.2838775976205760e+01
-4.1099369264082803e+01

View File

@ -1,20 +1,20 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
* -------------------------------------------------------------------------- */
/**
* @file SymmetricBlockMatrix.cpp
* @brief Access to matrices via blocks of pre-defined sizes. Used in GaussianFactor and GaussianConditional.
* @author Richard Roberts
* @date Sep 18, 2010
*/
* @file SymmetricBlockMatrix.cpp
* @brief Access to matrices via blocks of pre-defined sizes. Used in GaussianFactor and GaussianConditional.
* @author Richard Roberts
* @date Sep 18, 2010
*/
#include <gtsam/base/SymmetricBlockMatrix.h>
#include <gtsam/base/VerticalBlockMatrix.h>
@ -23,42 +23,43 @@
namespace gtsam {
/* ************************************************************************* */
SymmetricBlockMatrix SymmetricBlockMatrix::LikeActiveViewOf(const SymmetricBlockMatrix& other)
{
SymmetricBlockMatrix result;
result.variableColOffsets_.resize(other.nBlocks() + 1);
for(size_t i = 0; i < result.variableColOffsets_.size(); ++i)
result.variableColOffsets_[i] =
other.variableColOffsets_[other.blockStart_ + i] - other.variableColOffsets_[other.blockStart_];
result.matrix_.resize(other.cols(), other.cols());
result.assertInvariants();
return result;
}
/* ************************************************************************* */
SymmetricBlockMatrix SymmetricBlockMatrix::LikeActiveViewOf(
const SymmetricBlockMatrix& other) {
SymmetricBlockMatrix result;
result.variableColOffsets_.resize(other.nBlocks() + 1);
for (size_t i = 0; i < result.variableColOffsets_.size(); ++i)
result.variableColOffsets_[i] = other.variableColOffsets_[other.blockStart_
+ i] - other.variableColOffsets_[other.blockStart_];
result.matrix_.resize(other.cols(), other.cols());
result.assertInvariants();
return result;
}
/* ************************************************************************* */
SymmetricBlockMatrix SymmetricBlockMatrix::LikeActiveViewOf(const VerticalBlockMatrix& other)
{
SymmetricBlockMatrix result;
result.variableColOffsets_.resize(other.nBlocks() + 1);
for(size_t i = 0; i < result.variableColOffsets_.size(); ++i)
result.variableColOffsets_[i] =
other.variableColOffsets_[other.blockStart_ + i] - other.variableColOffsets_[other.blockStart_];
result.matrix_.resize(other.cols(), other.cols());
result.assertInvariants();
return result;
}
/* ************************************************************************* */
SymmetricBlockMatrix SymmetricBlockMatrix::LikeActiveViewOf(
const VerticalBlockMatrix& other) {
SymmetricBlockMatrix result;
result.variableColOffsets_.resize(other.nBlocks() + 1);
for (size_t i = 0; i < result.variableColOffsets_.size(); ++i)
result.variableColOffsets_[i] = other.variableColOffsets_[other.blockStart_
+ i] - other.variableColOffsets_[other.blockStart_];
result.matrix_.resize(other.cols(), other.cols());
result.assertInvariants();
return result;
}
/* ************************************************************************* */
VerticalBlockMatrix SymmetricBlockMatrix::choleskyPartial(DenseIndex nFrontals)
{
// Do dense elimination
if(!blockStart() == 0)
throw std::invalid_argument("Can only do Cholesky when the SymmetricBlockMatrix is not a restricted view, i.e. when blockStart == 0.");
if(!gtsam::choleskyPartial(matrix_, offset(nFrontals)))
throw CholeskyFailed();
/* ************************************************************************* */
VerticalBlockMatrix SymmetricBlockMatrix::choleskyPartial(
DenseIndex nFrontals) {
// Do dense elimination
if (!blockStart() == 0)
throw std::invalid_argument(
"Can only do Cholesky when the SymmetricBlockMatrix is not a restricted view, i.e. when blockStart == 0.");
if (!gtsam::choleskyPartial(matrix_, offset(nFrontals)))
throw CholeskyFailed();
// Split conditional
// Split conditional
// Create one big conditionals with many frontal variables.
gttic(Construct_conditional);
@ -68,11 +69,14 @@ namespace gtsam {
Ab.full().triangularView<Eigen::StrictlyLower>().setZero();
gttoc(Construct_conditional);
gttic(Remaining_factor);
// Take lower-right block of Ab_ to get the remaining factor
blockStart() = nFrontals;
gttoc(Remaining_factor);
gttic(Remaining_factor);
// Take lower-right block of Ab_ to get the remaining factor
blockStart() = nFrontals;
gttoc(Remaining_factor);
return Ab;
}
return Ab;
}
/* ************************************************************************* */
} //\ namespace gtsam
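A sketch of the factorization performed by choleskyPartial above (notation mine, written out from the code). Partitioning the symmetric matrix by the nFrontals frontal variables,

\begin{bmatrix} A_{11} & A_{12} \\ A_{12}^{\top} & A_{22} \end{bmatrix}
\;\longrightarrow\;
\begin{bmatrix} R_{11} & R_{12} \\ 0 & A_{22} - R_{12}^{\top} R_{12} \end{bmatrix},
\qquad
R_{11}^{\top} R_{11} = A_{11}, \quad R_{12} = R_{11}^{-\top} A_{12},

so the returned VerticalBlockMatrix Ab holds the upper-triangular conditional part [R_{11}\;\; R_{12}] (with its strictly lower triangle zeroed), while setting blockStart() = nFrontals leaves *this viewing the Schur complement A_{22} - A_{12}^{\top} A_{11}^{-1} A_{12}, i.e. the remaining factor.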

View File

@ -21,32 +21,32 @@
namespace gtsam {
/* ************************************************************************* */
VerticalBlockMatrix VerticalBlockMatrix::LikeActiveViewOf(const VerticalBlockMatrix& other)
{
VerticalBlockMatrix result;
result.variableColOffsets_.resize(other.nBlocks() + 1);
for(size_t i = 0; i < result.variableColOffsets_.size(); ++i)
result.variableColOffsets_[i] =
other.variableColOffsets_[other.blockStart_ + i] - other.variableColOffsets_[other.blockStart_];
result.matrix_.resize(other.rows(), result.variableColOffsets_.back());
result.rowEnd_ = other.rows();
result.assertInvariants();
return result;
}
/* ************************************************************************* */
VerticalBlockMatrix VerticalBlockMatrix::LikeActiveViewOf(
const VerticalBlockMatrix& other) {
VerticalBlockMatrix result;
result.variableColOffsets_.resize(other.nBlocks() + 1);
for (size_t i = 0; i < result.variableColOffsets_.size(); ++i)
result.variableColOffsets_[i] = other.variableColOffsets_[other.blockStart_
+ i] - other.variableColOffsets_[other.blockStart_];
result.matrix_.resize(other.rows(), result.variableColOffsets_.back());
result.rowEnd_ = other.rows();
result.assertInvariants();
return result;
}
/* ************************************************************************* */
VerticalBlockMatrix VerticalBlockMatrix::LikeActiveViewOf(const SymmetricBlockMatrix& other, DenseIndex height)
{
VerticalBlockMatrix result;
result.variableColOffsets_.resize(other.nBlocks() + 1);
for(size_t i = 0; i < result.variableColOffsets_.size(); ++i)
result.variableColOffsets_[i] =
other.variableColOffsets_[other.blockStart_ + i] - other.variableColOffsets_[other.blockStart_];
result.matrix_.resize(height, result.variableColOffsets_.back());
result.rowEnd_ = height;
result.assertInvariants();
return result;
}
/* ************************************************************************* */
VerticalBlockMatrix VerticalBlockMatrix::LikeActiveViewOf(
const SymmetricBlockMatrix& other, DenseIndex height) {
VerticalBlockMatrix result;
result.variableColOffsets_.resize(other.nBlocks() + 1);
for (size_t i = 0; i < result.variableColOffsets_.size(); ++i)
result.variableColOffsets_[i] = other.variableColOffsets_[other.blockStart_
+ i] - other.variableColOffsets_[other.blockStart_];
result.matrix_.resize(height, result.variableColOffsets_.back());
result.rowEnd_ = height;
result.assertInvariants();
return result;
}
}
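A minimal usage sketch of LikeActiveViewOf (not part of this change; it reuses the constructor exercised by the new unit test in the next file, and the names are illustrative only):

#include <gtsam/base/VerticalBlockMatrix.h>
#include <boost/assign/list_of.hpp>
using namespace gtsam;
using boost::assign::list_of;

void likeActiveViewOfExample() {
  // A 6x6 matrix split into column blocks of widths 3, 2 and 1
  VerticalBlockMatrix other(list_of(3)(2)(1), Matrix::Identity(6, 6));
  // Allocate a fresh matrix with the same column-block layout as other's active view
  VerticalBlockMatrix Ab = VerticalBlockMatrix::LikeActiveViewOf(other);
  Ab.full().setZero();  // storage is allocated but not initialized by LikeActiveViewOf
}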

View File

@ -0,0 +1,47 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file testVerticalBlockMatrix.cpp
* @brief Unit tests for VerticalBlockMatrix class
* @author Frank Dellaert
* @date February 15, 2014
**/
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/VerticalBlockMatrix.h>
#include <boost/assign/list_of.hpp>
using namespace std;
using namespace gtsam;
using boost::assign::list_of;
//*****************************************************************************
TEST(VerticalBlockMatrix, constructor) {
VerticalBlockMatrix actual(list_of(3)(2)(1),
(Matrix(6, 6) << 1, 2, 3, 4, 5, 6, //
2, 8, 9, 10, 11, 12, //
3, 9, 15, 16, 17, 18, //
4, 10, 16, 22, 23, 24, //
5, 11, 17, 23, 29, 30, //
6, 12, 18, 24, 30, 36));
EXPECT_LONGS_EQUAL(6,actual.rows());
EXPECT_LONGS_EQUAL(6,actual.cols());
EXPECT_LONGS_EQUAL(3,actual.nBlocks());
}
//*****************************************************************************
int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}
//*****************************************************************************

View File

@ -78,17 +78,16 @@ Point2 Cal3Bundler::uncalibrate(const Point2& p, //
// Derivatives make use of intermediate variables above
if (Dcal) {
double rx = r * x, ry = r * y;
Eigen::Matrix<double, 2, 3> D;
D << u, f_ * rx, f_ * r * rx, v, f_ * ry, f_ * r * ry;
*Dcal = D;
Dcal->resize(2, 3);
*Dcal << u, f_ * rx, f_ * r * rx, v, f_ * ry, f_ * r * ry;
}
if (Dp) {
const double a = 2. * (k1_ + 2. * k2_ * r);
const double axx = a * x * x, axy = a * x * y, ayy = a * y * y;
Eigen::Matrix<double, 2, 2> D;
D << g + axx, axy, axy, g + ayy;
*Dp = f_ * D;
Dp->resize(2,2);
*Dp << g + axx, axy, axy, g + ayy;
*Dp *= f_;
}
return Point2(u0_ + f_ * u, v0_ + f_ * v);
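The resize-and-fill version computes the same Jacobians as before, just without the fixed-size temporaries. For reference, assuming the intermediate variables above are r = x^2 + y^2, g = 1 + k_1 r + k_2 r^2, u = g x, v = g y (consistent with how they are used):

\pi(x,y) = \bigl(u_0 + f\,g\,x,\; v_0 + f\,g\,y\bigr),
\qquad
\frac{\partial \pi}{\partial (f,k_1,k_2)} =
\begin{bmatrix} g x & f r x & f r^2 x \\ g y & f r y & f r^2 y \end{bmatrix},
\qquad
\frac{\partial \pi}{\partial (x,y)} =
f \begin{bmatrix} g + a x^2 & a x y \\ a x y & g + a y^2 \end{bmatrix},
\quad a = 2(k_1 + 2 k_2 r).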

View File

@ -271,6 +271,10 @@ public:
*/
inline static Point2 project_to_camera(const Point3& P,
boost::optional<Matrix&> Dpoint = boost::none) {
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
if (P.z() <= 0)
throw CheiralityException();
#endif
if (Dpoint) {
double d = 1.0 / P.z(), d2 = d * d;
*Dpoint = (Matrix(2, 3) << d, 0.0, -P.x() * d2, 0.0, d, -P.y() * d2);
@ -300,40 +304,25 @@ public:
// Transform to camera coordinates and check cheirality
const Point3 pc = pose_.transform_to(pw);
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
if (pc.z() <= 0)
throw CheiralityException();
#endif
// Project to normalized image coordinates
const Point2 pn = project_to_camera(pc);
if (Dpose || Dpoint) {
// optimized version of derivatives, see CalibratedCamera.nb
const double z = pc.z(), d = 1.0 / z;
const double u = pn.x(), v = pn.y();
Matrix Dpn_pose(2, 6), Dpn_point(2, 3);
if (Dpose) {
double uv = u * v, uu = u * u, vv = v * v;
Dpn_pose << uv, -1 - uu, v, -d, 0, d * u, 1 + vv, -uv, -u, 0, -d, d * v;
}
if (Dpoint) {
const Matrix R(pose_.rotation().matrix());
Dpn_point << //
R(0, 0) - u * R(0, 2), R(1, 0) - u * R(1, 2), R(2, 0) - u * R(2, 2), //
/**/R(0, 1) - v * R(0, 2), R(1, 1) - v * R(1, 2), R(2, 1) - v * R(2, 2);
Dpn_point *= d;
}
// uncalibration
Matrix Dpi_pn; // 2*2
Matrix Dpi_pn(2, 2);
const Point2 pi = K_.uncalibrate(pn, Dcal, Dpi_pn);
// chain the Jacobian matrices
if (Dpose)
*Dpose = Dpi_pn * Dpn_pose;
if (Dpoint)
*Dpoint = Dpi_pn * Dpn_point;
if (Dpose) {
Dpose->resize(2, 6);
calculateDpose(pn, d, Dpi_pn, *Dpose);
}
if (Dpoint) {
Dpoint->resize(2, 3);
calculateDpoint(pn, d, pose_.rotation().matrix(), Dpi_pn, *Dpoint);
}
return pi;
} else
return K_.uncalibrate(pn, Dcal);
@ -391,27 +380,29 @@ public:
boost::optional<Matrix&> Dcamera = boost::none,
boost::optional<Matrix&> Dpoint = boost::none) const {
const Point3 pc = pose_.transform_to(pw);
const Point2 pn = project_to_camera(pc);
if (!Dcamera && !Dpoint) {
const Point3 pc = pose_.transform_to(pw);
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
if (pc.z() <= 0)
throw CheiralityException();
#endif
const Point2 pn = project_to_camera(pc);
return K_.uncalibrate(pn);
}
} else {
const double z = pc.z(), d = 1.0 / z;
Matrix Dpose, Dp, Dcal;
const Point2 pi = this->project(pw, Dpose, Dp, Dcal);
if (Dcamera) {
*Dcamera = Matrix(2, this->dim());
Dcamera->leftCols(pose().dim()) = Dpose; // Jacobian wrt pose3
Dcamera->rightCols(calibration().dim()) = Dcal; // Jacobian wrt calib
// uncalibration
Matrix Dcal, Dpi_pn(2, 2);
const Point2 pi = K_.uncalibrate(pn, Dcal, Dpi_pn);
if (Dcamera) {
Dcamera->resize(2, this->dim());
calculateDpose(pn, d, Dpi_pn, Dcamera->leftCols<6>());
Dcamera->rightCols(K_.dim()) = Dcal; // Jacobian wrt calib
}
if (Dpoint) {
Dpoint->resize(2, 3);
calculateDpoint(pn, d, pose_.rotation().matrix(), Dpi_pn, *Dpoint);
}
return pi;
}
if (Dpoint)
*Dpoint = Dp;
return pi;
}
/// backproject a 2-dimensional point to a 3-dimensional point at given depth
@ -516,6 +507,50 @@ public:
private:
/**
* Calculate Jacobian with respect to pose
* @param pn projection in normalized coordinates
* @param d disparity (inverse depth)
* @param Dpi_pn derivative of uncalibrate with respect to pn
* @param Dpose Output argument, can be matrix or block, assumed right size !
* See http://eigen.tuxfamily.org/dox/TopicFunctionTakingEigenTypes.html
*/
template<typename Derived>
static void calculateDpose(const Point2& pn, double d, const Matrix& Dpi_pn,
Eigen::MatrixBase<Derived> const & Dpose) {
// optimized version of derivatives, see CalibratedCamera.nb
const double u = pn.x(), v = pn.y();
double uv = u * v, uu = u * u, vv = v * v;
Eigen::Matrix<double, 2, 6> Dpn_pose;
Dpn_pose << uv, -1 - uu, v, -d, 0, d * u, 1 + vv, -uv, -u, 0, -d, d * v;
assert(Dpose.rows()==2 && Dpose.cols()==6);
const_cast<Eigen::MatrixBase<Derived>&>(Dpose) = //
Dpi_pn.block<2, 2>(0, 0) * Dpn_pose;
}
/**
* Calculate Jacobian with respect to point
* @param pn projection in normalized coordinates
* @param d disparity (inverse depth)
* @param Dpi_pn derivative of uncalibrate with respect to pn
* @param Dpoint Output argument, can be matrix or block, assumed right size !
* See http://eigen.tuxfamily.org/dox/TopicFunctionTakingEigenTypes.html
*/
template<typename Derived>
static void calculateDpoint(const Point2& pn, double d, const Matrix& R,
const Matrix& Dpi_pn, Eigen::MatrixBase<Derived> const & Dpoint) {
// optimized version of derivatives, see CalibratedCamera.nb
const double u = pn.x(), v = pn.y();
Eigen::Matrix<double, 2, 3> Dpn_point;
Dpn_point << //
R(0, 0) - u * R(0, 2), R(1, 0) - u * R(1, 2), R(2, 0) - u * R(2, 2), //
/**/R(0, 1) - v * R(0, 2), R(1, 1) - v * R(1, 2), R(2, 1) - v * R(2, 2);
Dpn_point *= d;
assert(Dpoint.rows()==2 && Dpoint.cols()==3);
const_cast<Eigen::MatrixBase<Derived>&>(Dpoint) = //
Dpi_pn.block<2, 2>(0, 0) * Dpn_point;
}
/// @}
/// @name Advanced Interface
/// @{
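The two helpers above implement the chain rule for the calibrated projection \pi = K(\pi_n), where \pi_n is the normalized pinhole projection of the point in camera coordinates (notation mine):

\frac{\partial \pi}{\partial \text{pose}} = D_{\pi,\pi_n}\, D_{\pi_n,\text{pose}},
\qquad
\frac{\partial \pi}{\partial \text{point}} = D_{\pi,\pi_n}\, D_{\pi_n,\text{point}},

where D_{\pi,\pi_n} is the leading 2x2 block of Dpi_pn returned by uncalibrate, D_{\pi_n,\text{pose}} is the 2x6 matrix built in calculateDpose from (u, v, d), and D_{\pi_n,\text{point}} is the 2x3 matrix built in calculateDpoint from the rotation R and the inverse depth d = 1/z. Templating the helpers on Eigen::MatrixBase lets project2 fill a block of Dcamera directly instead of assembling a temporary.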

View File

@ -101,10 +101,9 @@ Unit3 Rot3::operator*(const Unit3& p) const {
// see doc/math.lyx, SO(3) section
Point3 Rot3::unrotate(const Point3& p,
boost::optional<Matrix&> H1, boost::optional<Matrix&> H2) const {
const Matrix Rt(transpose());
Point3 q(Rt*p.vector()); // q = Rt*p
Point3 q(transpose()*p.vector()); // q = Rt*p
if (H1) *H1 = skewSymmetric(q.x(), q.y(), q.z());
if (H2) *H2 = Rt;
if (H2) *H2 = transpose();
return q;
}
@ -175,6 +174,39 @@ Vector Rot3::quaternion() const {
return v;
}
/* ************************************************************************* */
Matrix3 Rot3::rightJacobianExpMapSO3(const Vector3& x) {
// x is the axis-angle representation (exponential coordinates) for a rotation
double normx = norm_2(x); // rotation angle
Matrix3 Jr;
if (normx < 10e-8){
Jr = Matrix3::Identity();
}
else{
const Matrix3 X = skewSymmetric(x); // element of Lie algebra so(3): X = x^
Jr = Matrix3::Identity() - ((1-cos(normx))/(normx*normx)) * X +
((normx-sin(normx))/(normx*normx*normx)) * X * X; // right Jacobian
}
return Jr;
}
/* ************************************************************************* */
Matrix3 Rot3::rightJacobianExpMapSO3inverse(const Vector3& x) {
// x is the axis-angle representation (exponential coordinates) for a rotation
double normx = norm_2(x); // rotation angle
Matrix3 Jrinv;
if (normx < 10e-8){
Jrinv = Matrix3::Identity();
}
else{
const Matrix3 X = skewSymmetric(x); // element of Lie algebra so(3): X = x^
Jrinv = Matrix3::Identity() +
0.5 * X + (1/(normx*normx) - (1+cos(normx))/(2*normx * sin(normx)) ) * X * X;
}
return Jrinv;
}
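Written out, with \theta = \lVert x \rVert and X = \hat{x} the skew-symmetric matrix of x, the two functions above compute

J_r(x) = I - \frac{1-\cos\theta}{\theta^2}\,X + \frac{\theta-\sin\theta}{\theta^3}\,X^2,
\qquad
J_r^{-1}(x) = I + \tfrac{1}{2}X + \left(\frac{1}{\theta^2} - \frac{1+\cos\theta}{2\,\theta\sin\theta}\right) X^2,

both falling back to the identity below the 10e-8 angle threshold. These are the right Jacobian of the SO(3) exponential map and its inverse (Chirikjian, Vol. 2, around eq. (10.86), as cited in the header).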
/* ************************************************************************* */
pair<Matrix3, Vector3> RQ(const Matrix3& A) {

View File

@ -304,6 +304,17 @@ namespace gtsam {
/// Left-trivialized derivative inverse of the exponential map
static Matrix3 dexpInvL(const Vector3& v);
/**
* Right Jacobian for Exponential map in SO(3) - equation (10.86) and following equations in
* G.S. Chirikjian, "Stochastic Models, Information Theory, and Lie Groups", Volume 2, 2008.
*/
static Matrix3 rightJacobianExpMapSO3(const Vector3& x);
/** Right Jacobian for Log map in SO(3) - equation (10.86) and following equations in
* G.S. Chirikjian, "Stochastic Models, Information Theory, and Lie Groups", Volume 2, 2008.
*/
static Matrix3 rightJacobianExpMapSO3inverse(const Vector3& x);
/// @}
/// @name Group Action on Point3
/// @{

View File

@ -169,38 +169,55 @@ static Point2 project3(const Pose3& pose, const Point3& point, const Cal3_S2& ca
}
/* ************************************************************************* */
static Point2 projectInfinity3(const Pose3& pose, const Point2& point2D, const Cal3_S2& cal) {
Point3 point(point2D.x(), point2D.y(), 1.0);
return Camera(pose,cal).projectPointAtInfinity(point);
}
/* ************************************************************************* */
TEST( PinholeCamera, Dproject_point_pose)
TEST( PinholeCamera, Dproject)
{
Matrix Dpose, Dpoint, Dcal;
Point2 result = camera.project(point1, Dpose, Dpoint, Dcal);
Matrix numerical_pose = numericalDerivative31(project3, pose1, point1, K);
Matrix numerical_point = numericalDerivative32(project3, pose1, point1, K);
Matrix numerical_cal = numericalDerivative33(project3, pose1, point1, K);
CHECK(assert_equal(result, Point2(-100, 100) ));
CHECK(assert_equal(Dpose, numerical_pose, 1e-7));
CHECK(assert_equal(Dpoint, numerical_point,1e-7));
CHECK(assert_equal(Dcal, numerical_cal,1e-7));
CHECK(assert_equal(Point2(-100, 100), result));
CHECK(assert_equal(numerical_pose, Dpose, 1e-7));
CHECK(assert_equal(numerical_point, Dpoint, 1e-7));
CHECK(assert_equal(numerical_cal, Dcal, 1e-7));
}
/* ************************************************************************* */
TEST( PinholeCamera, Dproject_point_pose_Infinity)
static Point2 projectInfinity3(const Pose3& pose, const Point2& point2D, const Cal3_S2& cal) {
Point3 point(point2D.x(), point2D.y(), 1.0);
return Camera(pose,cal).projectPointAtInfinity(point);
}
/* ************************************************************************* */
//TEST( PinholeCamera, Dproject_Infinity)
//{
// Matrix Dpose, Dpoint, Dcal;
// Point2 point2D(-0.08,-0.08);
// Point3 point3D(point1.x(), point1.y(), 1.0);
// Point2 result = camera.projectPointAtInfinity(point3D, Dpose, Dpoint, Dcal);
// Matrix numerical_pose = numericalDerivative31(projectInfinity3, pose1, point2D, K);
// Matrix numerical_point = numericalDerivative32(projectInfinity3, pose1, point2D, K);
// Matrix numerical_cal = numericalDerivative33(projectInfinity3, pose1, point2D, K);
// CHECK(assert_equal(numerical_pose, Dpose, 1e-7));
// CHECK(assert_equal(numerical_point, Dpoint, 1e-7));
// CHECK(assert_equal(numerical_cal, Dcal, 1e-7));
//}
/* ************************************************************************* */
static Point2 project4(const Camera& camera, const Point3& point) {
return camera.project2(point);
}
/* ************************************************************************* */
TEST( PinholeCamera, Dproject2)
{
Matrix Dpose, Dpoint, Dcal;
Point2 point2D(-0.08,-0.08);
Point3 point3D(point1.x(), point1.y(), 1.0);
Point2 result = camera.projectPointAtInfinity(point3D, Dpose, Dpoint, Dcal);
Matrix numerical_pose = numericalDerivative31(projectInfinity3, pose1, point2D, K);
Matrix numerical_point = numericalDerivative32(projectInfinity3, pose1, point2D, K);
Matrix numerical_cal = numericalDerivative33(projectInfinity3, pose1, point2D, K);
CHECK(assert_equal(Dpose, numerical_pose, 1e-7));
CHECK(assert_equal(Dpoint, numerical_point,1e-7));
CHECK(assert_equal(Dcal, numerical_cal,1e-7));
Matrix Dcamera, Dpoint;
Point2 result = camera.project2(point1, Dcamera, Dpoint);
Matrix numerical_camera = numericalDerivative21(project4, camera, point1);
Matrix numerical_point = numericalDerivative22(project4, camera, point1);
CHECK(assert_equal(result, Point2(-100, 100) ));
CHECK(assert_equal(numerical_camera, Dcamera, 1e-7));
CHECK(assert_equal(numerical_point, Dpoint, 1e-7));
}
/* ************************************************************************* */

View File

@ -200,6 +200,37 @@ TEST(Rot3, log)
CHECK_OMEGA_ZERO(x*2.*PI,y*2.*PI,z*2.*PI)
}
Rot3 evaluateRotation(const Vector3 thetahat){
return Rot3::Expmap(thetahat);
}
Vector3 evaluateLogRotation(const Vector3 thetahat, const Vector3 deltatheta){
return Rot3::Logmap( Rot3::Expmap(thetahat).compose( Rot3::Expmap(deltatheta) ) );
}
/* ************************************************************************* */
TEST( Rot3, rightJacobianExpMapSO3 )
{
// Linearization point
Vector3 thetahat; thetahat << 0.1, 0, 0;
Matrix expectedJacobian = numericalDerivative11<Rot3, LieVector>(boost::bind(&evaluateRotation, _1), LieVector(thetahat));
Matrix actualJacobian = Rot3::rightJacobianExpMapSO3(thetahat);
EXPECT(assert_equal(expectedJacobian, actualJacobian));
}
/* ************************************************************************* */
TEST( Rot3, rightJacobianExpMapSO3inverse )
{
// Linearization point
Vector3 thetahat; thetahat << 0.1,0.1,0; ///< Current estimate of rotation rate bias
Vector3 deltatheta; deltatheta << 0, 0, 0;
Matrix expectedJacobian = numericalDerivative11<LieVector>(boost::bind(&evaluateLogRotation, thetahat, _1), LieVector(deltatheta));
Matrix actualJacobian = Rot3::rightJacobianExpMapSO3inverse(thetahat);
EXPECT(assert_equal(expectedJacobian, actualJacobian));
}
/* ************************************************************************* */
TEST(Rot3, manifold_caley)
{

View File

@ -96,6 +96,12 @@ namespace gtsam {
*/
virtual Matrix information() const = 0;
/// Return the diagonal of the Hessian for this factor
virtual VectorValues hessianDiagonal() const = 0;
/// Return the block diagonal of the Hessian for this factor
virtual std::map<Key,Matrix> hessianBlockDiagonal() const = 0;
/** Clone a factor (make a deep copy) */
virtual GaussianFactor::shared_ptr clone() const = 0;
@ -112,6 +118,12 @@ namespace gtsam {
/// y += alpha * A'*A*x
virtual void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const = 0;
/// y += alpha * A'*A*x
virtual void multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys) const = 0;
/// y += alpha * A'*A*x
virtual void multiplyHessianAdd(double alpha, const double* x, double* y) const = 0;
/// A'*b for Jacobian, eta for Hessian
virtual VectorValues gradientAtZero() const = 0;
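A note on the new raw-pointer interface, inferred from the implementations later in this diff: x and y point into flat arrays covering all variables, and the keys argument is the vector of accumulated scalar offsets produced by GaussianFactorGraph::getkeydim(), so the slice belonging to variable keys_[j] starts at x + keys[keys_[j]] and has length keys[keys_[j] + 1] - keys[keys_[j]]. The graph-level overload without an offsets argument computes them itself via getkeydim() and passes them to each factor; the factor-level overload without offsets is left as an empty stub in this commit.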

View File

@ -54,6 +54,27 @@ namespace gtsam {
return keys;
}
/* ************************************************************************* */
vector<size_t> GaussianFactorGraph::getkeydim() const {
// First find dimensions of each variable
vector<size_t> dims;
BOOST_FOREACH(const sharedFactor& factor, *this) {
for (GaussianFactor::const_iterator pos = factor->begin();
pos != factor->end(); ++pos) {
if (dims.size() <= *pos)
dims.resize(*pos + 1, 0);
dims[*pos] = factor->getDim(pos);
}
}
// Find accumulated dimensions for variables
vector<size_t> dims_accumulated;
dims_accumulated.resize(dims.size()+1,0);
dims_accumulated[0]=0;
for (int i=1; i<dims_accumulated.size(); i++)
dims_accumulated[i] = dims_accumulated[i-1]+dims[i-1];
return dims_accumulated;
}
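A worked example of getkeydim() (not part of the diff): for a graph over two variables of dimensions 3 and 2, dims = {3, 2} and the returned dims_accumulated = {0, 3, 5}; entry i is the scalar offset of variable i in the flattened vector and the last entry is the total scalar dimension, which is exactly the layout assumed by the raw-pointer multiplyHessianAdd overloads.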
/* ************************************************************************* */
GaussianFactorGraph GaussianFactorGraph::clone() const {
GaussianFactorGraph result;
@ -79,21 +100,22 @@ namespace gtsam {
}
/* ************************************************************************* */
std::vector<boost::tuple<size_t, size_t, double> > GaussianFactorGraph::sparseJacobian() const {
vector<boost::tuple<size_t, size_t, double> > GaussianFactorGraph::sparseJacobian() const {
// First find dimensions of each variable
vector<size_t> dims;
BOOST_FOREACH(const sharedFactor& factor, *this) {
for(GaussianFactor::const_iterator pos = factor->begin(); pos != factor->end(); ++pos) {
if(dims.size() <= *pos)
for (GaussianFactor::const_iterator pos = factor->begin();
pos != factor->end(); ++pos) {
if (dims.size() <= *pos)
dims.resize(*pos + 1, 0);
dims[*pos] = factor->getDim(pos);
}
}
// Compute first scalar column of each variable
vector<size_t> columnIndices(dims.size()+1, 0);
for(size_t j=1; j<dims.size()+1; ++j)
columnIndices[j] = columnIndices[j-1] + dims[j-1];
vector<size_t> columnIndices(dims.size() + 1, 0);
for (size_t j = 1; j < dims.size() + 1; ++j)
columnIndices[j] = columnIndices[j - 1] + dims[j - 1];
// Iterate over all factors, adding sparse scalar entries
typedef boost::tuple<size_t, size_t, double> triplet;
@ -104,7 +126,8 @@ namespace gtsam {
JacobianFactor::shared_ptr jacobianFactor(
boost::dynamic_pointer_cast<JacobianFactor>(factor));
if (!jacobianFactor) {
HessianFactor::shared_ptr hessian(boost::dynamic_pointer_cast<HessianFactor>(factor));
HessianFactor::shared_ptr hessian(
boost::dynamic_pointer_cast<HessianFactor>(factor));
if (hessian)
jacobianFactor.reset(new JacobianFactor(*hessian));
else
@ -115,22 +138,23 @@ namespace gtsam {
// Whiten the factor and add entries for it
// iterate over all variables in the factor
const JacobianFactor whitened(jacobianFactor->whiten());
for(JacobianFactor::const_iterator pos=whitened.begin(); pos<whitened.end(); ++pos) {
for (JacobianFactor::const_iterator pos = whitened.begin();
pos < whitened.end(); ++pos) {
JacobianFactor::constABlock whitenedA = whitened.getA(pos);
// find first column index for this key
size_t column_start = columnIndices[*pos];
for (size_t i = 0; i < (size_t) whitenedA.rows(); i++)
for (size_t j = 0; j < (size_t) whitenedA.cols(); j++) {
double s = whitenedA(i,j);
if (std::abs(s) > 1e-12) entries.push_back(
boost::make_tuple(row+i, column_start+j, s));
double s = whitenedA(i, j);
if (std::abs(s) > 1e-12)
entries.push_back(boost::make_tuple(row + i, column_start + j, s));
}
}
JacobianFactor::constBVector whitenedb(whitened.getb());
size_t bcolumn = columnIndices.back();
for (size_t i = 0; i < (size_t) whitenedb.size(); i++)
entries.push_back(boost::make_tuple(row+i, bcolumn, whitenedb(i)));
entries.push_back(boost::make_tuple(row + i, bcolumn, whitenedb(i)));
// Increment row index
row += jacobianFactor->rows();
@ -143,7 +167,7 @@ namespace gtsam {
// call sparseJacobian
typedef boost::tuple<size_t, size_t, double> triplet;
std::vector<triplet> result = sparseJacobian();
vector<triplet> result = sparseJacobian();
// translate to base 1 matrix
size_t nzmax = result.size();
@ -158,22 +182,24 @@ namespace gtsam {
}
/* ************************************************************************* */
Matrix GaussianFactorGraph::augmentedJacobian(boost::optional<const Ordering&> optionalOrdering) const {
Matrix GaussianFactorGraph::augmentedJacobian(
boost::optional<const Ordering&> optionalOrdering) const {
// combine all factors
JacobianFactor combined(*this, optionalOrdering);
return combined.augmentedJacobian();
}
/* ************************************************************************* */
std::pair<Matrix,Vector> GaussianFactorGraph::jacobian(boost::optional<const Ordering&> optionalOrdering) const {
pair<Matrix, Vector> GaussianFactorGraph::jacobian(
boost::optional<const Ordering&> optionalOrdering) const {
Matrix augmented = augmentedJacobian(optionalOrdering);
return make_pair(
augmented.leftCols(augmented.cols()-1),
augmented.col(augmented.cols()-1));
return make_pair(augmented.leftCols(augmented.cols() - 1),
augmented.col(augmented.cols() - 1));
}
/* ************************************************************************* */
Matrix GaussianFactorGraph::augmentedHessian(boost::optional<const Ordering&> optionalOrdering) const {
Matrix GaussianFactorGraph::augmentedHessian(
boost::optional<const Ordering&> optionalOrdering) const {
// combine all factors and get upper-triangular part of Hessian
HessianFactor combined(*this, Scatter(*this, optionalOrdering));
Matrix result = combined.info();
@ -183,13 +209,44 @@ namespace gtsam {
}
/* ************************************************************************* */
std::pair<Matrix,Vector> GaussianFactorGraph::hessian(boost::optional<const Ordering&> optionalOrdering) const {
pair<Matrix, Vector> GaussianFactorGraph::hessian(
boost::optional<const Ordering&> optionalOrdering) const {
Matrix augmented = augmentedHessian(optionalOrdering);
return make_pair(
augmented.topLeftCorner(augmented.rows()-1, augmented.rows()-1),
augmented.col(augmented.rows()-1).head(augmented.rows()-1));
augmented.topLeftCorner(augmented.rows() - 1, augmented.rows() - 1),
augmented.col(augmented.rows() - 1).head(augmented.rows() - 1));
}
/* ************************************************************************* */
VectorValues GaussianFactorGraph::hessianDiagonal() const {
VectorValues d;
BOOST_FOREACH(const sharedFactor& factor, *this) {
if(factor){
VectorValues di = factor->hessianDiagonal();
d.addInPlace_(di);
}
}
return d;
}
/* ************************************************************************* */
map<Key,Matrix> GaussianFactorGraph::hessianBlockDiagonal() const {
map<Key,Matrix> blocks;
BOOST_FOREACH(const sharedFactor& factor, *this) {
map<Key,Matrix> BD = factor->hessianBlockDiagonal();
map<Key,Matrix>::const_iterator it = BD.begin();
for(;it!=BD.end();it++) {
Key j = it->first; // variable key for this block
const Matrix& Bj = it->second;
if (blocks.count(j))
blocks[j] += Bj;
else
blocks.insert(make_pair(j,Bj));
}
}
return blocks;
}
/* ************************************************************************* */
VectorValues GaussianFactorGraph::optimize(OptionalOrdering ordering, const Eliminate& function) const
{
@ -201,9 +258,9 @@ namespace gtsam {
namespace {
JacobianFactor::shared_ptr convertToJacobianFactorPtr(const GaussianFactor::shared_ptr &gf) {
JacobianFactor::shared_ptr result = boost::dynamic_pointer_cast<JacobianFactor>(gf);
if( !result ) {
result = boost::make_shared<JacobianFactor>(*gf); // Convert any non-Jacobian factors to Jacobians (e.g. Hessian -> Jacobian with Cholesky)
}
if( !result )
// Convert any non-Jacobian factors to Jacobians (e.g. Hessian -> Jacobian with Cholesky)
result = boost::make_shared<JacobianFactor>(*gf);
return result;
}
}
@ -274,7 +331,16 @@ namespace gtsam {
void GaussianFactorGraph::multiplyHessianAdd(double alpha,
const VectorValues& x, VectorValues& y) const {
BOOST_FOREACH(const GaussianFactor::shared_ptr& f, *this)
f->multiplyHessianAdd(alpha, x, y);
f->multiplyHessianAdd(alpha, x, y);
}
/* ************************************************************************* */
void GaussianFactorGraph::multiplyHessianAdd(double alpha,
const double* x, double* y) const {
vector<size_t> FactorKeys = getkeydim();
BOOST_FOREACH(const GaussianFactor::shared_ptr& f, *this)
f->multiplyHessianAdd(alpha, x, y, FactorKeys);
}
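A minimal sketch of driving the new raw-pointer path from user code (not part of the diff; plain std::vector buffers stand in for whatever the caller actually uses, and a non-empty graph is assumed):

#include <gtsam/linear/GaussianFactorGraph.h>
#include <vector>

void rawHessianProduct(const gtsam::GaussianFactorGraph& gfg) {
  std::vector<size_t> offsets = gfg.getkeydim();  // accumulated scalar offsets, last entry = total dim
  const size_t n = offsets.back();
  std::vector<double> x(n, 1.0), y(n, 0.0);       // flat input and accumulator
  gfg.multiplyHessianAdd(1.0, &x[0], &y[0]);      // y += 1.0 * A'A * x
}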
/* ************************************************************************* */

View File

@ -135,11 +135,15 @@ namespace gtsam {
typedef FastSet<Key> Keys;
Keys keys() const;
std::vector<size_t> getkeydim() const;
/** unnormalized error */
double error(const VectorValues& x) const {
double total_error = 0.;
BOOST_FOREACH(const sharedFactor& factor, *this)
total_error += factor->error(x);
BOOST_FOREACH(const sharedFactor& factor, *this){
if(factor)
total_error += factor->error(x);
}
return total_error;
}
@ -219,6 +223,12 @@ namespace gtsam {
*/
std::pair<Matrix,Vector> hessian(boost::optional<const Ordering&> optionalOrdering = boost::none) const;
/** Return only the diagonal of the Hessian A'*A, as a VectorValues */
VectorValues hessianDiagonal() const;
/** Return the block diagonal of the Hessian for this factor */
std::map<Key,Matrix> hessianBlockDiagonal() const;
/** Solve the factor graph by performing multifrontal variable elimination in COLAMD order using
* the dense elimination function specified in \c function (default EliminatePreferCholesky),
* followed by back-substitution in the Bayes tree resulting from elimination. Is equivalent
@ -288,6 +298,10 @@ namespace gtsam {
void multiplyHessianAdd(double alpha, const VectorValues& x,
VectorValues& y) const;
///** y += alpha*A'A*x */
void multiplyHessianAdd(double alpha, const double* x,
double* y) const;
///** In-place version e <- A*x that overwrites e. */
void multiplyInPlace(const VectorValues& x, Errors& e) const;

View File

@ -61,41 +61,44 @@ string SlotEntry::toString() const {
}
/* ************************************************************************* */
Scatter::Scatter(const GaussianFactorGraph& gfg, boost::optional<const Ordering&> ordering)
{
Scatter::Scatter(const GaussianFactorGraph& gfg,
boost::optional<const Ordering&> ordering) {
gttic(Scatter_Constructor);
static const size_t none = std::numeric_limits<size_t>::max();
// First do the set union.
BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, gfg) {
if(factor) {
for(GaussianFactor::const_iterator variable = factor->begin(); variable != factor->end(); ++variable) {
if (factor) {
for (GaussianFactor::const_iterator variable = factor->begin();
variable != factor->end(); ++variable) {
// TODO: Fix this hack to cope with zero-row Jacobians that come from BayesTreeOrphanWrappers
const JacobianFactor* asJacobian = dynamic_cast<const JacobianFactor*>(factor.get());
if(!asJacobian || asJacobian->cols() > 1)
this->insert(make_pair(*variable, SlotEntry(none, factor->getDim(variable))));
const JacobianFactor* asJacobian =
dynamic_cast<const JacobianFactor*>(factor.get());
if (!asJacobian || asJacobian->cols() > 1)
this->insert(
make_pair(*variable, SlotEntry(none, factor->getDim(variable))));
}
}
}
// If we have an ordering, pre-fill the ordered variables first
size_t slot = 0;
if(ordering) {
if (ordering) {
BOOST_FOREACH(Key key, *ordering) {
const_iterator entry = find(key);
if(entry == end())
if (entry == end())
throw std::invalid_argument(
"The ordering provided to the HessianFactor Scatter constructor\n"
"contained extra variables that did not appear in the factors to combine.");
at(key).slot = (slot ++);
"contained extra variables that did not appear in the factors to combine.");
at(key).slot = (slot++);
}
}
// Next fill in the slot indices (we can only get these after doing the set
// union.
BOOST_FOREACH(value_type& var_slot, *this) {
if(var_slot.second.slot == none)
var_slot.second.slot = (slot ++);
if (var_slot.second.slot == none)
var_slot.second.slot = (slot++);
}
}
@ -343,6 +346,30 @@ Matrix HessianFactor::information() const
return info_.range(0, this->size(), 0, this->size()).selfadjointView();
}
/* ************************************************************************* */
VectorValues HessianFactor::hessianDiagonal() const {
VectorValues d;
// Loop over all variables
for (DenseIndex j = 0; j < (DenseIndex)size(); ++j) {
// Get the diagonal block, and insert its diagonal
Matrix B = info_(j, j).selfadjointView();
d.insert(keys_[j],B.diagonal());
}
return d;
}
/* ************************************************************************* */
map<Key,Matrix> HessianFactor::hessianBlockDiagonal() const {
map<Key,Matrix> blocks;
// Loop over all variables
for (DenseIndex j = 0; j < (DenseIndex)size(); ++j) {
// Get the diagonal block, and insert it
Matrix B = info_(j, j).selfadjointView();
blocks.insert(make_pair(keys_[j],B));
}
return blocks;
}
/* ************************************************************************* */
Matrix HessianFactor::augmentedJacobian() const
{
@ -494,7 +521,7 @@ void HessianFactor::multiplyHessianAdd(double alpha, const VectorValues& x,
// copy to yvalues
for(DenseIndex i = 0; i < (DenseIndex)size(); ++i) {
bool didNotExist;
bool didNotExist;
VectorValues::iterator it;
boost::tie(it, didNotExist) = yvalues.tryInsert(keys_[i], Vector());
if (didNotExist)
@ -504,6 +531,48 @@ void HessianFactor::multiplyHessianAdd(double alpha, const VectorValues& x,
}
}
/* ************************************************************************* */
void HessianFactor::multiplyHessianAdd(double alpha, const double* x,
double* yvalues, vector<size_t> offsets) const {
// Use eigen magic to access raw memory
typedef Eigen::Matrix<double, Eigen::Dynamic, 1> DVector;
typedef Eigen::Map<DVector> DMap;
typedef Eigen::Map<const DVector> ConstDMap;
// Create a vector of temporary y values, corresponding to rows i
vector<Vector> y;
y.reserve(size());
for (const_iterator it = begin(); it != end(); it++)
y.push_back(zero(getDim(it)));
// Accessing the VectorValues one by one is expensive
// So we will loop over columns to access x only once per column
// And fill the above temporary y values, to be added into yvalues after
for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
DenseIndex i = 0;
for (; i < j; ++i)
y[i] += info_(i, j).knownOffDiagonal()
* ConstDMap(x + offsets[keys_[j]],
offsets[keys_[j] + 1] - offsets[keys_[j]]);
// blocks on the diagonal are only half
y[i] += info_(j, j).selfadjointView()
* ConstDMap(x + offsets[keys_[j]],
offsets[keys_[j] + 1] - offsets[keys_[j]]);
// for below diagonal, we take transpose block from upper triangular part
for (i = j + 1; i < (DenseIndex) size(); ++i)
y[i] += info_(i, j).knownOffDiagonal()
* ConstDMap(x + offsets[keys_[j]],
offsets[keys_[j] + 1] - offsets[keys_[j]]);
}
// copy to yvalues
for (DenseIndex i = 0; i < (DenseIndex) size(); ++i)
DMap(yvalues + offsets[keys_[i]], offsets[keys_[i] + 1] - offsets[keys_[i]]) +=
alpha * y[i];
}
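In block form the loop above computes, for each variable i (notation mine),

y_i \mathrel{+}= \alpha \sum_j H_{ij}\, x_j,

where H_{ij} is block (i, j) of info_ for i \le j and the transpose of the stored upper-triangular block for i > j (the knownOffDiagonal() / selfadjointView() calls), and x_j, y_j are the slices of the flat arrays selected by the accumulated offsets.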
/* ************************************************************************* */
VectorValues HessianFactor::gradientAtZero() const {
VectorValues g;

View File

@ -41,21 +41,28 @@ namespace gtsam {
GTSAM_EXPORT std::pair<boost::shared_ptr<GaussianConditional>, boost::shared_ptr<HessianFactor> >
EliminateCholesky(const GaussianFactorGraph& factors, const Ordering& keys);
// Definition of Scatter, which is an intermediate data structure used when building a
// HessianFactor incrementally, to get the keys in the right order. The "scatter" is a map from
// global variable indices to slot indices in the union of involved variables. We also include
// the dimensionality of the variable.
/**
* One SlotEntry stores the slot index for a variable, as well its dimension.
*/
struct GTSAM_EXPORT SlotEntry {
size_t slot;
size_t dimension;
size_t slot, dimension;
SlotEntry(size_t _slot, size_t _dimension)
: slot(_slot), dimension(_dimension) {}
std::string toString() const;
};
class Scatter : public FastMap<Key, SlotEntry> {
/**
* Scatter is an intermediate data structure used when building a HessianFactor
* incrementally, to get the keys in the right order. The "scatter" is a map from
* global variable indices to slot indices in the union of involved variables.
* We also include the dimensionality of the variable.
*/
class Scatter: public FastMap<Key, SlotEntry> {
public:
Scatter() {}
Scatter(const GaussianFactorGraph& gfg, boost::optional<const Ordering&> ordering = boost::none);
Scatter() {
}
Scatter(const GaussianFactorGraph& gfg,
boost::optional<const Ordering&> ordering = boost::none);
};
/**
@ -134,6 +141,7 @@ namespace gtsam {
typedef SymmetricBlockMatrix::Block Block; ///< A block from the Hessian matrix
typedef SymmetricBlockMatrix::constBlock constBlock; ///< A block from the Hessian matrix (const version)
/** default constructor for I/O */
HessianFactor();
@ -328,7 +336,13 @@ namespace gtsam {
* GaussianFactor.
*/
virtual Matrix information() const;
/// Return the diagonal of the Hessian for this factor
virtual VectorValues hessianDiagonal() const;
/// Return the block diagonal of the Hessian for this factor
virtual std::map<Key,Matrix> hessianBlockDiagonal() const;
/**
* Return (dense) matrix associated with factor
* @param ordering of variables needed for matrix column order
@ -363,6 +377,10 @@ namespace gtsam {
/** y += alpha * A'*A*x */
void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const;
void multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys) const;
void multiplyHessianAdd(double alpha, const double* x, double* y) const {};
/// eta for Hessian
VectorValues gradientAtZero() const;

View File

@ -131,7 +131,7 @@ namespace gtsam {
Matrix::Zero(maxrank, Ab_.matrix().cols());
// FIXME: replace with triangular system
Ab_.rowEnd() = maxrank;
model_ = noiseModel::Unit::Create(maxrank);
model_ = SharedDiagonal(); // should be same as Unit::Create(maxrank);
}
/* ************************************************************************* */
@ -438,6 +438,37 @@ namespace gtsam {
}
}
/* ************************************************************************* */
VectorValues JacobianFactor::hessianDiagonal() const {
VectorValues d;
for(size_t pos=0; pos<size(); ++pos)
{
Key j = keys_[pos];
size_t nj = Ab_(pos).cols();
Vector dj(nj);
for (size_t k = 0; k < nj; ++k) {
Vector column_k = Ab_(pos).col(k);
if (model_) column_k = model_->whiten(column_k);
dj(k) = dot(column_k,column_k);
}
d.insert(j,dj);
}
return d;
}
/* ************************************************************************* */
map<Key,Matrix> JacobianFactor::hessianBlockDiagonal() const {
map<Key,Matrix> blocks;
for(size_t pos=0; pos<size(); ++pos)
{
Key j = keys_[pos];
Matrix Aj = Ab_(pos);
if (model_) Aj = model_->Whiten(Aj);
blocks.insert(make_pair(j,Aj.transpose()*Aj));
}
return blocks;
}
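Both methods follow directly from the Hessian of a whitened Jacobian factor: with \tilde{A}_j the whitened block for variable j (notation mine),

\text{hessianBlockDiagonal: } B_j = \tilde{A}_j^{\top} \tilde{A}_j,
\qquad
\text{hessianDiagonal: } d_j(k) = \tilde{a}_k^{\top} \tilde{a}_k \ \text{for each column } \tilde{a}_k \text{ of } \tilde{A}_j,

i.e. the diagonal entries are the squared norms of the whitened columns, which is what the dot(column_k, column_k) loop computes.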
/* ************************************************************************* */
Vector JacobianFactor::operator*(const VectorValues& x) const {
Vector Ax = zero(Ab_.rows());
@ -458,11 +489,13 @@ namespace gtsam {
// Just iterate over all A matrices and insert Ai^e into VectorValues
for(size_t pos=0; pos<size(); ++pos)
{
Key j = keys_[pos];
// Create the value as a zero vector if it does not exist.
pair<VectorValues::iterator, bool> xi = x.tryInsert(keys_[pos], Vector());
pair<VectorValues::iterator, bool> xi = x.tryInsert(j, Vector());
if(xi.second)
xi.first->second = Vector::Zero(getDim(begin() + pos));
gtsam::transposeMultiplyAdd(Ab_(pos), E, xi.first->second);
}
}
@ -473,6 +506,32 @@ namespace gtsam {
transposeMultiplyAdd(alpha,Ax,y);
}
void JacobianFactor::multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys) const {
// Use eigen magic to access raw memory
typedef Eigen::Matrix<double, Eigen::Dynamic, 1> DVector;
typedef Eigen::Map<DVector> DMap;
typedef Eigen::Map<const DVector> ConstDMap;
if (empty()) return;
Vector Ax = zero(Ab_.rows());
// Just iterate over all A matrices and multiply in correct config part
for(size_t pos=0; pos<size(); ++pos)
Ax += Ab_(pos) * ConstDMap(x + keys[keys_[pos]],keys[keys_[pos]+1]-keys[keys_[pos]]);
// Deal with noise properly: need to whiten twice (double-whiten) as we are dividing by the variance
if (model_) { model_->whitenInPlace(Ax); model_->whitenInPlace(Ax); }
// multiply with alpha
Ax *= alpha;
// Again iterate over all A matrices and insert Ai^e into y
for(size_t pos=0; pos<size(); ++pos)
DMap(y + keys[keys_[pos]],keys[keys_[pos]+1]-keys[keys_[pos]]) += Ab_(pos).transpose() * Ax;
}
/* ************************************************************************* */
VectorValues JacobianFactor::gradientAtZero() const {
VectorValues g;

View File

@ -96,6 +96,7 @@ namespace gtsam {
typedef ABlock::ColXpr BVector;
typedef constABlock::ConstColXpr constBVector;
/** Convert from other GaussianFactor */
explicit JacobianFactor(const GaussianFactor& gf);
@ -181,6 +182,12 @@ namespace gtsam {
*/
virtual Matrix information() const;
/// Return the diagonal of the Hessian for this factor
virtual VectorValues hessianDiagonal() const;
/// Return the block diagonal of the Hessian for this factor
virtual std::map<Key,Matrix> hessianBlockDiagonal() const;
/**
* @brief Returns (dense) A,b pair associated with factor, bakes in the weights
*/
@ -269,6 +276,10 @@ namespace gtsam {
/** y += alpha * A'*A*x */
void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const;
void multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys) const;
void multiplyHessianAdd(double alpha, const double* x, double* y) const {};
/// A'*b for Jacobian
VectorValues gradientAtZero() const;

View File

@ -53,7 +53,7 @@ KalmanFilter::solve(const GaussianFactorGraph& factorGraph) const {
/* ************************************************************************* */
// Auxiliary function to create a small graph for predict or update and solve
KalmanFilter::State //
KalmanFilter::fuse(const State& p, GaussianFactor::shared_ptr newFactor) {
KalmanFilter::fuse(const State& p, GaussianFactor::shared_ptr newFactor) const {
// Create a factor graph
GaussianFactorGraph factorGraph;
@ -65,7 +65,7 @@ KalmanFilter::fuse(const State& p, GaussianFactor::shared_ptr newFactor) {
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::init(const Vector& x0,
const SharedDiagonal& P0) {
const SharedDiagonal& P0) const {
// Create a factor graph f(x0), eliminate it into P(x0)
GaussianFactorGraph factorGraph;
@ -74,7 +74,7 @@ KalmanFilter::State KalmanFilter::init(const Vector& x0,
}
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::init(const Vector& x, const Matrix& P0) {
KalmanFilter::State KalmanFilter::init(const Vector& x, const Matrix& P0) const {
// Create a factor graph f(x0), eliminate it into P(x0)
GaussianFactorGraph factorGraph;
@ -89,7 +89,7 @@ void KalmanFilter::print(const string& s) const {
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::predict(const State& p, const Matrix& F,
const Matrix& B, const Vector& u, const SharedDiagonal& model) {
const Matrix& B, const Vector& u, const SharedDiagonal& model) const {
// The factor related to the motion model is defined as
// f2(x_{t},x_{t+1}) = (F*x_{t} + B*u - x_{t+1}) * Q^-1 * (F*x_{t} + B*u - x_{t+1})^T
@ -100,7 +100,7 @@ KalmanFilter::State KalmanFilter::predict(const State& p, const Matrix& F,
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::predictQ(const State& p, const Matrix& F,
const Matrix& B, const Vector& u, const Matrix& Q) {
const Matrix& B, const Vector& u, const Matrix& Q) const {
#ifndef NDEBUG
DenseIndex n = F.cols();
@ -126,7 +126,7 @@ KalmanFilter::State KalmanFilter::predictQ(const State& p, const Matrix& F,
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::predict2(const State& p, const Matrix& A0,
const Matrix& A1, const Vector& b, const SharedDiagonal& model) {
const Matrix& A1, const Vector& b, const SharedDiagonal& model) const {
// The factor related to the motion model is defined as
// f2(x_{t},x_{t+1}) = |A0*x_{t} + A1*x_{t+1} - b|^2
Key k = step(p);
@ -135,7 +135,7 @@ KalmanFilter::State KalmanFilter::predict2(const State& p, const Matrix& A0,
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::update(const State& p, const Matrix& H,
const Vector& z, const SharedDiagonal& model) {
const Vector& z, const SharedDiagonal& model) const {
// The factor related to the measurements would be defined as
// f2 = (h(x_{t}) - z_{t}) * R^-1 * (h(x_{t}) - z_{t})^T
// = (x_{t} - z_{t}) * R^-1 * (x_{t} - z_{t})^T
@ -145,7 +145,7 @@ KalmanFilter::State KalmanFilter::update(const State& p, const Matrix& H,
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::updateQ(const State& p, const Matrix& H,
const Vector& z, const Matrix& Q) {
const Vector& z, const Matrix& Q) const {
Key k = step(p);
Matrix M = inverse(Q), Ht = trans(H);
Matrix G = Ht * M * H;

View File

@ -62,7 +62,7 @@ private:
const GaussianFactorGraph::Eliminate function_; /** algorithm */
State solve(const GaussianFactorGraph& factorGraph) const;
State fuse(const State& p, GaussianFactor::shared_ptr newFactor);
State fuse(const State& p, GaussianFactor::shared_ptr newFactor) const;
public:
@ -80,10 +80,10 @@ public:
* @param x0 estimate at time 0
* @param P0 covariance at time 0, given as a diagonal Gaussian 'model'
*/
State init(const Vector& x0, const SharedDiagonal& P0);
State init(const Vector& x0, const SharedDiagonal& P0) const;
/// version of init with a full covariance matrix
State init(const Vector& x0, const Matrix& P0);
State init(const Vector& x0, const Matrix& P0) const;
/// print
void print(const std::string& s = "") const;
@ -102,7 +102,7 @@ public:
* and w is zero-mean, Gaussian white noise with covariance Q.
*/
State predict(const State& p, const Matrix& F, const Matrix& B,
const Vector& u, const SharedDiagonal& modelQ);
const Vector& u, const SharedDiagonal& modelQ) const;
/*
* Version of predict with full covariance
@ -111,7 +111,7 @@ public:
* This version allows more realistic models than a diagonal covariance matrix.
*/
State predictQ(const State& p, const Matrix& F, const Matrix& B,
const Vector& u, const Matrix& Q);
const Vector& u, const Matrix& Q) const;
/**
* Predict the state P(x_{t+1}|Z^t)
@ -122,7 +122,7 @@ public:
* with an optional noise model.
*/
State predict2(const State& p, const Matrix& A0, const Matrix& A1,
const Vector& b, const SharedDiagonal& model);
const Vector& b, const SharedDiagonal& model) const;
/**
* Update Kalman filter with a measurement
@ -133,7 +133,7 @@ public:
* In this version, R is restricted to diagonal Gaussians (model parameter)
*/
State update(const State& p, const Matrix& H, const Vector& z,
const SharedDiagonal& model);
const SharedDiagonal& model) const;
/*
* Version of update with full covariance
@ -142,7 +142,7 @@ public:
* This version allows more realistic models than a diagonal covariance matrix.
*/
State updateQ(const State& p, const Matrix& H, const Vector& z,
const Matrix& Q);
const Matrix& Q) const;
};
} // \namespace gtsam
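With every step now const and returning a fresh State, the filter can be used on a const KalmanFilter instance in a purely functional style. A minimal sketch (not part of the diff; F, B, u, H, z and the noise models are placeholders supplied by the caller):

#include <gtsam/linear/KalmanFilter.h>

void runFilter(const gtsam::Matrix& F, const gtsam::Matrix& B, const gtsam::Vector& u,
               const gtsam::Matrix& H, const gtsam::Vector& z,
               const gtsam::SharedDiagonal& modelQ, const gtsam::SharedDiagonal& modelR) {
  using namespace gtsam;
  const KalmanFilter kf(2);  // 2-dimensional state; const is fine now that all steps are const
  KalmanFilter::State p = kf.init(zero(2), noiseModel::Isotropic::Sigma(2, 1.0));
  p = kf.predict(p, F, B, u, modelQ);  // motion model
  p = kf.update(p, H, z, modelR);      // measurement update
}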

View File

@ -585,6 +585,10 @@ namespace gtsam {
virtual Matrix Whiten(const Matrix& H) const { return H; }
virtual void WhitenInPlace(Matrix& H) const {}
virtual void WhitenInPlace(Eigen::Block<Matrix> H) const {}
virtual void whitenInPlace(Vector& v) const {}
virtual void unwhitenInPlace(Vector& v) const {}
virtual void whitenInPlace(Eigen::Block<Vector>& v) const {}
virtual void unwhitenInPlace(Eigen::Block<Vector>& v) const {}
private:
/** Serialization function */

View File

@ -112,36 +112,51 @@ TEST(GaussianFactorGraph, matrices) {
// 9 10 0 11 12 13
// 0 0 0 14 15 16
Matrix A00 = (Matrix(2, 3) << 1, 2, 3, 5, 6, 7);
Matrix A10 = (Matrix(2, 3) << 9, 10, 0, 0, 0, 0);
Matrix A11 = (Matrix(2, 2) << 11, 12, 14, 15);
GaussianFactorGraph gfg;
SharedDiagonal model = noiseModel::Unit::Create(2);
gfg.add(0, (Matrix(2, 3) << 1., 2., 3., 5., 6., 7.), (Vector(2) << 4., 8.), model);
gfg.add(0, (Matrix(2, 3) << 9.,10., 0., 0., 0., 0.), 1, (Matrix(2, 2) << 11., 12., 14., 15.), (Vector(2) << 13.,16.), model);
gfg.add(0, A00, (Vector(2) << 4., 8.), model);
gfg.add(0, A10, 1, A11, (Vector(2) << 13.,16.), model);
Matrix jacobian(4,6);
jacobian <<
Matrix Ab(4,6);
Ab <<
1, 2, 3, 0, 0, 4,
5, 6, 7, 0, 0, 8,
9,10, 0,11,12,13,
0, 0, 0,14,15,16;
Matrix expectedJacobian = jacobian;
Matrix expectedHessian = jacobian.transpose() * jacobian;
Matrix expectedA = jacobian.leftCols(jacobian.cols()-1);
Vector expectedb = jacobian.col(jacobian.cols()-1);
Matrix expectedL = expectedA.transpose() * expectedA;
Vector expectedeta = expectedA.transpose() * expectedb;
// augmented versions
EXPECT(assert_equal(Ab, gfg.augmentedJacobian()));
EXPECT(assert_equal(Ab.transpose() * Ab, gfg.augmentedHessian()));
Matrix actualJacobian = gfg.augmentedJacobian();
Matrix actualHessian = gfg.augmentedHessian();
// jacobian
Matrix A = Ab.leftCols(Ab.cols()-1);
Vector b = Ab.col(Ab.cols()-1);
Matrix actualA; Vector actualb; boost::tie(actualA,actualb) = gfg.jacobian();
Matrix actualL; Vector actualeta; boost::tie(actualL,actualeta) = gfg.hessian();
EXPECT(assert_equal(A, actualA));
EXPECT(assert_equal(b, actualb));
EXPECT(assert_equal(expectedJacobian, actualJacobian));
EXPECT(assert_equal(expectedHessian, actualHessian));
EXPECT(assert_equal(expectedA, actualA));
EXPECT(assert_equal(expectedb, actualb));
EXPECT(assert_equal(expectedL, actualL));
EXPECT(assert_equal(expectedeta, actualeta));
// hessian
Matrix L = A.transpose() * A;
Vector eta = A.transpose() * b;
Matrix actualL; Vector actualeta; boost::tie(actualL,actualeta) = gfg.hessian();
EXPECT(assert_equal(L, actualL));
EXPECT(assert_equal(eta, actualeta));
// hessianDiagonal
VectorValues expectLdiagonal; // Make explicit that diagonal is sum-squares of columns
expectLdiagonal.insert(0, (Vector(3) << 1+25+81, 4+36+100, 9+49));
expectLdiagonal.insert(1, (Vector(2) << 121+196, 144+225));
EXPECT(assert_equal(expectLdiagonal, gfg.hessianDiagonal()));
// hessianBlockDiagonal
map<Key,Matrix> actualBD = gfg.hessianBlockDiagonal();
LONGS_EQUAL(2,actualBD.size());
EXPECT(assert_equal(A00.transpose()*A00 + A10.transpose()*A10,actualBD[0]));
EXPECT(assert_equal(A11.transpose()*A11,actualBD[1]));
}
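In formulas, with Ab = [A | b] as stacked above, the quantities this test checks are
augmentedJacobian = Ab, augmentedHessian = Ab^T Ab, hessian = (L, \eta) = (A^T A, A^T b),
hessianDiagonal(j) = the entries of diag(A^T A) belonging to variable j (the column sums of squares), and
hessianBlockDiagonal(j) = \sum_i A_{ij}^T A_{ij} over the factors i involving variable j
(e.g. A00^T A00 + A10^T A10 for variable 0 and A11^T A11 for variable 1).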
/* ************************************************************************* */
@ -151,9 +166,9 @@ static GaussianFactorGraph createSimpleGaussianFactorGraph() {
// linearized prior on x1: c[_x1_]+x1=0 i.e. x1=-c[_x1_]
fg += JacobianFactor(2, 10*eye(2), -1.0*ones(2), unit2);
// odometry between x1 and x2: x2-x1=[0.2;-0.1]
fg += JacobianFactor(2, -10*eye(2), 0, 10*eye(2), (Vector(2) << 2.0, -1.0), unit2);
fg += JacobianFactor(0, 10*eye(2), 2, -10*eye(2), (Vector(2) << 2.0, -1.0), unit2);
// measurement between x1 and l1: l1-x1=[0.0;0.2]
fg += JacobianFactor(2, -5*eye(2), 1, 5*eye(2), (Vector(2) << 0.0, 1.0), unit2);
fg += JacobianFactor(1, 5*eye(2), 2, -5*eye(2), (Vector(2) << 0.0, 1.0), unit2);
// measurement between x2 and l1: l1-x2=[-0.2;0.3]
fg += JacobianFactor(0, -5*eye(2), 1, 5*eye(2), (Vector(2) << -1.0, 1.5), unit2);
return fg;
@ -298,6 +313,31 @@ TEST( GaussianFactorGraph, multiplyHessianAdd2 )
EXPECT(assert_equal(2*expected, actual));
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, multiplyHessianAdd3 )
{
GaussianFactorGraph gfg = createGaussianFactorGraphWithHessianFactor();
// brute force
Matrix AtA; Vector eta; boost::tie(AtA,eta) = gfg.hessian();
Vector X(6); X<<1,2,3,4,5,6;
Vector Y(6); Y<<-450, -450, 300, 400, 2950, 3450;
EXPECT(assert_equal(Y,AtA*X));
double* x = &X[0];
double* y = &Y[0];
Vector fast_y = gtsam::zero(6);
double* actual = &fast_y[0];
gfg.multiplyHessianAdd(1.0, x, fast_y.data());
EXPECT(assert_equal(Y, fast_y));
// now, do it with non-zero y
gfg.multiplyHessianAdd(1.0, x, fast_y.data());
EXPECT(assert_equal(2*Y, fast_y));
}
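In other words, the raw-pointer overload exercised here computes
y \leftarrow y + \alpha \, A^T A \, x,
so calling it twice with the same buffer accumulates, which is exactly what the final 2*Y check verifies.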
/* ************************************************************************* */
TEST( GaussianFactorGraph, matricesMixed )


@ -430,11 +430,11 @@ TEST(HessianFactor, combine) {
/* ************************************************************************* */
TEST(HessianFactor, gradientAtZero)
{
Matrix G11 = (Matrix(1, 1) << 1.0);
Matrix G12 = (Matrix(1, 2) << 0.0, 0.0);
Matrix G22 = (Matrix(2, 2) << 1.0, 0.0, 0.0, 1.0);
Vector g1 = (Vector(1) << -7.0);
Vector g2 = (Vector(2) << -8.0, -9.0);
Matrix G11 = (Matrix(1, 1) << 1);
Matrix G12 = (Matrix(1, 2) << 0, 0);
Matrix G22 = (Matrix(2, 2) << 1, 0, 0, 1);
Vector g1 = (Vector(1) << -7);
Vector g2 = (Vector(2) << -8, -9);
double f = 194;
HessianFactor factor(0, 1, G11, G12, g1, G22, g2, f);
@ -448,6 +448,31 @@ TEST(HessianFactor, gradientAtZero)
EXPECT(assert_equal(expectedG, actualG));
}
/* ************************************************************************* */
TEST(HessianFactor, hessianDiagonal)
{
Matrix G11 = (Matrix(1, 1) << 1);
Matrix G12 = (Matrix(1, 2) << 0, 0);
Matrix G22 = (Matrix(2, 2) << 1, 0, 0, 1);
Vector g1 = (Vector(1) << -7);
Vector g2 = (Vector(2) << -8, -9);
double f = 194;
HessianFactor factor(0, 1, G11, G12, g1, G22, g2, f);
// hessianDiagonal
VectorValues expected;
expected.insert(0, (Vector(1) << 1));
expected.insert(1, (Vector(2) << 1,1));
EXPECT(assert_equal(expected, factor.hessianDiagonal()));
// hessianBlockDiagonal
map<Key,Matrix> actualBD = factor.hessianBlockDiagonal();
LONGS_EQUAL(2,actualBD.size());
EXPECT(assert_equal(G11,actualBD[0]));
EXPECT(assert_equal(G22,actualBD[1]));
}
/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
/* ************************************************************************* */


@ -131,19 +131,16 @@ TEST(JabobianFactor, Hessian_conversion) {
1.57, 2.695, -1.1, -2.35,
2.695, 11.3125, -0.65, -10.225,
-1.1, -0.65, 1, 0.5,
-2.35, -10.225, 0.5, 9.25).finished(),
(Vector(4) << -7.885, -28.5175, 2.75, 25.675).finished(),
-2.35, -10.225, 0.5, 9.25),
(Vector(4) << -7.885, -28.5175, 2.75, 25.675),
73.1725);
JacobianFactor expected(0, (Matrix(2,4) <<
1.2530, 2.1508, -0.8779, -1.8755,
0, 2.5858, 0.4789, -2.3943).finished(),
(Vector(2) << -6.2929, -5.7941).finished(),
noiseModel::Unit::Create(2));
0, 2.5858, 0.4789, -2.3943),
(Vector(2) << -6.2929, -5.7941));
JacobianFactor actual(hessian);
EXPECT(assert_equal(expected, actual, 1e-3));
EXPECT(assert_equal(expected, JacobianFactor(hessian), 1e-3));
}
/* ************************************************************************* */
@ -204,9 +201,54 @@ TEST(JacobianFactor, error)
DOUBLES_EQUAL(expected_error, actual_error, 1e-10);
}
/* ************************************************************************* */
TEST(JacobianFactor, matrices_NULL)
{
// Make sure everything works with NULL noise model
JacobianFactor factor(simple::terms, simple::b);
Matrix jacobianExpected(3, 9);
jacobianExpected << simple::terms[0].second, simple::terms[1].second, simple::terms[2].second;
Vector rhsExpected = simple::b;
Matrix augmentedJacobianExpected(3, 10);
augmentedJacobianExpected << jacobianExpected, rhsExpected;
Matrix augmentedHessianExpected =
augmentedJacobianExpected.transpose() * augmentedJacobianExpected;
// Hessian
EXPECT(assert_equal(Matrix(augmentedHessianExpected.topLeftCorner(9,9)), factor.information()));
EXPECT(assert_equal(augmentedHessianExpected, factor.augmentedInformation()));
// Whitened Jacobian
EXPECT(assert_equal(jacobianExpected, factor.jacobian().first));
EXPECT(assert_equal(rhsExpected, factor.jacobian().second));
EXPECT(assert_equal(augmentedJacobianExpected, factor.augmentedJacobian()));
// Unwhitened Jacobian
EXPECT(assert_equal(jacobianExpected, factor.jacobianUnweighted().first));
EXPECT(assert_equal(rhsExpected, factor.jacobianUnweighted().second));
EXPECT(assert_equal(augmentedJacobianExpected, factor.augmentedJacobianUnweighted()));
// hessianDiagonal
VectorValues expectDiagonal;
expectDiagonal.insert(5, ones(3));
expectDiagonal.insert(10, 4*ones(3));
expectDiagonal.insert(15, 9*ones(3));
EXPECT(assert_equal(expectDiagonal, factor.hessianDiagonal()));
// hessianBlockDiagonal
map<Key,Matrix> actualBD = factor.hessianBlockDiagonal();
LONGS_EQUAL(3,actualBD.size());
EXPECT(assert_equal(1*eye(3),actualBD[5]));
EXPECT(assert_equal(4*eye(3),actualBD[10]));
EXPECT(assert_equal(9*eye(3),actualBD[15]));
}
/* ************************************************************************* */
TEST(JacobianFactor, matrices)
{
// And now with a non-unit noise model
JacobianFactor factor(simple::terms, simple::b, simple::noise);
Matrix jacobianExpected(3, 9);
@ -232,6 +274,21 @@ TEST(JacobianFactor, matrices)
EXPECT(assert_equal(jacobianExpected, factor.jacobianUnweighted().first));
EXPECT(assert_equal(rhsExpected, factor.jacobianUnweighted().second));
EXPECT(assert_equal(augmentedJacobianExpected, factor.augmentedJacobianUnweighted()));
// hessianDiagonal
VectorValues expectDiagonal;
// below we divide by the variance 0.5^2
expectDiagonal.insert(5, (Vector(3) << 1, 1, 1)/0.25);
expectDiagonal.insert(10, (Vector(3) << 4, 4, 4)/0.25);
expectDiagonal.insert(15, (Vector(3) << 9, 9, 9)/0.25);
EXPECT(assert_equal(expectDiagonal, factor.hessianDiagonal()));
// hessianBlockDiagonal
map<Key,Matrix> actualBD = factor.hessianBlockDiagonal();
LONGS_EQUAL(3,actualBD.size());
EXPECT(assert_equal(4*eye(3),actualBD[5]));
EXPECT(assert_equal(16*eye(3),actualBD[10]));
EXPECT(assert_equal(36*eye(3),actualBD[15]));
}
/* ************************************************************************* */

gtsam/navigation/.gitignore (new file)

@ -0,0 +1 @@
/*.*~


@ -48,39 +48,6 @@ namespace gtsam {
/** Struct to store results of preintegrating IMU measurements. Can be built
* incrementally so as to avoid costly integration at time of factor construction. */
/** Right Jacobian for Exponential map in SO(3) - equation (10.86) and following equations in [1] */
static Matrix3 rightJacobianExpMapSO3(const Vector3& x) {
// x is the axis-angle representation (exponential coordinates) for a rotation
double normx = norm_2(x); // rotation angle
Matrix3 Jr;
if (normx < 10e-8){
Jr = Matrix3::Identity();
}
else{
const Matrix3 X = skewSymmetric(x); // element of Lie algebra so(3): X = x^
Jr = Matrix3::Identity() - ((1-cos(normx))/(normx*normx)) * X +
((normx-sin(normx))/(normx*normx*normx)) * X * X; // right Jacobian
}
return Jr;
}
/** Right Jacobian for Log map in SO(3) - equation (10.86) and following equations in [1] */
static Matrix3 rightJacobianExpMapSO3inverse(const Vector3& x) {
// x is the axis-angle representation (exponential coordinates) for a rotation
double normx = norm_2(x); // rotation angle
Matrix3 Jrinv;
if (normx < 10e-8){
Jrinv = Matrix3::Identity();
}
else{
const Matrix3 X = skewSymmetric(x); // element of Lie algebra so(3): X = x^
Jrinv = Matrix3::Identity() +
0.5 * X + (1/(normx*normx) - (1+cos(normx))/(2*normx * sin(normx)) ) * X * X;
}
return Jrinv;
}
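For reference, the formulas implemented by these helpers (removed here in favour of the Rot3 versions) are, with \theta = \|x\| and X = \hat{x} the skew-symmetric matrix of x:
J_r(x) = I - \frac{1-\cos\theta}{\theta^2}\,X + \frac{\theta-\sin\theta}{\theta^3}\,X^2,
\qquad
J_r^{-1}(x) = I + \tfrac{1}{2}X + \Big(\frac{1}{\theta^2} - \frac{1+\cos\theta}{2\,\theta\,\sin\theta}\Big)X^2,
both falling back to the identity for \theta near zero.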
/** CombinedPreintegratedMeasurements accumulates (integrates) the IMU measurements (rotation rates and accelerations)
* and the corresponding covariance matrix. The measurements are then used to build the Preintegrated IMU factor*/
class CombinedPreintegratedMeasurements {
@ -187,7 +154,7 @@ namespace gtsam {
const Vector3 theta_incr = correctedOmega * deltaT; // rotation vector describing rotation increment computed from the current rotation rate measurement
const Rot3 Rincr = Rot3::Expmap(theta_incr); // rotation increment computed from the current rotation rate measurement
const Matrix3 Jr_theta_incr = rightJacobianExpMapSO3(theta_incr); // Right jacobian computed at theta_incr
const Matrix3 Jr_theta_incr = Rot3::rightJacobianExpMapSO3(theta_incr); // Right jacobian computed at theta_incr
// Update Jacobians
/* ----------------------------------------------------------------------------------------------------------------------- */
@ -208,11 +175,11 @@ namespace gtsam {
Matrix3 Z_3x3 = Matrix3::Zero();
Matrix3 I_3x3 = Matrix3::Identity();
const Vector3 theta_i = Rot3::Logmap(deltaRij); // parametrization of so(3)
const Matrix3 Jr_theta_i = rightJacobianExpMapSO3(theta_i);
const Matrix3 Jr_theta_i = Rot3::rightJacobianExpMapSO3(theta_i);
Rot3 Rot_j = deltaRij * Rincr;
const Vector3 theta_j = Rot3::Logmap(Rot_j); // parametrization of so(3)
const Matrix3 Jrinv_theta_j = rightJacobianExpMapSO3inverse(theta_j);
const Matrix3 Jrinv_theta_j = Rot3::rightJacobianExpMapSO3inverse(theta_j);
// Single Jacobians to propagate covariance
Matrix3 H_pos_pos = I_3x3;
@ -436,11 +403,11 @@ namespace gtsam {
const Rot3 fRhat = deltaRij_biascorrected_corioliscorrected.between(Rot_i.between(Rot_j));
const Matrix3 Jr_theta_bcc = rightJacobianExpMapSO3(theta_biascorrected_corioliscorrected);
const Matrix3 Jr_theta_bcc = Rot3::rightJacobianExpMapSO3(theta_biascorrected_corioliscorrected);
const Matrix3 Jtheta = -Jr_theta_bcc * skewSymmetric(Rot_i.inverse().matrix() * omegaCoriolis_ * deltaTij);
const Matrix3 Jrinv_fRhat = rightJacobianExpMapSO3inverse(Rot3::Logmap(fRhat));
const Matrix3 Jrinv_fRhat = Rot3::rightJacobianExpMapSO3inverse(Rot3::Logmap(fRhat));
if(H1) {
H1->resize(15,6);
@ -515,8 +482,8 @@ namespace gtsam {
}
if(H5) {
const Matrix3 Jrinv_theta_bc = rightJacobianExpMapSO3inverse(theta_biascorrected);
const Matrix3 Jr_JbiasOmegaIncr = rightJacobianExpMapSO3(preintegratedMeasurements_.delRdelBiasOmega * biasOmegaIncr);
const Matrix3 Jrinv_theta_bc = Rot3::rightJacobianExpMapSO3inverse(theta_biascorrected);
const Matrix3 Jr_JbiasOmegaIncr = Rot3::rightJacobianExpMapSO3(preintegratedMeasurements_.delRdelBiasOmega * biasOmegaIncr);
const Matrix3 JbiasOmega = Jr_theta_bcc * Jrinv_theta_bc * Jr_JbiasOmegaIncr * preintegratedMeasurements_.delRdelBiasOmega;
H5->resize(15,6);


@ -47,39 +47,6 @@ namespace gtsam {
/** Struct to store results of preintegrating IMU measurements. Can be built
* incrementally so as to avoid costly integration at time of factor construction. */
/** Right Jacobian for Exponential map in SO(3) - equation (10.86) and following equations in [1] */
static Matrix3 rightJacobianExpMapSO3(const Vector3& x) {
// x is the axis-angle representation (exponential coordinates) for a rotation
double normx = norm_2(x); // rotation angle
Matrix3 Jr;
if (normx < 10e-8){
Jr = Matrix3::Identity();
}
else{
const Matrix3 X = skewSymmetric(x); // element of Lie algebra so(3): X = x^
Jr = Matrix3::Identity() - ((1-cos(normx))/(normx*normx)) * X +
((normx-sin(normx))/(normx*normx*normx)) * X * X; // right Jacobian
}
return Jr;
}
/** Right Jacobian for Log map in SO(3) - equation (10.86) and following equations in [1] */
static Matrix3 rightJacobianExpMapSO3inverse(const Vector3& x) {
// x is the axis-angle representation (exponential coordinates) for a rotation
double normx = norm_2(x); // rotation angle
Matrix3 Jrinv;
if (normx < 10e-8){
Jrinv = Matrix3::Identity();
}
else{
const Matrix3 X = skewSymmetric(x); // element of Lie algebra so(3): X = x^
Jrinv = Matrix3::Identity() +
0.5 * X + (1/(normx*normx) - (1+cos(normx))/(2*normx * sin(normx)) ) * X * X;
}
return Jrinv;
}
/** CombinedPreintegratedMeasurements accumulates (integrates) the IMU measurements (rotation rates and accelerations)
* and the corresponding covariance matrix. The measurements are then used to build the Preintegrated IMU factor*/
class PreintegratedMeasurements {
@ -182,7 +149,7 @@ namespace gtsam {
const Vector3 theta_incr = correctedOmega * deltaT; // rotation vector describing rotation increment computed from the current rotation rate measurement
const Rot3 Rincr = Rot3::Expmap(theta_incr); // rotation increment computed from the current rotation rate measurement
const Matrix3 Jr_theta_incr = rightJacobianExpMapSO3(theta_incr); // Right jacobian computed at theta_incr
const Matrix3 Jr_theta_incr = Rot3::rightJacobianExpMapSO3(theta_incr); // Right jacobian computed at theta_incr
// Update Jacobians
/* ----------------------------------------------------------------------------------------------------------------------- */
@ -200,11 +167,11 @@ namespace gtsam {
Matrix3 Z_3x3 = Matrix3::Zero();
Matrix3 I_3x3 = Matrix3::Identity();
const Vector3 theta_i = Rot3::Logmap(deltaRij); // parametrization of so(3)
const Matrix3 Jr_theta_i = rightJacobianExpMapSO3(theta_i);
const Matrix3 Jr_theta_i = Rot3::rightJacobianExpMapSO3(theta_i);
Rot3 Rot_j = deltaRij * Rincr;
const Vector3 theta_j = Rot3::Logmap(Rot_j); // parametrization of so(3)
const Matrix3 Jrinv_theta_j = rightJacobianExpMapSO3inverse(theta_j);
const Matrix3 Jrinv_theta_j = Rot3::rightJacobianExpMapSO3inverse(theta_j);
// Update preintegrated measurements covariance: as in [2] we consider a first order propagation that
// can be seen as a prediction phase in an EKF framework
@ -397,11 +364,11 @@ namespace gtsam {
const Rot3 fRhat = deltaRij_biascorrected_corioliscorrected.between(Rot_i.between(Rot_j));
const Matrix3 Jr_theta_bcc = rightJacobianExpMapSO3(theta_biascorrected_corioliscorrected);
const Matrix3 Jr_theta_bcc = Rot3::rightJacobianExpMapSO3(theta_biascorrected_corioliscorrected);
const Matrix3 Jtheta = -Jr_theta_bcc * skewSymmetric(Rot_i.inverse().matrix() * omegaCoriolis_ * deltaTij);
const Matrix3 Jrinv_fRhat = rightJacobianExpMapSO3inverse(Rot3::Logmap(fRhat));
const Matrix3 Jrinv_fRhat = Rot3::rightJacobianExpMapSO3inverse(Rot3::Logmap(fRhat));
if(H1) {
H1->resize(9,6);
@ -457,8 +424,8 @@ namespace gtsam {
}
if(H5) {
const Matrix3 Jrinv_theta_bc = rightJacobianExpMapSO3inverse(theta_biascorrected);
const Matrix3 Jr_JbiasOmegaIncr = rightJacobianExpMapSO3(preintegratedMeasurements_.delRdelBiasOmega * biasOmegaIncr);
const Matrix3 Jrinv_theta_bc = Rot3::rightJacobianExpMapSO3inverse(theta_biascorrected);
const Matrix3 Jr_JbiasOmegaIncr = Rot3::rightJacobianExpMapSO3(preintegratedMeasurements_.delRdelBiasOmega * biasOmegaIncr);
const Matrix3 JbiasOmega = Jr_theta_bcc * Jrinv_theta_bc * Jr_JbiasOmegaIncr * preintegratedMeasurements_.delRdelBiasOmega;
H5->resize(9,6);


@ -319,7 +319,7 @@ TEST( ImuFactor, PartialDerivativeExpmap )
Matrix expectedDelRdelBiasOmega = numericalDerivative11<Rot3, LieVector>(boost::bind(
&evaluateRotation, measuredOmega, _1, deltaT), LieVector(biasOmega));
const Matrix3 Jr = ImuFactor::rightJacobianExpMapSO3((measuredOmega - biasOmega) * deltaT);
const Matrix3 Jr = Rot3::rightJacobianExpMapSO3((measuredOmega - biasOmega) * deltaT);
Matrix3 actualdelRdelBiasOmega = - Jr * deltaT; // the delta bias appears with the minus sign
@ -371,7 +371,7 @@ TEST( ImuFactor, fistOrderExponential )
Vector3 deltabiasOmega; deltabiasOmega << alpha,alpha,alpha;
const Matrix3 Jr = ImuFactor::rightJacobianExpMapSO3((measuredOmega - biasOmega) * deltaT);
const Matrix3 Jr = Rot3::rightJacobianExpMapSO3((measuredOmega - biasOmega) * deltaT);
Matrix3 delRdelBiasOmega = - Jr * deltaT; // the delta bias appears with the minus sign


@ -13,6 +13,7 @@
* @file LevenbergMarquardtOptimizer.cpp
* @brief Implementation of the Levenberg-Marquardt nonlinear optimizer
* @author Richard Roberts
* @author Luca Carlone
* @date Feb 26, 2012
*/
@ -20,16 +21,21 @@
#include <gtsam/linear/linearExceptions.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/VectorValues.h>
#include <gtsam/linear/Errors.h>
#include <boost/algorithm/string.hpp>
#include <boost/range/adaptor/map.hpp>
#include <string>
#include <cmath>
#include <fstream>
using namespace std;
namespace gtsam {
using boost::adaptors::map_values;
/* ************************************************************************* */
LevenbergMarquardtParams::VerbosityLM LevenbergMarquardtParams::verbosityLMTranslator(const std::string &src) const {
std::string s = src; boost::algorithm::to_upper(s);
@ -48,13 +54,14 @@ LevenbergMarquardtParams::VerbosityLM LevenbergMarquardtParams::verbosityLMTrans
std::string LevenbergMarquardtParams::verbosityLMTranslator(VerbosityLM value) const {
std::string s;
switch (value) {
case LevenbergMarquardtParams::SILENT: s = "SILENT" ; break;
case LevenbergMarquardtParams::LAMBDA: s = "LAMBDA" ; break;
case LevenbergMarquardtParams::TRYLAMBDA: s = "TRYLAMBDA" ; break;
case LevenbergMarquardtParams::TRYCONFIG: s = "TRYCONFIG" ; break;
case LevenbergMarquardtParams::TRYDELTA: s = "TRYDELTA" ; break;
case LevenbergMarquardtParams::DAMPED: s = "DAMPED" ; break;
default: s = "UNDEFINED" ; break;
case LevenbergMarquardtParams::SILENT: s = "SILENT" ; break;
case LevenbergMarquardtParams::TERMINATION: s = "TERMINATION" ; break;
case LevenbergMarquardtParams::LAMBDA: s = "LAMBDA" ; break;
case LevenbergMarquardtParams::TRYLAMBDA: s = "TRYLAMBDA" ; break;
case LevenbergMarquardtParams::TRYCONFIG: s = "TRYCONFIG" ; break;
case LevenbergMarquardtParams::TRYDELTA: s = "TRYDELTA" ; break;
case LevenbergMarquardtParams::DAMPED: s = "DAMPED" ; break;
default: s = "UNDEFINED" ; break;
}
return s;
}
@ -65,6 +72,10 @@ void LevenbergMarquardtParams::print(const std::string& str) const {
std::cout << " lambdaInitial: " << lambdaInitial << "\n";
std::cout << " lambdaFactor: " << lambdaFactor << "\n";
std::cout << " lambdaUpperBound: " << lambdaUpperBound << "\n";
std::cout << " lambdaLowerBound: " << lambdaLowerBound << "\n";
std::cout << " disableInnerIterations: " << disableInnerIterations << "\n";
std::cout << " minModelFidelity: " << minModelFidelity << "\n";
std::cout << " diagonalDamping: " << diagonalDamping << "\n";
std::cout << " verbosityLM: " << verbosityLMTranslator(verbosityLM) << "\n";
std::cout.flush();
}
@ -76,12 +87,75 @@ GaussianFactorGraph::shared_ptr LevenbergMarquardtOptimizer::linearize() const {
/* ************************************************************************* */
void LevenbergMarquardtOptimizer::increaseLambda(){
state_.lambda *= params_.lambdaFactor;
if(params_.useFixedLambdaFactor_){
state_.lambda *= params_.lambdaFactor;
}else{
state_.lambda *= params_.lambdaFactor;
params_.lambdaFactor *= 2.0;
}
params_.reuse_diagonal_ = true;
}
/* ************************************************************************* */
void LevenbergMarquardtOptimizer::decreaseLambda(){
state_.lambda /= params_.lambdaFactor;
void LevenbergMarquardtOptimizer::decreaseLambda(double stepQuality){
if(params_.useFixedLambdaFactor_){
state_.lambda /= params_.lambdaFactor;
}else{
// CHECK_GT(step_quality, 0.0);
state_.lambda *= std::max(1.0 / 3.0, 1.0 - pow(2.0 * stepQuality - 1.0, 3));
params_.lambdaFactor = 2.0;
}
state_.lambda = std::max(params_.lambdaLowerBound, state_.lambda);
params_.reuse_diagonal_ = false;
}
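Summarizing the two policies above, with \rho the step quality (modelFidelity) and \nu stored in lambdaFactor:
fixed factor (useFixedLambdaFactor_): \lambda \leftarrow \lambda \cdot \text{lambdaFactor} on a failed step, \lambda \leftarrow \lambda / \text{lambdaFactor} on a successful one;
adaptive (Ceres-like): \lambda \leftarrow \lambda\,\nu,\ \nu \leftarrow 2\nu on failure, and \lambda \leftarrow \lambda \cdot \max\!\big(\tfrac{1}{3},\ 1-(2\rho-1)^3\big),\ \nu \leftarrow 2 on success.
After a successful step, \lambda is additionally clamped below by lambdaLowerBound.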
/* ************************************************************************* */
GaussianFactorGraph LevenbergMarquardtOptimizer::buildDampedSystem(
const GaussianFactorGraph& linear) {
// Set two parameters as in Ceres; will move out later
static const double min_diagonal_ = 1e-6;
static const double max_diagonal_ = 1e32;
gttic(damp);
if (params_.verbosityLM >= LevenbergMarquardtParams::DAMPED)
cout << "building damped system with lambda " << state_.lambda << endl;
// Only retrieve diagonal vector when reuse_diagonal = false
if (params_.diagonalDamping && params_.reuse_diagonal_ == false) {
state_.hessianDiagonal = linear.hessianDiagonal();
BOOST_FOREACH(Vector& v, state_.hessianDiagonal | map_values) {
for (int aa = 0; aa < v.size(); aa++) {
v(aa) = std::min(std::max(v(aa), min_diagonal_), max_diagonal_);
v(aa) = sqrt(v(aa));
}
}
} // reuse diagonal
// for each of the variables, add a prior
double sigma = 1.0 / std::sqrt(state_.lambda);
GaussianFactorGraph damped = linear;
damped.reserve(damped.size() + state_.values.size());
BOOST_FOREACH(const Values::KeyValuePair& key_value, state_.values) {
size_t dim = key_value.value.dim();
Matrix A = Matrix::Identity(dim, dim);
//Replace the identity matrix with diagonal of Hessian
if (params_.diagonalDamping){
try {
A.diagonal() = state_.hessianDiagonal.at(key_value.key);
} catch (const std::exception& e) {
// Don't attempt any damping if no key found in diagonal
continue;
}
}
Vector b = Vector::Zero(dim);
SharedDiagonal model = noiseModel::Isotropic::Sigma(dim, sigma);
damped += boost::make_shared<JacobianFactor>(key_value.key, A, b, model);
}
gttoc(damp);
return damped;
}
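In effect, buildDampedSystem adds, for every variable, a zero-mean prior factor with Jacobian D and sigma 1/sqrt(lambda), so solving the damped graph minimizes
\|A\delta - b\|^2 + \lambda\,\|D\,\delta\|^2,
with D = I by default (Levenberg damping) or, when diagonalDamping is set, D = \mathrm{diag}\big(\sqrt{\mathrm{clamp}(\mathrm{diag}(A^T A),\,10^{-6},\,10^{32})}\big) (Marquardt-style scaling).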
/* ************************************************************************* */
@ -94,116 +168,115 @@ void LevenbergMarquardtOptimizer::iterate() {
const LevenbergMarquardtParams::VerbosityLM lmVerbosity = params_.verbosityLM;
// Linearize graph
if(nloVerbosity >= NonlinearOptimizerParams::ERROR)
cout << "linearizing = " << endl;
if(lmVerbosity >= LevenbergMarquardtParams::DAMPED) cout << "linearizing = " << endl;
GaussianFactorGraph::shared_ptr linear = linearize();
// Keep increasing lambda until we make make progress
while (true) {
++state_.totalNumberInnerIterations;
// Add prior-factors
// TODO: replace this dampening with a backsubstitution approach
gttic(damp);
if (lmVerbosity >= LevenbergMarquardtParams::DAMPED) cout << "building damped system" << endl;
GaussianFactorGraph dampedSystem = *linear;
{
double sigma = 1.0 / std::sqrt(state_.lambda);
dampedSystem.reserve(dampedSystem.size() + state_.values.size());
// for each of the variables, add a prior
BOOST_FOREACH(const Values::KeyValuePair& key_value, state_.values) {
size_t dim = key_value.value.dim();
Matrix A = Matrix::Identity(dim, dim);
Vector b = Vector::Zero(dim);
SharedDiagonal model = noiseModel::Isotropic::Sigma(dim, sigma);
dampedSystem += boost::make_shared<JacobianFactor>(key_value.key, A, b, model);
}
if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "trying lambda = " << state_.lambda << endl;
// Build damped system for this lambda (adds prior factors that make it like gradient descent)
GaussianFactorGraph dampedSystem = buildDampedSystem(*linear);
// Log current error/lambda to file
if (!params_.logFile.empty()) {
ofstream os(params_.logFile.c_str(), ios::app);
boost::posix_time::ptime currentTime = boost::posix_time::microsec_clock::universal_time();
os << state_.totalNumberInnerIterations << "," << 1e-6 * (currentTime - state_.startTime).total_microseconds() << ","
<< state_.error << "," << state_.lambda << endl;
}
gttoc(damp);
++state_.totalNumberInnerIterations;
// Try solving
double modelFidelity = 0.0;
bool step_is_successful = false;
bool stopSearchingLambda = false;
double newError;
Values newValues;
VectorValues delta;
bool systemSolvedSuccessfully;
try {
if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA)
cout << "trying lambda = " << state_.lambda << endl;
// Log current error/lambda to file
if (!params_.logFile.empty()) {
ofstream os(params_.logFile.c_str(), ios::app);
delta = solve(dampedSystem, state_.values, params_);
systemSolvedSuccessfully = true;
} catch(IndeterminantLinearSystemException& e) {
systemSolvedSuccessfully = false;
}
boost::posix_time::ptime currentTime = boost::posix_time::microsec_clock::universal_time();
os << state_.iterations << "," << 1e-6 * (currentTime - state_.startTime).total_microseconds() << ","
<< state_.error << "," << state_.lambda << endl;
}
// Solve Damped Gaussian Factor Graph
const VectorValues delta = solve(dampedSystem, state_.values, params_);
if(systemSolvedSuccessfully) {
params_.reuse_diagonal_ = true;
if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "linear delta norm = " << delta.norm() << endl;
if (lmVerbosity >= LevenbergMarquardtParams::TRYDELTA) delta.print("delta");
// update values
gttic (retract);
Values newValues = state_.values.retract(delta);
gttoc(retract);
// cost change in the linearized system (old - new)
double newlinearizedError = linear->error(delta);
// create new optimization state with more adventurous lambda
gttic (compute_error);
if(nloVerbosity >= NonlinearOptimizerParams::ERROR) cout << "calculating error" << endl;
double error = graph_.error(newValues);
gttoc(compute_error);
double linearizedCostChange = state_.error - newlinearizedError;
if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "next error = " << error << endl;
if(linearizedCostChange >= 0){ // step is valid
// update values
gttic (retract);
newValues = state_.values.retract(delta);
gttoc(retract);
if (error <= state_.error) {
state_.values.swap(newValues);
state_.error = error;
decreaseLambda();
break;
} else {
// Either we're not cautious, or the same lambda was worse than the current error.
// The more adventurous lambda was worse too, so make lambda more conservative
// and keep the same values.
if(state_.lambda >= params_.lambdaUpperBound) {
if(nloVerbosity >= NonlinearOptimizerParams::ERROR)
cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl;
break;
// compute new error
gttic (compute_error);
if(lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "calculating error" << endl;
newError = graph_.error(newValues);
gttoc(compute_error);
// cost change in the original, nonlinear system (old - new)
double costChange = state_.error - newError;
double absolute_function_tolerance = params_.relativeErrorTol * state_.error;
if (fabs(costChange) >= absolute_function_tolerance) {
// fidelity of the linearized model vs. the original nonlinear system
if(linearizedCostChange > 1e-15){
modelFidelity = costChange / linearizedCostChange;
// if we decrease the error in the nonlinear system and modelFidelity is above threshold
step_is_successful = modelFidelity > params_.minModelFidelity;
}else{
step_is_successful = true; // linearizedCostChange close to zero
}
} else {
if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA)
cout << "increasing lambda: old error (" << state_.error << ") new error (" << error << ")" << endl;
increaseLambda();
stopSearchingLambda = true;
}
}
} catch (IndeterminantLinearSystemException& e) {
(void) e; // Prevent unused variable warning
if(lmVerbosity >= LevenbergMarquardtParams::LAMBDA)
cout << "Negative matrix, increasing lambda" << endl;
// Either we're not cautious, or the same lambda was worse than the current error.
// The more adventurous lambda was worse too, so make lambda more conservative
// and keep the same values.
}
if(step_is_successful){ // we have successfully decreased the cost and we have good modelFidelity
state_.values.swap(newValues);
state_.error = newError;
decreaseLambda(modelFidelity);
break;
} else if (!stopSearchingLambda) { // we failed to solve the system or did not get a good enough decrease in cost
if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA)
cout << "increasing lambda: old error (" << state_.error << ") new error (" << newError << ")" << endl;
increaseLambda();
// check if lambda is too big
if(state_.lambda >= params_.lambdaUpperBound) {
if(nloVerbosity >= NonlinearOptimizerParams::ERROR)
if(nloVerbosity >= NonlinearOptimizerParams::TERMINATION)
cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl;
break;
} else {
increaseLambda();
}
} else { // the change in the cost is very small and it is not worth trying bigger lambdas
break;
}
// Frank asks: why would we do that?
// catch(...) {
// throw;
// }
} // end while
if (lmVerbosity >= LevenbergMarquardtParams::LAMBDA)
cout << "using lambda = " << state_.lambda << endl;
// Increment the iteration counter
++state_.iterations;
}
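The acceptance test above is the standard trust-region gain ratio: with the nonlinear error E and the error of the linearized system E_lin,
\rho = \frac{E(x) - E(x \oplus \delta)}{E_{\mathrm{lin}}(0) - E_{\mathrm{lin}}(\delta)},
and a trial step is kept only when the linearized change is nonnegative, the nonlinear change exceeds relativeErrorTol * E(x), and either \rho > minModelFidelity or the linearized change is numerically zero; otherwise lambda is increased and the same linearization is retried, until lambdaUpperBound is reached or the change falls below tolerance.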
/* ************************************************************************* */
LevenbergMarquardtParams LevenbergMarquardtOptimizer::ensureHasOrdering(
LevenbergMarquardtParams params, const NonlinearFactorGraph& graph) const
LevenbergMarquardtParams params, const NonlinearFactorGraph& graph) const
{
if(!params.ordering)
params.ordering = Ordering::COLAMD(graph);


@ -21,6 +21,8 @@
#include <gtsam/nonlinear/NonlinearOptimizer.h>
#include <boost/date_time/posix_time/posix_time.hpp>
class NonlinearOptimizerMoreOptimizationTest;
namespace gtsam {
class LevenbergMarquardtOptimizer;
@ -35,7 +37,7 @@ class GTSAM_EXPORT LevenbergMarquardtParams: public NonlinearOptimizerParams {
public:
/** See LevenbergMarquardtParams::lmVerbosity */
enum VerbosityLM {
SILENT = 0, LAMBDA, TRYLAMBDA, TRYCONFIG, DAMPED, TRYDELTA
SILENT = 0, TERMINATION, LAMBDA, TRYLAMBDA, TRYCONFIG, DAMPED, TRYDELTA
};
private:
@ -47,12 +49,19 @@ public:
double lambdaInitial; ///< The initial Levenberg-Marquardt damping term (default: 1e-5)
double lambdaFactor; ///< The amount by which to multiply or divide lambda when adjusting lambda (default: 10.0)
double lambdaUpperBound; ///< The maximum lambda to try before assuming the optimization has failed (default: 1e5)
double lambdaLowerBound; ///< The minimum lambda used in LM (default: 0)
VerbosityLM verbosityLM; ///< The verbosity level for Levenberg-Marquardt (default: SILENT), see also NonlinearOptimizerParams::verbosity
bool disableInnerIterations; ///< If true, inner iterations on the linearized system are disabled
double minModelFidelity; ///< Lower bound for the modelFidelity to accept the result of an LM iteration
std::string logFile; ///< an optional CSV log file, with [iteration, time, error, lambda]
bool diagonalDamping; ///< if true, use diagonal of Hessian
bool reuse_diagonal_; // an additional option (as in Ceres) for diagonalDamping, related to efficiency
bool useFixedLambdaFactor_; // if true, applies a constant increase (or decrease) to lambda according to lambdaFactor
LevenbergMarquardtParams() :
lambdaInitial(1e-5), lambdaFactor(10.0), lambdaUpperBound(1e5), verbosityLM(
SILENT) {
lambdaInitial(1e-5), lambdaFactor(10.0), lambdaUpperBound(1e5), lambdaLowerBound(
0.0), verbosityLM(SILENT), disableInnerIterations(false), minModelFidelity(
1e-3), diagonalDamping(false), reuse_diagonal_(false), useFixedLambdaFactor_(true) {
}
virtual ~LevenbergMarquardtParams() {
}
@ -68,12 +77,18 @@ public:
inline double getlambdaUpperBound() const {
return lambdaUpperBound;
}
inline double getlambdaLowerBound() const {
return lambdaLowerBound;
}
inline std::string getVerbosityLM() const {
return verbosityLMTranslator(verbosityLM);
}
inline std::string getLogFile() const {
return logFile;
}
inline bool getDiagonalDamping() const {
return diagonalDamping;
}
inline void setlambdaInitial(double value) {
lambdaInitial = value;
@ -84,12 +99,22 @@ public:
inline void setlambdaUpperBound(double value) {
lambdaUpperBound = value;
}
inline void setlambdaLowerBound(double value) {
lambdaLowerBound = value;
}
inline void setVerbosityLM(const std::string &s) {
verbosityLM = verbosityLMTranslator(s);
}
inline void setLogFile(const std::string &s) {
logFile = s;
}
inline void setDiagonalDamping(bool flag) {
diagonalDamping = flag;
}
inline void setUseFixedLambdaFactor(bool flag) {
useFixedLambdaFactor_ = flag;
}
};
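A sketch (not part of this diff) of configuring the new options through the setters above; the graph and initial values are assumed to come from elsewhere, and the wrapper function name is illustrative:

#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>

using namespace gtsam;

Values optimizeWithAdaptiveLM(const NonlinearFactorGraph& graph, const Values& initial) {
  LevenbergMarquardtParams params;
  params.setVerbosityLM("TERMINATION");  // report why the optimizer stopped
  params.setDiagonalDamping(true);       // scale damping by sqrt(diag(Hessian))
  params.setUseFixedLambdaFactor(false); // use the Ceres-like adaptive lambda policy
  params.setlambdaInitial(1e-5);
  params.setlambdaLowerBound(1e-10);
  params.setLogFile("lm_progress.csv");  // [iteration, time, error, lambda]
  return LevenbergMarquardtOptimizer(graph, initial, params).optimize();
}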
/**
@ -101,6 +126,7 @@ public:
double lambda;
int totalNumberInnerIterations; // The total number of inner iterations in the optimization (for each outer iteration, LM may try multiple inner iterations with different lambdas)
boost::posix_time::ptime startTime;
VectorValues hessianDiagonal; //only update hessianDiagonal when reuse_diagonal_ = false
LevenbergMarquardtState() {
initTime();
@ -174,11 +200,11 @@ public:
return state_.lambda;
}
// Apply policy to increase lambda if the current update was successful
virtual void increaseLambda();
// Apply policy to increase lambda after an unsuccessful update (stepQuality not used in the fixed-factor policy)
void increaseLambda();
// Apply policy to decrease lambda if the current update was NOT successful
virtual void decreaseLambda();
// Apply policy to decrease lambda after a successful update (stepQuality drives the adaptive policy)
void decreaseLambda(double stepQuality);
/// Access the current number of inner iterations
int getInnerIterations() const {
@ -226,9 +252,14 @@ public:
return state_;
}
/** Build a damped system for a specific lambda */
GaussianFactorGraph buildDampedSystem(const GaussianFactorGraph& linear);
friend class ::NonlinearOptimizerMoreOptimizationTest;
/// @}
protected:
/** Access the parameters (base class version) */
virtual const NonlinearOptimizerParams& _params() const {
return params_;


@ -55,8 +55,12 @@ void NonlinearOptimizer::defaultOptimize() {
if (params.verbosity >= NonlinearOptimizerParams::ERROR) cout << "Initial error: " << currentError << endl;
// Return if we already have too many iterations
if(this->iterations() >= params.maxIterations)
if(this->iterations() >= params.maxIterations){
if (params.verbosity >= NonlinearOptimizerParams::TERMINATION) {
cout << "iterations: " << this->iterations() << " >? " << params.maxIterations << endl;
}
return;
}
// Iterative loop
do {
@ -72,7 +76,7 @@ void NonlinearOptimizer::defaultOptimize() {
params.errorTol, currentError, this->error(), params.verbosity));
// Printing if verbose
if (params.verbosity >= NonlinearOptimizerParams::ERROR &&
if (params.verbosity >= NonlinearOptimizerParams::TERMINATION &&
this->iterations() >= params.maxIterations)
cout << "Terminating because reached maximum iterations" << endl;
}
@ -152,11 +156,16 @@ bool checkConvergence(double relativeErrorTreshold, double absoluteErrorTreshold
}
bool converged = (relativeErrorTreshold && (relativeDecrease <= relativeErrorTreshold))
|| (absoluteDecrease <= absoluteErrorTreshold);
if (verbosity >= NonlinearOptimizerParams::ERROR && converged) {
if (verbosity >= NonlinearOptimizerParams::TERMINATION && converged) {
if(absoluteDecrease >= 0.0)
cout << "converged" << endl;
else
cout << "Warning: stopping nonlinear iterations because error increased" << endl;
cout << "errorThreshold: " << newError << " <? " << errorThreshold << endl;
cout << "absoluteDecrease: " << setprecision(12) << absoluteDecrease << " <? " << absoluteErrorTreshold << endl;
cout << "relativeDecrease: " << setprecision(12) << relativeDecrease << " <? " << relativeErrorTreshold << endl;
}
return converged;
}
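With the TERMINATION verbosity now printing which criterion fired, the convergence test itself is unchanged:
\text{converged} \iff \big(\epsilon_{\mathrm{rel}} > 0 \wedge \tfrac{E_{\mathrm{old}}-E_{\mathrm{new}}}{E_{\mathrm{old}}} \le \epsilon_{\mathrm{rel}}\big) \;\vee\; \big(E_{\mathrm{old}}-E_{\mathrm{new}} \le \epsilon_{\mathrm{abs}}\big),
with a warning when the error actually increased.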


@ -27,6 +27,8 @@ NonlinearOptimizerParams::Verbosity NonlinearOptimizerParams::verbosityTranslato
return NonlinearOptimizerParams::DELTA;
if (s == "LINEAR")
return NonlinearOptimizerParams::LINEAR;
if (s == "TERMINATION")
return NonlinearOptimizerParams::TERMINATION;
/* default is silent */
return NonlinearOptimizerParams::SILENT;
@ -40,6 +42,9 @@ std::string NonlinearOptimizerParams::verbosityTranslator(
case NonlinearOptimizerParams::SILENT:
s = "SILENT";
break;
case NonlinearOptimizerParams::TERMINATION:
s = "TERMINATION";
break;
case NonlinearOptimizerParams::ERROR:
s = "ERROR";
break;


@ -34,7 +34,7 @@ class GTSAM_EXPORT NonlinearOptimizerParams {
public:
/** See NonlinearOptimizerParams::verbosity */
enum Verbosity {
SILENT, ERROR, VALUES, DELTA, LINEAR
SILENT, TERMINATION, ERROR, VALUES, DELTA, LINEAR
};
int maxIterations; ///< The maximum iterations to stop iterating (default 100)


@ -101,8 +101,8 @@ namespace gtsam {
if (H2) *H2 = zeros(2, point.dim());
std::cout << e.what() << ": Landmark "<< DefaultKeyFormatter(this->key2())
<< " behind Camera " << DefaultKeyFormatter(this->key1()) << std::endl;
return zero(2);
}
return zero(2);
}
/** return the measured */


@ -685,7 +685,7 @@ bool writeBALfromValues(const string& filename, SfM_data &data, Values& values){
Values valuesCameras = values.filter< PinholeCamera<Cal3Bundler> >();
if ( valuesCameras.size() == data.number_cameras() ){ // we only estimated camera poses and calibration
for (size_t i = 0; i < data.number_cameras(); i++){ // for each camera
Key cameraKey = symbol('c',i);
Key cameraKey = i; // symbol('c',i);
PinholeCamera<Cal3Bundler> camera = values.at<PinholeCamera<Cal3Bundler> >(cameraKey);
data.cameras[i] = camera;
}
@ -720,4 +720,22 @@ bool writeBALfromValues(const string& filename, SfM_data &data, Values& values){
return writeBAL(filename, data);
}
Values initialCamerasEstimate(const SfM_data& db) {
Values initial;
size_t i = 0; // NO POINTS: j = 0;
BOOST_FOREACH(const SfM_Camera& camera, db.cameras)
initial.insert(i++, camera);
return initial;
}
Values initialCamerasAndPointsEstimate(const SfM_data& db) {
Values initial;
size_t i = 0, j = 0;
BOOST_FOREACH(const SfM_Camera& camera, db.cameras)
initial.insert((i++), camera);
BOOST_FOREACH(const SfM_Track& track, db.tracks)
initial.insert(P(j++), track.p);
return initial;
}
} // \namespace gtsam


@ -175,4 +175,19 @@ GTSAM_EXPORT Pose3 gtsam2openGL(const Rot3& R, double tx, double ty, double tz);
*/
GTSAM_EXPORT Pose3 gtsam2openGL(const Pose3& PoseGTSAM);
/**
* @brief This function creates initial values for cameras from db
* @param db SfM_data structure holding the cameras
* @return Values
*/
GTSAM_EXPORT Values initialCamerasEstimate(const SfM_data& db);
/**
* @brief This function creates initial values for cameras and points from db
* @param db SfM_data structure holding the cameras and tracks
* @return Values
*/
GTSAM_EXPORT Values initialCamerasAndPointsEstimate(const SfM_data& db);
} // namespace gtsam
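A sketch (not part of this diff) of how the new initializers are meant to be used; it assumes the existing readBAL helper in dataset.h, and the wrapper function name is illustrative:

#include <gtsam/slam/dataset.h>
#include <string>

using namespace gtsam;

void initializeFromBAL(const std::string& filename) {
  SfM_data db;
  readBAL(filename, db);  // load cameras and tracks from a BAL file (assumes readBAL exists)
  // cameras are keyed 0..N-1, points with the P(j) symbols used in dataset.cpp
  Values initial = initialCamerasAndPointsEstimate(db);
  // ... build a NonlinearFactorGraph from db.tracks and optimize starting from 'initial'
}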


@ -0,0 +1,197 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file TriangulationFactor.h
* @date March 2, 2014
* @author Frank Dellaert
*/
#include <gtsam/nonlinear/NonlinearFactor.h>
#include <gtsam/geometry/SimpleCamera.h>
#include <gtsam/base/numericalDerivative.h>
#include <boost/optional.hpp>
#include <boost/make_shared.hpp>
namespace gtsam {
/**
* Non-linear factor for a constraint derived from a 2D measurement.
* The calibration and pose are assumed known.
* @addtogroup SLAM
*/
template<class CALIBRATION = Cal3_S2>
class TriangulationFactor: public NoiseModelFactor1<Point3> {
public:
/// Camera type
typedef PinholeCamera<CALIBRATION> Camera;
protected:
// Keep a copy of measurement and calibration for I/O
const Camera camera_; ///< Camera in which this landmark was seen
const Point2 measured_; ///< 2D measurement
// verbosity handling for Cheirality Exceptions
const bool throwCheirality_; ///< If true, rethrows Cheirality exceptions (default: false)
const bool verboseCheirality_; ///< If true, prints text for Cheirality exceptions (default: false)
public:
/// shorthand for base class type
typedef NoiseModelFactor1<Point3> Base;
/// shorthand for this class
typedef TriangulationFactor<CALIBRATION> This;
/// shorthand for a smart pointer to a factor
typedef boost::shared_ptr<This> shared_ptr;
/// Default constructor
TriangulationFactor() :
throwCheirality_(false), verboseCheirality_(false) {
}
/**
* Constructor with exception-handling flags
* @param camera is the camera in which unknown landmark is seen
* @param measured is the 2 dimensional location of point in image (the measurement)
* @param model is the 2-dimensional noise model for the measurement
* @param pointKey is the index of the landmark
* @param throwCheirality determines whether Cheirality exceptions are rethrown
* @param verboseCheirality determines whether exceptions are printed for Cheirality
*/
TriangulationFactor(const Camera& camera, const Point2& measured,
const SharedNoiseModel& model, Key pointKey, bool throwCheirality = false,
bool verboseCheirality = false) :
Base(model, pointKey), camera_(camera), measured_(measured), throwCheirality_(
throwCheirality), verboseCheirality_(verboseCheirality) {
if (model && model->dim() != 2)
throw std::invalid_argument(
"TriangulationFactor must be created with 2-dimensional noise model.");
}
/** Virtual destructor */
virtual ~TriangulationFactor() {
}
/// @return a deep copy of this factor
virtual gtsam::NonlinearFactor::shared_ptr clone() const {
return boost::static_pointer_cast<gtsam::NonlinearFactor>(
gtsam::NonlinearFactor::shared_ptr(new This(*this)));
}
/**
* print
* @param s optional string naming the factor
* @param keyFormatter optional formatter useful for printing Symbols
*/
void print(const std::string& s = "", const KeyFormatter& keyFormatter =
DefaultKeyFormatter) const {
std::cout << s << "TriangulationFactor,";
camera_.print("camera");
measured_.print("z");
Base::print("", keyFormatter);
}
/// equals
virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const {
const This *e = dynamic_cast<const This*>(&p);
return e && Base::equals(p, tol) && this->camera_.equals(e->camera_, tol)
&& this->measured_.equals(e->measured_, tol);
}
/// Evaluate error h(x)-z and optionally derivatives
Vector evaluateError(const Point3& point, boost::optional<Matrix&> H2 =
boost::none) const {
try {
Point2 error(camera_.project(point, boost::none, H2) - measured_);
return error.vector();
} catch (CheiralityException& e) {
if (H2)
*H2 = zeros(2, 3);
if (verboseCheirality_)
std::cout << e.what() << ": Landmark "
<< DefaultKeyFormatter(this->key()) << " moved behind camera"
<< std::endl;
if (throwCheirality_)
throw e;
return ones(2) * 2.0 * camera_.calibration().fx();
}
}
/// thread-safe (?) scratch memory for linearize
mutable VerticalBlockMatrix Ab;
mutable Matrix A;
mutable Vector b;
/**
* Linearize to a JacobianFactor, does not support constrained noise model !
* \f$ Ax-b \approx h(x+\delta x)-z = h(x) + A \delta x - z \f$
* Hence \f$ b = z - h(x) = - \mathtt{error\_vector}(x) \f$
*/
boost::shared_ptr<GaussianFactor> linearize(const Values& x) const {
// Only linearize if the factor is active
if (!this->active(x))
return boost::shared_ptr<JacobianFactor>();
// Allocate memory for Jacobian factor, do only once
if (Ab.rows() == 0) {
std::vector<size_t> dimensions(1, 3);
Ab = VerticalBlockMatrix(dimensions, 2, true);
A.resize(2,3);
b.resize(2);
}
// Would be even better if we could pass blocks to project
const Point3& point = x.at<Point3>(key());
b = -(camera_.project(point, boost::none, A) - measured_).vector();
if (noiseModel_)
this->noiseModel_->WhitenSystem(A, b);
Ab(0) = A;
Ab(1) = b;
return boost::make_shared<JacobianFactor>(this->keys_, Ab);
}
/** return the measurement */
const Point2& measured() const {
return measured_;
}
/** return verbosity */
inline bool verboseCheirality() const {
return verboseCheirality_;
}
/** return flag for throwing cheirality exceptions */
inline bool throwCheirality() const {
return throwCheirality_;
}
private:
/// Serialization function
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int version) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(camera_);
ar & BOOST_SERIALIZATION_NVP(measured_);
ar & BOOST_SERIALIZATION_NVP(throwCheirality_);
ar & BOOST_SERIALIZATION_NVP(verboseCheirality_);
}
};
} // \ namespace gtsam
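A sketch (not part of this diff) of triangulating a single point with the new factor by nonlinear optimization, mirroring what the tests below do; the wrapper function name is illustrative:

#include <gtsam_unstable/geometry/TriangulationFactor.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <boost/make_shared.hpp>
#include <vector>

using namespace gtsam;

Point3 triangulateWithFactors(const std::vector<PinholeCamera<Cal3_S2> >& cameras,
                              const std::vector<Point2>& measurements,
                              const Point3& initialGuess) {
  Key pointKey(1);
  SharedNoiseModel model = noiseModel::Isotropic::Sigma(2, 1.0);
  NonlinearFactorGraph graph;
  // one TriangulationFactor per camera/measurement pair, all on the same point key
  for (size_t i = 0; i < cameras.size(); i++)
    graph.push_back(boost::make_shared<TriangulationFactor<Cal3_S2> >(
        cameras[i], measurements[i], model, pointKey));
  Values initial;
  initial.insert(pointKey, initialGuess);
  return LevenbergMarquardtOptimizer(graph, initial).optimize().at<Point3>(pointKey);
}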


@ -16,50 +16,46 @@
* Author: cbeall3
*/
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/geometry/SimpleCamera.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/Cal3Bundler.h>
#include <gtsam_unstable/geometry/InvDepthCamera3.h>
#include <gtsam_unstable/geometry/triangulation.h>
#include <gtsam/geometry/Cal3Bundler.h>
#include <CppUnitLite/TestHarness.h>
#include <boost/assign.hpp>
#include <boost/assign/std/vector.hpp>
#include <boost/make_shared.hpp>
using namespace std;
using namespace gtsam;
using namespace boost::assign;
/* ************************************************************************* */
// Some common constants
TEST( triangulation, twoPosesBundler) {
boost::shared_ptr<Cal3Bundler> sharedCal = //
boost::make_shared<Cal3Bundler>(1500, 0, 0, 640, 480);
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
Pose3 level_pose = Pose3(Rot3::ypr(-M_PI / 2, 0., -M_PI / 2),
gtsam::Point3(0, 0, 1));
PinholeCamera<Cal3Bundler> level_camera(level_pose, *sharedCal);
static const boost::shared_ptr<Cal3_S2> sharedCal = //
boost::make_shared<Cal3_S2>(1500, 1200, 0, 640, 480);
// create second camera 1 meter to the right of first camera
Pose3 level_pose_right = level_pose * Pose3(Rot3(), Point3(1, 0, 0));
PinholeCamera<Cal3Bundler> level_camera_right(level_pose_right, *sharedCal);
// Looking along X-axis, 1 meter above ground plane (x-y)
static const Rot3 upright = Rot3::ypr(-M_PI / 2, 0., -M_PI / 2);
static const Pose3 pose1 = Pose3(upright, gtsam::Point3(0, 0, 1));
PinholeCamera<Cal3_S2> camera1(pose1, *sharedCal);
// landmark ~5 meters in front of camera
Point3 landmark(5, 0.5, 1.2);
// create second camera 1 meter to the right of first camera
static const Pose3 pose2 = pose1 * Pose3(Rot3(), Point3(1, 0, 0));
PinholeCamera<Cal3_S2> camera2(pose2, *sharedCal);
// 1. Project two landmarks into two cameras and triangulate
Point2 level_uv = level_camera.project(landmark);
Point2 level_uv_right = level_camera_right.project(landmark);
// landmark ~5 meters in front of camera
static const Point3 landmark(5, 0.5, 1.2);
vector < Pose3 > poses;
// 1. Project two landmarks into two cameras and triangulate
Point2 z1 = camera1.project(landmark);
Point2 z2 = camera2.project(landmark);
//******************************************************************************
TEST( triangulation, twoPoses) {
vector<Pose3> poses;
vector<Point2> measurements;
poses += level_pose, level_pose_right;
measurements += level_uv, level_uv_right;
poses += pose1, pose2;
measurements += z1, z2;
bool optimize = true;
double rank_tol = 1e-9;
@ -77,32 +73,48 @@ TEST( triangulation, twoPosesBundler) {
EXPECT(assert_equal(landmark, *triangulated_landmark_noise, 1e-2));
}
/* ************************************************************************* */
//******************************************************************************
TEST( triangulation, fourPoses) {
boost::shared_ptr<Cal3_S2> sharedCal = //
boost::make_shared<Cal3_S2>(1500, 1200, 0, 640, 480);
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
Pose3 level_pose = Pose3(Rot3::ypr(-M_PI / 2, 0., -M_PI / 2),
gtsam::Point3(0, 0, 1));
SimpleCamera level_camera(level_pose, *sharedCal);
TEST( triangulation, twoPosesBundler) {
// create second camera 1 meter to the right of first camera
Pose3 level_pose_right = level_pose * Pose3(Rot3(), Point3(1, 0, 0));
SimpleCamera level_camera_right(level_pose_right, *sharedCal);
// landmark ~5 meters in front of camera
Point3 landmark(5, 0.5, 1.2);
boost::shared_ptr<Cal3Bundler> bundlerCal = //
boost::make_shared<Cal3Bundler>(1500, 0, 0, 640, 480);
PinholeCamera<Cal3Bundler> camera1(pose1, *bundlerCal);
PinholeCamera<Cal3Bundler> camera2(pose2, *bundlerCal);
// 1. Project two landmarks into two cameras and triangulate
Point2 level_uv = level_camera.project(landmark);
Point2 level_uv_right = level_camera_right.project(landmark);
Point2 z1 = camera1.project(landmark);
Point2 z2 = camera2.project(landmark);
vector < Pose3 > poses;
vector<Pose3> poses;
vector<Point2> measurements;
poses += level_pose, level_pose_right;
measurements += level_uv, level_uv_right;
poses += pose1, pose2;
measurements += z1, z2;
bool optimize = true;
double rank_tol = 1e-9;
boost::optional<Point3> triangulated_landmark = triangulatePoint3(poses,
bundlerCal, measurements, rank_tol, optimize);
EXPECT(assert_equal(landmark, *triangulated_landmark, 1e-2));
// 2. Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814)
measurements.at(0) += Point2(0.1, 0.5);
measurements.at(1) += Point2(-0.2, 0.3);
boost::optional<Point3> triangulated_landmark_noise = triangulatePoint3(poses,
bundlerCal, measurements, rank_tol, optimize);
EXPECT(assert_equal(landmark, *triangulated_landmark_noise, 1e-2));
}
//******************************************************************************
TEST( triangulation, fourPoses) {
vector<Pose3> poses;
vector<Point2> measurements;
poses += pose1, pose2;
measurements += z1, z2;
boost::optional<Point3> triangulated_landmark = triangulatePoint3(poses,
sharedCal, measurements);
@ -112,21 +124,20 @@ TEST( triangulation, fourPoses) {
measurements.at(0) += Point2(0.1, 0.5);
measurements.at(1) += Point2(-0.2, 0.3);
boost::optional<Point3> triangulated_landmark_noise = triangulatePoint3(poses,
sharedCal, measurements);
boost::optional<Point3> triangulated_landmark_noise = //
triangulatePoint3(poses, sharedCal, measurements);
EXPECT(assert_equal(landmark, *triangulated_landmark_noise, 1e-2));
// 3. Add a slightly rotated third camera above, again with measurement noise
Pose3 pose_top = level_pose
* Pose3(Rot3::ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
SimpleCamera camera_top(pose_top, *sharedCal);
Point2 top_uv = camera_top.project(landmark);
Pose3 pose3 = pose1 * Pose3(Rot3::ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
SimpleCamera camera3(pose3, *sharedCal);
Point2 z3 = camera3.project(landmark);
poses += pose_top;
measurements += top_uv + Point2(0.1, -0.1);
poses += pose3;
measurements += z3 + Point2(0.1, -0.1);
boost::optional<Point3> triangulated_3cameras = triangulatePoint3(poses,
sharedCal, measurements);
boost::optional<Point3> triangulated_3cameras = //
triangulatePoint3(poses, sharedCal, measurements);
EXPECT(assert_equal(landmark, *triangulated_3cameras, 1e-2));
// Again with nonlinear optimization
@ -135,48 +146,42 @@ TEST( triangulation, fourPoses) {
EXPECT(assert_equal(landmark, *triangulated_3cameras_opt, 1e-2));
// 4. Test failure: Add a 4th camera facing the wrong way
Pose3 level_pose180 = Pose3(Rot3::ypr(M_PI / 2, 0., -M_PI / 2),
Point3(0, 0, 1));
SimpleCamera camera_180(level_pose180, *sharedCal);
Pose3 pose4 = Pose3(Rot3::ypr(M_PI / 2, 0., -M_PI / 2), Point3(0, 0, 1));
SimpleCamera camera4(pose4, *sharedCal);
CHECK_EXCEPTION(camera_180.project(landmark) ;, CheiralityException);
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
CHECK_EXCEPTION(camera4.project(landmark);, CheiralityException);
poses += level_pose180;
poses += pose4;
measurements += Point2(400, 400);
CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements),
TriangulationCheiralityException);
#endif
}
/* ************************************************************************* */
//******************************************************************************
TEST( triangulation, fourPoses_distinct_Ks) {
Cal3_S2 K1(1500, 1200, 0, 640, 480);
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
Pose3 level_pose = Pose3(Rot3::ypr(-M_PI / 2, 0., -M_PI / 2),
gtsam::Point3(0, 0, 1));
SimpleCamera level_camera(level_pose, K1);
SimpleCamera camera1(pose1, K1);
// create second camera 1 meter to the right of first camera
Pose3 level_pose_right = level_pose * Pose3(Rot3(), Point3(1, 0, 0));
Cal3_S2 K2(1600, 1300, 0, 650, 440);
SimpleCamera level_camera_right(level_pose_right, K2);
// landmark ~5 meters in front of camera
Point3 landmark(5, 0.5, 1.2);
SimpleCamera camera2(pose2, K2);
// 1. Project two landmarks into two cameras and triangulate
Point2 level_uv = level_camera.project(landmark);
Point2 level_uv_right = level_camera_right.project(landmark);
Point2 z1 = camera1.project(landmark);
Point2 z2 = camera2.project(landmark);
vector<SimpleCamera> cameras;
vector<Point2> measurements;
cameras += level_camera, level_camera_right;
measurements += level_uv, level_uv_right;
cameras += camera1, camera2;
measurements += z1, z2;
boost::optional<Point3> triangulated_landmark = triangulatePoint3(cameras,
measurements);
boost::optional<Point3> triangulated_landmark = //
triangulatePoint3(cameras, measurements);
EXPECT(assert_equal(landmark, *triangulated_landmark, 1e-2));
// 2. Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814)
@ -188,17 +193,16 @@ TEST( triangulation, fourPoses_distinct_Ks) {
EXPECT(assert_equal(landmark, *triangulated_landmark_noise, 1e-2));
// 3. Add a slightly rotated third camera above, again with measurement noise
Pose3 pose_top = level_pose
* Pose3(Rot3::ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
Pose3 pose3 = pose1 * Pose3(Rot3::ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
Cal3_S2 K3(700, 500, 0, 640, 480);
SimpleCamera camera_top(pose_top, K3);
Point2 top_uv = camera_top.project(landmark);
SimpleCamera camera3(pose3, K3);
Point2 z3 = camera3.project(landmark);
cameras += camera_top;
measurements += top_uv + Point2(0.1, -0.1);
cameras += camera3;
measurements += z3 + Point2(0.1, -0.1);
boost::optional<Point3> triangulated_3cameras = triangulatePoint3(cameras,
measurements);
boost::optional<Point3> triangulated_3cameras = //
triangulatePoint3(cameras, measurements);
EXPECT(assert_equal(landmark, *triangulated_3cameras, 1e-2));
// Again with nonlinear optimization
@ -207,47 +211,40 @@ TEST( triangulation, fourPoses_distinct_Ks) {
EXPECT(assert_equal(landmark, *triangulated_3cameras_opt, 1e-2));
// 4. Test failure: Add a 4th camera facing the wrong way
Pose3 level_pose180 = Pose3(Rot3::ypr(M_PI / 2, 0., -M_PI / 2),
Point3(0, 0, 1));
Pose3 pose4 = Pose3(Rot3::ypr(M_PI / 2, 0., -M_PI / 2), Point3(0, 0, 1));
Cal3_S2 K4(700, 500, 0, 640, 480);
SimpleCamera camera_180(level_pose180, K4);
SimpleCamera camera4(pose4, K4);
CHECK_EXCEPTION(camera_180.project(landmark) ;, CheiralityException);
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
CHECK_EXCEPTION(camera4.project(landmark);, CheiralityException);
cameras += camera_180;
cameras += camera4;
measurements += Point2(400, 400);
CHECK_EXCEPTION(triangulatePoint3(cameras, measurements),
TriangulationCheiralityException);
#endif
}
/* ************************************************************************* */
//******************************************************************************
TEST( triangulation, twoIdenticalPoses) {
boost::shared_ptr<Cal3_S2> sharedCal = //
boost::make_shared<Cal3_S2>(1500, 1200, 0, 640, 480);
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
Pose3 level_pose = Pose3(Rot3::ypr(-M_PI / 2, 0., -M_PI / 2),
gtsam::Point3(0, 0, 1));
SimpleCamera level_camera(level_pose, *sharedCal);
// landmark ~5 meters in front of camera
Point3 landmark(5, 0.5, 1.2);
SimpleCamera camera1(pose1, *sharedCal);
// 1. Project the landmark into the camera and triangulate
Point2 level_uv = level_camera.project(landmark);
Point2 z1 = camera1.project(landmark);
vector < Pose3 > poses;
vector<Pose3> poses;
vector<Point2> measurements;
poses += level_pose, level_pose;
measurements += level_uv, level_uv;
poses += pose1, pose1;
measurements += z1, z1;
CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements),
TriangulationUnderconstrainedException);
}
/* ************************************************************************* *
//******************************************************************************
/*
TEST( triangulation, onePose) {
// we expect this test to fail with a TriangulationUnderconstrainedException
// because there's only one camera observation
@ -265,9 +262,32 @@ TEST( triangulation, twoIdenticalPoses) {
}
*/
/* ************************************************************************* */
//******************************************************************************
TEST( triangulation, TriangulationFactor ) {
// Create the factor with a measurement that is 3 pixels off in x
Key pointKey(1);
SharedNoiseModel model;
typedef TriangulationFactor<> Factor;
Factor factor(camera1, z1, model, pointKey, sharedCal);
// Use the factor to calculate the Jacobians
Matrix HActual;
factor.evaluateError(landmark, HActual);
// Matrix expectedH1 = numericalDerivative11<Pose3>(
// boost::bind(&EssentialMatrixConstraint::evaluateError, &factor, _1, pose2,
// boost::none, boost::none), pose1);
// The expected Jacobian
Matrix HExpected = numericalDerivative11<Point3>(
boost::bind(&Factor::evaluateError, &factor, _1, boost::none), landmark);
// Verify the Jacobians are correct
CHECK(assert_equal(HExpected, HActual, 1e-3));
}
//******************************************************************************
int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */
//******************************************************************************

View File

@ -18,6 +18,9 @@
#include <gtsam_unstable/geometry/triangulation.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
namespace gtsam {
/**

View File

@ -18,19 +18,11 @@
#pragma once
#include <gtsam/geometry/Pose3.h>
#include <gtsam/geometry/Point2.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/slam/ProjectionFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam_unstable/base/dllexport.h>
#include <boost/foreach.hpp>
#include <boost/assign.hpp>
#include <boost/assign/std/vector.hpp>
#include <gtsam_unstable/geometry/TriangulationFactor.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/slam/PriorFactor.h>
#include <vector>
@ -60,10 +52,9 @@ public:
* @param rank_tol SVD rank tolerance
* @return Triangulated Point3
*/
GTSAM_UNSTABLE_EXPORT Point3 triangulateDLT(const std::vector<Matrix>& projection_matrices, const std::vector<Point2>& measurements, double rank_tol);
// Frank says: putting priors on poses and then optimizing is a terrible idea: we turn a 3-DoF problem into a much more difficult one
// We should have a projection factor that knows the pose is fixed
GTSAM_UNSTABLE_EXPORT Point3 triangulateDLT(
const std::vector<Matrix>& projection_matrices,
const std::vector<Point2>& measurements, double rank_tol);
///
/**
@ -87,10 +78,9 @@ std::pair<NonlinearFactorGraph, Values> triangulationGraph(
static SharedNoiseModel prior_model(noiseModel::Isotropic::Sigma(6, 1e-6));
for (size_t i = 0; i < measurements.size(); i++) {
const Pose3& pose_i = poses[i];
graph.push_back(GenericProjectionFactor<Pose3, Point3, CALIBRATION> //
(measurements[i], unit2, i, landmarkKey, sharedCal));
graph.push_back(PriorFactor<Pose3>(i, pose_i, prior_model));
values.insert(i, pose_i);
PinholeCamera<CALIBRATION> camera_i(pose_i, *sharedCal);
graph.push_back(TriangulationFactor<CALIBRATION> //
(camera_i, measurements[i], unit2, landmarkKey));
}
return std::make_pair(graph, values);
}
@ -116,13 +106,8 @@ std::pair<NonlinearFactorGraph, Values> triangulationGraph(
static SharedNoiseModel prior_model(noiseModel::Isotropic::Sigma(6, 1e-6));
for (size_t i = 0; i < measurements.size(); i++) {
const PinholeCamera<CALIBRATION>& camera_i = cameras[i];
boost::shared_ptr<CALIBRATION> // Seems wasteful to create new object
sharedCal(new CALIBRATION(camera_i.calibration()));
graph.push_back(GenericProjectionFactor<Pose3, Point3, CALIBRATION> //
(measurements[i], unit2, i, landmarkKey, sharedCal));
const Pose3& pose_i = camera_i.pose();
graph.push_back(PriorFactor<Pose3>(i, pose_i, prior_model));
values.insert(i, pose_i);
graph.push_back(TriangulationFactor<CALIBRATION> //
(camera_i, measurements[i], unit2, landmarkKey));
}
return std::make_pair(graph, values);
}
@ -135,7 +120,8 @@ std::pair<NonlinearFactorGraph, Values> triangulationGraph(
* @param landmarkKey to refer to landmark
* @return refined Point3
*/
GTSAM_UNSTABLE_EXPORT Point3 optimize(const NonlinearFactorGraph& graph, const Values& values, Key landmarkKey);
GTSAM_UNSTABLE_EXPORT Point3 optimize(const NonlinearFactorGraph& graph,
const Values& values, Key landmarkKey);
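For orientation, here is a minimal sketch of how these declarations compose: one TriangulationFactor per measurement, the landmark seeded from an initial guess (e.g. DLT), and the graph handed to optimize() for Levenberg-Marquardt refinement. The helper name and its argument list are illustrative assumptions; only the factor constructor and the optimize() overload above come from this header.

#include <gtsam/geometry/SimpleCamera.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam_unstable/geometry/triangulation.h>
#include <vector>

// Hypothetical driver, assuming the TriangulationFactor constructor and the
// optimize() overload shown in this header.
gtsam::Point3 refineLandmark(const std::vector<gtsam::SimpleCamera>& cameras,
    const std::vector<gtsam::Point2>& measurements,
    const gtsam::Point3& initialEstimate) {
  using namespace gtsam;
  Key landmarkKey = 0;
  static SharedNoiseModel unit2(noiseModel::Unit::Create(2));
  NonlinearFactorGraph graph;
  for (size_t i = 0; i < measurements.size(); i++)
    graph.push_back(TriangulationFactor<Cal3_S2> //
        (cameras[i], measurements[i], unit2, landmarkKey));
  Values values;
  values.insert(landmarkKey, initialEstimate); // e.g. from triangulateDLT
  return optimize(graph, values, landmarkKey); // LM refinement, declared above
}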
/**
 * Given an initial estimate, refine a point using measurements in several cameras
@ -221,7 +207,7 @@ Point3 triangulatePoint3(const std::vector<Pose3>& poses,
BOOST_FOREACH(const Pose3& pose, poses) {
const Point3& p_local = pose.transform_to(point);
if (p_local.z() <= 0)
throw(TriangulationCheiralityException());
throw(TriangulationCheiralityException());
}
#endif
@ -271,7 +257,7 @@ Point3 triangulatePoint3(
BOOST_FOREACH(const Camera& camera, cameras) {
const Point3& p_local = camera.pose().transform_to(point);
if (p_local.z() <= 0)
throw(TriangulationCheiralityException());
throw(TriangulationCheiralityException());
}
#endif
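The loop above is the cheirality test: a triangulated point must have positive depth in every camera that observed it. A standalone sketch of the same check, assuming nothing beyond gtsam's Pose3/Point3 (the helper name is illustrative):

#include <gtsam/geometry/Pose3.h>

// Hypothetical helper: true if a world-frame point lies in front of a camera at
// 'pose' (the camera looks along +z in its own frame, as assumed in the loop above).
bool inFrontOf(const gtsam::Pose3& pose, const gtsam::Point3& point) {
  const gtsam::Point3 p_local = pose.transform_to(point);
  return p_local.z() > 0;
}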

View File

@ -94,17 +94,17 @@ std::pair<Mechanization_bRn2, KalmanFilter::State> AHRS::initialize(double g_e)
Matrix P12 = -Omega_T * H_g * Pa;
Matrix P_plus_k2 = Matrix(9, 9);
P_plus_k2.block(0, 0, 3, 3) = P11;
P_plus_k2.block(0, 3, 3, 3) = Z3;
P_plus_k2.block(0, 6, 3, 3) = P12;
P_plus_k2.block<3,3>(0, 0) = P11;
P_plus_k2.block<3,3>(0, 3) = Z3;
P_plus_k2.block<3,3>(0, 6) = P12;
P_plus_k2.block(3, 0, 3, 3) = Z3;
P_plus_k2.block(3, 3, 3, 3) = Pg_;
P_plus_k2.block(3, 6, 3, 3) = Z3;
P_plus_k2.block<3,3>(3, 0) = Z3;
P_plus_k2.block<3,3>(3, 3) = Pg_;
P_plus_k2.block<3,3>(3, 6) = Z3;
P_plus_k2.block(6, 0, 3, 3) = trans(P12);
P_plus_k2.block(6, 3, 3, 3) = Z3;
P_plus_k2.block(6, 6, 3, 3) = Pa;
P_plus_k2.block<3,3>(6, 0) = trans(P12);
P_plus_k2.block<3,3>(6, 3) = Z3;
P_plus_k2.block<3,3>(6, 6) = Pa;
Vector dx = zero(9);
KalmanFilter::State state = KF_.init(dx, P_plus_k2);
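The hunk above only swaps the dynamic-size block(i, j, 3, 3) accessor for the fixed-size block<3,3>(i, j) form; both are standard Eigen (gtsam::Matrix is an Eigen matrix), and the fixed-size variant merely states the block dimensions at compile time. A minimal standalone sketch of the equivalence, using only the Matrix helpers already seen in this file:

#include <gtsam/base/Matrix.h>
#include <iostream>

int main() {
  using namespace gtsam;
  Matrix P = zeros(9, 9);
  Matrix P11 = eye(3);
  // Dynamic-size block: the 3x3 dimensions are runtime arguments.
  P.block(0, 0, 3, 3) = P11;
  // Fixed-size block: the 3x3 dimensions are template arguments, known at
  // compile time; the assignment itself is equivalent.
  P.block<3, 3>(0, 3) = P11;
  std::cout << P.topRows(3) << std::endl;
  return 0;
}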
@ -127,19 +127,20 @@ std::pair<Mechanization_bRn2, KalmanFilter::State> AHRS::integrate(
Matrix Z3 = zeros(3, 3);
Matrix F_k = zeros(9, 9);
F_k.block(0, 3, 3, 3) = -bRn;
F_k.block(3, 3, 3, 3) = F_g_;
F_k.block(6, 6, 3, 3) = F_a_;
F_k.block<3,3>(0, 3) = -bRn;
F_k.block<3,3>(3, 3) = F_g_;
F_k.block<3,3>(6, 6) = F_a_;
Matrix G_k = zeros(9, 12);
G_k.block(0, 0, 3, 3) = -bRn;
G_k.block(0, 6, 3, 3) = -bRn;
G_k.block(3, 3, 3, 3) = I3;
G_k.block(6, 9, 3, 3) = I3;
G_k.block<3,3>(0, 0) = -bRn;
G_k.block<3,3>(0, 6) = -bRn;
G_k.block<3,3>(3, 3) = I3;
G_k.block<3,3>(6, 9) = I3;
Matrix Q_k = G_k * var_w_ * trans(G_k);
Matrix Psi_k = eye(9) + dt * F_k; // +dt*dt*F_k*F_k/2; // transition matrix
// This implements update in section 10.6
Matrix B = zeros(9, 9);
Vector u2 = zero(9);
KalmanFilter::State newState = KF_.predictQ(state, Psi_k,B,u2,dt*Q_k);
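Psi_k above is the first-order truncation of the matrix exponential expm(dt * F_k); the commented-out term is the second-order correction. A hedged sketch of what enabling it would look like (hypothetical helper, not part of this change):

#include <gtsam/base/Matrix.h>

// Hypothetical helper: discrete transition matrix for xdot = F * x over a step
// dt, truncating expm(dt * F) after the first- or second-order term.
gtsam::Matrix transitionMatrix(const gtsam::Matrix& F, double dt,
    bool secondOrder = false) {
  gtsam::Matrix Psi = gtsam::eye(F.rows()) + dt * F;
  if (secondOrder)
    Psi += 0.5 * dt * dt * F * F;
  return Psi;
}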
@ -148,21 +149,21 @@ std::pair<Mechanization_bRn2, KalmanFilter::State> AHRS::integrate(
/* ************************************************************************* */
bool AHRS::isAidingAvailable(const Mechanization_bRn2& mech,
const gtsam::Vector& f, const gtsam::Vector& u, double ge) {
const gtsam::Vector& f, const gtsam::Vector& u, double ge) const {
// Subtract the biases
Vector f_ = f - mech.x_a();
Vector u_ = u - mech.x_g();
double mu_f = f_.norm() - ge;
double mu_u = u_.norm();
return (fabs(mu_f)<0.5 && mu_u < 5.0 / 180.0 * 3.1415926);
double mu_f = f_.norm() - ge; // accelerometer has same magnitude as local gravity?
double mu_u = u_.norm(); // gyro says we are not maneuvering?
return (fabs(mu_f)<0.5 && mu_u < 5.0 / 180.0 * M_PI);
}
/* ************************************************************************* */
std::pair<Mechanization_bRn2, KalmanFilter::State> AHRS::aid(
const Mechanization_bRn2& mech, KalmanFilter::State state,
const Vector& f, bool Farrell) {
const Vector& f, bool Farrell) const {
Matrix bRn = mech.bRn().matrix();
@ -210,7 +211,7 @@ std::pair<Mechanization_bRn2, KalmanFilter::State> AHRS::aid(
std::pair<Mechanization_bRn2, KalmanFilter::State> AHRS::aidGeneral(
const Mechanization_bRn2& mech, KalmanFilter::State state,
const Vector& f, const Vector& f_previous,
const Rot3& R_previous) {
const Rot3& R_previous) const {
Matrix increment = R_previous.between(mech.bRn()).matrix();

View File

@ -52,15 +52,22 @@ public:
const Vector& u, double dt);
bool isAidingAvailable(const Mechanization_bRn2& mech,
const Vector& f, const Vector& u, double ge);
const Vector& f, const Vector& u, double ge) const;
/**
* Aid the AHRS with an accelerometer reading, typically called when isAidingAvailable == true
* @param mech current mechanization state
* @param state current Kalman filter state
* @param f accelerometer reading
* @param Farrell
*/
std::pair<Mechanization_bRn2, KalmanFilter::State> aid(
const Mechanization_bRn2& mech, KalmanFilter::State state,
const Vector& f, bool Farrell=0);
const Vector& f, bool Farrell=0) const;
std::pair<Mechanization_bRn2, KalmanFilter::State> aidGeneral(
const Mechanization_bRn2& mech, KalmanFilter::State state,
const Vector& f, const Vector& f_expected, const Rot3& R_previous);
const Vector& f, const Vector& f_expected, const Rot3& R_previous) const;
/// print
void print(const std::string& s = "") const;

View File

@ -34,14 +34,20 @@ public:
bRn_(initial_bRn), x_g_(initial_x_g), x_a_(initial_x_a) {
}
Vector b_g(double g_e) {
Vector n_g = (Vector(3) << 0.0, 0.0, g_e);
/// Copy constructor
Mechanization_bRn2(const Mechanization_bRn2& other) :
bRn_(other.bRn_), x_g_(other.x_g_), x_a_(other.x_a_) {
}
/// gravity in the body frame
Vector b_g(double g_e) const {
Vector n_g = (Vector(3) << 0, 0, g_e);
return (bRn_ * n_g).vector();
}
Rot3 bRn() const {return bRn_; }
Vector x_g() const { return x_g_; }
Vector x_a() const { return x_a_; }
const Rot3& bRn() const {return bRn_; }
const Vector& x_g() const { return x_g_; }
const Vector& x_a() const { return x_a_; }
/**
* Initialize the first Mechanization state

View File

@ -237,23 +237,105 @@ TEST(NonlinearOptimizer, NullFactor) {
TEST(NonlinearOptimizer, MoreOptimization) {
NonlinearFactorGraph fg;
fg += PriorFactor<Pose2>(0, Pose2(0,0,0), noiseModel::Isotropic::Sigma(3,1));
fg += BetweenFactor<Pose2>(0, 1, Pose2(1,0,M_PI/2), noiseModel::Isotropic::Sigma(3,1));
fg += BetweenFactor<Pose2>(1, 2, Pose2(1,0,M_PI/2), noiseModel::Isotropic::Sigma(3,1));
fg += PriorFactor<Pose2>(0, Pose2(0, 0, 0),
noiseModel::Isotropic::Sigma(3, 1));
fg += BetweenFactor<Pose2>(0, 1, Pose2(1, 0, M_PI / 2),
noiseModel::Isotropic::Sigma(3, 1));
fg += BetweenFactor<Pose2>(1, 2, Pose2(1, 0, M_PI / 2),
noiseModel::Isotropic::Sigma(3, 1));
Values init;
init.insert(0, Pose2(3,4,-M_PI));
init.insert(1, Pose2(10,2,-M_PI));
init.insert(2, Pose2(11,7,-M_PI));
init.insert(0, Pose2(3, 4, -M_PI));
init.insert(1, Pose2(10, 2, -M_PI));
init.insert(2, Pose2(11, 7, -M_PI));
Values expected;
expected.insert(0, Pose2(0,0,0));
expected.insert(1, Pose2(1,0,M_PI/2));
expected.insert(2, Pose2(1,1,M_PI));
expected.insert(0, Pose2(0, 0, 0));
expected.insert(1, Pose2(1, 0, M_PI / 2));
expected.insert(2, Pose2(1, 1, M_PI));
VectorValues expectedGradient;
expectedGradient.insert(0,zero(3));
expectedGradient.insert(1,zero(3));
expectedGradient.insert(2,zero(3));
// Try LM and Dogleg
EXPECT(assert_equal(expected, LevenbergMarquardtOptimizer(fg, init).optimize()));
LevenbergMarquardtParams params;
// params.setVerbosityLM("TRYDELTA");
// params.setVerbosity("TERMINATION");
params.setlambdaUpperBound(1e9);
// params.setRelativeErrorTol(0);
// params.setAbsoluteErrorTol(0);
//params.setlambdaInitial(10);
{
LevenbergMarquardtOptimizer optimizer(fg, init, params);
// test convergence
Values actual = optimizer.optimize();
EXPECT(assert_equal(expected, actual));
// Check that the gradient is zero
GaussianFactorGraph::shared_ptr linear = optimizer.linearize();
EXPECT(assert_equal(expectedGradient,linear->gradientAtZero()));
}
EXPECT(assert_equal(expected, DoglegOptimizer(fg, init).optimize()));
// cout << "===================================================================================" << endl;
// Try LM with diagonal damping
Values initBetter = init;
// initBetter.insert(0, Pose2(3,4,0));
// initBetter.insert(1, Pose2(10,2,M_PI/3));
// initBetter.insert(2, Pose2(11,7,M_PI/2));
{
// params.setDiagonalDamping(true);
// LevenbergMarquardtOptimizer optimizer(fg, initBetter, params);
//
// // test the diagonal
// GaussianFactorGraph::shared_ptr linear = optimizer.linearize();
// GaussianFactorGraph damped = optimizer.buildDampedSystem(*linear);
// VectorValues d = linear->hessianDiagonal(), //
// expectedDiagonal = d + params.lambdaInitial * d;
// EXPECT(assert_equal(expectedDiagonal, damped.hessianDiagonal()));
//
// // test convergence (does not!)
// Values actual = optimizer.optimize();
// EXPECT(assert_equal(expected, actual));
//
// // Check that the gradient is zero (it is not!)
// linear = optimizer.linearize();
// EXPECT(assert_equal(expectedGradient,linear->gradientAtZero()));
//
// // Check that the gradient is zero for damped system (it is not!)
// damped = optimizer.buildDampedSystem(*linear);
// VectorValues actualGradient = damped.gradientAtZero();
// EXPECT(assert_equal(expectedGradient,actualGradient));
//
// // Check errors at convergence and errors in direction of gradient (decreases!)
// EXPECT_DOUBLES_EQUAL(46.02558,fg.error(actual),1e-5);
// EXPECT_DOUBLES_EQUAL(44.742237,fg.error(actual.retract(-0.01*actualGradient)),1e-5);
//
// // Check that solve yields gradient (it's not equal to gradient, as predicted)
// VectorValues delta = damped.optimize();
// double factor = actualGradient[0][0]/delta[0][0];
// EXPECT(assert_equal(actualGradient,factor*delta));
//
// // Still pointing downhill wrt actual gradient !
// EXPECT_DOUBLES_EQUAL( 0.1056157,dot(-1*actualGradient,delta),1e-3);
//
// // delta.print("This is the delta value computed by LM with diagonal damping");
//
// // Still pointing downhill wrt expected gradient (IT DOES NOT! actually they are perpendicular)
// EXPECT_DOUBLES_EQUAL( 0.0,dot(-1*expectedGradient,delta),1e-5);
//
// // Check errors at convergence and errors in direction of solution (does not decrease!)
// EXPECT_DOUBLES_EQUAL(46.0254859,fg.error(actual.retract(delta)),1e-5);
//
// // Check errors at convergence and errors at a small step in direction of solution (does not decrease!)
// EXPECT_DOUBLES_EQUAL(46.0255,fg.error(actual.retract(0.01*delta)),1e-3);
}
}
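A compact recap of the LM setup this test exercises, using only setters and accessors that appear above (the helper name is an illustrative assumption; the diagonal-damping variant stays disabled, as in the commented block):

#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

// Hypothetical helper: optimize with a large lambda ceiling and return the
// gradient at the solution, which should be numerically zero at a local minimum.
gtsam::VectorValues optimizeAndGradient(const gtsam::NonlinearFactorGraph& fg,
    const gtsam::Values& init) {
  gtsam::LevenbergMarquardtParams params;
  params.setlambdaUpperBound(1e9); // allow LM to raise lambda a long way
  gtsam::LevenbergMarquardtOptimizer optimizer(fg, init, params);
  optimizer.optimize();
  return optimizer.linearize()->gradientAtZero();
}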
/* ************************************************************************* */