Merge branch 'develop' into direct-hybrid-fg
commit
fbffd79366
|
|
@ -56,7 +56,7 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install Dependencies
|
||||
run: |
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@ jobs:
|
|||
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
|
||||
name: [
|
||||
macos-12-xcode-14.2,
|
||||
macos-14-xcode-15.4,
|
||||
]
|
||||
|
||||
build_type: [Debug, Release]
|
||||
|
|
@ -36,9 +37,14 @@ jobs:
|
|||
compiler: xcode
|
||||
version: "14.2"
|
||||
|
||||
- name: macos-14-xcode-15.4
|
||||
os: macos-14
|
||||
compiler: xcode
|
||||
version: "15.4"
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install Dependencies
|
||||
run: |
|
||||
|
|
|
|||
|
|
@ -32,6 +32,7 @@ jobs:
|
|||
ubuntu-20.04-gcc-9-tbb,
|
||||
ubuntu-20.04-clang-9,
|
||||
macos-12-xcode-14.2,
|
||||
macos-14-xcode-15.4,
|
||||
windows-2022-msbuild,
|
||||
]
|
||||
|
||||
|
|
@ -59,13 +60,18 @@ jobs:
|
|||
compiler: xcode
|
||||
version: "14.2"
|
||||
|
||||
- name: macos-14-xcode-15.4
|
||||
os: macos-14
|
||||
compiler: xcode
|
||||
version: "15.4"
|
||||
|
||||
- name: windows-2022-msbuild
|
||||
os: windows-2022
|
||||
platform: 64
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
|
|
@ -162,6 +168,14 @@ jobs:
|
|||
run: |
|
||||
bash .github/scripts/python.sh -d
|
||||
|
||||
- name: Create virtual on MacOS
|
||||
if: runner.os == 'macOS'
|
||||
run: |
|
||||
python$PYTHON_VERSION -m venv venv
|
||||
source venv/bin/activate
|
||||
echo "PATH=$(pwd)/venv/bin:$PATH" >> $GITHUB_ENV
|
||||
python -m pip install --upgrade pip
|
||||
|
||||
- name: Install Python Dependencies
|
||||
shell: bash
|
||||
run: python$PYTHON_VERSION -m pip install -r python/dev_requirements.txt
|
||||
|
|
|
|||
|
|
@ -83,7 +83,7 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
|
|
|
|||
|
|
@ -44,7 +44,7 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup msbuild
|
||||
uses: ilammy/msvc-dev-cmd@v1
|
||||
|
|
|
|||
|
|
@ -1,4 +1,16 @@
|
|||
cmake_minimum_required(VERSION 3.0)
|
||||
cmake_minimum_required(VERSION 3.5)
|
||||
if (POLICY CMP0082)
|
||||
cmake_policy(SET CMP0082 NEW) # install from sub-directories immediately
|
||||
endif()
|
||||
if (POLICY CMP0102)
|
||||
cmake_policy(SET CMP0102 NEW) # set policy on advanced variables and cmake cache
|
||||
endif()
|
||||
if (POLICY CMP0156)
|
||||
cmake_policy(SET CMP0156 NEW) # new linker strategies
|
||||
endif()
|
||||
if (POLICY CMP0167)
|
||||
cmake_policy(SET CMP0167 OLD) # Don't complain about boost
|
||||
endif()
|
||||
|
||||
# Set the version number for the library
|
||||
set (GTSAM_VERSION_MAJOR 4)
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ file(GLOB cppunitlite_src "*.cpp")
|
|||
add_library(CppUnitLite STATIC ${cppunitlite_src} ${cppunitlite_headers})
|
||||
list(APPEND GTSAM_EXPORTED_TARGETS CppUnitLite)
|
||||
set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE)
|
||||
target_compile_features(CppUnitLite PUBLIC ${GTSAM_COMPILE_FEATURES_PUBLIC})
|
||||
|
||||
gtsam_assign_source_folders("${cppunitlite_headers};${cppunitlite_src}") # MSVC project structure
|
||||
|
||||
|
|
|
|||
|
|
@ -25,7 +25,7 @@ endif()
|
|||
set(BOOST_FIND_MINIMUM_VERSION 1.65)
|
||||
set(BOOST_FIND_MINIMUM_COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex)
|
||||
|
||||
find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS})
|
||||
find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS} REQUIRED)
|
||||
|
||||
# Required components
|
||||
if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
# This file shows how to build and link a user project against GTSAM using CMake
|
||||
###################################################################################
|
||||
# To create your own project, replace "example" with the actual name of your project
|
||||
cmake_minimum_required(VERSION 3.0)
|
||||
cmake_minimum_required(VERSION 3.5)
|
||||
project(example CXX)
|
||||
|
||||
# Find GTSAM, either from a local build, or from a Debian/Ubuntu package.
|
||||
|
|
|
|||
|
|
@ -270,11 +270,9 @@ struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
|
|||
}
|
||||
|
||||
|
||||
Index count = 0;
|
||||
// FIXME compute a reference value to filter zeros
|
||||
for (typename AmbiVector<Scalar,StorageIndex>::Iterator it(tempVector/*,1e-12*/); it; ++it)
|
||||
{
|
||||
++ count;
|
||||
// std::cerr << "fill " << it.index() << ", " << col << "\n";
|
||||
// std::cout << it.value() << " ";
|
||||
// FIXME use insertBack
|
||||
|
|
|
|||
|
|
@ -75,8 +75,6 @@ void SparseLUImpl<Scalar,StorageIndex>::heap_relax_snode (const Index n, IndexVe
|
|||
// Identify the relaxed supernodes by postorder traversal of the etree
|
||||
Index snode_start; // beginning of a snode
|
||||
StorageIndex k;
|
||||
Index nsuper_et_post = 0; // Number of relaxed snodes in postordered etree
|
||||
Index nsuper_et = 0; // Number of relaxed snodes in the original etree
|
||||
StorageIndex l;
|
||||
for (j = 0; j < n; )
|
||||
{
|
||||
|
|
@ -88,7 +86,6 @@ void SparseLUImpl<Scalar,StorageIndex>::heap_relax_snode (const Index n, IndexVe
|
|||
parent = et(j);
|
||||
}
|
||||
// Found a supernode in postordered etree, j is the last column
|
||||
++nsuper_et_post;
|
||||
k = StorageIndex(n);
|
||||
for (Index i = snode_start; i <= j; ++i)
|
||||
k = (std::min)(k, inv_post(i));
|
||||
|
|
@ -97,7 +94,6 @@ void SparseLUImpl<Scalar,StorageIndex>::heap_relax_snode (const Index n, IndexVe
|
|||
{
|
||||
// This is also a supernode in the original etree
|
||||
relax_end(k) = l; // Record last column
|
||||
++nsuper_et;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
|
@ -107,7 +103,6 @@ void SparseLUImpl<Scalar,StorageIndex>::heap_relax_snode (const Index n, IndexVe
|
|||
if (descendants(i) == 0)
|
||||
{
|
||||
relax_end(l) = l;
|
||||
++nsuper_et;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -131,7 +131,7 @@ that structure.
|
|||
/************************************************************************/
|
||||
pdbf *gk_readpdbfile(char *fname) { /* {{{ */
|
||||
int i=0, res=0;
|
||||
char linetype[6];
|
||||
char linetype[7];
|
||||
int aserial;
|
||||
char aname[5] = " \0";
|
||||
char altLoc = ' ';
|
||||
|
|
|
|||
|
|
@ -292,7 +292,7 @@ TEST(DiscreteConditional, choose) {
|
|||
/* ************************************************************************* */
|
||||
// Check argmax on P(C|D) and P(D), plus tie-breaking for P(B)
|
||||
TEST(DiscreteConditional, Argmax) {
|
||||
DiscreteKey B(2, 2), C(2, 2), D(4, 2);
|
||||
DiscreteKey C(2, 2), D(4, 2);
|
||||
DiscreteConditional B_prior(D, "1/1");
|
||||
DiscreteConditional D_prior(D, "1/3");
|
||||
DiscreteConditional C_given_D((C | D) = "1/4 1/1");
|
||||
|
|
|
|||
|
|
@ -68,7 +68,7 @@ namespace gtsam {
|
|||
return fromAngle(theta * degree);
|
||||
}
|
||||
|
||||
/// Named constructor from cos(theta),sin(theta) pair, will *not* normalize!
|
||||
/// Named constructor from cos(theta),sin(theta) pair
|
||||
static Rot2 fromCosSin(double c, double s);
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -1207,6 +1207,31 @@ TEST(Pose3, Print) {
|
|||
EXPECT(assert_print_equal(expected, pose));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(Pose3, ExpmapChainRule) {
|
||||
// Muliply with an arbitrary matrix and exponentiate
|
||||
Matrix6 M;
|
||||
M << 1, 2, 3, 4, 5, 6, //
|
||||
7, 8, 9, 1, 2, 3, //
|
||||
4, 5, 6, 7, 8, 9, //
|
||||
1, 2, 3, 4, 5, 6, //
|
||||
7, 8, 9, 1, 2, 3, //
|
||||
4, 5, 6, 7, 8, 9;
|
||||
auto g = [&](const Vector6& omega) {
|
||||
return Pose3::Expmap(M*omega);
|
||||
};
|
||||
|
||||
// Test the derivatives at zero
|
||||
const Matrix6 expected = numericalDerivative11<Pose3, Vector6>(g, Z_6x1);
|
||||
EXPECT(assert_equal<Matrix6>(expected, M, 1e-5)); // Pose3::ExpmapDerivative(Z_6x1) is identity
|
||||
|
||||
// Test the derivatives at another value
|
||||
const Vector6 delta{0.1, 0.2, 0.3, 0.4, 0.5, 0.6};
|
||||
const Matrix6 expected2 = numericalDerivative11<Pose3, Vector6>(g, delta);
|
||||
const Matrix6 analytic = Pose3::ExpmapDerivative(M*delta) * M;
|
||||
EXPECT(assert_equal<Matrix6>(expected2, analytic, 1e-5)); // note tolerance
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
|
|
|
|||
|
|
@ -956,6 +956,46 @@ TEST(Rot3, determinant) {
|
|||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(Rot3, ExpmapChainRule) {
|
||||
// Multiply with an arbitrary matrix and exponentiate
|
||||
Matrix3 M;
|
||||
M << 1, 2, 3, 4, 5, 6, 7, 8, 9;
|
||||
auto g = [&](const Vector3& omega) {
|
||||
return Rot3::Expmap(M*omega);
|
||||
};
|
||||
|
||||
// Test the derivatives at zero
|
||||
const Matrix3 expected = numericalDerivative11<Rot3, Vector3>(g, Z_3x1);
|
||||
EXPECT(assert_equal<Matrix3>(expected, M, 1e-5)); // SO3::ExpmapDerivative(Z_3x1) is identity
|
||||
|
||||
// Test the derivatives at another value
|
||||
const Vector3 delta{0.1,0.2,0.3};
|
||||
const Matrix3 expected2 = numericalDerivative11<Rot3, Vector3>(g, delta);
|
||||
EXPECT(assert_equal<Matrix3>(expected2, SO3::ExpmapDerivative(M*delta) * M, 1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(Rot3, expmapChainRule) {
|
||||
// Multiply an arbitrary rotation with exp(M*x)
|
||||
// Perhaps counter-intuitively, this has the same derivatives as above
|
||||
Matrix3 M;
|
||||
M << 1, 2, 3, 4, 5, 6, 7, 8, 9;
|
||||
const Rot3 R = Rot3::Expmap({1, 2, 3});
|
||||
auto g = [&](const Vector3& omega) {
|
||||
return R.expmap(M*omega);
|
||||
};
|
||||
|
||||
// Test the derivatives at zero
|
||||
const Matrix3 expected = numericalDerivative11<Rot3, Vector3>(g, Z_3x1);
|
||||
EXPECT(assert_equal<Matrix3>(expected, M, 1e-5));
|
||||
|
||||
// Test the derivatives at another value
|
||||
const Vector3 delta{0.1,0.2,0.3};
|
||||
const Matrix3 expected2 = numericalDerivative11<Rot3, Vector3>(g, delta);
|
||||
EXPECT(assert_equal<Matrix3>(expected2, SO3::ExpmapDerivative(M*delta) * M, 1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
|
|
|
|||
|
|
@ -87,7 +87,22 @@ GaussianFactorGraphTree GaussianMixture::add(
|
|||
|
||||
/* *******************************************************************************/
|
||||
GaussianFactorGraphTree GaussianMixture::asGaussianFactorGraphTree() const {
|
||||
auto wrap = [](const GaussianConditional::shared_ptr &gc) {
|
||||
auto wrap = [this](const GaussianConditional::shared_ptr &gc) {
|
||||
// First check if conditional has not been pruned
|
||||
if (gc) {
|
||||
const double Cgm_Kgcm =
|
||||
this->logConstant_ - gc->logNormalizationConstant();
|
||||
// If there is a difference in the covariances, we need to account for
|
||||
// that since the error is dependent on the mode.
|
||||
if (Cgm_Kgcm > 0.0) {
|
||||
// We add a constant factor which will be used when computing
|
||||
// the probability of the discrete variables.
|
||||
Vector c(1);
|
||||
c << std::sqrt(2.0 * Cgm_Kgcm);
|
||||
auto constantFactor = std::make_shared<JacobianFactor>(c);
|
||||
return GaussianFactorGraph{gc, constantFactor};
|
||||
}
|
||||
}
|
||||
return GaussianFactorGraph{gc};
|
||||
};
|
||||
return {conditionals_, wrap};
|
||||
|
|
@ -318,19 +333,28 @@ AlgebraicDecisionTree<Key> GaussianMixture::logProbability(
|
|||
return DecisionTree<Key, double>(conditionals_, probFunc);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
double GaussianMixture::conditionalError(
|
||||
const GaussianConditional::shared_ptr &conditional,
|
||||
const VectorValues &continuousValues) const {
|
||||
// Check if valid pointer
|
||||
if (conditional) {
|
||||
return conditional->error(continuousValues) + //
|
||||
logConstant_ - conditional->logNormalizationConstant();
|
||||
} else {
|
||||
// If not valid, pointer, it means this conditional was pruned,
|
||||
// so we return maximum error.
|
||||
// This way the negative exponential will give
|
||||
// a probability value close to 0.0.
|
||||
return std::numeric_limits<double>::max();
|
||||
}
|
||||
}
|
||||
|
||||
/* *******************************************************************************/
|
||||
AlgebraicDecisionTree<Key> GaussianMixture::errorTree(
|
||||
const VectorValues &continuousValues) const {
|
||||
auto errorFunc = [&](const GaussianConditional::shared_ptr &conditional) {
|
||||
// Check if valid pointer
|
||||
if (conditional) {
|
||||
return conditional->error(continuousValues) + //
|
||||
logConstant_ - conditional->logNormalizationConstant();
|
||||
} else {
|
||||
// If not valid, pointer, it means this conditional was pruned,
|
||||
// so we return maximum error.
|
||||
return std::numeric_limits<double>::max();
|
||||
}
|
||||
return conditionalError(conditional, continuousValues);
|
||||
};
|
||||
DecisionTree<Key, double> error_tree(conditionals_, errorFunc);
|
||||
return error_tree;
|
||||
|
|
@ -338,33 +362,9 @@ AlgebraicDecisionTree<Key> GaussianMixture::errorTree(
|
|||
|
||||
/* *******************************************************************************/
|
||||
double GaussianMixture::error(const HybridValues &values) const {
|
||||
// Check if discrete keys in discrete assignment are
|
||||
// present in the GaussianMixture
|
||||
KeyVector dKeys = this->discreteKeys_.indices();
|
||||
bool valid_assignment = false;
|
||||
for (auto &&kv : values.discrete()) {
|
||||
if (std::find(dKeys.begin(), dKeys.end(), kv.first) != dKeys.end()) {
|
||||
valid_assignment = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// The discrete assignment is not valid so we throw an error.
|
||||
if (!valid_assignment) {
|
||||
throw std::runtime_error(
|
||||
"Invalid discrete values in values. Not all discrete keys specified.");
|
||||
}
|
||||
|
||||
// Directly index to get the conditional, no need to build the whole tree.
|
||||
auto conditional = conditionals_(values.discrete());
|
||||
if (conditional) {
|
||||
return conditional->error(values.continuous()) + //
|
||||
logConstant_ - conditional->logNormalizationConstant();
|
||||
} else {
|
||||
// If not valid, pointer, it means this conditional was pruned,
|
||||
// so we return maximum error.
|
||||
return std::numeric_limits<double>::max();
|
||||
}
|
||||
return conditionalError(conditional, values.continuous());
|
||||
}
|
||||
|
||||
/* *******************************************************************************/
|
||||
|
|
|
|||
|
|
@ -256,6 +256,10 @@ class GTSAM_EXPORT GaussianMixture
|
|||
/// Check whether `given` has values for all frontal keys.
|
||||
bool allFrontalsGiven(const VectorValues &given) const;
|
||||
|
||||
/// Helper method to compute the error of a conditional.
|
||||
double conditionalError(const GaussianConditional::shared_ptr &conditional,
|
||||
const VectorValues &continuousValues) const;
|
||||
|
||||
#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION
|
||||
/** Serialization function */
|
||||
friend class boost::serialization::access;
|
||||
|
|
|
|||
|
|
@ -61,7 +61,7 @@ class GTSAM_EXPORT HybridConditional
|
|||
public Conditional<HybridFactor, HybridConditional> {
|
||||
public:
|
||||
// typedefs needed to play nice with gtsam
|
||||
typedef HybridConditional This; ///< Typedef to this class
|
||||
typedef HybridConditional This; ///< Typedef to this class
|
||||
typedef std::shared_ptr<This> shared_ptr; ///< shared_ptr to this class
|
||||
typedef HybridFactor BaseFactor; ///< Typedef to our factor base class
|
||||
typedef Conditional<BaseFactor, This>
|
||||
|
|
@ -185,7 +185,7 @@ class GTSAM_EXPORT HybridConditional
|
|||
* Return the log normalization constant.
|
||||
* Note this is 0.0 for discrete and hybrid conditionals, but depends
|
||||
* on the continuous parameters for Gaussian conditionals.
|
||||
*/
|
||||
*/
|
||||
double logNormalizationConstant() const override;
|
||||
|
||||
/// Return the probability (or density) of the underlying conditional.
|
||||
|
|
|
|||
|
|
@ -49,15 +49,6 @@ KeySet HybridFactorGraph::discreteKeySet() const {
|
|||
return keys;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
std::unordered_map<Key, DiscreteKey> HybridFactorGraph::discreteKeyMap() const {
|
||||
std::unordered_map<Key, DiscreteKey> result;
|
||||
for (const DiscreteKey& k : discreteKeys()) {
|
||||
result[k.first] = k;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
const KeySet HybridFactorGraph::continuousKeySet() const {
|
||||
KeySet keys;
|
||||
|
|
|
|||
|
|
@ -38,7 +38,7 @@ using SharedFactor = std::shared_ptr<Factor>;
|
|||
class GTSAM_EXPORT HybridFactorGraph : public FactorGraph<Factor> {
|
||||
public:
|
||||
using Base = FactorGraph<Factor>;
|
||||
using This = HybridFactorGraph; ///< this class
|
||||
using This = HybridFactorGraph; ///< this class
|
||||
using shared_ptr = std::shared_ptr<This>; ///< shared_ptr to This
|
||||
|
||||
using Values = gtsam::Values; ///< backwards compatibility
|
||||
|
|
@ -66,12 +66,9 @@ class GTSAM_EXPORT HybridFactorGraph : public FactorGraph<Factor> {
|
|||
/// Get all the discrete keys in the factor graph.
|
||||
std::set<DiscreteKey> discreteKeys() const;
|
||||
|
||||
/// Get all the discrete keys in the factor graph, as a set.
|
||||
/// Get all the discrete keys in the factor graph, as a set of Keys.
|
||||
KeySet discreteKeySet() const;
|
||||
|
||||
/// Get a map from Key to corresponding DiscreteKey.
|
||||
std::unordered_map<Key, DiscreteKey> discreteKeyMap() const;
|
||||
|
||||
/// Get all the continuous keys in the factor graph.
|
||||
const KeySet continuousKeySet() const;
|
||||
|
||||
|
|
|
|||
|
|
@ -97,29 +97,27 @@ void HybridGaussianFactorGraph::printErrors(
|
|||
std::cout << "nullptr"
|
||||
<< "\n";
|
||||
} else {
|
||||
factor->print(ss.str(), keyFormatter);
|
||||
std::cout << "error = ";
|
||||
gmf->errorTree(values.continuous()).print("", keyFormatter);
|
||||
std::cout << std::endl;
|
||||
gmf->operator()(values.discrete())->print(ss.str(), keyFormatter);
|
||||
std::cout << "error = " << gmf->error(values) << std::endl;
|
||||
}
|
||||
} else if (auto hc = std::dynamic_pointer_cast<HybridConditional>(factor)) {
|
||||
if (factor == nullptr) {
|
||||
std::cout << "nullptr"
|
||||
<< "\n";
|
||||
} else {
|
||||
factor->print(ss.str(), keyFormatter);
|
||||
|
||||
if (hc->isContinuous()) {
|
||||
factor->print(ss.str(), keyFormatter);
|
||||
std::cout << "error = " << hc->asGaussian()->error(values) << "\n";
|
||||
} else if (hc->isDiscrete()) {
|
||||
std::cout << "error = ";
|
||||
hc->asDiscrete()->errorTree().print("", keyFormatter);
|
||||
std::cout << "\n";
|
||||
factor->print(ss.str(), keyFormatter);
|
||||
std::cout << "error = " << hc->asDiscrete()->error(values.discrete())
|
||||
<< "\n";
|
||||
} else {
|
||||
// Is hybrid
|
||||
std::cout << "error = ";
|
||||
hc->asMixture()->errorTree(values.continuous()).print();
|
||||
std::cout << "\n";
|
||||
auto mixtureComponent =
|
||||
hc->asMixture()->operator()(values.discrete());
|
||||
mixtureComponent->print(ss.str(), keyFormatter);
|
||||
std::cout << "error = " << mixtureComponent->error(values) << "\n";
|
||||
}
|
||||
}
|
||||
} else if (auto gf = std::dynamic_pointer_cast<GaussianFactor>(factor)) {
|
||||
|
|
@ -140,8 +138,7 @@ void HybridGaussianFactorGraph::printErrors(
|
|||
<< "\n";
|
||||
} else {
|
||||
factor->print(ss.str(), keyFormatter);
|
||||
std::cout << "error = ";
|
||||
df->errorTree().print("", keyFormatter);
|
||||
std::cout << "error = " << df->error(values.discrete()) << std::endl;
|
||||
}
|
||||
|
||||
} else {
|
||||
|
|
@ -233,6 +230,25 @@ continuousElimination(const HybridGaussianFactorGraph &factors,
|
|||
return {std::make_shared<HybridConditional>(result.first), result.second};
|
||||
}
|
||||
|
||||
/* ************************************************************************ */
|
||||
/**
|
||||
* @brief Exponentiate log-values, not necessarily normalized, normalize, and
|
||||
* return as AlgebraicDecisionTree<Key>.
|
||||
*
|
||||
* @param logValues DecisionTree of (unnormalized) log values.
|
||||
* @return AlgebraicDecisionTree<Key>
|
||||
*/
|
||||
static AlgebraicDecisionTree<Key> probabilitiesFromLogValues(
|
||||
const AlgebraicDecisionTree<Key> &logValues) {
|
||||
// Perform normalization
|
||||
double max_log = logValues.max();
|
||||
AlgebraicDecisionTree<Key> probabilities = DecisionTree<Key, double>(
|
||||
logValues, [&max_log](const double x) { return exp(x - max_log); });
|
||||
probabilities = probabilities.normalize(probabilities.sum());
|
||||
|
||||
return probabilities;
|
||||
}
|
||||
|
||||
/* ************************************************************************ */
|
||||
static std::pair<HybridConditional::shared_ptr, std::shared_ptr<Factor>>
|
||||
discreteElimination(const HybridGaussianFactorGraph &factors,
|
||||
|
|
@ -245,14 +261,18 @@ discreteElimination(const HybridGaussianFactorGraph &factors,
|
|||
} else if (auto gmf = dynamic_pointer_cast<GaussianMixtureFactor>(f)) {
|
||||
// Case where we have a GaussianMixtureFactor with no continuous keys.
|
||||
// In this case, compute discrete probabilities.
|
||||
auto probability =
|
||||
auto logProbability =
|
||||
[&](const GaussianFactor::shared_ptr &factor) -> double {
|
||||
if (!factor) return 0.0;
|
||||
return exp(-factor->error(VectorValues()));
|
||||
return -factor->error(VectorValues());
|
||||
};
|
||||
dfg.emplace_shared<DecisionTreeFactor>(
|
||||
gmf->discreteKeys(),
|
||||
DecisionTree<Key, double>(gmf->factors(), probability));
|
||||
AlgebraicDecisionTree<Key> logProbabilities =
|
||||
DecisionTree<Key, double>(gmf->factors(), logProbability);
|
||||
|
||||
AlgebraicDecisionTree<Key> probabilities =
|
||||
probabilitiesFromLogValues(logProbabilities);
|
||||
dfg.emplace_shared<DecisionTreeFactor>(gmf->discreteKeys(),
|
||||
probabilities);
|
||||
|
||||
} else if (auto orphan = dynamic_pointer_cast<OrphanWrapper>(f)) {
|
||||
// Ignore orphaned clique.
|
||||
|
|
@ -292,8 +312,8 @@ using Result = std::pair<std::shared_ptr<GaussianConditional>,
|
|||
GaussianMixtureFactor::sharedFactor>;
|
||||
|
||||
/**
|
||||
* Compute the probability q(μ;m) = exp(-error(μ;m)) * sqrt(det(2π Σ_m)
|
||||
* from the residual error at the mean μ.
|
||||
* Compute the probability p(μ;m) = exp(-error(μ;m)) * sqrt(det(2π Σ_m)
|
||||
* from the residual error ||b||^2 at the mean μ.
|
||||
* The residual error contains no keys, and only
|
||||
* depends on the discrete separator if present.
|
||||
*/
|
||||
|
|
@ -310,18 +330,13 @@ static std::shared_ptr<Factor> createDiscreteFactor(
|
|||
// exp(-factor->error(kEmpty)) / conditional->normalizationConstant();
|
||||
// We take negative of the logNormalizationConstant `log(1/k)`
|
||||
// to get `log(k)`.
|
||||
return -factor->error(kEmpty) + (-conditional->logNormalizationConstant());
|
||||
return -factor->error(kEmpty) - conditional->logNormalizationConstant();
|
||||
};
|
||||
|
||||
AlgebraicDecisionTree<Key> logProbabilities(
|
||||
DecisionTree<Key, double>(eliminationResults, logProbability));
|
||||
|
||||
// Perform normalization
|
||||
double max_log = logProbabilities.max();
|
||||
AlgebraicDecisionTree probabilities = DecisionTree<Key, double>(
|
||||
logProbabilities,
|
||||
[&max_log](const double x) { return exp(x - max_log); });
|
||||
probabilities = probabilities.normalize(probabilities.sum());
|
||||
AlgebraicDecisionTree<Key> probabilities =
|
||||
probabilitiesFromLogValues(logProbabilities);
|
||||
|
||||
return std::make_shared<DecisionTreeFactor>(discreteSeparator, probabilities);
|
||||
}
|
||||
|
|
@ -508,18 +523,9 @@ EliminateHybrid(const HybridGaussianFactorGraph &factors,
|
|||
std::inserter(continuousSeparator, continuousSeparator.begin()));
|
||||
|
||||
// Similarly for the discrete separator.
|
||||
KeySet discreteSeparatorSet;
|
||||
std::set<DiscreteKey> discreteSeparator;
|
||||
auto discreteKeySet = factors.discreteKeySet();
|
||||
std::set_difference(
|
||||
discreteKeySet.begin(), discreteKeySet.end(), frontalKeysSet.begin(),
|
||||
frontalKeysSet.end(),
|
||||
std::inserter(discreteSeparatorSet, discreteSeparatorSet.begin()));
|
||||
// Convert from set of keys to set of DiscreteKeys
|
||||
auto discreteKeyMap = factors.discreteKeyMap();
|
||||
for (auto key : discreteSeparatorSet) {
|
||||
discreteSeparator.insert(discreteKeyMap.at(key));
|
||||
}
|
||||
// Since we eliminate all continuous variables first,
|
||||
// the discrete separator will be *all* the discrete keys.
|
||||
std::set<DiscreteKey> discreteSeparator = factors.discreteKeys();
|
||||
|
||||
return hybridElimination(factors, frontalKeys, continuousSeparator,
|
||||
discreteSeparator);
|
||||
|
|
@ -532,10 +538,15 @@ AlgebraicDecisionTree<Key> HybridGaussianFactorGraph::errorTree(
|
|||
AlgebraicDecisionTree<Key> error_tree(0.0);
|
||||
|
||||
// Iterate over each factor.
|
||||
for (auto &f : factors_) {
|
||||
for (auto &factor : factors_) {
|
||||
// TODO(dellaert): just use a virtual method defined in HybridFactor.
|
||||
AlgebraicDecisionTree<Key> factor_error;
|
||||
|
||||
auto f = factor;
|
||||
if (auto hc = dynamic_pointer_cast<HybridConditional>(factor)) {
|
||||
f = hc->inner();
|
||||
}
|
||||
|
||||
if (auto gaussianMixture = dynamic_pointer_cast<GaussianMixtureFactor>(f)) {
|
||||
// Compute factor error and add it.
|
||||
error_tree = error_tree + gaussianMixture->errorTree(continuousValues);
|
||||
|
|
|
|||
|
|
@ -144,6 +144,14 @@ class GTSAM_EXPORT HybridGaussianFactorGraph
|
|||
// const std::string& s = "HybridGaussianFactorGraph",
|
||||
// const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override;
|
||||
|
||||
/**
|
||||
* @brief Print the errors of each factor in the hybrid factor graph.
|
||||
*
|
||||
* @param values The HybridValues for the variables used to compute the error.
|
||||
* @param str String that is output before the factor graph and errors.
|
||||
* @param keyFormatter Formatter function for the keys in the factors.
|
||||
* @param printCondition A condition to check if a factor should be printed.
|
||||
*/
|
||||
void printErrors(
|
||||
const HybridValues& values,
|
||||
const std::string& str = "HybridGaussianFactorGraph: ",
|
||||
|
|
|
|||
|
|
@ -36,6 +36,7 @@
|
|||
using namespace std;
|
||||
using namespace gtsam;
|
||||
using noiseModel::Isotropic;
|
||||
using symbol_shorthand::F;
|
||||
using symbol_shorthand::M;
|
||||
using symbol_shorthand::X;
|
||||
using symbol_shorthand::Z;
|
||||
|
|
@ -200,67 +201,29 @@ TEST(GaussianMixtureFactor, Error) {
|
|||
4.0, mixtureFactor.error({continuousValues, discreteValues}), 1e-9);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* Test a simple Gaussian Mixture Model represented as P(m)P(z|m)
|
||||
* where m is a discrete variable and z is a continuous variable.
|
||||
* m is binary and depending on m, we have 2 different means
|
||||
* μ1 and μ2 for the Gaussian distribution around which we sample z.
|
||||
*
|
||||
* The resulting factor graph should eliminate to a Bayes net
|
||||
* which represents a sigmoid function.
|
||||
*/
|
||||
TEST(GaussianMixtureFactor, GaussianMixtureModel) {
|
||||
double mu0 = 1.0, mu1 = 3.0;
|
||||
double sigma = 2.0;
|
||||
auto model = noiseModel::Isotropic::Sigma(1, sigma);
|
||||
namespace test_gmm {
|
||||
|
||||
/**
|
||||
* Function to compute P(m=1|z). For P(m=0|z), swap mus and sigmas.
|
||||
* If sigma0 == sigma1, it simplifies to a sigmoid function.
|
||||
*
|
||||
* Follows equation 7.108 since it is more generic.
|
||||
*/
|
||||
double prob_m_z(double mu0, double mu1, double sigma0, double sigma1,
|
||||
double z) {
|
||||
double x1 = ((z - mu0) / sigma0), x2 = ((z - mu1) / sigma1);
|
||||
double d = sigma1 / sigma0;
|
||||
double e = d * std::exp(-0.5 * (x1 * x1 - x2 * x2));
|
||||
return 1 / (1 + e);
|
||||
};
|
||||
|
||||
static HybridBayesNet GetGaussianMixtureModel(double mu0, double mu1,
|
||||
double sigma0, double sigma1) {
|
||||
DiscreteKey m(M(0), 2);
|
||||
Key z = Z(0);
|
||||
|
||||
auto c0 = make_shared<GaussianConditional>(z, Vector1(mu0), I_1x1, model),
|
||||
c1 = make_shared<GaussianConditional>(z, Vector1(mu1), I_1x1, model);
|
||||
|
||||
auto gm = new GaussianMixture({z}, {}, {m}, {c0, c1});
|
||||
auto mixing = new DiscreteConditional(m, "0.5/0.5");
|
||||
|
||||
HybridBayesNet hbn;
|
||||
hbn.emplace_back(gm);
|
||||
hbn.emplace_back(mixing);
|
||||
|
||||
// The result should be a sigmoid.
|
||||
// So should be m = 0.5 at z=3.0 - 1.0=2.0
|
||||
VectorValues given;
|
||||
given.insert(z, Vector1(mu1 - mu0));
|
||||
|
||||
HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given);
|
||||
HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();
|
||||
|
||||
HybridBayesNet expected;
|
||||
expected.emplace_back(new DiscreteConditional(m, "0.5/0.5"));
|
||||
|
||||
EXPECT(assert_equal(expected, *bn));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* Test a simple Gaussian Mixture Model represented as P(m)P(z|m)
|
||||
* where m is a discrete variable and z is a continuous variable.
|
||||
* m is binary and depending on m, we have 2 different means
|
||||
* and covariances each for the
|
||||
* Gaussian distribution around which we sample z.
|
||||
*
|
||||
* The resulting factor graph should eliminate to a Bayes net
|
||||
* which represents a sigmoid function leaning towards
|
||||
* the tighter covariance Gaussian.
|
||||
*/
|
||||
TEST(GaussianMixtureFactor, GaussianMixtureModel2) {
|
||||
double mu0 = 1.0, mu1 = 3.0;
|
||||
auto model0 = noiseModel::Isotropic::Sigma(1, 8.0);
|
||||
auto model1 = noiseModel::Isotropic::Sigma(1, 4.0);
|
||||
|
||||
DiscreteKey m(M(0), 2);
|
||||
Key z = Z(0);
|
||||
auto model0 = noiseModel::Isotropic::Sigma(1, sigma0);
|
||||
auto model1 = noiseModel::Isotropic::Sigma(1, sigma1);
|
||||
|
||||
auto c0 = make_shared<GaussianConditional>(z, Vector1(mu0), I_1x1, model0),
|
||||
c1 = make_shared<GaussianConditional>(z, Vector1(mu1), I_1x1, model1);
|
||||
|
|
@ -272,126 +235,283 @@ TEST(GaussianMixtureFactor, GaussianMixtureModel2) {
|
|||
hbn.emplace_back(gm);
|
||||
hbn.emplace_back(mixing);
|
||||
|
||||
// The result should be a sigmoid leaning towards model1
|
||||
// since it has the tighter covariance.
|
||||
// So should be m = 0.34/0.66 at z=3.0 - 1.0=2.0
|
||||
VectorValues given;
|
||||
given.insert(z, Vector1(mu1 - mu0));
|
||||
HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given);
|
||||
HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();
|
||||
return hbn;
|
||||
}
|
||||
|
||||
HybridBayesNet expected;
|
||||
expected.emplace_back(
|
||||
new DiscreteConditional(m, "0.338561851224/0.661438148776"));
|
||||
} // namespace test_gmm
|
||||
|
||||
EXPECT(assert_equal(expected, *bn));
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* Test a simple Gaussian Mixture Model represented as P(m)P(z|m)
|
||||
* where m is a discrete variable and z is a continuous variable.
|
||||
* m is binary and depending on m, we have 2 different means
|
||||
* μ1 and μ2 for the Gaussian distribution around which we sample z.
|
||||
*
|
||||
* The resulting factor graph should eliminate to a Bayes net
|
||||
* which represents a sigmoid function.
|
||||
*/
|
||||
TEST(GaussianMixtureFactor, GaussianMixtureModel) {
|
||||
using namespace test_gmm;
|
||||
|
||||
double mu0 = 1.0, mu1 = 3.0;
|
||||
double sigma = 2.0;
|
||||
|
||||
DiscreteKey m(M(0), 2);
|
||||
Key z = Z(0);
|
||||
|
||||
auto hbn = GetGaussianMixtureModel(mu0, mu1, sigma, sigma);
|
||||
|
||||
// The result should be a sigmoid.
|
||||
// So should be P(m=1|z) = 0.5 at z=3.0 - 1.0=2.0
|
||||
double midway = mu1 - mu0, lambda = 4;
|
||||
{
|
||||
VectorValues given;
|
||||
given.insert(z, Vector1(midway));
|
||||
|
||||
HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given);
|
||||
HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();
|
||||
|
||||
EXPECT_DOUBLES_EQUAL(
|
||||
prob_m_z(mu0, mu1, sigma, sigma, midway),
|
||||
bn->at(0)->asDiscrete()->operator()(DiscreteValues{{m.first, 1}}),
|
||||
1e-8);
|
||||
|
||||
// At the halfway point between the means, we should get P(m|z)=0.5
|
||||
HybridBayesNet expected;
|
||||
expected.emplace_back(new DiscreteConditional(m, "0.5/0.5"));
|
||||
|
||||
EXPECT(assert_equal(expected, *bn));
|
||||
}
|
||||
{
|
||||
// Shift by -lambda
|
||||
VectorValues given;
|
||||
given.insert(z, Vector1(midway - lambda));
|
||||
|
||||
HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given);
|
||||
HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();
|
||||
|
||||
EXPECT_DOUBLES_EQUAL(
|
||||
prob_m_z(mu0, mu1, sigma, sigma, midway - lambda),
|
||||
bn->at(0)->asDiscrete()->operator()(DiscreteValues{{m.first, 1}}),
|
||||
1e-8);
|
||||
}
|
||||
{
|
||||
// Shift by lambda
|
||||
VectorValues given;
|
||||
given.insert(z, Vector1(midway + lambda));
|
||||
|
||||
HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given);
|
||||
HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();
|
||||
|
||||
EXPECT_DOUBLES_EQUAL(
|
||||
prob_m_z(mu0, mu1, sigma, sigma, midway + lambda),
|
||||
bn->at(0)->asDiscrete()->operator()(DiscreteValues{{m.first, 1}}),
|
||||
1e-8);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* Test a model P(x0)P(z0|x0)p(x1|m1)p(z1|x1)p(m1).
|
||||
* Test a simple Gaussian Mixture Model represented as P(m)P(z|m)
|
||||
* where m is a discrete variable and z is a continuous variable.
|
||||
* m is binary and depending on m, we have 2 different means
|
||||
* and covariances each for the
|
||||
* Gaussian distribution around which we sample z.
|
||||
*
|
||||
* p(x1|m1) has different means and same covariance.
|
||||
* The resulting factor graph should eliminate to a Bayes net
|
||||
* which represents a Gaussian-like function
|
||||
* where m1>m0 close to 3.1333.
|
||||
*/
|
||||
TEST(GaussianMixtureFactor, GaussianMixtureModel2) {
|
||||
using namespace test_gmm;
|
||||
|
||||
double mu0 = 1.0, mu1 = 3.0;
|
||||
double sigma0 = 8.0, sigma1 = 4.0;
|
||||
|
||||
DiscreteKey m(M(0), 2);
|
||||
Key z = Z(0);
|
||||
|
||||
auto hbn = GetGaussianMixtureModel(mu0, mu1, sigma0, sigma1);
|
||||
|
||||
double m1_high = 3.133, lambda = 4;
|
||||
{
|
||||
// The result should be a bell curve like function
|
||||
// with m1 > m0 close to 3.1333.
|
||||
// We get 3.1333 by finding the maximum value of the function.
|
||||
VectorValues given;
|
||||
given.insert(z, Vector1(3.133));
|
||||
|
||||
HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given);
|
||||
HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();
|
||||
|
||||
EXPECT_DOUBLES_EQUAL(
|
||||
prob_m_z(mu0, mu1, sigma0, sigma1, m1_high),
|
||||
bn->at(0)->asDiscrete()->operator()(DiscreteValues{{M(0), 1}}), 1e-8);
|
||||
|
||||
// At the halfway point between the means
|
||||
HybridBayesNet expected;
|
||||
expected.emplace_back(new DiscreteConditional(
|
||||
m, {},
|
||||
vector<double>{prob_m_z(mu1, mu0, sigma1, sigma0, m1_high),
|
||||
prob_m_z(mu0, mu1, sigma0, sigma1, m1_high)}));
|
||||
|
||||
EXPECT(assert_equal(expected, *bn));
|
||||
}
|
||||
{
|
||||
// Shift by -lambda
|
||||
VectorValues given;
|
||||
given.insert(z, Vector1(m1_high - lambda));
|
||||
|
||||
HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given);
|
||||
HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();
|
||||
|
||||
EXPECT_DOUBLES_EQUAL(
|
||||
prob_m_z(mu0, mu1, sigma0, sigma1, m1_high - lambda),
|
||||
bn->at(0)->asDiscrete()->operator()(DiscreteValues{{m.first, 1}}),
|
||||
1e-8);
|
||||
}
|
||||
{
|
||||
// Shift by lambda
|
||||
VectorValues given;
|
||||
given.insert(z, Vector1(m1_high + lambda));
|
||||
|
||||
HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given);
|
||||
HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();
|
||||
|
||||
EXPECT_DOUBLES_EQUAL(
|
||||
prob_m_z(mu0, mu1, sigma0, sigma1, m1_high + lambda),
|
||||
bn->at(0)->asDiscrete()->operator()(DiscreteValues{{m.first, 1}}),
|
||||
1e-8);
|
||||
}
|
||||
}
|
||||
|
||||
namespace test_two_state_estimation {
|
||||
|
||||
/// Create Two State Bayes Network with measurements
|
||||
static HybridBayesNet CreateBayesNet(double mu0, double mu1, double sigma0,
|
||||
double sigma1,
|
||||
bool add_second_measurement = false,
|
||||
double prior_sigma = 1e-3,
|
||||
double measurement_sigma = 3.0) {
|
||||
DiscreteKey m1(M(1), 2);
|
||||
Key z0 = Z(0), z1 = Z(1);
|
||||
Key x0 = X(0), x1 = X(1);
|
||||
|
||||
HybridBayesNet hbn;
|
||||
|
||||
auto measurement_model = noiseModel::Isotropic::Sigma(1, measurement_sigma);
|
||||
// Add measurement P(z0 | x0)
|
||||
auto p_z0 = new GaussianConditional(z0, Vector1(0.0), -I_1x1, x0, I_1x1,
|
||||
measurement_model);
|
||||
hbn.emplace_back(p_z0);
|
||||
|
||||
// Add hybrid motion model
|
||||
auto model0 = noiseModel::Isotropic::Sigma(1, sigma0);
|
||||
auto model1 = noiseModel::Isotropic::Sigma(1, sigma1);
|
||||
auto c0 = make_shared<GaussianConditional>(x1, Vector1(mu0), I_1x1, x0,
|
||||
-I_1x1, model0),
|
||||
c1 = make_shared<GaussianConditional>(x1, Vector1(mu1), I_1x1, x0,
|
||||
-I_1x1, model1);
|
||||
|
||||
auto motion = new GaussianMixture({x1}, {x0}, {m1}, {c0, c1});
|
||||
hbn.emplace_back(motion);
|
||||
|
||||
if (add_second_measurement) {
|
||||
// Add second measurement
|
||||
auto p_z1 = new GaussianConditional(z1, Vector1(0.0), -I_1x1, x1, I_1x1,
|
||||
measurement_model);
|
||||
hbn.emplace_back(p_z1);
|
||||
}
|
||||
|
||||
// Discrete uniform prior.
|
||||
auto p_m1 = new DiscreteConditional(m1, "0.5/0.5");
|
||||
hbn.emplace_back(p_m1);
|
||||
|
||||
return hbn;
|
||||
}
|
||||
|
||||
} // namespace test_two_state_estimation
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* Test a model P(z0|x0)P(x1|x0,m1)P(z1|x1)P(m1).
|
||||
*
|
||||
* P(f01|x1,x0,m1) has different means and same covariance.
|
||||
*
|
||||
* Converting to a factor graph gives us
|
||||
* P(x0)ϕ(x0)P(x1|m1)ϕ(x1)P(m1)
|
||||
* ϕ(x0)ϕ(x1,x0,m1)ϕ(x1)P(m1)
|
||||
*
|
||||
* If we only have a measurement on z0, then
|
||||
* the probability of x1 should be 0.5/0.5.
|
||||
* the probability of m1 should be 0.5/0.5.
|
||||
* Getting a measurement on z1 gives use more information.
|
||||
*/
|
||||
TEST(GaussianMixtureFactor, TwoStateModel) {
|
||||
using namespace test_two_state_estimation;
|
||||
|
||||
double mu0 = 1.0, mu1 = 3.0;
|
||||
auto model = noiseModel::Isotropic::Sigma(1, 2.0);
|
||||
double sigma = 2.0;
|
||||
|
||||
DiscreteKey m1(M(1), 2);
|
||||
Key z0 = Z(0), z1 = Z(1), x0 = X(0), x1 = X(1);
|
||||
Key z0 = Z(0), z1 = Z(1);
|
||||
|
||||
auto c0 = make_shared<GaussianConditional>(x1, Vector1(mu0), I_1x1, model),
|
||||
c1 = make_shared<GaussianConditional>(x1, Vector1(mu1), I_1x1, model);
|
||||
|
||||
auto p_x0 = new GaussianConditional(x0, Vector1(0.0), I_1x1,
|
||||
noiseModel::Isotropic::Sigma(1, 1.0));
|
||||
auto p_z0x0 = new GaussianConditional(z0, Vector1(0.0), I_1x1, x0, -I_1x1,
|
||||
noiseModel::Isotropic::Sigma(1, 1.0));
|
||||
auto p_x1m1 = new GaussianMixture({x1}, {}, {m1}, {c0, c1});
|
||||
auto p_z1x1 = new GaussianConditional(z1, Vector1(0.0), I_1x1, x1, -I_1x1,
|
||||
noiseModel::Isotropic::Sigma(1, 3.0));
|
||||
auto p_m1 = new DiscreteConditional(m1, "0.5/0.5");
|
||||
|
||||
HybridBayesNet hbn;
|
||||
hbn.emplace_back(p_x0);
|
||||
hbn.emplace_back(p_z0x0);
|
||||
hbn.emplace_back(p_x1m1);
|
||||
hbn.emplace_back(p_m1);
|
||||
// Start with no measurement on x1, only on x0
|
||||
HybridBayesNet hbn = CreateBayesNet(mu0, mu1, sigma, sigma, false);
|
||||
|
||||
VectorValues given;
|
||||
given.insert(z0, Vector1(0.5));
|
||||
|
||||
{
|
||||
// Start with no measurement on x1, only on x0
|
||||
HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given);
|
||||
HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();
|
||||
|
||||
// Since no measurement on x1, we hedge our bets
|
||||
DiscreteConditional expected(m1, "0.5/0.5");
|
||||
|
||||
EXPECT(assert_equal(expected, *(bn->at(2)->asDiscrete())));
|
||||
}
|
||||
|
||||
{
|
||||
// Now we add a measurement z1 on x1
|
||||
hbn.emplace_back(p_z1x1);
|
||||
hbn = CreateBayesNet(mu0, mu1, sigma, sigma, true);
|
||||
|
||||
given.insert(z1, Vector1(2.2));
|
||||
// If we see z1=2.6 (> 2.5 which is the halfway point),
|
||||
// discrete mode should say m1=1
|
||||
given.insert(z1, Vector1(2.6));
|
||||
HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given);
|
||||
HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();
|
||||
|
||||
// Since we have a measurement on z2, we get a definite result
|
||||
DiscreteConditional expected(m1, "0.4923083/0.5076917");
|
||||
|
||||
DiscreteConditional expected(m1, "0.49772729/0.50227271");
|
||||
// regression
|
||||
EXPECT(assert_equal(expected, *(bn->at(2)->asDiscrete()), 1e-6));
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* Test a model P(x0)P(z0|x0)p(x1|m1)p(z1|x1)p(m1).
|
||||
* Test a model P(z0|x0)P(x1|x0,m1)P(z1|x1)P(m1).
|
||||
*
|
||||
* p(x1|m1) has different means and different covariances.
|
||||
* P(f01|x1,x0,m1) has different means and different covariances.
|
||||
*
|
||||
* Converting to a factor graph gives us
|
||||
* P(x0)ϕ(x0)P(x1|m1)ϕ(x1)P(m1)
|
||||
* ϕ(x0)ϕ(x1,x0,m1)ϕ(x1)P(m1)
|
||||
*
|
||||
* If we only have a measurement on z0, then
|
||||
* the probability of x1 should be the ratio of covariances.
|
||||
* the P(m1) should be 0.5/0.5.
|
||||
* Getting a measurement on z1 gives use more information.
|
||||
*/
|
||||
TEST(GaussianMixtureFactor, TwoStateModel2) {
|
||||
using namespace test_two_state_estimation;
|
||||
|
||||
double mu0 = 1.0, mu1 = 3.0;
|
||||
auto model0 = noiseModel::Isotropic::Sigma(1, 6.0);
|
||||
auto model1 = noiseModel::Isotropic::Sigma(1, 4.0);
|
||||
double sigma0 = 6.0, sigma1 = 4.0;
|
||||
auto model0 = noiseModel::Isotropic::Sigma(1, sigma0);
|
||||
auto model1 = noiseModel::Isotropic::Sigma(1, sigma1);
|
||||
|
||||
DiscreteKey m1(M(1), 2);
|
||||
Key z0 = Z(0), z1 = Z(1), x0 = X(0), x1 = X(1);
|
||||
Key z0 = Z(0), z1 = Z(1);
|
||||
|
||||
auto c0 = make_shared<GaussianConditional>(x1, Vector1(mu0), I_1x1, model0),
|
||||
c1 = make_shared<GaussianConditional>(x1, Vector1(mu1), I_1x1, model1);
|
||||
|
||||
auto p_x0 = new GaussianConditional(x0, Vector1(0.0), I_1x1,
|
||||
noiseModel::Isotropic::Sigma(1, 1.0));
|
||||
auto p_z0x0 = new GaussianConditional(z0, Vector1(0.0), I_1x1, x0, -I_1x1,
|
||||
noiseModel::Isotropic::Sigma(1, 1.0));
|
||||
auto p_x1m1 = new GaussianMixture({x1}, {}, {m1}, {c0, c1});
|
||||
auto p_z1x1 = new GaussianConditional(z1, Vector1(0.0), I_1x1, x1, -I_1x1,
|
||||
noiseModel::Isotropic::Sigma(1, 3.0));
|
||||
auto p_m1 = new DiscreteConditional(m1, "0.5/0.5");
|
||||
|
||||
HybridBayesNet hbn;
|
||||
hbn.emplace_back(p_x0);
|
||||
hbn.emplace_back(p_z0x0);
|
||||
hbn.emplace_back(p_x1m1);
|
||||
hbn.emplace_back(p_m1);
|
||||
// Start with no measurement on x1, only on x0
|
||||
HybridBayesNet hbn = CreateBayesNet(mu0, mu1, sigma0, sigma1, false);
|
||||
|
||||
VectorValues given;
|
||||
given.insert(z0, Vector1(0.5));
|
||||
|
|
@ -399,25 +519,67 @@ TEST(GaussianMixtureFactor, TwoStateModel2) {
|
|||
{
|
||||
// Start with no measurement on x1, only on x0
|
||||
HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given);
|
||||
|
||||
{
|
||||
VectorValues vv{
|
||||
{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}, {Z(0), Vector1(0.5)}};
|
||||
HybridValues hv0(vv, DiscreteValues{{M(1), 0}}),
|
||||
hv1(vv, DiscreteValues{{M(1), 1}});
|
||||
EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0),
|
||||
gfg.error(hv1) / hbn.error(hv1), 1e-9);
|
||||
}
|
||||
{
|
||||
VectorValues vv{
|
||||
{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}, {Z(0), Vector1(0.5)}};
|
||||
HybridValues hv0(vv, DiscreteValues{{M(1), 0}}),
|
||||
hv1(vv, DiscreteValues{{M(1), 1}});
|
||||
EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0),
|
||||
gfg.error(hv1) / hbn.error(hv1), 1e-9);
|
||||
}
|
||||
|
||||
HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();
|
||||
|
||||
// Since no measurement on x1, we get the ratio of covariances.
|
||||
DiscreteConditional expected(m1, "0.6/0.4");
|
||||
|
||||
EXPECT(assert_equal(expected, *(bn->at(2)->asDiscrete())));
|
||||
// Since no measurement on x1, we a 50/50 probability
|
||||
auto p_m = bn->at(2)->asDiscrete();
|
||||
EXPECT_DOUBLES_EQUAL(0.5, p_m->operator()(DiscreteValues{{m1.first, 0}}),
|
||||
1e-9);
|
||||
EXPECT_DOUBLES_EQUAL(0.5, p_m->operator()(DiscreteValues{{m1.first, 1}}),
|
||||
1e-9);
|
||||
}
|
||||
|
||||
{
|
||||
// Now we add a measurement z1 on x1
|
||||
hbn.emplace_back(p_z1x1);
|
||||
hbn = CreateBayesNet(mu0, mu1, sigma0, sigma1, true);
|
||||
|
||||
given.insert(z1, Vector1(2.2));
|
||||
HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given);
|
||||
|
||||
{
|
||||
VectorValues vv{{X(0), Vector1(0.0)},
|
||||
{X(1), Vector1(1.0)},
|
||||
{Z(0), Vector1(0.5)},
|
||||
{Z(1), Vector1(2.2)}};
|
||||
HybridValues hv0(vv, DiscreteValues{{M(1), 0}}),
|
||||
hv1(vv, DiscreteValues{{M(1), 1}});
|
||||
EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0),
|
||||
gfg.error(hv1) / hbn.error(hv1), 1e-9);
|
||||
}
|
||||
{
|
||||
VectorValues vv{{X(0), Vector1(0.5)},
|
||||
{X(1), Vector1(3.0)},
|
||||
{Z(0), Vector1(0.5)},
|
||||
{Z(1), Vector1(2.2)}};
|
||||
HybridValues hv0(vv, DiscreteValues{{M(1), 0}}),
|
||||
hv1(vv, DiscreteValues{{M(1), 1}});
|
||||
EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0),
|
||||
gfg.error(hv1) / hbn.error(hv1), 1e-9);
|
||||
}
|
||||
|
||||
HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();
|
||||
|
||||
// Since we have a measurement on z2, we get a definite result
|
||||
DiscreteConditional expected(m1, "0.52706646/0.47293354");
|
||||
|
||||
DiscreteConditional expected(m1, "0.44744586/0.55255414");
|
||||
// regression
|
||||
EXPECT(assert_equal(expected, *(bn->at(2)->asDiscrete()), 1e-6));
|
||||
}
|
||||
}
|
||||
|
|
@ -761,4 +923,4 @@ int main() {
|
|||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
/* ************************************************************************* */
|
||||
/* ************************************************************************* */
|
||||
|
|
|
|||
|
|
@ -598,6 +598,57 @@ TEST(HybridGaussianFactorGraph, ErrorAndProbPrimeTree) {
|
|||
EXPECT(assert_equal(expected_probs, probs, 1e-7));
|
||||
}
|
||||
|
||||
/* ****************************************************************************/
|
||||
// Test hybrid gaussian factor graph errorTree when there is a HybridConditional in the graph
|
||||
TEST(HybridGaussianFactorGraph, ErrorTreeWithConditional) {
|
||||
using symbol_shorthand::F;
|
||||
|
||||
DiscreteKey m1(M(1), 2);
|
||||
Key z0 = Z(0), f01 = F(0);
|
||||
Key x0 = X(0), x1 = X(1);
|
||||
|
||||
HybridBayesNet hbn;
|
||||
|
||||
auto prior_model = noiseModel::Isotropic::Sigma(1, 1e-1);
|
||||
auto measurement_model = noiseModel::Isotropic::Sigma(1, 2.0);
|
||||
|
||||
// Set a prior P(x0) at x0=0
|
||||
hbn.emplace_back(
|
||||
new GaussianConditional(x0, Vector1(0.0), I_1x1, prior_model));
|
||||
|
||||
// Add measurement P(z0 | x0)
|
||||
hbn.emplace_back(new GaussianConditional(z0, Vector1(0.0), -I_1x1, x0, I_1x1,
|
||||
measurement_model));
|
||||
|
||||
// Add hybrid motion model
|
||||
double mu = 0.0;
|
||||
double sigma0 = 1e2, sigma1 = 1e-2;
|
||||
auto model0 = noiseModel::Isotropic::Sigma(1, sigma0);
|
||||
auto model1 = noiseModel::Isotropic::Sigma(1, sigma1);
|
||||
auto c0 = make_shared<GaussianConditional>(f01, Vector1(mu), I_1x1, x1, I_1x1,
|
||||
x0, -I_1x1, model0),
|
||||
c1 = make_shared<GaussianConditional>(f01, Vector1(mu), I_1x1, x1, I_1x1,
|
||||
x0, -I_1x1, model1);
|
||||
hbn.emplace_back(new GaussianMixture({f01}, {x0, x1}, {m1}, {c0, c1}));
|
||||
|
||||
// Discrete uniform prior.
|
||||
hbn.emplace_back(new DiscreteConditional(m1, "0.5/0.5"));
|
||||
|
||||
VectorValues given;
|
||||
given.insert(z0, Vector1(0.0));
|
||||
given.insert(f01, Vector1(0.0));
|
||||
auto gfg = hbn.toFactorGraph(given);
|
||||
|
||||
VectorValues vv;
|
||||
vv.insert(x0, Vector1(1.0));
|
||||
vv.insert(x1, Vector1(2.0));
|
||||
AlgebraicDecisionTree<Key> errorTree = gfg.errorTree(vv);
|
||||
|
||||
// regression
|
||||
AlgebraicDecisionTree<Key> expected(m1, 59.335390372, 5050.125);
|
||||
EXPECT(assert_equal(expected, errorTree, 1e-9));
|
||||
}
|
||||
|
||||
/* ****************************************************************************/
|
||||
// Check that assembleGraphTree assembles Gaussian factor graphs for each
|
||||
// assignment.
|
||||
|
|
|
|||
|
|
@ -118,156 +118,6 @@ TEST(MixtureFactor, Dim) {
|
|||
EXPECT_LONGS_EQUAL(1, mixtureFactor.dim());
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Test components with differing means
|
||||
TEST(MixtureFactor, DifferentMeans) {
|
||||
DiscreteKey m1(M(1), 2), m2(M(2), 2);
|
||||
|
||||
Values values;
|
||||
double x1 = 0.0, x2 = 1.75, x3 = 2.60;
|
||||
values.insert(X(1), x1);
|
||||
values.insert(X(2), x2);
|
||||
values.insert(X(3), x3);
|
||||
|
||||
auto model0 = noiseModel::Isotropic::Sigma(1, 1e-0);
|
||||
auto model1 = noiseModel::Isotropic::Sigma(1, 1e-0);
|
||||
auto prior_noise = noiseModel::Isotropic::Sigma(1, 1e-0);
|
||||
|
||||
auto f0 = std::make_shared<BetweenFactor<double>>(X(1), X(2), 0.0, model0);
|
||||
auto f1 = std::make_shared<BetweenFactor<double>>(X(1), X(2), 2.0, model1);
|
||||
std::vector<NonlinearFactor::shared_ptr> factors{f0, f1};
|
||||
|
||||
MixtureFactor mixtureFactor({X(1), X(2)}, {m1}, factors);
|
||||
HybridNonlinearFactorGraph hnfg;
|
||||
hnfg.push_back(mixtureFactor);
|
||||
|
||||
f0 = std::make_shared<BetweenFactor<double>>(X(2), X(3), 0.0, model0);
|
||||
f1 = std::make_shared<BetweenFactor<double>>(X(2), X(3), 2.0, model1);
|
||||
std::vector<NonlinearFactor::shared_ptr> factors23{f0, f1};
|
||||
hnfg.push_back(MixtureFactor({X(2), X(3)}, {m2}, factors23));
|
||||
|
||||
auto prior = PriorFactor<double>(X(1), x1, prior_noise);
|
||||
hnfg.push_back(prior);
|
||||
|
||||
hnfg.emplace_shared<PriorFactor<double>>(X(2), 2.0, prior_noise);
|
||||
|
||||
auto hgfg = hnfg.linearize(values);
|
||||
auto bn = hgfg->eliminateSequential();
|
||||
HybridValues actual = bn->optimize();
|
||||
|
||||
HybridValues expected(
|
||||
VectorValues{
|
||||
{X(1), Vector1(0.0)}, {X(2), Vector1(0.25)}, {X(3), Vector1(-0.6)}},
|
||||
DiscreteValues{{M(1), 1}, {M(2), 0}});
|
||||
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
|
||||
{
|
||||
DiscreteValues dv{{M(1), 0}, {M(2), 0}};
|
||||
VectorValues cont = bn->optimize(dv);
|
||||
double error = bn->error(HybridValues(cont, dv));
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(1.77418393408, error, 1e-9);
|
||||
}
|
||||
{
|
||||
DiscreteValues dv{{M(1), 0}, {M(2), 1}};
|
||||
VectorValues cont = bn->optimize(dv);
|
||||
double error = bn->error(HybridValues(cont, dv));
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(1.77418393408, error, 1e-9);
|
||||
}
|
||||
{
|
||||
DiscreteValues dv{{M(1), 1}, {M(2), 0}};
|
||||
VectorValues cont = bn->optimize(dv);
|
||||
double error = bn->error(HybridValues(cont, dv));
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(1.10751726741, error, 1e-9);
|
||||
}
|
||||
{
|
||||
DiscreteValues dv{{M(1), 1}, {M(2), 1}};
|
||||
VectorValues cont = bn->optimize(dv);
|
||||
double error = bn->error(HybridValues(cont, dv));
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(1.10751726741, error, 1e-9);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Test components with differing covariances
|
||||
TEST(MixtureFactor, DifferentCovariances) {
|
||||
DiscreteKey m1(M(1), 2);
|
||||
|
||||
Values values;
|
||||
double x1 = 1.0, x2 = 1.0;
|
||||
values.insert(X(1), x1);
|
||||
values.insert(X(2), x2);
|
||||
|
||||
double between = 0.0;
|
||||
|
||||
auto model0 = noiseModel::Isotropic::Sigma(1, 1e2);
|
||||
auto model1 = noiseModel::Isotropic::Sigma(1, 1e-2);
|
||||
auto prior_noise = noiseModel::Isotropic::Sigma(1, 1e-3);
|
||||
|
||||
auto f0 =
|
||||
std::make_shared<BetweenFactor<double>>(X(1), X(2), between, model0);
|
||||
auto f1 =
|
||||
std::make_shared<BetweenFactor<double>>(X(1), X(2), between, model1);
|
||||
std::vector<NonlinearFactor::shared_ptr> factors{f0, f1};
|
||||
|
||||
// Create via toFactorGraph
|
||||
using symbol_shorthand::Z;
|
||||
Matrix H0_1, H0_2, H1_1, H1_2;
|
||||
Vector d0 = f0->evaluateError(x1, x2, &H0_1, &H0_2);
|
||||
std::vector<std::pair<Key, Matrix>> terms0 = {{Z(1), gtsam::I_1x1 /*Rx*/},
|
||||
//
|
||||
{X(1), H0_1 /*Sp1*/},
|
||||
{X(2), H0_2 /*Tp2*/}};
|
||||
|
||||
Vector d1 = f1->evaluateError(x1, x2, &H1_1, &H1_2);
|
||||
std::vector<std::pair<Key, Matrix>> terms1 = {{Z(1), gtsam::I_1x1 /*Rx*/},
|
||||
//
|
||||
{X(1), H1_1 /*Sp1*/},
|
||||
{X(2), H1_2 /*Tp2*/}};
|
||||
auto gm = new gtsam::GaussianMixture(
|
||||
{Z(1)}, {X(1), X(2)}, {m1},
|
||||
{std::make_shared<GaussianConditional>(terms0, 1, -d0, model0),
|
||||
std::make_shared<GaussianConditional>(terms1, 1, -d1, model1)});
|
||||
gtsam::HybridBayesNet bn;
|
||||
bn.emplace_back(gm);
|
||||
|
||||
gtsam::VectorValues measurements;
|
||||
measurements.insert(Z(1), gtsam::Z_1x1);
|
||||
// Create FG with single GaussianMixtureFactor
|
||||
HybridGaussianFactorGraph mixture_fg = bn.toFactorGraph(measurements);
|
||||
|
||||
// Linearized prior factor on X1
|
||||
auto prior = PriorFactor<double>(X(1), x1, prior_noise).linearize(values);
|
||||
mixture_fg.push_back(prior);
|
||||
|
||||
auto hbn = mixture_fg.eliminateSequential();
|
||||
|
||||
VectorValues cv;
|
||||
cv.insert(X(1), Vector1(0.0));
|
||||
cv.insert(X(2), Vector1(0.0));
|
||||
// P(m1) = [0.5, 0.5], so we should pick 0
|
||||
DiscreteValues dv;
|
||||
dv.insert({M(1), 0});
|
||||
HybridValues expected_values(cv, dv);
|
||||
|
||||
HybridValues actual_values = hbn->optimize();
|
||||
EXPECT(assert_equal(expected_values, actual_values));
|
||||
|
||||
// Check that we get different error values at the MLE point μ.
|
||||
AlgebraicDecisionTree<Key> errorTree = hbn->errorTree(cv);
|
||||
|
||||
HybridValues hv0(cv, DiscreteValues{{M(1), 0}});
|
||||
HybridValues hv1(cv, DiscreteValues{{M(1), 1}});
|
||||
|
||||
AlgebraicDecisionTree<Key> expectedErrorTree(m1, 9.90348755254,
|
||||
0.69314718056);
|
||||
EXPECT(assert_equal(expectedErrorTree, errorTree));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
|
|
|
|||
|
|
@ -46,6 +46,12 @@ void IterativeOptimizationParameters::print(ostream &os) const {
|
|||
<< verbosityTranslator(verbosity_) << endl;
|
||||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
bool IterativeOptimizationParameters::equals(
|
||||
const IterativeOptimizationParameters &other, double tol) const {
|
||||
return verbosity_ == other.verbosity();
|
||||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
ostream& operator<<(ostream &os, const IterativeOptimizationParameters &p) {
|
||||
p.print(os);
|
||||
|
|
|
|||
|
|
@ -41,15 +41,14 @@ class VectorValues;
|
|||
* parameters for iterative linear solvers
|
||||
*/
|
||||
class IterativeOptimizationParameters {
|
||||
|
||||
public:
|
||||
|
||||
public:
|
||||
typedef std::shared_ptr<IterativeOptimizationParameters> shared_ptr;
|
||||
enum Verbosity {
|
||||
SILENT = 0, COMPLEXITY, ERROR
|
||||
} verbosity_;
|
||||
enum Verbosity { SILENT = 0, COMPLEXITY, ERROR };
|
||||
|
||||
public:
|
||||
protected:
|
||||
Verbosity verbosity_;
|
||||
|
||||
public:
|
||||
|
||||
IterativeOptimizationParameters(Verbosity v = SILENT) :
|
||||
verbosity_(v) {
|
||||
|
|
@ -71,6 +70,9 @@ public:
|
|||
/* virtual print function */
|
||||
GTSAM_EXPORT virtual void print(std::ostream &os) const;
|
||||
|
||||
GTSAM_EXPORT virtual bool equals(const IterativeOptimizationParameters &other,
|
||||
double tol = 1e-9) const;
|
||||
|
||||
/* for serialization */
|
||||
GTSAM_EXPORT friend std::ostream &operator<<(
|
||||
std::ostream &os, const IterativeOptimizationParameters &p);
|
||||
|
|
|
|||
|
|
@ -375,7 +375,8 @@ virtual class JacobianFactor : gtsam::GaussianFactor {
|
|||
void serialize() const;
|
||||
};
|
||||
|
||||
pair<gtsam::GaussianConditional, gtsam::JacobianFactor*> EliminateQR(const gtsam::GaussianFactorGraph& factors, const gtsam::Ordering& keys);
|
||||
pair<gtsam::GaussianConditional*, gtsam::JacobianFactor*> EliminateQR(
|
||||
const gtsam::GaussianFactorGraph& factors, const gtsam::Ordering& keys);
|
||||
|
||||
#include <gtsam/linear/HessianFactor.h>
|
||||
virtual class HessianFactor : gtsam::GaussianFactor {
|
||||
|
|
|
|||
|
|
@ -75,13 +75,11 @@ Rot3 IncrementalRotation::operator()(
|
|||
Vector3 correctedOmega = measuredOmega - bias;
|
||||
|
||||
// Then compensate for sensor-body displacement: we express the quantities
|
||||
// (originally in the IMU frame) into the body frame. If Jacobian is
|
||||
// requested, the rotation matrix is obtained as `rotate` Jacobian.
|
||||
Matrix3 body_R_sensor;
|
||||
// (originally in the IMU frame) into the body frame.
|
||||
// Note that the rotate Jacobian is just body_P_sensor->rotation().matrix().
|
||||
if (body_P_sensor) {
|
||||
// rotation rate vector in the body frame
|
||||
correctedOmega = body_P_sensor->rotation().rotate(
|
||||
correctedOmega, {}, H_bias ? &body_R_sensor : nullptr);
|
||||
correctedOmega = body_P_sensor->rotation() * correctedOmega;
|
||||
}
|
||||
|
||||
// rotation vector describing rotation increment computed from the
|
||||
|
|
@ -90,7 +88,7 @@ Rot3 IncrementalRotation::operator()(
|
|||
Rot3 incrR = Rot3::Expmap(integratedOmega, H_bias); // expensive !!
|
||||
if (H_bias) {
|
||||
*H_bias *= -deltaT; // Correct so accurately reflects bias derivative
|
||||
if (body_P_sensor) *H_bias *= body_R_sensor;
|
||||
if (body_P_sensor) *H_bias *= body_P_sensor->rotation().matrix();
|
||||
}
|
||||
return incrR;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -118,17 +118,18 @@ TEST(ManifoldPreintegration, CompareWithPreintegratedRotation) {
|
|||
// Integrate a single measurement
|
||||
const double omega = 0.1;
|
||||
const Vector3 trueOmega(omega, 0, 0);
|
||||
const Vector3 bias(1, 2, 3);
|
||||
const Vector3 measuredOmega = trueOmega + bias;
|
||||
const Vector3 biasOmega(1, 2, 3);
|
||||
const Vector3 measuredOmega = trueOmega + biasOmega;
|
||||
const double deltaT = 0.5;
|
||||
|
||||
// Check the accumulated rotation.
|
||||
Rot3 expected = Rot3::Roll(omega * deltaT);
|
||||
pim.integrateGyroMeasurement(measuredOmega, bias, deltaT);
|
||||
const Vector biasOmegaHat = biasOmega;
|
||||
pim.integrateGyroMeasurement(measuredOmega, biasOmegaHat, deltaT);
|
||||
EXPECT(assert_equal(expected, pim.deltaRij(), 1e-9));
|
||||
|
||||
// Now do the same for a ManifoldPreintegration object
|
||||
imuBias::ConstantBias biasHat {Z_3x1, bias};
|
||||
imuBias::ConstantBias biasHat {Z_3x1, biasOmega};
|
||||
ManifoldPreintegration manifoldPim(testing::Params(), biasHat);
|
||||
manifoldPim.integrateMeasurement(Z_3x1, measuredOmega, deltaT);
|
||||
EXPECT(assert_equal(expected, manifoldPim.deltaRij(), 1e-9));
|
||||
|
|
@ -136,17 +137,21 @@ TEST(ManifoldPreintegration, CompareWithPreintegratedRotation) {
|
|||
// Check their internal Jacobians are the same:
|
||||
EXPECT(assert_equal(pim.delRdelBiasOmega(), manifoldPim.delRdelBiasOmega()));
|
||||
|
||||
// Check PreintegratedRotation::biascorrectedDeltaRij.
|
||||
Matrix3 H;
|
||||
// Let's check the derivatives a delta away from the bias hat
|
||||
const double delta = 0.05;
|
||||
const Vector3 biasOmegaIncr(delta, 0, 0);
|
||||
imuBias::ConstantBias bias_i {Z_3x1, biasOmegaHat + biasOmegaIncr};
|
||||
|
||||
// Check PreintegratedRotation::biascorrectedDeltaRij.
|
||||
// Note that biascorrectedDeltaRij expects the biasHat to be subtracted already
|
||||
Matrix3 H;
|
||||
Rot3 corrected = pim.biascorrectedDeltaRij(biasOmegaIncr, H);
|
||||
EQUALITY(Vector3(-deltaT * delta, 0, 0), expected.logmap(corrected));
|
||||
const Rot3 expected2 = Rot3::Roll((omega - delta) * deltaT);
|
||||
EXPECT(assert_equal(expected2, corrected, 1e-9));
|
||||
|
||||
// Check ManifoldPreintegration::biasCorrectedDelta.
|
||||
imuBias::ConstantBias bias_i {Z_3x1, bias + biasOmegaIncr};
|
||||
// Note that, confusingly, biasCorrectedDelta will subtract biasHat inside
|
||||
Matrix96 H2;
|
||||
Vector9 biasCorrected = manifoldPim.biasCorrectedDelta(bias_i, H2);
|
||||
Vector9 expected3;
|
||||
|
|
@ -154,12 +159,11 @@ TEST(ManifoldPreintegration, CompareWithPreintegratedRotation) {
|
|||
EXPECT(assert_equal(expected3, biasCorrected, 1e-9));
|
||||
|
||||
// Check that this one is sane:
|
||||
auto g = [&](const Vector3& increment) {
|
||||
return manifoldPim.biasCorrectedDelta({Z_3x1, bias + increment}, {});
|
||||
auto g = [&](const Vector3& b) {
|
||||
return manifoldPim.biasCorrectedDelta({Z_3x1, b}, {});
|
||||
};
|
||||
EXPECT(assert_equal<Matrix>(numericalDerivative11<Vector9, Vector3>(g, Z_3x1),
|
||||
H2.rightCols<3>(),
|
||||
1e-4)); // NOTE(frank): reduced tolerance
|
||||
EXPECT(assert_equal<Matrix>(numericalDerivative11<Vector9, Vector3>(g, bias_i.gyroscope()),
|
||||
H2.rightCols<3>()));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
|||
|
|
@ -36,40 +36,33 @@ const Vector3 measuredOmega = trueOmega + bias;
|
|||
const double deltaT = 0.5;
|
||||
} // namespace biased_x_rotation
|
||||
|
||||
// Create params where x and y axes are exchanged.
|
||||
static std::shared_ptr<PreintegratedRotationParams> paramsWithTransform() {
|
||||
auto p = std::make_shared<PreintegratedRotationParams>();
|
||||
p->setBodyPSensor({Rot3::Yaw(M_PI_2), {0, 0, 0}});
|
||||
return p;
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(PreintegratedRotation, integrateGyroMeasurement) {
|
||||
// Example where IMU is identical to body frame, then omega is roll
|
||||
using namespace biased_x_rotation;
|
||||
auto p = std::make_shared<PreintegratedRotationParams>();
|
||||
PreintegratedRotation pim(p);
|
||||
|
||||
// Check the value.
|
||||
Matrix3 H_bias;
|
||||
internal::IncrementalRotation f{measuredOmega, deltaT, p->getBodyPSensor()};
|
||||
const internal::IncrementalRotation f{measuredOmega, deltaT, p->getBodyPSensor()};
|
||||
const Rot3 incrR = f(bias, H_bias);
|
||||
Rot3 expected = Rot3::Roll(omega * deltaT);
|
||||
EXPECT(assert_equal(expected, incrR, 1e-9));
|
||||
const Rot3 expected = Rot3::Roll(omega * deltaT);
|
||||
EXPECT(assert_equal(expected, incrR, 1e-9))
|
||||
|
||||
// Check the derivative:
|
||||
EXPECT(assert_equal(numericalDerivative11<Rot3, Vector3>(f, bias), H_bias));
|
||||
EXPECT(assert_equal(numericalDerivative11<Rot3, Vector3>(f, bias), H_bias))
|
||||
|
||||
// Check value of deltaRij() after integration.
|
||||
Matrix3 F;
|
||||
PreintegratedRotation pim(p);
|
||||
pim.integrateGyroMeasurement(measuredOmega, bias, deltaT, F);
|
||||
EXPECT(assert_equal(expected, pim.deltaRij(), 1e-9));
|
||||
EXPECT(assert_equal(expected, pim.deltaRij(), 1e-9))
|
||||
|
||||
// Check that system matrix F is the first derivative of compose:
|
||||
EXPECT(assert_equal<Matrix3>(pim.deltaRij().inverse().AdjointMap(), F));
|
||||
EXPECT(assert_equal<Matrix3>(pim.deltaRij().inverse().AdjointMap(), F))
|
||||
|
||||
// Make sure delRdelBiasOmega is H_bias after integration.
|
||||
EXPECT(assert_equal<Matrix3>(H_bias, pim.delRdelBiasOmega()));
|
||||
EXPECT(assert_equal<Matrix3>(H_bias, pim.delRdelBiasOmega()))
|
||||
|
||||
// Check if we make a correction to the bias, the value and Jacobian are
|
||||
// correct. Note that the bias is subtracted from the measurement, and the
|
||||
|
|
@ -78,56 +71,127 @@ TEST(PreintegratedRotation, integrateGyroMeasurement) {
|
|||
const double delta = 0.05;
|
||||
const Vector3 biasOmegaIncr(delta, 0, 0);
|
||||
Rot3 corrected = pim.biascorrectedDeltaRij(biasOmegaIncr, H);
|
||||
EQUALITY(Vector3(-deltaT * delta, 0, 0), expected.logmap(corrected));
|
||||
EXPECT(assert_equal(Rot3::Roll((omega - delta) * deltaT), corrected, 1e-9));
|
||||
EQUALITY(Vector3(-deltaT * delta, 0, 0), expected.logmap(corrected))
|
||||
EXPECT(assert_equal(Rot3::Roll((omega - delta) * deltaT), corrected, 1e-9))
|
||||
|
||||
// TODO(frank): again the derivative is not the *sane* one!
|
||||
// auto g = [&](const Vector3& increment) {
|
||||
// return pim.biascorrectedDeltaRij(increment, {});
|
||||
// };
|
||||
// EXPECT(assert_equal(numericalDerivative11<Rot3, Vector3>(g, Z_3x1), H));
|
||||
// Check the derivative matches the numerical one
|
||||
auto g = [&](const Vector3& increment) {
|
||||
return pim.biascorrectedDeltaRij(increment, {});
|
||||
};
|
||||
Matrix3 expectedH = numericalDerivative11<Rot3, Vector3>(g, biasOmegaIncr);
|
||||
EXPECT(assert_equal(expectedH, H));
|
||||
|
||||
// Let's integrate a second IMU measurement and check the Jacobian update:
|
||||
pim.integrateGyroMeasurement(measuredOmega, bias, deltaT);
|
||||
expectedH = numericalDerivative11<Rot3, Vector3>(g, biasOmegaIncr);
|
||||
corrected = pim.biascorrectedDeltaRij(biasOmegaIncr, H);
|
||||
EXPECT(assert_equal(expectedH, H));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
|
||||
// Create params where x and y axes are exchanged.
|
||||
static std::shared_ptr<PreintegratedRotationParams> paramsWithTransform() {
|
||||
auto p = std::make_shared<PreintegratedRotationParams>();
|
||||
p->setBodyPSensor({Rot3::Yaw(M_PI_2), {0, 0, 0}});
|
||||
return p;
|
||||
}
|
||||
|
||||
TEST(PreintegratedRotation, integrateGyroMeasurementWithTransform) {
|
||||
// Example where IMU is rotated, so measured omega indicates pitch.
|
||||
using namespace biased_x_rotation;
|
||||
auto p = paramsWithTransform();
|
||||
PreintegratedRotation pim(p);
|
||||
|
||||
// Check the value.
|
||||
Matrix3 H_bias;
|
||||
internal::IncrementalRotation f{measuredOmega, deltaT, p->getBodyPSensor()};
|
||||
Rot3 expected = Rot3::Pitch(omega * deltaT);
|
||||
EXPECT(assert_equal(expected, f(bias, H_bias), 1e-9));
|
||||
const internal::IncrementalRotation f{measuredOmega, deltaT, p->getBodyPSensor()};
|
||||
const Rot3 expected = Rot3::Pitch(omega * deltaT); // Pitch, because of sensor-IMU rotation!
|
||||
EXPECT(assert_equal(expected, f(bias, H_bias), 1e-9))
|
||||
|
||||
// Check the derivative:
|
||||
EXPECT(assert_equal(numericalDerivative11<Rot3, Vector3>(f, bias), H_bias));
|
||||
EXPECT(assert_equal(numericalDerivative11<Rot3, Vector3>(f, bias), H_bias))
|
||||
|
||||
// Check value of deltaRij() after integration.
|
||||
Matrix3 F;
|
||||
PreintegratedRotation pim(p);
|
||||
pim.integrateGyroMeasurement(measuredOmega, bias, deltaT, F);
|
||||
EXPECT(assert_equal(expected, pim.deltaRij(), 1e-9));
|
||||
EXPECT(assert_equal(expected, pim.deltaRij(), 1e-9))
|
||||
|
||||
// Check that system matrix F is the first derivative of compose:
|
||||
EXPECT(assert_equal<Matrix3>(pim.deltaRij().inverse().AdjointMap(), F));
|
||||
EXPECT(assert_equal<Matrix3>(pim.deltaRij().inverse().AdjointMap(), F))
|
||||
|
||||
// Make sure delRdelBiasOmega is H_bias after integration.
|
||||
EXPECT(assert_equal<Matrix3>(H_bias, pim.delRdelBiasOmega()));
|
||||
EXPECT(assert_equal<Matrix3>(H_bias, pim.delRdelBiasOmega()))
|
||||
|
||||
// Check the bias correction in same way, but will now yield pitch change.
|
||||
Matrix3 H;
|
||||
const double delta = 0.05;
|
||||
const Vector3 biasOmegaIncr(delta, 0, 0);
|
||||
Rot3 corrected = pim.biascorrectedDeltaRij(biasOmegaIncr, H);
|
||||
EQUALITY(Vector3(0, -deltaT * delta, 0), expected.logmap(corrected));
|
||||
EXPECT(assert_equal(Rot3::Pitch((omega - delta) * deltaT), corrected, 1e-9));
|
||||
EQUALITY(Vector3(0, -deltaT * delta, 0), expected.logmap(corrected))
|
||||
EXPECT(assert_equal(Rot3::Pitch((omega - delta) * deltaT), corrected, 1e-9))
|
||||
|
||||
// TODO(frank): again the derivative is not the *sane* one!
|
||||
// auto g = [&](const Vector3& increment) {
|
||||
// return pim.biascorrectedDeltaRij(increment, {});
|
||||
// };
|
||||
// EXPECT(assert_equal(numericalDerivative11<Rot3, Vector3>(g, Z_3x1), H));
|
||||
// Check the derivative matches the *expectedH* one
|
||||
auto g = [&](const Vector3& increment) {
|
||||
return pim.biascorrectedDeltaRij(increment, {});
|
||||
};
|
||||
Matrix3 expectedH = numericalDerivative11<Rot3, Vector3>(g, biasOmegaIncr);
|
||||
EXPECT(assert_equal(expectedH, H));
|
||||
|
||||
// Let's integrate a second IMU measurement and check the Jacobian update:
|
||||
pim.integrateGyroMeasurement(measuredOmega, bias, deltaT);
|
||||
corrected = pim.biascorrectedDeltaRij(biasOmegaIncr, H);
|
||||
expectedH = numericalDerivative11<Rot3, Vector3>(g, biasOmegaIncr);
|
||||
EXPECT(assert_equal(expectedH, H));
|
||||
}
|
||||
|
||||
// Create params we have a non-axis-aligned rotation and even an offset.
|
||||
static std::shared_ptr<PreintegratedRotationParams> paramsWithArbitraryTransform() {
|
||||
auto p = std::make_shared<PreintegratedRotationParams>();
|
||||
p->setBodyPSensor({Rot3::Expmap({1,2,3}), {4,5,6}});
|
||||
return p;
|
||||
}
|
||||
|
||||
TEST(PreintegratedRotation, integrateGyroMeasurementWithArbitraryTransform) {
|
||||
// Example with a non-axis-aligned transform and some position.
|
||||
using namespace biased_x_rotation;
|
||||
auto p = paramsWithArbitraryTransform();
|
||||
|
||||
// Check the derivative:
|
||||
Matrix3 H_bias;
|
||||
const internal::IncrementalRotation f{measuredOmega, deltaT, p->getBodyPSensor()};
|
||||
f(bias, H_bias);
|
||||
EXPECT(assert_equal(numericalDerivative11<Rot3, Vector3>(f, bias), H_bias))
|
||||
|
||||
// Check derivative of deltaRij() after integration.
|
||||
Matrix3 F;
|
||||
PreintegratedRotation pim(p);
|
||||
pim.integrateGyroMeasurement(measuredOmega, bias, deltaT, F);
|
||||
|
||||
// Check that system matrix F is the first derivative of compose:
|
||||
EXPECT(assert_equal<Matrix3>(pim.deltaRij().inverse().AdjointMap(), F))
|
||||
|
||||
// Make sure delRdelBiasOmega is H_bias after integration.
|
||||
EXPECT(assert_equal<Matrix3>(H_bias, pim.delRdelBiasOmega()))
|
||||
|
||||
// Check the bias correction in same way, but will now yield pitch change.
|
||||
Matrix3 H;
|
||||
const double delta = 0.05;
|
||||
const Vector3 biasOmegaIncr(delta, 0, 0);
|
||||
Rot3 corrected = pim.biascorrectedDeltaRij(biasOmegaIncr, H);
|
||||
|
||||
// Check the derivative matches the numerical one
|
||||
auto g = [&](const Vector3& increment) {
|
||||
return pim.biascorrectedDeltaRij(increment, {});
|
||||
};
|
||||
Matrix3 expectedH = numericalDerivative11<Rot3, Vector3>(g, biasOmegaIncr);
|
||||
EXPECT(assert_equal(expectedH, H));
|
||||
|
||||
// Let's integrate a second IMU measurement and check the Jacobian update:
|
||||
pim.integrateGyroMeasurement(measuredOmega, bias, deltaT);
|
||||
corrected = pim.biascorrectedDeltaRij(biasOmegaIncr, H);
|
||||
expectedH = numericalDerivative11<Rot3, Vector3>(g, biasOmegaIncr);
|
||||
EXPECT(assert_equal(expectedH, H));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
|
|
|
|||
|
|
@ -424,6 +424,11 @@ ISAM2Result ISAM2::update(const NonlinearFactorGraph& newFactors,
|
|||
ISAM2Result result(params_.enableDetailedResults);
|
||||
UpdateImpl update(params_, updateParams);
|
||||
|
||||
// Initialize any new variables \Theta_{new} and add
|
||||
// \Theta:=\Theta\cup\Theta_{new}.
|
||||
// Needed before delta update if using Dogleg optimizer.
|
||||
addVariables(newTheta, result.details());
|
||||
|
||||
// Update delta if we need it to check relinearization later
|
||||
if (update.relinarizationNeeded(update_count_))
|
||||
updateDelta(updateParams.forceFullSolve);
|
||||
|
|
@ -435,9 +440,7 @@ ISAM2Result ISAM2::update(const NonlinearFactorGraph& newFactors,
|
|||
update.computeUnusedKeys(newFactors, variableIndex_,
|
||||
result.keysWithRemovedFactors, &result.unusedKeys);
|
||||
|
||||
// 2. Initialize any new variables \Theta_{new} and add
|
||||
// \Theta:=\Theta\cup\Theta_{new}.
|
||||
addVariables(newTheta, result.details());
|
||||
// 2. Compute new error to check for relinearization
|
||||
if (params_.evaluateNonlinearError)
|
||||
update.error(nonlinearFactors_, calculateEstimate(), &result.errorBefore);
|
||||
|
||||
|
|
@ -731,6 +734,7 @@ void ISAM2::updateDelta(bool forceFullSolve) const {
|
|||
effectiveWildfireThreshold, &delta_);
|
||||
deltaReplacedMask_.clear();
|
||||
gttoc(Wildfire_update);
|
||||
|
||||
} else if (std::holds_alternative<ISAM2DoglegParams>(params_.optimizationParams)) {
|
||||
// If using Dogleg, do a Dogleg step
|
||||
const ISAM2DoglegParams& doglegParams =
|
||||
|
|
@ -769,9 +773,8 @@ void ISAM2::updateDelta(bool forceFullSolve) const {
|
|||
gttic(Copy_dx_d);
|
||||
// Update Delta and linear step
|
||||
doglegDelta_ = doglegResult.delta;
|
||||
delta_ =
|
||||
doglegResult
|
||||
.dx_d; // Copy the VectorValues containing with the linear solution
|
||||
// Copy the VectorValues containing with the linear solution
|
||||
delta_ = doglegResult.dx_d;
|
||||
gttoc(Copy_dx_d);
|
||||
} else {
|
||||
throw std::runtime_error("iSAM2: unknown ISAM2Params type");
|
||||
|
|
|
|||
|
|
@ -236,9 +236,9 @@ bool LevenbergMarquardtOptimizer::tryLambda(const GaussianFactorGraph& linear,
|
|||
if (currentState->iterations == 0) {
|
||||
cout << "iter cost cost_change lambda success iter_time" << endl;
|
||||
}
|
||||
cout << setw(4) << currentState->iterations << " " << setw(8) << newError << " " << setw(3) << setprecision(2)
|
||||
<< costChange << " " << setw(3) << setprecision(2) << currentState->lambda << " " << setw(4)
|
||||
<< systemSolvedSuccessfully << " " << setw(3) << setprecision(2) << iterationTime << endl;
|
||||
cout << setw(4) << currentState->iterations << " " << setw(12) << newError << " " << setw(12) << setprecision(2)
|
||||
<< costChange << " " << setw(10) << setprecision(2) << currentState->lambda << " " << setw(6)
|
||||
<< systemSolvedSuccessfully << " " << setw(10) << setprecision(2) << iterationTime << endl;
|
||||
}
|
||||
if (step_is_successful) {
|
||||
// we have successfully decreased the cost and we have good modelFidelity
|
||||
|
|
|
|||
|
|
@ -123,6 +123,28 @@ void NonlinearOptimizerParams::print(const std::string& str) const {
|
|||
std::cout.flush();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
bool NonlinearOptimizerParams::equals(const NonlinearOptimizerParams& other,
|
||||
double tol) const {
|
||||
// Check for equality of shared ptrs
|
||||
bool iterative_params_equal = iterativeParams == other.iterativeParams;
|
||||
// Check equality of components
|
||||
if (iterativeParams && other.iterativeParams) {
|
||||
iterative_params_equal = iterativeParams->equals(*other.iterativeParams);
|
||||
} else {
|
||||
// Check if either is null. If both are null, then true
|
||||
iterative_params_equal = !iterativeParams && !other.iterativeParams;
|
||||
}
|
||||
|
||||
return maxIterations == other.getMaxIterations() &&
|
||||
std::abs(relativeErrorTol - other.getRelativeErrorTol()) <= tol &&
|
||||
std::abs(absoluteErrorTol - other.getAbsoluteErrorTol()) <= tol &&
|
||||
std::abs(errorTol - other.getErrorTol()) <= tol &&
|
||||
verbosityTranslator(verbosity) == other.getVerbosity() &&
|
||||
orderingType == other.orderingType && ordering == other.ordering &&
|
||||
linearSolverType == other.linearSolverType && iterative_params_equal;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
std::string NonlinearOptimizerParams::linearSolverTranslator(
|
||||
LinearSolverType linearSolverType) const {
|
||||
|
|
|
|||
|
|
@ -114,16 +114,7 @@ public:
|
|||
|
||||
virtual void print(const std::string& str = "") const;
|
||||
|
||||
bool equals(const NonlinearOptimizerParams& other, double tol = 1e-9) const {
|
||||
return maxIterations == other.getMaxIterations()
|
||||
&& std::abs(relativeErrorTol - other.getRelativeErrorTol()) <= tol
|
||||
&& std::abs(absoluteErrorTol - other.getAbsoluteErrorTol()) <= tol
|
||||
&& std::abs(errorTol - other.getErrorTol()) <= tol
|
||||
&& verbosityTranslator(verbosity) == other.getVerbosity();
|
||||
// && orderingType.equals(other.getOrderingType()_;
|
||||
// && linearSolverType == other.getLinearSolverType();
|
||||
// TODO: check ordering, iterativeParams, and iterationsHook
|
||||
}
|
||||
bool equals(const NonlinearOptimizerParams& other, double tol = 1e-9) const;
|
||||
|
||||
inline bool isMultifrontal() const {
|
||||
return (linearSolverType == MULTIFRONTAL_CHOLESKY)
|
||||
|
|
|
|||
|
|
@ -42,7 +42,9 @@ namespace gtsam
|
|||
// Gather all keys
|
||||
KeySet allKeys;
|
||||
for(const std::shared_ptr<FACTOR>& factor: factors) {
|
||||
allKeys.insert(factor->begin(), factor->end());
|
||||
// Non-active factors are nullptr
|
||||
if (factor)
|
||||
allKeys.insert(factor->begin(), factor->end());
|
||||
}
|
||||
|
||||
// Check keys
|
||||
|
|
|
|||
|
|
@ -671,6 +671,21 @@ class AHRS {
|
|||
//void print(string s) const;
|
||||
};
|
||||
|
||||
#include <gtsam_unstable/slam/PartialPriorFactor.h>
|
||||
template <T = {gtsam::Pose2, gtsam::Pose3}>
|
||||
virtual class PartialPriorFactor : gtsam::NoiseModelFactor {
|
||||
PartialPriorFactor(gtsam::Key key, size_t idx, double prior,
|
||||
const gtsam::noiseModel::Base* model);
|
||||
PartialPriorFactor(gtsam::Key key, const std::vector<size_t>& indices,
|
||||
const gtsam::Vector& prior,
|
||||
const gtsam::noiseModel::Base* model);
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
const gtsam::Vector& prior();
|
||||
};
|
||||
|
||||
// Tectonic SAM Factors
|
||||
|
||||
#include <gtsam_unstable/slam/TSAMFactors.h>
|
||||
|
|
|
|||
|
|
@ -50,9 +50,6 @@ namespace gtsam {
|
|||
Vector prior_; ///< Measurement on tangent space parameters, in compressed form.
|
||||
std::vector<size_t> indices_; ///< Indices of the measured tangent space parameters.
|
||||
|
||||
/** default constructor - only use for serialization */
|
||||
PartialPriorFactor() {}
|
||||
|
||||
/**
|
||||
* constructor with just minimum requirements for a factor - allows more
|
||||
* computation in the constructor. This should only be used by subclasses
|
||||
|
|
@ -65,7 +62,8 @@ namespace gtsam {
|
|||
// Provide access to the Matrix& version of evaluateError:
|
||||
using Base::evaluateError;
|
||||
|
||||
~PartialPriorFactor() override {}
|
||||
/** default constructor - only use for serialization */
|
||||
PartialPriorFactor() {}
|
||||
|
||||
/** Single Element Constructor: Prior on a single parameter at index 'idx' in the tangent vector.*/
|
||||
PartialPriorFactor(Key key, size_t idx, double prior, const SharedNoiseModel& model) :
|
||||
|
|
@ -85,6 +83,8 @@ namespace gtsam {
|
|||
assert(model->dim() == (size_t)prior.size());
|
||||
}
|
||||
|
||||
~PartialPriorFactor() override {}
|
||||
|
||||
/// @return a deep copy of this factor
|
||||
gtsam::NonlinearFactor::shared_ptr clone() const override {
|
||||
return std::static_pointer_cast<gtsam::NonlinearFactor>(
|
||||
|
|
|
|||
|
|
@ -29,11 +29,8 @@ if(POLICY CMP0057)
|
|||
cmake_policy(SET CMP0057 NEW)
|
||||
endif()
|
||||
|
||||
# Prefer system pybind11 first, if not found, rely on bundled version:
|
||||
find_package(pybind11 CONFIG QUIET)
|
||||
if (NOT pybind11_FOUND)
|
||||
add_subdirectory(${PROJECT_SOURCE_DIR}/wrap/pybind11 pybind11)
|
||||
endif()
|
||||
# Use bundled pybind11 version
|
||||
add_subdirectory(${PROJECT_SOURCE_DIR}/wrap/pybind11 pybind11)
|
||||
|
||||
# Set the wrapping script variable
|
||||
set(PYBIND_WRAP_SCRIPT "${PROJECT_SOURCE_DIR}/wrap/scripts/pybind_wrap.py")
|
||||
|
|
@ -189,6 +186,8 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
|
|||
gtsam::BinaryMeasurementsPoint3
|
||||
gtsam::BinaryMeasurementsUnit3
|
||||
gtsam::BinaryMeasurementsRot3
|
||||
gtsam::SimWall2DVector
|
||||
gtsam::SimPolygon2DVector
|
||||
gtsam::CameraSetCal3_S2
|
||||
gtsam::CameraSetCal3Bundler
|
||||
gtsam::CameraSetCal3Unified
|
||||
|
|
@ -264,11 +263,25 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
|
|||
endif()
|
||||
|
||||
# Add custom target so we can install with `make python-install`
|
||||
# Note below we make sure to install with --user iff not in a virtualenv
|
||||
set(GTSAM_PYTHON_INSTALL_TARGET python-install)
|
||||
add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET}
|
||||
COMMAND ${PYTHON_EXECUTABLE} -m pip install --user .
|
||||
#TODO(Varun) Maybe move the long command to script?
|
||||
# https://stackoverflow.com/questions/49053544/how-do-i-run-a-python-script-every-time-in-a-cmake-build
|
||||
if (NOT WIN32) # WIN32=1 is target platform is Windows
|
||||
add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET}
|
||||
COMMAND stubgen -q -p gtsam && cp -a out/gtsam/ gtsam && ${PYTHON_EXECUTABLE} -c "import sys, subprocess; cmd = [sys.executable, '-m', 'pip', 'install']; has_venv = hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix); cmd.append('--user' if not has_venv else ''); cmd.append('.'); subprocess.check_call([c for c in cmd if c])"
|
||||
DEPENDS ${GTSAM_PYTHON_DEPENDENCIES}
|
||||
WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY})
|
||||
WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY}
|
||||
VERBATIM)
|
||||
else()
|
||||
#TODO(Varun) Find equivalent cp on Windows
|
||||
add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET}
|
||||
COMMAND ${PYTHON_EXECUTABLE} -c "import sys, subprocess; cmd = [sys.executable, '-m', 'pip', 'install']; has_venv = hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix); cmd.append('--user' if not has_venv else ''); cmd.append('.'); subprocess.check_call([c for c in cmd if c])"
|
||||
DEPENDS ${GTSAM_PYTHON_DEPENDENCIES}
|
||||
WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY}
|
||||
VERBATIM)
|
||||
endif()
|
||||
|
||||
|
||||
# Custom make command to run all GTSAM Python tests
|
||||
add_custom_target(
|
||||
|
|
|
|||
|
|
@ -1,2 +1,3 @@
|
|||
-r requirements.txt
|
||||
pyparsing>=2.4.2
|
||||
pyparsing>=2.4.2
|
||||
mypy==1.4.1 #TODO(Varun) A bug in mypy>=1.5.0 causes an unresolved placeholder error when importing numpy>=2.0.0 (https://github.com/python/mypy/issues/17396)
|
||||
|
|
@ -17,7 +17,7 @@
|
|||
| InverseKinematicsExampleExpressions.cpp | |
|
||||
| ISAM2Example_SmartFactor | |
|
||||
| ISAM2_SmartFactorStereo_IMU | |
|
||||
| LocalizationExample | impossible? |
|
||||
| LocalizationExample | :heavy_check_mark: |
|
||||
| METISOrderingExample | |
|
||||
| OdometryExample | :heavy_check_mark: |
|
||||
| PlanarSLAMExample | :heavy_check_mark: |
|
||||
|
|
|
|||
|
|
@ -0,0 +1,122 @@
|
|||
# pylint: disable=unused-import,consider-using-from-import,invalid-name,no-name-in-module,no-member,missing-function-docstring,too-many-locals
|
||||
"""
|
||||
Transcription of SelfCalibrationExample.cpp
|
||||
"""
|
||||
import math
|
||||
|
||||
from gtsam import Cal3_S2
|
||||
from gtsam.noiseModel import Diagonal, Isotropic
|
||||
|
||||
# SFM-specific factors
|
||||
from gtsam import GeneralSFMFactor2Cal3_S2 # does calibration !
|
||||
from gtsam import PinholeCameraCal3_S2
|
||||
|
||||
# Camera observations of landmarks (i.e. pixel coordinates) will be stored as Point2 (x, y).
|
||||
from gtsam import Point2
|
||||
from gtsam import Point3, Pose3, Rot3
|
||||
|
||||
# Inference and optimization
|
||||
from gtsam import NonlinearFactorGraph, DoglegOptimizer, Values
|
||||
from gtsam.symbol_shorthand import K, L, X
|
||||
|
||||
|
||||
# this is a direct translation of examples/SFMData.h
|
||||
# which is slightly different from python/gtsam/examples/SFMdata.py.
|
||||
def createPoints() -> list[Point3]:
|
||||
"""
|
||||
Create the set of ground-truth landmarks
|
||||
"""
|
||||
return [
|
||||
Point3(10.0, 10.0, 10.0),
|
||||
Point3(-10.0, 10.0, 10.0),
|
||||
Point3(-10.0, -10.0, 10.0),
|
||||
Point3(10.0, -10.0, 10.0),
|
||||
Point3(10.0, 10.0, -10.0),
|
||||
Point3(-10.0, 10.0, -10.0),
|
||||
Point3(-10.0, -10.0, -10.0),
|
||||
Point3(10.0, -10.0, -10.0),
|
||||
]
|
||||
|
||||
|
||||
def createPoses(
|
||||
init: Pose3 = Pose3(Rot3.Ypr(math.pi / 2, 0, -math.pi / 2), Point3(30, 0, 0)),
|
||||
delta: Pose3 = Pose3(
|
||||
Rot3.Ypr(0, -math.pi / 4, 0),
|
||||
Point3(math.sin(math.pi / 4) * 30, 0, 30 * (1 - math.sin(math.pi / 4))),
|
||||
),
|
||||
steps: int = 8,
|
||||
) -> list[Pose3]:
|
||||
"""
|
||||
Create the set of ground-truth poses
|
||||
Default values give a circular trajectory,
|
||||
radius 30 at pi/4 intervals, always facing the circle center
|
||||
"""
|
||||
poses: list[Pose3] = []
|
||||
poses.append(init)
|
||||
for i in range(1, steps):
|
||||
poses.append(poses[i - 1].compose(delta))
|
||||
return poses
|
||||
|
||||
|
||||
def main() -> None:
|
||||
# Create the set of ground-truth
|
||||
points: list[Point3] = createPoints()
|
||||
poses: list[Pose3] = createPoses()
|
||||
|
||||
# Create the factor graph
|
||||
graph = NonlinearFactorGraph()
|
||||
|
||||
# Add a prior on pose x1.
|
||||
# 30cm std on x,y,z 0.1 rad on roll,pitch,yaw
|
||||
poseNoise = Diagonal.Sigmas([0.1, 0.1, 0.1, 0.3, 0.3, 0.3])
|
||||
graph.addPriorPose3(X(0), poses[0], poseNoise)
|
||||
|
||||
# Simulated measurements from each camera pose, adding them to the factor graph
|
||||
Kcal = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)
|
||||
measurementNoise = Isotropic.Sigma(2, 1.0)
|
||||
for i, pose in enumerate(poses):
|
||||
for j, point in enumerate(points):
|
||||
camera = PinholeCameraCal3_S2(pose, Kcal)
|
||||
measurement: Point2 = camera.project(point)
|
||||
# The only real difference with the Visual SLAM example is that here we
|
||||
# use a different factor type, that also calculates the Jacobian with
|
||||
# respect to calibration
|
||||
graph.add(
|
||||
GeneralSFMFactor2Cal3_S2(
|
||||
measurement,
|
||||
measurementNoise,
|
||||
X(i),
|
||||
L(j),
|
||||
K(0),
|
||||
)
|
||||
)
|
||||
|
||||
# Add a prior on the position of the first landmark.
|
||||
pointNoise = Isotropic.Sigma(3, 0.1)
|
||||
graph.addPriorPoint3(L(0), points[0], pointNoise) # add directly to graph
|
||||
|
||||
# Add a prior on the calibration.
|
||||
calNoise = Diagonal.Sigmas([500, 500, 0.1, 100, 100])
|
||||
graph.addPriorCal3_S2(K(0), Kcal, calNoise)
|
||||
|
||||
# Create the initial estimate to the solution
|
||||
# now including an estimate on the camera calibration parameters
|
||||
initialEstimate = Values()
|
||||
initialEstimate.insert(K(0), Cal3_S2(60.0, 60.0, 0.0, 45.0, 45.0))
|
||||
for i, pose in enumerate(poses):
|
||||
initialEstimate.insert(
|
||||
X(i),
|
||||
pose.compose(
|
||||
Pose3(Rot3.Rodrigues(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20))
|
||||
),
|
||||
)
|
||||
for j, point in enumerate(points):
|
||||
initialEstimate.insert(L(j), point + Point3(-0.25, 0.20, 0.15))
|
||||
|
||||
# Optimize the graph and print results
|
||||
result: Values = DoglegOptimizer(graph, initialEstimate).optimize()
|
||||
result.print("Final results:\n")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
"""
|
||||
A simple 2D pose slam example with "GPS" measurements
|
||||
- The robot moves forward 2 meter each iteration
|
||||
- The robot initially faces along the X axis (horizontal, to the right in 2D)
|
||||
- We have full odometry between pose
|
||||
- We have "GPS-like" measurements implemented with a custom factor
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam import BetweenFactorPose2, Pose2, noiseModel
|
||||
from gtsam_unstable import PartialPriorFactorPose2
|
||||
|
||||
|
||||
def main():
|
||||
# 1. Create a factor graph container and add factors to it.
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# 2a. Add odometry factors
|
||||
# For simplicity, we will use the same noise model for each odometry factor
|
||||
odometryNoise = noiseModel.Diagonal.Sigmas(np.asarray([0.2, 0.2, 0.1]))
|
||||
|
||||
# Create odometry (Between) factors between consecutive poses
|
||||
graph.push_back(
|
||||
BetweenFactorPose2(1, 2, Pose2(2.0, 0.0, 0.0), odometryNoise))
|
||||
graph.push_back(
|
||||
BetweenFactorPose2(2, 3, Pose2(2.0, 0.0, 0.0), odometryNoise))
|
||||
|
||||
# 2b. Add "GPS-like" measurements
|
||||
# We will use PartialPrior factor for this.
|
||||
unaryNoise = noiseModel.Diagonal.Sigmas(np.array([0.1,
|
||||
0.1])) # 10cm std on x,y
|
||||
|
||||
graph.push_back(
|
||||
PartialPriorFactorPose2(1, [0, 1], np.asarray([0.0, 0.0]), unaryNoise))
|
||||
graph.push_back(
|
||||
PartialPriorFactorPose2(2, [0, 1], np.asarray([2.0, 0.0]), unaryNoise))
|
||||
graph.push_back(
|
||||
PartialPriorFactorPose2(3, [0, 1], np.asarray([4.0, 0.0]), unaryNoise))
|
||||
graph.print("\nFactor Graph:\n")
|
||||
|
||||
# 3. Create the data structure to hold the initialEstimate estimate to the solution
|
||||
# For illustrative purposes, these have been deliberately set to incorrect values
|
||||
initialEstimate = gtsam.Values()
|
||||
initialEstimate.insert(1, Pose2(0.5, 0.0, 0.2))
|
||||
initialEstimate.insert(2, Pose2(2.3, 0.1, -0.2))
|
||||
initialEstimate.insert(3, Pose2(4.1, 0.1, 0.1))
|
||||
initialEstimate.print("\nInitial Estimate:\n")
|
||||
|
||||
# 4. Optimize using Levenberg-Marquardt optimization. The optimizer
|
||||
# accepts an optional set of configuration parameters, controlling
|
||||
# things like convergence criteria, the type of linear system solver
|
||||
# to use, and the amount of information displayed during optimization.
|
||||
# Here we will use the default set of parameters. See the
|
||||
# documentation for the full set of parameters.
|
||||
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
|
||||
result = optimizer.optimize()
|
||||
result.print("Final Result:\n")
|
||||
|
||||
# 5. Calculate and print marginal covariances for all variables
|
||||
marginals = gtsam.Marginals(graph, result)
|
||||
print("x1 covariance:\n", marginals.marginalCovariance(1))
|
||||
print("x2 covariance:\n", marginals.marginalCovariance(2))
|
||||
print("x3 covariance:\n", marginals.marginalCovariance(3))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -9,6 +9,7 @@
|
|||
|
||||
#include <pybind11/eigen.h>
|
||||
#include <pybind11/stl_bind.h>
|
||||
#include <pybind11/stl.h>
|
||||
#include <pybind11/pybind11.h>
|
||||
#include <pybind11/functional.h>
|
||||
#include <pybind11/iostream.h>
|
||||
|
|
|
|||
|
|
@ -13,6 +13,7 @@ package_data = {
|
|||
"./*.so",
|
||||
"./*.dll",
|
||||
"./*.pyd",
|
||||
"*.pyi", "**/*.pyi", # Add the type hints
|
||||
]
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -24,10 +24,9 @@
|
|||
#include <gtsam/nonlinear/DoglegOptimizerImpl.h>
|
||||
#include <gtsam/nonlinear/NonlinearEquality.h>
|
||||
#include <gtsam/slam/BetweenFactor.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/linear/JacobianFactor.h>
|
||||
#include <gtsam/linear/GaussianBayesTree.h>
|
||||
#include <gtsam/base/numericalDerivative.h>
|
||||
#include <gtsam/nonlinear/ISAM2.h>
|
||||
#include <gtsam/slam/SmartProjectionPoseFactor.h>
|
||||
#include "examples/SFMdata.h"
|
||||
|
||||
#include <functional>
|
||||
|
||||
|
|
@ -36,7 +35,6 @@ using namespace gtsam;
|
|||
|
||||
// Convenience for named keys
|
||||
using symbol_shorthand::X;
|
||||
using symbol_shorthand::L;
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(DoglegOptimizer, ComputeBlend) {
|
||||
|
|
@ -185,6 +183,128 @@ TEST(DoglegOptimizer, Constraint) {
|
|||
#endif
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* Test created to fix issue in ISAM2 when using the DogLegOptimizer.
|
||||
* Originally reported by kvmanohar22 in issue #301
|
||||
* https://github.com/borglab/gtsam/issues/301
|
||||
*
|
||||
* This test is based on a script provided by kvmanohar22
|
||||
* to help reproduce the issue.
|
||||
*/
|
||||
TEST(DogLegOptimizer, VariableUpdate) {
|
||||
// Make the typename short so it looks much cleaner
|
||||
typedef SmartProjectionPoseFactor<Cal3_S2> SmartFactor;
|
||||
|
||||
// create a typedef to the camera type
|
||||
typedef PinholePose<Cal3_S2> Camera;
|
||||
// Define the camera calibration parameters
|
||||
Cal3_S2::shared_ptr K(new Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0));
|
||||
|
||||
// Define the camera observation noise model
|
||||
noiseModel::Isotropic::shared_ptr measurementNoise =
|
||||
noiseModel::Isotropic::Sigma(2, 1.0); // one pixel in u and v
|
||||
|
||||
// Create the set of ground-truth landmarks and poses
|
||||
vector<Point3> points = createPoints();
|
||||
vector<Pose3> poses = createPoses();
|
||||
|
||||
// Create a factor graph
|
||||
NonlinearFactorGraph graph;
|
||||
|
||||
ISAM2DoglegParams doglegparams = ISAM2DoglegParams();
|
||||
doglegparams.verbose = false;
|
||||
ISAM2Params isam2_params;
|
||||
isam2_params.evaluateNonlinearError = true;
|
||||
isam2_params.relinearizeThreshold = 0.0;
|
||||
isam2_params.enableRelinearization = true;
|
||||
isam2_params.optimizationParams = doglegparams;
|
||||
isam2_params.relinearizeSkip = 1;
|
||||
ISAM2 isam2(isam2_params);
|
||||
|
||||
// Simulated measurements from each camera pose, adding them to the factor
|
||||
// graph
|
||||
unordered_map<int, SmartFactor::shared_ptr> smart_factors;
|
||||
for (size_t j = 0; j < points.size(); ++j) {
|
||||
// every landmark represent a single landmark, we use shared pointer to init
|
||||
// the factor, and then insert measurements.
|
||||
SmartFactor::shared_ptr smartfactor(new SmartFactor(measurementNoise, K));
|
||||
|
||||
for (size_t i = 0; i < poses.size(); ++i) {
|
||||
// generate the 2D measurement
|
||||
Camera camera(poses[i], K);
|
||||
Point2 measurement = camera.project(points[j]);
|
||||
|
||||
// call add() function to add measurement into a single factor, here we
|
||||
// need to add:
|
||||
// 1. the 2D measurement
|
||||
// 2. the corresponding camera's key
|
||||
// 3. camera noise model
|
||||
// 4. camera calibration
|
||||
|
||||
// add only first 3 measurements and update the later measurements
|
||||
// incrementally
|
||||
if (i < 3) smartfactor->add(measurement, i);
|
||||
}
|
||||
|
||||
// insert the smart factor in the graph
|
||||
smart_factors[j] = smartfactor;
|
||||
graph.push_back(smartfactor);
|
||||
}
|
||||
|
||||
// Add a prior on pose x0. This indirectly specifies where the origin is.
|
||||
// 30cm std on x,y,z 0.1 rad on roll,pitch,yaw
|
||||
noiseModel::Diagonal::shared_ptr noise = noiseModel::Diagonal::Sigmas(
|
||||
(Vector(6) << Vector3::Constant(0.3), Vector3::Constant(0.1)).finished());
|
||||
graph.emplace_shared<PriorFactor<Pose3> >(0, poses[0], noise);
|
||||
|
||||
// Because the structure-from-motion problem has a scale ambiguity, the
|
||||
// problem is still under-constrained. Here we add a prior on the second pose
|
||||
// x1, so this will fix the scale by indicating the distance between x0 and
|
||||
// x1. Because these two are fixed, the rest of the poses will be also be
|
||||
// fixed.
|
||||
graph.emplace_shared<PriorFactor<Pose3> >(1, poses[1],
|
||||
noise); // add directly to graph
|
||||
|
||||
// Create the initial estimate to the solution
|
||||
// Intentionally initialize the variables off from the ground truth
|
||||
Values initialEstimate;
|
||||
Pose3 delta(Rot3::Rodrigues(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20));
|
||||
for (size_t i = 0; i < 3; ++i)
|
||||
initialEstimate.insert(i, poses[i].compose(delta));
|
||||
// initialEstimate.print("Initial Estimates:\n");
|
||||
|
||||
// Optimize the graph and print results
|
||||
isam2.update(graph, initialEstimate);
|
||||
Values result = isam2.calculateEstimate();
|
||||
// result.print("Results:\n");
|
||||
|
||||
// we add new measurements from this pose
|
||||
size_t pose_idx = 3;
|
||||
|
||||
// Now update existing smart factors with new observations
|
||||
for (size_t j = 0; j < points.size(); ++j) {
|
||||
SmartFactor::shared_ptr smartfactor = smart_factors[j];
|
||||
|
||||
// add the 4th measurement
|
||||
Camera camera(poses[pose_idx], K);
|
||||
Point2 measurement = camera.project(points[j]);
|
||||
smartfactor->add(measurement, pose_idx);
|
||||
}
|
||||
|
||||
graph.resize(0);
|
||||
initialEstimate.clear();
|
||||
|
||||
// update initial estimate for the new pose
|
||||
initialEstimate.insert(pose_idx, poses[pose_idx].compose(delta));
|
||||
|
||||
// this should break the system
|
||||
isam2.update(graph, initialEstimate);
|
||||
result = isam2.calculateEstimate();
|
||||
EXPECT(std::find(result.keys().begin(), result.keys().end(), pose_idx) !=
|
||||
result.keys().end());
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Entry point: run every registered CppUnitLite test and report the result.
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
|
||||
/* ************************************************************************* */
|
||||
|
|
|
|||
|
|
@ -994,6 +994,56 @@ TEST(ISAM2, calculate_nnz)
|
|||
EXPECT_LONGS_EQUAL(expected, actual);
|
||||
}
|
||||
|
||||
// Test factor whose active() state is fixed at construction time.
// Used to reproduce issue #1596: an inactive factor linearizes to nullptr
// (NonlinearFactor.cpp:156), which downstream elimination code must tolerate.
class FixActiveFactor : public NoiseModelFactorN<Vector2> {
  using Base = NoiseModelFactorN<Vector2>;
  bool is_active_;  // value returned by active() for this factor's lifetime

 public:
  // key:    variable this unary factor is attached to
  // active: whether the factor reports itself as active (never changes)
  // NOTE(review): a nullptr noise model is passed to the base — presumably
  // acceptable for a factor that is only symbolically eliminated; confirm.
  FixActiveFactor(const gtsam::Key& key, const bool active)
      : Base(nullptr, key), is_active_(active) {}

  // `virtual` removed: it is redundant (and discouraged, see C++ Core
  // Guidelines C.128) on a member already marked `override`.
  bool active(const gtsam::Values& values) const override {
    return is_active_;
  }

  // Trivial error function: zero error, identity Jacobian when requested.
  Vector evaluateError(const Vector2& x,
                       Base::OptionalMatrixTypeT<Vector2> H = nullptr)
      const override {
    if (H) {
      *H = Vector2::Identity();
    }
    return Vector2::Zero();
  }
};
|
||||
|
||||
// Regression test for issue #1596: an inactive derived NonlinearFactor
// linearizes to nullptr (NonlinearFactor.cpp:156), and EliminateSymbolic
// (SymbolicFactor-inst.h:45) used to dereference it without a null check.
TEST(ActiveFactorTesting, Issue1596) {
  const gtsam::Key key{Symbol('x', 0)};

  ISAM2 isam;
  Values initial;
  NonlinearFactorGraph newFactors;

  // First update: a factor that reports itself as active.
  initial.insert<Vector2>(key, Vector2::Zero());
  newFactors.emplace_shared<FixActiveFactor>(key, true);

  // This update succeeds regardless of the bug.
  isam.update(newFactors, initial);

  // Second update: a factor that is never active, with no new values.
  newFactors = NonlinearFactorGraph();
  newFactors.emplace_shared<FixActiveFactor>(key, false);

  // Without the nullptr validation in SymbolicFactor-inst.h:45, this call
  // throws; with the fix it completes normally.
  isam.update(newFactors);

  // Reaching this line means the bug is fixed: both factors were absorbed.
  EXPECT(isam.getFactorsUnsafe().size() == 2);
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Entry point: run every registered CppUnitLite test and report the result.
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
|
||||
/* ************************************************************************* */
|
||||
|
|
|
|||
Loading…
Reference in New Issue