diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index 4502dbe0e..da398ad23 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -56,7 +56,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install Dependencies run: | diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml index 72e27e3b6..e4c78bf67 100644 --- a/.github/workflows/build-macos.yml +++ b/.github/workflows/build-macos.yml @@ -26,6 +26,7 @@ jobs: # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. name: [ macos-12-xcode-14.2, + macos-14-xcode-15.4, ] build_type: [Debug, Release] @@ -36,9 +37,14 @@ jobs: compiler: xcode version: "14.2" + - name: macos-14-xcode-15.4 + os: macos-14 + compiler: xcode + version: "15.4" + steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install Dependencies run: | diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml index 037704a36..f581a5974 100644 --- a/.github/workflows/build-python.yml +++ b/.github/workflows/build-python.yml @@ -29,9 +29,9 @@ jobs: name: [ ubuntu-20.04-gcc-9, - ubuntu-20.04-gcc-9-tbb, ubuntu-20.04-clang-9, macos-12-xcode-14.2, + macos-14-xcode-15.4, windows-2022-msbuild, ] @@ -43,12 +43,6 @@ jobs: compiler: gcc version: "9" - - name: ubuntu-20.04-gcc-9-tbb - os: ubuntu-20.04 - compiler: gcc - version: "9" - flag: tbb - - name: ubuntu-20.04-clang-9 os: ubuntu-20.04 compiler: clang @@ -59,13 +53,18 @@ jobs: compiler: xcode version: "14.2" + - name: macos-14-xcode-15.4 + os: macos-14 + compiler: xcode + version: "15.4" + - name: windows-2022-msbuild os: windows-2022 platform: 64 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install (Linux) if: runner.os == 'Linux' @@ -139,7 +138,12 @@ jobs: # Use the prebuilt binary for Windows $Url = 
"https://sourceforge.net/projects/boost/files/boost-binaries/$env:BOOST_VERSION/$env:BOOST_EXE-${{matrix.platform}}.exe" - (New-Object System.Net.WebClient).DownloadFile($Url, "$env:TEMP\boost.exe") + + # Create WebClient with appropriate settings and download Boost exe + $wc = New-Object System.Net.Webclient + $wc.Headers.Add("User-Agent: Other"); + $wc.DownloadFile($Url, "$env:TEMP\boost.exe") + Start-Process -Wait -FilePath "$env:TEMP\boost.exe" "/SILENT","/SP-","/SUPPRESSMSGBOXES","/DIR=$BOOST_PATH" # Set the BOOST_ROOT variable @@ -162,6 +166,14 @@ jobs: run: | bash .github/scripts/python.sh -d + - name: Create virtual on MacOS + if: runner.os == 'macOS' + run: | + python$PYTHON_VERSION -m venv venv + source venv/bin/activate + echo "PATH=$(pwd)/venv/bin:$PATH" >> $GITHUB_ENV + python -m pip install --upgrade pip + - name: Install Python Dependencies shell: bash run: python$PYTHON_VERSION -m pip install -r python/dev_requirements.txt diff --git a/.github/workflows/build-special.yml b/.github/workflows/build-special.yml index 164646e3e..3a7dd974d 100644 --- a/.github/workflows/build-special.yml +++ b/.github/workflows/build-special.yml @@ -83,7 +83,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install (Linux) if: runner.os == 'Linux' diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml index dcf742c05..20b4a846f 100644 --- a/.github/workflows/build-windows.yml +++ b/.github/workflows/build-windows.yml @@ -44,7 +44,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup msbuild uses: ilammy/msvc-dev-cmd@v1 @@ -70,9 +70,6 @@ jobs: } if ("${{ matrix.compiler }}" -eq "gcc") { - # Chocolatey GCC is broken on the windows-2019 image. 
- # See: https://github.com/DaanDeMeyer/doctest/runs/231595515 - # See: https://github.community/t5/GitHub-Actions/Something-is-wrong-with-the-chocolatey-installed-version-of-gcc/td-p/32413 scoop install gcc --global echo "CC=gcc" >> $GITHUB_ENV echo "CXX=g++" >> $GITHUB_ENV @@ -98,7 +95,12 @@ jobs: # Use the prebuilt binary for Windows $Url = "https://sourceforge.net/projects/boost/files/boost-binaries/$env:BOOST_VERSION/$env:BOOST_EXE-${{matrix.platform}}.exe" - (New-Object System.Net.WebClient).DownloadFile($Url, "$env:TEMP\boost.exe") + + # Create WebClient with appropriate settings and download Boost exe + $wc = New-Object System.Net.Webclient + $wc.Headers.Add("User-Agent: Other"); + $wc.DownloadFile($Url, "$env:TEMP\boost.exe") + Start-Process -Wait -FilePath "$env:TEMP\boost.exe" "/SILENT","/SP-","/SUPPRESSMSGBOXES","/DIR=$BOOST_PATH" # Set the BOOST_ROOT variable diff --git a/CMakeLists.txt b/CMakeLists.txt index 66b1804af..3306e3470 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,16 @@ -cmake_minimum_required(VERSION 3.0) +cmake_minimum_required(VERSION 3.5) +if (POLICY CMP0082) + cmake_policy(SET CMP0082 NEW) # install from sub-directories immediately +endif() +if (POLICY CMP0102) +cmake_policy(SET CMP0102 NEW) # set policy on advanced variables and cmake cache +endif() +if (POLICY CMP0156) +cmake_policy(SET CMP0156 NEW) # new linker strategies +endif() +if (POLICY CMP0167) +cmake_policy(SET CMP0167 OLD) # Don't complain about boost +endif() # Set the version number for the library set (GTSAM_VERSION_MAJOR 4) diff --git a/CppUnitLite/CMakeLists.txt b/CppUnitLite/CMakeLists.txt index cbffa79d1..e3b2dfadb 100644 --- a/CppUnitLite/CMakeLists.txt +++ b/CppUnitLite/CMakeLists.txt @@ -6,6 +6,7 @@ file(GLOB cppunitlite_src "*.cpp") add_library(CppUnitLite STATIC ${cppunitlite_src} ${cppunitlite_headers}) list(APPEND GTSAM_EXPORTED_TARGETS CppUnitLite) set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE) 
+target_compile_features(CppUnitLite PUBLIC ${GTSAM_COMPILE_FEATURES_PUBLIC}) gtsam_assign_source_folders("${cppunitlite_headers};${cppunitlite_src}") # MSVC project structure diff --git a/cmake/Config.cmake.in b/cmake/Config.cmake.in index 36906d090..338ff8500 100644 --- a/cmake/Config.cmake.in +++ b/cmake/Config.cmake.in @@ -15,10 +15,12 @@ endif() # Find dependencies, required by cmake exported targets: include(CMakeFindDependencyMacro) # Allow using cmake < 3.8 -if(${CMAKE_VERSION} VERSION_LESS "3.8.0") -find_package(Boost @BOOST_FIND_MINIMUM_VERSION@ COMPONENTS @BOOST_FIND_MINIMUM_COMPONENTS@) -else() -find_dependency(Boost @BOOST_FIND_MINIMUM_VERSION@ COMPONENTS @BOOST_FIND_MINIMUM_COMPONENTS@) +if (@GTSAM_ENABLE_BOOST_SERIALIZATION@ OR @GTSAM_USE_BOOST_FEATURES@) + if(${CMAKE_VERSION} VERSION_LESS "3.8.0") + find_package(Boost @BOOST_FIND_MINIMUM_VERSION@ COMPONENTS @BOOST_FIND_MINIMUM_COMPONENTS@) + else() + find_dependency(Boost @BOOST_FIND_MINIMUM_VERSION@ COMPONENTS @BOOST_FIND_MINIMUM_COMPONENTS@) + endif() endif() if(@GTSAM_USE_TBB@) diff --git a/cmake/HandleBoost.cmake b/cmake/HandleBoost.cmake index 6c742cfe5..03251126e 100644 --- a/cmake/HandleBoost.cmake +++ b/cmake/HandleBoost.cmake @@ -25,7 +25,7 @@ endif() set(BOOST_FIND_MINIMUM_VERSION 1.65) set(BOOST_FIND_MINIMUM_COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex) -find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS}) +find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS} REQUIRED) # Required components if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR diff --git a/cmake/example_cmake_find_gtsam/CMakeLists.txt b/cmake/example_cmake_find_gtsam/CMakeLists.txt index d020f7032..05216d3f3 100644 --- a/cmake/example_cmake_find_gtsam/CMakeLists.txt +++ b/cmake/example_cmake_find_gtsam/CMakeLists.txt @@ -1,7 +1,7 @@ # This file shows 
how to build and link a user project against GTSAM using CMake ################################################################################### # To create your own project, replace "example" with the actual name of your project -cmake_minimum_required(VERSION 3.0) +cmake_minimum_required(VERSION 3.5) project(example CXX) # Find GTSAM, either from a local build, or from a Debian/Ubuntu package. diff --git a/doc/Hybrid.lyx b/doc/Hybrid.lyx index 44df81e39..a5fdd5d0a 100644 --- a/doc/Hybrid.lyx +++ b/doc/Hybrid.lyx @@ -191,17 +191,17 @@ E_{gc}(x,y)=\frac{1}{2}\|Rx+Sy-d\|_{\Sigma}^{2}.\label{eq:gc_error} \end_layout \begin_layout Subsubsection* -GaussianMixture +HybridGaussianConditional \end_layout \begin_layout Standard A \emph on -GaussianMixture +HybridGaussianConditional \emph default (maybe to be renamed to \emph on -GaussianMixtureComponent +HybridGaussianConditionalComponent \emph default ) just indexes into a number of \emph on @@ -233,7 +233,7 @@ GaussianConditional to a set of discrete variables. 
As \emph on -GaussianMixture +HybridGaussianConditional \emph default is a \emph on @@ -324,7 +324,7 @@ The key point here is that \color inherit is the log-normalization constant for the complete \emph on -GaussianMixture +HybridGaussianConditional \emph default across all values of \begin_inset Formula $m$ @@ -548,15 +548,15 @@ with \end_layout \begin_layout Subsubsection* -GaussianMixtureFactor +HybridGaussianFactor \end_layout \begin_layout Standard Analogously, a \emph on -GaussianMixtureFactor +HybridGaussianFactor \emph default - typically results from a GaussianMixture by having known values + typically results from a HybridGaussianConditional by having known values \begin_inset Formula $\bar{x}$ \end_inset @@ -817,7 +817,7 @@ E_{mf}(y,m)=\frac{1}{2}\|A_{m}y-b_{m}\|_{\Sigma_{mfm}}^{2}=E_{gcm}(\bar{x},y)+K_ \end_inset -which is identical to the GaussianMixture error +which is identical to the HybridGaussianConditional error \begin_inset CommandInset ref LatexCommand eqref reference "eq:gm_error" diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/TriangularSolver.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/TriangularSolver.h index f9c56ba79..6b5fdb3e6 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/TriangularSolver.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/TriangularSolver.h @@ -270,11 +270,9 @@ struct sparse_solve_triangular_sparse_selector } - Index count = 0; // FIXME compute a reference value to filter zeros for (typename AmbiVector::Iterator it(tempVector/*,1e-12*/); it; ++it) { - ++ count; // std::cerr << "fill " << it.index() << ", " << col << "\n"; // std::cout << it.value() << " "; // FIXME use insertBack diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h index 6f75d500e..7aecbcad8 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +++ 
b/gtsam/3rdparty/Eigen/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h @@ -75,8 +75,6 @@ void SparseLUImpl::heap_relax_snode (const Index n, IndexVe // Identify the relaxed supernodes by postorder traversal of the etree Index snode_start; // beginning of a snode StorageIndex k; - Index nsuper_et_post = 0; // Number of relaxed snodes in postordered etree - Index nsuper_et = 0; // Number of relaxed snodes in the original etree StorageIndex l; for (j = 0; j < n; ) { @@ -88,7 +86,6 @@ void SparseLUImpl::heap_relax_snode (const Index n, IndexVe parent = et(j); } // Found a supernode in postordered etree, j is the last column - ++nsuper_et_post; k = StorageIndex(n); for (Index i = snode_start; i <= j; ++i) k = (std::min)(k, inv_post(i)); @@ -97,7 +94,6 @@ void SparseLUImpl::heap_relax_snode (const Index n, IndexVe { // This is also a supernode in the original etree relax_end(k) = l; // Record last column - ++nsuper_et; } else { @@ -107,7 +103,6 @@ void SparseLUImpl::heap_relax_snode (const Index n, IndexVe if (descendants(i) == 0) { relax_end(l) = l; - ++nsuper_et; } } } diff --git a/gtsam/3rdparty/metis/GKlib/pdb.c b/gtsam/3rdparty/metis/GKlib/pdb.c index b4d222653..018846604 100644 --- a/gtsam/3rdparty/metis/GKlib/pdb.c +++ b/gtsam/3rdparty/metis/GKlib/pdb.c @@ -131,7 +131,7 @@ that structure. 
/************************************************************************/ pdbf *gk_readpdbfile(char *fname) { /* {{{ */ int i=0, res=0; - char linetype[6]; + char linetype[7]; int aserial; char aname[5] = " \0"; char altLoc = ' '; diff --git a/gtsam/discrete/AlgebraicDecisionTree.h b/gtsam/discrete/AlgebraicDecisionTree.h index 9f55f3b63..17385a975 100644 --- a/gtsam/discrete/AlgebraicDecisionTree.h +++ b/gtsam/discrete/AlgebraicDecisionTree.h @@ -196,6 +196,42 @@ namespace gtsam { return this->apply(g, &Ring::div); } + /// Compute sum of all values + double sum() const { + double sum = 0; + auto visitor = [&](double y) { sum += y; }; + this->visit(visitor); + return sum; + } + + /** + * @brief Helper method to perform normalization such that all leaves in the + * tree sum to 1 + * + * @param sum + * @return AlgebraicDecisionTree + */ + AlgebraicDecisionTree normalize(double sum) const { + return this->apply([&sum](const double& x) { return x / sum; }); + } + + /// Find the minimum values amongst all leaves + double min() const { + double min = std::numeric_limits::max(); + auto visitor = [&](double x) { min = x < min ? x : min; }; + this->visit(visitor); + return min; + } + + /// Find the maximum values amongst all leaves + double max() const { + // Get the most negative value + double max = -std::numeric_limits::max(); + auto visitor = [&](double x) { max = x > max ? 
x : max; }; + this->visit(visitor); + return max; + } + /** sum out variable */ AlgebraicDecisionTree sum(const L& label, size_t cardinality) const { return this->combine(label, cardinality, &Ring::add); diff --git a/gtsam/discrete/DecisionTree-inl.h b/gtsam/discrete/DecisionTree-inl.h index 06161c2e1..27e98fcde 100644 --- a/gtsam/discrete/DecisionTree-inl.h +++ b/gtsam/discrete/DecisionTree-inl.h @@ -91,7 +91,7 @@ namespace gtsam { void dot(std::ostream& os, const LabelFormatter& labelFormatter, const ValueFormatter& valueFormatter, bool showZero) const override { - std::string value = valueFormatter(constant_); + const std::string value = valueFormatter(constant_); if (showZero || value.compare("0")) os << "\"" << this->id() << "\" [label=\"" << value << "\", shape=box, rank=sink, height=0.35, fixedsize=true]\n"; @@ -306,7 +306,8 @@ namespace gtsam { void dot(std::ostream& os, const LabelFormatter& labelFormatter, const ValueFormatter& valueFormatter, bool showZero) const override { - os << "\"" << this->id() << "\" [shape=circle, label=\"" << label_ + const std::string label = labelFormatter(label_); + os << "\"" << this->id() << "\" [shape=circle, label=\"" << label << "\"]\n"; size_t B = branches_.size(); for (size_t i = 0; i < B; i++) { diff --git a/gtsam/discrete/DecisionTreeFactor.cpp b/gtsam/discrete/DecisionTreeFactor.cpp index c56818448..d1b68f4bf 100644 --- a/gtsam/discrete/DecisionTreeFactor.cpp +++ b/gtsam/discrete/DecisionTreeFactor.cpp @@ -147,14 +147,14 @@ namespace gtsam { size_t i; ADT result(*this); for (i = 0; i < nrFrontals; i++) { - Key j = keys()[i]; + Key j = keys_[i]; result = result.combine(j, cardinality(j), op); } - // create new factor, note we start keys after nrFrontals + // Create new factor, note we start with keys after nrFrontals: DiscreteKeys dkeys; - for (; i < keys().size(); i++) { - Key j = keys()[i]; + for (; i < keys_.size(); i++) { + Key j = keys_[i]; dkeys.push_back(DiscreteKey(j, cardinality(j))); } return 
std::make_shared(dkeys, result); @@ -179,24 +179,22 @@ namespace gtsam { result = result.combine(j, cardinality(j), op); } - // create new factor, note we collect keys that are not in frontalKeys /* - Due to branch merging, the labels in `result` may be missing some keys + Create new factor, note we collect keys that are not in frontalKeys. + + Due to branch merging, the labels in `result` may be missing some keys. E.g. After branch merging, we may get a ADT like: Leaf [2] 1.0204082 - This is missing the key values used for branching. + Hence, code below traverses the original keys and omits those in + frontalKeys. We loop over cardinalities, which is O(n) even for a map, and + then "contains" is a binary search on a small vector. */ - KeyVector difference, frontalKeys_(frontalKeys), keys_(keys()); - // Get the difference of the frontalKeys and the factor keys using set_difference - std::sort(keys_.begin(), keys_.end()); - std::sort(frontalKeys_.begin(), frontalKeys_.end()); - std::set_difference(keys_.begin(), keys_.end(), frontalKeys_.begin(), - frontalKeys_.end(), back_inserter(difference)); - DiscreteKeys dkeys; - for (Key key : difference) { - dkeys.push_back(DiscreteKey(key, cardinality(key))); + for (auto&& [key, cardinality] : cardinalities_) { + if (!frontalKeys.contains(key)) { + dkeys.push_back(DiscreteKey(key, cardinality)); + } } return std::make_shared(dkeys, result); } diff --git a/gtsam/discrete/DiscreteBayesNet.cpp b/gtsam/discrete/DiscreteBayesNet.cpp index c1aa18828..56265b0a4 100644 --- a/gtsam/discrete/DiscreteBayesNet.cpp +++ b/gtsam/discrete/DiscreteBayesNet.cpp @@ -18,8 +18,6 @@ #include #include -#include -#include #include namespace gtsam { @@ -60,7 +58,12 @@ DiscreteValues DiscreteBayesNet::sample(DiscreteValues result) const { // sample each node in turn in topological sort order (parents first) for (auto it = std::make_reverse_iterator(end()); it != std::make_reverse_iterator(begin()); ++it) { - (*it)->sampleInPlace(&result); + const 
DiscreteConditional::shared_ptr& conditional = *it; + // Sample the conditional only if value for j not already in result + const Key j = conditional->firstFrontalKey(); + if (result.count(j) == 0) { + conditional->sampleInPlace(&result); + } } return result; } diff --git a/gtsam/discrete/DiscreteConditional.cpp b/gtsam/discrete/DiscreteConditional.cpp index 9cf19638c..5ab0c59ec 100644 --- a/gtsam/discrete/DiscreteConditional.cpp +++ b/gtsam/discrete/DiscreteConditional.cpp @@ -259,8 +259,18 @@ size_t DiscreteConditional::argmax(const DiscreteValues& parentsValues) const { /* ************************************************************************** */ void DiscreteConditional::sampleInPlace(DiscreteValues* values) const { - assert(nrFrontals() == 1); - Key j = (firstFrontalKey()); + // throw if more than one frontal: + if (nrFrontals() != 1) { + throw std::invalid_argument( + "DiscreteConditional::sampleInPlace can only be called on single " + "variable conditionals"); + } + Key j = firstFrontalKey(); + // throw if values already contains j: + if (values->count(j) > 0) { + throw std::invalid_argument( + "DiscreteConditional::sampleInPlace: values already contains j"); + } size_t sampled = sample(*values); // Sample variable given parents (*values)[j] = sampled; // store result in partial solution } @@ -465,6 +475,10 @@ string DiscreteConditional::html(const KeyFormatter& keyFormatter, double DiscreteConditional::evaluate(const HybridValues& x) const { return this->evaluate(x.discrete()); } + +/* ************************************************************************* */ +double DiscreteConditional::negLogConstant() const { return 0.0; } + /* ************************************************************************* */ } // namespace gtsam diff --git a/gtsam/discrete/DiscreteConditional.h b/gtsam/discrete/DiscreteConditional.h index 4c7af1489..f59e29285 100644 --- a/gtsam/discrete/DiscreteConditional.h +++ b/gtsam/discrete/DiscreteConditional.h @@ -168,7 +168,7 @@ 
class GTSAM_EXPORT DiscreteConditional static_cast(this)->print(s, formatter); } - /// Evaluate, just look up in AlgebraicDecisonTree + /// Evaluate, just look up in AlgebraicDecisionTree double evaluate(const DiscreteValues& values) const { return ADT::operator()(values); } @@ -264,11 +264,12 @@ class GTSAM_EXPORT DiscreteConditional } /** - * logNormalizationConstant K is just zero, such that - * logProbability(x) = log(evaluate(x)) = - error(x) - * and hence error(x) = - log(evaluate(x)) > 0 for all x. + * negLogConstant is just zero, such that + * -logProbability(x) = -log(evaluate(x)) = error(x) + * and hence error(x) > 0 for all x. + * Thus -log(K) for the normalization constant k is 0. */ - double logNormalizationConstant() const override { return 0.0; } + double negLogConstant() const override; /// @} diff --git a/gtsam/discrete/discrete.i b/gtsam/discrete/discrete.i index 0bdebd0e1..55c8f9e22 100644 --- a/gtsam/discrete/discrete.i +++ b/gtsam/discrete/discrete.i @@ -112,7 +112,7 @@ virtual class DiscreteConditional : gtsam::DecisionTreeFactor { const std::vector& table); // Standard interface - double logNormalizationConstant() const; + double negLogConstant() const; double logProbability(const gtsam::DiscreteValues& values) const; double evaluate(const gtsam::DiscreteValues& values) const; double error(const gtsam::DiscreteValues& values) const; diff --git a/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp b/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp index 19f4686c2..ffb1f0b5a 100644 --- a/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp +++ b/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp @@ -20,12 +20,9 @@ #include // make sure we have traits #include // headers first to make sure no missing headers +#include #include #include // for convert only -#define DISABLE_TIMING - -#include -#include #include using namespace std; @@ -71,16 +68,14 @@ void dot(const T& f, const string& filename) { // instrumented operators /* 
************************************************************************** */ size_t muls = 0, adds = 0; -double elapsed; void resetCounts() { muls = 0; adds = 0; } void printCounts(const string& s) { #ifndef DISABLE_TIMING -cout << s << ": " << std::setw(3) << muls << " muls, " << - std::setw(3) << adds << " adds, " << 1000 * elapsed << " ms." - << endl; + cout << s << ": " << std::setw(3) << muls << " muls, " << std::setw(3) << adds + << " adds" << endl; #endif resetCounts(); } @@ -131,37 +126,35 @@ ADT create(const Signature& signature) { static size_t count = 0; const DiscreteKey& key = signature.key(); std::stringstream ss; - ss << "CPT-" << std::setw(3) << std::setfill('0') << ++count << "-" << key.first; + ss << "CPT-" << std::setw(3) << std::setfill('0') << ++count << "-" + << key.first; string DOTfile = ss.str(); dot(p, DOTfile); return p; } +/* ************************************************************************* */ +namespace asiaCPTs { +DiscreteKey A(0, 2), S(1, 2), T(2, 2), L(3, 2), B(4, 2), E(5, 2), X(6, 2), + D(7, 2); + +ADT pA = create(A % "99/1"); +ADT pS = create(S % "50/50"); +ADT pT = create(T | A = "99/1 95/5"); +ADT pL = create(L | S = "99/1 90/10"); +ADT pB = create(B | S = "70/30 40/60"); +ADT pE = create((E | T, L) = "F T T T"); +ADT pX = create(X | E = "95/5 2/98"); +ADT pD = create((D | E, B) = "9/1 2/8 3/7 1/9"); +} // namespace asiaCPTs + /* ************************************************************************* */ // test Asia Joint TEST(ADT, joint) { - DiscreteKey A(0, 2), S(1, 2), T(2, 2), L(3, 2), B(4, 2), E(5, 2), X(6, 2), - D(7, 2); - - resetCounts(); - gttic_(asiaCPTs); - ADT pA = create(A % "99/1"); - ADT pS = create(S % "50/50"); - ADT pT = create(T | A = "99/1 95/5"); - ADT pL = create(L | S = "99/1 90/10"); - ADT pB = create(B | S = "70/30 40/60"); - ADT pE = create((E | T, L) = "F T T T"); - ADT pX = create(X | E = "95/5 2/98"); - ADT pD = create((D | E, B) = "9/1 2/8 3/7 1/9"); - gttoc_(asiaCPTs); - 
tictoc_getNode(asiaCPTsNode, asiaCPTs); - elapsed = asiaCPTsNode->secs() + asiaCPTsNode->wall(); - tictoc_reset_(); - printCounts("Asia CPTs"); + using namespace asiaCPTs; // Create joint resetCounts(); - gttic_(asiaJoint); ADT joint = pA; dot(joint, "Asia-A"); joint = apply(joint, pS, &mul); @@ -183,11 +176,12 @@ TEST(ADT, joint) { #else EXPECT_LONGS_EQUAL(508, muls); #endif - gttoc_(asiaJoint); - tictoc_getNode(asiaJointNode, asiaJoint); - elapsed = asiaJointNode->secs() + asiaJointNode->wall(); - tictoc_reset_(); printCounts("Asia joint"); +} + +/* ************************************************************************* */ +TEST(ADT, combine) { + using namespace asiaCPTs; // Form P(A,S,T,L) = P(A) P(S) P(T|A) P(L|S) ADT pASTL = pA; @@ -203,13 +197,11 @@ TEST(ADT, joint) { } /* ************************************************************************* */ -// test Inference with joint +// test Inference with joint, created using different ordering TEST(ADT, inference) { DiscreteKey A(0, 2), D(1, 2), // B(2, 2), L(3, 2), E(4, 2), S(5, 2), T(6, 2), X(7, 2); - resetCounts(); - gttic_(infCPTs); ADT pA = create(A % "99/1"); ADT pS = create(S % "50/50"); ADT pT = create(T | A = "99/1 95/5"); @@ -218,15 +210,9 @@ TEST(ADT, inference) { ADT pE = create((E | T, L) = "F T T T"); ADT pX = create(X | E = "95/5 2/98"); ADT pD = create((D | E, B) = "9/1 2/8 3/7 1/9"); - gttoc_(infCPTs); - tictoc_getNode(infCPTsNode, infCPTs); - elapsed = infCPTsNode->secs() + infCPTsNode->wall(); - tictoc_reset_(); - // printCounts("Inference CPTs"); - // Create joint + // Create joint, note different ordering than above: different tree! 
resetCounts(); - gttic_(asiaProd); ADT joint = pA; dot(joint, "Joint-Product-A"); joint = apply(joint, pS, &mul); @@ -248,14 +234,9 @@ TEST(ADT, inference) { #else EXPECT_LONGS_EQUAL(508, (long)muls); // different ordering #endif - gttoc_(asiaProd); - tictoc_getNode(asiaProdNode, asiaProd); - elapsed = asiaProdNode->secs() + asiaProdNode->wall(); - tictoc_reset_(); printCounts("Asia product"); resetCounts(); - gttic_(asiaSum); ADT marginal = joint; marginal = marginal.combine(X, &add_); dot(marginal, "Joint-Sum-ADBLEST"); @@ -270,10 +251,6 @@ TEST(ADT, inference) { #else EXPECT_LONGS_EQUAL(240, (long)adds); #endif - gttoc_(asiaSum); - tictoc_getNode(asiaSumNode, asiaSum); - elapsed = asiaSumNode->secs() + asiaSumNode->wall(); - tictoc_reset_(); printCounts("Asia sum"); } @@ -281,8 +258,6 @@ TEST(ADT, inference) { TEST(ADT, factor_graph) { DiscreteKey B(0, 2), L(1, 2), E(2, 2), S(3, 2), T(4, 2), X(5, 2); - resetCounts(); - gttic_(createCPTs); ADT pS = create(S % "50/50"); ADT pT = create(T % "95/5"); ADT pL = create(L | S = "99/1 90/10"); @@ -290,15 +265,9 @@ TEST(ADT, factor_graph) { ADT pX = create(X | E = "95/5 2/98"); ADT pD = create(B | E = "1/8 7/9"); ADT pB = create(B | S = "70/30 40/60"); - gttoc_(createCPTs); - tictoc_getNode(createCPTsNode, createCPTs); - elapsed = createCPTsNode->secs() + createCPTsNode->wall(); - tictoc_reset_(); - // printCounts("Create CPTs"); // Create joint resetCounts(); - gttic_(asiaFG); ADT fg = pS; fg = apply(fg, pT, &mul); fg = apply(fg, pL, &mul); @@ -312,14 +281,9 @@ TEST(ADT, factor_graph) { #else EXPECT_LONGS_EQUAL(188, (long)muls); #endif - gttoc_(asiaFG); - tictoc_getNode(asiaFGNode, asiaFG); - elapsed = asiaFGNode->secs() + asiaFGNode->wall(); - tictoc_reset_(); printCounts("Asia FG"); resetCounts(); - gttic_(marg); fg = fg.combine(X, &add_); dot(fg, "Marginalized-6X"); fg = fg.combine(T, &add_); @@ -335,83 +299,54 @@ TEST(ADT, factor_graph) { #else LONGS_EQUAL(62, adds); #endif - gttoc_(marg); - tictoc_getNode(margNode, 
marg); - elapsed = margNode->secs() + margNode->wall(); - tictoc_reset_(); printCounts("marginalize"); // BLESTX // Eliminate X resetCounts(); - gttic_(elimX); ADT fE = pX; dot(fE, "Eliminate-01-fEX"); fE = fE.combine(X, &add_); dot(fE, "Eliminate-02-fE"); - gttoc_(elimX); - tictoc_getNode(elimXNode, elimX); - elapsed = elimXNode->secs() + elimXNode->wall(); - tictoc_reset_(); printCounts("Eliminate X"); // Eliminate T resetCounts(); - gttic_(elimT); ADT fLE = pT; fLE = apply(fLE, pE, &mul); dot(fLE, "Eliminate-03-fLET"); fLE = fLE.combine(T, &add_); dot(fLE, "Eliminate-04-fLE"); - gttoc_(elimT); - tictoc_getNode(elimTNode, elimT); - elapsed = elimTNode->secs() + elimTNode->wall(); - tictoc_reset_(); printCounts("Eliminate T"); // Eliminate S resetCounts(); - gttic_(elimS); ADT fBL = pS; fBL = apply(fBL, pL, &mul); fBL = apply(fBL, pB, &mul); dot(fBL, "Eliminate-05-fBLS"); fBL = fBL.combine(S, &add_); dot(fBL, "Eliminate-06-fBL"); - gttoc_(elimS); - tictoc_getNode(elimSNode, elimS); - elapsed = elimSNode->secs() + elimSNode->wall(); - tictoc_reset_(); printCounts("Eliminate S"); // Eliminate E resetCounts(); - gttic_(elimE); ADT fBL2 = fE; fBL2 = apply(fBL2, fLE, &mul); fBL2 = apply(fBL2, pD, &mul); dot(fBL2, "Eliminate-07-fBLE"); fBL2 = fBL2.combine(E, &add_); dot(fBL2, "Eliminate-08-fBL2"); - gttoc_(elimE); - tictoc_getNode(elimENode, elimE); - elapsed = elimENode->secs() + elimENode->wall(); - tictoc_reset_(); printCounts("Eliminate E"); // Eliminate L resetCounts(); - gttic_(elimL); ADT fB = fBL; fB = apply(fB, fBL2, &mul); dot(fB, "Eliminate-09-fBL"); fB = fB.combine(L, &add_); dot(fB, "Eliminate-10-fB"); - gttoc_(elimL); - tictoc_getNode(elimLNode, elimL); - elapsed = elimLNode->secs() + elimLNode->wall(); - tictoc_reset_(); printCounts("Eliminate L"); } @@ -593,6 +528,55 @@ TEST(ADT, zero) { EXPECT_DOUBLES_EQUAL(0, anotb(x11), 1e-9); } +/// Example ADT from 0 to 11. 
+ADT exampleADT() { + DiscreteKey A(0, 2), B(1, 3), C(2, 2); + ADT f(A & B & C, "0 6 2 8 4 10 1 7 3 9 5 11"); + return f; +} +/* ************************************************************************** */ +// Test sum +TEST(ADT, Sum) { + ADT a = exampleADT(); + double expected_sum = 0; + for (double i = 0; i < 12; i++) { + expected_sum += i; + } + EXPECT_DOUBLES_EQUAL(expected_sum, a.sum(), 1e-9); +} + +/* ************************************************************************** */ +// Test normalize +TEST(ADT, Normalize) { + ADT a = exampleADT(); + double sum = a.sum(); + auto actual = a.normalize(sum); + + DiscreteKey A(0, 2), B(1, 3), C(2, 2); + DiscreteKeys keys = DiscreteKeys{A, B, C}; + std::vector cpt{0 / sum, 6 / sum, 2 / sum, 8 / sum, + 4 / sum, 10 / sum, 1 / sum, 7 / sum, + 3 / sum, 9 / sum, 5 / sum, 11 / sum}; + ADT expected(keys, cpt); + EXPECT(assert_equal(expected, actual)); +} + +/* ************************************************************************** */ +// Test min +TEST(ADT, Min) { + ADT a = exampleADT(); + double min = a.min(); + EXPECT_DOUBLES_EQUAL(0.0, min, 1e-9); +} + +/* ************************************************************************** */ +// Test max +TEST(ADT, Max) { + ADT a = exampleADT(); + double max = a.max(); + EXPECT_DOUBLES_EQUAL(11.0, max, 1e-9); +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/discrete/tests/testDecisionTreeFactor.cpp b/gtsam/discrete/tests/testDecisionTreeFactor.cpp index d764da7bf..a41d06c2b 100644 --- a/gtsam/discrete/tests/testDecisionTreeFactor.cpp +++ b/gtsam/discrete/tests/testDecisionTreeFactor.cpp @@ -22,7 +22,10 @@ #include #include #include +#include #include +#include +#include using namespace std; using namespace gtsam; @@ -33,25 +36,24 @@ TEST(DecisionTreeFactor, ConstructorsMatch) { DiscreteKey X(0, 2), Y(1, 3); // Create with vector and with string - const std::vector table {2, 5, 3, 6, 4, 7}; + 
const std::vector table{2, 5, 3, 6, 4, 7}; DecisionTreeFactor f1({X, Y}, table); DecisionTreeFactor f2({X, Y}, "2 5 3 6 4 7"); EXPECT(assert_equal(f1, f2)); } /* ************************************************************************* */ -TEST( DecisionTreeFactor, constructors) -{ +TEST(DecisionTreeFactor, constructors) { // Declare a bunch of keys - DiscreteKey X(0,2), Y(1,3), Z(2,2); + DiscreteKey X(0, 2), Y(1, 3), Z(2, 2); // Create factors DecisionTreeFactor f1(X, {2, 8}); DecisionTreeFactor f2(X & Y, "2 5 3 6 4 7"); DecisionTreeFactor f3(X & Y & Z, "2 5 3 6 4 7 25 55 35 65 45 75"); - EXPECT_LONGS_EQUAL(1,f1.size()); - EXPECT_LONGS_EQUAL(2,f2.size()); - EXPECT_LONGS_EQUAL(3,f3.size()); + EXPECT_LONGS_EQUAL(1, f1.size()); + EXPECT_LONGS_EQUAL(2, f2.size()); + EXPECT_LONGS_EQUAL(3, f3.size()); DiscreteValues x121{{0, 1}, {1, 2}, {2, 1}}; EXPECT_DOUBLES_EQUAL(8, f1(x121), 1e-9); @@ -70,7 +72,7 @@ TEST( DecisionTreeFactor, constructors) /* ************************************************************************* */ TEST(DecisionTreeFactor, Error) { // Declare a bunch of keys - DiscreteKey X(0,2), Y(1,3), Z(2,2); + DiscreteKey X(0, 2), Y(1, 3), Z(2, 2); // Create factors DecisionTreeFactor f(X & Y & Z, "2 5 3 6 4 7 25 55 35 65 45 75"); @@ -104,9 +106,8 @@ TEST(DecisionTreeFactor, multiplication) { } /* ************************************************************************* */ -TEST( DecisionTreeFactor, sum_max) -{ - DiscreteKey v0(0,3), v1(1,2); +TEST(DecisionTreeFactor, sum_max) { + DiscreteKey v0(0, 3), v1(1, 2); DecisionTreeFactor f1(v0 & v1, "1 2 3 4 5 6"); DecisionTreeFactor expected(v1, "9 12"); @@ -165,22 +166,85 @@ TEST(DecisionTreeFactor, Prune) { "0.0 0.0 0.0 0.60658897 0.61241912 0.61241969 0.61247685 0.61247742 0.0 " "0.0 0.0 0.99995287 1.0 1.0 1.0 1.0"); - DecisionTreeFactor expected3( - D & C & B & A, - "0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 " - "0.999952870000 1.0 1.0 1.0 1.0"); + DecisionTreeFactor expected3(D & C & B & A, + "0.0 0.0 0.0 0.0 
0.0 0.0 0.0 0.0 0.0 0.0 0.0 " + "0.999952870000 1.0 1.0 1.0 1.0"); maxNrAssignments = 5; auto pruned3 = factor.prune(maxNrAssignments); EXPECT(assert_equal(expected3, pruned3)); } +/* ************************************************************************** */ +// Asia Bayes Network +/* ************************************************************************** */ + +#define DISABLE_DOT + +void maybeSaveDotFile(const DecisionTreeFactor& f, const string& filename) { +#ifndef DISABLE_DOT + std::vector names = {"A", "S", "T", "L", "B", "E", "X", "D"}; + auto formatter = [&](Key key) { return names[key]; }; + f.dot(filename, formatter, true); +#endif +} + +/** Convert Signature into CPT */ +DecisionTreeFactor create(const Signature& signature) { + DecisionTreeFactor p(signature.discreteKeys(), signature.cpt()); + return p; +} + +/* ************************************************************************* */ +// test Asia Joint +TEST(DecisionTreeFactor, joint) { + DiscreteKey A(0, 2), S(1, 2), T(2, 2), L(3, 2), B(4, 2), E(5, 2), X(6, 2), + D(7, 2); + + gttic_(asiaCPTs); + DecisionTreeFactor pA = create(A % "99/1"); + DecisionTreeFactor pS = create(S % "50/50"); + DecisionTreeFactor pT = create(T | A = "99/1 95/5"); + DecisionTreeFactor pL = create(L | S = "99/1 90/10"); + DecisionTreeFactor pB = create(B | S = "70/30 40/60"); + DecisionTreeFactor pE = create((E | T, L) = "F T T T"); + DecisionTreeFactor pX = create(X | E = "95/5 2/98"); + DecisionTreeFactor pD = create((D | E, B) = "9/1 2/8 3/7 1/9"); + + // Create joint + gttic_(asiaJoint); + DecisionTreeFactor joint = pA; + maybeSaveDotFile(joint, "Asia-A"); + joint = joint * pS; + maybeSaveDotFile(joint, "Asia-AS"); + joint = joint * pT; + maybeSaveDotFile(joint, "Asia-AST"); + joint = joint * pL; + maybeSaveDotFile(joint, "Asia-ASTL"); + joint = joint * pB; + maybeSaveDotFile(joint, "Asia-ASTLB"); + joint = joint * pE; + maybeSaveDotFile(joint, "Asia-ASTLBE"); + joint = joint * pX; + maybeSaveDotFile(joint, 
"Asia-ASTLBEX"); + joint = joint * pD; + maybeSaveDotFile(joint, "Asia-ASTLBEXD"); + + // Check that discrete keys are as expected + EXPECT(assert_equal(joint.discreteKeys(), {A, S, T, L, B, E, X, D})); + + // Check that summing out variables maintains the keys even if merged, as is + // the case with S. + auto noAB = joint.sum(Ordering{A.first, B.first}); + EXPECT(assert_equal(noAB->discreteKeys(), {S, T, L, E, X, D})); +} + /* ************************************************************************* */ TEST(DecisionTreeFactor, DotWithNames) { DiscreteKey A(12, 3), B(5, 2); DecisionTreeFactor f(A & B, "1 2 3 4 5 6"); auto formatter = [](Key key) { return key == 12 ? "A" : "B"; }; - for (bool showZero:{true, false}) { + for (bool showZero : {true, false}) { string actual = f.dot(formatter, showZero); // pretty weak test, as ids are pointers and not stable across platforms. string expected = "digraph G {"; diff --git a/gtsam/discrete/tests/testDiscreteConditional.cpp b/gtsam/discrete/tests/testDiscreteConditional.cpp index 172dd0fa1..2482a86a2 100644 --- a/gtsam/discrete/tests/testDiscreteConditional.cpp +++ b/gtsam/discrete/tests/testDiscreteConditional.cpp @@ -292,7 +292,7 @@ TEST(DiscreteConditional, choose) { /* ************************************************************************* */ // Check argmax on P(C|D) and P(D), plus tie-breaking for P(B) TEST(DiscreteConditional, Argmax) { - DiscreteKey B(2, 2), C(2, 2), D(4, 2); + DiscreteKey C(2, 2), D(4, 2); DiscreteConditional B_prior(D, "1/1"); DiscreteConditional D_prior(D, "1/3"); DiscreteConditional C_given_D((C | D) = "1/4 1/1"); diff --git a/gtsam/geometry/Rot2.h b/gtsam/geometry/Rot2.h index 63b914434..6bb97ff6c 100644 --- a/gtsam/geometry/Rot2.h +++ b/gtsam/geometry/Rot2.h @@ -68,7 +68,7 @@ namespace gtsam { return fromAngle(theta * degree); } - /// Named constructor from cos(theta),sin(theta) pair, will *not* normalize! 
+ /// Named constructor from cos(theta),sin(theta) pair static Rot2 fromCosSin(double c, double s); /** diff --git a/gtsam/geometry/geometry.i b/gtsam/geometry/geometry.i index 3d816fc25..095a350dd 100644 --- a/gtsam/geometry/geometry.i +++ b/gtsam/geometry/geometry.i @@ -856,7 +856,7 @@ class Cal3_S2Stereo { gtsam::Matrix K() const; gtsam::Point2 principalPoint() const; double baseline() const; - Vector6 vector() const; + gtsam::Vector6 vector() const; gtsam::Matrix inverse() const; }; diff --git a/gtsam/geometry/tests/testPose3.cpp b/gtsam/geometry/tests/testPose3.cpp index 93cf99972..c9851f761 100644 --- a/gtsam/geometry/tests/testPose3.cpp +++ b/gtsam/geometry/tests/testPose3.cpp @@ -1207,6 +1207,31 @@ TEST(Pose3, Print) { EXPECT(assert_print_equal(expected, pose)); } +/* ************************************************************************* */ +TEST(Pose3, ExpmapChainRule) { + // Muliply with an arbitrary matrix and exponentiate + Matrix6 M; + M << 1, 2, 3, 4, 5, 6, // + 7, 8, 9, 1, 2, 3, // + 4, 5, 6, 7, 8, 9, // + 1, 2, 3, 4, 5, 6, // + 7, 8, 9, 1, 2, 3, // + 4, 5, 6, 7, 8, 9; + auto g = [&](const Vector6& omega) { + return Pose3::Expmap(M*omega); + }; + + // Test the derivatives at zero + const Matrix6 expected = numericalDerivative11(g, Z_6x1); + EXPECT(assert_equal(expected, M, 1e-5)); // Pose3::ExpmapDerivative(Z_6x1) is identity + + // Test the derivatives at another value + const Vector6 delta{0.1, 0.2, 0.3, 0.4, 0.5, 0.6}; + const Matrix6 expected2 = numericalDerivative11(g, delta); + const Matrix6 analytic = Pose3::ExpmapDerivative(M*delta) * M; + EXPECT(assert_equal(expected2, analytic, 1e-5)); // note tolerance +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/geometry/tests/testRot3.cpp b/gtsam/geometry/tests/testRot3.cpp index 1232348f0..ce056edb6 100644 --- a/gtsam/geometry/tests/testRot3.cpp +++ b/gtsam/geometry/tests/testRot3.cpp @@ -956,6 +956,46 @@ 
TEST(Rot3, determinant) { } } +/* ************************************************************************* */ +TEST(Rot3, ExpmapChainRule) { + // Multiply with an arbitrary matrix and exponentiate + Matrix3 M; + M << 1, 2, 3, 4, 5, 6, 7, 8, 9; + auto g = [&](const Vector3& omega) { + return Rot3::Expmap(M*omega); + }; + + // Test the derivatives at zero + const Matrix3 expected = numericalDerivative11(g, Z_3x1); + EXPECT(assert_equal(expected, M, 1e-5)); // SO3::ExpmapDerivative(Z_3x1) is identity + + // Test the derivatives at another value + const Vector3 delta{0.1,0.2,0.3}; + const Matrix3 expected2 = numericalDerivative11(g, delta); + EXPECT(assert_equal(expected2, SO3::ExpmapDerivative(M*delta) * M, 1e-5)); +} + +/* ************************************************************************* */ +TEST(Rot3, expmapChainRule) { + // Multiply an arbitrary rotation with exp(M*x) + // Perhaps counter-intuitively, this has the same derivatives as above + Matrix3 M; + M << 1, 2, 3, 4, 5, 6, 7, 8, 9; + const Rot3 R = Rot3::Expmap({1, 2, 3}); + auto g = [&](const Vector3& omega) { + return R.expmap(M*omega); + }; + + // Test the derivatives at zero + const Matrix3 expected = numericalDerivative11(g, Z_3x1); + EXPECT(assert_equal(expected, M, 1e-5)); + + // Test the derivatives at another value + const Vector3 delta{0.1,0.2,0.3}; + const Matrix3 expected2 = numericalDerivative11(g, delta); + EXPECT(assert_equal(expected2, SO3::ExpmapDerivative(M*delta) * M, 1e-5)); +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/hybrid/GaussianMixtureFactor.cpp b/gtsam/hybrid/GaussianMixtureFactor.cpp deleted file mode 100644 index a3db16d04..000000000 --- a/gtsam/hybrid/GaussianMixtureFactor.cpp +++ /dev/null @@ -1,122 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * 
All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file GaussianMixtureFactor.cpp - * @brief A set of Gaussian factors indexed by a set of discrete keys. - * @author Fan Jiang - * @author Varun Agrawal - * @author Frank Dellaert - * @date Mar 12, 2022 - */ - -#include -#include -#include -#include -#include -#include -#include - -namespace gtsam { - -/* *******************************************************************************/ -GaussianMixtureFactor::GaussianMixtureFactor(const KeyVector &continuousKeys, - const DiscreteKeys &discreteKeys, - const Factors &factors) - : Base(continuousKeys, discreteKeys), factors_(factors) {} - -/* *******************************************************************************/ -bool GaussianMixtureFactor::equals(const HybridFactor &lf, double tol) const { - const This *e = dynamic_cast(&lf); - if (e == nullptr) return false; - - // This will return false if either factors_ is empty or e->factors_ is empty, - // but not if both are empty or both are not empty: - if (factors_.empty() ^ e->factors_.empty()) return false; - - // Check the base and the factors: - return Base::equals(*e, tol) && - factors_.equals(e->factors_, - [tol](const sharedFactor &f1, const sharedFactor &f2) { - return f1->equals(*f2, tol); - }); -} - -/* *******************************************************************************/ -void GaussianMixtureFactor::print(const std::string &s, - const KeyFormatter &formatter) const { - HybridFactor::print(s, formatter); - std::cout << "{\n"; - if (factors_.empty()) { - std::cout << " empty" << std::endl; - } else { - factors_.print( - "", [&](Key k) { return formatter(k); }, - [&](const sharedFactor &gf) -> std::string { - RedirectCout rd; - std::cout << ":\n"; - if (gf && !gf->empty()) { - gf->print("", formatter); - return 
rd.str(); - } else { - return "nullptr"; - } - }); - } - std::cout << "}" << std::endl; -} - -/* *******************************************************************************/ -GaussianMixtureFactor::sharedFactor GaussianMixtureFactor::operator()( - const DiscreteValues &assignment) const { - return factors_(assignment); -} - -/* *******************************************************************************/ -GaussianFactorGraphTree GaussianMixtureFactor::add( - const GaussianFactorGraphTree &sum) const { - using Y = GaussianFactorGraph; - auto add = [](const Y &graph1, const Y &graph2) { - auto result = graph1; - result.push_back(graph2); - return result; - }; - const auto tree = asGaussianFactorGraphTree(); - return sum.empty() ? tree : sum.apply(tree, add); -} - -/* *******************************************************************************/ -GaussianFactorGraphTree GaussianMixtureFactor::asGaussianFactorGraphTree() - const { - auto wrap = [](const sharedFactor &gf) { return GaussianFactorGraph{gf}; }; - return {factors_, wrap}; -} - -/* *******************************************************************************/ -AlgebraicDecisionTree GaussianMixtureFactor::errorTree( - const VectorValues &continuousValues) const { - // functor to convert from sharedFactor to double error value. 
- auto errorFunc = [&continuousValues](const sharedFactor &gf) { - return gf->error(continuousValues); - }; - DecisionTree error_tree(factors_, errorFunc); - return error_tree; -} - -/* *******************************************************************************/ -double GaussianMixtureFactor::error(const HybridValues &values) const { - const sharedFactor gf = factors_(values.discrete()); - return gf->error(values.continuous()); -} -/* *******************************************************************************/ - -} // namespace gtsam diff --git a/gtsam/hybrid/GaussianMixtureFactor.h b/gtsam/hybrid/GaussianMixtureFactor.h deleted file mode 100644 index 63ca9e923..000000000 --- a/gtsam/hybrid/GaussianMixtureFactor.h +++ /dev/null @@ -1,174 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file GaussianMixtureFactor.h - * @brief A set of GaussianFactors, indexed by a set of discrete keys. - * @author Fan Jiang - * @author Varun Agrawal - * @author Frank Dellaert - * @date Mar 12, 2022 - */ - -#pragma once - -#include -#include -#include -#include -#include -#include - -namespace gtsam { - -class HybridValues; -class DiscreteValues; -class VectorValues; - -/** - * @brief Implementation of a discrete conditional mixture factor. - * Implements a joint discrete-continuous factor where the discrete variable - * serves to "select" a mixture component corresponding to a GaussianFactor type - * of measurement. - * - * Represents the underlying Gaussian mixture as a Decision Tree, where the set - * of discrete variables indexes to the continuous gaussian distribution. 
- * - * @ingroup hybrid - */ -class GTSAM_EXPORT GaussianMixtureFactor : public HybridFactor { - public: - using Base = HybridFactor; - using This = GaussianMixtureFactor; - using shared_ptr = std::shared_ptr; - - using sharedFactor = std::shared_ptr; - - /// typedef for Decision Tree of Gaussian factors and log-constant. - using Factors = DecisionTree; - - private: - /// Decision tree of Gaussian factors indexed by discrete keys. - Factors factors_; - - /** - * @brief Helper function to return factors and functional to create a - * DecisionTree of Gaussian Factor Graphs. - * - * @return GaussianFactorGraphTree - */ - GaussianFactorGraphTree asGaussianFactorGraphTree() const; - - public: - /// @name Constructors - /// @{ - - /// Default constructor, mainly for serialization. - GaussianMixtureFactor() = default; - - /** - * @brief Construct a new Gaussian mixture factor. - * - * @param continuousKeys A vector of keys representing continuous variables. - * @param discreteKeys A vector of keys representing discrete variables and - * their cardinalities. - * @param factors The decision tree of Gaussian factors stored as the mixture - * density. - */ - GaussianMixtureFactor(const KeyVector &continuousKeys, - const DiscreteKeys &discreteKeys, - const Factors &factors); - - /** - * @brief Construct a new GaussianMixtureFactor object using a vector of - * GaussianFactor shared pointers. - * - * @param continuousKeys Vector of keys for continuous factors. - * @param discreteKeys Vector of discrete keys. - * @param factors Vector of gaussian factor shared pointers. 
- */ - GaussianMixtureFactor(const KeyVector &continuousKeys, - const DiscreteKeys &discreteKeys, - const std::vector &factors) - : GaussianMixtureFactor(continuousKeys, discreteKeys, - Factors(discreteKeys, factors)) {} - - /// @} - /// @name Testable - /// @{ - - bool equals(const HybridFactor &lf, double tol = 1e-9) const override; - - void print( - const std::string &s = "GaussianMixtureFactor\n", - const KeyFormatter &formatter = DefaultKeyFormatter) const override; - - /// @} - /// @name Standard API - /// @{ - - /// Get factor at a given discrete assignment. - sharedFactor operator()(const DiscreteValues &assignment) const; - - /** - * @brief Combine the Gaussian Factor Graphs in `sum` and `this` while - * maintaining the original tree structure. - * - * @param sum Decision Tree of Gaussian Factor Graphs indexed by the - * variables. - * @return Sum - */ - GaussianFactorGraphTree add(const GaussianFactorGraphTree &sum) const; - - /** - * @brief Compute error of the GaussianMixtureFactor as a tree. - * - * @param continuousValues The continuous VectorValues. - * @return AlgebraicDecisionTree A decision tree with the same keys - * as the factors involved, and leaf values as the error. - */ - AlgebraicDecisionTree errorTree(const VectorValues &continuousValues) const; - - /** - * @brief Compute the log-likelihood, including the log-normalizing constant. - * @return double - */ - double error(const HybridValues &values) const override; - - /// Getter for GaussianFactor decision tree - const Factors &factors() const { return factors_; } - - /// Add MixtureFactor to a Sum, syntactic sugar. 
- friend GaussianFactorGraphTree &operator+=( - GaussianFactorGraphTree &sum, const GaussianMixtureFactor &factor) { - sum = factor.add(sum); - return sum; - } - /// @} - - private: -#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION - /** Serialization function */ - friend class boost::serialization::access; - template - void serialize(ARCHIVE &ar, const unsigned int /*version*/) { - ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base); - ar &BOOST_SERIALIZATION_NVP(factors_); - } -#endif -}; - -// traits -template <> -struct traits : public Testable { -}; - -} // namespace gtsam diff --git a/gtsam/hybrid/HybridBayesNet.cpp b/gtsam/hybrid/HybridBayesNet.cpp index b02967555..3c77e3f9a 100644 --- a/gtsam/hybrid/HybridBayesNet.cpp +++ b/gtsam/hybrid/HybridBayesNet.cpp @@ -49,7 +49,7 @@ std::function &, double)> prunerFunc( const DecisionTreeFactor &prunedDiscreteProbs, const HybridConditional &conditional) { // Get the discrete keys as sets for the decision tree - // and the Gaussian mixture. + // and the hybrid Gaussian conditional. std::set discreteProbsKeySet = DiscreteKeysAsSet(prunedDiscreteProbs.discreteKeys()); std::set conditionalKeySet = @@ -63,7 +63,7 @@ std::function &, double)> prunerFunc( // typecast so we can use this to get probability value DiscreteValues values(choices); - // Case where the Gaussian mixture has the same + // Case where the hybrid Gaussian conditional has the same // discrete keys as the decision tree. if (conditionalKeySet == discreteProbsKeySet) { if (prunedDiscreteProbs(values) == 0) { @@ -168,11 +168,11 @@ HybridBayesNet HybridBayesNet::prune(size_t maxNrLeaves) { DecisionTreeFactor prunedDiscreteProbs = this->pruneDiscreteConditionals(maxNrLeaves); - /* To prune, we visitWith every leaf in the GaussianMixture. + /* To prune, we visitWith every leaf in the HybridGaussianConditional. * For each leaf, using the assignment we can check the discrete decision tree * for 0.0 probability, then just set the leaf to a nullptr. 
* - * We can later check the GaussianMixture for just nullptrs. + * We can later check the HybridGaussianConditional for just nullptrs. */ HybridBayesNet prunedBayesNetFragment; @@ -180,16 +180,18 @@ HybridBayesNet HybridBayesNet::prune(size_t maxNrLeaves) { // Go through all the conditionals in the // Bayes Net and prune them as per prunedDiscreteProbs. for (auto &&conditional : *this) { - if (auto gm = conditional->asMixture()) { - // Make a copy of the Gaussian mixture and prune it! - auto prunedGaussianMixture = std::make_shared(*gm); - prunedGaussianMixture->prune(prunedDiscreteProbs); // imperative :-( + if (auto gm = conditional->asHybrid()) { + // Make a copy of the hybrid Gaussian conditional and prune it! + auto prunedHybridGaussianConditional = + std::make_shared(*gm); + prunedHybridGaussianConditional->prune( + prunedDiscreteProbs); // imperative :-( // Type-erase and add to the pruned Bayes Net fragment. - prunedBayesNetFragment.push_back(prunedGaussianMixture); + prunedBayesNetFragment.push_back(prunedHybridGaussianConditional); } else { - // Add the non-GaussianMixture conditional + // Add the non-HybridGaussianConditional conditional prunedBayesNetFragment.push_back(conditional); } } @@ -202,9 +204,9 @@ GaussianBayesNet HybridBayesNet::choose( const DiscreteValues &assignment) const { GaussianBayesNet gbn; for (auto &&conditional : *this) { - if (auto gm = conditional->asMixture()) { + if (auto gm = conditional->asHybrid()) { // If conditional is hybrid, select based on assignment. - gbn.push_back((*gm)(assignment)); + gbn.push_back(gm->choose(assignment)); } else if (auto gc = conditional->asGaussian()) { // If continuous only, add Gaussian conditional. 
gbn.push_back(gc); @@ -220,15 +222,16 @@ GaussianBayesNet HybridBayesNet::choose( /* ************************************************************************* */ HybridValues HybridBayesNet::optimize() const { // Collect all the discrete factors to compute MPE - DiscreteBayesNet discrete_bn; + DiscreteFactorGraph discrete_fg; + for (auto &&conditional : *this) { if (conditional->isDiscrete()) { - discrete_bn.push_back(conditional->asDiscrete()); + discrete_fg.push_back(conditional->asDiscrete()); } } // Solve for the MPE - DiscreteValues mpe = DiscreteFactorGraph(discrete_bn).optimize(); + DiscreteValues mpe = discrete_fg.optimize(); // Given the MPE, compute the optimal continuous values. return HybridValues(optimize(mpe), mpe); @@ -288,7 +291,7 @@ AlgebraicDecisionTree HybridBayesNet::errorTree( // Iterate over each conditional. for (auto &&conditional : *this) { - if (auto gm = conditional->asMixture()) { + if (auto gm = conditional->asHybrid()) { // If conditional is hybrid, compute error for all assignments. result = result + gm->errorTree(continuousValues); @@ -318,7 +321,7 @@ AlgebraicDecisionTree HybridBayesNet::logProbability( // Iterate over each conditional. for (auto &&conditional : *this) { - if (auto gm = conditional->asMixture()) { + if (auto gm = conditional->asHybrid()) { // If conditional is hybrid, select based on assignment and compute // logProbability. 
result = result + gm->logProbability(continuousValues); @@ -366,7 +369,7 @@ HybridGaussianFactorGraph HybridBayesNet::toFactorGraph( if (conditional->frontalsIn(measurements)) { if (auto gc = conditional->asGaussian()) { fg.push_back(gc->likelihood(measurements)); - } else if (auto gm = conditional->asMixture()) { + } else if (auto gm = conditional->asHybrid()) { fg.push_back(gm->likelihood(measurements)); } else { throw std::runtime_error("Unknown conditional type"); diff --git a/gtsam/hybrid/HybridBayesNet.h b/gtsam/hybrid/HybridBayesNet.h index 032cd55b9..62688e8b2 100644 --- a/gtsam/hybrid/HybridBayesNet.h +++ b/gtsam/hybrid/HybridBayesNet.h @@ -28,7 +28,8 @@ namespace gtsam { /** * A hybrid Bayes net is a collection of HybridConditionals, which can have - * discrete conditionals, Gaussian mixtures, or pure Gaussian conditionals. + * discrete conditionals, hybrid Gaussian conditionals, + * or pure Gaussian conditionals. * * @ingroup hybrid */ @@ -43,9 +44,14 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet { /// @name Standard Constructors /// @{ - /** Construct empty Bayes net */ + /// Construct empty Bayes net. HybridBayesNet() = default; + /// Constructor that takes an initializer list of shared pointers. + HybridBayesNet( + std::initializer_list conditionals) + : Base(conditionals) {} + /// @} /// @name Testable /// @{ @@ -70,20 +76,6 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet { factors_.push_back(conditional); } - /** - * Preferred: add a conditional directly using a pointer. - * - * Examples: - * hbn.emplace_back(new GaussianMixture(...))); - * hbn.emplace_back(new GaussianConditional(...))); - * hbn.emplace_back(new DiscreteConditional(...))); - */ - template - void emplace_back(Conditional *conditional) { - factors_.push_back(std::make_shared( - std::shared_ptr(conditional))); - } - /** * Add a conditional using a shared_ptr, using implicit conversion to * a HybridConditional. 
@@ -93,7 +85,7 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet { * * Example: * auto shared_ptr_to_a_conditional = - * std::make_shared(...); + * std::make_shared(...); * hbn.push_back(shared_ptr_to_a_conditional); */ void push_back(HybridConditional &&conditional) { @@ -101,10 +93,42 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet { std::make_shared(std::move(conditional))); } + /** + * @brief Add a conditional to the Bayes net. + * Implicitly convert to a HybridConditional. + * + * E.g. + * hbn.push_back(std::make_shared(m, "1/1")); + * + * @tparam CONDITIONAL Type of conditional. This is shared_ptr version. + * @param conditional The conditional as a shared pointer. + */ + template + void push_back(const std::shared_ptr &conditional) { + factors_.push_back(std::make_shared(conditional)); + } + + /** + * Preferred: Emplace a conditional directly using arguments. + * + * Examples: + * hbn.emplace_shared(...))); + * hbn.emplace_shared(...))); + * hbn.emplace_shared(...))); + */ + template + void emplace_shared(Args &&...args) { + auto cond = std::allocate_shared( + Eigen::aligned_allocator(), std::forward(args)...); + factors_.push_back(std::make_shared(std::move(cond))); + } + /** * @brief Get the Gaussian Bayes Net which corresponds to a specific discrete * value assignment. * + * @note Any pure discrete factors are ignored. + * * @param assignment The discrete value assignment for the discrete keys. 
* @return GaussianBayesNet */ diff --git a/gtsam/hybrid/HybridBayesTree.cpp b/gtsam/hybrid/HybridBayesTree.cpp index ae8fa0378..9aee6dcf8 100644 --- a/gtsam/hybrid/HybridBayesTree.cpp +++ b/gtsam/hybrid/HybridBayesTree.cpp @@ -40,17 +40,17 @@ bool HybridBayesTree::equals(const This& other, double tol) const { /* ************************************************************************* */ HybridValues HybridBayesTree::optimize() const { - DiscreteBayesNet dbn; + DiscreteFactorGraph discrete_fg; DiscreteValues mpe; auto root = roots_.at(0); // Access the clique and get the underlying hybrid conditional HybridConditional::shared_ptr root_conditional = root->conditional(); - // The root should be discrete only, we compute the MPE + // The root should be discrete only, we compute the MPE if (root_conditional->isDiscrete()) { - dbn.push_back(root_conditional->asDiscrete()); - mpe = DiscreteFactorGraph(dbn).optimize(); + discrete_fg.push_back(root_conditional->asDiscrete()); + mpe = discrete_fg.optimize(); } else { throw std::runtime_error( "HybridBayesTree root is not discrete-only. 
Please check elimination " @@ -109,7 +109,7 @@ struct HybridAssignmentData { GaussianConditional::shared_ptr conditional; if (hybrid_conditional->isHybrid()) { - conditional = (*hybrid_conditional->asMixture())(parentData.assignment_); + conditional = (*hybrid_conditional->asHybrid())(parentData.assignment_); } else if (hybrid_conditional->isContinuous()) { conditional = hybrid_conditional->asGaussian(); } else { @@ -136,8 +136,7 @@ struct HybridAssignmentData { } }; -/* ************************************************************************* - */ +/* ************************************************************************* */ GaussianBayesTree HybridBayesTree::choose( const DiscreteValues& assignment) const { GaussianBayesTree gbt; @@ -157,8 +156,12 @@ GaussianBayesTree HybridBayesTree::choose( return gbt; } -/* ************************************************************************* - */ +/* ************************************************************************* */ +double HybridBayesTree::error(const HybridValues& values) const { + return HybridGaussianFactorGraph(*this).error(values); +} + +/* ************************************************************************* */ VectorValues HybridBayesTree::optimize(const DiscreteValues& assignment) const { GaussianBayesTree gbt = this->choose(assignment); // If empty GaussianBayesTree, means a clique is pruned hence invalid @@ -202,9 +205,9 @@ void HybridBayesTree::prune(const size_t maxNrLeaves) { // If conditional is hybrid, we prune it. 
if (conditional->isHybrid()) { - auto gaussianMixture = conditional->asMixture(); + auto hybridGaussianCond = conditional->asHybrid(); - gaussianMixture->prune(parentData.prunedDiscreteProbs); + hybridGaussianCond->prune(parentData.prunedDiscreteProbs); } return parentData; } diff --git a/gtsam/hybrid/HybridBayesTree.h b/gtsam/hybrid/HybridBayesTree.h index f91e16cbf..0fe9ca6ea 100644 --- a/gtsam/hybrid/HybridBayesTree.h +++ b/gtsam/hybrid/HybridBayesTree.h @@ -84,6 +84,9 @@ class GTSAM_EXPORT HybridBayesTree : public BayesTree { */ GaussianBayesTree choose(const DiscreteValues& assignment) const; + /** Error for all conditionals. */ + double error(const HybridValues& values) const; + /** * @brief Optimize the hybrid Bayes tree by computing the MPE for the current * set of discrete variables and using it to compute the best continuous @@ -96,8 +99,8 @@ class GTSAM_EXPORT HybridBayesTree : public BayesTree { /** * @brief Recursively optimize the BayesTree to produce a vector solution. * - * @param assignment The discrete values assignment to select the Gaussian - * mixtures. + * @param assignment The discrete values assignment to select + * the hybrid conditional. 
* @return VectorValues */ VectorValues optimize(const DiscreteValues& assignment) const; @@ -167,7 +170,7 @@ class BayesTreeOrphanWrapper : public HybridConditional { void print( const std::string& s = "", const KeyFormatter& formatter = DefaultKeyFormatter) const override { - clique->print(s + "stored clique", formatter); + clique->print(s + " stored clique ", formatter); } }; diff --git a/gtsam/hybrid/HybridConditional.cpp b/gtsam/hybrid/HybridConditional.cpp index 6c4893476..cac2adcf8 100644 --- a/gtsam/hybrid/HybridConditional.cpp +++ b/gtsam/hybrid/HybridConditional.cpp @@ -28,14 +28,9 @@ HybridConditional::HybridConditional(const KeyVector &continuousFrontals, const DiscreteKeys &discreteFrontals, const KeyVector &continuousParents, const DiscreteKeys &discreteParents) - : HybridConditional( - CollectKeys( - {continuousFrontals.begin(), continuousFrontals.end()}, - KeyVector{continuousParents.begin(), continuousParents.end()}), - CollectDiscreteKeys( - {discreteFrontals.begin(), discreteFrontals.end()}, - {discreteParents.begin(), discreteParents.end()}), - continuousFrontals.size() + discreteFrontals.size()) {} + : HybridConditional(CollectKeys(continuousFrontals, continuousParents), + CollectDiscreteKeys(discreteFrontals, discreteParents), + continuousFrontals.size() + discreteFrontals.size()) {} /* ************************************************************************ */ HybridConditional::HybridConditional( @@ -55,13 +50,11 @@ HybridConditional::HybridConditional( /* ************************************************************************ */ HybridConditional::HybridConditional( - const std::shared_ptr &gaussianMixture) - : BaseFactor(KeyVector(gaussianMixture->keys().begin(), - gaussianMixture->keys().begin() + - gaussianMixture->nrContinuous()), - gaussianMixture->discreteKeys()), - BaseConditional(gaussianMixture->nrFrontals()) { - inner_ = gaussianMixture; + const std::shared_ptr &hybridGaussianCond) + : 
BaseFactor(hybridGaussianCond->continuousKeys(), + hybridGaussianCond->discreteKeys()), + BaseConditional(hybridGaussianCond->nrFrontals()) { + inner_ = hybridGaussianCond; } /* ************************************************************************ */ @@ -104,8 +97,8 @@ void HybridConditional::print(const std::string &s, bool HybridConditional::equals(const HybridFactor &other, double tol) const { const This *e = dynamic_cast(&other); if (e == nullptr) return false; - if (auto gm = asMixture()) { - auto other = e->asMixture(); + if (auto gm = asHybrid()) { + auto other = e->asHybrid(); return other != nullptr && gm->equals(*other, tol); } if (auto gc = asGaussian()) { @@ -126,7 +119,7 @@ double HybridConditional::error(const HybridValues &values) const { if (auto gc = asGaussian()) { return gc->error(values.continuous()); } - if (auto gm = asMixture()) { + if (auto gm = asHybrid()) { return gm->error(values); } if (auto dc = asDiscrete()) { @@ -136,12 +129,28 @@ double HybridConditional::error(const HybridValues &values) const { "HybridConditional::error: conditional type not handled"); } +/* ************************************************************************ */ +AlgebraicDecisionTree HybridConditional::errorTree( + const VectorValues &values) const { + if (auto gc = asGaussian()) { + return AlgebraicDecisionTree(gc->error(values)); + } + if (auto gm = asHybrid()) { + return gm->errorTree(values); + } + if (auto dc = asDiscrete()) { + return AlgebraicDecisionTree(0.0); + } + throw std::runtime_error( + "HybridConditional::error: conditional type not handled"); +} + /* ************************************************************************ */ double HybridConditional::logProbability(const HybridValues &values) const { if (auto gc = asGaussian()) { return gc->logProbability(values.continuous()); } - if (auto gm = asMixture()) { + if (auto gm = asHybrid()) { return gm->logProbability(values); } if (auto dc = asDiscrete()) { @@ -152,18 +161,18 @@ double 
HybridConditional::logProbability(const HybridValues &values) const { } /* ************************************************************************ */ -double HybridConditional::logNormalizationConstant() const { +double HybridConditional::negLogConstant() const { if (auto gc = asGaussian()) { - return gc->logNormalizationConstant(); + return gc->negLogConstant(); } - if (auto gm = asMixture()) { - return gm->logNormalizationConstant(); // 0.0! + if (auto gm = asHybrid()) { + return gm->negLogConstant(); // 0.0! } if (auto dc = asDiscrete()) { - return dc->logNormalizationConstant(); // 0.0! + return dc->negLogConstant(); // 0.0! } throw std::runtime_error( - "HybridConditional::logProbability: conditional type not handled"); + "HybridConditional::negLogConstant: conditional type not handled"); } /* ************************************************************************ */ diff --git a/gtsam/hybrid/HybridConditional.h b/gtsam/hybrid/HybridConditional.h index 64bdcb2c1..588c44e0b 100644 --- a/gtsam/hybrid/HybridConditional.h +++ b/gtsam/hybrid/HybridConditional.h @@ -18,8 +18,8 @@ #pragma once #include -#include #include +#include #include #include #include @@ -39,7 +39,7 @@ namespace gtsam { * As a type-erased variant of: * - DiscreteConditional * - GaussianConditional - * - GaussianMixture + * - HybridGaussianConditional * * The reason why this is important is that `Conditional` is a CRTP class. 
* CRTP is static polymorphism such that all CRTP classes, while bearing the @@ -61,7 +61,7 @@ class GTSAM_EXPORT HybridConditional public Conditional { public: // typedefs needed to play nice with gtsam - typedef HybridConditional This; ///< Typedef to this class + typedef HybridConditional This; ///< Typedef to this class typedef std::shared_ptr shared_ptr; ///< shared_ptr to this class typedef HybridFactor BaseFactor; ///< Typedef to our factor base class typedef Conditional @@ -124,10 +124,11 @@ class GTSAM_EXPORT HybridConditional /** * @brief Construct a new Hybrid Conditional object * - * @param gaussianMixture Gaussian Mixture Conditional used to create the + * @param hybridGaussianCond Hybrid Gaussian Conditional used to create the * HybridConditional. */ - HybridConditional(const std::shared_ptr& gaussianMixture); + HybridConditional( + const std::shared_ptr& hybridGaussianCond); /// @} /// @name Testable @@ -146,12 +147,12 @@ class GTSAM_EXPORT HybridConditional /// @{ /** - * @brief Return HybridConditional as a GaussianMixture - * @return nullptr if not a mixture - * @return GaussianMixture::shared_ptr otherwise + * @brief Return HybridConditional as a HybridGaussianConditional + * @return nullptr if not a conditional + * @return HybridGaussianConditional::shared_ptr otherwise */ - GaussianMixture::shared_ptr asMixture() const { - return std::dynamic_pointer_cast(inner_); + HybridGaussianConditional::shared_ptr asHybrid() const { + return std::dynamic_pointer_cast(inner_); } /** @@ -178,15 +179,26 @@ class GTSAM_EXPORT HybridConditional /// Return the error of the underlying conditional. double error(const HybridValues& values) const override; + /** + * @brief Compute error of the HybridConditional as a tree. + * + * @param continuousValues The continuous VectorValues. + * @return AlgebraicDecisionTree A decision tree with the same keys + * as the conditionals involved, and leaf values as the error. 
+ */ + AlgebraicDecisionTree errorTree( + const VectorValues& values) const override; + /// Return the log-probability (or density) of the underlying conditional. double logProbability(const HybridValues& values) const override; /** - * Return the log normalization constant. + * Return the negative log of the normalization constant. + * This shows up in the error as -(error(x) + negLogConstant) * Note this is 0.0 for discrete and hybrid conditionals, but depends * on the continuous parameters for Gaussian conditionals. - */ - double logNormalizationConstant() const override; + */ + double negLogConstant() const override; /// Return the probability (or density) of the underlying conditional. double evaluate(const HybridValues& values) const override; @@ -222,8 +234,10 @@ class GTSAM_EXPORT HybridConditional boost::serialization::void_cast_register( static_cast(NULL), static_cast(NULL)); } else { - boost::serialization::void_cast_register( - static_cast(NULL), static_cast(NULL)); + boost::serialization::void_cast_register( + static_cast(NULL), + static_cast(NULL)); } } #endif diff --git a/gtsam/hybrid/HybridEliminationTree.h b/gtsam/hybrid/HybridEliminationTree.h index e1eb1764a..543e09c6f 100644 --- a/gtsam/hybrid/HybridEliminationTree.h +++ b/gtsam/hybrid/HybridEliminationTree.h @@ -35,8 +35,8 @@ class GTSAM_EXPORT HybridEliminationTree public: typedef EliminationTree - Base; ///< Base class - typedef HybridEliminationTree This; ///< This class + Base; ///< Base class + typedef HybridEliminationTree This; ///< This class typedef std::shared_ptr shared_ptr; ///< Shared pointer to this class /// @name Constructors diff --git a/gtsam/hybrid/HybridFactor.cpp b/gtsam/hybrid/HybridFactor.cpp index b25e97f05..3338951bf 100644 --- a/gtsam/hybrid/HybridFactor.cpp +++ b/gtsam/hybrid/HybridFactor.cpp @@ -50,31 +50,43 @@ DiscreteKeys CollectDiscreteKeys(const DiscreteKeys &key1, /* ************************************************************************ */ 
HybridFactor::HybridFactor(const KeyVector &keys) - : Base(keys), isContinuous_(true), continuousKeys_(keys) {} + : Base(keys), category_(Category::Continuous), continuousKeys_(keys) {} + +/* ************************************************************************ */ +HybridFactor::Category GetCategory(const KeyVector &continuousKeys, + const DiscreteKeys &discreteKeys) { + if ((continuousKeys.size() == 0) && (discreteKeys.size() != 0)) { + return HybridFactor::Category::Discrete; + } else if ((continuousKeys.size() != 0) && (discreteKeys.size() == 0)) { + return HybridFactor::Category::Continuous; + } else if ((continuousKeys.size() != 0) && (discreteKeys.size() != 0)) { + return HybridFactor::Category::Hybrid; + } else { + // Case where we have no keys. Should never happen. + return HybridFactor::Category::None; + } +} /* ************************************************************************ */ HybridFactor::HybridFactor(const KeyVector &continuousKeys, const DiscreteKeys &discreteKeys) : Base(CollectKeys(continuousKeys, discreteKeys)), - isDiscrete_((continuousKeys.size() == 0) && (discreteKeys.size() != 0)), - isContinuous_((continuousKeys.size() != 0) && (discreteKeys.size() == 0)), - isHybrid_((continuousKeys.size() != 0) && (discreteKeys.size() != 0)), + category_(GetCategory(continuousKeys, discreteKeys)), discreteKeys_(discreteKeys), continuousKeys_(continuousKeys) {} /* ************************************************************************ */ HybridFactor::HybridFactor(const DiscreteKeys &discreteKeys) : Base(CollectKeys({}, discreteKeys)), - isDiscrete_(true), + category_(Category::Discrete), discreteKeys_(discreteKeys), continuousKeys_({}) {} /* ************************************************************************ */ bool HybridFactor::equals(const HybridFactor &lf, double tol) const { const This *e = dynamic_cast(&lf); - return e != nullptr && Base::equals(*e, tol) && - isDiscrete_ == e->isDiscrete_ && isContinuous_ == e->isContinuous_ && - 
isHybrid_ == e->isHybrid_ && continuousKeys_ == e->continuousKeys_ && + return e != nullptr && Base::equals(*e, tol) && category_ == e->category_ && + continuousKeys_ == e->continuousKeys_ && discreteKeys_ == e->discreteKeys_; } @@ -82,9 +94,21 @@ bool HybridFactor::equals(const HybridFactor &lf, double tol) const { void HybridFactor::print(const std::string &s, const KeyFormatter &formatter) const { std::cout << (s.empty() ? "" : s + "\n"); - if (isContinuous_) std::cout << "Continuous "; - if (isDiscrete_) std::cout << "Discrete "; - if (isHybrid_) std::cout << "Hybrid "; + switch (category_) { + case Category::Continuous: + std::cout << "Continuous "; + break; + case Category::Discrete: + std::cout << "Discrete "; + break; + case Category::Hybrid: + std::cout << "Hybrid "; + break; + case Category::None: + std::cout << "None "; + break; + } + std::cout << "["; for (size_t c = 0; c < continuousKeys_.size(); c++) { std::cout << formatter(continuousKeys_.at(c)); diff --git a/gtsam/hybrid/HybridFactor.h b/gtsam/hybrid/HybridFactor.h index afd1c8032..fc91e0838 100644 --- a/gtsam/hybrid/HybridFactor.h +++ b/gtsam/hybrid/HybridFactor.h @@ -13,6 +13,7 @@ * @file HybridFactor.h * @date Mar 11, 2022 * @author Fan Jiang + * @author Varun Agrawal */ #pragma once @@ -44,17 +45,20 @@ DiscreteKeys CollectDiscreteKeys(const DiscreteKeys &key1, * Base class for *truly* hybrid probabilistic factors * * Examples: - * - MixtureFactor - * - GaussianMixtureFactor - * - GaussianMixture + * - HybridNonlinearFactor + * - HybridGaussianFactor + * - HybridGaussianConditional * * @ingroup hybrid */ class GTSAM_EXPORT HybridFactor : public Factor { + public: + /// Enum to help with categorizing hybrid factors. + enum class Category { None, Discrete, Continuous, Hybrid }; + private: - bool isDiscrete_ = false; - bool isContinuous_ = false; - bool isHybrid_ = false; + /// Record what category of HybridFactor this is. 
+ Category category_ = Category::None; protected: // Set of DiscreteKeys for this factor. @@ -115,13 +119,13 @@ class GTSAM_EXPORT HybridFactor : public Factor { /// @{ /// True if this is a factor of discrete variables only. - bool isDiscrete() const { return isDiscrete_; } + bool isDiscrete() const { return category_ == Category::Discrete; } /// True if this is a factor of continuous variables only. - bool isContinuous() const { return isContinuous_; } + bool isContinuous() const { return category_ == Category::Continuous; } /// True is this is a Discrete-Continuous factor. - bool isHybrid() const { return isHybrid_; } + bool isHybrid() const { return category_ == Category::Hybrid; } /// Return the number of continuous variables in this factor. size_t nrContinuous() const { return continuousKeys_.size(); } @@ -132,6 +136,10 @@ class GTSAM_EXPORT HybridFactor : public Factor { /// Return only the continuous keys for this factor. const KeyVector &continuousKeys() const { return continuousKeys_; } + /// Virtual class to compute tree of linear errors. 
+ virtual AlgebraicDecisionTree errorTree( + const VectorValues &values) const = 0; + /// @} private: @@ -141,9 +149,7 @@ class GTSAM_EXPORT HybridFactor : public Factor { template void serialize(ARCHIVE &ar, const unsigned int /*version*/) { ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base); - ar &BOOST_SERIALIZATION_NVP(isDiscrete_); - ar &BOOST_SERIALIZATION_NVP(isContinuous_); - ar &BOOST_SERIALIZATION_NVP(isHybrid_); + ar &BOOST_SERIALIZATION_NVP(category_); ar &BOOST_SERIALIZATION_NVP(discreteKeys_); ar &BOOST_SERIALIZATION_NVP(continuousKeys_); } diff --git a/gtsam/hybrid/HybridFactorGraph.cpp b/gtsam/hybrid/HybridFactorGraph.cpp index f7b96f694..78c051b17 100644 --- a/gtsam/hybrid/HybridFactorGraph.cpp +++ b/gtsam/hybrid/HybridFactorGraph.cpp @@ -18,6 +18,7 @@ */ #include +#include namespace gtsam { @@ -29,8 +30,7 @@ std::set HybridFactorGraph::discreteKeys() const { for (const DiscreteKey& key : p->discreteKeys()) { keys.insert(key); } - } - if (auto p = std::dynamic_pointer_cast(factor)) { + } else if (auto p = std::dynamic_pointer_cast(factor)) { for (const DiscreteKey& key : p->discreteKeys()) { keys.insert(key); } @@ -49,15 +49,6 @@ KeySet HybridFactorGraph::discreteKeySet() const { return keys; } -/* ************************************************************************* */ -std::unordered_map HybridFactorGraph::discreteKeyMap() const { - std::unordered_map result; - for (const DiscreteKey& k : discreteKeys()) { - result[k.first] = k; - } - return result; -} - /* ************************************************************************* */ const KeySet HybridFactorGraph::continuousKeySet() const { KeySet keys; @@ -68,6 +59,8 @@ const KeySet HybridFactorGraph::continuousKeySet() const { } } else if (auto p = std::dynamic_pointer_cast(factor)) { keys.insert(p->keys().begin(), p->keys().end()); + } else if (auto p = std::dynamic_pointer_cast(factor)) { + keys.insert(p->keys().begin(), p->keys().end()); } } return keys; diff --git 
a/gtsam/hybrid/HybridFactorGraph.h b/gtsam/hybrid/HybridFactorGraph.h index 8b59fd4f9..921cc14bf 100644 --- a/gtsam/hybrid/HybridFactorGraph.h +++ b/gtsam/hybrid/HybridFactorGraph.h @@ -38,7 +38,7 @@ using SharedFactor = std::shared_ptr; class GTSAM_EXPORT HybridFactorGraph : public FactorGraph { public: using Base = FactorGraph; - using This = HybridFactorGraph; ///< this class + using This = HybridFactorGraph; ///< this class using shared_ptr = std::shared_ptr; ///< shared_ptr to This using Values = gtsam::Values; ///< backwards compatibility @@ -59,6 +59,10 @@ class GTSAM_EXPORT HybridFactorGraph : public FactorGraph { template HybridFactorGraph(const FactorGraph& graph) : Base(graph) {} + /** Construct from container of factors (shared_ptr or plain objects) */ + template + explicit HybridFactorGraph(const CONTAINER& factors) : Base(factors) {} + /// @} /// @name Extra methods to inspect discrete/continuous keys. /// @{ @@ -66,12 +70,9 @@ class GTSAM_EXPORT HybridFactorGraph : public FactorGraph { /// Get all the discrete keys in the factor graph. std::set discreteKeys() const; - /// Get all the discrete keys in the factor graph, as a set. + /// Get all the discrete keys in the factor graph, as a set of Keys. KeySet discreteKeySet() const; - /// Get a map from Key to corresponding DiscreteKey. - std::unordered_map discreteKeyMap() const; - /// Get all the continuous keys in the factor graph. 
const KeySet continuousKeySet() const; diff --git a/gtsam/hybrid/GaussianMixture.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp similarity index 51% rename from gtsam/hybrid/GaussianMixture.cpp rename to gtsam/hybrid/HybridGaussianConditional.cpp index c105a329e..1db13e95b 100644 --- a/gtsam/hybrid/GaussianMixture.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -10,7 +10,7 @@ * -------------------------------------------------------------------------- */ /** - * @file GaussianMixture.cpp + * @file HybridGaussianConditional.cpp * @brief A hybrid conditional in the Conditional Linear Gaussian scheme * @author Fan Jiang * @author Varun Agrawal @@ -20,80 +20,146 @@ #include #include -#include -#include +#include +#include #include #include +#include #include +#include + +#include namespace gtsam { +/* *******************************************************************************/ +struct HybridGaussianConditional::Helper { + std::optional nrFrontals; + FactorValuePairs pairs; + Conditionals conditionals; + double minNegLogConstant; -GaussianMixture::GaussianMixture( - const KeyVector &continuousFrontals, const KeyVector &continuousParents, - const DiscreteKeys &discreteParents, - const GaussianMixture::Conditionals &conditionals) - : BaseFactor(CollectKeys(continuousFrontals, continuousParents), - discreteParents), - BaseConditional(continuousFrontals.size()), - conditionals_(conditionals) { - // Calculate logConstant_ as the maximum of the log constants of the - // conditionals, by visiting the decision tree: - logConstant_ = -std::numeric_limits::infinity(); - conditionals_.visit( - [this](const GaussianConditional::shared_ptr &conditional) { - if (conditional) { - this->logConstant_ = std::max( - this->logConstant_, conditional->logNormalizationConstant()); + using GC = GaussianConditional; + using P = std::vector>; + + /// Construct from a vector of mean and sigma pairs, plus extra args. 
+ template + explicit Helper(const DiscreteKey &mode, const P &p, Args &&...args) { + nrFrontals = 1; + minNegLogConstant = std::numeric_limits::infinity(); + + std::vector fvs; + std::vector gcs; + fvs.reserve(p.size()); + gcs.reserve(p.size()); + for (auto &&[mean, sigma] : p) { + auto gaussianConditional = + GC::sharedMeanAndStddev(std::forward(args)..., mean, sigma); + double value = gaussianConditional->negLogConstant(); + minNegLogConstant = std::min(minNegLogConstant, value); + fvs.emplace_back(gaussianConditional, value); + gcs.push_back(gaussianConditional); + } + + conditionals = Conditionals({mode}, gcs); + pairs = FactorValuePairs({mode}, fvs); + } + + /// Construct from tree of GaussianConditionals. + explicit Helper(const Conditionals &conditionals) + : conditionals(conditionals), + minNegLogConstant(std::numeric_limits::infinity()) { + auto func = [this](const GC::shared_ptr &c) -> GaussianFactorValuePair { + double value = 0.0; + if (c) { + if (!nrFrontals.has_value()) { + nrFrontals = c->nrFrontals(); } - }); -} + value = c->negLogConstant(); + minNegLogConstant = std::min(minNegLogConstant, value); + } + return {std::dynamic_pointer_cast(c), value}; + }; + pairs = FactorValuePairs(conditionals, func); + if (!nrFrontals.has_value()) { + throw std::runtime_error( + "HybridGaussianConditional: need at least one frontal variable. 
" + "Provided conditionals do not contain any frontal variables."); + } + } +}; /* *******************************************************************************/ -const GaussianMixture::Conditionals &GaussianMixture::conditionals() const { +HybridGaussianConditional::HybridGaussianConditional( + const DiscreteKeys &discreteParents, const Helper &helper) + : BaseFactor(discreteParents, helper.pairs), + BaseConditional(*helper.nrFrontals), + conditionals_(helper.conditionals), + negLogConstant_(helper.minNegLogConstant) {} + +HybridGaussianConditional::HybridGaussianConditional( + const DiscreteKey &discreteParent, + const std::vector &conditionals) + : HybridGaussianConditional(DiscreteKeys{discreteParent}, + Conditionals({discreteParent}, conditionals)) {} + +HybridGaussianConditional::HybridGaussianConditional( + const DiscreteKey &discreteParent, Key key, // + const std::vector> ¶meters) + : HybridGaussianConditional(DiscreteKeys{discreteParent}, + Helper(discreteParent, parameters, key)) {} + +HybridGaussianConditional::HybridGaussianConditional( + const DiscreteKey &discreteParent, Key key, // + const Matrix &A, Key parent, + const std::vector> ¶meters) + : HybridGaussianConditional( + DiscreteKeys{discreteParent}, + Helper(discreteParent, parameters, key, A, parent)) {} + +HybridGaussianConditional::HybridGaussianConditional( + const DiscreteKey &discreteParent, Key key, // + const Matrix &A1, Key parent1, const Matrix &A2, Key parent2, + const std::vector> ¶meters) + : HybridGaussianConditional( + DiscreteKeys{discreteParent}, + Helper(discreteParent, parameters, key, A1, parent1, A2, parent2)) {} + +HybridGaussianConditional::HybridGaussianConditional( + const DiscreteKeys &discreteParents, + const HybridGaussianConditional::Conditionals &conditionals) + : HybridGaussianConditional(discreteParents, Helper(conditionals)) {} + +/* *******************************************************************************/ +const HybridGaussianConditional::Conditionals & 
+HybridGaussianConditional::conditionals() const { return conditionals_; } /* *******************************************************************************/ -GaussianMixture::GaussianMixture( - KeyVector &&continuousFrontals, KeyVector &&continuousParents, - DiscreteKeys &&discreteParents, - std::vector &&conditionals) - : GaussianMixture(continuousFrontals, continuousParents, discreteParents, - Conditionals(discreteParents, conditionals)) {} - -/* *******************************************************************************/ -GaussianMixture::GaussianMixture( - const KeyVector &continuousFrontals, const KeyVector &continuousParents, - const DiscreteKeys &discreteParents, - const std::vector &conditionals) - : GaussianMixture(continuousFrontals, continuousParents, discreteParents, - Conditionals(discreteParents, conditionals)) {} - -/* *******************************************************************************/ -// TODO(dellaert): This is copy/paste: GaussianMixture should be derived from -// GaussianMixtureFactor, no? -GaussianFactorGraphTree GaussianMixture::add( - const GaussianFactorGraphTree &sum) const { - using Y = GaussianFactorGraph; - auto add = [](const Y &graph1, const Y &graph2) { - auto result = graph1; - result.push_back(graph2); - return result; - }; - const auto tree = asGaussianFactorGraphTree(); - return sum.empty() ? 
tree : sum.apply(tree, add); -} - -/* *******************************************************************************/ -GaussianFactorGraphTree GaussianMixture::asGaussianFactorGraphTree() const { - auto wrap = [](const GaussianConditional::shared_ptr &gc) { +GaussianFactorGraphTree HybridGaussianConditional::asGaussianFactorGraphTree() + const { + auto wrap = [this](const GaussianConditional::shared_ptr &gc) { + // First check if conditional has not been pruned + if (gc) { + const double Cgm_Kgcm = gc->negLogConstant() - this->negLogConstant_; + // If there is a difference in the covariances, we need to account for + // that since the error is dependent on the mode. + if (Cgm_Kgcm > 0.0) { + // We add a constant factor which will be used when computing + // the probability of the discrete variables. + Vector c(1); + c << std::sqrt(2.0 * Cgm_Kgcm); + auto constantFactor = std::make_shared(c); + return GaussianFactorGraph{gc, constantFactor}; + } + } return GaussianFactorGraph{gc}; }; return {conditionals_, wrap}; } /* *******************************************************************************/ -size_t GaussianMixture::nrComponents() const { +size_t HybridGaussianConditional::nrComponents() const { size_t total = 0; conditionals_.visit([&total](const GaussianFactor::shared_ptr &node) { if (node) total += 1; @@ -102,7 +168,7 @@ size_t GaussianMixture::nrComponents() const { } /* *******************************************************************************/ -GaussianConditional::shared_ptr GaussianMixture::operator()( +GaussianConditional::shared_ptr HybridGaussianConditional::choose( const DiscreteValues &discreteValues) const { auto &ptr = conditionals_(discreteValues); if (!ptr) return nullptr; @@ -111,11 +177,12 @@ GaussianConditional::shared_ptr GaussianMixture::operator()( return conditional; else throw std::logic_error( - "A GaussianMixture unexpectedly contained a non-conditional"); + "A HybridGaussianConditional unexpectedly contained a 
non-conditional"); } /* *******************************************************************************/ -bool GaussianMixture::equals(const HybridFactor &lf, double tol) const { +bool HybridGaussianConditional::equals(const HybridFactor &lf, + double tol) const { const This *e = dynamic_cast(&lf); if (e == nullptr) return false; @@ -125,16 +192,15 @@ bool GaussianMixture::equals(const HybridFactor &lf, double tol) const { // Check the base and the factors: return BaseFactor::equals(*e, tol) && - conditionals_.equals(e->conditionals_, - [tol](const GaussianConditional::shared_ptr &f1, - const GaussianConditional::shared_ptr &f2) { - return f1->equals(*(f2), tol); - }); + conditionals_.equals( + e->conditionals_, [tol](const auto &f1, const auto &f2) { + return (!f1 && !f2) || (f1 && f2 && f1->equals(*f2, tol)); + }); } /* *******************************************************************************/ -void GaussianMixture::print(const std::string &s, - const KeyFormatter &formatter) const { +void HybridGaussianConditional::print(const std::string &s, + const KeyFormatter &formatter) const { std::cout << (s.empty() ? 
"" : s + "\n"); if (isContinuous()) std::cout << "Continuous "; if (isDiscrete()) std::cout << "Discrete "; @@ -144,7 +210,9 @@ void GaussianMixture::print(const std::string &s, for (auto &dk : discreteKeys()) { std::cout << "(" << formatter(dk.first) << ", " << dk.second << "), "; } - std::cout << "\n"; + std::cout << std::endl + << " logNormalizationConstant: " << -negLogConstant() << std::endl + << std::endl; conditionals_.print( "", [&](Key k) { return formatter(k); }, [&](const GaussianConditional::shared_ptr &gf) -> std::string { @@ -159,7 +227,7 @@ void GaussianMixture::print(const std::string &s, } /* ************************************************************************* */ -KeyVector GaussianMixture::continuousParents() const { +KeyVector HybridGaussianConditional::continuousParents() const { // Get all parent keys: const auto range = parents(); KeyVector continuousParentKeys(range.begin(), range.end()); @@ -175,7 +243,8 @@ KeyVector GaussianMixture::continuousParents() const { } /* ************************************************************************* */ -bool GaussianMixture::allFrontalsGiven(const VectorValues &given) const { +bool HybridGaussianConditional::allFrontalsGiven( + const VectorValues &given) const { for (auto &&kv : given) { if (given.find(kv.first) == given.end()) { return false; @@ -185,71 +254,59 @@ bool GaussianMixture::allFrontalsGiven(const VectorValues &given) const { } /* ************************************************************************* */ -std::shared_ptr GaussianMixture::likelihood( +std::shared_ptr HybridGaussianConditional::likelihood( const VectorValues &given) const { if (!allFrontalsGiven(given)) { throw std::runtime_error( - "GaussianMixture::likelihood: given values are missing some frontals."); + "HybridGaussianConditional::likelihood: given values are missing some " + "frontals."); } const DiscreteKeys discreteParentKeys = discreteKeys(); const KeyVector continuousParentKeys = continuousParents(); - const 
GaussianMixtureFactor::Factors likelihoods( - conditionals_, [&](const GaussianConditional::shared_ptr &conditional) { + const HybridGaussianFactor::FactorValuePairs likelihoods( + conditionals_, + [&](const GaussianConditional::shared_ptr &conditional) + -> GaussianFactorValuePair { const auto likelihood_m = conditional->likelihood(given); - const double Cgm_Kgcm = - logConstant_ - conditional->logNormalizationConstant(); + const double Cgm_Kgcm = conditional->negLogConstant() - negLogConstant_; if (Cgm_Kgcm == 0.0) { - return likelihood_m; + return {likelihood_m, 0.0}; } else { - // Add a constant factor to the likelihood in case the noise models + // Add a constant to the likelihood in case the noise models // are not all equal. - GaussianFactorGraph gfg; - gfg.push_back(likelihood_m); - Vector c(1); - c << std::sqrt(2.0 * Cgm_Kgcm); - auto constantFactor = std::make_shared(c); - gfg.push_back(constantFactor); - return std::make_shared(gfg); + return {likelihood_m, Cgm_Kgcm}; } }); - return std::make_shared( - continuousParentKeys, discreteParentKeys, likelihoods); + return std::make_shared(discreteParentKeys, + likelihoods); } /* ************************************************************************* */ std::set DiscreteKeysAsSet(const DiscreteKeys &discreteKeys) { - std::set s; - s.insert(discreteKeys.begin(), discreteKeys.end()); + std::set s(discreteKeys.begin(), discreteKeys.end()); return s; } /* ************************************************************************* */ -/** - * @brief Helper function to get the pruner functional. - * - * @param discreteProbs The probabilities of only discrete keys. 
- * @return std::function &, const GaussianConditional::shared_ptr &)> - */ std::function &, const GaussianConditional::shared_ptr &)> -GaussianMixture::prunerFunc(const DecisionTreeFactor &discreteProbs) { +HybridGaussianConditional::prunerFunc(const DecisionTreeFactor &discreteProbs) { // Get the discrete keys as sets for the decision tree - // and the gaussian mixture. + // and the hybrid gaussian conditional. auto discreteProbsKeySet = DiscreteKeysAsSet(discreteProbs.discreteKeys()); - auto gaussianMixtureKeySet = DiscreteKeysAsSet(this->discreteKeys()); + auto hybridGaussianCondKeySet = DiscreteKeysAsSet(this->discreteKeys()); - auto pruner = [discreteProbs, discreteProbsKeySet, gaussianMixtureKeySet]( + auto pruner = [discreteProbs, discreteProbsKeySet, hybridGaussianCondKeySet]( const Assignment &choices, const GaussianConditional::shared_ptr &conditional) -> GaussianConditional::shared_ptr { // typecast so we can use this to get probability value const DiscreteValues values(choices); - // Case where the gaussian mixture has the same + // Case where the hybrid gaussian conditional has the same // discrete keys as the decision tree. 
- if (gaussianMixtureKeySet == discreteProbsKeySet) { + if (hybridGaussianCondKeySet == discreteProbsKeySet) { if (discreteProbs(values) == 0.0) { // empty aka null pointer std::shared_ptr null; @@ -261,7 +318,7 @@ GaussianMixture::prunerFunc(const DecisionTreeFactor &discreteProbs) { std::vector set_diff; std::set_difference( discreteProbsKeySet.begin(), discreteProbsKeySet.end(), - gaussianMixtureKeySet.begin(), gaussianMixtureKeySet.end(), + hybridGaussianCondKeySet.begin(), hybridGaussianCondKeySet.end(), std::back_inserter(set_diff)); const std::vector assignments = @@ -285,7 +342,7 @@ GaussianMixture::prunerFunc(const DecisionTreeFactor &discreteProbs) { } /* *******************************************************************************/ -void GaussianMixture::prune(const DecisionTreeFactor &discreteProbs) { +void HybridGaussianConditional::prune(const DecisionTreeFactor &discreteProbs) { // Functional which loops over all assignments and create a set of // GaussianConditionals auto pruner = prunerFunc(discreteProbs); @@ -295,7 +352,7 @@ void GaussianMixture::prune(const DecisionTreeFactor &discreteProbs) { } /* *******************************************************************************/ -AlgebraicDecisionTree GaussianMixture::logProbability( +AlgebraicDecisionTree HybridGaussianConditional::logProbability( const VectorValues &continuousValues) const { // functor to calculate (double) logProbability value from // GaussianConditional. 
@@ -313,32 +370,14 @@ AlgebraicDecisionTree GaussianMixture::logProbability( } /* *******************************************************************************/ -AlgebraicDecisionTree GaussianMixture::errorTree( - const VectorValues &continuousValues) const { - auto errorFunc = [&](const GaussianConditional::shared_ptr &conditional) { - return conditional->error(continuousValues) + // - logConstant_ - conditional->logNormalizationConstant(); - }; - DecisionTree error_tree(conditionals_, errorFunc); - return error_tree; -} - -/* *******************************************************************************/ -double GaussianMixture::error(const HybridValues &values) const { - // Directly index to get the conditional, no need to build the whole tree. - auto conditional = conditionals_(values.discrete()); - return conditional->error(values.continuous()) + // - logConstant_ - conditional->logNormalizationConstant(); -} - -/* *******************************************************************************/ -double GaussianMixture::logProbability(const HybridValues &values) const { +double HybridGaussianConditional::logProbability( + const HybridValues &values) const { auto conditional = conditionals_(values.discrete()); return conditional->logProbability(values.continuous()); } /* *******************************************************************************/ -double GaussianMixture::evaluate(const HybridValues &values) const { +double HybridGaussianConditional::evaluate(const HybridValues &values) const { auto conditional = conditionals_(values.discrete()); return conditional->evaluate(values.continuous()); } diff --git a/gtsam/hybrid/GaussianMixture.h b/gtsam/hybrid/HybridGaussianConditional.h similarity index 54% rename from gtsam/hybrid/GaussianMixture.h rename to gtsam/hybrid/HybridGaussianConditional.h index 521a4ca7a..68c63e7bd 100644 --- a/gtsam/hybrid/GaussianMixture.h +++ b/gtsam/hybrid/HybridGaussianConditional.h @@ -10,7 +10,7 @@ * 
-------------------------------------------------------------------------- */ /** - * @file GaussianMixture.h + * @file HybridGaussianConditional.h * @brief A hybrid conditional in the Conditional Linear Gaussian scheme * @author Fan Jiang * @author Varun Agrawal @@ -23,8 +23,8 @@ #include #include #include -#include #include +#include #include #include @@ -33,8 +33,8 @@ namespace gtsam { class HybridValues; /** - * @brief A conditional of gaussian mixtures indexed by discrete variables, as - * part of a Bayes Network. This is the result of the elimination of a + * @brief A conditional of gaussian conditionals indexed by discrete variables, + * as part of a Bayes Network. This is the result of the elimination of a * continuous variable in a hybrid scheme, such that the remaining variables are * discrete+continuous. * @@ -50,85 +50,97 @@ class HybridValues; * * @ingroup hybrid */ -class GTSAM_EXPORT GaussianMixture - : public HybridFactor, - public Conditional { +class GTSAM_EXPORT HybridGaussianConditional + : public HybridGaussianFactor, + public Conditional { public: - using This = GaussianMixture; - using shared_ptr = std::shared_ptr; - using BaseFactor = HybridFactor; - using BaseConditional = Conditional; + using This = HybridGaussianConditional; + using shared_ptr = std::shared_ptr; + using BaseFactor = HybridGaussianFactor; + using BaseConditional = Conditional; /// typedef for Decision Tree of Gaussian Conditionals using Conditionals = DecisionTree; private: Conditionals conditionals_; ///< a decision tree of Gaussian conditionals. - double logConstant_; ///< log of the normalization constant. - /** - * @brief Convert a DecisionTree of factors into a DT of Gaussian FGs. - */ - GaussianFactorGraphTree asGaussianFactorGraphTree() const; - - /** - * @brief Helper function to get the pruner functor. - * - * @param discreteProbs The pruned discrete probabilities. 
- * @return std::function &, const GaussianConditional::shared_ptr &)> - */ - std::function &, const GaussianConditional::shared_ptr &)> - prunerFunc(const DecisionTreeFactor &discreteProbs); + ///< Negative-log of the normalization constant (log(\sqrt(|2πΣ|))). + ///< Take advantage of the neg-log space so everything is a minimization + double negLogConstant_; public: /// @name Constructors /// @{ /// Default constructor, mainly for serialization. - GaussianMixture() = default; + HybridGaussianConditional() = default; /** - * @brief Construct a new GaussianMixture object. + * @brief Construct from one discrete key and vector of conditionals. + * + * @param discreteParent Single discrete parent variable + * @param conditionals Vector of conditionals with the same size as the + * cardinality of the discrete parent. + */ + HybridGaussianConditional( + const DiscreteKey &discreteParent, + const std::vector &conditionals); + + /** + * @brief Constructs a HybridGaussianConditional with means mu_i and + * standard deviations sigma_i. + * + * @param discreteParent The discrete parent or "mode" key. + * @param key The key for this conditional variable. + * @param parameters A vector of pairs (mu_i, sigma_i). + */ + HybridGaussianConditional( + const DiscreteKey &discreteParent, Key key, + const std::vector> ¶meters); + + /** + * @brief Constructs a HybridGaussianConditional with conditional means + * A × parent + b_i and standard deviations sigma_i. + * + * @param discreteParent The discrete parent or "mode" key. + * @param key The key for this conditional variable. + * @param A The matrix A. + * @param parent The key of the parent variable. + * @param parameters A vector of pairs (b_i, sigma_i). 
+ */ + HybridGaussianConditional( + const DiscreteKey &discreteParent, Key key, const Matrix &A, Key parent, + const std::vector> ¶meters); + + /** + * @brief Constructs a HybridGaussianConditional with conditional means + * A1 × parent1 + A2 × parent2 + b_i and standard deviations sigma_i. + * + * @param discreteParent The discrete parent or "mode" key. + * @param key The key for this conditional variable. + * @param A1 The first matrix. + * @param parent1 The key of the first parent variable. + * @param A2 The second matrix. + * @param parent2 The key of the second parent variable. + * @param parameters A vector of pairs (b_i, sigma_i). + */ + HybridGaussianConditional( + const DiscreteKey &discreteParent, Key key, // + const Matrix &A1, Key parent1, const Matrix &A2, Key parent2, + const std::vector> ¶meters); + + /** + * @brief Construct from multiple discrete keys and conditional tree. * - * @param continuousFrontals the continuous frontals. - * @param continuousParents the continuous parents. * @param discreteParents the discrete parents. Will be placed last. * @param conditionals a decision tree of GaussianConditionals. The number of * conditionals should be C^(number of discrete parents), where C is the * cardinality of the DiscreteKeys in discreteParents, since the * discreteParents will be used as the labels in the decision tree. 
*/ - GaussianMixture(const KeyVector &continuousFrontals, - const KeyVector &continuousParents, - const DiscreteKeys &discreteParents, - const Conditionals &conditionals); - - /** - * @brief Make a Gaussian Mixture from a list of Gaussian conditionals - * - * @param continuousFrontals The continuous frontal variables - * @param continuousParents The continuous parent variables - * @param discreteParents Discrete parents variables - * @param conditionals List of conditionals - */ - GaussianMixture(KeyVector &&continuousFrontals, KeyVector &&continuousParents, - DiscreteKeys &&discreteParents, - std::vector &&conditionals); - - /** - * @brief Make a Gaussian Mixture from a list of Gaussian conditionals - * - * @param continuousFrontals The continuous frontal variables - * @param continuousParents The continuous parent variables - * @param discreteParents Discrete parents variables - * @param conditionals List of conditionals - */ - GaussianMixture( - const KeyVector &continuousFrontals, const KeyVector &continuousParents, - const DiscreteKeys &discreteParents, - const std::vector &conditionals); + HybridGaussianConditional(const DiscreteKeys &discreteParents, + const Conditionals &conditionals); /// @} /// @name Testable @@ -139,7 +151,7 @@ class GTSAM_EXPORT GaussianMixture /// Print utility void print( - const std::string &s = "GaussianMixture\n", + const std::string &s = "HybridGaussianConditional\n", const KeyFormatter &formatter = DefaultKeyFormatter) const override; /// @} @@ -147,31 +159,43 @@ class GTSAM_EXPORT GaussianMixture /// @{ /// @brief Return the conditional Gaussian for the given discrete assignment. - GaussianConditional::shared_ptr operator()( + GaussianConditional::shared_ptr choose( const DiscreteValues &discreteValues) const; + /// @brief Syntactic sugar for choose. 
+ GaussianConditional::shared_ptr operator()( + const DiscreteValues &discreteValues) const { + return choose(discreteValues); + } + /// Returns the total number of continuous components size_t nrComponents() const; /// Returns the continuous keys among the parents. KeyVector continuousParents() const; - /// The log normalization constant is max of the the individual - /// log-normalization constants. - double logNormalizationConstant() const override { return logConstant_; } + /** + * @brief Return log normalization constant in negative log space. + * + * The log normalization constant is the min of the individual + * log-normalization constants. + * + * @return double + */ + inline double negLogConstant() const override { return negLogConstant_; } /** - * Create a likelihood factor for a Gaussian mixture, return a Mixture factor - * on the parents. + * Create a likelihood factor for a hybrid Gaussian conditional, + * return a hybrid Gaussian factor on the parents. */ - std::shared_ptr likelihood( + std::shared_ptr likelihood( const VectorValues &given) const; /// Getter for the underlying Conditionals DecisionTree const Conditionals &conditionals() const; /** - * @brief Compute logProbability of the GaussianMixture as a tree. + * @brief Compute logProbability of the HybridGaussianConditional as a tree. * * @param continuousValues The continuous VectorValues. * @return AlgebraicDecisionTree A decision tree with the same keys @@ -181,43 +205,7 @@ class GTSAM_EXPORT GaussianMixture const VectorValues &continuousValues) const; /** - * @brief Compute the error of this Gaussian Mixture. - * - * This requires some care, as different mixture components may have - * different normalization constants. Let's consider p(x|y,m), where m is - * discrete. We need the error to satisfy the invariant: - * - * error(x;y,m) = K - log(probability(x;y,m)) - * - * For all x,y,m. 
But note that K, the (log) normalization constant defined - * in Conditional.h, should not depend on x, y, or m, only on the parameters - * of the density. Hence, we delegate to the underlying Gaussian - * conditionals, indexed by m, which do satisfy: - * - * log(probability_m(x;y)) = K_m - error_m(x;y) - * - * We resolve by having K == max(K_m) and - * - * error(x;y,m) = error_m(x;y) + K - K_m - * - * which also makes error(x;y,m) >= 0 for all x,y,m. - * - * @param values Continuous values and discrete assignment. - * @return double - */ - double error(const HybridValues &values) const override; - - /** - * @brief Compute error of the GaussianMixture as a tree. - * - * @param continuousValues The continuous VectorValues. - * @return AlgebraicDecisionTree A decision tree on the discrete keys - * only, with the leaf values as the error for each assignment. - */ - AlgebraicDecisionTree errorTree(const VectorValues &continuousValues) const; - - /** - * @brief Compute the logProbability of this Gaussian Mixture. + * @brief Compute the logProbability of this hybrid Gaussian conditional. * * @param values Continuous values and discrete assignment. * @return double @@ -240,17 +228,30 @@ class GTSAM_EXPORT GaussianMixture */ void prune(const DecisionTreeFactor &discreteProbs); - /** - * @brief Merge the Gaussian Factor Graphs in `this` and `sum` while - * maintaining the decision tree structure. - * - * @param sum Decision Tree of Gaussian Factor Graphs - * @return GaussianFactorGraphTree - */ - GaussianFactorGraphTree add(const GaussianFactorGraphTree &sum) const; /// @} private: + /// Helper struct for private constructor. + struct Helper; + + /// Private constructor that uses helper struct above. + HybridGaussianConditional(const DiscreteKeys &discreteParents, + const Helper &helper); + + /// Convert to a DecisionTree of Gaussian factor graphs. 
+ GaussianFactorGraphTree asGaussianFactorGraphTree() const; + + /** + * @brief Get the pruner function from discrete probabilities. + * + * @param discreteProbs The probabilities of only discrete keys. + * @return std::function &, const GaussianConditional::shared_ptr &)> + */ + std::function &, const GaussianConditional::shared_ptr &)> + prunerFunc(const DecisionTreeFactor &prunedProbabilities); + /// Check whether `given` has values for all frontal keys. bool allFrontalsGiven(const VectorValues &given) const; @@ -271,6 +272,7 @@ std::set DiscreteKeysAsSet(const DiscreteKeys &discreteKeys); // traits template <> -struct traits : public Testable {}; +struct traits + : public Testable {}; } // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp new file mode 100644 index 000000000..b04db4977 --- /dev/null +++ b/gtsam/hybrid/HybridGaussianFactor.cpp @@ -0,0 +1,246 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridGaussianFactor.cpp + * @brief A set of Gaussian factors indexed by a set of discrete keys. + * @author Fan Jiang + * @author Varun Agrawal + * @author Frank Dellaert + * @date Mar 12, 2022 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace gtsam { + +/* *******************************************************************************/ +HybridGaussianFactor::Factors HybridGaussianFactor::augment( + const FactorValuePairs &factors) { + // Find the minimum value so we can "proselytize" to positive values. + // Done because we can't have sqrt of negative numbers. 
+ Factors gaussianFactors; + AlgebraicDecisionTree valueTree; + std::tie(gaussianFactors, valueTree) = unzip(factors); + + // Compute minimum value for normalization. + double min_value = valueTree.min(); + + // Finally, update the [A|b] matrices. + auto update = [&min_value](const GaussianFactorValuePair &gfv) { + auto [gf, value] = gfv; + + auto jf = std::dynamic_pointer_cast(gf); + if (!jf) return gf; + + double normalized_value = value - min_value; + + // If the value is 0, do nothing + if (normalized_value == 0.0) return gf; + + GaussianFactorGraph gfg; + gfg.push_back(jf); + + Vector c(1); + // When hiding c inside the `b` vector, value == 0.5*c^2 + c << std::sqrt(2.0 * normalized_value); + auto constantFactor = std::make_shared(c); + + gfg.push_back(constantFactor); + return std::dynamic_pointer_cast( + std::make_shared(gfg)); + }; + return Factors(factors, update); +} + +/* *******************************************************************************/ +struct HybridGaussianFactor::ConstructorHelper { + KeyVector continuousKeys; // Continuous keys extracted from factors + DiscreteKeys discreteKeys; // Discrete keys provided to the constructors + FactorValuePairs pairs; // Used only if factorsTree is empty + Factors factorsTree; + + ConstructorHelper(const DiscreteKey &discreteKey, + const std::vector &factors) + : discreteKeys({discreteKey}) { + // Extract continuous keys from the first non-null factor + for (const auto &factor : factors) { + if (factor && continuousKeys.empty()) { + continuousKeys = factor->keys(); + break; + } + } + + // Build the DecisionTree from the factor vector + factorsTree = Factors(discreteKeys, factors); + } + + ConstructorHelper(const DiscreteKey &discreteKey, + const std::vector &factorPairs) + : discreteKeys({discreteKey}) { + // Extract continuous keys from the first non-null factor + for (const auto &pair : factorPairs) { + if (pair.first && continuousKeys.empty()) { + continuousKeys = pair.first->keys(); + break; + } + } + 
+ // Build the FactorValuePairs DecisionTree + pairs = FactorValuePairs(discreteKeys, factorPairs); + } + + ConstructorHelper(const DiscreteKeys &discreteKeys, + const FactorValuePairs &factorPairs) + : discreteKeys(discreteKeys) { + // Extract continuous keys from the first non-null factor + factorPairs.visit([&](const GaussianFactorValuePair &pair) { + if (pair.first && continuousKeys.empty()) { + continuousKeys = pair.first->keys(); + } + }); + + // Build the FactorValuePairs DecisionTree + pairs = factorPairs; + } +}; + +/* *******************************************************************************/ +HybridGaussianFactor::HybridGaussianFactor(const ConstructorHelper &helper) + : Base(helper.continuousKeys, helper.discreteKeys), + factors_(helper.factorsTree.empty() ? augment(helper.pairs) + : helper.factorsTree) {} + +/* *******************************************************************************/ +HybridGaussianFactor::HybridGaussianFactor( + const DiscreteKey &discreteKey, + const std::vector &factors) + : HybridGaussianFactor(ConstructorHelper(discreteKey, factors)) {} + +/* *******************************************************************************/ +HybridGaussianFactor::HybridGaussianFactor( + const DiscreteKey &discreteKey, + const std::vector &factorPairs) + : HybridGaussianFactor(ConstructorHelper(discreteKey, factorPairs)) {} + +/* *******************************************************************************/ +HybridGaussianFactor::HybridGaussianFactor(const DiscreteKeys &discreteKeys, + const FactorValuePairs &factors) + : HybridGaussianFactor(ConstructorHelper(discreteKeys, factors)) {} + +/* *******************************************************************************/ +bool HybridGaussianFactor::equals(const HybridFactor &lf, double tol) const { + const This *e = dynamic_cast(&lf); + if (e == nullptr) return false; + + // This will return false if either factors_ is empty or e->factors_ is + // empty, but not if both are empty or 
both are not empty: + if (factors_.empty() ^ e->factors_.empty()) return false; + + // Check the base and the factors: + return Base::equals(*e, tol) && + factors_.equals(e->factors_, [tol](const auto &f1, const auto &f2) { + return (!f1 && !f2) || (f1 && f2 && f1->equals(*f2, tol)); + }); +} + +/* *******************************************************************************/ +void HybridGaussianFactor::print(const std::string &s, + const KeyFormatter &formatter) const { + std::cout << (s.empty() ? "" : s + "\n"); + std::cout << "HybridGaussianFactor" << std::endl; + HybridFactor::print("", formatter); + std::cout << "{\n"; + if (factors_.empty()) { + std::cout << " empty" << std::endl; + } else { + factors_.print( + "", [&](Key k) { return formatter(k); }, + [&](const sharedFactor &gf) -> std::string { + RedirectCout rd; + std::cout << ":\n"; + if (gf) { + gf->print("", formatter); + return rd.str(); + } else { + return "nullptr"; + } + }); + } + std::cout << "}" << std::endl; +} + +/* *******************************************************************************/ +HybridGaussianFactor::sharedFactor HybridGaussianFactor::operator()( + const DiscreteValues &assignment) const { + return factors_(assignment); +} + +/* *******************************************************************************/ +GaussianFactorGraphTree HybridGaussianFactor::add( + const GaussianFactorGraphTree &sum) const { + using Y = GaussianFactorGraph; + auto add = [](const Y &graph1, const Y &graph2) { + auto result = graph1; + result.push_back(graph2); + return result; + }; + const auto tree = asGaussianFactorGraphTree(); + return sum.empty() ? 
tree : sum.apply(tree, add); +} + +/* *******************************************************************************/ +GaussianFactorGraphTree HybridGaussianFactor::asGaussianFactorGraphTree() + const { + auto wrap = [](const sharedFactor &gf) { return GaussianFactorGraph{gf}; }; + return {factors_, wrap}; +} + +/* *******************************************************************************/ +/// Helper method to compute the error of a component. +static double PotentiallyPrunedComponentError( + const GaussianFactor::shared_ptr &gf, const VectorValues &values) { + // Check if valid pointer + if (gf) { + return gf->error(values); + } else { + // If nullptr this component was pruned, so we return maximum error. This + // way the negative exponential will give a probability value close to 0.0. + return std::numeric_limits::max(); + } +} + +/* *******************************************************************************/ +AlgebraicDecisionTree HybridGaussianFactor::errorTree( + const VectorValues &continuousValues) const { + // functor to convert from sharedFactor to double error value. + auto errorFunc = [&continuousValues](const sharedFactor &gf) { + return PotentiallyPrunedComponentError(gf, continuousValues); + }; + DecisionTree error_tree(factors_, errorFunc); + return error_tree; +} + +/* *******************************************************************************/ +double HybridGaussianFactor::error(const HybridValues &values) const { + // Directly index to get the component, no need to build the whole tree. 
+ const sharedFactor gf = factors_(values.discrete()); + return PotentiallyPrunedComponentError(gf, values.continuous()); +} + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactor.h b/gtsam/hybrid/HybridGaussianFactor.h new file mode 100644 index 000000000..e5a575409 --- /dev/null +++ b/gtsam/hybrid/HybridGaussianFactor.h @@ -0,0 +1,213 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridGaussianFactor.h + * @brief A set of GaussianFactors, indexed by a set of discrete keys. + * @author Fan Jiang + * @author Varun Agrawal + * @author Frank Dellaert + * @date Mar 12, 2022 + */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace gtsam { + +class HybridValues; +class DiscreteValues; +class VectorValues; + +/// Alias for pair of GaussianFactor::shared_pointer and a double value. +using GaussianFactorValuePair = std::pair; + +/** + * @brief Implementation of a discrete-conditioned hybrid factor. + * Implements a joint discrete-continuous factor where the discrete variable + * serves to "select" a component corresponding to a GaussianFactor. + * + * Represents the underlying hybrid Gaussian components as a Decision Tree, + * where the set of discrete variables indexes to + * the continuous gaussian distribution. + * + * In factor graphs the error function typically returns 0.5*|A*x - b|^2, i.e., + * the negative log-likelihood for a Gaussian noise model. + * In hybrid factor graphs we allow *adding* an arbitrary scalar dependent on + * the discrete assignment. 
+ * For example, adding a 70/30 mode probability is supported by providing the + * scalars $-log(.7)$ and $-log(.3)$. + * Note that adding a common constant will not make any difference in the + * optimization, so $-log(70)$ and $-log(30)$ work just as well. + * + * @ingroup hybrid + */ +class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { + public: + using Base = HybridFactor; + using This = HybridGaussianFactor; + using shared_ptr = std::shared_ptr; + + using sharedFactor = std::shared_ptr; + + /// typedef for Decision Tree of Gaussian factors and arbitrary value. + using FactorValuePairs = DecisionTree; + /// typedef for Decision Tree of Gaussian factors. + using Factors = DecisionTree; + + private: + /// Decision tree of Gaussian factors indexed by discrete keys. + Factors factors_; + + public: + /// @name Constructors + /// @{ + + /// Default constructor, mainly for serialization. + HybridGaussianFactor() = default; + + /** + * @brief Construct a new HybridGaussianFactor on a single discrete key, + * providing the factors for each mode m as a vector of factors Ï•_m(x). + * The value Ï•(x,m) for the factor is simply Ï•_m(x). + * + * @param discreteKey The discrete key for the "mode", indexing components. + * @param factors Vector of gaussian factors, one for each mode. + */ + HybridGaussianFactor(const DiscreteKey &discreteKey, + const std::vector &factors); + + /** + * @brief Construct a new HybridGaussianFactor on a single discrete key, + * including a scalar error value for each mode m. The factors and scalars are + * provided as a vector of pairs (Ï•_m(x), E_m). + * The value Ï•(x,m) for the factor is now Ï•_m(x) + E_m. + * + * @param discreteKey The discrete key for the "mode", indexing components. + * @param factorPairs Vector of gaussian factor-scalar pairs, one per mode. 
+ */ + HybridGaussianFactor(const DiscreteKey &discreteKey, + const std::vector &factorPairs); + + /** + * @brief Construct a new HybridGaussianFactor on a several discrete keys M, + * including a scalar error value for each assignment m. The factors and + * scalars are provided as a DecisionTree of pairs (Ï•_M(x), E_M). + * The value Ï•(x,M) for the factor is again Ï•_m(x) + E_m. + * + * @param discreteKeys Discrete variables and their cardinalities. + * @param factors The decision tree of Gaussian factor/scalar pairs. + */ + HybridGaussianFactor(const DiscreteKeys &discreteKeys, + const FactorValuePairs &factors); + + /// @} + /// @name Testable + /// @{ + + bool equals(const HybridFactor &lf, double tol = 1e-9) const override; + + void print(const std::string &s = "", const KeyFormatter &formatter = + DefaultKeyFormatter) const override; + + /// @} + /// @name Standard API + /// @{ + + /// Get factor at a given discrete assignment. + sharedFactor operator()(const DiscreteValues &assignment) const; + + /** + * @brief Combine the Gaussian Factor Graphs in `sum` and `this` while + * maintaining the original tree structure. + * + * @param sum Decision Tree of Gaussian Factor Graphs indexed by the + * variables. + * @return Sum + */ + GaussianFactorGraphTree add(const GaussianFactorGraphTree &sum) const; + + /** + * @brief Compute error of the HybridGaussianFactor as a tree. + * + * @param continuousValues The continuous VectorValues. + * @return AlgebraicDecisionTree A decision tree with the same keys + * as the factors involved, and leaf values as the error. + */ + AlgebraicDecisionTree errorTree( + const VectorValues &continuousValues) const override; + + /** + * @brief Compute the log-likelihood, including the log-normalizing constant. 
+ * @return double + */ + double error(const HybridValues &values) const override; + + /// Getter for GaussianFactor decision tree + const Factors &factors() const { return factors_; } + + /// Add HybridNonlinearFactor to a Sum, syntactic sugar. + friend GaussianFactorGraphTree &operator+=( + GaussianFactorGraphTree &sum, const HybridGaussianFactor &factor) { + sum = factor.add(sum); + return sum; + } + /// @} + + protected: + /** + * @brief Helper function to return factors and functional to create a + * DecisionTree of Gaussian Factor Graphs. + * + * @return GaussianFactorGraphTree + */ + GaussianFactorGraphTree asGaussianFactorGraphTree() const; + + private: + /** + * @brief Helper function to augment the [A|b] matrices in the factor + * components with the additional scalar values. This is done by storing the + * value in the `b` vector as an additional row. + * + * @param factors DecisionTree of GaussianFactors and arbitrary scalars. + * Gaussian factor in factors. + * @return HybridGaussianFactor::Factors + */ + static Factors augment(const FactorValuePairs &factors); + + /// Helper struct to assist private constructor below. + struct ConstructorHelper; + + // Private constructor using ConstructorHelper above. 
+ HybridGaussianFactor(const ConstructorHelper &helper); + +#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(ARCHIVE &ar, const unsigned int /*version*/) { + ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base); + ar &BOOST_SERIALIZATION_NVP(factors_); + } +#endif +}; + +// traits +template <> +struct traits : public Testable {}; + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index b764dc9e0..8a2a7fd15 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -23,11 +23,11 @@ #include #include #include -#include -#include #include #include #include +#include +#include #include #include #include @@ -74,6 +74,32 @@ const Ordering HybridOrdering(const HybridGaussianFactorGraph &graph) { index, KeyVector(discrete_keys.begin(), discrete_keys.end()), true); } +/* ************************************************************************ */ +static void printFactor(const std::shared_ptr &factor, + const DiscreteValues &assignment, + const KeyFormatter &keyFormatter) { + if (auto hgf = std::dynamic_pointer_cast(factor)) { + hgf->operator()(assignment) + ->print("HybridGaussianFactor, component:", keyFormatter); + } else if (auto gf = std::dynamic_pointer_cast(factor)) { + factor->print("GaussianFactor:\n", keyFormatter); + } else if (auto df = std::dynamic_pointer_cast(factor)) { + factor->print("DiscreteFactor:\n", keyFormatter); + } else if (auto hc = std::dynamic_pointer_cast(factor)) { + if (hc->isContinuous()) { + factor->print("GaussianConditional:\n", keyFormatter); + } else if (hc->isDiscrete()) { + factor->print("DiscreteConditional:\n", keyFormatter); + } else { + hc->asHybrid() + ->choose(assignment) + ->print("HybridConditional, component:\n", keyFormatter); + } + } else { + factor->print("Unknown factor type\n", keyFormatter); + } +} + /* 
************************************************************************ */ void HybridGaussianFactorGraph::printErrors( const HybridValues &values, const std::string &str, @@ -83,71 +109,19 @@ void HybridGaussianFactorGraph::printErrors( &printCondition) const { std::cout << str << "size: " << size() << std::endl << std::endl; - std::stringstream ss; - for (size_t i = 0; i < factors_.size(); i++) { auto &&factor = factors_[i]; - std::cout << "Factor " << i << ": "; - - // Clear the stringstream - ss.str(std::string()); - - if (auto gmf = std::dynamic_pointer_cast(factor)) { - if (factor == nullptr) { - std::cout << "nullptr" - << "\n"; - } else { - factor->print(ss.str(), keyFormatter); - std::cout << "error = "; - gmf->errorTree(values.continuous()).print("", keyFormatter); - std::cout << std::endl; - } - } else if (auto hc = std::dynamic_pointer_cast(factor)) { - if (factor == nullptr) { - std::cout << "nullptr" - << "\n"; - } else { - factor->print(ss.str(), keyFormatter); - - if (hc->isContinuous()) { - std::cout << "error = " << hc->asGaussian()->error(values) << "\n"; - } else if (hc->isDiscrete()) { - std::cout << "error = "; - hc->asDiscrete()->errorTree().print("", keyFormatter); - std::cout << "\n"; - } else { - // Is hybrid - std::cout << "error = "; - hc->asMixture()->errorTree(values.continuous()).print(); - std::cout << "\n"; - } - } - } else if (auto gf = std::dynamic_pointer_cast(factor)) { - const double errorValue = (factor != nullptr ? 
gf->error(values) : .0); - if (!printCondition(factor.get(), errorValue, i)) - continue; // User-provided filter did not pass - - if (factor == nullptr) { - std::cout << "nullptr" - << "\n"; - } else { - factor->print(ss.str(), keyFormatter); - std::cout << "error = " << errorValue << "\n"; - } - } else if (auto df = std::dynamic_pointer_cast(factor)) { - if (factor == nullptr) { - std::cout << "nullptr" - << "\n"; - } else { - factor->print(ss.str(), keyFormatter); - std::cout << "error = "; - df->errorTree().print("", keyFormatter); - } - - } else { + if (factor == nullptr) { + std::cout << "Factor " << i << ": nullptr\n"; continue; } + const double errorValue = factor->error(values); + if (!printCondition(factor.get(), errorValue, i)) + continue; // User-provided filter did not pass + // Print the factor + std::cout << "Factor " << i << ", error = " << errorValue << "\n"; + printFactor(factor, values.discrete(), keyFormatter); std::cout << "\n"; } std::cout.flush(); @@ -181,12 +155,12 @@ GaussianFactorGraphTree HybridGaussianFactorGraph::assembleGraphTree() const { // TODO(dellaert): just use a virtual method defined in HybridFactor. 
if (auto gf = dynamic_pointer_cast(f)) { result = addGaussian(result, gf); - } else if (auto gmf = dynamic_pointer_cast(f)) { + } else if (auto gmf = dynamic_pointer_cast(f)) { result = gmf->add(result); - } else if (auto gm = dynamic_pointer_cast(f)) { + } else if (auto gm = dynamic_pointer_cast(f)) { result = gm->add(result); } else if (auto hc = dynamic_pointer_cast(f)) { - if (auto gm = hc->asMixture()) { + if (auto gm = hc->asHybrid()) { result = gm->add(result); } else if (auto g = hc->asGaussian()) { result = addGaussian(result, g); @@ -233,6 +207,25 @@ continuousElimination(const HybridGaussianFactorGraph &factors, return {std::make_shared(result.first), result.second}; } +/* ************************************************************************ */ +/** + * @brief Exponentiate (not necessarily normalized) negative log-values, + * normalize, and then return as AlgebraicDecisionTree. + * + * @param logValues DecisionTree of (unnormalized) log values. + * @return AlgebraicDecisionTree + */ +static AlgebraicDecisionTree probabilitiesFromNegativeLogValues( + const AlgebraicDecisionTree &logValues) { + // Perform normalization + double min_log = logValues.min(); + AlgebraicDecisionTree probabilities = DecisionTree( + logValues, [&min_log](const double x) { return exp(-(x - min_log)); }); + probabilities = probabilities.normalize(probabilities.sum()); + + return probabilities; +} + /* ************************************************************************ */ static std::pair> discreteElimination(const HybridGaussianFactorGraph &factors, @@ -242,6 +235,22 @@ discreteElimination(const HybridGaussianFactorGraph &factors, for (auto &f : factors) { if (auto df = dynamic_pointer_cast(f)) { dfg.push_back(df); + } else if (auto gmf = dynamic_pointer_cast(f)) { + // Case where we have a HybridGaussianFactor with no continuous keys. + // In this case, compute discrete probabilities. 
+ auto logProbability = + [&](const GaussianFactor::shared_ptr &factor) -> double { + if (!factor) return 0.0; + return factor->error(VectorValues()); + }; + AlgebraicDecisionTree logProbabilities = + DecisionTree(gmf->factors(), logProbability); + + AlgebraicDecisionTree probabilities = + probabilitiesFromNegativeLogValues(logProbabilities); + dfg.emplace_shared(gmf->discreteKeys(), + probabilities); + } else if (auto orphan = dynamic_pointer_cast(f)) { // Ignore orphaned clique. // TODO(dellaert): is this correct? If so explain here. @@ -277,63 +286,74 @@ GaussianFactorGraphTree removeEmpty(const GaussianFactorGraphTree &sum) { /* ************************************************************************ */ using Result = std::pair, - GaussianMixtureFactor::sharedFactor>; + HybridGaussianFactor::sharedFactor>; -// Integrate the probability mass in the last continuous conditional using -// the unnormalized probability q(μ;m) = exp(-error(μ;m)) at the mean. -// discrete_probability = exp(-error(μ;m)) * sqrt(det(2Ï€ Σ_m)) +/** + * Compute the probability p(μ;m) = exp(-error(μ;m)) * sqrt(det(2Ï€ Σ_m) + * from the residual error ||b||^2 at the mean μ. + * The residual error contains no keys, and only + * depends on the discrete separator if present. + */ static std::shared_ptr createDiscreteFactor( const DecisionTree &eliminationResults, const DiscreteKeys &discreteSeparator) { - auto probability = [&](const Result &pair) -> double { + auto negLogProbability = [&](const Result &pair) -> double { const auto &[conditional, factor] = pair; static const VectorValues kEmpty; // If the factor is not null, it has no keys, just contains the residual. if (!factor) return 1.0; // TODO(dellaert): not loving this. 
- return exp(-factor->error(kEmpty)) / conditional->normalizationConstant(); + + // Negative logspace version of: + // exp(-factor->error(kEmpty)) / conditional->normalizationConstant(); + // negLogConstant gives `-log(k)` + // which is `-log(k) = log(1/k) = log(\sqrt{|2πΣ|})`. + return factor->error(kEmpty) - conditional->negLogConstant(); }; - DecisionTree probabilities(eliminationResults, probability); + AlgebraicDecisionTree negLogProbabilities( + DecisionTree(eliminationResults, negLogProbability)); + AlgebraicDecisionTree probabilities = + probabilitiesFromNegativeLogValues(negLogProbabilities); return std::make_shared(discreteSeparator, probabilities); } -// Create GaussianMixtureFactor on the separator, taking care to correct +// Create HybridGaussianFactor on the separator, taking care to correct // for conditional constants. -static std::shared_ptr createGaussianMixtureFactor( +static std::shared_ptr createHybridGaussianFactor( const DecisionTree &eliminationResults, - const KeyVector &continuousSeparator, const DiscreteKeys &discreteSeparator) { // Correct for the normalization constant used up by the conditional - auto correct = [&](const Result &pair) -> GaussianFactor::shared_ptr { + auto correct = [&](const Result &pair) -> GaussianFactorValuePair { const auto &[conditional, factor] = pair; if (factor) { auto hf = std::dynamic_pointer_cast(factor); if (!hf) throw std::runtime_error("Expected HessianFactor!"); - hf->constantTerm() += 2.0 * conditional->logNormalizationConstant(); + // Add 2.0 term since the constant term will be premultiplied by 0.5 + // as per the Hessian definition, + // and negative since we want log(k) + hf->constantTerm() += -2.0 * conditional->negLogConstant(); } - return factor; + return {factor, 0.0}; }; - DecisionTree newFactors(eliminationResults, - correct); + DecisionTree newFactors(eliminationResults, + correct); - return std::make_shared(continuousSeparator, - discreteSeparator, newFactors); + return 
std::make_shared(discreteSeparator, newFactors); } -static std::pair> -hybridElimination(const HybridGaussianFactorGraph &factors, - const Ordering &frontalKeys, - const KeyVector &continuousSeparator, - const std::set &discreteSeparatorSet) { - // NOTE: since we use the special JunctionTree, - // only possibility is continuous conditioned on discrete. - DiscreteKeys discreteSeparator(discreteSeparatorSet.begin(), - discreteSeparatorSet.end()); +/* *******************************************************************************/ +std::pair> +HybridGaussianFactorGraph::eliminate(const Ordering &keys) const { + // Since we eliminate all continuous variables first, + // the discrete separator will be *all* the discrete keys. + const std::set keysForDiscreteVariables = discreteKeys(); + DiscreteKeys discreteSeparator(keysForDiscreteVariables.begin(), + keysForDiscreteVariables.end()); // Collect all the factors to create a set of Gaussian factor graphs in a // decision tree indexed by all discrete keys involved. - GaussianFactorGraphTree factorGraphTree = factors.assembleGraphTree(); + GaussianFactorGraphTree factorGraphTree = assembleGraphTree(); // Convert factor graphs with a nullptr to an empty factor graph. // This is done after assembly since it is non-trivial to keep track of which @@ -341,12 +361,17 @@ hybridElimination(const HybridGaussianFactorGraph &factors, factorGraphTree = removeEmpty(factorGraphTree); // This is the elimination method on the leaf nodes + bool someContinuousLeft = false; auto eliminate = [&](const GaussianFactorGraph &graph) -> Result { if (graph.empty()) { return {nullptr, nullptr}; } - auto result = EliminatePreferCholesky(graph, frontalKeys); + // Expensive elimination of product factor. 
+ auto result = EliminatePreferCholesky(graph, keys); + + // Record whether there any continuous variables left + someContinuousLeft |= !result.second->empty(); return result; }; @@ -355,21 +380,20 @@ hybridElimination(const HybridGaussianFactorGraph &factors, DecisionTree eliminationResults(factorGraphTree, eliminate); // If there are no more continuous parents we create a DiscreteFactor with the - // error for each discrete choice. Otherwise, create a GaussianMixtureFactor + // error for each discrete choice. Otherwise, create a HybridGaussianFactor // on the separator, taking care to correct for conditional constants. auto newFactor = - continuousSeparator.empty() - ? createDiscreteFactor(eliminationResults, discreteSeparator) - : createGaussianMixtureFactor(eliminationResults, continuousSeparator, - discreteSeparator); + someContinuousLeft + ? createHybridGaussianFactor(eliminationResults, discreteSeparator) + : createDiscreteFactor(eliminationResults, discreteSeparator); - // Create the GaussianMixture from the conditionals - GaussianMixture::Conditionals conditionals( + // Create the HybridGaussianConditional from the conditionals + HybridGaussianConditional::Conditionals conditionals( eliminationResults, [](const Result &pair) { return pair.first; }); - auto gaussianMixture = std::make_shared( - frontalKeys, continuousSeparator, discreteSeparator, conditionals); + auto hybridGaussian = std::make_shared( + discreteSeparator, conditionals); - return {std::make_shared(gaussianMixture), newFactor}; + return {std::make_shared(hybridGaussian), newFactor}; } /* ************************************************************************ @@ -388,11 +412,11 @@ hybridElimination(const HybridGaussianFactorGraph &factors, */ std::pair> // EliminateHybrid(const HybridGaussianFactorGraph &factors, - const Ordering &frontalKeys) { + const Ordering &keys) { // NOTE: Because we are in the Conditional Gaussian regime there are only // a few cases: - // 1. 
continuous variable, make a Gaussian Mixture if there are hybrid - // factors; + // 1. continuous variable, make a hybrid Gaussian conditional if there are + // hybrid factors; // 2. continuous variable, we make a Gaussian Factor if there are no hybrid // factors; // 3. discrete variable, no continuous factor is allowed @@ -413,14 +437,14 @@ EliminateHybrid(const HybridGaussianFactorGraph &factors, // Because of all these reasons, we carefully consider how to // implement the hybrid factors so that we do not get poor performance. - // The first thing is how to represent the GaussianMixture. + // The first thing is how to represent the HybridGaussianConditional. // A very possible scenario is that the incoming factors will have different // levels of discrete keys. For example, imagine we are going to eliminate the // fragment: $\phi(x1,c1,c2)$, $\phi(x1,c2,c3)$, which is perfectly valid. // Now we will need to know how to retrieve the corresponding continuous // densities for the assignment (c1,c2,c3) (OR (c2,c3,c1), note there is NO // defined order!). We also need to consider when there is pruning. Two - // mixture factors could have different pruning patterns - one could have + // hybrid factors could have different pruning patterns - one could have // (c1=0,c2=1) pruned, and another could have (c2=0,c3=1) pruned, and this // creates a big problem in how to identify the intersection of non-pruned // branches. @@ -462,39 +486,13 @@ EliminateHybrid(const HybridGaussianFactorGraph &factors, if (only_discrete) { // Case 1: we are only dealing with discrete - return discreteElimination(factors, frontalKeys); + return discreteElimination(factors, keys); } else if (only_continuous) { // Case 2: we are only dealing with continuous - return continuousElimination(factors, frontalKeys); + return continuousElimination(factors, keys); } else { // Case 3: We are now in the hybrid land! 
- KeySet frontalKeysSet(frontalKeys.begin(), frontalKeys.end()); - - // Find all the keys in the set of continuous keys - // which are not in the frontal keys. This is our continuous separator. - KeyVector continuousSeparator; - auto continuousKeySet = factors.continuousKeySet(); - std::set_difference( - continuousKeySet.begin(), continuousKeySet.end(), - frontalKeysSet.begin(), frontalKeysSet.end(), - std::inserter(continuousSeparator, continuousSeparator.begin())); - - // Similarly for the discrete separator. - KeySet discreteSeparatorSet; - std::set discreteSeparator; - auto discreteKeySet = factors.discreteKeySet(); - std::set_difference( - discreteKeySet.begin(), discreteKeySet.end(), frontalKeysSet.begin(), - frontalKeysSet.end(), - std::inserter(discreteSeparatorSet, discreteSeparatorSet.begin())); - // Convert from set of keys to set of DiscreteKeys - auto discreteKeyMap = factors.discreteKeyMap(); - for (auto key : discreteSeparatorSet) { - discreteSeparator.insert(discreteKeyMap.at(key)); - } - - return hybridElimination(factors, frontalKeys, continuousSeparator, - discreteSeparator); + return factors.eliminate(keys); } } @@ -502,30 +500,20 @@ EliminateHybrid(const HybridGaussianFactorGraph &factors, AlgebraicDecisionTree HybridGaussianFactorGraph::errorTree( const VectorValues &continuousValues) const { AlgebraicDecisionTree error_tree(0.0); - // Iterate over each factor. - for (auto &f : factors_) { - // TODO(dellaert): just use a virtual method defined in HybridFactor. - AlgebraicDecisionTree factor_error; - - if (auto gaussianMixture = dynamic_pointer_cast(f)) { - // Compute factor error and add it. - error_tree = error_tree + gaussianMixture->errorTree(continuousValues); - } else if (auto gaussian = dynamic_pointer_cast(f)) { - // If continuous only, get the (double) error - // and add it to the error_tree - double error = gaussian->error(continuousValues); - // Add the gaussian factor error to every leaf of the error tree. 
- error_tree = error_tree.apply( - [error](double leaf_value) { return leaf_value + error; }); - } else if (dynamic_pointer_cast(f)) { - // If factor at `idx` is discrete-only, we skip. + for (auto &factor : factors_) { + if (auto f = std::dynamic_pointer_cast(factor)) { + // Check for HybridFactor, and call errorTree + error_tree = error_tree + f->errorTree(continuousValues); + } else if (auto f = std::dynamic_pointer_cast(factor)) { + // Skip discrete factors continue; } else { - throwRuntimeError("HybridGaussianFactorGraph::error(VV)", f); + // Everything else is a continuous only factor + HybridValues hv(continuousValues, DiscreteValues()); + error_tree = error_tree + AlgebraicDecisionTree(factor->error(hv)); } } - return error_tree; } @@ -547,4 +535,24 @@ AlgebraicDecisionTree HybridGaussianFactorGraph::probPrime( return prob_tree; } +/* ************************************************************************ */ +GaussianFactorGraph HybridGaussianFactorGraph::operator()( + const DiscreteValues &assignment) const { + GaussianFactorGraph gfg; + for (auto &&f : *this) { + if (auto gf = std::dynamic_pointer_cast(f)) { + gfg.push_back(gf); + } else if (auto gc = std::dynamic_pointer_cast(f)) { + gfg.push_back(gf); + } else if (auto hgf = std::dynamic_pointer_cast(f)) { + gfg.push_back((*hgf)(assignment)); + } else if (auto hgc = dynamic_pointer_cast(f)) { + gfg.push_back((*hgc)(assignment)); + } else { + continue; + } + } + return gfg; +} + } // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.h b/gtsam/hybrid/HybridGaussianFactorGraph.h index 1708ff64b..7e3aac663 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.h +++ b/gtsam/hybrid/HybridGaussianFactorGraph.h @@ -18,9 +18,9 @@ #pragma once -#include #include #include +#include #include #include #include @@ -126,6 +126,11 @@ class GTSAM_EXPORT HybridGaussianFactorGraph /// @brief Default constructor. 
HybridGaussianFactorGraph() = default; + /** Construct from container of factors (shared_ptr or plain objects) */ + template + explicit HybridGaussianFactorGraph(const CONTAINER& factors) + : Base(factors) {} + /** * Implicit copy/downcast constructor to override explicit template container * constructor. In BayesTree this is used for: @@ -144,6 +149,14 @@ class GTSAM_EXPORT HybridGaussianFactorGraph // const std::string& s = "HybridGaussianFactorGraph", // const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override; + /** + * @brief Print the errors of each factor in the hybrid factor graph. + * + * @param values The HybridValues for the variables used to compute the error. + * @param str String that is output before the factor graph and errors. + * @param keyFormatter Formatter function for the keys in the factors. + * @param printCondition A condition to check if a factor should be printed. + */ void printErrors( const HybridValues& values, const std::string& str = "HybridGaussianFactorGraph: ", @@ -197,14 +210,30 @@ class GTSAM_EXPORT HybridGaussianFactorGraph * @brief Create a decision tree of factor graphs out of this hybrid factor * graph. * - * For example, if there are two mixture factors, one with a discrete key A + * For example, if there are two hybrid factors, one with a discrete key A * and one with a discrete key B, then the decision tree will have two levels, * one for A and one for B. The leaves of the tree will be the Gaussian * factors that have only continuous keys. */ GaussianFactorGraphTree assembleGraphTree() const; + /** + * @brief Eliminate the given continuous keys. + * + * @param keys The continuous keys to eliminate. + * @return The conditional on the keys and a factor on the separator. + */ + std::pair, std::shared_ptr> + eliminate(const Ordering& keys) const; /// @} + + /// Get the GaussianFactorGraph at a given discrete assignment. 
+ GaussianFactorGraph operator()(const DiscreteValues& assignment) const; }; +// traits +template <> +struct traits + : public Testable {}; + } // namespace gtsam diff --git a/gtsam/hybrid/HybridJunctionTree.h b/gtsam/hybrid/HybridJunctionTree.h index a197e6e3c..17b4649fc 100644 --- a/gtsam/hybrid/HybridJunctionTree.h +++ b/gtsam/hybrid/HybridJunctionTree.h @@ -51,11 +51,10 @@ class HybridEliminationTree; */ class GTSAM_EXPORT HybridJunctionTree : public JunctionTree { - public: typedef JunctionTree - Base; ///< Base class - typedef HybridJunctionTree This; ///< This class + Base; ///< Base class + typedef HybridJunctionTree This; ///< This class typedef std::shared_ptr shared_ptr; ///< Shared pointer to this class /** diff --git a/gtsam/hybrid/HybridNonlinearFactor.cpp b/gtsam/hybrid/HybridNonlinearFactor.cpp new file mode 100644 index 000000000..9378d07fe --- /dev/null +++ b/gtsam/hybrid/HybridNonlinearFactor.cpp @@ -0,0 +1,206 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridNonlinearFactor.h + * @brief A set of nonlinear factors indexed by a set of discrete keys. 
+ * @author Varun Agrawal + * @date Sep 12, 2024 + */ + +#include +#include +#include + +#include + +namespace gtsam { + +/* *******************************************************************************/ +struct HybridNonlinearFactor::ConstructorHelper { + KeyVector continuousKeys; // Continuous keys extracted from factors + DiscreteKeys discreteKeys; // Discrete keys provided to the constructors + FactorValuePairs factorTree; + + void copyOrCheckContinuousKeys(const NoiseModelFactor::shared_ptr& factor) { + if (!factor) return; + if (continuousKeys.empty()) { + continuousKeys = factor->keys(); + } else if (factor->keys() != continuousKeys) { + throw std::runtime_error( + "HybridNonlinearFactor: all factors should have the same keys!"); + } + } + + ConstructorHelper(const DiscreteKey& discreteKey, + const std::vector& factors) + : discreteKeys({discreteKey}) { + std::vector pairs; + // Extract continuous keys from the first non-null factor + for (const auto& factor : factors) { + pairs.emplace_back(factor, 0.0); + copyOrCheckContinuousKeys(factor); + } + factorTree = FactorValuePairs({discreteKey}, pairs); + } + + ConstructorHelper(const DiscreteKey& discreteKey, + const std::vector& pairs) + : discreteKeys({discreteKey}) { + // Extract continuous keys from the first non-null factor + for (const auto& pair : pairs) { + copyOrCheckContinuousKeys(pair.first); + } + factorTree = FactorValuePairs({discreteKey}, pairs); + } + + ConstructorHelper(const DiscreteKeys& discreteKeys, + const FactorValuePairs& factorPairs) + : discreteKeys(discreteKeys), factorTree(factorPairs) { + // Extract continuous keys from the first non-null factor + factorPairs.visit([&](const NonlinearFactorValuePair& pair) { + copyOrCheckContinuousKeys(pair.first); + }); + } +}; + +/* *******************************************************************************/ +HybridNonlinearFactor::HybridNonlinearFactor(const ConstructorHelper& helper) + : Base(helper.continuousKeys, helper.discreteKeys), + 
factors_(helper.factorTree) {} + +HybridNonlinearFactor::HybridNonlinearFactor( + const DiscreteKey& discreteKey, + const std::vector& factors) + : HybridNonlinearFactor(ConstructorHelper(discreteKey, factors)) {} + +HybridNonlinearFactor::HybridNonlinearFactor( + const DiscreteKey& discreteKey, + const std::vector& pairs) + : HybridNonlinearFactor(ConstructorHelper(discreteKey, pairs)) {} + +HybridNonlinearFactor::HybridNonlinearFactor(const DiscreteKeys& discreteKeys, + const FactorValuePairs& factors) + : HybridNonlinearFactor(ConstructorHelper(discreteKeys, factors)) {} + +/* *******************************************************************************/ +AlgebraicDecisionTree HybridNonlinearFactor::errorTree( + const Values& continuousValues) const { + // functor to convert from sharedFactor to double error value. + auto errorFunc = + [continuousValues](const std::pair& f) { + auto [factor, val] = f; + return factor->error(continuousValues) + val; + }; + DecisionTree result(factors_, errorFunc); + return result; +} + +/* *******************************************************************************/ +double HybridNonlinearFactor::error( + const Values& continuousValues, + const DiscreteValues& discreteValues) const { + // Retrieve the factor corresponding to the assignment in discreteValues. 
+ auto [factor, val] = factors_(discreteValues); + // Compute the error for the selected factor + const double factorError = factor->error(continuousValues); + return factorError + val; +} + +/* *******************************************************************************/ +double HybridNonlinearFactor::error(const HybridValues& values) const { + return error(values.nonlinear(), values.discrete()); +} + +/* *******************************************************************************/ +size_t HybridNonlinearFactor::dim() const { + const auto assignments = DiscreteValues::CartesianProduct(discreteKeys_); + auto [factor, val] = factors_(assignments.at(0)); + return factor->dim(); +} + +/* *******************************************************************************/ +void HybridNonlinearFactor::print(const std::string& s, + const KeyFormatter& keyFormatter) const { + std::cout << (s.empty() ? "" : s + " "); + Base::print("", keyFormatter); + std::cout << "\nHybridNonlinearFactor\n"; + auto valueFormatter = [](const std::pair& v) { + auto [factor, val] = v; + if (factor) { + return "Nonlinear factor on " + std::to_string(factor->size()) + " keys"; + } else { + return std::string("nullptr"); + } + }; + factors_.print("", keyFormatter, valueFormatter); +} + +/* *******************************************************************************/ +bool HybridNonlinearFactor::equals(const HybridFactor& other, + double tol) const { + // We attempt a dynamic cast from HybridFactor to HybridNonlinearFactor. If + // it fails, return false. + if (!dynamic_cast(&other)) return false; + + // If the cast is successful, we'll properly construct a + // HybridNonlinearFactor object from `other` + const HybridNonlinearFactor& f( + static_cast(other)); + + // Ensure that this HybridNonlinearFactor and `f` have the same `factors_`. 
+ auto compare = [tol](const std::pair& a, + const std::pair& b) { + return a.first->equals(*b.first, tol) && (a.second == b.second); + }; + if (!factors_.equals(f.factors_, compare)) return false; + + // If everything above passes, and the keys_ and discreteKeys_ + // member variables are identical, return true. + return (std::equal(keys_.begin(), keys_.end(), f.keys().begin()) && + (discreteKeys_ == f.discreteKeys_)); +} + +/* *******************************************************************************/ +GaussianFactor::shared_ptr HybridNonlinearFactor::linearize( + const Values& continuousValues, + const DiscreteValues& discreteValues) const { + auto factor = factors_(discreteValues).first; + return factor->linearize(continuousValues); +} + +/* *******************************************************************************/ +std::shared_ptr HybridNonlinearFactor::linearize( + const Values& continuousValues) const { + // functional to linearize each factor in the decision tree + auto linearizeDT = + [continuousValues]( + const std::pair& f) -> GaussianFactorValuePair { + auto [factor, val] = f; + if (auto gaussian = std::dynamic_pointer_cast( + factor->noiseModel())) { + return {factor->linearize(continuousValues), + val + gaussian->negLogConstant()}; + } else { + throw std::runtime_error( + "HybridNonlinearFactor: linearize() only supports NoiseModelFactors " + "with Gaussian (or derived) noise models."); + } + }; + + DecisionTree> + linearized_factors(factors_, linearizeDT); + + return std::make_shared(discreteKeys_, + linearized_factors); +} + +} // namespace gtsam \ No newline at end of file diff --git a/gtsam/hybrid/HybridNonlinearFactor.h b/gtsam/hybrid/HybridNonlinearFactor.h new file mode 100644 index 000000000..325fa3eaa --- /dev/null +++ b/gtsam/hybrid/HybridNonlinearFactor.h @@ -0,0 +1,192 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, 
Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridNonlinearFactor.h + * @brief A set of nonlinear factors indexed by a set of discrete keys. + * @author Kevin Doherty, kdoherty@mit.edu + * @author Varun Agrawal + * @date December 2021 + */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +namespace gtsam { + +/// Alias for a NoiseModelFactor shared pointer and double scalar pair. +using NonlinearFactorValuePair = + std::pair; + +/** + * @brief Implementation of a discrete-conditioned hybrid factor. + * + * Implements a joint discrete-continuous factor where the discrete variable + * serves to "select" a hybrid component corresponding to a NoiseModelFactor. + * + * This class stores all factors as HybridFactors which can then be typecast to + * one of (NoiseModelFactor, GaussianFactor) which can then be checked to + * perform the correct operation. + * + * In factor graphs the error function typically returns 0.5*|h(x)-z|^2, i.e., + * the negative log-likelihood for a Gaussian noise model. + * In hybrid factor graphs we allow *adding* an arbitrary scalar dependent on + * the discrete assignment. + * For example, adding a 70/30 mode probability is supported by providing the + * scalars $-log(.7)$ and $-log(.3)$. + * Note that adding a common constant will not make any difference in the + * optimization, so $-log(70)$ and $-log(30)$ work just as well. 
+ * + * @ingroup hybrid + */ +class GTSAM_EXPORT HybridNonlinearFactor : public HybridFactor { + public: + using Base = HybridFactor; + using This = HybridNonlinearFactor; + using shared_ptr = std::shared_ptr; + using sharedFactor = std::shared_ptr; + + /** + * @brief typedef for DecisionTree which has Keys as node labels and + * pairs of NoiseModelFactor & an arbitrary scalar as leaf nodes. + */ + using FactorValuePairs = DecisionTree; + + private: + /// Decision tree of nonlinear factors indexed by discrete keys. + FactorValuePairs factors_; + + /// HybridFactor method implementation. Should not be used. + AlgebraicDecisionTree errorTree( + const VectorValues& continuousValues) const override { + throw std::runtime_error( + "HybridNonlinearFactor::error does not take VectorValues."); + } + + public: + /// Default constructor, mainly for serialization. + HybridNonlinearFactor() = default; + + /** + * @brief Construct a new HybridNonlinearFactor on a single discrete key, + * providing the factors for each mode m as a vector of factors Ï•_m(x). + * The value Ï•(x,m) for the factor is simply Ï•_m(x). + * + * @param discreteKey The discrete key for the "mode", indexing components. + * @param factors Vector of gaussian factors, one for each mode. + */ + HybridNonlinearFactor( + const DiscreteKey& discreteKey, + const std::vector& factors); + + /** + * @brief Construct a new HybridNonlinearFactor on a single discrete key, + * including a scalar error value for each mode m. The factors and scalars are + * provided as a vector of pairs (Ï•_m(x), E_m). + * The value Ï•(x,m) for the factor is now Ï•_m(x) + E_m. + * + * @param discreteKey The discrete key for the "mode", indexing components. + * @param pairs Vector of gaussian factor-scalar pairs, one per mode. 
+ */ + HybridNonlinearFactor(const DiscreteKey& discreteKey, + const std::vector& pairs); + + /** + * @brief Construct a new HybridNonlinearFactor on a several discrete keys M, + * including a scalar error value for each assignment m. The factors and + * scalars are provided as a DecisionTree of pairs (Ï•_M(x), E_M). + * The value Ï•(x,M) for the factor is again Ï•_m(x) + E_m. + * + * @param discreteKeys Discrete variables and their cardinalities. + * @param factors The decision tree of nonlinear factor/scalar pairs. + */ + HybridNonlinearFactor(const DiscreteKeys& discreteKeys, + const FactorValuePairs& factors); + + /** + * @brief Compute error of the HybridNonlinearFactor as a tree. + * + * @param continuousValues The continuous values for which to compute the + * error. + * @return AlgebraicDecisionTree A decision tree with the same keys + * as the factor, and leaf values as the error. + */ + AlgebraicDecisionTree errorTree(const Values& continuousValues) const; + + /** + * @brief Compute error of factor given both continuous and discrete values. + * + * @param continuousValues The continuous Values. + * @param discreteValues The discrete Values. + * @return double The error of this factor. + */ + double error(const Values& continuousValues, + const DiscreteValues& discreteValues) const; + + /** + * @brief Compute error of factor given hybrid values. + * + * @param values The continuous Values and the discrete assignment. + * @return double The error of this factor. + */ + double error(const HybridValues& values) const override; + + /** + * @brief Get the dimension of the factor (number of rows on linearization). + * Returns the dimension of the first component factor. 
+ * @return size_t + */ + size_t dim() const; + + /// Testable + /// @{ + + /// print to stdout + void print(const std::string& s = "", const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const override; + + /// Check equality + bool equals(const HybridFactor& other, double tol = 1e-9) const override; + + /// @} + + /// Linearize specific nonlinear factors based on the assignment in + /// discreteValues. + GaussianFactor::shared_ptr linearize( + const Values& continuousValues, + const DiscreteValues& discreteValues) const; + + /// Linearize all the continuous factors to get a HybridGaussianFactor. + std::shared_ptr linearize( + const Values& continuousValues) const; + + private: + /// Helper struct to assist private constructor below. + struct ConstructorHelper; + + // Private constructor using ConstructorHelper above. + HybridNonlinearFactor(const ConstructorHelper& helper); +}; + +// traits +template <> +struct traits : public Testable { +}; + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridNonlinearFactorGraph.cpp b/gtsam/hybrid/HybridNonlinearFactorGraph.cpp index cdd448412..0e7e9c692 100644 --- a/gtsam/hybrid/HybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/HybridNonlinearFactorGraph.cpp @@ -18,10 +18,10 @@ #include #include -#include +#include #include +#include #include -#include #include namespace gtsam { @@ -59,7 +59,7 @@ void HybridNonlinearFactorGraph::printErrors( // Clear the stringstream ss.str(std::string()); - if (auto mf = std::dynamic_pointer_cast(factor)) { + if (auto mf = std::dynamic_pointer_cast(factor)) { if (factor == nullptr) { std::cout << "nullptr" << "\n"; @@ -70,7 +70,7 @@ void HybridNonlinearFactorGraph::printErrors( std::cout << std::endl; } } else if (auto gmf = - std::dynamic_pointer_cast(factor)) { + std::dynamic_pointer_cast(factor)) { if (factor == nullptr) { std::cout << "nullptr" << "\n"; @@ -80,7 +80,8 @@ void HybridNonlinearFactorGraph::printErrors( gmf->errorTree(values.continuous()).print("", keyFormatter); 
std::cout << std::endl; } - } else if (auto gm = std::dynamic_pointer_cast(factor)) { + } else if (auto gm = std::dynamic_pointer_cast( + factor)) { if (factor == nullptr) { std::cout << "nullptr" << "\n"; @@ -150,9 +151,9 @@ HybridGaussianFactorGraph::shared_ptr HybridNonlinearFactorGraph::linearize( if (!f) { continue; } - // Check if it is a nonlinear mixture factor - if (auto mf = dynamic_pointer_cast(f)) { - const GaussianMixtureFactor::shared_ptr& gmf = + // Check if it is a hybrid nonlinear factor + if (auto mf = dynamic_pointer_cast(f)) { + const HybridGaussianFactor::shared_ptr& gmf = mf->linearize(continuousValues); linearFG->push_back(gmf); } else if (auto nlf = dynamic_pointer_cast(f)) { @@ -161,9 +162,9 @@ HybridGaussianFactorGraph::shared_ptr HybridNonlinearFactorGraph::linearize( } else if (dynamic_pointer_cast(f)) { // If discrete-only: doesn't need linearization. linearFG->push_back(f); - } else if (auto gmf = dynamic_pointer_cast(f)) { + } else if (auto gmf = dynamic_pointer_cast(f)) { linearFG->push_back(gmf); - } else if (auto gm = dynamic_pointer_cast(f)) { + } else if (auto gm = dynamic_pointer_cast(f)) { linearFG->push_back(gm); } else if (dynamic_pointer_cast(f)) { linearFG->push_back(f); @@ -178,4 +179,35 @@ HybridGaussianFactorGraph::shared_ptr HybridNonlinearFactorGraph::linearize( return linearFG; } +/* ************************************************************************* */ +AlgebraicDecisionTree HybridNonlinearFactorGraph::errorTree( + const Values& values) const { + AlgebraicDecisionTree result(0.0); + + // Iterate over each factor. + for (auto& factor : factors_) { + if (auto hnf = std::dynamic_pointer_cast(factor)) { + // Compute factor error and add it. 
+ result = result + hnf->errorTree(values); + + } else if (auto nf = std::dynamic_pointer_cast(factor)) { + // If continuous only, get the (double) error + // and add it to every leaf of the result + result = result + nf->error(values); + + } else if (auto df = std::dynamic_pointer_cast(factor)) { + // If discrete, just add its errorTree as well + result = result + df->errorTree(); + + } else { + throw std::runtime_error( + "HybridNonlinearFactorGraph::errorTree(Values) not implemented for " + "factor type " + + demangle(typeid(factor).name()) + "."); + } + } + + return result; +} + } // namespace gtsam diff --git a/gtsam/hybrid/HybridNonlinearFactorGraph.h b/gtsam/hybrid/HybridNonlinearFactorGraph.h index 54dc9e93f..53920a4aa 100644 --- a/gtsam/hybrid/HybridNonlinearFactorGraph.h +++ b/gtsam/hybrid/HybridNonlinearFactorGraph.h @@ -86,6 +86,23 @@ class GTSAM_EXPORT HybridNonlinearFactorGraph : public HybridFactorGraph { */ std::shared_ptr linearize( const Values& continuousValues) const; + + /// Expose error(const HybridValues&) method. + using Base::error; + + /** + * @brief Compute error of (hybrid) nonlinear factors and discrete factors + * over each discrete assignment, and return as a tree. + * + * Error \f$ e = \Vert f(x) - \mu \Vert_{\Sigma} \f$. + * + * @note: Gaussian and hybrid Gaussian factors are not considered! + * + * @param values Manifold values at which to compute the error. 
+ * @return AlgebraicDecisionTree + */ + AlgebraicDecisionTree errorTree(const Values& values) const; + /// @} }; diff --git a/gtsam/hybrid/HybridNonlinearISAM.h b/gtsam/hybrid/HybridNonlinearISAM.h index 3372593be..e41b4ebe1 100644 --- a/gtsam/hybrid/HybridNonlinearISAM.h +++ b/gtsam/hybrid/HybridNonlinearISAM.h @@ -19,6 +19,7 @@ #include #include + #include namespace gtsam { diff --git a/gtsam/hybrid/HybridSmoother.cpp b/gtsam/hybrid/HybridSmoother.cpp index afa8340d2..b898c0520 100644 --- a/gtsam/hybrid/HybridSmoother.cpp +++ b/gtsam/hybrid/HybridSmoother.cpp @@ -100,7 +100,7 @@ HybridSmoother::addConditionals(const HybridGaussianFactorGraph &originalGraph, // If hybridBayesNet is not empty, // it means we have conditionals to add to the factor graph. if (!hybridBayesNet.empty()) { - // We add all relevant conditional mixtures on the last continuous variable + // We add all relevant hybrid conditionals on the last continuous variable // in the previous `hybridBayesNet` to the graph // Conditionals to remove from the bayes net @@ -138,9 +138,9 @@ HybridSmoother::addConditionals(const HybridGaussianFactorGraph &originalGraph, } /* ************************************************************************* */ -GaussianMixture::shared_ptr HybridSmoother::gaussianMixture( +HybridGaussianConditional::shared_ptr HybridSmoother::gaussianMixture( size_t index) const { - return hybridBayesNet_.at(index)->asMixture(); + return hybridBayesNet_.at(index)->asHybrid(); } /* ************************************************************************* */ diff --git a/gtsam/hybrid/HybridSmoother.h b/gtsam/hybrid/HybridSmoother.h index 5c2e4f546..267746ab6 100644 --- a/gtsam/hybrid/HybridSmoother.h +++ b/gtsam/hybrid/HybridSmoother.h @@ -34,7 +34,7 @@ class GTSAM_EXPORT HybridSmoother { * Given new factors, perform an incremental update. 
* The relevant densities in the `hybridBayesNet` will be added to the input * graph (fragment), and then eliminated according to the `ordering` - * presented. The remaining factor graph contains Gaussian mixture factors + * presented. The remaining factor graph contains hybrid Gaussian factors * that are not connected to the variables in the ordering, or a single * discrete factor on all discrete keys, plus all discrete factors in the * original graph. @@ -68,8 +68,14 @@ class GTSAM_EXPORT HybridSmoother { const HybridGaussianFactorGraph& graph, const HybridBayesNet& hybridBayesNet, const Ordering& ordering) const; - /// Get the Gaussian Mixture from the Bayes Net posterior at `index`. - GaussianMixture::shared_ptr gaussianMixture(size_t index) const; + /** + * @brief Get the hybrid Gaussian conditional from + * the Bayes Net posterior at `index`. + * + * @param index Indexing value. + * @return HybridGaussianConditional::shared_ptr + */ + HybridGaussianConditional::shared_ptr gaussianMixture(size_t index) const; /// Return the Bayes Net posterior. const HybridBayesNet& hybridBayesNet() const; diff --git a/gtsam/hybrid/HybridValues.cpp b/gtsam/hybrid/HybridValues.cpp new file mode 100644 index 000000000..533cd6eab --- /dev/null +++ b/gtsam/hybrid/HybridValues.cpp @@ -0,0 +1,170 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridValues.cpp + * @author Varun Agrawal + * @date August 2024 + */ + +#include +#include +#include +#include +#include + +namespace gtsam { + +/* ************************************************************************* */ +HybridValues::HybridValues(const VectorValues& cv, const DiscreteValues& dv) + : continuous_(cv), discrete_(dv) {} + +/* ************************************************************************* */ +HybridValues::HybridValues(const VectorValues& cv, const DiscreteValues& dv, + const Values& v) + : continuous_(cv), discrete_(dv), nonlinear_(v) {} + +/* ************************************************************************* */ +void HybridValues::print(const std::string& s, + const KeyFormatter& keyFormatter) const { + std::cout << s << ": \n"; + // print continuous components + continuous_.print(" Continuous", keyFormatter); + // print discrete components + discrete_.print(" Discrete", keyFormatter); + // print nonlinear components + nonlinear_.print(" Nonlinear", keyFormatter); +} + +/* ************************************************************************* */ +bool HybridValues::equals(const HybridValues& other, double tol) const { + return continuous_.equals(other.continuous_, tol) && + discrete_.equals(other.discrete_, tol); +} + +/* ************************************************************************* */ +const VectorValues& HybridValues::continuous() const { return continuous_; } + +/* ************************************************************************* */ +const DiscreteValues& HybridValues::discrete() const { return discrete_; } + +/* ************************************************************************* */ +const Values& HybridValues::nonlinear() const { return nonlinear_; } + +/* 
************************************************************************* */ +bool HybridValues::existsVector(Key j) { return continuous_.exists(j); } + +/* ************************************************************************* */ +bool HybridValues::existsDiscrete(Key j) { + return (discrete_.find(j) != discrete_.end()); +} + +/* ************************************************************************* */ +bool HybridValues::existsNonlinear(Key j) { return nonlinear_.exists(j); } + +/* ************************************************************************* */ +bool HybridValues::exists(Key j) { + return existsVector(j) || existsDiscrete(j) || existsNonlinear(j); +} + +/* ************************************************************************* */ +HybridValues HybridValues::retract(const VectorValues& delta) const { + HybridValues updated(continuous_, discrete_, nonlinear_.retract(delta)); + return updated; +} + +/* ************************************************************************* */ +void HybridValues::insert(Key j, const Vector& value) { + continuous_.insert(j, value); +} + +/* ************************************************************************* */ +void HybridValues::insert(Key j, size_t value) { discrete_[j] = value; } + +/* ************************************************************************* */ +void HybridValues::insert_or_assign(Key j, const Vector& value) { + continuous_.insert_or_assign(j, value); +} + +/* ************************************************************************* */ +void HybridValues::insert_or_assign(Key j, size_t value) { + discrete_[j] = value; +} + +/* ************************************************************************* */ +HybridValues& HybridValues::insert(const VectorValues& values) { + continuous_.insert(values); + return *this; +} + +/* ************************************************************************* */ +HybridValues& HybridValues::insert(const DiscreteValues& values) { + 
discrete_.insert(values); + return *this; +} + +/* ************************************************************************* */ +HybridValues& HybridValues::insert(const Values& values) { + nonlinear_.insert(values); + return *this; +} + +/* ************************************************************************* */ +HybridValues& HybridValues::insert(const HybridValues& values) { + continuous_.insert(values.continuous()); + discrete_.insert(values.discrete()); + nonlinear_.insert(values.nonlinear()); + return *this; +} + +/* ************************************************************************* */ +Vector& HybridValues::at(Key j) { return continuous_.at(j); } + +/* ************************************************************************* */ +size_t& HybridValues::atDiscrete(Key j) { return discrete_.at(j); } + +/* ************************************************************************* */ +HybridValues& HybridValues::update(const VectorValues& values) { + continuous_.update(values); + return *this; +} + +/* ************************************************************************* */ +HybridValues& HybridValues::update(const DiscreteValues& values) { + discrete_.update(values); + return *this; +} + +/* ************************************************************************* */ +HybridValues& HybridValues::update(const HybridValues& values) { + continuous_.update(values.continuous()); + discrete_.update(values.discrete()); + return *this; +} + +/* ************************************************************************* */ +VectorValues HybridValues::continuousSubset(const KeyVector& keys) const { + VectorValues measurements; + for (const auto& key : keys) { + measurements.insert(key, continuous_.at(key)); + } + return measurements; +} + +/* ************************************************************************* */ +std::string HybridValues::html(const KeyFormatter& keyFormatter) const { + std::stringstream ss; + ss << this->continuous_.html(keyFormatter); + 
ss << this->discrete_.html(keyFormatter); + return ss.str(); +} + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridValues.h b/gtsam/hybrid/HybridValues.h index 40d4b5e92..07ff3e9e3 100644 --- a/gtsam/hybrid/HybridValues.h +++ b/gtsam/hybrid/HybridValues.h @@ -12,13 +12,12 @@ /** * @file HybridValues.h * @date Jul 28, 2022 + * @author Varun Agrawal * @author Shangjie Xue */ #pragma once -#include -#include #include #include #include @@ -54,13 +53,11 @@ class GTSAM_EXPORT HybridValues { HybridValues() = default; /// Construct from DiscreteValues and VectorValues. - HybridValues(const VectorValues &cv, const DiscreteValues &dv) - : continuous_(cv), discrete_(dv){} + HybridValues(const VectorValues& cv, const DiscreteValues& dv); /// Construct from all values types. HybridValues(const VectorValues& cv, const DiscreteValues& dv, - const Values& v) - : continuous_(cv), discrete_(dv), nonlinear_(v){} + const Values& v); /// @} /// @name Testable @@ -68,148 +65,105 @@ class GTSAM_EXPORT HybridValues { /// print required by Testable for unit testing void print(const std::string& s = "HybridValues", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { - std::cout << s << ": \n"; - continuous_.print(" Continuous", - keyFormatter); // print continuous components - discrete_.print(" Discrete", keyFormatter); // print discrete components - } + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; /// equals required by Testable for unit testing - bool equals(const HybridValues& other, double tol = 1e-9) const { - return continuous_.equals(other.continuous_, tol) && - discrete_.equals(other.discrete_, tol); - } + bool equals(const HybridValues& other, double tol = 1e-9) const; /// @} /// @name Interface /// @{ /// Return the multi-dimensional vector values. - const VectorValues& continuous() const { return continuous_; } + const VectorValues& continuous() const; /// Return the discrete values. 
- const DiscreteValues& discrete() const { return discrete_; } + const DiscreteValues& discrete() const; /// Return the nonlinear values. - const Values& nonlinear() const { return nonlinear_; } + const Values& nonlinear() const; /// Check whether a variable with key \c j exists in VectorValues. - bool existsVector(Key j) { return continuous_.exists(j); } + bool existsVector(Key j); /// Check whether a variable with key \c j exists in DiscreteValues. - bool existsDiscrete(Key j) { return (discrete_.find(j) != discrete_.end()); } + bool existsDiscrete(Key j); /// Check whether a variable with key \c j exists in values. - bool existsNonlinear(Key j) { - return nonlinear_.exists(j); - } + bool existsNonlinear(Key j); /// Check whether a variable with key \c j exists. - bool exists(Key j) { - return existsVector(j) || existsDiscrete(j) || existsNonlinear(j); - } + bool exists(Key j); + + /** Add a delta config to current config and returns a new config */ + HybridValues retract(const VectorValues& delta) const; /** Insert a vector \c value with key \c j. Throws an invalid_argument * exception if the key \c j is already used. * @param value The vector to be inserted. * @param j The index with which the value will be associated. */ - void insert(Key j, const Vector& value) { continuous_.insert(j, value); } + void insert(Key j, const Vector& value); /** Insert a discrete \c value with key \c j. Replaces the existing value if * the key \c j is already used. * @param value The vector to be inserted. * @param j The index with which the value will be associated. 
*/ - void insert(Key j, size_t value) { discrete_[j] = value; } + void insert(Key j, size_t value); /// insert_or_assign() , similar to Values.h - void insert_or_assign(Key j, const Vector& value) { - continuous_.insert_or_assign(j, value); - } + void insert_or_assign(Key j, const Vector& value); /// insert_or_assign() , similar to Values.h - void insert_or_assign(Key j, size_t value) { - discrete_[j] = value; - } + void insert_or_assign(Key j, size_t value); /** Insert all continuous values from \c values. Throws an invalid_argument * exception if any keys to be inserted are already used. */ - HybridValues& insert(const VectorValues& values) { - continuous_.insert(values); - return *this; - } + HybridValues& insert(const VectorValues& values); /** Insert all discrete values from \c values. Throws an invalid_argument * exception if any keys to be inserted are already used. */ - HybridValues& insert(const DiscreteValues& values) { - discrete_.insert(values); - return *this; - } + HybridValues& insert(const DiscreteValues& values); /** Insert all values from \c values. Throws an invalid_argument * exception if any keys to be inserted are already used. */ - HybridValues& insert(const Values& values) { - nonlinear_.insert(values); - return *this; - } + HybridValues& insert(const Values& values); /** Insert all values from \c values. Throws an invalid_argument exception if * any keys to be inserted are already used. */ - HybridValues& insert(const HybridValues& values) { - continuous_.insert(values.continuous()); - discrete_.insert(values.discrete()); - nonlinear_.insert(values.nonlinear()); - return *this; - } + HybridValues& insert(const HybridValues& values); /** * Read/write access to the vector value with key \c j, throws * std::out_of_range if \c j does not exist. */ - Vector& at(Key j) { return continuous_.at(j); } + Vector& at(Key j); /** * Read/write access to the discrete value with key \c j, throws * std::out_of_range if \c j does not exist. 
*/ - size_t& atDiscrete(Key j) { return discrete_.at(j); } + size_t& atDiscrete(Key j); /** For all key/value pairs in \c values, replace continuous values with * corresponding keys in this object with those in \c values. Throws * std::out_of_range if any keys in \c values are not present in this object. */ - HybridValues& update(const VectorValues& values) { - continuous_.update(values); - return *this; - } + HybridValues& update(const VectorValues& values); /** For all key/value pairs in \c values, replace discrete values with * corresponding keys in this object with those in \c values. Throws * std::out_of_range if any keys in \c values are not present in this object. */ - HybridValues& update(const DiscreteValues& values) { - discrete_.update(values); - return *this; - } + HybridValues& update(const DiscreteValues& values); /** For all key/value pairs in \c values, replace all values with * corresponding keys in this object with those in \c values. Throws * std::out_of_range if any keys in \c values are not present in this object. */ - HybridValues& update(const HybridValues& values) { - continuous_.update(values.continuous()); - discrete_.update(values.discrete()); - return *this; - } + HybridValues& update(const HybridValues& values); /// Extract continuous values with given keys. - VectorValues continuousSubset(const KeyVector& keys) const { - VectorValues measurements; - for (const auto& key : keys) { - measurements.insert(key, continuous_.at(key)); - } - return measurements; - } + VectorValues continuousSubset(const KeyVector& keys) const; /// @} /// @name Wrapper support @@ -222,12 +176,7 @@ class GTSAM_EXPORT HybridValues { * @return string html output. 
*/ std::string html( - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { - std::stringstream ss; - ss << this->continuous_.html(keyFormatter); - ss << this->discrete_.html(keyFormatter); - return ss.str(); - } + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; /// @} }; diff --git a/gtsam/hybrid/MixtureFactor.h b/gtsam/hybrid/MixtureFactor.h deleted file mode 100644 index 09a641b48..000000000 --- a/gtsam/hybrid/MixtureFactor.h +++ /dev/null @@ -1,292 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file MixtureFactor.h - * @brief Nonlinear Mixture factor of continuous and discrete. - * @author Kevin Doherty, kdoherty@mit.edu - * @author Varun Agrawal - * @date December 2021 - */ - -#pragma once - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -namespace gtsam { - -/** - * @brief Implementation of a discrete conditional mixture factor. - * - * Implements a joint discrete-continuous factor where the discrete variable - * serves to "select" a mixture component corresponding to a NonlinearFactor - * type of measurement. - * - * This class stores all factors as HybridFactors which can then be typecast to - * one of (NonlinearFactor, GaussianFactor) which can then be checked to perform - * the correct operation. - */ -class MixtureFactor : public HybridFactor { - public: - using Base = HybridFactor; - using This = MixtureFactor; - using shared_ptr = std::shared_ptr; - using sharedFactor = std::shared_ptr; - - /** - * @brief typedef for DecisionTree which has Keys as node labels and - * NonlinearFactor as leaf nodes. 
- */ - using Factors = DecisionTree; - - private: - /// Decision tree of Gaussian factors indexed by discrete keys. - Factors factors_; - bool normalized_; - - public: - MixtureFactor() = default; - - /** - * @brief Construct from Decision tree. - * - * @param keys Vector of keys for continuous factors. - * @param discreteKeys Vector of discrete keys. - * @param factors Decision tree with of shared factors. - * @param normalized Flag indicating if the factor error is already - * normalized. - */ - MixtureFactor(const KeyVector& keys, const DiscreteKeys& discreteKeys, - const Factors& factors, bool normalized = false) - : Base(keys, discreteKeys), factors_(factors), normalized_(normalized) {} - - /** - * @brief Convenience constructor that generates the underlying factor - * decision tree for us. - * - * Here it is important that the vector of factors has the correct number of - * elements based on the number of discrete keys and the cardinality of the - * keys, so that the decision tree is constructed appropriately. - * - * @tparam FACTOR The type of the factor shared pointers being passed in. - * Will be typecast to NonlinearFactor shared pointers. - * @param keys Vector of keys for continuous factors. - * @param discreteKeys Vector of discrete keys. - * @param factors Vector of nonlinear factors. - * @param normalized Flag indicating if the factor error is already - * normalized. - */ - template - MixtureFactor(const KeyVector& keys, const DiscreteKeys& discreteKeys, - const std::vector>& factors, - bool normalized = false) - : Base(keys, discreteKeys), normalized_(normalized) { - std::vector nonlinear_factors; - KeySet continuous_keys_set(keys.begin(), keys.end()); - KeySet factor_keys_set; - for (auto&& f : factors) { - // Insert all factor continuous keys in the continuous keys set. 
- std::copy(f->keys().begin(), f->keys().end(), - std::inserter(factor_keys_set, factor_keys_set.end())); - - if (auto nf = std::dynamic_pointer_cast(f)) { - nonlinear_factors.push_back(nf); - } else { - throw std::runtime_error( - "Factors passed into MixtureFactor need to be nonlinear!"); - } - } - factors_ = Factors(discreteKeys, nonlinear_factors); - - if (continuous_keys_set != factor_keys_set) { - throw std::runtime_error( - "The specified continuous keys and the keys in the factors don't " - "match!"); - } - } - - /** - * @brief Compute error of the MixtureFactor as a tree. - * - * @param continuousValues The continuous values for which to compute the - * error. - * @return AlgebraicDecisionTree A decision tree with the same keys - * as the factor, and leaf values as the error. - */ - AlgebraicDecisionTree errorTree(const Values& continuousValues) const { - // functor to convert from sharedFactor to double error value. - auto errorFunc = [continuousValues](const sharedFactor& factor) { - return factor->error(continuousValues); - }; - DecisionTree result(factors_, errorFunc); - return result; - } - - /** - * @brief Compute error of factor given both continuous and discrete values. - * - * @param continuousValues The continuous Values. - * @param discreteValues The discrete Values. - * @return double The error of this factor. - */ - double error(const Values& continuousValues, - const DiscreteValues& discreteValues) const { - // Retrieve the factor corresponding to the assignment in discreteValues. - auto factor = factors_(discreteValues); - // Compute the error for the selected factor - const double factorError = factor->error(continuousValues); - if (normalized_) return factorError; - return factorError + this->nonlinearFactorLogNormalizingConstant( - factor, continuousValues); - } - - /** - * @brief Compute error of factor given hybrid values. - * - * @param values The continuous Values and the discrete assignment. 
- * @return double The error of this factor. - */ - double error(const HybridValues& values) const override { - return error(values.nonlinear(), values.discrete()); - } - - /** - * @brief Get the dimension of the factor (number of rows on linearization). - * Returns the dimension of the first component factor. - * @return size_t - */ - size_t dim() const { - const auto assignments = DiscreteValues::CartesianProduct(discreteKeys_); - auto factor = factors_(assignments.at(0)); - return factor->dim(); - } - - /// Testable - /// @{ - - /// print to stdout - void print( - const std::string& s = "", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { - std::cout << (s.empty() ? "" : s + " "); - Base::print("", keyFormatter); - std::cout << "\nMixtureFactor\n"; - auto valueFormatter = [](const sharedFactor& v) { - if (v) { - return "Nonlinear factor on " + std::to_string(v->size()) + " keys"; - } else { - return std::string("nullptr"); - } - }; - factors_.print("", keyFormatter, valueFormatter); - } - - /// Check equality - bool equals(const HybridFactor& other, double tol = 1e-9) const override { - // We attempt a dynamic cast from HybridFactor to MixtureFactor. If it - // fails, return false. - if (!dynamic_cast(&other)) return false; - - // If the cast is successful, we'll properly construct a MixtureFactor - // object from `other` - const MixtureFactor& f(static_cast(other)); - - // Ensure that this MixtureFactor and `f` have the same `factors_`. - auto compare = [tol](const sharedFactor& a, const sharedFactor& b) { - return traits::Equals(*a, *b, tol); - }; - if (!factors_.equals(f.factors_, compare)) return false; - - // If everything above passes, and the keys_, discreteKeys_ and normalized_ - // member variables are identical, return true. 
- return (std::equal(keys_.begin(), keys_.end(), f.keys().begin()) && - (discreteKeys_ == f.discreteKeys_) && - (normalized_ == f.normalized_)); - } - - /// @} - - /// Linearize specific nonlinear factors based on the assignment in - /// discreteValues. - GaussianFactor::shared_ptr linearize( - const Values& continuousValues, - const DiscreteValues& discreteValues) const { - auto factor = factors_(discreteValues); - return factor->linearize(continuousValues); - } - - /// Linearize all the continuous factors to get a GaussianMixtureFactor. - std::shared_ptr linearize( - const Values& continuousValues) const { - // functional to linearize each factor in the decision tree - auto linearizeDT = [continuousValues](const sharedFactor& factor) { - return factor->linearize(continuousValues); - }; - - DecisionTree linearized_factors( - factors_, linearizeDT); - - return std::make_shared( - continuousKeys_, discreteKeys_, linearized_factors); - } - - /** - * If the component factors are not already normalized, we want to compute - * their normalizing constants so that the resulting joint distribution is - * appropriately computed. Remember, this is the _negative_ normalizing - * constant for the measurement likelihood (since we are minimizing the - * _negative_ log-likelihood). - */ - double nonlinearFactorLogNormalizingConstant(const sharedFactor& factor, - const Values& values) const { - // Information matrix (inverse covariance matrix) for the factor. 
- Matrix infoMat; - - // If this is a NoiseModelFactor, we'll use its noiseModel to - // otherwise noiseModelFactor will be nullptr - if (auto noiseModelFactor = - std::dynamic_pointer_cast(factor)) { - // If dynamic cast to NoiseModelFactor succeeded, see if the noise model - // is Gaussian - auto noiseModel = noiseModelFactor->noiseModel(); - - auto gaussianNoiseModel = - std::dynamic_pointer_cast(noiseModel); - if (gaussianNoiseModel) { - // If the noise model is Gaussian, retrieve the information matrix - infoMat = gaussianNoiseModel->information(); - } else { - // If the factor is not a Gaussian factor, we'll linearize it to get - // something with a normalized noise model - // TODO(kevin): does this make sense to do? I think maybe not in - // general? Should we just yell at the user? - auto gaussianFactor = factor->linearize(values); - infoMat = gaussianFactor->information(); - } - } - - // Compute the (negative) log of the normalizing constant - return -(factor->dim() * log(2.0 * M_PI) / 2.0) - - (log(infoMat.determinant()) / 2.0); - } -}; - -} // namespace gtsam diff --git a/gtsam/hybrid/hybrid.i b/gtsam/hybrid/hybrid.i index c1d57715e..2d59c100f 100644 --- a/gtsam/hybrid/hybrid.i +++ b/gtsam/hybrid/hybrid.i @@ -10,26 +10,26 @@ class HybridValues { gtsam::DiscreteValues discrete() const; HybridValues(); - HybridValues(const gtsam::VectorValues &cv, const gtsam::DiscreteValues &dv); + HybridValues(const gtsam::VectorValues& cv, const gtsam::DiscreteValues& dv); void print(string s = "HybridValues", const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const; bool equals(const gtsam::HybridValues& other, double tol) const; - + void insert(gtsam::Key j, int value); void insert(gtsam::Key j, const gtsam::Vector& value); - + void insert_or_assign(gtsam::Key j, const gtsam::Vector& value); void insert_or_assign(gtsam::Key j, size_t value); void insert(const gtsam::VectorValues& values); void insert(const gtsam::DiscreteValues& values); void 
insert(const gtsam::HybridValues& values); - + void update(const gtsam::VectorValues& values); void update(const gtsam::DiscreteValues& values); void update(const gtsam::HybridValues& values); - + size_t& atDiscrete(gtsam::Key j); gtsam::Vector& at(gtsam::Key j); }; @@ -42,7 +42,7 @@ virtual class HybridFactor : gtsam::Factor { bool equals(const gtsam::HybridFactor& other, double tol = 1e-9) const; // Standard interface: - double error(const gtsam::HybridValues &values) const; + double error(const gtsam::HybridValues& values) const; bool isDiscrete() const; bool isContinuous() const; bool isHybrid() const; @@ -61,40 +61,47 @@ virtual class HybridConditional { size_t nrParents() const; // Standard interface: - double logNormalizationConstant() const; + double negLogConstant() const; double logProbability(const gtsam::HybridValues& values) const; double evaluate(const gtsam::HybridValues& values) const; double operator()(const gtsam::HybridValues& values) const; - gtsam::GaussianMixture* asMixture() const; + gtsam::HybridGaussianConditional* asHybrid() const; gtsam::GaussianConditional* asGaussian() const; gtsam::DiscreteConditional* asDiscrete() const; gtsam::Factor* inner(); double error(const gtsam::HybridValues& values) const; }; -#include -class GaussianMixtureFactor : gtsam::HybridFactor { - GaussianMixtureFactor( - const gtsam::KeyVector& continuousKeys, - const gtsam::DiscreteKeys& discreteKeys, - const std::vector& factorsList); +#include +class HybridGaussianFactor : gtsam::HybridFactor { + HybridGaussianFactor( + const gtsam::DiscreteKey& discreteKey, + const std::vector& factors); + HybridGaussianFactor( + const gtsam::DiscreteKey& discreteKey, + const std::vector>& + factorPairs); - void print(string s = "GaussianMixtureFactor\n", + void print(string s = "HybridGaussianFactor\n", const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const; }; -#include -class GaussianMixture : gtsam::HybridFactor { - GaussianMixture(const 
gtsam::KeyVector& continuousFrontals, - const gtsam::KeyVector& continuousParents, - const gtsam::DiscreteKeys& discreteParents, - const std::vector& - conditionalsList); +#include +class HybridGaussianConditional : gtsam::HybridFactor { + HybridGaussianConditional( + const gtsam::DiscreteKeys& discreteParents, + const gtsam::HybridGaussianConditional::Conditionals& conditionals); + HybridGaussianConditional( + const gtsam::DiscreteKey& discreteParent, + const std::vector& conditionals); - gtsam::GaussianMixtureFactor* likelihood(const gtsam::VectorValues &frontals) const; + gtsam::HybridGaussianFactor* likelihood( + const gtsam::VectorValues& frontals) const; + double logProbability(const gtsam::HybridValues& values) const; + double evaluate(const gtsam::HybridValues& values) const; - void print(string s = "GaussianMixture\n", + void print(string s = "HybridGaussianConditional\n", const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const; }; @@ -128,7 +135,7 @@ class HybridBayesTree { #include class HybridBayesNet { HybridBayesNet(); - void push_back(const gtsam::GaussianMixture* s); + void push_back(const gtsam::HybridGaussianConditional* s); void push_back(const gtsam::GaussianConditional* s); void push_back(const gtsam::DiscreteConditional* s); @@ -136,7 +143,7 @@ class HybridBayesNet { size_t size() const; gtsam::KeySet keys() const; const gtsam::HybridConditional* at(size_t i) const; - + // Standard interface: double logProbability(const gtsam::HybridValues& values) const; double evaluate(const gtsam::HybridValues& values) const; @@ -146,7 +153,7 @@ class HybridBayesNet { const gtsam::VectorValues& measurements) const; gtsam::HybridValues optimize() const; - gtsam::HybridValues sample(const gtsam::HybridValues &given) const; + gtsam::HybridValues sample(const gtsam::HybridValues& given) const; gtsam::HybridValues sample() const; void print(string s = "HybridBayesNet\n", @@ -174,7 +181,7 @@ class HybridGaussianFactorGraph { void 
push_back(const gtsam::HybridGaussianFactorGraph& graph); void push_back(const gtsam::HybridBayesNet& bayesNet); void push_back(const gtsam::HybridBayesTree& bayesTree); - void push_back(const gtsam::GaussianMixtureFactor* gmm); + void push_back(const gtsam::HybridGaussianFactor* gmm); void push_back(gtsam::DecisionTreeFactor* factor); void push_back(gtsam::TableFactor* factor); void push_back(gtsam::JacobianFactor* factor); @@ -186,7 +193,8 @@ class HybridGaussianFactorGraph { const gtsam::HybridFactor* at(size_t i) const; void print(string s = "") const; - bool equals(const gtsam::HybridGaussianFactorGraph& fg, double tol = 1e-9) const; + bool equals(const gtsam::HybridGaussianFactorGraph& fg, + double tol = 1e-9) const; // evaluation double error(const gtsam::HybridValues& values) const; @@ -219,7 +227,8 @@ class HybridNonlinearFactorGraph { void push_back(gtsam::HybridFactor* factor); void push_back(gtsam::NonlinearFactor* factor); void push_back(gtsam::DiscreteFactor* factor); - gtsam::HybridGaussianFactorGraph linearize(const gtsam::Values& continuousValues) const; + gtsam::HybridGaussianFactorGraph linearize( + const gtsam::Values& continuousValues) const; bool empty() const; void remove(size_t i); @@ -228,32 +237,31 @@ class HybridNonlinearFactorGraph { const gtsam::HybridFactor* at(size_t i) const; void print(string s = "HybridNonlinearFactorGraph\n", - const gtsam::KeyFormatter& keyFormatter = - gtsam::DefaultKeyFormatter) const; + const gtsam::KeyFormatter& keyFormatter = + gtsam::DefaultKeyFormatter) const; }; -#include -class MixtureFactor : gtsam::HybridFactor { - MixtureFactor( - const gtsam::KeyVector& keys, const gtsam::DiscreteKeys& discreteKeys, - const gtsam::DecisionTree& factors, - bool normalized = false); +#include +class HybridNonlinearFactor : gtsam::HybridFactor { + HybridNonlinearFactor( + const gtsam::DiscreteKey& discreteKey, + const std::vector& factors); - template - MixtureFactor(const gtsam::KeyVector& keys, const 
gtsam::DiscreteKeys& discreteKeys, - const std::vector& factors, - bool normalized = false); + HybridNonlinearFactor( + const gtsam::DiscreteKey& discreteKey, + const std::vector>& factors); + + HybridNonlinearFactor( + const gtsam::DiscreteKeys& discreteKeys, + const gtsam::DecisionTree< + gtsam::Key, std::pair>& factors); double error(const gtsam::Values& continuousValues, const gtsam::DiscreteValues& discreteValues) const; - double nonlinearFactorLogNormalizingConstant(const gtsam::NonlinearFactor* factor, - const gtsam::Values& values) const; + HybridGaussianFactor* linearize(const gtsam::Values& continuousValues) const; - GaussianMixtureFactor* linearize( - const gtsam::Values& continuousValues) const; - - void print(string s = "MixtureFactor\n", + void print(string s = "HybridNonlinearFactor\n", const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const; }; diff --git a/gtsam/hybrid/tests/Switching.h b/gtsam/hybrid/tests/Switching.h index 4b2d3f11b..547facce9 100644 --- a/gtsam/hybrid/tests/Switching.h +++ b/gtsam/hybrid/tests/Switching.h @@ -19,13 +19,16 @@ #include #include #include -#include +#include #include +#include #include -#include #include +#include +#include #include #include +#include #include #include @@ -44,29 +47,28 @@ using symbol_shorthand::X; * system which depends on a discrete mode at each time step of the chain. * * @param n The number of chain elements. - * @param keyFunc The functional to help specify the continuous key. - * @param dKeyFunc The functional to help specify the discrete key. + * @param x The functional to help specify the continuous key. + * @param m The functional to help specify the discrete key. 
* @return HybridGaussianFactorGraph::shared_ptr */ inline HybridGaussianFactorGraph::shared_ptr makeSwitchingChain( - size_t n, std::function keyFunc = X, - std::function dKeyFunc = M) { + size_t n, std::function x = X, std::function m = M) { HybridGaussianFactorGraph hfg; - hfg.add(JacobianFactor(keyFunc(1), I_3x3, Z_3x1)); + hfg.add(JacobianFactor(x(1), I_3x3, Z_3x1)); - // keyFunc(1) to keyFunc(n+1) + // x(1) to x(n+1) for (size_t t = 1; t < n; t++) { - hfg.add(GaussianMixtureFactor( - {keyFunc(t), keyFunc(t + 1)}, {{dKeyFunc(t), 2}}, - {std::make_shared(keyFunc(t), I_3x3, keyFunc(t + 1), - I_3x3, Z_3x1), - std::make_shared(keyFunc(t), I_3x3, keyFunc(t + 1), - I_3x3, Vector3::Ones())})); + DiscreteKeys dKeys{{m(t), 2}}; + std::vector components; + components.emplace_back( + new JacobianFactor(x(t), I_3x3, x(t + 1), I_3x3, Z_3x1)); + components.emplace_back( + new JacobianFactor(x(t), I_3x3, x(t + 1), I_3x3, Vector3::Ones())); + hfg.add(HybridGaussianFactor({m(t), 2}, components)); if (t > 1) { - hfg.add(DecisionTreeFactor({{dKeyFunc(t - 1), 2}, {dKeyFunc(t), 2}}, - "0 1 1 3")); + hfg.add(DecisionTreeFactor({{m(t - 1), 2}, {m(t), 2}}, "0 1 1 3")); } } @@ -112,11 +114,11 @@ inline std::pair> makeBinaryOrdering( return {new_order, levels}; } -/* *************************************************************************** - */ +/* ****************************************************************************/ using MotionModel = BetweenFactor; // Test fixture with switching network. +/// Ï•(X(0)) .. Ï•(X(k),X(k+1)) .. Ï•(X(k);z_k) .. Ï•(M(0)) .. Ï•(M(k),M(k+1)) struct Switching { size_t K; DiscreteKeys modes; @@ -138,8 +140,8 @@ struct Switching { : K(K) { using noiseModel::Isotropic; - // Create DiscreteKeys for binary K modes. - for (size_t k = 0; k < K; k++) { + // Create DiscreteKeys for K-1 binary modes. + for (size_t k = 0; k < K - 1; k++) { modes.emplace_back(M(k), 2); } @@ -151,30 +153,26 @@ struct Switching { } // Create hybrid factor graph. 
- // Add a prior on X(0). + + // Add a prior Ï•(X(0)) on X(0). nonlinearFactorGraph.emplace_shared>( X(0), measurements.at(0), Isotropic::Sigma(1, prior_sigma)); - // Add "motion models". + // Add "motion models" Ï•(X(k),X(k+1)). for (size_t k = 0; k < K - 1; k++) { - KeyVector keys = {X(k), X(k + 1)}; auto motion_models = motionModels(k, between_sigma); - std::vector components; - for (auto &&f : motion_models) { - components.push_back(std::dynamic_pointer_cast(f)); - } - nonlinearFactorGraph.emplace_shared( - keys, DiscreteKeys{modes[k]}, components); + nonlinearFactorGraph.emplace_shared(modes[k], + motion_models); } - // Add measurement factors + // Add measurement factors Ï•(X(k);z_k). auto measurement_noise = Isotropic::Sigma(1, prior_sigma); for (size_t k = 1; k < K; k++) { nonlinearFactorGraph.emplace_shared>( X(k), measurements.at(k), measurement_noise); } - // Add "mode chain" + // Add "mode chain" Ï•(M(0)) Ï•(M(0),M(1)) ... Ï•(M(K-3),M(K-2)) addModeChain(&nonlinearFactorGraph, discrete_transition_prob); // Create the linearization point. @@ -182,14 +180,12 @@ struct Switching { linearizationPoint.insert(X(k), static_cast(k + 1)); } - // The ground truth is robot moving forward - // and one less than the linearization point linearizedFactorGraph = *nonlinearFactorGraph.linearize(linearizationPoint); } // Create motion models for a given time step - static std::vector motionModels(size_t k, - double sigma = 1.0) { + static std::vector motionModels( + size_t k, double sigma = 1.0) { auto noise_model = noiseModel::Isotropic::Sigma(1, sigma); auto still = std::make_shared(X(k), X(k + 1), 0.0, noise_model), @@ -199,7 +195,7 @@ struct Switching { } /** - * @brief Add "mode chain" to HybridNonlinearFactorGraph from M(0) to M(K-2). + * @brief Add "mode chain" to HybridNonlinearFactorGraph from M(0) to M(K-1). * E.g. if K=4, we want M0, M1 and M2. * * @param fg The factor graph to which the mode chain is added. 
diff --git a/gtsam/hybrid/tests/TinyHybridExample.h b/gtsam/hybrid/tests/TinyHybridExample.h index 39a1a1a9e..1862e8a31 100644 --- a/gtsam/hybrid/tests/TinyHybridExample.h +++ b/gtsam/hybrid/tests/TinyHybridExample.h @@ -40,15 +40,13 @@ inline HybridBayesNet createHybridBayesNet(size_t num_measurements = 1, bool manyModes = false) { HybridBayesNet bayesNet; - // Create Gaussian mixture z_i = x0 + noise for each measurement. + // Create hybrid Gaussian factor z_i = x0 + noise for each measurement. + std::vector> measurementModels{{Z_1x1, 0.5}, + {Z_1x1, 3.0}}; for (size_t i = 0; i < num_measurements; i++) { const auto mode_i = manyModes ? DiscreteKey{M(i), 2} : mode; - bayesNet.emplace_back( - new GaussianMixture({Z(i)}, {X(0)}, {mode_i}, - {GaussianConditional::sharedMeanAndStddev( - Z(i), I_1x1, X(0), Z_1x1, 0.5), - GaussianConditional::sharedMeanAndStddev( - Z(i), I_1x1, X(0), Z_1x1, 3)})); + bayesNet.emplace_shared(mode_i, Z(i), I_1x1, + X(0), measurementModels); } // Create prior on X(0). @@ -58,7 +56,7 @@ inline HybridBayesNet createHybridBayesNet(size_t num_measurements = 1, // Add prior on mode. const size_t nrModes = manyModes ? 
num_measurements : 1; for (size_t i = 0; i < nrModes; i++) { - bayesNet.emplace_back(new DiscreteConditional({M(i), 2}, "4/6")); + bayesNet.emplace_shared(DiscreteKey{M(i), 2}, "4/6"); } return bayesNet; } @@ -70,8 +68,7 @@ inline HybridBayesNet createHybridBayesNet(size_t num_measurements = 1, * the generative Bayes net model HybridBayesNet::Example(num_measurements) */ inline HybridGaussianFactorGraph createHybridGaussianFactorGraph( - size_t num_measurements = 1, - std::optional measurements = {}, + size_t num_measurements = 1, std::optional measurements = {}, bool manyModes = false) { auto bayesNet = createHybridBayesNet(num_measurements, manyModes); if (measurements) { diff --git a/gtsam/hybrid/tests/testGaussianMixture.cpp b/gtsam/hybrid/tests/testGaussianMixture.cpp index 4da61912e..3256f5686 100644 --- a/gtsam/hybrid/tests/testGaussianMixture.cpp +++ b/gtsam/hybrid/tests/testGaussianMixture.cpp @@ -11,214 +11,145 @@ /** * @file testGaussianMixture.cpp - * @brief Unit tests for GaussianMixture class + * @brief Test hybrid elimination with a simple mixture model * @author Varun Agrawal - * @author Fan Jiang * @author Frank Dellaert - * @date December 2021 + * @date September 2024 */ -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include #include #include - -#include +#include // Include for test suite #include using namespace gtsam; -using noiseModel::Isotropic; using symbol_shorthand::M; -using symbol_shorthand::X; using symbol_shorthand::Z; -// Common constants -static const Key modeKey = M(0); -static const DiscreteKey mode(modeKey, 2); -static const VectorValues vv{{Z(0), Vector1(4.9)}, {X(0), Vector1(5.0)}}; -static const DiscreteValues assignment0{{M(0), 0}}, assignment1{{M(0), 1}}; -static const HybridValues hv0{vv, assignment0}; -static const HybridValues hv1{vv, assignment1}; +// Define mode key and an assignment m==1 +const DiscreteKey m(M(0), 2); +const DiscreteValues m1Assignment{{M(0), 1}}; 
-/* ************************************************************************* */ -namespace equal_constants { -// Create a simple GaussianMixture -const double commonSigma = 2.0; -const std::vector conditionals{ - GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), - commonSigma), - GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), - commonSigma)}; -const GaussianMixture mixture({Z(0)}, {X(0)}, {mode}, conditionals); -} // namespace equal_constants +// Define a 50/50 prior on the mode +DiscreteConditional::shared_ptr mixing = + std::make_shared(m, "60/40"); -/* ************************************************************************* */ -/// Check that invariants hold -TEST(GaussianMixture, Invariants) { - using namespace equal_constants; +/// Gaussian density function +double Gaussian(double mu, double sigma, double z) { + return exp(-0.5 * pow((z - mu) / sigma, 2)) / sqrt(2 * M_PI * sigma * sigma); +}; - // Check that the mixture normalization constant is the max of all constants - // which are all equal, in this case, hence: - const double K = mixture.logNormalizationConstant(); - EXPECT_DOUBLES_EQUAL(K, conditionals[0]->logNormalizationConstant(), 1e-8); - EXPECT_DOUBLES_EQUAL(K, conditionals[1]->logNormalizationConstant(), 1e-8); +/** + * Closed form computation of P(m=1|z). + * If sigma0 == sigma1, it simplifies to a sigmoid function. + * Hardcodes 60/40 prior on mode. + */ +double prob_m_z(double mu0, double mu1, double sigma0, double sigma1, + double z) { + const double p0 = 0.6 * Gaussian(mu0, sigma0, z); + const double p1 = 0.4 * Gaussian(mu1, sigma1, z); + return p1 / (p0 + p1); +}; - EXPECT(GaussianMixture::CheckInvariants(mixture, hv0)); - EXPECT(GaussianMixture::CheckInvariants(mixture, hv1)); +/// Given \phi(m;z)\phi(m) use eliminate to obtain P(m|z). 
+DiscreteConditional SolveHFG(const HybridGaussianFactorGraph &hfg) { + return *hfg.eliminateSequential()->at(0)->asDiscrete(); } -/* ************************************************************************* */ -/// Check LogProbability. -TEST(GaussianMixture, LogProbability) { - using namespace equal_constants; - auto actual = mixture.logProbability(vv); +/// Given p(z,m) and z, convert to HFG and solve. +DiscreteConditional SolveHBN(const HybridBayesNet &hbn, double z) { + VectorValues given{{Z(0), Vector1(z)}}; + return SolveHFG(hbn.toFactorGraph(given)); +} - // Check result. - std::vector discrete_keys = {mode}; - std::vector leaves = {conditionals[0]->logProbability(vv), - conditionals[1]->logProbability(vv)}; - AlgebraicDecisionTree expected(discrete_keys, leaves); +/* + * Test a Gaussian Mixture Model P(m)p(z|m) with same sigma. + * The posterior, as a function of z, should be a sigmoid function. + */ +TEST(GaussianMixture, GaussianMixtureModel) { + double mu0 = 1.0, mu1 = 3.0; + double sigma = 2.0; - EXPECT(assert_equal(expected, actual, 1e-6)); + // Create a Gaussian mixture model p(z|m) with same sigma. + HybridBayesNet gmm; + std::vector> parameters{{Vector1(mu0), sigma}, + {Vector1(mu1), sigma}}; + gmm.emplace_shared(m, Z(0), parameters); + gmm.push_back(mixing); - // Check for non-tree version. - for (size_t mode : {0, 1}) { - const HybridValues hv{vv, {{M(0), mode}}}; - EXPECT_DOUBLES_EQUAL(conditionals[mode]->logProbability(vv), - mixture.logProbability(hv), 1e-8); + // At the halfway point between the means, we should get P(m|z)=0.5 + double midway = mu1 - mu0; + auto pMid = SolveHBN(gmm, midway); + EXPECT(assert_equal(DiscreteConditional(m, "60/40"), pMid)); + + // Everywhere else, the result should be a sigmoid. 
+ for (const double shift : {-4, -2, 0, 2, 4}) { + const double z = midway + shift; + const double expected = prob_m_z(mu0, mu1, sigma, sigma, z); + + // Workflow 1: convert HBN to HFG and solve + auto posterior1 = SolveHBN(gmm, z); + EXPECT_DOUBLES_EQUAL(expected, posterior1(m1Assignment), 1e-8); + + // Workflow 2: directly specify HFG and solve + HybridGaussianFactorGraph hfg1; + hfg1.emplace_shared( + m, std::vector{Gaussian(mu0, sigma, z), Gaussian(mu1, sigma, z)}); + hfg1.push_back(mixing); + auto posterior2 = SolveHFG(hfg1); + EXPECT_DOUBLES_EQUAL(expected, posterior2(m1Assignment), 1e-8); } } -/* ************************************************************************* */ -/// Check error. -TEST(GaussianMixture, Error) { - using namespace equal_constants; - auto actual = mixture.errorTree(vv); +/* + * Test a Gaussian Mixture Model P(m)p(z|m) with different sigmas. + * The posterior, as a function of z, should be a unimodal function. + */ +TEST(GaussianMixture, GaussianMixtureModel2) { + double mu0 = 1.0, mu1 = 3.0; + double sigma0 = 8.0, sigma1 = 4.0; - // Check result. - std::vector discrete_keys = {mode}; - std::vector leaves = {conditionals[0]->error(vv), - conditionals[1]->error(vv)}; - AlgebraicDecisionTree expected(discrete_keys, leaves); + // Create a Gaussian mixture model p(z|m) with same sigma. + HybridBayesNet gmm; + std::vector> parameters{{Vector1(mu0), sigma0}, + {Vector1(mu1), sigma1}}; + gmm.emplace_shared(m, Z(0), parameters); + gmm.push_back(mixing); - EXPECT(assert_equal(expected, actual, 1e-6)); + // We get zMax=3.1333 by finding the maximum value of the function, at which + // point the mode m==1 is about twice as probable as m==0. + double zMax = 3.133; + auto pMax = SolveHBN(gmm, zMax); + EXPECT(assert_equal(DiscreteConditional(m, "42/58"), pMax, 1e-4)); - // Check for non-tree version. 
- for (size_t mode : {0, 1}) { - const HybridValues hv{vv, {{M(0), mode}}}; - EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv), mixture.error(hv), - 1e-8); + // Everywhere else, the result should be a bell curve like function. + for (const double shift : {-4, -2, 0, 2, 4}) { + const double z = zMax + shift; + const double expected = prob_m_z(mu0, mu1, sigma0, sigma1, z); + + // Workflow 1: convert HBN to HFG and solve + auto posterior1 = SolveHBN(gmm, z); + EXPECT_DOUBLES_EQUAL(expected, posterior1(m1Assignment), 1e-8); + + // Workflow 2: directly specify HFG and solve + HybridGaussianFactorGraph hfg; + hfg.emplace_shared( + m, std::vector{Gaussian(mu0, sigma0, z), Gaussian(mu1, sigma1, z)}); + hfg.push_back(mixing); + auto posterior2 = SolveHFG(hfg); + EXPECT_DOUBLES_EQUAL(expected, posterior2(m1Assignment), 1e-8); } } -/* ************************************************************************* */ -/// Check that the likelihood is proportional to the conditional density given -/// the measurements. -TEST(GaussianMixture, Likelihood) { - using namespace equal_constants; - - // Compute likelihood - auto likelihood = mixture.likelihood(vv); - - // Check that the mixture error and the likelihood error are the same. - EXPECT_DOUBLES_EQUAL(mixture.error(hv0), likelihood->error(hv0), 1e-8); - EXPECT_DOUBLES_EQUAL(mixture.error(hv1), likelihood->error(hv1), 1e-8); - - // Check that likelihood error is as expected, i.e., just the errors of the - // individual likelihoods, in the `equal_constants` case. - std::vector discrete_keys = {mode}; - std::vector leaves = {conditionals[0]->likelihood(vv)->error(vv), - conditionals[1]->likelihood(vv)->error(vv)}; - AlgebraicDecisionTree expected(discrete_keys, leaves); - EXPECT(assert_equal(expected, likelihood->errorTree(vv), 1e-6)); - - // Check that the ratio of probPrime to evaluate is the same for all modes. 
- std::vector ratio(2); - for (size_t mode : {0, 1}) { - const HybridValues hv{vv, {{M(0), mode}}}; - ratio[mode] = std::exp(-likelihood->error(hv)) / mixture.evaluate(hv); - } - EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8); -} - -/* ************************************************************************* */ -namespace mode_dependent_constants { -// Create a GaussianMixture with mode-dependent noise models. -// 0 is low-noise, 1 is high-noise. -const std::vector conditionals{ - GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), - 0.5), - GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), - 3.0)}; -const GaussianMixture mixture({Z(0)}, {X(0)}, {mode}, conditionals); -} // namespace mode_dependent_constants - -/* ************************************************************************* */ -// Create a test for continuousParents. -TEST(GaussianMixture, ContinuousParents) { - using namespace mode_dependent_constants; - const KeyVector continuousParentKeys = mixture.continuousParents(); - // Check that the continuous parent keys are correct: - EXPECT(continuousParentKeys.size() == 1); - EXPECT(continuousParentKeys[0] == X(0)); -} - -/* ************************************************************************* */ -/// Check that the likelihood is proportional to the conditional density given -/// the measurements. -TEST(GaussianMixture, Likelihood2) { - using namespace mode_dependent_constants; - - // Compute likelihood - auto likelihood = mixture.likelihood(vv); - - // Check that the mixture error and the likelihood error are as expected, - // this invariant is the same as the equal noise case: - EXPECT_DOUBLES_EQUAL(mixture.error(hv0), likelihood->error(hv0), 1e-8); - EXPECT_DOUBLES_EQUAL(mixture.error(hv1), likelihood->error(hv1), 1e-8); - - // Check the detailed JacobianFactor calculation for mode==1. 
- { - // We have a JacobianFactor - const auto gf1 = (*likelihood)(assignment1); - const auto jf1 = std::dynamic_pointer_cast(gf1); - CHECK(jf1); - - // It has 2 rows, not 1! - CHECK(jf1->rows() == 2); - - // Check that the constant C1 is properly encoded in the JacobianFactor. - const double C1 = mixture.logNormalizationConstant() - - conditionals[1]->logNormalizationConstant(); - const double c1 = std::sqrt(2.0 * C1); - Vector expected_unwhitened(2); - expected_unwhitened << 4.9 - 5.0, -c1; - Vector actual_unwhitened = jf1->unweighted_error(vv); - EXPECT(assert_equal(expected_unwhitened, actual_unwhitened)); - - // Make sure the noise model does not touch it. - Vector expected_whitened(2); - expected_whitened << (4.9 - 5.0) / 3.0, -c1; - Vector actual_whitened = jf1->error_vector(vv); - EXPECT(assert_equal(expected_whitened, actual_whitened)); - - // Check that the error is equal to the mixture error: - EXPECT_DOUBLES_EQUAL(mixture.error(hv1), jf1->error(hv1), 1e-8); - } - - // Check that the ratio of probPrime to evaluate is the same for all modes. - std::vector ratio(2); - for (size_t mode : {0, 1}) { - const HybridValues hv{vv, {{M(0), mode}}}; - ratio[mode] = std::exp(-likelihood->error(hv)) / mixture.evaluate(hv); - } - EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8); -} - /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/hybrid/tests/testGaussianMixtureFactor.cpp b/gtsam/hybrid/tests/testGaussianMixtureFactor.cpp deleted file mode 100644 index 9cc7e6bfd..000000000 --- a/gtsam/hybrid/tests/testGaussianMixtureFactor.cpp +++ /dev/null @@ -1,203 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file testGaussianMixtureFactor.cpp - * @brief Unit tests for GaussianMixtureFactor - * @author Varun Agrawal - * @author Fan Jiang - * @author Frank Dellaert - * @date December 2021 - */ - -#include -#include -#include -#include -#include -#include -#include - -// Include for test suite -#include - -using namespace std; -using namespace gtsam; -using noiseModel::Isotropic; -using symbol_shorthand::M; -using symbol_shorthand::X; - -/* ************************************************************************* */ -// Check iterators of empty mixture. -TEST(GaussianMixtureFactor, Constructor) { - GaussianMixtureFactor factor; - GaussianMixtureFactor::const_iterator const_it = factor.begin(); - CHECK(const_it == factor.end()); - GaussianMixtureFactor::iterator it = factor.begin(); - CHECK(it == factor.end()); -} - -/* ************************************************************************* */ -// "Add" two mixture factors together. -TEST(GaussianMixtureFactor, Sum) { - DiscreteKey m1(1, 2), m2(2, 3); - - auto A1 = Matrix::Zero(2, 1); - auto A2 = Matrix::Zero(2, 2); - auto A3 = Matrix::Zero(2, 3); - auto b = Matrix::Zero(2, 1); - Vector2 sigmas; - sigmas << 1, 2; - auto model = noiseModel::Diagonal::Sigmas(sigmas, true); - - auto f10 = std::make_shared(X(1), A1, X(2), A2, b); - auto f11 = std::make_shared(X(1), A1, X(2), A2, b); - auto f20 = std::make_shared(X(1), A1, X(3), A3, b); - auto f21 = std::make_shared(X(1), A1, X(3), A3, b); - auto f22 = std::make_shared(X(1), A1, X(3), A3, b); - std::vector factorsA{f10, f11}; - std::vector factorsB{f20, f21, f22}; - - // TODO(Frank): why specify keys at all? And: keys in factor should be *all* - // keys, deviating from Kevin's scheme. Should we index DT on DiscreteKey? - // Design review! 
- GaussianMixtureFactor mixtureFactorA({X(1), X(2)}, {m1}, factorsA); - GaussianMixtureFactor mixtureFactorB({X(1), X(3)}, {m2}, factorsB); - - // Check that number of keys is 3 - EXPECT_LONGS_EQUAL(3, mixtureFactorA.keys().size()); - - // Check that number of discrete keys is 1 - EXPECT_LONGS_EQUAL(1, mixtureFactorA.discreteKeys().size()); - - // Create sum of two mixture factors: it will be a decision tree now on both - // discrete variables m1 and m2: - GaussianFactorGraphTree sum; - sum += mixtureFactorA; - sum += mixtureFactorB; - - // Let's check that this worked: - Assignment mode; - mode[m1.first] = 1; - mode[m2.first] = 2; - auto actual = sum(mode); - EXPECT(actual.at(0) == f11); - EXPECT(actual.at(1) == f22); -} - -/* ************************************************************************* */ -TEST(GaussianMixtureFactor, Printing) { - DiscreteKey m1(1, 2); - auto A1 = Matrix::Zero(2, 1); - auto A2 = Matrix::Zero(2, 2); - auto b = Matrix::Zero(2, 1); - auto f10 = std::make_shared(X(1), A1, X(2), A2, b); - auto f11 = std::make_shared(X(1), A1, X(2), A2, b); - std::vector factors{f10, f11}; - - GaussianMixtureFactor mixtureFactor({X(1), X(2)}, {m1}, factors); - - std::string expected = - R"(Hybrid [x1 x2; 1]{ - Choice(1) - 0 Leaf : - A[x1] = [ - 0; - 0 -] - A[x2] = [ - 0, 0; - 0, 0 -] - b = [ 0 0 ] - No noise model - - 1 Leaf : - A[x1] = [ - 0; - 0 -] - A[x2] = [ - 0, 0; - 0, 0 -] - b = [ 0 0 ] - No noise model - -} -)"; - EXPECT(assert_print_equal(expected, mixtureFactor)); -} - -/* ************************************************************************* */ -TEST(GaussianMixtureFactor, GaussianMixture) { - KeyVector keys; - keys.push_back(X(0)); - keys.push_back(X(1)); - - DiscreteKeys dKeys; - dKeys.emplace_back(M(0), 2); - dKeys.emplace_back(M(1), 2); - - auto gaussians = std::make_shared(); - GaussianMixture::Conditionals conditionals(gaussians); - GaussianMixture gm({}, keys, dKeys, conditionals); - - EXPECT_LONGS_EQUAL(2, gm.discreteKeys().size()); 
-} - -/* ************************************************************************* */ -// Test the error of the GaussianMixtureFactor -TEST(GaussianMixtureFactor, Error) { - DiscreteKey m1(1, 2); - - auto A01 = Matrix2::Identity(); - auto A02 = Matrix2::Identity(); - - auto A11 = Matrix2::Identity(); - auto A12 = Matrix2::Identity() * 2; - - auto b = Vector2::Zero(); - - auto f0 = std::make_shared(X(1), A01, X(2), A02, b); - auto f1 = std::make_shared(X(1), A11, X(2), A12, b); - std::vector factors{f0, f1}; - - GaussianMixtureFactor mixtureFactor({X(1), X(2)}, {m1}, factors); - - VectorValues continuousValues; - continuousValues.insert(X(1), Vector2(0, 0)); - continuousValues.insert(X(2), Vector2(1, 1)); - - // error should return a tree of errors, with nodes for each discrete value. - AlgebraicDecisionTree error_tree = mixtureFactor.errorTree(continuousValues); - - std::vector discrete_keys = {m1}; - // Error values for regression test - std::vector errors = {1, 4}; - AlgebraicDecisionTree expected_error(discrete_keys, errors); - - EXPECT(assert_equal(expected_error, error_tree)); - - // Test for single leaf given discrete assignment P(X|M,Z). 
- DiscreteValues discreteValues; - discreteValues[m1.first] = 1; - EXPECT_DOUBLES_EQUAL( - 4.0, mixtureFactor.error({continuousValues, discreteValues}), - 1e-9); -} - -/* ************************************************************************* */ -int main() { - TestResult tr; - return TestRegistry::runAllTests(tr); -} -/* ************************************************************************* */ \ No newline at end of file diff --git a/gtsam/hybrid/tests/testHybridBayesNet.cpp b/gtsam/hybrid/tests/testHybridBayesNet.cpp index 00dc36cd0..79979ac83 100644 --- a/gtsam/hybrid/tests/testHybridBayesNet.cpp +++ b/gtsam/hybrid/tests/testHybridBayesNet.cpp @@ -20,6 +20,7 @@ #include #include +#include #include #include "Switching.h" @@ -31,7 +32,6 @@ using namespace std; using namespace gtsam; -using noiseModel::Isotropic; using symbol_shorthand::M; using symbol_shorthand::X; using symbol_shorthand::Z; @@ -43,7 +43,7 @@ static const DiscreteKey Asia(asiaKey, 2); // Test creation of a pure discrete Bayes net. TEST(HybridBayesNet, Creation) { HybridBayesNet bayesNet; - bayesNet.emplace_back(new DiscreteConditional(Asia, "99/1")); + bayesNet.emplace_shared(Asia, "99/1"); DiscreteConditional expected(Asia, "99/1"); CHECK(bayesNet.at(0)->asDiscrete()); @@ -54,7 +54,7 @@ TEST(HybridBayesNet, Creation) { // Test adding a Bayes net to another one. TEST(HybridBayesNet, Add) { HybridBayesNet bayesNet; - bayesNet.emplace_back(new DiscreteConditional(Asia, "99/1")); + bayesNet.emplace_shared(Asia, "99/1"); HybridBayesNet other; other.add(bayesNet); @@ -62,68 +62,165 @@ TEST(HybridBayesNet, Add) { } /* ****************************************************************************/ -// Test evaluate for a pure discrete Bayes net P(Asia). +// Test API for a pure discrete Bayes net P(Asia). 
TEST(HybridBayesNet, EvaluatePureDiscrete) { HybridBayesNet bayesNet; - bayesNet.emplace_back(new DiscreteConditional(Asia, "4/6")); - HybridValues values; - values.insert(asiaKey, 0); - EXPECT_DOUBLES_EQUAL(0.4, bayesNet.evaluate(values), 1e-9); + const auto pAsia = std::make_shared(Asia, "4/6"); + bayesNet.push_back(pAsia); + HybridValues zero{{}, {{asiaKey, 0}}}, one{{}, {{asiaKey, 1}}}; + + // choose + GaussianBayesNet empty; + EXPECT(assert_equal(empty, bayesNet.choose(zero.discrete()), 1e-9)); + + // evaluate + EXPECT_DOUBLES_EQUAL(0.4, bayesNet.evaluate(zero), 1e-9); + EXPECT_DOUBLES_EQUAL(0.4, bayesNet(zero), 1e-9); + + // optimize + EXPECT(assert_equal(one, bayesNet.optimize())); + EXPECT(assert_equal(VectorValues{}, bayesNet.optimize(one.discrete()))); + + // sample + std::mt19937_64 rng(42); + EXPECT(assert_equal(zero, bayesNet.sample(&rng))); + EXPECT(assert_equal(one, bayesNet.sample(one, &rng))); + EXPECT(assert_equal(zero, bayesNet.sample(zero, &rng))); + + // error + EXPECT_DOUBLES_EQUAL(-log(0.4), bayesNet.error(zero), 1e-9); + EXPECT_DOUBLES_EQUAL(-log(0.6), bayesNet.error(one), 1e-9); + + // logProbability + EXPECT_DOUBLES_EQUAL(log(0.4), bayesNet.logProbability(zero), 1e-9); + EXPECT_DOUBLES_EQUAL(log(0.6), bayesNet.logProbability(one), 1e-9); + + // toFactorGraph + HybridGaussianFactorGraph expectedFG{pAsia}, fg = bayesNet.toFactorGraph({}); + EXPECT(assert_equal(expectedFG, fg)); + + // prune, imperative :-( + EXPECT(assert_equal(bayesNet, bayesNet.prune(2))); + EXPECT_LONGS_EQUAL(1, bayesNet.prune(1).at(0)->size()); } /* ****************************************************************************/ // Test creation of a tiny hybrid Bayes net. 
TEST(HybridBayesNet, Tiny) { - auto bn = tiny::createHybridBayesNet(); - EXPECT_LONGS_EQUAL(3, bn.size()); + auto bayesNet = tiny::createHybridBayesNet(); // P(z|x,mode)P(x)P(mode) + EXPECT_LONGS_EQUAL(3, bayesNet.size()); const VectorValues vv{{Z(0), Vector1(5.0)}, {X(0), Vector1(5.0)}}; - auto fg = bn.toFactorGraph(vv); + HybridValues zero{vv, {{M(0), 0}}}, one{vv, {{M(0), 1}}}; + + // Check Invariants for components + HybridGaussianConditional::shared_ptr hgc = bayesNet.at(0)->asHybrid(); + GaussianConditional::shared_ptr gc0 = hgc->choose(zero.discrete()), + gc1 = hgc->choose(one.discrete()); + GaussianConditional::shared_ptr px = bayesNet.at(1)->asGaussian(); + GaussianConditional::CheckInvariants(*gc0, vv); + GaussianConditional::CheckInvariants(*gc1, vv); + GaussianConditional::CheckInvariants(*px, vv); + HybridGaussianConditional::CheckInvariants(*hgc, zero); + HybridGaussianConditional::CheckInvariants(*hgc, one); + + // choose + GaussianBayesNet expectedChosen; + expectedChosen.push_back(gc0); + expectedChosen.push_back(px); + auto chosen0 = bayesNet.choose(zero.discrete()); + auto chosen1 = bayesNet.choose(one.discrete()); + EXPECT(assert_equal(expectedChosen, chosen0, 1e-9)); + + // logProbability + const double logP0 = chosen0.logProbability(vv) + log(0.4); // 0.4 is prior + const double logP1 = chosen1.logProbability(vv) + log(0.6); // 0.6 is prior + EXPECT_DOUBLES_EQUAL(logP0, bayesNet.logProbability(zero), 1e-9); + EXPECT_DOUBLES_EQUAL(logP1, bayesNet.logProbability(one), 1e-9); + + // evaluate + EXPECT_DOUBLES_EQUAL(exp(logP0), bayesNet.evaluate(zero), 1e-9); + + // optimize + EXPECT(assert_equal(one, bayesNet.optimize())); + EXPECT(assert_equal(chosen0.optimize(), bayesNet.optimize(zero.discrete()))); + + // sample + std::mt19937_64 rng(42); + EXPECT(assert_equal({{M(0), 1}}, bayesNet.sample(&rng).discrete())); + + // error + const double error0 = chosen0.error(vv) + gc0->negLogConstant() - + px->negLogConstant() - log(0.4); + const double error1 
= chosen1.error(vv) + gc1->negLogConstant() - + px->negLogConstant() - log(0.6); + EXPECT_DOUBLES_EQUAL(error0, bayesNet.error(zero), 1e-9); + EXPECT_DOUBLES_EQUAL(error1, bayesNet.error(one), 1e-9); + EXPECT_DOUBLES_EQUAL(error0 + logP0, error1 + logP1, 1e-9); + + // toFactorGraph + auto fg = bayesNet.toFactorGraph({{Z(0), Vector1(5.0)}}); EXPECT_LONGS_EQUAL(3, fg.size()); // Check that the ratio of probPrime to evaluate is the same for all modes. std::vector ratio(2); - for (size_t mode : {0, 1}) { - const HybridValues hv{vv, {{M(0), mode}}}; - ratio[mode] = std::exp(-fg.error(hv)) / bn.evaluate(hv); - } + ratio[0] = std::exp(-fg.error(zero)) / bayesNet.evaluate(zero); + ratio[1] = std::exp(-fg.error(one)) / bayesNet.evaluate(one); EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8); + + // prune, imperative :-( + auto pruned = bayesNet.prune(1); + EXPECT_LONGS_EQUAL(1, pruned.at(0)->asHybrid()->nrComponents()); + EXPECT(!pruned.equals(bayesNet)); + } +/* ****************************************************************************/ +// Hybrid Bayes net P(X0|X1) P(X1|Asia) P(Asia). +namespace different_sigmas { +const auto gc = GaussianConditional::sharedMeanAndStddev(X(0), 2 * I_1x1, X(1), + Vector1(-4.0), 5.0); + +const std::vector> parms{{Vector1(5), 2.0}, + {Vector1(2), 3.0}}; +const auto hgc = std::make_shared(Asia, X(1), parms); + +const auto prior = std::make_shared(Asia, "99/1"); +auto wrap = [](const auto& c) { + return std::make_shared(c); +}; +const HybridBayesNet bayesNet{wrap(gc), wrap(hgc), wrap(prior)}; + +// Create values at which to evaluate. +HybridValues values{{{X(0), Vector1(-6)}, {X(1), Vector1(1)}}, {{asiaKey, 0}}}; +} // namespace different_sigmas + /* ****************************************************************************/ // Test evaluate for a hybrid Bayes net P(X0|X1) P(X1|Asia) P(Asia). 
TEST(HybridBayesNet, evaluateHybrid) { - const auto continuousConditional = GaussianConditional::sharedMeanAndStddev( - X(0), 2 * I_1x1, X(1), Vector1(-4.0), 5.0); + using namespace different_sigmas; - const SharedDiagonal model0 = noiseModel::Diagonal::Sigmas(Vector1(2.0)), - model1 = noiseModel::Diagonal::Sigmas(Vector1(3.0)); - - const auto conditional0 = std::make_shared( - X(1), Vector1::Constant(5), I_1x1, model0), - conditional1 = std::make_shared( - X(1), Vector1::Constant(2), I_1x1, model1); - - // Create hybrid Bayes net. - HybridBayesNet bayesNet; - bayesNet.push_back(continuousConditional); - bayesNet.emplace_back( - new GaussianMixture({X(1)}, {}, {Asia}, {conditional0, conditional1})); - bayesNet.emplace_back(new DiscreteConditional(Asia, "99/1")); - - // Create values at which to evaluate. - HybridValues values; - values.insert(asiaKey, 0); - values.insert(X(0), Vector1(-6)); - values.insert(X(1), Vector1(1)); - - const double conditionalProbability = - continuousConditional->evaluate(values.continuous()); - const double mixtureProbability = conditional0->evaluate(values.continuous()); + const double conditionalProbability = gc->evaluate(values.continuous()); + const double mixtureProbability = hgc->evaluate(values); EXPECT_DOUBLES_EQUAL(conditionalProbability * mixtureProbability * 0.99, bayesNet.evaluate(values), 1e-9); } +/* ****************************************************************************/ +// Test error for a hybrid Bayes net P(X0|X1) P(X1|Asia) P(Asia). +TEST(HybridBayesNet, Error) { + using namespace different_sigmas; + + AlgebraicDecisionTree actual = bayesNet.errorTree(values.continuous()); + + // Regression. + // Manually added all the error values from the 3 conditional types. 
+ AlgebraicDecisionTree expected( + {Asia}, std::vector{2.33005033585, 5.38619084965}); + + EXPECT(assert_equal(expected, actual)); +} + /* ****************************************************************************/ // Test choosing an assignment of conditionals TEST(HybridBayesNet, Choose) { @@ -143,55 +240,16 @@ TEST(HybridBayesNet, Choose) { EXPECT_LONGS_EQUAL(4, gbn.size()); - EXPECT(assert_equal(*(*hybridBayesNet->at(0)->asMixture())(assignment), + EXPECT(assert_equal(*(*hybridBayesNet->at(0)->asHybrid())(assignment), *gbn.at(0))); - EXPECT(assert_equal(*(*hybridBayesNet->at(1)->asMixture())(assignment), + EXPECT(assert_equal(*(*hybridBayesNet->at(1)->asHybrid())(assignment), *gbn.at(1))); - EXPECT(assert_equal(*(*hybridBayesNet->at(2)->asMixture())(assignment), + EXPECT(assert_equal(*(*hybridBayesNet->at(2)->asHybrid())(assignment), *gbn.at(2))); - EXPECT(assert_equal(*(*hybridBayesNet->at(3)->asMixture())(assignment), + EXPECT(assert_equal(*(*hybridBayesNet->at(3)->asHybrid())(assignment), *gbn.at(3))); } -/* ****************************************************************************/ -// Test error for a hybrid Bayes net P(X0|X1) P(X1|Asia) P(Asia). -TEST(HybridBayesNet, Error) { - const auto continuousConditional = GaussianConditional::sharedMeanAndStddev( - X(0), 2 * I_1x1, X(1), Vector1(-4.0), 5.0); - - const SharedDiagonal model0 = noiseModel::Diagonal::Sigmas(Vector1(2.0)), - model1 = noiseModel::Diagonal::Sigmas(Vector1(3.0)); - - const auto conditional0 = std::make_shared( - X(1), Vector1::Constant(5), I_1x1, model0), - conditional1 = std::make_shared( - X(1), Vector1::Constant(2), I_1x1, model1); - - auto gm = - new GaussianMixture({X(1)}, {}, {Asia}, {conditional0, conditional1}); - // Create hybrid Bayes net. - HybridBayesNet bayesNet; - bayesNet.push_back(continuousConditional); - bayesNet.emplace_back(gm); - bayesNet.emplace_back(new DiscreteConditional(Asia, "99/1")); - - // Create values at which to evaluate. 
- HybridValues values; - values.insert(asiaKey, 0); - values.insert(X(0), Vector1(-6)); - values.insert(X(1), Vector1(1)); - - AlgebraicDecisionTree actual_errors = - bayesNet.errorTree(values.continuous()); - - // Regression. - // Manually added all the error values from the 3 conditional types. - AlgebraicDecisionTree expected_errors( - {Asia}, std::vector{2.33005033585, 5.38619084965}); - - EXPECT(assert_equal(expected_errors, actual_errors)); -} - /* ****************************************************************************/ // Test Bayes net optimize TEST(HybridBayesNet, OptimizeAssignment) { @@ -250,12 +308,15 @@ TEST(HybridBayesNet, Optimize) { /* ****************************************************************************/ // Test Bayes net error TEST(HybridBayesNet, Pruning) { + // Create switching network with three continuous variables and two discrete: + // Ï•(x0) Ï•(x0,x1,m0) Ï•(x1,x2,m1) Ï•(x0;z0) Ï•(x1;z1) Ï•(x2;z2) Ï•(m0) Ï•(m0,m1) Switching s(3); HybridBayesNet::shared_ptr posterior = s.linearizedFactorGraph.eliminateSequential(); EXPECT_LONGS_EQUAL(5, posterior->size()); + // Optimize HybridValues delta = posterior->optimize(); auto actualTree = posterior->evaluate(delta.continuous()); @@ -278,10 +339,9 @@ TEST(HybridBayesNet, Pruning) { const DiscreteValues discrete_values{{M(0), 1}, {M(1), 1}}; const HybridValues hybridValues{delta.continuous(), discrete_values}; double logProbability = 0; - logProbability += posterior->at(0)->asMixture()->logProbability(hybridValues); - logProbability += posterior->at(1)->asMixture()->logProbability(hybridValues); - logProbability += posterior->at(2)->asMixture()->logProbability(hybridValues); - // NOTE(dellaert): the discrete errors were not added in logProbability tree! 
+ logProbability += posterior->at(0)->asHybrid()->logProbability(hybridValues); + logProbability += posterior->at(1)->asHybrid()->logProbability(hybridValues); + logProbability += posterior->at(2)->asHybrid()->logProbability(hybridValues); logProbability += posterior->at(3)->asDiscrete()->logProbability(hybridValues); logProbability += @@ -343,10 +403,9 @@ TEST(HybridBayesNet, UpdateDiscreteConditionals) { #endif // regression - DiscreteKeys dkeys{{M(0), 2}, {M(1), 2}, {M(2), 2}}; DecisionTreeFactor::ADT potentials( - dkeys, std::vector{0, 0, 0, 0.505145423, 0, 1, 0, 0.494854577}); - DiscreteConditional expected_discrete_conditionals(1, dkeys, potentials); + s.modes, std::vector{0, 0, 0, 0.505145423, 0, 1, 0, 0.494854577}); + DiscreteConditional expected_discrete_conditionals(1, s.modes, potentials); // Prune! posterior->prune(maxNrLeaves); @@ -381,14 +440,15 @@ TEST(HybridBayesNet, Sampling) { HybridNonlinearFactorGraph nfg; auto noise_model = noiseModel::Diagonal::Sigmas(Vector1(1.0)); + nfg.emplace_shared>(X(0), 0.0, noise_model); + auto zero_motion = std::make_shared>(X(0), X(1), 0, noise_model); auto one_motion = std::make_shared>(X(0), X(1), 1, noise_model); - std::vector factors = {zero_motion, one_motion}; - nfg.emplace_shared>(X(0), 0.0, noise_model); - nfg.emplace_shared( - KeyVector{X(0), X(1)}, DiscreteKeys{DiscreteKey(M(0), 2)}, factors); + nfg.emplace_shared( + DiscreteKey(M(0), 2), + std::vector{zero_motion, one_motion}); DiscreteKey mode(M(0), 2); nfg.emplace_shared(mode, "1/1"); @@ -442,6 +502,57 @@ TEST(HybridBayesNet, Sampling) { // num_samples))); } +/* ****************************************************************************/ +// Test hybrid gaussian factor graph errorTree when +// there is a HybridConditional in the graph +TEST(HybridBayesNet, ErrorTreeWithConditional) { + using symbol_shorthand::F; + + Key z0 = Z(0), f01 = F(0); + Key x0 = X(0), x1 = X(1); + + HybridBayesNet hbn; + + auto prior_model = noiseModel::Isotropic::Sigma(1, 
1e-1); + auto measurement_model = noiseModel::Isotropic::Sigma(1, 2.0); + + // Set a prior P(x0) at x0=0 + hbn.emplace_shared(x0, Vector1(0.0), I_1x1, prior_model); + + // Add measurement P(z0 | x0) + hbn.emplace_shared(z0, Vector1(0.0), -I_1x1, x0, I_1x1, + measurement_model); + + // Add hybrid motion model + double mu = 0.0; + double sigma0 = 1e2, sigma1 = 1e-2; + auto model0 = noiseModel::Isotropic::Sigma(1, sigma0); + auto model1 = noiseModel::Isotropic::Sigma(1, sigma1); + auto c0 = make_shared(f01, Vector1(mu), I_1x1, x1, I_1x1, + x0, -I_1x1, model0), + c1 = make_shared(f01, Vector1(mu), I_1x1, x1, I_1x1, + x0, -I_1x1, model1); + DiscreteKey m1(M(2), 2); + hbn.emplace_shared(m1, std::vector{c0, c1}); + + // Discrete uniform prior. + hbn.emplace_shared(m1, "0.5/0.5"); + + VectorValues given; + given.insert(z0, Vector1(0.0)); + given.insert(f01, Vector1(0.0)); + auto gfg = hbn.toFactorGraph(given); + + VectorValues vv; + vv.insert(x0, Vector1(1.0)); + vv.insert(x1, Vector1(2.0)); + AlgebraicDecisionTree errorTree = gfg.errorTree(vv); + + // regression + AlgebraicDecisionTree expected(m1, 59.335390372, 5050.125); + EXPECT(assert_equal(expected, errorTree, 1e-9)); +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/hybrid/tests/testHybridConditional.cpp b/gtsam/hybrid/tests/testHybridConditional.cpp index 406306df7..365dde3bc 100644 --- a/gtsam/hybrid/tests/testHybridConditional.cpp +++ b/gtsam/hybrid/tests/testHybridConditional.cpp @@ -43,9 +43,9 @@ TEST(HybridConditional, Invariants) { auto hc0 = bn.at(0); CHECK(hc0->isHybrid()); - // Check invariants as a GaussianMixture. - const auto mixture = hc0->asMixture(); - EXPECT(GaussianMixture::CheckInvariants(*mixture, values)); + // Check invariants as a HybridGaussianConditional. 
+ const auto conditional = hc0->asHybrid(); + EXPECT(HybridGaussianConditional::CheckInvariants(*conditional, values)); // Check invariants as a HybridConditional. EXPECT(HybridConditional::CheckInvariants(*hc0, values)); diff --git a/gtsam/hybrid/tests/testHybridEstimation.cpp b/gtsam/hybrid/tests/testHybridEstimation.cpp index b8edc39d8..58decc695 100644 --- a/gtsam/hybrid/tests/testHybridEstimation.cpp +++ b/gtsam/hybrid/tests/testHybridEstimation.cpp @@ -19,10 +19,11 @@ #include #include #include +#include +#include #include #include #include -#include #include #include #include @@ -333,7 +334,6 @@ TEST(HybridEstimation, Probability) { for (auto discrete_conditional : *discreteBayesNet) { bayesNet->add(discrete_conditional); } - auto discreteConditional = discreteBayesNet->at(0)->asDiscrete(); HybridValues hybrid_values = bayesNet->optimize(); @@ -430,15 +430,15 @@ static HybridNonlinearFactorGraph createHybridNonlinearFactorGraph() { nfg.emplace_shared>(X(0), 0.0, noise_model); nfg.emplace_shared>(X(1), 1.0, noise_model); - // Add mixture factor: + // Add hybrid nonlinear factor: DiscreteKey m(M(0), 2); const auto zero_motion = std::make_shared>(X(0), X(1), 0, noise_model); const auto one_motion = std::make_shared>(X(0), X(1), 1, noise_model); - nfg.emplace_shared( - KeyVector{X(0), X(1)}, DiscreteKeys{m}, - std::vector{zero_motion, one_motion}); + std::vector components = {zero_motion, + one_motion}; + nfg.emplace_shared(m, components); return nfg; } @@ -527,46 +527,6 @@ TEST(HybridEstimation, CorrectnessViaSampling) { } } -/****************************************************************************/ -/** - * Helper function to add the constant term corresponding to - * the difference in noise models. 
- */ -std::shared_ptr mixedVarianceFactor( - const MixtureFactor& mf, const Values& initial, const Key& mode, - double noise_tight, double noise_loose, size_t d, size_t tight_index) { - GaussianMixtureFactor::shared_ptr gmf = mf.linearize(initial); - - constexpr double log2pi = 1.8378770664093454835606594728112; - // logConstant will be of the tighter model - double logNormalizationConstant = log(1.0 / noise_tight); - double logConstant = -0.5 * d * log2pi + logNormalizationConstant; - - auto func = [&](const Assignment& assignment, - const GaussianFactor::shared_ptr& gf) { - if (assignment.at(mode) != tight_index) { - double factor_log_constant = -0.5 * d * log2pi + log(1.0 / noise_loose); - - GaussianFactorGraph _gfg; - _gfg.push_back(gf); - Vector c(d); - for (size_t i = 0; i < d; i++) { - c(i) = std::sqrt(2.0 * (logConstant - factor_log_constant)); - } - - _gfg.emplace_shared(c); - return std::make_shared(_gfg); - } else { - return dynamic_pointer_cast(gf); - } - }; - auto updated_components = gmf->factors().apply(func); - auto updated_gmf = std::make_shared( - gmf->continuousKeys(), gmf->discreteKeys(), updated_components); - - return updated_gmf; -} - /****************************************************************************/ TEST(HybridEstimation, ModeSelection) { HybridNonlinearFactorGraph graph; @@ -578,9 +538,6 @@ TEST(HybridEstimation, ModeSelection) { graph.emplace_shared>(X(0), 0.0, measurement_model); graph.emplace_shared>(X(1), 0.0, measurement_model); - DiscreteKeys modes; - modes.emplace_back(M(0), 2); - // The size of the noise model size_t d = 1; double noise_tight = 0.5, noise_loose = 5.0; @@ -589,17 +546,14 @@ TEST(HybridEstimation, ModeSelection) { X(0), X(1), 0.0, noiseModel::Isotropic::Sigma(d, noise_loose)), model1 = std::make_shared( X(0), X(1), 0.0, noiseModel::Isotropic::Sigma(d, noise_tight)); + std::vector components = {model0, model1}; - std::vector components = {model0, model1}; - - KeyVector keys = {X(0), X(1)}; - MixtureFactor 
mf(keys, modes, components); + HybridNonlinearFactor mf({M(0), 2}, components); initial.insert(X(0), 0.0); initial.insert(X(1), 0.0); - auto gmf = - mixedVarianceFactor(mf, initial, M(0), noise_tight, noise_loose, d, 1); + auto gmf = mf.linearize(initial); graph.add(gmf); auto gfg = graph.linearize(initial); @@ -611,18 +565,17 @@ TEST(HybridEstimation, ModeSelection) { /**************************************************************/ HybridBayesNet bn; - const DiscreteKey mode{M(0), 2}; + const DiscreteKey mode(M(0), 2); bn.push_back( GaussianConditional::sharedMeanAndStddev(Z(0), -I_1x1, X(0), Z_1x1, 0.1)); bn.push_back( GaussianConditional::sharedMeanAndStddev(Z(0), -I_1x1, X(1), Z_1x1, 0.1)); - bn.emplace_back(new GaussianMixture( - {Z(0)}, {X(0), X(1)}, {mode}, - {GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), -I_1x1, X(1), - Z_1x1, noise_loose), - GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), -I_1x1, X(1), - Z_1x1, noise_tight)})); + + std::vector> parameters{{Z_1x1, noise_loose}, + {Z_1x1, noise_tight}}; + bn.emplace_shared(mode, Z(0), I_1x1, X(0), -I_1x1, + X(1), parameters); VectorValues vv; vv.insert(Z(0), Z_1x1); @@ -642,18 +595,17 @@ TEST(HybridEstimation, ModeSelection2) { double noise_tight = 0.5, noise_loose = 5.0; HybridBayesNet bn; - const DiscreteKey mode{M(0), 2}; + const DiscreteKey mode(M(0), 2); bn.push_back( GaussianConditional::sharedMeanAndStddev(Z(0), -I_3x3, X(0), Z_3x1, 0.1)); bn.push_back( GaussianConditional::sharedMeanAndStddev(Z(0), -I_3x3, X(1), Z_3x1, 0.1)); - bn.emplace_back(new GaussianMixture( - {Z(0)}, {X(0), X(1)}, {mode}, - {GaussianConditional::sharedMeanAndStddev(Z(0), I_3x3, X(0), -I_3x3, X(1), - Z_3x1, noise_loose), - GaussianConditional::sharedMeanAndStddev(Z(0), I_3x3, X(0), -I_3x3, X(1), - Z_3x1, noise_tight)})); + + std::vector> parameters{{Z_3x1, noise_loose}, + {Z_3x1, noise_tight}}; + bn.emplace_shared(mode, Z(0), I_3x3, X(0), -I_3x3, + X(1), parameters); VectorValues vv; vv.insert(Z(0), 
Z_3x1); @@ -673,24 +625,18 @@ TEST(HybridEstimation, ModeSelection2) { graph.emplace_shared>(X(0), Z_3x1, measurement_model); graph.emplace_shared>(X(1), Z_3x1, measurement_model); - DiscreteKeys modes; - modes.emplace_back(M(0), 2); - auto model0 = std::make_shared>( X(0), X(1), Z_3x1, noiseModel::Isotropic::Sigma(d, noise_loose)), model1 = std::make_shared>( X(0), X(1), Z_3x1, noiseModel::Isotropic::Sigma(d, noise_tight)); + std::vector components = {model0, model1}; - std::vector components = {model0, model1}; - - KeyVector keys = {X(0), X(1)}; - MixtureFactor mf(keys, modes, components); + HybridNonlinearFactor mf({M(0), 2}, components); initial.insert(X(0), Z_3x1); initial.insert(X(1), Z_3x1); - auto gmf = - mixedVarianceFactor(mf, initial, M(0), noise_tight, noise_loose, d, 1); + auto gmf = mf.linearize(initial); graph.add(gmf); auto gfg = graph.linearize(initial); diff --git a/gtsam/hybrid/tests/testHybridFactorGraph.cpp b/gtsam/hybrid/tests/testHybridFactorGraph.cpp index 33c0761eb..c6020de85 100644 --- a/gtsam/hybrid/tests/testHybridFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridFactorGraph.cpp @@ -50,12 +50,12 @@ TEST(HybridFactorGraph, Keys) { // Add factor between x0 and x1 hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); - // Add a gaussian mixture factor Ï•(x1, c1) + // Add a hybrid Gaussian factor Ï•(x1, c1) DiscreteKey m1(M(1), 2); - DecisionTree dt( - M(1), std::make_shared(X(1), I_3x3, Z_3x1), - std::make_shared(X(1), I_3x3, Vector3::Ones())); - hfg.add(GaussianMixtureFactor({X(1)}, {m1}, dt)); + std::vector components{ + std::make_shared(X(1), I_3x3, Z_3x1), + std::make_shared(X(1), I_3x3, Vector3::Ones())}; + hfg.add(HybridGaussianFactor(m1, components)); KeySet expected_continuous{X(0), X(1)}; EXPECT( diff --git a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp new file mode 100644 index 000000000..803d42f03 --- /dev/null +++ 
b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp @@ -0,0 +1,268 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testHybridGaussianConditional.cpp + * @brief Unit tests for HybridGaussianConditional class + * @author Varun Agrawal + * @author Fan Jiang + * @author Frank Dellaert + * @date December 2021 + */ + +#include +#include +#include +#include +#include +#include + +#include + +// Include for test suite +#include + +using namespace gtsam; +using symbol_shorthand::M; +using symbol_shorthand::X; +using symbol_shorthand::Z; + +// Common constants +static const Key modeKey = M(0); +static const DiscreteKey mode(modeKey, 2); +static const VectorValues vv{{Z(0), Vector1(4.9)}, {X(0), Vector1(5.0)}}; +static const DiscreteValues assignment0{{M(0), 0}}, assignment1{{M(0), 1}}; +static const HybridValues hv0{vv, assignment0}; +static const HybridValues hv1{vv, assignment1}; + +/* ************************************************************************* */ +namespace equal_constants { +// Create a simple HybridGaussianConditional +const double commonSigma = 2.0; +const std::vector conditionals{ + GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), + commonSigma), + GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), + commonSigma)}; +const HybridGaussianConditional hybrid_conditional(mode, conditionals); +} // namespace equal_constants + +/* ************************************************************************* */ +/// Check that invariants hold +TEST(HybridGaussianConditional, Invariants) { + using namespace equal_constants; + + // Check that the 
conditional (negative log) normalization constant is the min + // of all constants which are all equal, in this case, hence: + const double K = hybrid_conditional.negLogConstant(); + EXPECT_DOUBLES_EQUAL(K, conditionals[0]->negLogConstant(), 1e-8); + EXPECT_DOUBLES_EQUAL(K, conditionals[1]->negLogConstant(), 1e-8); + + EXPECT(HybridGaussianConditional::CheckInvariants(hybrid_conditional, hv0)); + EXPECT(HybridGaussianConditional::CheckInvariants(hybrid_conditional, hv1)); +} + +/* ************************************************************************* */ +/// Check LogProbability. +TEST(HybridGaussianConditional, LogProbability) { + using namespace equal_constants; + auto actual = hybrid_conditional.logProbability(vv); + + // Check result. + std::vector discrete_keys = {mode}; + std::vector leaves = {conditionals[0]->logProbability(vv), + conditionals[1]->logProbability(vv)}; + AlgebraicDecisionTree expected(discrete_keys, leaves); + + EXPECT(assert_equal(expected, actual, 1e-6)); + + // Check for non-tree version. + for (size_t mode : {0, 1}) { + const HybridValues hv{vv, {{M(0), mode}}}; + EXPECT_DOUBLES_EQUAL(conditionals[mode]->logProbability(vv), + hybrid_conditional.logProbability(hv), 1e-8); + } +} + +/* ************************************************************************* */ +/// Check error. +TEST(HybridGaussianConditional, Error) { + using namespace equal_constants; + auto actual = hybrid_conditional.errorTree(vv); + + // Check result. + DiscreteKeys discrete_keys{mode}; + std::vector leaves = {conditionals[0]->error(vv), + conditionals[1]->error(vv)}; + AlgebraicDecisionTree expected(discrete_keys, leaves); + + EXPECT(assert_equal(expected, actual, 1e-6)); + + // Check for non-tree version. 
+ for (size_t mode : {0, 1}) { + const HybridValues hv{vv, {{M(0), mode}}}; + EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv), + hybrid_conditional.error(hv), 1e-8); + } +} + +/* ************************************************************************* */ +/// Check that the likelihood is proportional to the conditional density given +/// the measurements. +TEST(HybridGaussianConditional, Likelihood) { + using namespace equal_constants; + + // Compute likelihood + auto likelihood = hybrid_conditional.likelihood(vv); + + // Check that the hybrid conditional error and the likelihood error are the + // same. + EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv0), likelihood->error(hv0), + 1e-8); + EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv1), likelihood->error(hv1), + 1e-8); + + // Check that likelihood error is as expected, i.e., just the errors of the + // individual likelihoods, in the `equal_constants` case. + std::vector discrete_keys = {mode}; + std::vector leaves = {conditionals[0]->likelihood(vv)->error(vv), + conditionals[1]->likelihood(vv)->error(vv)}; + AlgebraicDecisionTree expected(discrete_keys, leaves); + EXPECT(assert_equal(expected, likelihood->errorTree(vv), 1e-6)); + + // Check that the ratio of probPrime to evaluate is the same for all modes. + std::vector ratio(2); + for (size_t mode : {0, 1}) { + const HybridValues hv{vv, {{M(0), mode}}}; + ratio[mode] = + std::exp(-likelihood->error(hv)) / hybrid_conditional.evaluate(hv); + } + EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8); +} + +/* ************************************************************************* */ +namespace mode_dependent_constants { +// Create a HybridGaussianConditional with mode-dependent noise models. +// 0 is low-noise, 1 is high-noise. 
+const std::vector conditionals{ + GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), + 0.5), + GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), + 3.0)}; +const HybridGaussianConditional hybrid_conditional(mode, conditionals); +} // namespace mode_dependent_constants + +/* ************************************************************************* */ +// Create a test for continuousParents. +TEST(HybridGaussianConditional, ContinuousParents) { + using namespace mode_dependent_constants; + const KeyVector continuousParentKeys = hybrid_conditional.continuousParents(); + // Check that the continuous parent keys are correct: + EXPECT(continuousParentKeys.size() == 1); + EXPECT(continuousParentKeys[0] == X(0)); + + EXPECT(HybridGaussianConditional::CheckInvariants(hybrid_conditional, hv0)); + EXPECT(HybridGaussianConditional::CheckInvariants(hybrid_conditional, hv1)); +} + +/* ************************************************************************* */ +/// Check error with mode dependent constants. +TEST(HybridGaussianConditional, Error2) { + using namespace mode_dependent_constants; + auto actual = hybrid_conditional.errorTree(vv); + + // Check result. + DiscreteKeys discrete_keys{mode}; + double negLogConstant0 = conditionals[0]->negLogConstant(); + double negLogConstant1 = conditionals[1]->negLogConstant(); + double minErrorConstant = std::min(negLogConstant0, negLogConstant1); + + // Expected error is e(X) + log(sqrt(|2πΣ|)). + // We normalize log(sqrt(|2πΣ|)) with min(negLogConstant) + // so it is non-negative. + std::vector leaves = { + conditionals[0]->error(vv) + negLogConstant0 - minErrorConstant, + conditionals[1]->error(vv) + negLogConstant1 - minErrorConstant}; + AlgebraicDecisionTree expected(discrete_keys, leaves); + + EXPECT(assert_equal(expected, actual, 1e-6)); + + // Check for non-tree version. 
+ for (size_t mode : {0, 1}) { + const HybridValues hv{vv, {{M(0), mode}}}; + EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv) + + conditionals[mode]->negLogConstant() - + minErrorConstant, + hybrid_conditional.error(hv), 1e-8); + } +} + +/* ************************************************************************* */ +/// Check that the likelihood is proportional to the conditional density given +/// the measurements. +TEST(HybridGaussianConditional, Likelihood2) { + using namespace mode_dependent_constants; + + // Compute likelihood + auto likelihood = hybrid_conditional.likelihood(vv); + + // Check that the hybrid conditional error and the likelihood error are as + // expected, this invariant is the same as the equal noise case: + EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv0), likelihood->error(hv0), + 1e-8); + EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv1), likelihood->error(hv1), + 1e-8); + + // Check the detailed JacobianFactor calculation for mode==1. + { + // We have a JacobianFactor + const auto gf1 = (*likelihood)(assignment1); + const auto jf1 = std::dynamic_pointer_cast(gf1); + CHECK(jf1); + + // It has 2 rows, not 1! + CHECK(jf1->rows() == 2); + + // Check that the constant C1 is properly encoded in the JacobianFactor. + const double C1 = + conditionals[1]->negLogConstant() - hybrid_conditional.negLogConstant(); + const double c1 = std::sqrt(2.0 * C1); + Vector expected_unwhitened(2); + expected_unwhitened << 4.9 - 5.0, -c1; + Vector actual_unwhitened = jf1->unweighted_error(vv); + EXPECT(assert_equal(expected_unwhitened, actual_unwhitened)); + + // Make sure the noise model does not touch it. 
+ Vector expected_whitened(2); + expected_whitened << (4.9 - 5.0) / 3.0, -c1; + Vector actual_whitened = jf1->error_vector(vv); + EXPECT(assert_equal(expected_whitened, actual_whitened)); + + // Check that the error is equal to the conditional error: + EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv1), jf1->error(hv1), 1e-8); + } + + // Check that the ratio of probPrime to evaluate is the same for all modes. + std::vector ratio(2); + for (size_t mode : {0, 1}) { + const HybridValues hv{vv, {{M(0), mode}}}; + ratio[mode] = + std::exp(-likelihood->error(hv)) / hybrid_conditional.evaluate(hv); + } + EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/hybrid/tests/testHybridGaussianFactor.cpp b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp new file mode 100644 index 000000000..c2ffe24c8 --- /dev/null +++ b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp @@ -0,0 +1,381 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testHybridGaussianFactor.cpp + * @brief Unit tests for HybridGaussianFactor + * @author Varun Agrawal + * @author Fan Jiang + * @author Frank Dellaert + * @date December 2021 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Include for test suite +#include + +#include + +using namespace std; +using namespace gtsam; +using symbol_shorthand::M; +using symbol_shorthand::X; +using symbol_shorthand::Z; + +/* ************************************************************************* */ +// Check iterators of empty hybrid factor. +TEST(HybridGaussianFactor, Constructor) { + HybridGaussianFactor factor; + HybridGaussianFactor::const_iterator const_it = factor.begin(); + CHECK(const_it == factor.end()); + HybridGaussianFactor::iterator it = factor.begin(); + CHECK(it == factor.end()); +} + +/* ************************************************************************* */ +namespace test_constructor { +DiscreteKey m1(1, 2); + +auto A1 = Matrix::Zero(2, 1); +auto A2 = Matrix::Zero(2, 2); +auto b = Matrix::Zero(2, 1); + +auto f10 = std::make_shared(X(1), A1, X(2), A2, b); +auto f11 = std::make_shared(X(1), A1, X(2), A2, b); +} // namespace test_constructor + +/* ************************************************************************* */ +// Test simple to complex constructors... 
+TEST(HybridGaussianFactor, ConstructorVariants) { + using namespace test_constructor; + HybridGaussianFactor fromFactors(m1, {f10, f11}); + + std::vector pairs{{f10, 0.0}, {f11, 0.0}}; + HybridGaussianFactor fromPairs(m1, pairs); + assert_equal(fromFactors, fromPairs); + + HybridGaussianFactor::FactorValuePairs decisionTree({m1}, pairs); + HybridGaussianFactor fromDecisionTree({m1}, decisionTree); + assert_equal(fromDecisionTree, fromPairs); +} + +/* ************************************************************************* */ +// "Add" two hybrid factors together. +TEST(HybridGaussianFactor, Sum) { + using namespace test_constructor; + DiscreteKey m2(2, 3); + + auto A3 = Matrix::Zero(2, 3); + auto f20 = std::make_shared(X(1), A1, X(3), A3, b); + auto f21 = std::make_shared(X(1), A1, X(3), A3, b); + auto f22 = std::make_shared(X(1), A1, X(3), A3, b); + + // TODO(Frank): why specify keys at all? And: keys in factor should be *all* + // keys, deviating from Kevin's scheme. Should we index DT on DiscreteKey? + // Design review! 
+ HybridGaussianFactor hybridFactorA(m1, {f10, f11}); + HybridGaussianFactor hybridFactorB(m2, {f20, f21, f22}); + + // Check the number of keys matches what we expect + EXPECT_LONGS_EQUAL(3, hybridFactorA.keys().size()); + EXPECT_LONGS_EQUAL(2, hybridFactorA.continuousKeys().size()); + EXPECT_LONGS_EQUAL(1, hybridFactorA.discreteKeys().size()); + + // Create sum of two hybrid factors: it will be a decision tree now on both + // discrete variables m1 and m2: + GaussianFactorGraphTree sum; + sum += hybridFactorA; + sum += hybridFactorB; + + // Let's check that this worked: + Assignment mode; + mode[m1.first] = 1; + mode[m2.first] = 2; + auto actual = sum(mode); + EXPECT(actual.at(0) == f11); + EXPECT(actual.at(1) == f22); +} + +/* ************************************************************************* */ +TEST(HybridGaussianFactor, Printing) { + using namespace test_constructor; + HybridGaussianFactor hybridFactor(m1, {f10, f11}); + + std::string expected = + R"(HybridGaussianFactor +Hybrid [x1 x2; 1]{ + Choice(1) + 0 Leaf : + A[x1] = [ + 0; + 0 +] + A[x2] = [ + 0, 0; + 0, 0 +] + b = [ 0 0 ] + No noise model + + 1 Leaf : + A[x1] = [ + 0; + 0 +] + A[x2] = [ + 0, 0; + 0, 0 +] + b = [ 0 0 ] + No noise model + +} +)"; + EXPECT(assert_print_equal(expected, hybridFactor)); +} + +/* ************************************************************************* */ +TEST(HybridGaussianFactor, HybridGaussianConditional) { + DiscreteKeys dKeys; + dKeys.emplace_back(M(0), 2); + dKeys.emplace_back(M(1), 2); + + auto gaussians = std::make_shared(); + HybridGaussianConditional::Conditionals conditionals(gaussians); + HybridGaussianConditional gm(dKeys, conditionals); + + EXPECT_LONGS_EQUAL(2, gm.discreteKeys().size()); +} + +/* ************************************************************************* */ +// Test the error of the HybridGaussianFactor +TEST(HybridGaussianFactor, Error) { + DiscreteKey m1(1, 2); + + auto A01 = Matrix2::Identity(); + auto A02 = Matrix2::Identity(); + + 
auto A11 = Matrix2::Identity(); + auto A12 = Matrix2::Identity() * 2; + + auto b = Vector2::Zero(); + + auto f0 = std::make_shared(X(1), A01, X(2), A02, b); + auto f1 = std::make_shared(X(1), A11, X(2), A12, b); + HybridGaussianFactor hybridFactor(m1, {f0, f1}); + + VectorValues continuousValues; + continuousValues.insert(X(1), Vector2(0, 0)); + continuousValues.insert(X(2), Vector2(1, 1)); + + // error should return a tree of errors, with nodes for each discrete value. + AlgebraicDecisionTree error_tree = + hybridFactor.errorTree(continuousValues); + + std::vector discrete_keys = {m1}; + // Error values for regression test + std::vector errors = {1, 4}; + AlgebraicDecisionTree expected_error(discrete_keys, errors); + + EXPECT(assert_equal(expected_error, error_tree)); + + // Test for single leaf given discrete assignment P(X|M,Z). + DiscreteValues discreteValues; + discreteValues[m1.first] = 1; + EXPECT_DOUBLES_EQUAL( + 4.0, hybridFactor.error({continuousValues, discreteValues}), 1e-9); +} + +/* ************************************************************************* */ +namespace test_direct_factor_graph { +/** + * @brief Create a Factor Graph by directly specifying all + * the factors instead of creating conditionals first. + * This way we can directly provide the likelihoods and + * then perform linearization. + * + * @param values Initial values to linearize around. + * @param means The means of the HybridGaussianFactor components. + * @param sigmas The covariances of the HybridGaussianFactor components. + * @param m1 The discrete key. 
+ * @return HybridGaussianFactorGraph + */ +static HybridGaussianFactorGraph CreateFactorGraph( + const gtsam::Values &values, const std::vector &means, + const std::vector &sigmas, DiscreteKey &m1, + double measurement_noise = 1e-3) { + auto model0 = noiseModel::Isotropic::Sigma(1, sigmas[0]); + auto model1 = noiseModel::Isotropic::Sigma(1, sigmas[1]); + auto prior_noise = noiseModel::Isotropic::Sigma(1, measurement_noise); + + auto f0 = + std::make_shared>(X(0), X(1), means[0], model0) + ->linearize(values); + auto f1 = + std::make_shared>(X(0), X(1), means[1], model1) + ->linearize(values); + + // Create HybridGaussianFactor + // We take negative since we want + // the underlying scalar to be log(\sqrt(|2πΣ|)) + std::vector factors{{f0, model0->negLogConstant()}, + {f1, model1->negLogConstant()}}; + HybridGaussianFactor motionFactor(m1, factors); + + HybridGaussianFactorGraph hfg; + hfg.push_back(motionFactor); + + hfg.push_back(PriorFactor(X(0), values.at(X(0)), prior_noise) + .linearize(values)); + + return hfg; +} +} // namespace test_direct_factor_graph + +/* ************************************************************************* */ +/** + * @brief Test components with differing means but the same covariances. 
+ * The factor graph is + * *-X1-*-X2 + * | + * M1 + */ +TEST(HybridGaussianFactor, DifferentMeansFG) { + using namespace test_direct_factor_graph; + + DiscreteKey m1(M(1), 2); + + Values values; + double x1 = 0.0, x2 = 1.75; + values.insert(X(0), x1); + values.insert(X(1), x2); + + std::vector means = {0.0, 2.0}, sigmas = {1e-0, 1e-0}; + + HybridGaussianFactorGraph hfg = CreateFactorGraph(values, means, sigmas, m1); + + { + auto bn = hfg.eliminateSequential(); + HybridValues actual = bn->optimize(); + + HybridValues expected( + VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(-1.75)}}, + DiscreteValues{{M(1), 0}}); + + EXPECT(assert_equal(expected, actual)); + + DiscreteValues dv0{{M(1), 0}}; + VectorValues cont0 = bn->optimize(dv0); + double error0 = bn->error(HybridValues(cont0, dv0)); + // regression + EXPECT_DOUBLES_EQUAL(0.69314718056, error0, 1e-9); + + DiscreteValues dv1{{M(1), 1}}; + VectorValues cont1 = bn->optimize(dv1); + double error1 = bn->error(HybridValues(cont1, dv1)); + EXPECT_DOUBLES_EQUAL(error0, error1, 1e-9); + } + + { + auto prior_noise = noiseModel::Isotropic::Sigma(1, 1e-3); + hfg.push_back( + PriorFactor(X(1), means[1], prior_noise).linearize(values)); + + auto bn = hfg.eliminateSequential(); + HybridValues actual = bn->optimize(); + + HybridValues expected( + VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(0.25)}}, + DiscreteValues{{M(1), 1}}); + + EXPECT(assert_equal(expected, actual)); + + { + DiscreteValues dv{{M(1), 0}}; + VectorValues cont = bn->optimize(dv); + double error = bn->error(HybridValues(cont, dv)); + // regression + EXPECT_DOUBLES_EQUAL(2.12692448787, error, 1e-9); + } + { + DiscreteValues dv{{M(1), 1}}; + VectorValues cont = bn->optimize(dv); + double error = bn->error(HybridValues(cont, dv)); + // regression + EXPECT_DOUBLES_EQUAL(0.126928487854, error, 1e-9); + } + } +} + +/* ************************************************************************* */ +/** + * @brief Test components with differing covariances but 
the same means. + * The factor graph is + * *-X1-*-X2 + * | + * M1 + */ +TEST(HybridGaussianFactor, DifferentCovariancesFG) { + using namespace test_direct_factor_graph; + + DiscreteKey m1(M(1), 2); + + Values values; + double x1 = 1.0, x2 = 1.0; + values.insert(X(0), x1); + values.insert(X(1), x2); + + std::vector means = {0.0, 0.0}, sigmas = {1e2, 1e-2}; + + // Create FG with HybridGaussianFactor and prior on X1 + HybridGaussianFactorGraph fg = CreateFactorGraph(values, means, sigmas, m1); + auto hbn = fg.eliminateSequential(); + + VectorValues cv; + cv.insert(X(0), Vector1(0.0)); + cv.insert(X(1), Vector1(0.0)); + + // Check that the error values at the MLE point μ. + AlgebraicDecisionTree errorTree = hbn->errorTree(cv); + + DiscreteValues dv0{{M(1), 0}}; + DiscreteValues dv1{{M(1), 1}}; + + // regression + EXPECT_DOUBLES_EQUAL(9.90348755254, errorTree(dv0), 1e-9); + EXPECT_DOUBLES_EQUAL(0.69314718056, errorTree(dv1), 1e-9); + + DiscreteConditional expected_m1(m1, "0.5/0.5"); + DiscreteConditional actual_m1 = *(hbn->at(2)->asDiscrete()); + + EXPECT(assert_equal(expected_m1, actual_m1)); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp index 5be2f2742..6aef60386 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp @@ -18,15 +18,16 @@ #include #include #include +#include #include #include #include -#include -#include #include #include #include #include +#include +#include #include #include #include @@ -42,6 +43,7 @@ #include #include #include +#include #include #include @@ -61,6 +63,8 @@ using gtsam::symbol_shorthand::Z; // Set up sampling std::mt19937_64 kRng(42); +static const 
DiscreteKey m1(M(1), 2); + /* ************************************************************************* */ TEST(HybridGaussianFactorGraph, Creation) { HybridConditional conditional; @@ -69,15 +73,13 @@ TEST(HybridGaussianFactorGraph, Creation) { hfg.emplace_shared(X(0), I_3x3, Z_3x1); - // Define a gaussian mixture conditional P(x0|x1, c0) and add it to the factor - // graph - GaussianMixture gm({X(0)}, {X(1)}, DiscreteKeys(DiscreteKey{M(0), 2}), - GaussianMixture::Conditionals( - M(0), - std::make_shared( - X(0), Z_3x1, I_3x3, X(1), I_3x3), - std::make_shared( - X(0), Vector3::Ones(), I_3x3, X(1), I_3x3))); + // Define a hybrid gaussian conditional P(x0|x1, c0) + // and add it to the factor graph. + HybridGaussianConditional gm( + {M(0), 2}, + {std::make_shared(X(0), Z_3x1, I_3x3, X(1), I_3x3), + std::make_shared(X(0), Vector3::Ones(), I_3x3, X(1), + I_3x3)}); hfg.add(gm); EXPECT_LONGS_EQUAL(2, hfg.size()); @@ -100,11 +102,9 @@ TEST(HybridGaussianFactorGraph, EliminateMultifrontal) { // Test multifrontal elimination HybridGaussianFactorGraph hfg; - DiscreteKey m(M(1), 2); - // Add priors on x0 and c1 hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); - hfg.add(DecisionTreeFactor(m, {2, 8})); + hfg.add(DecisionTreeFactor(m1, {2, 8})); Ordering ordering; ordering.push_back(X(0)); @@ -113,6 +113,33 @@ TEST(HybridGaussianFactorGraph, EliminateMultifrontal) { EXPECT_LONGS_EQUAL(result.first->size(), 1); EXPECT_LONGS_EQUAL(result.second->size(), 1); } +/* ************************************************************************* */ + +namespace two { +std::vector components(Key key) { + return {std::make_shared(key, I_3x3, Z_3x1), + std::make_shared(key, I_3x3, Vector3::Ones())}; +} +} // namespace two + +/* ************************************************************************* */ +TEST(HybridGaussianFactorGraph, hybridEliminationOneFactor) { + HybridGaussianFactorGraph hfg; + hfg.add(HybridGaussianFactor(m1, two::components(X(1)))); + + auto result = hfg.eliminate({X(1)}); 
+ + // Check that we have a valid Gaussian conditional. + auto hgc = result.first->asHybrid(); + CHECK(hgc); + const HybridValues values{{{X(1), Z_3x1}}, {{M(1), 1}}}; + EXPECT(HybridConditional::CheckInvariants(*result.first, values)); + + // Check that factor is discrete and correct + auto factor = std::dynamic_pointer_cast(result.second); + CHECK(factor); + EXPECT(assert_equal(DecisionTreeFactor{m1, "1 1"}, *factor)); +} /* ************************************************************************* */ TEST(HybridGaussianFactorGraph, eliminateFullSequentialEqualChance) { @@ -124,12 +151,8 @@ TEST(HybridGaussianFactorGraph, eliminateFullSequentialEqualChance) { // Add factor between x0 and x1 hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); - // Add a gaussian mixture factor Ï•(x1, c1) - DiscreteKey m1(M(1), 2); - DecisionTree dt( - M(1), std::make_shared(X(1), I_3x3, Z_3x1), - std::make_shared(X(1), I_3x3, Vector3::Ones())); - hfg.add(GaussianMixtureFactor({X(1)}, {m1}, dt)); + // Add a hybrid gaussian factor Ï•(x1, c1) + hfg.add(HybridGaussianFactor(m1, two::components(X(1)))); auto result = hfg.eliminateSequential(); @@ -145,17 +168,12 @@ TEST(HybridGaussianFactorGraph, eliminateFullSequentialEqualChance) { TEST(HybridGaussianFactorGraph, eliminateFullSequentialSimple) { HybridGaussianFactorGraph hfg; - DiscreteKey m1(M(1), 2); - // Add prior on x0 hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); // Add factor between x0 and x1 hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); - std::vector factors = { - std::make_shared(X(1), I_3x3, Z_3x1), - std::make_shared(X(1), I_3x3, Vector3::Ones())}; - hfg.add(GaussianMixtureFactor({X(1)}, {m1}, factors)); + hfg.add(HybridGaussianFactor(m1, two::components(X(1)))); // Discrete probability table for c1 hfg.add(DecisionTreeFactor(m1, {2, 8})); @@ -172,15 +190,10 @@ TEST(HybridGaussianFactorGraph, eliminateFullSequentialSimple) { TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalSimple) { 
HybridGaussianFactorGraph hfg; - DiscreteKey m1(M(1), 2); - hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); - hfg.add(GaussianMixtureFactor( - {X(1)}, {{M(1), 2}}, - {std::make_shared(X(1), I_3x3, Z_3x1), - std::make_shared(X(1), I_3x3, Vector3::Ones())})); + hfg.add(HybridGaussianFactor({M(1), 2}, two::components(X(1)))); hfg.add(DecisionTreeFactor(m1, {2, 8})); // TODO(Varun) Adding extra discrete variable not connected to continuous @@ -199,22 +212,15 @@ TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalSimple) { TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalCLG) { HybridGaussianFactorGraph hfg; - DiscreteKey m(M(1), 2); - // Prior on x0 hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); // Factor between x0-x1 hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); - // Decision tree with different modes on x1 - DecisionTree dt( - M(1), std::make_shared(X(1), I_3x3, Z_3x1), - std::make_shared(X(1), I_3x3, Vector3::Ones())); - // Hybrid factor P(x1|c1) - hfg.add(GaussianMixtureFactor({X(1)}, {m}, dt)); + hfg.add(HybridGaussianFactor(m1, two::components(X(1)))); // Prior factor on c1 - hfg.add(DecisionTreeFactor(m, {2, 8})); + hfg.add(DecisionTreeFactor(m1, {2, 8})); // Get a constrained ordering keeping c1 last auto ordering_full = HybridOrdering(hfg); @@ -236,37 +242,16 @@ TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalTwoClique) { hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); hfg.add(JacobianFactor(X(1), I_3x3, X(2), -I_3x3, Z_3x1)); - { - hfg.add(GaussianMixtureFactor( - {X(0)}, {{M(0), 2}}, - {std::make_shared(X(0), I_3x3, Z_3x1), - std::make_shared(X(0), I_3x3, Vector3::Ones())})); - - DecisionTree dt1( - M(1), std::make_shared(X(2), I_3x3, Z_3x1), - std::make_shared(X(2), I_3x3, Vector3::Ones())); - - hfg.add(GaussianMixtureFactor({X(2)}, {{M(1), 2}}, dt1)); - } + hfg.add(HybridGaussianFactor({M(0), 2}, two::components(X(0)))); + 
hfg.add(HybridGaussianFactor({M(1), 2}, two::components(X(2)))); hfg.add(DecisionTreeFactor({{M(1), 2}, {M(2), 2}}, "1 2 3 4")); hfg.add(JacobianFactor(X(3), I_3x3, X(4), -I_3x3, Z_3x1)); hfg.add(JacobianFactor(X(4), I_3x3, X(5), -I_3x3, Z_3x1)); - { - DecisionTree dt( - M(3), std::make_shared(X(3), I_3x3, Z_3x1), - std::make_shared(X(3), I_3x3, Vector3::Ones())); - - hfg.add(GaussianMixtureFactor({X(3)}, {{M(3), 2}}, dt)); - - DecisionTree dt1( - M(2), std::make_shared(X(5), I_3x3, Z_3x1), - std::make_shared(X(5), I_3x3, Vector3::Ones())); - - hfg.add(GaussianMixtureFactor({X(5)}, {{M(2), 2}}, dt1)); - } + hfg.add(HybridGaussianFactor({M(3), 2}, two::components(X(3)))); + hfg.add(HybridGaussianFactor({M(2), 2}, two::components(X(5)))); auto ordering_full = Ordering::ColamdConstrainedLast(hfg, {M(0), M(1), M(2), M(3)}); @@ -278,7 +263,7 @@ TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalTwoClique) { EXPECT_LONGS_EQUAL(0, remaining->size()); /* - (Fan) Explanation: the Junction tree will need to reeliminate to get to the + (Fan) Explanation: the Junction tree will need to re-eliminate to get to the marginal on X(1), which is not possible because it involves eliminating discrete before continuous. The solution to this, however, is in Murphy02. TLDR is that this is 1. expensive and 2. inexact. nevertheless it is doable. 
@@ -490,26 +475,71 @@ TEST(HybridGaussianFactorGraph, SwitchingTwoVar) { } } +/* ****************************************************************************/ +// Select a particular continuous factor graph given a discrete assignment +TEST(HybridGaussianFactorGraph, DiscreteSelection) { + Switching s(3); + + HybridGaussianFactorGraph graph = s.linearizedFactorGraph; + + DiscreteValues dv00{{M(0), 0}, {M(1), 0}}; + GaussianFactorGraph continuous_00 = graph(dv00); + GaussianFactorGraph expected_00; + expected_00.push_back(JacobianFactor(X(0), I_1x1 * 10, Vector1(-10))); + expected_00.push_back(JacobianFactor(X(0), -I_1x1, X(1), I_1x1, Vector1(-1))); + expected_00.push_back(JacobianFactor(X(1), -I_1x1, X(2), I_1x1, Vector1(-1))); + expected_00.push_back(JacobianFactor(X(1), I_1x1 * 10, Vector1(-10))); + expected_00.push_back(JacobianFactor(X(2), I_1x1 * 10, Vector1(-10))); + + EXPECT(assert_equal(expected_00, continuous_00)); + + DiscreteValues dv01{{M(0), 0}, {M(1), 1}}; + GaussianFactorGraph continuous_01 = graph(dv01); + GaussianFactorGraph expected_01; + expected_01.push_back(JacobianFactor(X(0), I_1x1 * 10, Vector1(-10))); + expected_01.push_back(JacobianFactor(X(0), -I_1x1, X(1), I_1x1, Vector1(-1))); + expected_01.push_back(JacobianFactor(X(1), -I_1x1, X(2), I_1x1, Vector1(-0))); + expected_01.push_back(JacobianFactor(X(1), I_1x1 * 10, Vector1(-10))); + expected_01.push_back(JacobianFactor(X(2), I_1x1 * 10, Vector1(-10))); + + EXPECT(assert_equal(expected_01, continuous_01)); + + DiscreteValues dv10{{M(0), 1}, {M(1), 0}}; + GaussianFactorGraph continuous_10 = graph(dv10); + GaussianFactorGraph expected_10; + expected_10.push_back(JacobianFactor(X(0), I_1x1 * 10, Vector1(-10))); + expected_10.push_back(JacobianFactor(X(0), -I_1x1, X(1), I_1x1, Vector1(-0))); + expected_10.push_back(JacobianFactor(X(1), -I_1x1, X(2), I_1x1, Vector1(-1))); + expected_10.push_back(JacobianFactor(X(1), I_1x1 * 10, Vector1(-10))); + expected_10.push_back(JacobianFactor(X(2), I_1x1 * 
10, Vector1(-10))); + + EXPECT(assert_equal(expected_10, continuous_10)); + + DiscreteValues dv11{{M(0), 1}, {M(1), 1}}; + GaussianFactorGraph continuous_11 = graph(dv11); + GaussianFactorGraph expected_11; + expected_11.push_back(JacobianFactor(X(0), I_1x1 * 10, Vector1(-10))); + expected_11.push_back(JacobianFactor(X(0), -I_1x1, X(1), I_1x1, Vector1(-0))); + expected_11.push_back(JacobianFactor(X(1), -I_1x1, X(2), I_1x1, Vector1(-0))); + expected_11.push_back(JacobianFactor(X(1), I_1x1 * 10, Vector1(-10))); + expected_11.push_back(JacobianFactor(X(2), I_1x1 * 10, Vector1(-10))); + + EXPECT(assert_equal(expected_11, continuous_11)); +} + /* ************************************************************************* */ TEST(HybridGaussianFactorGraph, optimize) { HybridGaussianFactorGraph hfg; - DiscreteKey c1(C(1), 2); - hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); - - DecisionTree dt( - C(1), std::make_shared(X(1), I_3x3, Z_3x1), - std::make_shared(X(1), I_3x3, Vector3::Ones())); - - hfg.add(GaussianMixtureFactor({X(1)}, {c1}, dt)); + hfg.add(HybridGaussianFactor(m1, two::components(X(1)))); auto result = hfg.eliminateSequential(); HybridValues hv = result->optimize(); - EXPECT(assert_equal(hv.atDiscrete(C(1)), int(0))); + EXPECT(assert_equal(hv.atDiscrete(M(1)), int(0))); } /* ************************************************************************* */ @@ -589,13 +619,62 @@ TEST(HybridGaussianFactorGraph, ErrorAndProbPrimeTree) { // regression EXPECT(assert_equal(expected_error, error_tree, 1e-7)); - auto probs = graph.probPrime(delta.continuous()); + auto probabilities = graph.probPrime(delta.continuous()); std::vector prob_leaves = {0.36793249, 0.61247742, 0.59489556, 0.99029064}; - AlgebraicDecisionTree expected_probs(discrete_keys, prob_leaves); + AlgebraicDecisionTree expected_probabilities(discrete_keys, prob_leaves); // regression - EXPECT(assert_equal(expected_probs, probs, 1e-7)); + 
EXPECT(assert_equal(expected_probabilities, probabilities, 1e-7));
+}
+
+/* ****************************************************************************/
+// Test hybrid gaussian factor graph errorTree during
+// incremental operation
+TEST(HybridGaussianFactorGraph, IncrementalErrorTree) {
+  Switching s(4);
+
+  HybridGaussianFactorGraph graph;
+  graph.push_back(s.linearizedFactorGraph.at(0));  // f(X0)
+  graph.push_back(s.linearizedFactorGraph.at(1));  // f(X0, X1, M0)
+  graph.push_back(s.linearizedFactorGraph.at(2));  // f(X1, X2, M1)
+  graph.push_back(s.linearizedFactorGraph.at(4));  // f(X1)
+  graph.push_back(s.linearizedFactorGraph.at(5));  // f(X2)
+  graph.push_back(s.linearizedFactorGraph.at(7));  // f(M0)
+  graph.push_back(s.linearizedFactorGraph.at(8));  // f(M0, M1)
+
+  HybridBayesNet::shared_ptr hybridBayesNet = graph.eliminateSequential();
+  EXPECT_LONGS_EQUAL(5, hybridBayesNet->size());
+
+  HybridValues delta = hybridBayesNet->optimize();
+  auto error_tree = graph.errorTree(delta.continuous());
+
+  std::vector discrete_keys = {{M(0), 2}, {M(1), 2}};
+  std::vector leaves = {0.99985581, 0.4902432, 0.51936941,
+                                0.0097568009};
+  AlgebraicDecisionTree expected_error(discrete_keys, leaves);
+
+  // regression
+  EXPECT(assert_equal(expected_error, error_tree, 1e-7));
+
+  graph = HybridGaussianFactorGraph();
+  graph.push_back(*hybridBayesNet);
+  graph.push_back(s.linearizedFactorGraph.at(3));  // f(X2, X3, M2)
+  graph.push_back(s.linearizedFactorGraph.at(6));  // f(X3)
+
+  hybridBayesNet = graph.eliminateSequential();
+  EXPECT_LONGS_EQUAL(7, hybridBayesNet->size());
+
+  delta = hybridBayesNet->optimize();
+  auto error_tree2 = graph.errorTree(delta.continuous());
+
+  discrete_keys = {{M(0), 2}, {M(1), 2}, {M(2), 2}};
+  leaves = {0.50985198, 0.0097577296, 0.50009425,  0,
+            0.52922138, 0.029127133,  0.50985105,  0.0097567964};
+  AlgebraicDecisionTree expected_error2(discrete_keys, leaves);
+
+  // regression: check the tree from the SECOND elimination (was mistakenly
+  // re-checking the first-round expected_error/error_tree pair, leaving
+  // expected_error2/error_tree2 unverified)
+  EXPECT(assert_equal(expected_error2, error_tree2, 1e-7));
 }
 
 /*
****************************************************************************/ @@ -612,9 +691,9 @@ TEST(HybridGaussianFactorGraph, assembleGraphTree) { // Create expected decision tree with two factor graphs: - // Get mixture factor: - auto mixture = fg.at(0); - CHECK(mixture); + // Get hybrid factor: + auto hybrid = fg.at(0); + CHECK(hybrid); // Get prior factor: const auto gf = fg.at(1); @@ -629,8 +708,8 @@ TEST(HybridGaussianFactorGraph, assembleGraphTree) { // Expected decision tree with two factor graphs: // f(x0;mode=0)P(x0) and f(x0;mode=1)P(x0) GaussianFactorGraphTree expected{ - M(0), GaussianFactorGraph(std::vector{(*mixture)(d0), prior}), - GaussianFactorGraph(std::vector{(*mixture)(d1), prior})}; + M(0), GaussianFactorGraph(std::vector{(*hybrid)(d0), prior}), + GaussianFactorGraph(std::vector{(*hybrid)(d1), prior})}; EXPECT(assert_equal(expected(d0), actual(d0), 1e-5)); EXPECT(assert_equal(expected(d1), actual(d1), 1e-5)); @@ -694,18 +773,18 @@ TEST(HybridGaussianFactorGraph, EliminateTiny1) { // Create expected Bayes Net: HybridBayesNet expectedBayesNet; - // Create Gaussian mixture on X(0). + // Create hybrid Gaussian factor on X(0). using tiny::mode; // regression, but mean checked to be 5.0 in both cases: const auto conditional0 = std::make_shared( X(0), Vector1(14.1421), I_1x1 * 2.82843), conditional1 = std::make_shared( X(0), Vector1(10.1379), I_1x1 * 2.02759); - expectedBayesNet.emplace_back( - new GaussianMixture({X(0)}, {}, {mode}, {conditional0, conditional1})); + expectedBayesNet.emplace_shared( + mode, std::vector{conditional0, conditional1}); // Add prior on mode. 
- expectedBayesNet.emplace_back(new DiscreteConditional(mode, "74/26")); + expectedBayesNet.emplace_shared(mode, "74/26"); // Test elimination const auto posterior = fg.eliminateSequential(); @@ -720,23 +799,19 @@ TEST(HybridGaussianFactorGraph, EliminateTiny1) { TEST(HybridGaussianFactorGraph, EliminateTiny1Swapped) { const VectorValues measurements{{Z(0), Vector1(5.0)}}; - // Create mode key: 1 is low-noise, 0 is high-noise. - const DiscreteKey mode{M(0), 2}; HybridBayesNet bn; - // Create Gaussian mixture z_0 = x0 + noise for each measurement. - bn.emplace_back(new GaussianMixture( - {Z(0)}, {X(0)}, {mode}, - {GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Z_1x1, 3), - GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Z_1x1, - 0.5)})); + // mode-dependent: 1 is low-noise, 0 is high-noise. + // Create hybrid Gaussian factor z_0 = x0 + noise for each measurement. + std::vector> parms{{Z_1x1, 3}, {Z_1x1, 0.5}}; + bn.emplace_shared(m1, Z(0), I_1x1, X(0), parms); // Create prior on X(0). bn.push_back( GaussianConditional::sharedMeanAndStddev(X(0), Vector1(5.0), 0.5)); - // Add prior on mode. - bn.emplace_back(new DiscreteConditional(mode, "1/1")); + // Add prior on m1. + bn.emplace_shared(m1, "1/1"); // bn.print(); auto fg = bn.toFactorGraph(measurements); @@ -749,17 +824,17 @@ TEST(HybridGaussianFactorGraph, EliminateTiny1Swapped) { // Create expected Bayes Net: HybridBayesNet expectedBayesNet; - // Create Gaussian mixture on X(0). + // Create hybrid Gaussian factor on X(0). // regression, but mean checked to be 5.0 in both cases: const auto conditional0 = std::make_shared( X(0), Vector1(10.1379), I_1x1 * 2.02759), conditional1 = std::make_shared( X(0), Vector1(14.1421), I_1x1 * 2.82843); - expectedBayesNet.emplace_back( - new GaussianMixture({X(0)}, {}, {mode}, {conditional0, conditional1})); + expectedBayesNet.emplace_shared( + m1, std::vector{conditional0, conditional1}); - // Add prior on mode. 
- expectedBayesNet.emplace_back(new DiscreteConditional(mode, "1/1")); + // Add prior on m1. + expectedBayesNet.emplace_shared(m1, "1/1"); // Test elimination const auto posterior = fg.eliminateSequential(); @@ -784,18 +859,18 @@ TEST(HybridGaussianFactorGraph, EliminateTiny2) { // Create expected Bayes Net: HybridBayesNet expectedBayesNet; - // Create Gaussian mixture on X(0). + // Create hybrid Gaussian factor on X(0). using tiny::mode; // regression, but mean checked to be 5.0 in both cases: const auto conditional0 = std::make_shared( X(0), Vector1(17.3205), I_1x1 * 3.4641), conditional1 = std::make_shared( X(0), Vector1(10.274), I_1x1 * 2.0548); - expectedBayesNet.emplace_back( - new GaussianMixture({X(0)}, {}, {mode}, {conditional0, conditional1})); + expectedBayesNet.emplace_shared( + mode, std::vector{conditional0, conditional1}); // Add prior on mode. - expectedBayesNet.emplace_back(new DiscreteConditional(mode, "23/77")); + expectedBayesNet.emplace_shared(mode, "23/77"); // Test elimination const auto posterior = fg.eliminateSequential(); @@ -834,33 +909,30 @@ TEST(HybridGaussianFactorGraph, EliminateSwitchingNetwork) { // NOTE: we add reverse topological so we can sample from the Bayes net.: // Add measurements: + std::vector> measurementModels{{Z_1x1, 3}, + {Z_1x1, 0.5}}; for (size_t t : {0, 1, 2}) { - // Create Gaussian mixture on Z(t) conditioned on X(t) and mode N(t): + // Create hybrid Gaussian factor on Z(t) conditioned on X(t) and mode N(t): const auto noise_mode_t = DiscreteKey{N(t), 2}; - bn.emplace_back( - new GaussianMixture({Z(t)}, {X(t)}, {noise_mode_t}, - {GaussianConditional::sharedMeanAndStddev( - Z(t), I_1x1, X(t), Z_1x1, 0.5), - GaussianConditional::sharedMeanAndStddev( - Z(t), I_1x1, X(t), Z_1x1, 3.0)})); + bn.emplace_shared(noise_mode_t, Z(t), I_1x1, + X(t), measurementModels); // Create prior on discrete mode N(t): - bn.emplace_back(new DiscreteConditional(noise_mode_t, "20/80")); + bn.emplace_shared(noise_mode_t, "20/80"); } - // Add 
motion models: + // Add motion models. TODO(frank): why are they exactly the same? + std::vector> motionModels{{Z_1x1, 0.2}, + {Z_1x1, 0.2}}; for (size_t t : {2, 1}) { - // Create Gaussian mixture on X(t) conditioned on X(t-1) and mode M(t-1): + // Create hybrid Gaussian factor on X(t) conditioned on X(t-1) + // and mode M(t-1): const auto motion_model_t = DiscreteKey{M(t), 2}; - bn.emplace_back( - new GaussianMixture({X(t)}, {X(t - 1)}, {motion_model_t}, - {GaussianConditional::sharedMeanAndStddev( - X(t), I_1x1, X(t - 1), Z_1x1, 0.2), - GaussianConditional::sharedMeanAndStddev( - X(t), I_1x1, X(t - 1), I_1x1, 0.2)})); + bn.emplace_shared(motion_model_t, X(t), I_1x1, + X(t - 1), motionModels); // Create prior on motion model M(t): - bn.emplace_back(new DiscreteConditional(motion_model_t, "40/60")); + bn.emplace_shared(motion_model_t, "40/60"); } // Create Gaussian prior on continuous X(0) using sharedMeanAndStddev: diff --git a/gtsam/hybrid/tests/testHybridGaussianISAM.cpp b/gtsam/hybrid/tests/testHybridGaussianISAM.cpp index 249cbf9c3..70fa321ad 100644 --- a/gtsam/hybrid/tests/testHybridGaussianISAM.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianISAM.cpp @@ -126,31 +126,34 @@ TEST(HybridGaussianElimination, IncrementalInference) { /********************************************************/ // Run batch elimination so we can compare results. 
- const Ordering ordering {X(0), X(1), X(2)}; + const Ordering ordering{X(0), X(1), X(2)}; // Now we calculate the expected factors using full elimination const auto [expectedHybridBayesTree, expectedRemainingGraph] = switching.linearizedFactorGraph.eliminatePartialMultifrontal(ordering); // The densities on X(0) should be the same - auto x0_conditional = - dynamic_pointer_cast(isam[X(0)]->conditional()->inner()); - auto expected_x0_conditional = dynamic_pointer_cast( - (*expectedHybridBayesTree)[X(0)]->conditional()->inner()); + auto x0_conditional = dynamic_pointer_cast( + isam[X(0)]->conditional()->inner()); + auto expected_x0_conditional = + dynamic_pointer_cast( + (*expectedHybridBayesTree)[X(0)]->conditional()->inner()); EXPECT(assert_equal(*x0_conditional, *expected_x0_conditional)); // The densities on X(1) should be the same - auto x1_conditional = - dynamic_pointer_cast(isam[X(1)]->conditional()->inner()); - auto expected_x1_conditional = dynamic_pointer_cast( - (*expectedHybridBayesTree)[X(1)]->conditional()->inner()); + auto x1_conditional = dynamic_pointer_cast( + isam[X(1)]->conditional()->inner()); + auto expected_x1_conditional = + dynamic_pointer_cast( + (*expectedHybridBayesTree)[X(1)]->conditional()->inner()); EXPECT(assert_equal(*x1_conditional, *expected_x1_conditional)); // The densities on X(2) should be the same - auto x2_conditional = - dynamic_pointer_cast(isam[X(2)]->conditional()->inner()); - auto expected_x2_conditional = dynamic_pointer_cast( - (*expectedHybridBayesTree)[X(2)]->conditional()->inner()); + auto x2_conditional = dynamic_pointer_cast( + isam[X(2)]->conditional()->inner()); + auto expected_x2_conditional = + dynamic_pointer_cast( + (*expectedHybridBayesTree)[X(2)]->conditional()->inner()); EXPECT(assert_equal(*x2_conditional, *expected_x2_conditional)); // We only perform manual continuous elimination for 0,0. 
@@ -279,9 +282,9 @@ TEST(HybridGaussianElimination, Approx_inference) { // Check that the hybrid nodes of the bayes net match those of the pre-pruning // bayes net, at the same positions. - auto &unprunedLastDensity = *dynamic_pointer_cast( + auto &unprunedLastDensity = *dynamic_pointer_cast( unprunedHybridBayesTree->clique(X(3))->conditional()->inner()); - auto &lastDensity = *dynamic_pointer_cast( + auto &lastDensity = *dynamic_pointer_cast( incrementalHybrid[X(3)]->conditional()->inner()); std::vector> assignments = @@ -330,13 +333,13 @@ TEST(HybridGaussianElimination, Incremental_approximate) { // each with 2, 4, 8, and 5 (pruned) leaves respetively. EXPECT_LONGS_EQUAL(4, incrementalHybrid.size()); EXPECT_LONGS_EQUAL( - 2, incrementalHybrid[X(0)]->conditional()->asMixture()->nrComponents()); + 2, incrementalHybrid[X(0)]->conditional()->asHybrid()->nrComponents()); EXPECT_LONGS_EQUAL( - 3, incrementalHybrid[X(1)]->conditional()->asMixture()->nrComponents()); + 3, incrementalHybrid[X(1)]->conditional()->asHybrid()->nrComponents()); EXPECT_LONGS_EQUAL( - 5, incrementalHybrid[X(2)]->conditional()->asMixture()->nrComponents()); + 5, incrementalHybrid[X(2)]->conditional()->asHybrid()->nrComponents()); EXPECT_LONGS_EQUAL( - 5, incrementalHybrid[X(3)]->conditional()->asMixture()->nrComponents()); + 5, incrementalHybrid[X(3)]->conditional()->asHybrid()->nrComponents()); /***** Run Round 2 *****/ HybridGaussianFactorGraph graph2; @@ -351,9 +354,9 @@ TEST(HybridGaussianElimination, Incremental_approximate) { // with 5 (pruned) leaves. 
CHECK_EQUAL(5, incrementalHybrid.size()); EXPECT_LONGS_EQUAL( - 5, incrementalHybrid[X(3)]->conditional()->asMixture()->nrComponents()); + 5, incrementalHybrid[X(3)]->conditional()->asHybrid()->nrComponents()); EXPECT_LONGS_EQUAL( - 5, incrementalHybrid[X(4)]->conditional()->asMixture()->nrComponents()); + 5, incrementalHybrid[X(4)]->conditional()->asHybrid()->nrComponents()); } /* ************************************************************************/ @@ -381,11 +384,11 @@ TEST(HybridGaussianISAM, NonTrivial) { // Add connecting poses similar to PoseFactors in GTD fg.emplace_shared>(X(0), Y(0), Pose2(0, 1.0, 0), - poseNoise); + poseNoise); fg.emplace_shared>(Y(0), Z(0), Pose2(0, 1.0, 0), - poseNoise); + poseNoise); fg.emplace_shared>(Z(0), W(0), Pose2(0, 1.0, 0), - poseNoise); + poseNoise); // Create initial estimate Values initial; @@ -411,27 +414,24 @@ TEST(HybridGaussianISAM, NonTrivial) { // Add odometry factor with discrete modes. Pose2 odometry(1.0, 0.0, 0.0); - KeyVector contKeys = {W(0), W(1)}; auto noise_model = noiseModel::Isotropic::Sigma(3, 1.0); - auto still = std::make_shared(W(0), W(1), Pose2(0, 0, 0), - noise_model), - moving = std::make_shared(W(0), W(1), odometry, - noise_model); - std::vector components = {moving, still}; - auto mixtureFactor = std::make_shared( - contKeys, DiscreteKeys{gtsam::DiscreteKey(M(1), 2)}, components); - fg.push_back(mixtureFactor); + std::vector components; + components.emplace_back( + new PlanarMotionModel(W(0), W(1), odometry, noise_model)); // moving + components.emplace_back( + new PlanarMotionModel(W(0), W(1), Pose2(0, 0, 0), noise_model)); // still + fg.emplace_shared(DiscreteKey(M(1), 2), components); // Add equivalent of ImuFactor fg.emplace_shared>(X(0), X(1), Pose2(1.0, 0.0, 0), - poseNoise); + poseNoise); // PoseFactors-like at k=1 fg.emplace_shared>(X(1), Y(1), Pose2(0, 1, 0), - poseNoise); + poseNoise); fg.emplace_shared>(Y(1), Z(1), Pose2(0, 1, 0), - poseNoise); + poseNoise); fg.emplace_shared>(Z(1), 
W(1), Pose2(-1, 1, 0), - poseNoise); + poseNoise); initial.insert(X(1), Pose2(1.0, 0.0, 0.0)); initial.insert(Y(1), Pose2(1.0, 1.0, 0.0)); @@ -452,26 +452,23 @@ TEST(HybridGaussianISAM, NonTrivial) { /*************** Run Round 3 ***************/ // Add odometry factor with discrete modes. - contKeys = {W(1), W(2)}; - still = std::make_shared(W(1), W(2), Pose2(0, 0, 0), - noise_model); - moving = - std::make_shared(W(1), W(2), odometry, noise_model); - components = {moving, still}; - mixtureFactor = std::make_shared( - contKeys, DiscreteKeys{gtsam::DiscreteKey(M(2), 2)}, components); - fg.push_back(mixtureFactor); + components.clear(); + components.emplace_back( + new PlanarMotionModel(W(1), W(2), odometry, noise_model)); // moving + components.emplace_back( + new PlanarMotionModel(W(1), W(2), Pose2(0, 0, 0), noise_model)); // still + fg.emplace_shared(DiscreteKey(M(2), 2), components); // Add equivalent of ImuFactor fg.emplace_shared>(X(1), X(2), Pose2(1.0, 0.0, 0), - poseNoise); + poseNoise); // PoseFactors-like at k=1 fg.emplace_shared>(X(2), Y(2), Pose2(0, 1, 0), - poseNoise); + poseNoise); fg.emplace_shared>(Y(2), Z(2), Pose2(0, 1, 0), - poseNoise); + poseNoise); fg.emplace_shared>(Z(2), W(2), Pose2(-2, 1, 0), - poseNoise); + poseNoise); initial.insert(X(2), Pose2(2.0, 0.0, 0.0)); initial.insert(Y(2), Pose2(2.0, 1.0, 0.0)); @@ -495,26 +492,23 @@ TEST(HybridGaussianISAM, NonTrivial) { /*************** Run Round 4 ***************/ // Add odometry factor with discrete modes. 
- contKeys = {W(2), W(3)}; - still = std::make_shared(W(2), W(3), Pose2(0, 0, 0), - noise_model); - moving = - std::make_shared(W(2), W(3), odometry, noise_model); - components = {moving, still}; - mixtureFactor = std::make_shared( - contKeys, DiscreteKeys{gtsam::DiscreteKey(M(3), 2)}, components); - fg.push_back(mixtureFactor); + components.clear(); + components.emplace_back( + new PlanarMotionModel(W(2), W(3), odometry, noise_model)); // moving + components.emplace_back( + new PlanarMotionModel(W(2), W(3), Pose2(0, 0, 0), noise_model)); // still + fg.emplace_shared(DiscreteKey(M(3), 2), components); // Add equivalent of ImuFactor fg.emplace_shared>(X(2), X(3), Pose2(1.0, 0.0, 0), - poseNoise); + poseNoise); // PoseFactors-like at k=3 fg.emplace_shared>(X(3), Y(3), Pose2(0, 1, 0), - poseNoise); + poseNoise); fg.emplace_shared>(Y(3), Z(3), Pose2(0, 1, 0), - poseNoise); + poseNoise); fg.emplace_shared>(Z(3), W(3), Pose2(-3, 1, 0), - poseNoise); + poseNoise); initial.insert(X(3), Pose2(3.0, 0.0, 0.0)); initial.insert(Y(3), Pose2(3.0, 1.0, 0.0)); @@ -547,7 +541,7 @@ TEST(HybridGaussianISAM, NonTrivial) { // Test if pruning worked correctly by checking that we only have 3 leaves in // the last node. - auto lastConditional = inc[X(3)]->conditional()->asMixture(); + auto lastConditional = inc[X(3)]->conditional()->asHybrid(); EXPECT_LONGS_EQUAL(3, lastConditional->nrComponents()); } diff --git a/gtsam/hybrid/tests/testHybridMotionModel.cpp b/gtsam/hybrid/tests/testHybridMotionModel.cpp new file mode 100644 index 000000000..cbfdc7fe4 --- /dev/null +++ b/gtsam/hybrid/tests/testHybridMotionModel.cpp @@ -0,0 +1,382 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testHybridMotionModel.cpp + * @brief Tests hybrid inference with a simple switching motion model + * @author Varun Agrawal + * @author Fan Jiang + * @author Frank Dellaert + * @date December 2021 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Include for test suite +#include + +#include + +using namespace std; +using namespace gtsam; +using symbol_shorthand::M; +using symbol_shorthand::X; +using symbol_shorthand::Z; + +DiscreteKey m1(M(1), 2); + +void addMeasurement(HybridBayesNet &hbn, Key z_key, Key x_key, double sigma) { + auto measurement_model = noiseModel::Isotropic::Sigma(1, sigma); + hbn.emplace_shared(z_key, Vector1(0.0), I_1x1, x_key, + -I_1x1, measurement_model); +} + +/// Create hybrid motion model p(x1 | x0, m1) +static HybridGaussianConditional::shared_ptr CreateHybridMotionModel( + double mu0, double mu1, double sigma0, double sigma1) { + std::vector> motionModels{{Vector1(mu0), sigma0}, + {Vector1(mu1), sigma1}}; + return std::make_shared(m1, X(1), I_1x1, X(0), + motionModels); +} + +/// Create two state Bayes network with 1 or two measurement models +HybridBayesNet CreateBayesNet( + const HybridGaussianConditional::shared_ptr &hybridMotionModel, + bool add_second_measurement = false) { + HybridBayesNet hbn; + + // Add measurement model p(z0 | x0) + addMeasurement(hbn, Z(0), X(0), 3.0); + + // Optionally add second measurement model p(z1 | x1) + if (add_second_measurement) { + addMeasurement(hbn, Z(1), X(1), 3.0); + } + + // Add hybrid motion model + hbn.push_back(hybridMotionModel); + + // Discrete uniform prior. 
+ hbn.emplace_shared(m1, "50/50"); + + return hbn; +} + +/// Approximate the discrete marginal P(m1) using importance sampling +std::pair approximateDiscreteMarginal( + const HybridBayesNet &hbn, + const HybridGaussianConditional::shared_ptr &hybridMotionModel, + const VectorValues &given, size_t N = 100000) { + /// Create importance sampling network q(x0,x1,m) = p(x1|x0,m1) q(x0) P(m1), + /// using q(x0) = N(z0, sigmaQ) to sample x0. + HybridBayesNet q; + q.push_back(hybridMotionModel); // Add hybrid motion model + q.emplace_shared(GaussianConditional::FromMeanAndStddev( + X(0), given.at(Z(0)), /* sigmaQ = */ 3.0)); // Add proposal q(x0) for x0 + q.emplace_shared(m1, "50/50"); // Discrete prior. + + // Do importance sampling + double w0 = 0.0, w1 = 0.0; + std::mt19937_64 rng(42); + for (int i = 0; i < N; i++) { + HybridValues sample = q.sample(&rng); + sample.insert(given); + double weight = hbn.evaluate(sample) / q.evaluate(sample); + (sample.atDiscrete(M(1)) == 0) ? w0 += weight : w1 += weight; + } + double pm1 = w1 / (w0 + w1); + std::cout << "p(m0) = " << 100 * (1.0 - pm1) << std::endl; + std::cout << "p(m1) = " << 100 * pm1 << std::endl; + return {1.0 - pm1, pm1}; +} + +/* ************************************************************************* */ +/** + * Test a model p(z0|x0)p(z1|x1)p(x1|x0,m1)P(m1). + * + * p(x1|x0,m1) has mode-dependent mean but same covariance. + * + * Converting to a factor graph gives us Ï•(x0;z0)Ï•(x1;z1)Ï•(x1,x0,m1)P(m1) + * + * If we only have a measurement on x0, then + * the posterior probability of m1 should be 0.5/0.5. + * Getting a measurement on z1 gives use more information. 
+ */ +TEST(HybridGaussianFactor, TwoStateModel) { + double mu0 = 1.0, mu1 = 3.0; + double sigma = 0.5; + auto hybridMotionModel = CreateHybridMotionModel(mu0, mu1, sigma, sigma); + + // Start with no measurement on x1, only on x0 + const Vector1 z0(0.5); + + VectorValues given; + given.insert(Z(0), z0); + + { + HybridBayesNet hbn = CreateBayesNet(hybridMotionModel); + HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given); + HybridBayesNet::shared_ptr bn = gfg.eliminateSequential(); + + // Since no measurement on x1, we hedge our bets + // Importance sampling run with 100k samples gives 50.051/49.949 + // approximateDiscreteMarginal(hbn, hybridMotionModel, given); + DiscreteConditional expected(m1, "50/50"); + EXPECT(assert_equal(expected, *(bn->at(2)->asDiscrete()))); + } + + { + // If we set z1=4.5 (>> 2.5 which is the halfway point), + // probability of discrete mode should be leaning to m1==1. + const Vector1 z1(4.5); + given.insert(Z(1), z1); + + HybridBayesNet hbn = CreateBayesNet(hybridMotionModel, true); + HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given); + HybridBayesNet::shared_ptr bn = gfg.eliminateSequential(); + + // Since we have a measurement on x1, we get a definite result + // Values taken from an importance sampling run with 100k samples: + // approximateDiscreteMarginal(hbn, hybridMotionModel, given); + DiscreteConditional expected(m1, "44.3854/55.6146"); + EXPECT(assert_equal(expected, *(bn->at(2)->asDiscrete()), 0.002)); + } +} + +/* ************************************************************************* */ +/** + * Test a model P(z0|x0)P(x1|x0,m1)P(z1|x1)P(m1). + * + * P(x1|x0,m1) has different means and different covariances. + * + * Converting to a factor graph gives us + * Ï•(x0)Ï•(x1,x0,m1)Ï•(x1)P(m1) + * + * If we only have a measurement on z0, then + * the P(m1) should be 0.5/0.5. + * Getting a measurement on z1 gives use more information. 
+ */ +TEST(HybridGaussianFactor, TwoStateModel2) { + double mu0 = 1.0, mu1 = 3.0; + double sigma0 = 0.5, sigma1 = 2.0; + auto hybridMotionModel = CreateHybridMotionModel(mu0, mu1, sigma0, sigma1); + + // Start with no measurement on x1, only on x0 + const Vector1 z0(0.5); + VectorValues given; + given.insert(Z(0), z0); + + { + HybridBayesNet hbn = CreateBayesNet(hybridMotionModel); + HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given); + + // Check that ratio of Bayes net and factor graph for different modes is + // equal for several values of {x0,x1}. + for (VectorValues vv : + {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}}, + VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) { + vv.insert(given); // add measurements for HBN + HybridValues hv0(vv, {{M(1), 0}}), hv1(vv, {{M(1), 1}}); + EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0), + gfg.error(hv1) / hbn.error(hv1), 1e-9); + } + + HybridBayesNet::shared_ptr bn = gfg.eliminateSequential(); + + // Importance sampling run with 100k samples gives 50.095/49.905 + // approximateDiscreteMarginal(hbn, hybridMotionModel, given); + + // Since no measurement on x1, we a 50/50 probability + auto p_m = bn->at(2)->asDiscrete(); + EXPECT_DOUBLES_EQUAL(0.5, p_m->operator()({{M(1), 0}}), 1e-9); + EXPECT_DOUBLES_EQUAL(0.5, p_m->operator()({{M(1), 1}}), 1e-9); + } + + { + // Now we add a measurement z1 on x1 + const Vector1 z1(4.0); // favors m==1 + given.insert(Z(1), z1); + + HybridBayesNet hbn = CreateBayesNet(hybridMotionModel, true); + HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given); + + // Check that ratio of Bayes net and factor graph for different modes is + // equal for several values of {x0,x1}. 
+ for (VectorValues vv : + {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}}, + VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) { + vv.insert(given); // add measurements for HBN + HybridValues hv0(vv, {{M(1), 0}}), hv1(vv, {{M(1), 1}}); + EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0), + gfg.error(hv1) / hbn.error(hv1), 1e-9); + } + + HybridBayesNet::shared_ptr bn = gfg.eliminateSequential(); + + // Values taken from an importance sampling run with 100k samples: + // approximateDiscreteMarginal(hbn, hybridMotionModel, given); + DiscreteConditional expected(m1, "48.3158/51.6842"); + EXPECT(assert_equal(expected, *(bn->at(2)->asDiscrete()), 0.002)); + } + + { + // Add a different measurement z1 on x1 that favors m==0 + const Vector1 z1(1.1); + given.insert_or_assign(Z(1), z1); + + HybridBayesNet hbn = CreateBayesNet(hybridMotionModel, true); + HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given); + HybridBayesNet::shared_ptr bn = gfg.eliminateSequential(); + + // Values taken from an importance sampling run with 100k samples: + // approximateDiscreteMarginal(hbn, hybridMotionModel, given); + DiscreteConditional expected(m1, "55.396/44.604"); + EXPECT(assert_equal(expected, *(bn->at(2)->asDiscrete()), 0.002)); + } +} + +/* ************************************************************************* */ +/** + * Test a model p(z0|x0)p(x1|x0,m1)p(z1|x1)p(m1). + * + * p(x1|x0,m1) has the same means but different covariances. + * + * Converting to a factor graph gives us + * Ï•(x0)Ï•(x1,x0,m1)Ï•(x1)p(m1) + * + * If we only have a measurement on z0, then + * the p(m1) should be 0.5/0.5. + * Getting a measurement on z1 gives use more information. 
+ */ +TEST(HybridGaussianFactor, TwoStateModel3) { + double mu = 1.0; + double sigma0 = 0.5, sigma1 = 2.0; + auto hybridMotionModel = CreateHybridMotionModel(mu, mu, sigma0, sigma1); + + // Start with no measurement on x1, only on x0 + const Vector1 z0(0.5); + VectorValues given; + given.insert(Z(0), z0); + + { + HybridBayesNet hbn = CreateBayesNet(hybridMotionModel); + HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given); + + // Check that ratio of Bayes net and factor graph for different modes is + // equal for several values of {x0,x1}. + for (VectorValues vv : + {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}}, + VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) { + vv.insert(given); // add measurements for HBN + HybridValues hv0(vv, {{M(1), 0}}), hv1(vv, {{M(1), 1}}); + EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0), + gfg.error(hv1) / hbn.error(hv1), 1e-9); + } + + HybridBayesNet::shared_ptr bn = gfg.eliminateSequential(); + + // Importance sampling run with 100k samples gives 50.095/49.905 + // approximateDiscreteMarginal(hbn, hybridMotionModel, given); + + // Since no measurement on x1, we a 50/50 probability + auto p_m = bn->at(2)->asDiscrete(); + EXPECT_DOUBLES_EQUAL(0.5, p_m->operator()({{M(1), 0}}), 1e-9); + EXPECT_DOUBLES_EQUAL(0.5, p_m->operator()({{M(1), 1}}), 1e-9); + } + + { + // Now we add a measurement z1 on x1 + const Vector1 z1(4.0); // favors m==1 + given.insert(Z(1), z1); + + HybridBayesNet hbn = CreateBayesNet(hybridMotionModel, true); + HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given); + + // Check that ratio of Bayes net and factor graph for different modes is + // equal for several values of {x0,x1}. 
+ for (VectorValues vv : + {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}}, + VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) { + vv.insert(given); // add measurements for HBN + HybridValues hv0(vv, {{M(1), 0}}), hv1(vv, {{M(1), 1}}); + EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0), + gfg.error(hv1) / hbn.error(hv1), 1e-9); + } + + HybridBayesNet::shared_ptr bn = gfg.eliminateSequential(); + + // Values taken from an importance sampling run with 100k samples: + // approximateDiscreteMarginal(hbn, hybridMotionModel, given); + DiscreteConditional expected(m1, "51.7762/48.2238"); + EXPECT(assert_equal(expected, *(bn->at(2)->asDiscrete()), 0.002)); + } + + { + // Add a different measurement z1 on x1 that favors m==1 + const Vector1 z1(7.0); + given.insert_or_assign(Z(1), z1); + + HybridBayesNet hbn = CreateBayesNet(hybridMotionModel, true); + HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given); + HybridBayesNet::shared_ptr bn = gfg.eliminateSequential(); + + // Values taken from an importance sampling run with 100k samples: + // approximateDiscreteMarginal(hbn, hybridMotionModel, given); + DiscreteConditional expected(m1, "49.0762/50.9238"); + EXPECT(assert_equal(expected, *(bn->at(2)->asDiscrete()), 0.005)); + } +} + +/* ************************************************************************* */ +/** + * Same model, P(z0|x0)P(x1|x0,m1)P(z1|x1)P(m1), but now with very informative + * measurements and vastly different motion model: either stand still or move + * far. This yields a very informative posterior. 
+ */ +TEST(HybridGaussianFactor, TwoStateModel4) { + double mu0 = 0.0, mu1 = 10.0; + double sigma0 = 0.2, sigma1 = 5.0; + auto hybridMotionModel = CreateHybridMotionModel(mu0, mu1, sigma0, sigma1); + + // We only check the 2-measurement case + const Vector1 z0(0.0), z1(10.0); + VectorValues given{{Z(0), z0}, {Z(1), z1}}; + + HybridBayesNet hbn = CreateBayesNet(hybridMotionModel, true); + HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given); + HybridBayesNet::shared_ptr bn = gfg.eliminateSequential(); + + // Values taken from an importance sampling run with 100k samples: + // approximateDiscreteMarginal(hbn, hybridMotionModel, given); + DiscreteConditional expected(m1, "8.91527/91.0847"); + EXPECT(assert_equal(expected, *(bn->at(2)->asDiscrete()), 0.002)); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/hybrid/tests/testMixtureFactor.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactor.cpp similarity index 52% rename from gtsam/hybrid/tests/testMixtureFactor.cpp rename to gtsam/hybrid/tests/testHybridNonlinearFactor.cpp index 0b2564403..2b441ab13 100644 --- a/gtsam/hybrid/tests/testMixtureFactor.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearFactor.cpp @@ -10,15 +10,18 @@ * -------------------------------------------------------------------------- */ /** - * @file testMixtureFactor.cpp - * @brief Unit tests for MixtureFactor + * @file testHybridNonlinearFactor.cpp + * @brief Unit tests for HybridNonlinearFactor * @author Varun Agrawal * @date October 2022 */ #include #include -#include +#include +#include +#include +#include #include #include @@ -32,45 +35,60 @@ using symbol_shorthand::M; using symbol_shorthand::X; /* ************************************************************************* */ -// Check iterators of empty mixture. 
-TEST(MixtureFactor, Constructor) { - MixtureFactor factor; - MixtureFactor::const_iterator const_it = factor.begin(); +// Check iterators of empty hybrid factor. +TEST(HybridNonlinearFactor, Constructor) { + HybridNonlinearFactor factor; + HybridNonlinearFactor::const_iterator const_it = factor.begin(); CHECK(const_it == factor.end()); - MixtureFactor::iterator it = factor.begin(); + HybridNonlinearFactor::iterator it = factor.begin(); CHECK(it == factor.end()); } +/* ************************************************************************* */ +namespace test_constructor { +DiscreteKey m1(1, 2); +double between0 = 0.0; +double between1 = 1.0; + +Vector1 sigmas = Vector1(1.0); +auto model = noiseModel::Diagonal::Sigmas(sigmas, false); + +auto f0 = std::make_shared>(X(1), X(2), between0, model); +auto f1 = std::make_shared>(X(1), X(2), between1, model); +} // namespace test_constructor + +/* ************************************************************************* */ +// Test simple to complex constructors... +TEST(HybridGaussianFactor, ConstructorVariants) { + using namespace test_constructor; + HybridNonlinearFactor fromFactors(m1, {f0, f1}); + + std::vector pairs{{f0, 0.0}, {f1, 0.0}}; + HybridNonlinearFactor fromPairs(m1, pairs); + assert_equal(fromFactors, fromPairs); + + HybridNonlinearFactor::FactorValuePairs decisionTree({m1}, pairs); + HybridNonlinearFactor fromDecisionTree({m1}, decisionTree); + assert_equal(fromDecisionTree, fromPairs); +} /* ************************************************************************* */ // Test .print() output. 
-TEST(MixtureFactor, Printing) { - DiscreteKey m1(1, 2); - double between0 = 0.0; - double between1 = 1.0; - - Vector1 sigmas = Vector1(1.0); - auto model = noiseModel::Diagonal::Sigmas(sigmas, false); - - auto f0 = - std::make_shared>(X(1), X(2), between0, model); - auto f1 = - std::make_shared>(X(1), X(2), between1, model); - std::vector factors{f0, f1}; - - MixtureFactor mixtureFactor({X(1), X(2)}, {m1}, factors); +TEST(HybridNonlinearFactor, Printing) { + using namespace test_constructor; + HybridNonlinearFactor hybridFactor({m1}, {f0, f1}); std::string expected = R"(Hybrid [x1 x2; 1] -MixtureFactor +HybridNonlinearFactor Choice(1) 0 Leaf Nonlinear factor on 2 keys 1 Leaf Nonlinear factor on 2 keys )"; - EXPECT(assert_print_equal(expected, mixtureFactor)); + EXPECT(assert_print_equal(expected, hybridFactor)); } /* ************************************************************************* */ -static MixtureFactor getMixtureFactor() { +static HybridNonlinearFactor getHybridNonlinearFactor() { DiscreteKey m1(1, 2); double between0 = 0.0; @@ -83,22 +101,20 @@ static MixtureFactor getMixtureFactor() { std::make_shared>(X(1), X(2), between0, model); auto f1 = std::make_shared>(X(1), X(2), between1, model); - std::vector factors{f0, f1}; - - return MixtureFactor({X(1), X(2)}, {m1}, factors); + return HybridNonlinearFactor(m1, {f0, f1}); } /* ************************************************************************* */ -// Test the error of the MixtureFactor -TEST(MixtureFactor, Error) { - auto mixtureFactor = getMixtureFactor(); +// Test the error of the HybridNonlinearFactor +TEST(HybridNonlinearFactor, Error) { + auto hybridFactor = getHybridNonlinearFactor(); Values continuousValues; continuousValues.insert(X(1), 0); continuousValues.insert(X(2), 1); AlgebraicDecisionTree error_tree = - mixtureFactor.errorTree(continuousValues); + hybridFactor.errorTree(continuousValues); DiscreteKey m1(1, 2); std::vector discrete_keys = {m1}; @@ -109,10 +125,10 @@ 
TEST(MixtureFactor, Error) { } /* ************************************************************************* */ -// Test dim of the MixtureFactor -TEST(MixtureFactor, Dim) { - auto mixtureFactor = getMixtureFactor(); - EXPECT_LONGS_EQUAL(1, mixtureFactor.dim()); +// Test dim of the HybridNonlinearFactor +TEST(HybridNonlinearFactor, Dim) { + auto hybridFactor = getHybridNonlinearFactor(); + EXPECT_LONGS_EQUAL(1, hybridFactor.dim()); } /* ************************************************************************* */ diff --git a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp index 93081d309..647a8b646 100644 --- a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp @@ -23,17 +23,16 @@ #include #include #include +#include #include -#include #include #include +#include #include #include #include #include -#include - #include "Switching.h" // Include for test suite @@ -50,7 +49,7 @@ using symbol_shorthand::X; * Test that any linearizedFactorGraph gaussian factors are appended to the * existing gaussian factor graph in the hybrid factor graph. 
*/ -TEST(HybridFactorGraph, GaussianFactorGraph) { +TEST(HybridNonlinearFactorGraph, GaussianFactorGraph) { HybridNonlinearFactorGraph fg; // Add a simple prior factor to the nonlinear factor graph @@ -105,7 +104,7 @@ TEST(HybridNonlinearFactorGraph, Resize) { auto discreteFactor = std::make_shared(); fg.push_back(discreteFactor); - auto dcFactor = std::make_shared(); + auto dcFactor = std::make_shared(); fg.push_back(dcFactor); EXPECT_LONGS_EQUAL(fg.size(), 3); @@ -114,34 +113,38 @@ TEST(HybridNonlinearFactorGraph, Resize) { EXPECT_LONGS_EQUAL(fg.size(), 0); } +/***************************************************************************/ +namespace test_motion { +gtsam::DiscreteKey m1(M(1), 2); +auto noise_model = noiseModel::Isotropic::Sigma(1, 1.0); +std::vector components = { + std::make_shared(X(0), X(1), 0.0, noise_model), + std::make_shared(X(0), X(1), 1.0, noise_model)}; +} // namespace test_motion + /*************************************************************************** * Test that the resize method works correctly for a * HybridGaussianFactorGraph. 
*/ TEST(HybridGaussianFactorGraph, Resize) { - HybridNonlinearFactorGraph nhfg; + using namespace test_motion; + + HybridNonlinearFactorGraph hnfg; auto nonlinearFactor = std::make_shared>( X(0), X(1), 0.0, Isotropic::Sigma(1, 0.1)); - nhfg.push_back(nonlinearFactor); + hnfg.push_back(nonlinearFactor); auto discreteFactor = std::make_shared(); - nhfg.push_back(discreteFactor); + hnfg.push_back(discreteFactor); - KeyVector contKeys = {X(0), X(1)}; - auto noise_model = noiseModel::Isotropic::Sigma(1, 1.0); - auto still = std::make_shared(X(0), X(1), 0.0, noise_model), - moving = std::make_shared(X(0), X(1), 1.0, noise_model); - - std::vector components = {still, moving}; - auto dcFactor = std::make_shared( - contKeys, DiscreteKeys{gtsam::DiscreteKey(M(1), 2)}, components); - nhfg.push_back(dcFactor); + auto dcFactor = std::make_shared(m1, components); + hnfg.push_back(dcFactor); Values linearizationPoint; linearizationPoint.insert(X(0), 0); linearizationPoint.insert(X(1), 1); // Generate `HybridGaussianFactorGraph` by linearizing - HybridGaussianFactorGraph gfg = *nhfg.linearize(linearizationPoint); + HybridGaussianFactorGraph gfg = *hnfg.linearize(linearizationPoint); EXPECT_LONGS_EQUAL(gfg.size(), 3); @@ -149,36 +152,10 @@ TEST(HybridGaussianFactorGraph, Resize) { EXPECT_LONGS_EQUAL(gfg.size(), 0); } -/*************************************************************************** - * Test that the MixtureFactor reports correctly if the number of continuous - * keys provided do not match the keys in the factors. 
- */ -TEST(HybridGaussianFactorGraph, MixtureFactor) { - auto nonlinearFactor = std::make_shared>( - X(0), X(1), 0.0, Isotropic::Sigma(1, 0.1)); - auto discreteFactor = std::make_shared(); - - auto noise_model = noiseModel::Isotropic::Sigma(1, 1.0); - auto still = std::make_shared(X(0), X(1), 0.0, noise_model), - moving = std::make_shared(X(0), X(1), 1.0, noise_model); - - std::vector components = {still, moving}; - - // Check for exception when number of continuous keys are under-specified. - KeyVector contKeys = {X(0)}; - THROWS_EXCEPTION(std::make_shared( - contKeys, DiscreteKeys{gtsam::DiscreteKey(M(1), 2)}, components)); - - // Check for exception when number of continuous keys are too many. - contKeys = {X(0), X(1), X(2)}; - THROWS_EXCEPTION(std::make_shared( - contKeys, DiscreteKeys{gtsam::DiscreteKey(M(1), 2)}, components)); -} - /***************************************************************************** * Test push_back on HFG makes the correct distinction. */ -TEST(HybridFactorGraph, PushBack) { +TEST(HybridNonlinearFactorGraph, PushBack) { HybridNonlinearFactorGraph fg; auto nonlinearFactor = std::make_shared>(); @@ -195,7 +172,7 @@ TEST(HybridFactorGraph, PushBack) { fg = HybridNonlinearFactorGraph(); - auto dcFactor = std::make_shared(); + auto dcFactor = std::make_shared(); fg.push_back(dcFactor); EXPECT_LONGS_EQUAL(fg.size(), 1); @@ -228,16 +205,47 @@ TEST(HybridFactorGraph, PushBack) { factors.emplace_shared>(1, Pose2(1, 0, 0), noise); factors.emplace_shared>(2, Pose2(2, 0, 0), noise); // TODO(Varun) This does not currently work. It should work once HybridFactor - // becomes a base class of NonlinearFactor. + // becomes a base class of NoiseModelFactor. 
// hnfg.push_back(factors.begin(), factors.end()); // EXPECT_LONGS_EQUAL(3, hnfg.size()); } +/* ****************************************************************************/ +// Test hybrid nonlinear factor graph errorTree +TEST(HybridNonlinearFactorGraph, ErrorTree) { + Switching s(3); + + HybridNonlinearFactorGraph graph = s.nonlinearFactorGraph; + Values values = s.linearizationPoint; + + auto error_tree = graph.errorTree(s.linearizationPoint); + + auto dkeys = graph.discreteKeys(); + DiscreteKeys discrete_keys(dkeys.begin(), dkeys.end()); + + // Compute the sum of errors for each factor. + auto assignments = DiscreteValues::CartesianProduct(discrete_keys); + std::vector leaves(assignments.size()); + for (auto &&factor : graph) { + for (size_t i = 0; i < assignments.size(); ++i) { + leaves[i] += + factor->error(HybridValues(VectorValues(), assignments[i], values)); + } + } + // Swap i=1 and i=2 to give correct ordering. + double temp = leaves[1]; + leaves[1] = leaves[2]; + leaves[2] = temp; + AlgebraicDecisionTree expected_error(discrete_keys, leaves); + + EXPECT(assert_equal(expected_error, error_tree, 1e-7)); +} + /**************************************************************************** * Test construction of switching-like hybrid factor graph. */ -TEST(HybridFactorGraph, Switching) { +TEST(HybridNonlinearFactorGraph, Switching) { Switching self(3); EXPECT_LONGS_EQUAL(7, self.nonlinearFactorGraph.size()); @@ -247,7 +255,7 @@ TEST(HybridFactorGraph, Switching) { /**************************************************************************** * Test linearization on a switching-like hybrid factor graph. 
*/ -TEST(HybridFactorGraph, Linearization) { +TEST(HybridNonlinearFactorGraph, Linearization) { Switching self(3); // Linearize here: @@ -260,7 +268,7 @@ TEST(HybridFactorGraph, Linearization) { /**************************************************************************** * Test elimination tree construction */ -TEST(HybridFactorGraph, EliminationTree) { +TEST(HybridNonlinearFactorGraph, EliminationTree) { Switching self(3); // Create ordering. @@ -278,7 +286,7 @@ TEST(HybridFactorGraph, EliminationTree) { TEST(GaussianElimination, Eliminate_x0) { Switching self(3); - // Gather factors on x1, has a simple Gaussian and a mixture factor. + // Gather factors on x1, has a simple Gaussian and a hybrid factor. HybridGaussianFactorGraph factors; // Add gaussian prior factors.push_back(self.linearizedFactorGraph[0]); @@ -303,7 +311,7 @@ TEST(GaussianElimination, Eliminate_x0) { TEST(HybridsGaussianElimination, Eliminate_x1) { Switching self(3); - // Gather factors on x1, will be two mixture factors (with x0 and x2, resp.). + // Gather factors on x1, will be two hybrid factors (with x0 and x2, resp.). 
HybridGaussianFactorGraph factors; factors.push_back(self.linearizedFactorGraph[1]); // involves m0 factors.push_back(self.linearizedFactorGraph[2]); // involves m1 @@ -346,17 +354,18 @@ TEST(HybridGaussianElimination, EliminateHybrid_2_Variable) { // Eliminate x0 const Ordering ordering{X(0), X(1)}; - const auto [hybridConditionalMixture, factorOnModes] = + const auto [hybridConditional, factorOnModes] = EliminateHybrid(factors, ordering); - auto gaussianConditionalMixture = - dynamic_pointer_cast(hybridConditionalMixture->inner()); + auto hybridGaussianConditional = + dynamic_pointer_cast( + hybridConditional->inner()); - CHECK(gaussianConditionalMixture); + CHECK(hybridGaussianConditional); // Frontals = [x0, x1] - EXPECT_LONGS_EQUAL(2, gaussianConditionalMixture->nrFrontals()); + EXPECT_LONGS_EQUAL(2, hybridGaussianConditional->nrFrontals()); // 1 parent, which is the mode - EXPECT_LONGS_EQUAL(1, gaussianConditionalMixture->nrParents()); + EXPECT_LONGS_EQUAL(1, hybridGaussianConditional->nrParents()); // This is now a discreteFactor auto discreteFactor = dynamic_pointer_cast(factorOnModes); @@ -368,7 +377,7 @@ TEST(HybridGaussianElimination, EliminateHybrid_2_Variable) { /**************************************************************************** * Test partial elimination */ -TEST(HybridFactorGraph, Partial_Elimination) { +TEST(HybridNonlinearFactorGraph, Partial_Elimination) { Switching self(3); auto linearizedFactorGraph = self.linearizedFactorGraph; @@ -397,7 +406,39 @@ TEST(HybridFactorGraph, Partial_Elimination) { EXPECT(remainingFactorGraph->at(2)->keys() == KeyVector({M(0), M(1)})); } -TEST(HybridFactorGraph, PrintErrors) { +/* ****************************************************************************/ +TEST(HybridNonlinearFactorGraph, Error) { + Switching self(3); + HybridNonlinearFactorGraph fg = self.nonlinearFactorGraph; + + { + HybridValues values(VectorValues(), DiscreteValues{{M(0), 0}, {M(1), 0}}, + self.linearizationPoint); + // regression + 
EXPECT_DOUBLES_EQUAL(152.791759469, fg.error(values), 1e-9); + } + { + HybridValues values(VectorValues(), DiscreteValues{{M(0), 0}, {M(1), 1}}, + self.linearizationPoint); + // regression + EXPECT_DOUBLES_EQUAL(151.598612289, fg.error(values), 1e-9); + } + { + HybridValues values(VectorValues(), DiscreteValues{{M(0), 1}, {M(1), 0}}, + self.linearizationPoint); + // regression + EXPECT_DOUBLES_EQUAL(151.703972804, fg.error(values), 1e-9); + } + { + HybridValues values(VectorValues(), DiscreteValues{{M(0), 1}, {M(1), 1}}, + self.linearizationPoint); + // regression + EXPECT_DOUBLES_EQUAL(151.609437912, fg.error(values), 1e-9); + } +} + +/* ****************************************************************************/ +TEST(HybridNonlinearFactorGraph, PrintErrors) { Switching self(3); // Get nonlinear factor graph and add linear factors to be holistic @@ -413,13 +454,14 @@ TEST(HybridFactorGraph, PrintErrors) { // fg.print(); // std::cout << "\n\n\n" << std::endl; // fg.printErrors( - // HybridValues(hv.continuous(), DiscreteValues(), self.linearizationPoint)); + // HybridValues(hv.continuous(), DiscreteValues(), + // self.linearizationPoint)); } /**************************************************************************** * Test full elimination */ -TEST(HybridFactorGraph, Full_Elimination) { +TEST(HybridNonlinearFactorGraph, Full_Elimination) { Switching self(3); auto linearizedFactorGraph = self.linearizedFactorGraph; @@ -438,7 +480,7 @@ TEST(HybridFactorGraph, Full_Elimination) { DiscreteFactorGraph discrete_fg; // TODO(Varun) Make this a function of HybridGaussianFactorGraph? 
- for (auto& factor : (*remainingFactorGraph_partial)) { + for (auto &factor : (*remainingFactorGraph_partial)) { auto df = dynamic_pointer_cast(factor); assert(df); discrete_fg.push_back(df); @@ -487,7 +529,7 @@ TEST(HybridFactorGraph, Full_Elimination) { /**************************************************************************** * Test printing */ -TEST(HybridFactorGraph, Printing) { +TEST(HybridNonlinearFactorGraph, Printing) { Switching self(3); auto linearizedFactorGraph = self.linearizedFactorGraph; @@ -510,6 +552,7 @@ factor 0: b = [ -10 ] No noise model factor 1: +HybridGaussianFactor Hybrid [x0 x1; m0]{ Choice(m0) 0 Leaf : @@ -534,6 +577,7 @@ Hybrid [x0 x1; m0]{ } factor 2: +HybridGaussianFactor Hybrid [x1 x2; m1]{ Choice(m1) 0 Leaf : @@ -675,33 +719,41 @@ factor 6: P( m1 | m0 ): size: 3 conditional 0: Hybrid P( x0 | x1 m0) Discrete Keys = (m0, 2), + logNormalizationConstant: 1.38862 + Choice(m0) 0 Leaf p(x0 | x1) R = [ 10.0499 ] S[x1] = [ -0.0995037 ] d = [ -9.85087 ] + logNormalizationConstant: 1.38862 No noise model 1 Leaf p(x0 | x1) R = [ 10.0499 ] S[x1] = [ -0.0995037 ] d = [ -9.95037 ] + logNormalizationConstant: 1.38862 No noise model conditional 1: Hybrid P( x1 | x2 m0 m1) Discrete Keys = (m0, 2), (m1, 2), + logNormalizationConstant: 1.3935 + Choice(m1) 0 Choice(m0) 0 0 Leaf p(x1 | x2) R = [ 10.099 ] S[x2] = [ -0.0990196 ] d = [ -9.99901 ] + logNormalizationConstant: 1.3935 No noise model 0 1 Leaf p(x1 | x2) R = [ 10.099 ] S[x2] = [ -0.0990196 ] d = [ -9.90098 ] + logNormalizationConstant: 1.3935 No noise model 1 Choice(m0) @@ -709,16 +761,20 @@ conditional 1: Hybrid P( x1 | x2 m0 m1) R = [ 10.099 ] S[x2] = [ -0.0990196 ] d = [ -10.098 ] + logNormalizationConstant: 1.3935 No noise model 1 1 Leaf p(x1 | x2) R = [ 10.099 ] S[x2] = [ -0.0990196 ] d = [ -10 ] + logNormalizationConstant: 1.3935 No noise model conditional 2: Hybrid P( x2 | m0 m1) Discrete Keys = (m0, 2), (m1, 2), + logNormalizationConstant: 1.38857 + Choice(m1) 0 Choice(m0) 0 0 Leaf 
p(x2) @@ -726,6 +782,7 @@ conditional 2: Hybrid P( x2 | m0 m1) d = [ -10.1489 ] mean: 1 elements x2: -1.0099 + logNormalizationConstant: 1.38857 No noise model 0 1 Leaf p(x2) @@ -733,6 +790,7 @@ conditional 2: Hybrid P( x2 | m0 m1) d = [ -10.1479 ] mean: 1 elements x2: -1.0098 + logNormalizationConstant: 1.38857 No noise model 1 Choice(m0) @@ -741,6 +799,7 @@ conditional 2: Hybrid P( x2 | m0 m1) d = [ -10.0504 ] mean: 1 elements x2: -1.0001 + logNormalizationConstant: 1.38857 No noise model 1 1 Leaf p(x2) @@ -748,6 +807,7 @@ conditional 2: Hybrid P( x2 | m0 m1) d = [ -10.0494 ] mean: 1 elements x2: -1 + logNormalizationConstant: 1.38857 No noise model )"; @@ -761,7 +821,7 @@ conditional 2: Hybrid P( x2 | m0 m1) * The issue arises if we eliminate a landmark variable first since it is not * connected to a HybridFactor. */ -TEST(HybridFactorGraph, DefaultDecisionTree) { +TEST(HybridNonlinearFactorGraph, DefaultDecisionTree) { HybridNonlinearFactorGraph fg; // Add a prior on pose x0 at the origin. @@ -775,15 +835,12 @@ TEST(HybridFactorGraph, DefaultDecisionTree) { // Add odometry factor Pose2 odometry(2.0, 0.0, 0.0); - KeyVector contKeys = {X(0), X(1)}; auto noise_model = noiseModel::Isotropic::Sigma(3, 1.0); - auto still = std::make_shared(X(0), X(1), Pose2(0, 0, 0), - noise_model), - moving = std::make_shared(X(0), X(1), odometry, - noise_model); - std::vector motion_models = {still, moving}; - fg.emplace_shared( - contKeys, DiscreteKeys{gtsam::DiscreteKey(M(1), 2)}, motion_models); + std::vector motion_models = { + std::make_shared(X(0), X(1), Pose2(0, 0, 0), + noise_model), + std::make_shared(X(0), X(1), odometry, noise_model)}; + fg.emplace_shared(DiscreteKey{M(1), 2}, motion_models); // Add Range-Bearing measurements to from X0 to L0 and X1 to L1. 
// create a noise model for the landmark measurements @@ -818,9 +875,223 @@ TEST(HybridFactorGraph, DefaultDecisionTree) { EXPECT_LONGS_EQUAL(1, remainingFactorGraph->size()); } +namespace test_relinearization { +/** + * @brief Create a Factor Graph by directly specifying all + * the factors instead of creating conditionals first. + * This way we can directly provide the likelihoods and + * then perform (re-)linearization. + * + * @param means The means of the HybridGaussianFactor components. + * @param sigmas The covariances of the HybridGaussianFactor components. + * @param m1 The discrete key. + * @param x0_measurement A measurement on X0 + * @return HybridGaussianFactorGraph + */ +static HybridNonlinearFactorGraph CreateFactorGraph( + const std::vector &means, const std::vector &sigmas, + DiscreteKey &m1, double x0_measurement, double measurement_noise = 1e-3) { + auto model0 = noiseModel::Isotropic::Sigma(1, sigmas[0]); + auto model1 = noiseModel::Isotropic::Sigma(1, sigmas[1]); + auto prior_noise = noiseModel::Isotropic::Sigma(1, measurement_noise); + + auto f0 = + std::make_shared>(X(0), X(1), means[0], model0); + auto f1 = + std::make_shared>(X(0), X(1), means[1], model1); + + // Create HybridNonlinearFactor + // We take negative since we want + // the underlying scalar to be log(\sqrt(|2πΣ|)) + std::vector factors{{f0, 0.0}, {f1, 0.0}}; + + HybridNonlinearFactor mixtureFactor(m1, factors); + + HybridNonlinearFactorGraph hfg; + hfg.push_back(mixtureFactor); + + hfg.push_back(PriorFactor(X(0), x0_measurement, prior_noise)); + + return hfg; +} +} // namespace test_relinearization + /* ************************************************************************* */ +/** + * @brief Test components with differing means but the same covariances. 
+ * The factor graph is + * *-X1-*-X2 + * | + * M1 + */ +TEST(HybridNonlinearFactorGraph, DifferentMeans) { + using namespace test_relinearization; + + DiscreteKey m1(M(1), 2); + + Values values; + double x0 = 0.0, x1 = 1.75; + values.insert(X(0), x0); + values.insert(X(1), x1); + + std::vector means = {0.0, 2.0}, sigmas = {1e-0, 1e-0}; + + HybridNonlinearFactorGraph hfg = CreateFactorGraph(means, sigmas, m1, x0); + + { + auto bn = hfg.linearize(values)->eliminateSequential(); + HybridValues actual = bn->optimize(); + + HybridValues expected( + VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(-1.75)}}, + DiscreteValues{{M(1), 0}}); + + EXPECT(assert_equal(expected, actual)); + + DiscreteValues dv0{{M(1), 0}}; + VectorValues cont0 = bn->optimize(dv0); + double error0 = bn->error(HybridValues(cont0, dv0)); + + // TODO(Varun) Perform importance sampling to estimate error? + + // regression + EXPECT_DOUBLES_EQUAL(0.69314718056, error0, 1e-9); + + DiscreteValues dv1{{M(1), 1}}; + VectorValues cont1 = bn->optimize(dv1); + double error1 = bn->error(HybridValues(cont1, dv1)); + EXPECT_DOUBLES_EQUAL(error0, error1, 1e-9); + } + + { + // Add measurement on x1 + auto prior_noise = noiseModel::Isotropic::Sigma(1, 1e-3); + hfg.push_back(PriorFactor(X(1), means[1], prior_noise)); + + auto bn = hfg.linearize(values)->eliminateSequential(); + HybridValues actual = bn->optimize(); + + HybridValues expected( + VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(0.25)}}, + DiscreteValues{{M(1), 1}}); + + EXPECT(assert_equal(expected, actual)); + + { + DiscreteValues dv{{M(1), 0}}; + VectorValues cont = bn->optimize(dv); + double error = bn->error(HybridValues(cont, dv)); + // regression + EXPECT_DOUBLES_EQUAL(2.12692448787, error, 1e-9); + } + { + DiscreteValues dv{{M(1), 1}}; + VectorValues cont = bn->optimize(dv); + double error = bn->error(HybridValues(cont, dv)); + // regression + EXPECT_DOUBLES_EQUAL(0.126928487854, error, 1e-9); + } + } +} + +/* 
************************************************************************* */ +/** + * @brief Test components with differing covariances but the same means. + * The factor graph is + * *-X1-*-X2 + * | + * M1 + */ +TEST(HybridNonlinearFactorGraph, DifferentCovariances) { + using namespace test_relinearization; + + DiscreteKey m1(M(1), 2); + + Values values; + double x0 = 1.0, x1 = 1.0; + values.insert(X(0), x0); + values.insert(X(1), x1); + + std::vector means = {0.0, 0.0}, sigmas = {1e2, 1e-2}; + + // Create FG with HybridNonlinearFactor and prior on X1 + HybridNonlinearFactorGraph hfg = CreateFactorGraph(means, sigmas, m1, x0); + // Linearize + auto hgfg = hfg.linearize(values); + // and eliminate + auto hbn = hgfg->eliminateSequential(); + + VectorValues cv; + cv.insert(X(0), Vector1(0.0)); + cv.insert(X(1), Vector1(0.0)); + + // Check that the error values at the MLE point μ. + AlgebraicDecisionTree errorTree = hbn->errorTree(cv); + + DiscreteValues dv0{{M(1), 0}}; + DiscreteValues dv1{{M(1), 1}}; + + // regression + EXPECT_DOUBLES_EQUAL(9.90348755254, errorTree(dv0), 1e-9); + EXPECT_DOUBLES_EQUAL(0.69314718056, errorTree(dv1), 1e-9); + + DiscreteConditional expected_m1(m1, "0.5/0.5"); + DiscreteConditional actual_m1 = *(hbn->at(2)->asDiscrete()); + + EXPECT(assert_equal(expected_m1, actual_m1)); +} + +TEST(HybridNonlinearFactorGraph, Relinearization) { + using namespace test_relinearization; + + DiscreteKey m1(M(1), 2); + + Values values; + double x0 = 0.0, x1 = 0.8; + values.insert(X(0), x0); + values.insert(X(1), x1); + + std::vector means = {0.0, 1.0}, sigmas = {1e-2, 1e-2}; + + double prior_sigma = 1e-2; + // Create FG with HybridNonlinearFactor and prior on X1 + HybridNonlinearFactorGraph hfg = + CreateFactorGraph(means, sigmas, m1, 0.0, prior_sigma); + hfg.push_back(PriorFactor( + X(1), 1.2, noiseModel::Isotropic::Sigma(1, prior_sigma))); + + // Linearize + auto hgfg = hfg.linearize(values); + // and eliminate + auto hbn = hgfg->eliminateSequential(); + + 
HybridValues delta = hbn->optimize(); + values = values.retract(delta.continuous()); + + Values expected_first_result; + expected_first_result.insert(X(0), 0.0666666666667); + expected_first_result.insert(X(1), 1.13333333333); + EXPECT(assert_equal(expected_first_result, values)); + + // Re-linearize + hgfg = hfg.linearize(values); + // and eliminate + hbn = hgfg->eliminateSequential(); + delta = hbn->optimize(); + HybridValues result(delta.continuous(), delta.discrete(), + values.retract(delta.continuous())); + + HybridValues expected_result( + VectorValues{{X(0), Vector1(0)}, {X(1), Vector1(0)}}, + DiscreteValues{{M(1), 1}}, expected_first_result); + EXPECT(assert_equal(expected_result, result)); +} + +/* ************************************************************************* + */ int main() { TestResult tr; return TestRegistry::runAllTests(tr); } -/* ************************************************************************* */ +/* ************************************************************************* + */ diff --git a/gtsam/hybrid/tests/testHybridNonlinearISAM.cpp b/gtsam/hybrid/tests/testHybridNonlinearISAM.cpp index 2fb6fd8ba..b804a176b 100644 --- a/gtsam/hybrid/tests/testHybridNonlinearISAM.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearISAM.cpp @@ -143,7 +143,7 @@ TEST(HybridNonlinearISAM, IncrementalInference) { /********************************************************/ // Run batch elimination so we can compare results. 
- const Ordering ordering {X(0), X(1), X(2)}; + const Ordering ordering{X(0), X(1), X(2)}; // Now we calculate the actual factors using full elimination const auto [expectedHybridBayesTree, expectedRemainingGraph] = @@ -151,24 +151,27 @@ TEST(HybridNonlinearISAM, IncrementalInference) { .BaseEliminateable::eliminatePartialMultifrontal(ordering); // The densities on X(1) should be the same - auto x0_conditional = dynamic_pointer_cast( + auto x0_conditional = dynamic_pointer_cast( bayesTree[X(0)]->conditional()->inner()); - auto expected_x0_conditional = dynamic_pointer_cast( - (*expectedHybridBayesTree)[X(0)]->conditional()->inner()); + auto expected_x0_conditional = + dynamic_pointer_cast( + (*expectedHybridBayesTree)[X(0)]->conditional()->inner()); EXPECT(assert_equal(*x0_conditional, *expected_x0_conditional)); // The densities on X(1) should be the same - auto x1_conditional = dynamic_pointer_cast( + auto x1_conditional = dynamic_pointer_cast( bayesTree[X(1)]->conditional()->inner()); - auto expected_x1_conditional = dynamic_pointer_cast( - (*expectedHybridBayesTree)[X(1)]->conditional()->inner()); + auto expected_x1_conditional = + dynamic_pointer_cast( + (*expectedHybridBayesTree)[X(1)]->conditional()->inner()); EXPECT(assert_equal(*x1_conditional, *expected_x1_conditional)); // The densities on X(2) should be the same - auto x2_conditional = dynamic_pointer_cast( + auto x2_conditional = dynamic_pointer_cast( bayesTree[X(2)]->conditional()->inner()); - auto expected_x2_conditional = dynamic_pointer_cast( - (*expectedHybridBayesTree)[X(2)]->conditional()->inner()); + auto expected_x2_conditional = + dynamic_pointer_cast( + (*expectedHybridBayesTree)[X(2)]->conditional()->inner()); EXPECT(assert_equal(*x2_conditional, *expected_x2_conditional)); // We only perform manual continuous elimination for 0,0. 
@@ -300,9 +303,9 @@ TEST(HybridNonlinearISAM, Approx_inference) { // Check that the hybrid nodes of the bayes net match those of the pre-pruning // bayes net, at the same positions. - auto &unprunedLastDensity = *dynamic_pointer_cast( + auto &unprunedLastDensity = *dynamic_pointer_cast( unprunedHybridBayesTree->clique(X(3))->conditional()->inner()); - auto &lastDensity = *dynamic_pointer_cast( + auto &lastDensity = *dynamic_pointer_cast( bayesTree[X(3)]->conditional()->inner()); std::vector> assignments = @@ -355,13 +358,13 @@ TEST(HybridNonlinearISAM, Incremental_approximate) { // each with 2, 4, 8, and 5 (pruned) leaves respetively. EXPECT_LONGS_EQUAL(4, bayesTree.size()); EXPECT_LONGS_EQUAL( - 2, bayesTree[X(0)]->conditional()->asMixture()->nrComponents()); + 2, bayesTree[X(0)]->conditional()->asHybrid()->nrComponents()); EXPECT_LONGS_EQUAL( - 3, bayesTree[X(1)]->conditional()->asMixture()->nrComponents()); + 3, bayesTree[X(1)]->conditional()->asHybrid()->nrComponents()); EXPECT_LONGS_EQUAL( - 5, bayesTree[X(2)]->conditional()->asMixture()->nrComponents()); + 5, bayesTree[X(2)]->conditional()->asHybrid()->nrComponents()); EXPECT_LONGS_EQUAL( - 5, bayesTree[X(3)]->conditional()->asMixture()->nrComponents()); + 5, bayesTree[X(3)]->conditional()->asHybrid()->nrComponents()); /***** Run Round 2 *****/ HybridGaussianFactorGraph graph2; @@ -379,9 +382,9 @@ TEST(HybridNonlinearISAM, Incremental_approximate) { // with 5 (pruned) leaves. 
CHECK_EQUAL(5, bayesTree.size()); EXPECT_LONGS_EQUAL( - 5, bayesTree[X(3)]->conditional()->asMixture()->nrComponents()); + 5, bayesTree[X(3)]->conditional()->asHybrid()->nrComponents()); EXPECT_LONGS_EQUAL( - 5, bayesTree[X(4)]->conditional()->asMixture()->nrComponents()); + 5, bayesTree[X(4)]->conditional()->asHybrid()->nrComponents()); } /* ************************************************************************/ @@ -410,11 +413,11 @@ TEST(HybridNonlinearISAM, NonTrivial) { // Add connecting poses similar to PoseFactors in GTD fg.emplace_shared>(X(0), Y(0), Pose2(0, 1.0, 0), - poseNoise); + poseNoise); fg.emplace_shared>(Y(0), Z(0), Pose2(0, 1.0, 0), - poseNoise); + poseNoise); fg.emplace_shared>(Z(0), W(0), Pose2(0, 1.0, 0), - poseNoise); + poseNoise); // Create initial estimate Values initial; @@ -430,27 +433,24 @@ TEST(HybridNonlinearISAM, NonTrivial) { /*************** Run Round 2 ***************/ // Add odometry factor with discrete modes. Pose2 odometry(1.0, 0.0, 0.0); - KeyVector contKeys = {W(0), W(1)}; auto noise_model = noiseModel::Isotropic::Sigma(3, 1.0); auto still = std::make_shared(W(0), W(1), Pose2(0, 0, 0), - noise_model), + noise_model), moving = std::make_shared(W(0), W(1), odometry, - noise_model); - std::vector components = {moving, still}; - auto mixtureFactor = std::make_shared( - contKeys, DiscreteKeys{gtsam::DiscreteKey(M(1), 2)}, components); - fg.push_back(mixtureFactor); + noise_model); + std::vector components{moving, still}; + fg.emplace_shared(DiscreteKey(M(1), 2), components); // Add equivalent of ImuFactor fg.emplace_shared>(X(0), X(1), Pose2(1.0, 0.0, 0), - poseNoise); + poseNoise); // PoseFactors-like at k=1 fg.emplace_shared>(X(1), Y(1), Pose2(0, 1, 0), - poseNoise); + poseNoise); fg.emplace_shared>(Y(1), Z(1), Pose2(0, 1, 0), - poseNoise); + poseNoise); fg.emplace_shared>(Z(1), W(1), Pose2(-1, 1, 0), - poseNoise); + poseNoise); initial.insert(X(1), Pose2(1.0, 0.0, 0.0)); initial.insert(Y(1), Pose2(1.0, 1.0, 0.0)); @@ -471,26 
+471,23 @@ TEST(HybridNonlinearISAM, NonTrivial) { /*************** Run Round 3 ***************/ // Add odometry factor with discrete modes. - contKeys = {W(1), W(2)}; still = std::make_shared(W(1), W(2), Pose2(0, 0, 0), - noise_model); + noise_model); moving = std::make_shared(W(1), W(2), odometry, noise_model); components = {moving, still}; - mixtureFactor = std::make_shared( - contKeys, DiscreteKeys{gtsam::DiscreteKey(M(2), 2)}, components); - fg.push_back(mixtureFactor); + fg.emplace_shared(DiscreteKey(M(2), 2), components); // Add equivalent of ImuFactor fg.emplace_shared>(X(1), X(2), Pose2(1.0, 0.0, 0), - poseNoise); + poseNoise); // PoseFactors-like at k=1 fg.emplace_shared>(X(2), Y(2), Pose2(0, 1, 0), - poseNoise); + poseNoise); fg.emplace_shared>(Y(2), Z(2), Pose2(0, 1, 0), - poseNoise); + poseNoise); fg.emplace_shared>(Z(2), W(2), Pose2(-2, 1, 0), - poseNoise); + poseNoise); initial.insert(X(2), Pose2(2.0, 0.0, 0.0)); initial.insert(Y(2), Pose2(2.0, 1.0, 0.0)); @@ -514,26 +511,23 @@ TEST(HybridNonlinearISAM, NonTrivial) { /*************** Run Round 4 ***************/ // Add odometry factor with discrete modes. 
- contKeys = {W(2), W(3)}; still = std::make_shared(W(2), W(3), Pose2(0, 0, 0), - noise_model); + noise_model); moving = std::make_shared(W(2), W(3), odometry, noise_model); components = {moving, still}; - mixtureFactor = std::make_shared( - contKeys, DiscreteKeys{gtsam::DiscreteKey(M(3), 2)}, components); - fg.push_back(mixtureFactor); + fg.emplace_shared(DiscreteKey(M(3), 2), components); // Add equivalent of ImuFactor fg.emplace_shared>(X(2), X(3), Pose2(1.0, 0.0, 0), - poseNoise); + poseNoise); // PoseFactors-like at k=3 fg.emplace_shared>(X(3), Y(3), Pose2(0, 1, 0), - poseNoise); + poseNoise); fg.emplace_shared>(Y(3), Z(3), Pose2(0, 1, 0), - poseNoise); + poseNoise); fg.emplace_shared>(Z(3), W(3), Pose2(-3, 1, 0), - poseNoise); + poseNoise); initial.insert(X(3), Pose2(3.0, 0.0, 0.0)); initial.insert(Y(3), Pose2(3.0, 1.0, 0.0)); @@ -568,7 +562,7 @@ TEST(HybridNonlinearISAM, NonTrivial) { // Test if pruning worked correctly by checking that // we only have 3 leaves in the last node. - auto lastConditional = bayesTree[X(3)]->conditional()->asMixture(); + auto lastConditional = bayesTree[X(3)]->conditional()->asHybrid(); EXPECT_LONGS_EQUAL(3, lastConditional->nrComponents()); } diff --git a/gtsam/hybrid/tests/testSerializationHybrid.cpp b/gtsam/hybrid/tests/testSerializationHybrid.cpp index 961618626..e09669117 100644 --- a/gtsam/hybrid/tests/testSerializationHybrid.cpp +++ b/gtsam/hybrid/tests/testSerializationHybrid.cpp @@ -18,11 +18,11 @@ #include #include -#include -#include #include #include #include +#include +#include #include #include @@ -51,31 +51,31 @@ BOOST_CLASS_EXPORT_GUID(ADT, "gtsam_AlgebraicDecisionTree"); BOOST_CLASS_EXPORT_GUID(ADT::Leaf, "gtsam_AlgebraicDecisionTree_Leaf"); BOOST_CLASS_EXPORT_GUID(ADT::Choice, "gtsam_AlgebraicDecisionTree_Choice") -BOOST_CLASS_EXPORT_GUID(GaussianMixtureFactor, "gtsam_GaussianMixtureFactor"); -BOOST_CLASS_EXPORT_GUID(GaussianMixtureFactor::Factors, - "gtsam_GaussianMixtureFactor_Factors"); 
-BOOST_CLASS_EXPORT_GUID(GaussianMixtureFactor::Factors::Leaf, - "gtsam_GaussianMixtureFactor_Factors_Leaf"); -BOOST_CLASS_EXPORT_GUID(GaussianMixtureFactor::Factors::Choice, - "gtsam_GaussianMixtureFactor_Factors_Choice"); +BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor, "gtsam_HybridGaussianFactor"); +BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::Factors, + "gtsam_HybridGaussianFactor_Factors"); +BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::Factors::Leaf, + "gtsam_HybridGaussianFactor_Factors_Leaf"); +BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::Factors::Choice, + "gtsam_HybridGaussianFactor_Factors_Choice"); -BOOST_CLASS_EXPORT_GUID(GaussianMixture, "gtsam_GaussianMixture"); -BOOST_CLASS_EXPORT_GUID(GaussianMixture::Conditionals, - "gtsam_GaussianMixture_Conditionals"); -BOOST_CLASS_EXPORT_GUID(GaussianMixture::Conditionals::Leaf, - "gtsam_GaussianMixture_Conditionals_Leaf"); -BOOST_CLASS_EXPORT_GUID(GaussianMixture::Conditionals::Choice, - "gtsam_GaussianMixture_Conditionals_Choice"); +BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional, + "gtsam_HybridGaussianConditional"); +BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional::Conditionals, + "gtsam_HybridGaussianConditional_Conditionals"); +BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional::Conditionals::Leaf, + "gtsam_HybridGaussianConditional_Conditionals_Leaf"); +BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional::Conditionals::Choice, + "gtsam_HybridGaussianConditional_Conditionals_Choice"); // Needed since GaussianConditional::FromMeanAndStddev uses it BOOST_CLASS_EXPORT_GUID(noiseModel::Isotropic, "gtsam_noiseModel_Isotropic"); BOOST_CLASS_EXPORT_GUID(HybridBayesNet, "gtsam_HybridBayesNet"); /* ****************************************************************************/ -// Test GaussianMixtureFactor serialization. -TEST(HybridSerialization, GaussianMixtureFactor) { - KeyVector continuousKeys{X(0)}; - DiscreteKeys discreteKeys{{M(0), 2}}; +// Test HybridGaussianFactor serialization. 
+TEST(HybridSerialization, HybridGaussianFactor) { + DiscreteKey discreteKey{M(0), 2}; auto A = Matrix::Zero(2, 1); auto b0 = Matrix::Zero(2, 1); @@ -84,11 +84,11 @@ TEST(HybridSerialization, GaussianMixtureFactor) { auto f1 = std::make_shared(X(0), A, b1); std::vector factors{f0, f1}; - const GaussianMixtureFactor factor(continuousKeys, discreteKeys, factors); + const HybridGaussianFactor factor(discreteKey, factors); - EXPECT(equalsObj(factor)); - EXPECT(equalsXML(factor)); - EXPECT(equalsBinary(factor)); + EXPECT(equalsObj(factor)); + EXPECT(equalsXML(factor)); + EXPECT(equalsBinary(factor)); } /* ****************************************************************************/ @@ -106,20 +106,19 @@ TEST(HybridSerialization, HybridConditional) { } /* ****************************************************************************/ -// Test GaussianMixture serialization. -TEST(HybridSerialization, GaussianMixture) { +// Test HybridGaussianConditional serialization. +TEST(HybridSerialization, HybridGaussianConditional) { const DiscreteKey mode(M(0), 2); Matrix1 I = Matrix1::Identity(); const auto conditional0 = std::make_shared( GaussianConditional::FromMeanAndStddev(Z(0), I, X(0), Vector1(0), 0.5)); const auto conditional1 = std::make_shared( GaussianConditional::FromMeanAndStddev(Z(0), I, X(0), Vector1(0), 3)); - const GaussianMixture gm({Z(0)}, {X(0)}, {mode}, - {conditional0, conditional1}); + const HybridGaussianConditional gm(mode, {conditional0, conditional1}); - EXPECT(equalsObj(gm)); - EXPECT(equalsXML(gm)); - EXPECT(equalsBinary(gm)); + EXPECT(equalsObj(gm)); + EXPECT(equalsXML(gm)); + EXPECT(equalsBinary(gm)); } /* ****************************************************************************/ diff --git a/gtsam/inference/Conditional-inst.h b/gtsam/inference/Conditional-inst.h index 9377e8cc4..c21a75d26 100644 --- a/gtsam/inference/Conditional-inst.h +++ b/gtsam/inference/Conditional-inst.h @@ -59,16 +59,8 @@ double Conditional::evaluate( /* 
************************************************************************* */ template -double Conditional::logNormalizationConstant() - const { - throw std::runtime_error( - "Conditional::logNormalizationConstant is not implemented"); -} - -/* ************************************************************************* */ -template -double Conditional::normalizationConstant() const { - return std::exp(logNormalizationConstant()); +double Conditional::negLogConstant() const { + throw std::runtime_error("Conditional::negLogConstant is not implemented"); } /* ************************************************************************* */ @@ -83,13 +75,9 @@ bool Conditional::CheckInvariants( const double logProb = conditional.logProbability(values); if (std::abs(prob_or_density - std::exp(logProb)) > 1e-9) return false; // logProb is not consistent with prob_or_density - if (std::abs(conditional.logNormalizationConstant() - - std::log(conditional.normalizationConstant())) > 1e-9) - return false; // log normalization constant is not consistent with - // normalization constant const double error = conditional.error(values); if (error < 0.0) return false; // prob_or_density is negative. - const double expected = conditional.logNormalizationConstant() - error; + const double expected = -(conditional.negLogConstant() + error); if (std::abs(logProb - expected) > 1e-9) return false; // logProb is not consistent with error return true; diff --git a/gtsam/inference/Conditional.h b/gtsam/inference/Conditional.h index 0ab8b49a4..f9da36b7b 100644 --- a/gtsam/inference/Conditional.h +++ b/gtsam/inference/Conditional.h @@ -34,11 +34,13 @@ namespace gtsam { * `logProbability` is the main methods that need to be implemented in derived * classes. These two methods relate to the `error` method in the factor by: * probability(x) = k exp(-error(x)) - * where k is a normalization constant making \int probability(x) == 1.0, and - * logProbability(x) = K - error(x) - * i.e., K = log(K). 
The normalization constant K is assumed to *not* depend + * where k is a normalization constant making + * \int probability(x) = \int k exp(-error(x)) == 1.0, and + * logProbability(x) = -(K + error(x)) + * i.e., K = -log(k). The normalization constant k is assumed to *not* depend * on any argument, only (possibly) on the conditional parameters. - * This class provides a default logNormalizationConstant() == 0.0. + * This class provides a default negative log normalization constant + * negLogConstant() == 0.0. * * There are four broad classes of conditionals that derive from Conditional: * @@ -46,7 +48,7 @@ namespace gtsam { * Gaussian density over a set of continuous variables. * - \b Discrete conditionals, implemented in \class DiscreteConditional, which * represent a discrete conditional distribution over discrete variables. - * - \b Hybrid conditional densities, such as \class GaussianMixture, which is + * - \b Hybrid conditional densities, such as \class HybridGaussianConditional, which is * a density over continuous variables given discrete/continuous parents. * - \b Symbolic factors, used to represent a graph structure, implemented in * \class SymbolicConditional. Only used for symbolic elimination etc. @@ -163,13 +165,12 @@ namespace gtsam { } /** - * All conditional types need to implement a log normalization constant to - * make it such that error>=0. + * @brief All conditional types need to implement this as the negative log + * of the normalization constant to make it such that error>=0. + * + * @return double */ - virtual double logNormalizationConstant() const; - - /** Non-virtual, exponentiate logNormalizationConstant. 
*/ - double normalizationConstant() const; + virtual double negLogConstant() const; /// @} /// @name Advanced Interface @@ -208,9 +209,9 @@ namespace gtsam { * - evaluate >= 0.0 * - evaluate(x) == conditional(x) * - exp(logProbability(x)) == evaluate(x) - * - logNormalizationConstant() = log(normalizationConstant()) + * - negLogConstant() = -log(normalizationConstant()) * - error >= 0.0 - * - logProbability(x) == logNormalizationConstant() - error(x) + * - logProbability(x) == -(negLogConstant() + error(x)) * * @param conditional The conditional to test, as a reference to the derived type. * @tparam VALUES HybridValues, or a more narrow type like DiscreteValues. diff --git a/gtsam/inference/Factor.h b/gtsam/inference/Factor.h index e357a9c88..074151e16 100644 --- a/gtsam/inference/Factor.h +++ b/gtsam/inference/Factor.h @@ -24,12 +24,13 @@ #ifdef GTSAM_ENABLE_BOOST_SERIALIZATION #include #endif -#include - -#include #include +#include #include +#include +#include + namespace gtsam { /// Define collection types: diff --git a/gtsam/linear/GaussianBayesNet.cpp b/gtsam/linear/GaussianBayesNet.cpp index b04878ac5..f8c4ddc4c 100644 --- a/gtsam/linear/GaussianBayesNet.cpp +++ b/gtsam/linear/GaussianBayesNet.cpp @@ -243,5 +243,26 @@ namespace gtsam { } /* ************************************************************************* */ + double GaussianBayesNet::negLogConstant() const { + /* + normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma)) + negLogConstant = -log(normalizationConstant) + = 0.5 * n*log(2*pi) + 0.5 * log(det(Sigma)) + + log(det(Sigma)) = -2.0 * logDeterminant() + thus, negLogConstant = 0.5*n*log(2*pi) - logDeterminant() + + BayesNet negLogConstant = sum(0.5*n_i*log(2*pi) - logDeterminant_i()) + = sum(0.5*n_i*log(2*pi)) + sum(logDeterminant_i()) + = sum(0.5*n_i*log(2*pi)) + bn->logDeterminant() + */ + double negLogNormConst = 0.0; + for (const sharedConditional& cg : *this) { + negLogNormConst += cg->negLogConstant(); + } + return negLogNormConst; + } + 
+ /* ************************************************************************* */ } // namespace gtsam diff --git a/gtsam/linear/GaussianBayesNet.h b/gtsam/linear/GaussianBayesNet.h index 19781d1e6..e858bbe43 100644 --- a/gtsam/linear/GaussianBayesNet.h +++ b/gtsam/linear/GaussianBayesNet.h @@ -82,6 +82,12 @@ namespace gtsam { /** Check equality */ bool equals(const This& bn, double tol = 1e-9) const; + /// Check exact equality. + friend bool operator==(const GaussianBayesNet& lhs, + const GaussianBayesNet& rhs) { + return lhs.isEqual(rhs); + } + /// print graph void print( const std::string& s = "", @@ -228,6 +234,14 @@ namespace gtsam { * @return The determinant */ double logDeterminant() const; + /** + * @brief Get the negative log of the normalization constant corresponding + * to the joint Gaussian density represented by this Bayes net. + * + * @return double + */ + double negLogConstant() const; + /** * Backsubstitute with a different RHS vector than the one stored in this BayesNet. * gy=inv(R*inv(Sigma))*gx diff --git a/gtsam/linear/GaussianConditional.cpp b/gtsam/linear/GaussianConditional.cpp index 0112835aa..7735e5129 100644 --- a/gtsam/linear/GaussianConditional.cpp +++ b/gtsam/linear/GaussianConditional.cpp @@ -121,6 +121,7 @@ namespace gtsam { const auto mean = solve({}); // solve for mean. 
mean.print(" mean", formatter); } + cout << " logNormalizationConstant: " << -negLogConstant() << endl; if (model_) model_->print(" Noise model: "); else @@ -180,19 +181,24 @@ namespace gtsam { /* ************************************************************************* */ // normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma)) - // log = - 0.5 * n*log(2*pi) - 0.5 * log det(Sigma) - double GaussianConditional::logNormalizationConstant() const { + // neg-log = 0.5 * n*log(2*pi) + 0.5 * log det(Sigma) + double GaussianConditional::negLogConstant() const { constexpr double log2pi = 1.8378770664093454835606594728112; size_t n = d().size(); - // log det(Sigma)) = - 2.0 * logDeterminant() - return - 0.5 * n * log2pi + logDeterminant(); + // Sigma = (R'R)^{-1}, det(Sigma) = det((R'R)^{-1}) = det(R'R)^{-1} + // log det(Sigma) = -log(det(R'R)) = -2*log(det(R)) + // Hence, log det(Sigma)) = -2.0 * logDeterminant() + // which gives neg-log = 0.5*n*log(2*pi) + 0.5*(-2.0 * logDeterminant()) + // = 0.5*n*log(2*pi) - (0.5*2.0 * logDeterminant()) + // = 0.5*n*log(2*pi) - logDeterminant() + return 0.5 * n * log2pi - logDeterminant(); } /* ************************************************************************* */ // density = k exp(-error(x)) // log = log(k) - error(x) double GaussianConditional::logProbability(const VectorValues& x) const { - return logNormalizationConstant() - error(x); + return -(negLogConstant() + error(x)); } double GaussianConditional::logProbability(const HybridValues& x) const { diff --git a/gtsam/linear/GaussianConditional.h b/gtsam/linear/GaussianConditional.h index 420efabca..27270fece 100644 --- a/gtsam/linear/GaussianConditional.h +++ b/gtsam/linear/GaussianConditional.h @@ -133,10 +133,14 @@ namespace gtsam { /// @{ /** - * normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma)) - * log = - 0.5 * n*log(2*pi) - 0.5 * log det(Sigma) + * @brief Return the negative log of the normalization constant. 
+ * + * normalization constant k = 1.0 / sqrt((2*pi)^n*det(Sigma)) + * -log(k) = 0.5 * n*log(2*pi) + 0.5 * log det(Sigma) + * + * @return double */ - double logNormalizationConstant() const override; + double negLogConstant() const override; /** * Calculate log-probability log(evaluate(x)) for given values `x`: diff --git a/gtsam/linear/IterativeSolver.cpp b/gtsam/linear/IterativeSolver.cpp index c5a25c4b8..e9b0a5f13 100644 --- a/gtsam/linear/IterativeSolver.cpp +++ b/gtsam/linear/IterativeSolver.cpp @@ -46,6 +46,12 @@ void IterativeOptimizationParameters::print(ostream &os) const { << verbosityTranslator(verbosity_) << endl; } +/*****************************************************************************/ +bool IterativeOptimizationParameters::equals( + const IterativeOptimizationParameters &other, double tol) const { + return verbosity_ == other.verbosity(); +} + /*****************************************************************************/ ostream& operator<<(ostream &os, const IterativeOptimizationParameters &p) { p.print(os); diff --git a/gtsam/linear/IterativeSolver.h b/gtsam/linear/IterativeSolver.h index 0441cd9da..1a66708d4 100644 --- a/gtsam/linear/IterativeSolver.h +++ b/gtsam/linear/IterativeSolver.h @@ -41,15 +41,14 @@ class VectorValues; * parameters for iterative linear solvers */ class IterativeOptimizationParameters { - -public: - + public: typedef std::shared_ptr shared_ptr; - enum Verbosity { - SILENT = 0, COMPLEXITY, ERROR - } verbosity_; + enum Verbosity { SILENT = 0, COMPLEXITY, ERROR }; -public: + protected: + Verbosity verbosity_; + + public: IterativeOptimizationParameters(Verbosity v = SILENT) : verbosity_(v) { @@ -71,6 +70,9 @@ public: /* virtual print function */ GTSAM_EXPORT virtual void print(std::ostream &os) const; + GTSAM_EXPORT virtual bool equals(const IterativeOptimizationParameters &other, + double tol = 1e-9) const; + /* for serialization */ GTSAM_EXPORT friend std::ostream &operator<<( std::ostream &os, const 
IterativeOptimizationParameters &p); diff --git a/gtsam/linear/NoiseModel.cpp b/gtsam/linear/NoiseModel.cpp index 7ea8e5a54..884c87270 100644 --- a/gtsam/linear/NoiseModel.cpp +++ b/gtsam/linear/NoiseModel.cpp @@ -238,13 +238,38 @@ void Gaussian::WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const Matrix Gaussian::information() const { return R().transpose() * R(); } +/* *******************************************************************************/ +double Gaussian::logDetR() const { + double logDetR = + R().diagonal().unaryExpr([](double x) { return log(x); }).sum(); + return logDetR; +} + +/* *******************************************************************************/ +double Gaussian::logDeterminant() const { + // Since noise models are Gaussian, we can get the logDeterminant easily + // Sigma = (R'R)^{-1}, det(Sigma) = det((R'R)^{-1}) = det(R'R)^{-1} + // log det(Sigma) = -log(det(R'R)) = -2*log(det(R)) + // Hence, log det(Sigma)) = -2.0 * logDetR() + return -2.0 * logDetR(); +} + +/* *******************************************************************************/ +double Gaussian::negLogConstant() const { + // log(det(Sigma)) = -2.0 * logDetR + // which gives neg-log = 0.5*n*log(2*pi) + 0.5*(-2.0 * logDetR()) + // = 0.5*n*log(2*pi) - (0.5*2.0 * logDetR()) + // = 0.5*n*log(2*pi) - logDetR() + size_t n = dim(); + constexpr double log2pi = 1.8378770664093454835606594728112; + // Get -log(1/\sqrt(|2pi Sigma|)) = 0.5*log(|2pi Sigma|) + return 0.5 * n * log2pi - logDetR(); +} + /* ************************************************************************* */ // Diagonal /* ************************************************************************* */ -Diagonal::Diagonal() : - Gaussian(1) // TODO: Frank asks: really sure about this? 
-{ -} +Diagonal::Diagonal() : Gaussian() {} /* ************************************************************************* */ Diagonal::Diagonal(const Vector& sigmas) @@ -256,31 +281,30 @@ Diagonal::Diagonal(const Vector& sigmas) /* ************************************************************************* */ Diagonal::shared_ptr Diagonal::Variances(const Vector& variances, bool smart) { - if (smart) { - // check whether all the same entry - size_t n = variances.size(); - for (size_t j = 1; j < n; j++) - if (variances(j) != variances(0)) goto full; - return Isotropic::Variance(n, variances(0), true); - } - full: return shared_ptr(new Diagonal(variances.cwiseSqrt())); + // check whether all the same entry + return (smart && (variances.array() == variances(0)).all()) + ? Isotropic::Variance(variances.size(), variances(0), true) + : shared_ptr(new Diagonal(variances.cwiseSqrt())); } /* ************************************************************************* */ Diagonal::shared_ptr Diagonal::Sigmas(const Vector& sigmas, bool smart) { if (smart) { size_t n = sigmas.size(); - if (n==0) goto full; + if (n == 0) goto full; + // look for zeros to make a constraint - for (size_t j=0; j< n; ++j) - if (sigmas(j)<1e-8) - return Constrained::MixedSigmas(sigmas); + if ((sigmas.array() < 1e-8).any()) { + return Constrained::MixedSigmas(sigmas); + } + // check whether all the same entry - for (size_t j = 1; j < n; j++) - if (sigmas(j) != sigmas(0)) goto full; - return Isotropic::Sigma(n, sigmas(0), true); + if ((sigmas.array() == sigmas(0)).all()) { + return Isotropic::Sigma(n, sigmas(0), true); + } } - full: return Diagonal::shared_ptr(new Diagonal(sigmas)); +full: + return Diagonal::shared_ptr(new Diagonal(sigmas)); } /* ************************************************************************* */ @@ -288,6 +312,7 @@ Diagonal::shared_ptr Diagonal::Precisions(const Vector& precisions, bool smart) { return Variances(precisions.array().inverse(), smart); } + /* 
************************************************************************* */ void Diagonal::print(const string& name) const { gtsam::print(sigmas_, name + "diagonal sigmas "); @@ -314,6 +339,11 @@ void Diagonal::WhitenInPlace(Eigen::Block H) const { H = invsigmas().asDiagonal() * H; } +/* *******************************************************************************/ +double Diagonal::logDetR() const { + return invsigmas_.unaryExpr([](double x) { return log(x); }).sum(); +} + /* ************************************************************************* */ // Constrained /* ************************************************************************* */ @@ -642,6 +672,9 @@ void Isotropic::WhitenInPlace(Eigen::Block H) const { H *= invsigma_; } +/* *******************************************************************************/ +double Isotropic::logDetR() const { return log(invsigma_) * dim(); } + /* ************************************************************************* */ // Unit /* ************************************************************************* */ @@ -654,6 +687,9 @@ double Unit::squaredMahalanobisDistance(const Vector& v) const { return v.dot(v); } +/* *******************************************************************************/ +double Unit::logDetR() const { return 0.0; } + /* ************************************************************************* */ // Robust /* ************************************************************************* */ @@ -707,6 +743,5 @@ const RobustModel::shared_ptr &robust, const NoiseModel::shared_ptr noise){ } /* ************************************************************************* */ - -} +} // namespace noiseModel } // gtsam diff --git a/gtsam/linear/NoiseModel.h b/gtsam/linear/NoiseModel.h index 489f11e4c..34fa63e4c 100644 --- a/gtsam/linear/NoiseModel.h +++ b/gtsam/linear/NoiseModel.h @@ -183,6 +183,8 @@ namespace gtsam { return *sqrt_information_; } + /// Compute the log of |R|. 
Used for computing log(|Σ|) + virtual double logDetR() const; public: @@ -266,7 +268,18 @@ namespace gtsam { /// Compute covariance matrix virtual Matrix covariance() const; - private: + /// Compute the log of |Σ| + double logDeterminant() const; + + /** + * @brief Compute the negative log of the normalization constant + * for a Gaussian noise model k = 1/\sqrt(|2πΣ|). + * + * @return double + */ + double negLogConstant() const; + + private: #ifdef GTSAM_ENABLE_BOOST_SERIALIZATION /** Serialization function */ friend class boost::serialization::access; @@ -295,11 +308,12 @@ namespace gtsam { */ Vector sigmas_, invsigmas_, precisions_; - protected: - /** constructor to allow for disabling initialization of invsigmas */ Diagonal(const Vector& sigmas); + /// Compute the log of |R|. Used for computing log(|Σ|) + virtual double logDetR() const override; + public: /** constructor - no initializations, for serialization */ Diagonal(); @@ -532,6 +546,9 @@ namespace gtsam { Isotropic(size_t dim, double sigma) : Diagonal(Vector::Constant(dim, sigma)),sigma_(sigma),invsigma_(1.0/sigma) {} + /// Compute the log of |R|. Used for computing log(|Σ|) + virtual double logDetR() const override; + public: /* dummy constructor to allow for serialization */ @@ -595,6 +612,10 @@ namespace gtsam { * Unit: i.i.d. unit-variance noise on all m dimensions. */ class GTSAM_EXPORT Unit : public Isotropic { + protected: + /// Compute the log of |R|. 
Used for computing log(|Σ|) + virtual double logDetR() const override; + public: typedef std::shared_ptr shared_ptr; diff --git a/gtsam/linear/linear.i b/gtsam/linear/linear.i index 4b0963b8d..af6c2ee22 100644 --- a/gtsam/linear/linear.i +++ b/gtsam/linear/linear.i @@ -375,7 +375,8 @@ virtual class JacobianFactor : gtsam::GaussianFactor { void serialize() const; }; -pair EliminateQR(const gtsam::GaussianFactorGraph& factors, const gtsam::Ordering& keys); +pair EliminateQR( + const gtsam::GaussianFactorGraph& factors, const gtsam::Ordering& keys); #include virtual class HessianFactor : gtsam::GaussianFactor { @@ -510,12 +511,17 @@ virtual class GaussianConditional : gtsam::JacobianFactor { GaussianConditional(size_t key, gtsam::Vector d, gtsam::Matrix R, size_t name1, gtsam::Matrix S, size_t name2, gtsam::Matrix T, const gtsam::noiseModel::Diagonal* sigmas); + GaussianConditional(const std::vector> terms, + size_t nrFrontals, gtsam::Vector d, + const gtsam::noiseModel::Diagonal* sigmas); // Constructors with no noise model GaussianConditional(size_t key, gtsam::Vector d, gtsam::Matrix R); GaussianConditional(size_t key, gtsam::Vector d, gtsam::Matrix R, size_t name1, gtsam::Matrix S); GaussianConditional(size_t key, gtsam::Vector d, gtsam::Matrix R, size_t name1, gtsam::Matrix S, size_t name2, gtsam::Matrix T); + GaussianConditional(const gtsam::KeyVector& keys, size_t nrFrontals, + const gtsam::VerticalBlockMatrix& augmentedMatrix); // Named constructors static gtsam::GaussianConditional FromMeanAndStddev(gtsam::Key key, @@ -542,7 +548,7 @@ virtual class GaussianConditional : gtsam::JacobianFactor { bool equals(const gtsam::GaussianConditional& cg, double tol) const; // Standard Interface - double logNormalizationConstant() const; + double negLogConstant() const; double logProbability(const gtsam::VectorValues& x) const; double evaluate(const gtsam::VectorValues& x) const; double error(const gtsam::VectorValues& x) const; @@ -767,4 +773,4 @@ class KalmanFilter { 
gtsam::Vector z, gtsam::Matrix Q); }; -} \ No newline at end of file +} diff --git a/gtsam/linear/tests/testGaussianBayesNet.cpp b/gtsam/linear/tests/testGaussianBayesNet.cpp index 966b70915..b1cfbe12c 100644 --- a/gtsam/linear/tests/testGaussianBayesNet.cpp +++ b/gtsam/linear/tests/testGaussianBayesNet.cpp @@ -76,10 +76,11 @@ TEST(GaussianBayesNet, Evaluate1) { // the normalization constant 1.0/sqrt((2*pi*Sigma).det()). // The covariance matrix inv(Sigma) = R'*R, so the determinant is const double constant = sqrt((invSigma / (2 * M_PI)).determinant()); - EXPECT_DOUBLES_EQUAL(log(constant), - smallBayesNet.at(0)->logNormalizationConstant() + - smallBayesNet.at(1)->logNormalizationConstant(), + EXPECT_DOUBLES_EQUAL(-log(constant), + smallBayesNet.at(0)->negLogConstant() + + smallBayesNet.at(1)->negLogConstant(), 1e-9); + EXPECT_DOUBLES_EQUAL(-log(constant), smallBayesNet.negLogConstant(), 1e-9); const double actual = smallBayesNet.evaluate(mean); EXPECT_DOUBLES_EQUAL(constant, actual, 1e-9); } diff --git a/gtsam/linear/tests/testGaussianConditional.cpp b/gtsam/linear/tests/testGaussianConditional.cpp index a4a722012..68894c314 100644 --- a/gtsam/linear/tests/testGaussianConditional.cpp +++ b/gtsam/linear/tests/testGaussianConditional.cpp @@ -486,16 +486,17 @@ TEST(GaussianConditional, Error) { /* ************************************************************************* */ // Similar test for multivariate gaussian but with sigma 2.0 -TEST(GaussianConditional, LogNormalizationConstant) { +TEST(GaussianConditional, NegLogConstant) { double sigma = 2.0; auto conditional = GaussianConditional::FromMeanAndStddev(X(0), Vector3::Zero(), sigma); VectorValues x; x.insert(X(0), Vector3::Zero()); Matrix3 Sigma = I_3x3 * sigma * sigma; - double expectedLogNormalizingConstant = log(1 / sqrt((2 * M_PI * Sigma).determinant())); + double expectedNegLogConstant = + -log(1 / sqrt((2 * M_PI * Sigma).determinant())); - EXPECT_DOUBLES_EQUAL(expectedLogNormalizingConstant, - 
conditional.logNormalizationConstant(), 1e-9); + EXPECT_DOUBLES_EQUAL(expectedNegLogConstant, conditional.negLogConstant(), + 1e-9); } /* ************************************************************************* */ @@ -516,6 +517,7 @@ TEST(GaussianConditional, Print) { " d = [ 20 40 ]\n" " mean: 1 elements\n" " x0: 20 40\n" + " logNormalizationConstant: -4.0351\n" "isotropic dim=2 sigma=3\n"; EXPECT(assert_print_equal(expected, conditional, "GaussianConditional")); @@ -530,6 +532,7 @@ TEST(GaussianConditional, Print) { " S[x1] = [ -1 -2 ]\n" " [ -3 -4 ]\n" " d = [ 20 40 ]\n" + " logNormalizationConstant: -4.0351\n" "isotropic dim=2 sigma=3\n"; EXPECT(assert_print_equal(expected1, conditional1, "GaussianConditional")); @@ -545,6 +548,7 @@ TEST(GaussianConditional, Print) { " S[y1] = [ -5 -6 ]\n" " [ -7 -8 ]\n" " d = [ 20 40 ]\n" + " logNormalizationConstant: -4.0351\n" "isotropic dim=2 sigma=3\n"; EXPECT(assert_print_equal(expected2, conditional2, "GaussianConditional")); } diff --git a/gtsam/linear/tests/testGaussianDensity.cpp b/gtsam/linear/tests/testGaussianDensity.cpp index 97eb0de70..b2861e833 100644 --- a/gtsam/linear/tests/testGaussianDensity.cpp +++ b/gtsam/linear/tests/testGaussianDensity.cpp @@ -55,7 +55,7 @@ TEST(GaussianDensity, FromMeanAndStddev) { double expected1 = 0.5 * e.dot(e); EXPECT_DOUBLES_EQUAL(expected1, density.error(values), 1e-9); - double expected2 = density.logNormalizationConstant()- 0.5 * e.dot(e); + double expected2 = -(density.negLogConstant() + 0.5 * e.dot(e)); EXPECT_DOUBLES_EQUAL(expected2, density.logProbability(values), 1e-9); } diff --git a/gtsam/linear/tests/testNoiseModel.cpp b/gtsam/linear/tests/testNoiseModel.cpp index 725680501..2518e4c49 100644 --- a/gtsam/linear/tests/testNoiseModel.cpp +++ b/gtsam/linear/tests/testNoiseModel.cpp @@ -807,6 +807,83 @@ TEST(NoiseModel, NonDiagonalGaussian) } } +TEST(NoiseModel, NegLogNormalizationConstant1D) { + // Very simple 1D noise model, which we can compute by hand. 
+ double sigma = 0.1; + // For expected values, we compute -log(1/sqrt(|2πΣ|)) by hand. + // = 0.5*(log(2Ï€) - log(Σ)) (since it is 1D) + double expected_value = 0.5 * log(2 * M_PI * sigma * sigma); + + // Gaussian + { + Matrix11 R; + R << 1 / sigma; + auto noise_model = Gaussian::SqrtInformation(R); + double actual_value = noise_model->negLogConstant(); + EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9); + } + // Diagonal + { + auto noise_model = Diagonal::Sigmas(Vector1(sigma)); + double actual_value = noise_model->negLogConstant(); + EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9); + } + // Isotropic + { + auto noise_model = Isotropic::Sigma(1, sigma); + double actual_value = noise_model->negLogConstant(); + EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9); + } + // Unit + { + auto noise_model = Unit::Create(1); + double actual_value = noise_model->negLogConstant(); + double sigma = 1.0; + expected_value = 0.5 * log(2 * M_PI * sigma * sigma); + EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9); + } +} + +TEST(NoiseModel, NegLogNormalizationConstant3D) { + // Simple 3D noise model, which we can compute by hand. + double sigma = 0.1; + size_t n = 3; + // We compute the expected values just like in the NegLogNormalizationConstant1D + // test, but we multiply by 3 due to the determinant. 
+ double expected_value = 0.5 * n * log(2 * M_PI * sigma * sigma); + + // Gaussian + { + Matrix33 R; + R << 1 / sigma, 2, 3, // + 0, 1 / sigma, 4, // + 0, 0, 1 / sigma; + auto noise_model = Gaussian::SqrtInformation(R); + double actual_value = noise_model->negLogConstant(); + EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9); + } + // Diagonal + { + auto noise_model = Diagonal::Sigmas(Vector3(sigma, sigma, sigma)); + double actual_value = noise_model->negLogConstant(); + EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9); + } + // Isotropic + { + auto noise_model = Isotropic::Sigma(n, sigma); + double actual_value = noise_model->negLogConstant(); + EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9); + } + // Unit + { + auto noise_model = Unit::Create(3); + double actual_value = noise_model->negLogConstant(); + double sigma = 1.0; + expected_value = 0.5 * n * log(2 * M_PI * sigma * sigma); + EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9); + } +} + /* ************************************************************************* */ int main() { TestResult tr; return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ diff --git a/gtsam/navigation/PreintegratedRotation.cpp b/gtsam/navigation/PreintegratedRotation.cpp index 04e201a34..6c8e510e5 100644 --- a/gtsam/navigation/PreintegratedRotation.cpp +++ b/gtsam/navigation/PreintegratedRotation.cpp @@ -75,13 +75,11 @@ Rot3 IncrementalRotation::operator()( Vector3 correctedOmega = measuredOmega - bias; // Then compensate for sensor-body displacement: we express the quantities - // (originally in the IMU frame) into the body frame. If Jacobian is - // requested, the rotation matrix is obtained as `rotate` Jacobian. - Matrix3 body_R_sensor; + // (originally in the IMU frame) into the body frame. + // Note that the rotate Jacobian is just body_P_sensor->rotation().matrix(). 
if (body_P_sensor) { // rotation rate vector in the body frame - correctedOmega = body_P_sensor->rotation().rotate( - correctedOmega, {}, H_bias ? &body_R_sensor : nullptr); + correctedOmega = body_P_sensor->rotation() * correctedOmega; } // rotation vector describing rotation increment computed from the @@ -90,7 +88,7 @@ Rot3 IncrementalRotation::operator()( Rot3 incrR = Rot3::Expmap(integratedOmega, H_bias); // expensive !! if (H_bias) { *H_bias *= -deltaT; // Correct so accurately reflects bias derivative - if (body_P_sensor) *H_bias *= body_R_sensor; + if (body_P_sensor) *H_bias *= body_P_sensor->rotation().matrix(); } return incrR; } diff --git a/gtsam/navigation/tests/testManifoldPreintegration.cpp b/gtsam/navigation/tests/testManifoldPreintegration.cpp index 4016240cf..82f9876fb 100644 --- a/gtsam/navigation/tests/testManifoldPreintegration.cpp +++ b/gtsam/navigation/tests/testManifoldPreintegration.cpp @@ -118,17 +118,18 @@ TEST(ManifoldPreintegration, CompareWithPreintegratedRotation) { // Integrate a single measurement const double omega = 0.1; const Vector3 trueOmega(omega, 0, 0); - const Vector3 bias(1, 2, 3); - const Vector3 measuredOmega = trueOmega + bias; + const Vector3 biasOmega(1, 2, 3); + const Vector3 measuredOmega = trueOmega + biasOmega; const double deltaT = 0.5; // Check the accumulated rotation. 
Rot3 expected = Rot3::Roll(omega * deltaT); - pim.integrateGyroMeasurement(measuredOmega, bias, deltaT); + const Vector biasOmegaHat = biasOmega; + pim.integrateGyroMeasurement(measuredOmega, biasOmegaHat, deltaT); EXPECT(assert_equal(expected, pim.deltaRij(), 1e-9)); // Now do the same for a ManifoldPreintegration object - imuBias::ConstantBias biasHat {Z_3x1, bias}; + imuBias::ConstantBias biasHat {Z_3x1, biasOmega}; ManifoldPreintegration manifoldPim(testing::Params(), biasHat); manifoldPim.integrateMeasurement(Z_3x1, measuredOmega, deltaT); EXPECT(assert_equal(expected, manifoldPim.deltaRij(), 1e-9)); @@ -136,17 +137,21 @@ TEST(ManifoldPreintegration, CompareWithPreintegratedRotation) { // Check their internal Jacobians are the same: EXPECT(assert_equal(pim.delRdelBiasOmega(), manifoldPim.delRdelBiasOmega())); - // Check PreintegratedRotation::biascorrectedDeltaRij. - Matrix3 H; + // Let's check the derivatives a delta away from the bias hat const double delta = 0.05; const Vector3 biasOmegaIncr(delta, 0, 0); + imuBias::ConstantBias bias_i {Z_3x1, biasOmegaHat + biasOmegaIncr}; + + // Check PreintegratedRotation::biascorrectedDeltaRij. + // Note that biascorrectedDeltaRij expects the biasHat to be subtracted already + Matrix3 H; Rot3 corrected = pim.biascorrectedDeltaRij(biasOmegaIncr, H); EQUALITY(Vector3(-deltaT * delta, 0, 0), expected.logmap(corrected)); const Rot3 expected2 = Rot3::Roll((omega - delta) * deltaT); EXPECT(assert_equal(expected2, corrected, 1e-9)); // Check ManifoldPreintegration::biasCorrectedDelta. 
- imuBias::ConstantBias bias_i {Z_3x1, bias + biasOmegaIncr}; + // Note that, confusingly, biasCorrectedDelta will subtract biasHat inside Matrix96 H2; Vector9 biasCorrected = manifoldPim.biasCorrectedDelta(bias_i, H2); Vector9 expected3; @@ -154,12 +159,11 @@ TEST(ManifoldPreintegration, CompareWithPreintegratedRotation) { EXPECT(assert_equal(expected3, biasCorrected, 1e-9)); // Check that this one is sane: - auto g = [&](const Vector3& increment) { - return manifoldPim.biasCorrectedDelta({Z_3x1, bias + increment}, {}); + auto g = [&](const Vector3& b) { + return manifoldPim.biasCorrectedDelta({Z_3x1, b}, {}); }; - EXPECT(assert_equal(numericalDerivative11(g, Z_3x1), - H2.rightCols<3>(), - 1e-4)); // NOTE(frank): reduced tolerance + EXPECT(assert_equal(numericalDerivative11(g, bias_i.gyroscope()), + H2.rightCols<3>())); } /* ************************************************************************* */ diff --git a/gtsam/navigation/tests/testPreintegratedRotation.cpp b/gtsam/navigation/tests/testPreintegratedRotation.cpp index 1e27ca1e4..468701e3c 100644 --- a/gtsam/navigation/tests/testPreintegratedRotation.cpp +++ b/gtsam/navigation/tests/testPreintegratedRotation.cpp @@ -36,40 +36,33 @@ const Vector3 measuredOmega = trueOmega + bias; const double deltaT = 0.5; } // namespace biased_x_rotation -// Create params where x and y axes are exchanged. -static std::shared_ptr paramsWithTransform() { - auto p = std::make_shared(); - p->setBodyPSensor({Rot3::Yaw(M_PI_2), {0, 0, 0}}); - return p; -} - //****************************************************************************** TEST(PreintegratedRotation, integrateGyroMeasurement) { // Example where IMU is identical to body frame, then omega is roll using namespace biased_x_rotation; auto p = std::make_shared(); - PreintegratedRotation pim(p); // Check the value. 
Matrix3 H_bias; - internal::IncrementalRotation f{measuredOmega, deltaT, p->getBodyPSensor()}; + const internal::IncrementalRotation f{measuredOmega, deltaT, p->getBodyPSensor()}; const Rot3 incrR = f(bias, H_bias); - Rot3 expected = Rot3::Roll(omega * deltaT); - EXPECT(assert_equal(expected, incrR, 1e-9)); + const Rot3 expected = Rot3::Roll(omega * deltaT); + EXPECT(assert_equal(expected, incrR, 1e-9)) // Check the derivative: - EXPECT(assert_equal(numericalDerivative11(f, bias), H_bias)); + EXPECT(assert_equal(numericalDerivative11(f, bias), H_bias)) // Check value of deltaRij() after integration. Matrix3 F; + PreintegratedRotation pim(p); pim.integrateGyroMeasurement(measuredOmega, bias, deltaT, F); - EXPECT(assert_equal(expected, pim.deltaRij(), 1e-9)); + EXPECT(assert_equal(expected, pim.deltaRij(), 1e-9)) // Check that system matrix F is the first derivative of compose: - EXPECT(assert_equal(pim.deltaRij().inverse().AdjointMap(), F)); + EXPECT(assert_equal(pim.deltaRij().inverse().AdjointMap(), F)) // Make sure delRdelBiasOmega is H_bias after integration. - EXPECT(assert_equal(H_bias, pim.delRdelBiasOmega())); + EXPECT(assert_equal(H_bias, pim.delRdelBiasOmega())) // Check if we make a correction to the bias, the value and Jacobian are // correct. Note that the bias is subtracted from the measurement, and the @@ -78,56 +71,127 @@ TEST(PreintegratedRotation, integrateGyroMeasurement) { const double delta = 0.05; const Vector3 biasOmegaIncr(delta, 0, 0); Rot3 corrected = pim.biascorrectedDeltaRij(biasOmegaIncr, H); - EQUALITY(Vector3(-deltaT * delta, 0, 0), expected.logmap(corrected)); - EXPECT(assert_equal(Rot3::Roll((omega - delta) * deltaT), corrected, 1e-9)); + EQUALITY(Vector3(-deltaT * delta, 0, 0), expected.logmap(corrected)) + EXPECT(assert_equal(Rot3::Roll((omega - delta) * deltaT), corrected, 1e-9)) - // TODO(frank): again the derivative is not the *sane* one! 
- // auto g = [&](const Vector3& increment) { - // return pim.biascorrectedDeltaRij(increment, {}); - // }; - // EXPECT(assert_equal(numericalDerivative11(g, Z_3x1), H)); + // Check the derivative matches the numerical one + auto g = [&](const Vector3& increment) { + return pim.biascorrectedDeltaRij(increment, {}); + }; + Matrix3 expectedH = numericalDerivative11(g, biasOmegaIncr); + EXPECT(assert_equal(expectedH, H)); + + // Let's integrate a second IMU measurement and check the Jacobian update: + pim.integrateGyroMeasurement(measuredOmega, bias, deltaT); + expectedH = numericalDerivative11(g, biasOmegaIncr); + corrected = pim.biascorrectedDeltaRij(biasOmegaIncr, H); + EXPECT(assert_equal(expectedH, H)); } //****************************************************************************** + +// Create params where x and y axes are exchanged. +static std::shared_ptr paramsWithTransform() { + auto p = std::make_shared(); + p->setBodyPSensor({Rot3::Yaw(M_PI_2), {0, 0, 0}}); + return p; +} + TEST(PreintegratedRotation, integrateGyroMeasurementWithTransform) { // Example where IMU is rotated, so measured omega indicates pitch. using namespace biased_x_rotation; auto p = paramsWithTransform(); - PreintegratedRotation pim(p); // Check the value. Matrix3 H_bias; - internal::IncrementalRotation f{measuredOmega, deltaT, p->getBodyPSensor()}; - Rot3 expected = Rot3::Pitch(omega * deltaT); - EXPECT(assert_equal(expected, f(bias, H_bias), 1e-9)); + const internal::IncrementalRotation f{measuredOmega, deltaT, p->getBodyPSensor()}; + const Rot3 expected = Rot3::Pitch(omega * deltaT); // Pitch, because of sensor-IMU rotation! + EXPECT(assert_equal(expected, f(bias, H_bias), 1e-9)) // Check the derivative: - EXPECT(assert_equal(numericalDerivative11(f, bias), H_bias)); + EXPECT(assert_equal(numericalDerivative11(f, bias), H_bias)) // Check value of deltaRij() after integration. 
Matrix3 F; + PreintegratedRotation pim(p); pim.integrateGyroMeasurement(measuredOmega, bias, deltaT, F); - EXPECT(assert_equal(expected, pim.deltaRij(), 1e-9)); + EXPECT(assert_equal(expected, pim.deltaRij(), 1e-9)) // Check that system matrix F is the first derivative of compose: - EXPECT(assert_equal(pim.deltaRij().inverse().AdjointMap(), F)); + EXPECT(assert_equal(pim.deltaRij().inverse().AdjointMap(), F)) // Make sure delRdelBiasOmega is H_bias after integration. - EXPECT(assert_equal(H_bias, pim.delRdelBiasOmega())); + EXPECT(assert_equal(H_bias, pim.delRdelBiasOmega())) // Check the bias correction in same way, but will now yield pitch change. Matrix3 H; const double delta = 0.05; const Vector3 biasOmegaIncr(delta, 0, 0); Rot3 corrected = pim.biascorrectedDeltaRij(biasOmegaIncr, H); - EQUALITY(Vector3(0, -deltaT * delta, 0), expected.logmap(corrected)); - EXPECT(assert_equal(Rot3::Pitch((omega - delta) * deltaT), corrected, 1e-9)); + EQUALITY(Vector3(0, -deltaT * delta, 0), expected.logmap(corrected)) + EXPECT(assert_equal(Rot3::Pitch((omega - delta) * deltaT), corrected, 1e-9)) - // TODO(frank): again the derivative is not the *sane* one! - // auto g = [&](const Vector3& increment) { - // return pim.biascorrectedDeltaRij(increment, {}); - // }; - // EXPECT(assert_equal(numericalDerivative11(g, Z_3x1), H)); + // Check the derivative matches the *expectedH* one + auto g = [&](const Vector3& increment) { + return pim.biascorrectedDeltaRij(increment, {}); + }; + Matrix3 expectedH = numericalDerivative11(g, biasOmegaIncr); + EXPECT(assert_equal(expectedH, H)); + + // Let's integrate a second IMU measurement and check the Jacobian update: + pim.integrateGyroMeasurement(measuredOmega, bias, deltaT); + corrected = pim.biascorrectedDeltaRij(biasOmegaIncr, H); + expectedH = numericalDerivative11(g, biasOmegaIncr); + EXPECT(assert_equal(expectedH, H)); +} + +// Create params we have a non-axis-aligned rotation and even an offset. 
+static std::shared_ptr paramsWithArbitraryTransform() { + auto p = std::make_shared(); + p->setBodyPSensor({Rot3::Expmap({1,2,3}), {4,5,6}}); + return p; +} + +TEST(PreintegratedRotation, integrateGyroMeasurementWithArbitraryTransform) { + // Example with a non-axis-aligned transform and some position. + using namespace biased_x_rotation; + auto p = paramsWithArbitraryTransform(); + + // Check the derivative: + Matrix3 H_bias; + const internal::IncrementalRotation f{measuredOmega, deltaT, p->getBodyPSensor()}; + f(bias, H_bias); + EXPECT(assert_equal(numericalDerivative11(f, bias), H_bias)) + + // Check derivative of deltaRij() after integration. + Matrix3 F; + PreintegratedRotation pim(p); + pim.integrateGyroMeasurement(measuredOmega, bias, deltaT, F); + + // Check that system matrix F is the first derivative of compose: + EXPECT(assert_equal(pim.deltaRij().inverse().AdjointMap(), F)) + + // Make sure delRdelBiasOmega is H_bias after integration. + EXPECT(assert_equal(H_bias, pim.delRdelBiasOmega())) + + // Check the bias correction in same way, but will now yield pitch change. 
+ Matrix3 H; + const double delta = 0.05; + const Vector3 biasOmegaIncr(delta, 0, 0); + Rot3 corrected = pim.biascorrectedDeltaRij(biasOmegaIncr, H); + + // Check the derivative matches the numerical one + auto g = [&](const Vector3& increment) { + return pim.biascorrectedDeltaRij(increment, {}); + }; + Matrix3 expectedH = numericalDerivative11(g, biasOmegaIncr); + EXPECT(assert_equal(expectedH, H)); + + // Let's integrate a second IMU measurement and check the Jacobian update: + pim.integrateGyroMeasurement(measuredOmega, bias, deltaT); + corrected = pim.biascorrectedDeltaRij(biasOmegaIncr, H); + expectedH = numericalDerivative11(g, biasOmegaIncr); + EXPECT(assert_equal(expectedH, H)); } //****************************************************************************** diff --git a/gtsam/nonlinear/ISAM2.cpp b/gtsam/nonlinear/ISAM2.cpp index 0b5449b58..47857a2a2 100644 --- a/gtsam/nonlinear/ISAM2.cpp +++ b/gtsam/nonlinear/ISAM2.cpp @@ -424,6 +424,11 @@ ISAM2Result ISAM2::update(const NonlinearFactorGraph& newFactors, ISAM2Result result(params_.enableDetailedResults); UpdateImpl update(params_, updateParams); + // Initialize any new variables \Theta_{new} and add + // \Theta:=\Theta\cup\Theta_{new}. + // Needed before delta update if using Dogleg optimizer. + addVariables(newTheta, result.details()); + // Update delta if we need it to check relinearization later if (update.relinarizationNeeded(update_count_)) updateDelta(updateParams.forceFullSolve); @@ -435,9 +440,7 @@ ISAM2Result ISAM2::update(const NonlinearFactorGraph& newFactors, update.computeUnusedKeys(newFactors, variableIndex_, result.keysWithRemovedFactors, &result.unusedKeys); - // 2. Initialize any new variables \Theta_{new} and add - // \Theta:=\Theta\cup\Theta_{new}. - addVariables(newTheta, result.details()); + // 2. 
Compute new error to check for relinearization if (params_.evaluateNonlinearError) update.error(nonlinearFactors_, calculateEstimate(), &result.errorBefore); @@ -731,6 +734,7 @@ void ISAM2::updateDelta(bool forceFullSolve) const { effectiveWildfireThreshold, &delta_); deltaReplacedMask_.clear(); gttoc(Wildfire_update); + } else if (std::holds_alternative(params_.optimizationParams)) { // If using Dogleg, do a Dogleg step const ISAM2DoglegParams& doglegParams = @@ -769,9 +773,8 @@ void ISAM2::updateDelta(bool forceFullSolve) const { gttic(Copy_dx_d); // Update Delta and linear step doglegDelta_ = doglegResult.delta; - delta_ = - doglegResult - .dx_d; // Copy the VectorValues containing with the linear solution + // Copy the VectorValues containing with the linear solution + delta_ = doglegResult.dx_d; gttoc(Copy_dx_d); } else { throw std::runtime_error("iSAM2: unknown ISAM2Params type"); diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index c49e3a65a..99682fb77 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -236,9 +236,9 @@ bool LevenbergMarquardtOptimizer::tryLambda(const GaussianFactorGraph& linear, if (currentState->iterations == 0) { cout << "iter cost cost_change lambda success iter_time" << endl; } - cout << setw(4) << currentState->iterations << " " << setw(8) << newError << " " << setw(3) << setprecision(2) - << costChange << " " << setw(3) << setprecision(2) << currentState->lambda << " " << setw(4) - << systemSolvedSuccessfully << " " << setw(3) << setprecision(2) << iterationTime << endl; + cout << setw(4) << currentState->iterations << " " << setw(12) << newError << " " << setw(12) << setprecision(2) + << costChange << " " << setw(10) << setprecision(2) << currentState->lambda << " " << setw(6) + << systemSolvedSuccessfully << " " << setw(10) << setprecision(2) << iterationTime << endl; } if (step_is_successful) { // we 
have successfully decreased the cost and we have good modelFidelity diff --git a/gtsam/nonlinear/NonlinearOptimizerParams.cpp b/gtsam/nonlinear/NonlinearOptimizerParams.cpp index 671dbe34d..55dfd4561 100644 --- a/gtsam/nonlinear/NonlinearOptimizerParams.cpp +++ b/gtsam/nonlinear/NonlinearOptimizerParams.cpp @@ -123,6 +123,28 @@ void NonlinearOptimizerParams::print(const std::string& str) const { std::cout.flush(); } +/* ************************************************************************* */ +bool NonlinearOptimizerParams::equals(const NonlinearOptimizerParams& other, + double tol) const { + // Check for equality of shared ptrs + bool iterative_params_equal = iterativeParams == other.iterativeParams; + // Check equality of components + if (iterativeParams && other.iterativeParams) { + iterative_params_equal = iterativeParams->equals(*other.iterativeParams); + } else { + // Check if either is null. If both are null, then true + iterative_params_equal = !iterativeParams && !other.iterativeParams; + } + + return maxIterations == other.getMaxIterations() && + std::abs(relativeErrorTol - other.getRelativeErrorTol()) <= tol && + std::abs(absoluteErrorTol - other.getAbsoluteErrorTol()) <= tol && + std::abs(errorTol - other.getErrorTol()) <= tol && + verbosityTranslator(verbosity) == other.getVerbosity() && + orderingType == other.orderingType && ordering == other.ordering && + linearSolverType == other.linearSolverType && iterative_params_equal; +} + /* ************************************************************************* */ std::string NonlinearOptimizerParams::linearSolverTranslator( LinearSolverType linearSolverType) const { diff --git a/gtsam/nonlinear/NonlinearOptimizerParams.h b/gtsam/nonlinear/NonlinearOptimizerParams.h index cafb235bc..cef7e85e3 100644 --- a/gtsam/nonlinear/NonlinearOptimizerParams.h +++ b/gtsam/nonlinear/NonlinearOptimizerParams.h @@ -114,16 +114,7 @@ public: virtual void print(const std::string& str = "") const; - bool equals(const 
NonlinearOptimizerParams& other, double tol = 1e-9) const { - return maxIterations == other.getMaxIterations() - && std::abs(relativeErrorTol - other.getRelativeErrorTol()) <= tol - && std::abs(absoluteErrorTol - other.getAbsoluteErrorTol()) <= tol - && std::abs(errorTol - other.getErrorTol()) <= tol - && verbosityTranslator(verbosity) == other.getVerbosity(); - // && orderingType.equals(other.getOrderingType()_; - // && linearSolverType == other.getLinearSolverType(); - // TODO: check ordering, iterativeParams, and iterationsHook - } + bool equals(const NonlinearOptimizerParams& other, double tol = 1e-9) const; inline bool isMultifrontal() const { return (linearSolverType == MULTIFRONTAL_CHOLESKY) diff --git a/gtsam/nonlinear/nonlinear.i b/gtsam/nonlinear/nonlinear.i index 9084a50fa..f40deab5a 100644 --- a/gtsam/nonlinear/nonlinear.i +++ b/gtsam/nonlinear/nonlinear.i @@ -381,17 +381,22 @@ typedef gtsam::GncOptimizer> G #include virtual class LevenbergMarquardtOptimizer : gtsam::NonlinearOptimizer { LevenbergMarquardtOptimizer(const gtsam::NonlinearFactorGraph& graph, - const gtsam::Values& initialValues); + const gtsam::Values& initialValues, + const gtsam::LevenbergMarquardtParams& params = + gtsam::LevenbergMarquardtParams()); LevenbergMarquardtOptimizer(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& initialValues, - const gtsam::LevenbergMarquardtParams& params); + const gtsam::Ordering& ordering, + const gtsam::LevenbergMarquardtParams& params = + gtsam::LevenbergMarquardtParams()); + double lambda() const; void print(string s = "") const; }; #include class ISAM2GaussNewtonParams { - ISAM2GaussNewtonParams(); + ISAM2GaussNewtonParams(double _wildfireThreshold = 0.001); void print(string s = "") const; diff --git a/gtsam/sfm/TranslationFactor.h b/gtsam/sfm/TranslationFactor.h index a7289f759..33bc82f5a 100644 --- a/gtsam/sfm/TranslationFactor.h +++ b/gtsam/sfm/TranslationFactor.h @@ -18,6 +18,7 @@ * @brief Binary factor for a relative 
translation direction measurement. */ +#include #include #include #include @@ -36,8 +37,6 @@ namespace gtsam { * normalized(Tb - Ta) - w_aZb.point3() * * @ingroup sfm - * - * */ class TranslationFactor : public NoiseModelFactorN { private: @@ -45,7 +44,6 @@ class TranslationFactor : public NoiseModelFactorN { Point3 measured_w_aZb_; public: - // Provide access to the Matrix& version of evaluateError: using NoiseModelFactor2::evaluateError; @@ -60,20 +58,20 @@ class TranslationFactor : public NoiseModelFactorN { * @brief Caclulate error: (norm(Tb - Ta) - measurement) * where Tb and Ta are Point3 translations and measurement is * the Unit3 translation direction from a to b. - * + * * @param Ta translation for key a * @param Tb translation for key b * @param H1 optional jacobian in Ta * @param H2 optional jacobian in Tb * @return * Vector */ - Vector evaluateError( - const Point3& Ta, const Point3& Tb, - OptionalMatrixType H1, - OptionalMatrixType H2) const override { + Vector evaluateError(const Point3& Ta, const Point3& Tb, + OptionalMatrixType H1, + OptionalMatrixType H2) const override { const Point3 dir = Tb - Ta; Matrix33 H_predicted_dir; - const Point3 predicted = normalize(dir, H1 || H2 ? &H_predicted_dir : nullptr); + const Point3 predicted = + normalize(dir, H1 || H2 ? &H_predicted_dir : nullptr); if (H1) *H1 = -H_predicted_dir; if (H2) *H2 = H_predicted_dir; return predicted - measured_w_aZb_; @@ -89,4 +87,75 @@ class TranslationFactor : public NoiseModelFactorN { } #endif }; // \ TranslationFactor + +/** + * Binary factor for a relative translation direction measurement + * w_aZb. The measurement equation is + * w_aZb = scale * (Tb - Ta) + * i.e., w_aZb is the translation direction from frame A to B, in world + * coordinates. + * + * Instead of normalizing the Tb - Ta vector, we multiply it by a scalar + * which is also jointly optimized. This is inspired by the work on + * BATA (Zhuang et al, CVPR 2018). 
+ * + * @ingroup sfm + */ +class BilinearAngleTranslationFactor + : public NoiseModelFactorN { + private: + typedef NoiseModelFactorN Base; + Point3 measured_w_aZb_; + + public: + /// default constructor + BilinearAngleTranslationFactor() {} + + BilinearAngleTranslationFactor(Key a, Key b, Key scale_key, + const Unit3& w_aZb, + const SharedNoiseModel& noiseModel) + : Base(noiseModel, a, b, scale_key), measured_w_aZb_(w_aZb.point3()) {} + + // Provide access to the Matrix& version of evaluateError: + using NoiseModelFactor2::evaluateError; + + /** + * @brief Caclulate error: (scale * (Tb - Ta) - measurement) + * where Tb and Ta are Point3 translations and measurement is + * the Unit3 translation direction from a to b. + * + * @param Ta translation for key a + * @param Tb translation for key b + * @param H1 optional jacobian in Ta + * @param H2 optional jacobian in Tb + * @return * Vector + */ + Vector evaluateError(const Point3& Ta, const Point3& Tb, const Vector1& scale, + OptionalMatrixType H1, OptionalMatrixType H2, + OptionalMatrixType H3) const override { + // Ideally we should use a positive real valued scalar datatype for scale. + const double abs_scale = abs(scale[0]); + const Point3 predicted = (Tb - Ta) * abs_scale; + if (H1) { + *H1 = -Matrix3::Identity() * abs_scale; + } + if (H2) { + *H2 = Matrix3::Identity() * abs_scale; + } + if (H3) { + *H3 = scale[0] >= 0 ? 
(Tb - Ta) : (Ta - Tb); + } + return predicted - measured_w_aZb_; + } + + private: +#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION + friend class boost::serialization::access; + template + void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + ar& boost::serialization::make_nvp( + "Base", boost::serialization::base_object(*this)); + } +#endif +}; // \ BilinearAngleTranslationFactor } // namespace gtsam diff --git a/gtsam/sfm/TranslationRecovery.cpp b/gtsam/sfm/TranslationRecovery.cpp index c7ef2e526..02c78133e 100644 --- a/gtsam/sfm/TranslationRecovery.cpp +++ b/gtsam/sfm/TranslationRecovery.cpp @@ -101,9 +101,17 @@ NonlinearFactorGraph TranslationRecovery::buildGraph( NonlinearFactorGraph graph; // Add translation factors for input translation directions. + uint64_t i = 0; for (auto edge : relativeTranslations) { - graph.emplace_shared(edge.key1(), edge.key2(), - edge.measured(), edge.noiseModel()); + if (use_bilinear_translation_factor_) { + graph.emplace_shared( + edge.key1(), edge.key2(), Symbol('S', i), edge.measured(), + edge.noiseModel()); + } else { + graph.emplace_shared( + edge.key1(), edge.key2(), edge.measured(), edge.noiseModel()); + } + i++; } return graph; } @@ -163,6 +171,12 @@ Values TranslationRecovery::initializeRandomly( insert(edge.key1()); insert(edge.key2()); } + + if (use_bilinear_translation_factor_) { + for (uint64_t i = 0; i < relativeTranslations.size(); i++) { + initial.insert(Symbol('S', i), Vector1(1.0)); + } + } return initial; } diff --git a/gtsam/sfm/TranslationRecovery.h b/gtsam/sfm/TranslationRecovery.h index 4848d7cfa..a91ef01f9 100644 --- a/gtsam/sfm/TranslationRecovery.h +++ b/gtsam/sfm/TranslationRecovery.h @@ -60,14 +60,18 @@ class GTSAM_EXPORT TranslationRecovery { // Parameters. LevenbergMarquardtParams lmParams_; + const bool use_bilinear_translation_factor_ = false; + public: /** * @brief Construct a new Translation Recovery object * * @param lmParams parameters for optimization. 
*/ - TranslationRecovery(const LevenbergMarquardtParams &lmParams) - : lmParams_(lmParams) {} + TranslationRecovery(const LevenbergMarquardtParams &lmParams, + bool use_bilinear_translation_factor = false) + : lmParams_(lmParams), + use_bilinear_translation_factor_(use_bilinear_translation_factor) {} /** * @brief Default constructor. diff --git a/gtsam/sfm/sfm.i b/gtsam/sfm/sfm.i index ba25cf793..142e65d7e 100644 --- a/gtsam/sfm/sfm.i +++ b/gtsam/sfm/sfm.i @@ -23,7 +23,7 @@ virtual class SfmTrack : gtsam::SfmTrack2d { SfmTrack(); SfmTrack(const gtsam::Point3& pt); const Point3& point3() const; - + Point3 p; double r; @@ -37,8 +37,8 @@ virtual class SfmTrack : gtsam::SfmTrack2d { bool equals(const gtsam::SfmTrack& expected, double tol) const; }; -#include #include +#include #include class SfmData { SfmData(); @@ -268,6 +268,8 @@ class MFAS { #include class TranslationRecovery { + TranslationRecovery(const gtsam::LevenbergMarquardtParams& lmParams, + const bool use_bilinear_translation_factor); TranslationRecovery(const gtsam::LevenbergMarquardtParams& lmParams); TranslationRecovery(); // default params. void addPrior(const gtsam::BinaryMeasurementsUnit3& relativeTranslations, diff --git a/gtsam/sfm/tests/testTranslationFactor.cpp b/gtsam/sfm/tests/testTranslationFactor.cpp index 818f2bce5..b25745692 100644 --- a/gtsam/sfm/tests/testTranslationFactor.cpp +++ b/gtsam/sfm/tests/testTranslationFactor.cpp @@ -30,7 +30,7 @@ using namespace gtsam; static SharedNoiseModel model(noiseModel::Isotropic::Sigma(3, 0.05)); // Keys are deliberately *not* in sorted order to test that case. 
-static const Key kKey1(2), kKey2(1); +static const Key kKey1(2), kKey2(1), kEdgeKey(3); static const Unit3 kMeasured(1, 0, 0); /* ************************************************************************* */ @@ -99,6 +99,79 @@ TEST(TranslationFactor, Jacobian) { EXPECT(assert_equal(H2Expected, H2Actual, 1e-9)); } + +/* ************************************************************************* */ +TEST(BilinearAngleTranslationFactor, Constructor) { + BilinearAngleTranslationFactor factor(kKey1, kKey2, kEdgeKey, kMeasured, model); +} + +/* ************************************************************************* */ +TEST(BilinearAngleTranslationFactor, ZeroError) { + // Create a factor + BilinearAngleTranslationFactor factor(kKey1, kKey2, kEdgeKey, kMeasured, model); + + // Set the linearization + Point3 T1(1, 0, 0), T2(2, 0, 0); + Vector1 scale(1.0); + + // Use the factor to calculate the error + Vector actualError(factor.evaluateError(T1, T2, scale)); + + // Verify we get the expected error + Vector expected = (Vector3() << 0, 0, 0).finished(); + EXPECT(assert_equal(expected, actualError, 1e-9)); +} + +/* ************************************************************************* */ +TEST(BilinearAngleTranslationFactor, NonZeroError) { + // create a factor + BilinearAngleTranslationFactor factor(kKey1, kKey2, kEdgeKey, kMeasured, model); + + // set the linearization + Point3 T1(0, 1, 1), T2(0, 2, 2); + Vector1 scale(1.0 / sqrt(2)); + + // use the factor to calculate the error + Vector actualError(factor.evaluateError(T1, T2, scale)); + + // verify we get the expected error + Vector expected = (Vector3() << -1, 1 / sqrt(2), 1 / sqrt(2)).finished(); + EXPECT(assert_equal(expected, actualError, 1e-9)); +} + +/* ************************************************************************* */ +Vector bilinearAngleFactorError(const Point3 &T1, const Point3 &T2, const Vector1 &scale, + const BilinearAngleTranslationFactor &factor) { + return factor.evaluateError(T1, T2, scale); 
+} + +TEST(BilinearAngleTranslationFactor, Jacobian) { + // Create a factor + BilinearAngleTranslationFactor factor(kKey1, kKey2, kEdgeKey, kMeasured, model); + + // Set the linearization + Point3 T1(1, 0, 0), T2(2, 0, 0); + Vector1 scale(1.0); + + // Use the factor to calculate the Jacobians + Matrix H1Actual, H2Actual, H3Actual; + factor.evaluateError(T1, T2, scale, H1Actual, H2Actual, H3Actual); + + // Use numerical derivatives to calculate the Jacobians + Matrix H1Expected, H2Expected, H3Expected; + H1Expected = numericalDerivative11( + std::bind(&bilinearAngleFactorError, std::placeholders::_1, T2, scale, factor), T1); + H2Expected = numericalDerivative11( + std::bind(&bilinearAngleFactorError, T1, std::placeholders::_1, scale, factor), T2); + H3Expected = numericalDerivative11( + std::bind(&bilinearAngleFactorError, T1, T2, std::placeholders::_1, factor), scale); + + // Verify the Jacobians are correct + EXPECT(assert_equal(H1Expected, H1Actual, 1e-9)); + EXPECT(assert_equal(H2Expected, H2Actual, 1e-9)); + EXPECT(assert_equal(H3Expected, H3Actual, 1e-9)); +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/slam/README.md b/gtsam/slam/README.md index ae5edfdac..018126a66 100644 --- a/gtsam/slam/README.md +++ b/gtsam/slam/README.md @@ -63,6 +63,6 @@ A RegularJacobianFactor that uses some badly documented reduction on the Jacobia A RegularJacobianFactor that eliminates a point using sequential elimination. -### JacobianFactorQR +### JacobianFactorSVD -A RegularJacobianFactor that uses the "Nullspace Trick" by Mourikis et al. See the documentation in the file, which *is* well documented. \ No newline at end of file +A RegularJacobianFactor that uses the "Nullspace Trick" by Mourikis et al. See the documentation in the file, which *is* well documented. 
diff --git a/gtsam/slam/TriangulationFactor.h b/gtsam/slam/TriangulationFactor.h index 32aa590a6..3b8486a59 100644 --- a/gtsam/slam/TriangulationFactor.h +++ b/gtsam/slam/TriangulationFactor.h @@ -34,6 +34,8 @@ public: /// CAMERA type using Camera = CAMERA; + /// shorthand for measurement type, e.g. Point2 or StereoPoint2 + using Measurement = typename CAMERA::Measurement; protected: @@ -43,9 +45,6 @@ protected: /// shorthand for this class using This = TriangulationFactor; - /// shorthand for measurement type, e.g. Point2 or StereoPoint2 - using Measurement = typename CAMERA::Measurement; - // Keep a copy of measurement and calibration for I/O const CAMERA camera_; ///< CAMERA in which this landmark was seen const Measurement measured_; ///< 2D measurement diff --git a/gtsam/slam/slam.i b/gtsam/slam/slam.i index f054aaab4..97dcfcae7 100644 --- a/gtsam/slam/slam.i +++ b/gtsam/slam/slam.i @@ -347,11 +347,50 @@ virtual class FrobeniusBetweenFactor : gtsam::NoiseModelFactor { gtsam::Vector evaluateError(const T& R1, const T& R2); }; - + +#include +template +virtual class TriangulationFactor : gtsam::NoiseModelFactor { + TriangulationFactor(); + TriangulationFactor(const CAMERA& camera, const gtsam::This::Measurement& measured, + const gtsam::noiseModel::Base* model, gtsam::Key pointKey, + bool throwCheirality = false, + bool verboseCheirality = false); + + void print(const string& s = "", const gtsam::KeyFormatter& keyFormatter = + gtsam::DefaultKeyFormatter) const; + bool equals(const This& p, double tol = 1e-9) const; + + gtsam::Vector evaluateError(const gtsam::Point3& point) const; + + const gtsam::This::Measurement& measured() const; +}; +typedef gtsam::TriangulationFactor> + TriangulationFactorCal3_S2; +typedef gtsam::TriangulationFactor> + TriangulationFactorCal3DS2; +typedef gtsam::TriangulationFactor> + TriangulationFactorCal3Bundler; +typedef gtsam::TriangulationFactor> + TriangulationFactorCal3Fisheye; +typedef gtsam::TriangulationFactor> + 
TriangulationFactorCal3Unified; + +typedef gtsam::TriangulationFactor> + TriangulationFactorPoseCal3_S2; +typedef gtsam::TriangulationFactor> + TriangulationFactorPoseCal3DS2; +typedef gtsam::TriangulationFactor> + TriangulationFactorPoseCal3Bundler; +typedef gtsam::TriangulationFactor> + TriangulationFactorPoseCal3Fisheye; +typedef gtsam::TriangulationFactor> + TriangulationFactorPoseCal3Unified; + #include namespace lago { gtsam::Values initialize(const gtsam::NonlinearFactorGraph& graph, bool useOdometricPath = true); gtsam::Values initialize(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& initialGuess); } - + } // namespace gtsam diff --git a/gtsam/symbolic/SymbolicFactor-inst.h b/gtsam/symbolic/SymbolicFactor-inst.h index db0eb05ca..a5410aad5 100644 --- a/gtsam/symbolic/SymbolicFactor-inst.h +++ b/gtsam/symbolic/SymbolicFactor-inst.h @@ -42,7 +42,9 @@ namespace gtsam // Gather all keys KeySet allKeys; for(const std::shared_ptr& factor: factors) { - allKeys.insert(factor->begin(), factor->end()); + // Non-active factors are nullptr + if (factor) + allKeys.insert(factor->begin(), factor->end()); } // Check keys diff --git a/gtsam_unstable/gtsam_unstable.i b/gtsam_unstable/gtsam_unstable.i index 8566934dd..3beb845fb 100644 --- a/gtsam_unstable/gtsam_unstable.i +++ b/gtsam_unstable/gtsam_unstable.i @@ -671,6 +671,21 @@ class AHRS { //void print(string s) const; }; +#include +template +virtual class PartialPriorFactor : gtsam::NoiseModelFactor { + PartialPriorFactor(gtsam::Key key, size_t idx, double prior, + const gtsam::noiseModel::Base* model); + PartialPriorFactor(gtsam::Key key, const std::vector& indices, + const gtsam::Vector& prior, + const gtsam::noiseModel::Base* model); + + // enabling serialization functionality + void serialize() const; + + const gtsam::Vector& prior(); +}; + // Tectonic SAM Factors #include diff --git a/gtsam_unstable/slam/PartialPriorFactor.h b/gtsam_unstable/slam/PartialPriorFactor.h index b70fe8b19..7722e5d82 100644 
--- a/gtsam_unstable/slam/PartialPriorFactor.h +++ b/gtsam_unstable/slam/PartialPriorFactor.h @@ -50,9 +50,6 @@ namespace gtsam { Vector prior_; ///< Measurement on tangent space parameters, in compressed form. std::vector indices_; ///< Indices of the measured tangent space parameters. - /** default constructor - only use for serialization */ - PartialPriorFactor() {} - /** * constructor with just minimum requirements for a factor - allows more * computation in the constructor. This should only be used by subclasses @@ -65,7 +62,8 @@ namespace gtsam { // Provide access to the Matrix& version of evaluateError: using Base::evaluateError; - ~PartialPriorFactor() override {} + /** default constructor - only use for serialization */ + PartialPriorFactor() {} /** Single Element Constructor: Prior on a single parameter at index 'idx' in the tangent vector.*/ PartialPriorFactor(Key key, size_t idx, double prior, const SharedNoiseModel& model) : @@ -85,6 +83,8 @@ namespace gtsam { assert(model->dim() == (size_t)prior.size()); } + ~PartialPriorFactor() override {} + /// @return a deep copy of this factor gtsam::NonlinearFactor::shared_ptr clone() const override { return std::static_pointer_cast( diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index ba55ac2af..9070d191b 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -8,7 +8,7 @@ endif() # Generate setup.py. 
file(READ "${PROJECT_SOURCE_DIR}/README.md" README_CONTENTS) configure_file(${PROJECT_PYTHON_SOURCE_DIR}/setup.py.in - ${GTSAM_PYTHON_BUILD_DIRECTORY}/setup.py) + ${GTSAM_PYTHON_BUILD_DIRECTORY}/setup.py) # Supply MANIFEST.in for older versions of Python file(COPY ${PROJECT_PYTHON_SOURCE_DIR}/MANIFEST.in @@ -18,22 +18,31 @@ set(WRAP_BUILD_TYPE_POSTFIXES ${GTSAM_BUILD_TYPE_POSTFIXES}) include(PybindWrap) +macro(SET_PYTHON_TARGET_PROPERTIES PYTHON_TARGET OUTPUT_NAME OUTPUT_DIRECTORY) + set_target_properties(${PYTHON_TARGET} PROPERTIES + INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib" + INSTALL_RPATH_USE_LINK_PATH TRUE + OUTPUT_NAME "${OUTPUT_NAME}" + LIBRARY_OUTPUT_DIRECTORY "${OUTPUT_DIRECTORY}" + DEBUG_POSTFIX "" # Otherwise you will have a wrong name + RELWITHDEBINFO_POSTFIX "" # Otherwise you will have a wrong name + TIMING_POSTFIX "" # Otherwise you will have a wrong name + PROFILING_POSTFIX "" # Otherwise you will have a wrong name + ) +endmacro() + ############################################################ ## Load the necessary files to compile the wrapper # Load the pybind11 code - # This is required to avoid an error in modern pybind11 cmake scripts: if(POLICY CMP0057) - cmake_policy(SET CMP0057 NEW) + cmake_policy(SET CMP0057 NEW) endif() -# Prefer system pybind11 first, if not found, rely on bundled version: -find_package(pybind11 CONFIG QUIET) -if (NOT pybind11_FOUND) - add_subdirectory(${PROJECT_SOURCE_DIR}/wrap/pybind11 pybind11) -endif() +# Use bundled pybind11 version +add_subdirectory(${PROJECT_SOURCE_DIR}/wrap/pybind11 pybind11) # Set the wrapping script variable set(PYBIND_WRAP_SCRIPT "${PROJECT_SOURCE_DIR}/wrap/scripts/pybind_wrap.py") @@ -114,14 +123,7 @@ pybind_wrap(${GTSAM_PYTHON_TARGET} # target ${GTSAM_ENABLE_BOOST_SERIALIZATION} # use_boost_serialization ) -set_target_properties(${GTSAM_PYTHON_TARGET} PROPERTIES - INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib" - INSTALL_RPATH_USE_LINK_PATH TRUE - OUTPUT_NAME "${GTSAM_OUTPUT_NAME}" - 
LIBRARY_OUTPUT_DIRECTORY "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam" - DEBUG_POSTFIX "" # Otherwise you will have a wrong name - RELWITHDEBINFO_POSTFIX "" # Otherwise you will have a wrong name - ) +SET_PYTHON_TARGET_PROPERTIES(${GTSAM_PYTHON_TARGET} ${GTSAM_OUTPUT_NAME} "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam") if(WIN32) set_target_properties(${GTSAM_PYTHON_TARGET} PROPERTIES @@ -130,14 +132,14 @@ if(WIN32) ADD_CUSTOM_COMMAND(TARGET ${GTSAM_PYTHON_TARGET} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam/${GTSAM_OUTPUT_NAME}.pyd" - "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam/gtsam.pyd" + "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam/gtsam.pyd" ) ADD_CUSTOM_COMMAND(TARGET ${GTSAM_PYTHON_TARGET} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "$;$" - "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam/" + "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam/" COMMAND_EXPAND_LISTS - VERBATIM + VERBATIM ) endif() @@ -174,6 +176,7 @@ file(COPY "${GTSAM_SOURCE_DIR}/examples/Data" DESTINATION "${GTSAM_MODULE_PATH}" # Add gtsam as a dependency to the install target set(GTSAM_PYTHON_DEPENDENCIES ${GTSAM_PYTHON_TARGET}) +set(GTSAM_PYTHON_INSTALL_EXTRA "") if(GTSAM_UNSTABLE_BUILD_PYTHON) set(ignore @@ -189,6 +192,8 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON) gtsam::BinaryMeasurementsPoint3 gtsam::BinaryMeasurementsUnit3 gtsam::BinaryMeasurementsRot3 + gtsam::SimWall2DVector + gtsam::SimPolygon2DVector gtsam::CameraSetCal3_S2 gtsam::CameraSetCal3Bundler gtsam::CameraSetCal3Unified @@ -198,7 +203,6 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON) gtsam::gtsfm::KeypointsVector gtsam::gtsfm::SfmTrack2dVector) - pybind_wrap(${GTSAM_PYTHON_UNSTABLE_TARGET} # target ${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i # interface_header "gtsam_unstable.cpp" # generated_cpp @@ -211,20 +215,13 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON) ${GTSAM_ENABLE_BOOST_SERIALIZATION} # use_boost_serialization ) - set_target_properties(${GTSAM_PYTHON_UNSTABLE_TARGET} PROPERTIES - INSTALL_RPATH 
"${CMAKE_INSTALL_PREFIX}/lib" - INSTALL_RPATH_USE_LINK_PATH TRUE - OUTPUT_NAME "${GTSAM_UNSTABLE_OUTPUT_NAME}" - LIBRARY_OUTPUT_DIRECTORY "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable" - DEBUG_POSTFIX "" # Otherwise you will have a wrong name - RELWITHDEBINFO_POSTFIX "" # Otherwise you will have a wrong name - ) + SET_PYTHON_TARGET_PROPERTIES(${GTSAM_PYTHON_UNSTABLE_TARGET} ${GTSAM_UNSTABLE_OUTPUT_NAME} "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable") set(GTSAM_UNSTABLE_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable) # Copy all python files to build folder. copy_directory("${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable" - "${GTSAM_UNSTABLE_MODULE_PATH}") + "${GTSAM_UNSTABLE_MODULE_PATH}") # Hack to get python test files copied every time they are modified file(GLOB GTSAM_UNSTABLE_PYTHON_TEST_FILES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable/" "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable/tests/*.py") @@ -237,7 +234,7 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON) if(WIN32) set_target_properties(${GTSAM_PYTHON_UNSTABLE_TARGET} PROPERTIES SUFFIX ".pyd" - ) + ) ADD_CUSTOM_COMMAND(TARGET ${GTSAM_PYTHON_UNSTABLE_TARGET} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable/${GTSAM_UNSTABLE_OUTPUT_NAME}.pyd" @@ -248,9 +245,25 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON) "$;$" "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable/" COMMAND_EXPAND_LISTS - VERBATIM + VERBATIM ) endif() + + add_custom_target( + python-unstable-stubs + COMMAND + ${CMAKE_COMMAND} -E env + "PYTHONPATH=${GTSAM_PYTHON_BUILD_DIRECTORY}/$ENV{PYTHONPATH}" + pybind11-stubgen -o . 
--enum-class-locations \"KernelFunctionType|NoiseFormat:gtsam.gtsam\" --enum-class-locations \"OrderingType:gtsam.gtsam.Ordering\" --numpy-array-use-type-var --ignore-all-errors gtsam_unstable + DEPENDS ${GTSAM_PYTHON_DEPENDENCIES} ${GTSAM_PYTHON_TEST_FILES} ${GTSAM_PYTHON_UNSTABLE_TARGET} + WORKING_DIRECTORY "${GTSAM_PYTHON_BUILD_DIRECTORY}/" + ) + + if(NOT WIN32) + # Add the stubgen target as a dependency to the install target + list(APPEND GTSAM_PYTHON_INSTALL_EXTRA python-unstable-stubs) + endif() + # Custom make command to run all GTSAM_UNSTABLE Python tests add_custom_target( python-test-unstable @@ -263,12 +276,30 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON) ) endif() +add_custom_target( + python-stubs + COMMAND + ${CMAKE_COMMAND} -E env + "PYTHONPATH=${GTSAM_PYTHON_BUILD_DIRECTORY}/$ENV{PYTHONPATH}" + pybind11-stubgen -o . --enum-class-locations \"KernelFunctionType|NoiseFormat:gtsam.gtsam\" --enum-class-locations \"OrderingType:gtsam.gtsam.Ordering\" --numpy-array-use-type-var --ignore-all-errors gtsam + DEPENDS ${GTSAM_PYTHON_DEPENDENCIES} ${GTSAM_PYTHON_TEST_FILES} ${GTSAM_PYTHON_TARGET} + WORKING_DIRECTORY "${GTSAM_PYTHON_BUILD_DIRECTORY}/" +) + +if(NOT WIN32) + # Add the stubgen target as a dependency to the install target + list(APPEND GTSAM_PYTHON_INSTALL_EXTRA python-stubs) +endif() + # Add custom target so we can install with `make python-install` +# Note below we make sure to install with --user iff not in a virtualenv set(GTSAM_PYTHON_INSTALL_TARGET python-install) + add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET} - COMMAND ${PYTHON_EXECUTABLE} -m pip install --user . 
- DEPENDS ${GTSAM_PYTHON_DEPENDENCIES} - WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY}) + COMMAND ${PYTHON_EXECUTABLE} -c "import sys, subprocess; cmd = [sys.executable, '-m', 'pip', 'install']; has_venv = hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix); cmd.append('--user' if not has_venv else ''); cmd.append('.'); subprocess.check_call([c for c in cmd if c])" + DEPENDS ${GTSAM_PYTHON_DEPENDENCIES} ${GTSAM_PYTHON_INSTALL_EXTRA} + WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY} + VERBATIM) # Custom make command to run all GTSAM Python tests add_custom_target( diff --git a/python/CustomFactors.md b/python/CustomFactors.md index 39a840e34..0a387bb4f 100644 --- a/python/CustomFactors.md +++ b/python/CustomFactors.md @@ -18,12 +18,14 @@ def error_func(this: gtsam.CustomFactor, v: gtsam.Values, H: List[np.ndarray]) - `this` is a reference to the `CustomFactor` object. This is required because one can reuse the same `error_func` for multiple factors. `v` is a reference to the current set of values, and `H` is a list of **references** to the list of required Jacobians (see the corresponding C++ documentation). Note that -the error returned must be a 1D numpy array. +the error returned must be a 1D `numpy` array. If `H` is `None`, it means the current factor evaluation does not need Jacobians. For example, the `error` method on a factor does not need Jacobians, so we don't evaluate them to save CPU. If `H` is not `None`, each entry of `H` can be assigned a (2D) `numpy` array, as the Jacobian for the corresponding variable. +All `numpy` matrices inside should be using `order="F"` to maintain interoperability with C++. 
+ After defining `error_func`, one can create a `CustomFactor` just like any other factor in GTSAM: ```python diff --git a/python/dev_requirements.txt b/python/dev_requirements.txt index 6970ee613..94ab40189 100644 --- a/python/dev_requirements.txt +++ b/python/dev_requirements.txt @@ -1,2 +1,3 @@ -r requirements.txt -pyparsing>=2.4.2 \ No newline at end of file +pyparsing>=2.4.2 +pybind11-stubgen>=2.5.1 \ No newline at end of file diff --git a/python/gtsam/examples/CameraResectioning.py b/python/gtsam/examples/CameraResectioning.py new file mode 100644 index 000000000..e962b40bb --- /dev/null +++ b/python/gtsam/examples/CameraResectioning.py @@ -0,0 +1,85 @@ +# pylint: disable=consider-using-from-import,invalid-name,no-name-in-module,no-member,missing-function-docstring +""" +This is a 1:1 transcription of CameraResectioning.cpp. +""" +import numpy as np +from gtsam import Cal3_S2, CustomFactor, LevenbergMarquardtOptimizer, KeyVector +from gtsam import NonlinearFactor, NonlinearFactorGraph +from gtsam import PinholeCameraCal3_S2, Point2, Point3, Pose3, Rot3, Values +from gtsam.noiseModel import Base as SharedNoiseModel, Diagonal +from gtsam.symbol_shorthand import X + + +def resectioning_factor( + model: SharedNoiseModel, + key: int, + calib: Cal3_S2, + p: Point2, + P: Point3, +) -> NonlinearFactor: + + def error_func(this: CustomFactor, v: Values, H: list[np.ndarray]) -> np.ndarray: + pose = v.atPose3(this.keys()[0]) + camera = PinholeCameraCal3_S2(pose, calib) + if H is None: + return camera.project(P) - p + Dpose = np.zeros((2, 6), order="F") + Dpoint = np.zeros((2, 3), order="F") + Dcal = np.zeros((2, 5), order="F") + result = camera.project(P, Dpose, Dpoint, Dcal) - p + H[0] = Dpose + return result + + return CustomFactor(model, KeyVector([key]), error_func) + + +def main() -> None: + """ + Camera: f = 1, Image: 100x100, center: 50, 50.0 + Pose (ground truth): (Xw, -Yw, -Zw, [0,0,2.0]') + Known landmarks: + 3D Points: (10,10,0) (-10,10,0) (-10,-10,0) 
(10,-10,0) + Perfect measurements: + 2D Point: (55,45) (45,45) (45,55) (55,55) + """ + + # read camera intrinsic parameters + calib = Cal3_S2(1, 1, 0, 50, 50) + + # 1. create graph + graph = NonlinearFactorGraph() + + # 2. add factors to the graph + measurement_noise = Diagonal.Sigmas(np.array([0.5, 0.5])) + graph.add( + resectioning_factor( + measurement_noise, X(1), calib, Point2(55, 45), Point3(10, 10, 0) + ) + ) + graph.add( + resectioning_factor( + measurement_noise, X(1), calib, Point2(45, 45), Point3(-10, 10, 0) + ) + ) + graph.add( + resectioning_factor( + measurement_noise, X(1), calib, Point2(45, 55), Point3(-10, -10, 0) + ) + ) + graph.add( + resectioning_factor( + measurement_noise, X(1), calib, Point2(55, 55), Point3(10, -10, 0) + ) + ) + + # 3. Create an initial estimate for the camera pose + initial: Values = Values() + initial.insert(X(1), Pose3(Rot3(1, 0, 0, 0, -1, 0, 0, 0, -1), Point3(0, 0, 1))) + + # 4. Optimize the graph using Levenberg-Marquardt + result: Values = LevenbergMarquardtOptimizer(graph, initial).optimize() + result.print("Final result:\n") + + +if __name__ == "__main__": + main() diff --git a/python/gtsam/examples/README.md b/python/gtsam/examples/README.md index 2a2c9f2ef..70f6cfcb0 100644 --- a/python/gtsam/examples/README.md +++ b/python/gtsam/examples/README.md @@ -17,7 +17,7 @@ | InverseKinematicsExampleExpressions.cpp | | | ISAM2Example_SmartFactor | | | ISAM2_SmartFactorStereo_IMU | | -| LocalizationExample | impossible? 
| +| LocalizationExample | :heavy_check_mark: | | METISOrderingExample | | | OdometryExample | :heavy_check_mark: | | PlanarSLAMExample | :heavy_check_mark: | diff --git a/python/gtsam/examples/SelfCalibrationExample.py b/python/gtsam/examples/SelfCalibrationExample.py new file mode 100644 index 000000000..a1919e14e --- /dev/null +++ b/python/gtsam/examples/SelfCalibrationExample.py @@ -0,0 +1,122 @@ +# pylint: disable=unused-import,consider-using-from-import,invalid-name,no-name-in-module,no-member,missing-function-docstring,too-many-locals +""" +Transcription of SelfCalibrationExample.cpp +""" +import math + +from gtsam import Cal3_S2 +from gtsam.noiseModel import Diagonal, Isotropic + +# SFM-specific factors +from gtsam import GeneralSFMFactor2Cal3_S2 # does calibration ! +from gtsam import PinholeCameraCal3_S2 + +# Camera observations of landmarks (i.e. pixel coordinates) will be stored as Point2 (x, y). +from gtsam import Point2 +from gtsam import Point3, Pose3, Rot3 + +# Inference and optimization +from gtsam import NonlinearFactorGraph, DoglegOptimizer, Values +from gtsam.symbol_shorthand import K, L, X + + +# this is a direct translation of examples/SFMData.h +# which is slightly different from python/gtsam/examples/SFMdata.py. 
+def createPoints() -> list[Point3]: + """ + Create the set of ground-truth landmarks + """ + return [ + Point3(10.0, 10.0, 10.0), + Point3(-10.0, 10.0, 10.0), + Point3(-10.0, -10.0, 10.0), + Point3(10.0, -10.0, 10.0), + Point3(10.0, 10.0, -10.0), + Point3(-10.0, 10.0, -10.0), + Point3(-10.0, -10.0, -10.0), + Point3(10.0, -10.0, -10.0), + ] + + +def createPoses( + init: Pose3 = Pose3(Rot3.Ypr(math.pi / 2, 0, -math.pi / 2), Point3(30, 0, 0)), + delta: Pose3 = Pose3( + Rot3.Ypr(0, -math.pi / 4, 0), + Point3(math.sin(math.pi / 4) * 30, 0, 30 * (1 - math.sin(math.pi / 4))), + ), + steps: int = 8, +) -> list[Pose3]: + """ + Create the set of ground-truth poses + Default values give a circular trajectory, + radius 30 at pi/4 intervals, always facing the circle center + """ + poses: list[Pose3] = [] + poses.append(init) + for i in range(1, steps): + poses.append(poses[i - 1].compose(delta)) + return poses + + +def main() -> None: + # Create the set of ground-truth + points: list[Point3] = createPoints() + poses: list[Pose3] = createPoses() + + # Create the factor graph + graph = NonlinearFactorGraph() + + # Add a prior on pose x1. + # 30cm std on x,y,z 0.1 rad on roll,pitch,yaw + poseNoise = Diagonal.Sigmas([0.1, 0.1, 0.1, 0.3, 0.3, 0.3]) + graph.addPriorPose3(X(0), poses[0], poseNoise) + + # Simulated measurements from each camera pose, adding them to the factor graph + Kcal = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) + measurementNoise = Isotropic.Sigma(2, 1.0) + for i, pose in enumerate(poses): + for j, point in enumerate(points): + camera = PinholeCameraCal3_S2(pose, Kcal) + measurement: Point2 = camera.project(point) + # The only real difference with the Visual SLAM example is that here we + # use a different factor type, that also calculates the Jacobian with + # respect to calibration + graph.add( + GeneralSFMFactor2Cal3_S2( + measurement, + measurementNoise, + X(i), + L(j), + K(0), + ) + ) + + # Add a prior on the position of the first landmark. 
+ pointNoise = Isotropic.Sigma(3, 0.1) + graph.addPriorPoint3(L(0), points[0], pointNoise) # add directly to graph + + # Add a prior on the calibration. + calNoise = Diagonal.Sigmas([500, 500, 0.1, 100, 100]) + graph.addPriorCal3_S2(K(0), Kcal, calNoise) + + # Create the initial estimate to the solution + # now including an estimate on the camera calibration parameters + initialEstimate = Values() + initialEstimate.insert(K(0), Cal3_S2(60.0, 60.0, 0.0, 45.0, 45.0)) + for i, pose in enumerate(poses): + initialEstimate.insert( + X(i), + pose.compose( + Pose3(Rot3.Rodrigues(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20)) + ), + ) + for j, point in enumerate(points): + initialEstimate.insert(L(j), point + Point3(-0.25, 0.20, 0.15)) + + # Optimize the graph and print results + result: Values = DoglegOptimizer(graph, initialEstimate).optimize() + result.print("Final results:\n") + + +if __name__ == "__main__": + main() diff --git a/python/gtsam/gtsam.tpl b/python/gtsam/gtsam.tpl index c72a216a2..6876b4ab4 100644 --- a/python/gtsam/gtsam.tpl +++ b/python/gtsam/gtsam.tpl @@ -39,12 +39,12 @@ namespace py = pybind11; {module_def} {{ m_.doc() = "pybind11 wrapper of {module_name}"; +// Specializations for STL classes +#include "python/gtsam/specializations/{module_name}.h" + {submodules_init} {wrapped_namespace} -// Specializations for STL classes -#include "python/gtsam/specializations/{module_name}.h" - }} diff --git a/python/gtsam/tests/test_HybridBayesNet.py b/python/gtsam/tests/test_HybridBayesNet.py index bc685dec6..57346d4d4 100644 --- a/python/gtsam/tests/test_HybridBayesNet.py +++ b/python/gtsam/tests/test_HybridBayesNet.py @@ -17,9 +17,10 @@ import numpy as np from gtsam.symbol_shorthand import A, X from gtsam.utils.test_case import GtsamTestCase -from gtsam import (DiscreteConditional, DiscreteKeys, DiscreteValues, - GaussianConditional, GaussianMixture, HybridBayesNet, - HybridValues, VectorValues, noiseModel) +from gtsam import (DiscreteConditional, DiscreteValues, + 
GaussianConditional, HybridBayesNet, + HybridGaussianConditional, HybridValues, VectorValues, + noiseModel) class TestHybridBayesNet(GtsamTestCase): @@ -42,15 +43,12 @@ class TestHybridBayesNet(GtsamTestCase): # Create the conditionals conditional0 = GaussianConditional(X(1), [5], I_1x1, model0) conditional1 = GaussianConditional(X(1), [2], I_1x1, model1) - discrete_keys = DiscreteKeys() - discrete_keys.push_back(Asia) # Create hybrid Bayes net. bayesNet = HybridBayesNet() bayesNet.push_back(conditional) bayesNet.push_back( - GaussianMixture([X(1)], [], discrete_keys, - [conditional0, conditional1])) + HybridGaussianConditional(Asia, [conditional0, conditional1])) bayesNet.push_back(DiscreteConditional(Asia, "99/1")) # Create values at which to evaluate. @@ -91,8 +89,7 @@ class TestHybridBayesNet(GtsamTestCase): self.assertTrue(probability >= 0.0) logProb = conditional.logProbability(values) self.assertAlmostEqual(probability, np.exp(logProb)) - expected = conditional.logNormalizationConstant() - \ - conditional.error(values) + expected = -(conditional.negLogConstant() + conditional.error(values)) self.assertAlmostEqual(logProb, expected) diff --git a/python/gtsam/tests/test_HybridFactorGraph.py b/python/gtsam/tests/test_HybridFactorGraph.py index 499afe09f..6d609deb0 100644 --- a/python/gtsam/tests/test_HybridFactorGraph.py +++ b/python/gtsam/tests/test_HybridFactorGraph.py @@ -17,26 +17,25 @@ from gtsam.symbol_shorthand import C, M, X, Z from gtsam.utils.test_case import GtsamTestCase import gtsam -from gtsam import (DiscreteConditional, DiscreteKeys, GaussianConditional, - GaussianMixture, GaussianMixtureFactor, HybridBayesNet, - HybridGaussianFactorGraph, HybridValues, JacobianFactor, - Ordering, noiseModel) +from gtsam import (DiscreteConditional, GaussianConditional, + HybridBayesNet, HybridGaussianConditional, + HybridGaussianFactor, HybridGaussianFactorGraph, + HybridValues, JacobianFactor, noiseModel) DEBUG_MARGINALS = False class 
TestHybridGaussianFactorGraph(GtsamTestCase): """Unit tests for HybridGaussianFactorGraph.""" + def test_create(self): """Test construction of hybrid factor graph.""" model = noiseModel.Unit.Create(3) - dk = DiscreteKeys() - dk.push_back((C(0), 2)) jf1 = JacobianFactor(X(0), np.eye(3), np.zeros((3, 1)), model) jf2 = JacobianFactor(X(0), np.eye(3), np.ones((3, 1)), model) - gmf = GaussianMixtureFactor([X(0)], dk, [jf1, jf2]) + gmf = HybridGaussianFactor((C(0), 2), [(jf1, 0), (jf2, 0)]) hfg = HybridGaussianFactorGraph() hfg.push_back(jf1) @@ -47,9 +46,9 @@ class TestHybridGaussianFactorGraph(GtsamTestCase): self.assertEqual(hbn.size(), 2) - mixture = hbn.at(0).inner() - self.assertIsInstance(mixture, GaussianMixture) - self.assertEqual(len(mixture.keys()), 2) + hybridCond = hbn.at(0).inner() + self.assertIsInstance(hybridCond, HybridGaussianConditional) + self.assertEqual(len(hybridCond.keys()), 2) discrete_conditional = hbn.at(hbn.size() - 1).inner() self.assertIsInstance(discrete_conditional, DiscreteConditional) @@ -57,13 +56,11 @@ class TestHybridGaussianFactorGraph(GtsamTestCase): def test_optimize(self): """Test construction of hybrid factor graph.""" model = noiseModel.Unit.Create(3) - dk = DiscreteKeys() - dk.push_back((C(0), 2)) jf1 = JacobianFactor(X(0), np.eye(3), np.zeros((3, 1)), model) jf2 = JacobianFactor(X(0), np.eye(3), np.ones((3, 1)), model) - gmf = GaussianMixtureFactor([X(0)], dk, [jf1, jf2]) + gmf = HybridGaussianFactor((C(0), 2), [(jf1, 0), (jf2, 0)]) hfg = HybridGaussianFactorGraph() hfg.push_back(jf1) @@ -93,10 +90,8 @@ class TestHybridGaussianFactorGraph(GtsamTestCase): # Create mode key: 0 is low-noise, 1 is high-noise. mode = (M(0), 2) - # Create Gaussian mixture Z(0) = X(0) + noise for each measurement. + # Create hybrid Gaussian conditional Z(0) = X(0) + noise for each measurement. 
I_1x1 = np.eye(1) - keys = DiscreteKeys() - keys.push_back(mode) for i in range(num_measurements): conditional0 = GaussianConditional.FromMeanAndStddev(Z(i), I_1x1, @@ -106,8 +101,8 @@ class TestHybridGaussianFactorGraph(GtsamTestCase): I_1x1, X(0), [0], sigma=3) - bayesNet.push_back(GaussianMixture([Z(i)], [X(0)], keys, - [conditional0, conditional1])) + bayesNet.push_back( + HybridGaussianConditional(mode, [conditional0, conditional1])) # Create prior on X(0). prior_on_x0 = GaussianConditional.FromMeanAndStddev( @@ -219,9 +214,9 @@ class TestHybridGaussianFactorGraph(GtsamTestCase): # Check ratio between unnormalized posterior and factor graph is the same for all modes: for mode in [1, 0]: values.insert_or_assign(M(0), mode) - self.assertAlmostEqual(bayesNet.evaluate(values) / - np.exp(-fg.error(values)), - 0.6366197723675815) + self.assertAlmostEqual( + bayesNet.evaluate(values) / np.exp(-fg.error(values)), + 0.6366197723675815) self.assertAlmostEqual(bayesNet.error(values), fg.error(values)) # Test elimination. 
diff --git a/python/gtsam/tests/test_HybridNonlinearFactorGraph.py b/python/gtsam/tests/test_HybridNonlinearFactorGraph.py index ed26a0c81..1880c18f2 100644 --- a/python/gtsam/tests/test_HybridNonlinearFactorGraph.py +++ b/python/gtsam/tests/test_HybridNonlinearFactorGraph.py @@ -14,33 +14,31 @@ from __future__ import print_function import unittest -import numpy as np -from gtsam.symbol_shorthand import C, X from gtsam.utils.test_case import GtsamTestCase -from gtsam import BetweenFactorPoint3, noiseModel, PriorFactorPoint3, Point3 import gtsam +from gtsam import BetweenFactorPoint3, Point3, PriorFactorPoint3, noiseModel class TestHybridGaussianFactorGraph(GtsamTestCase): """Unit tests for HybridGaussianFactorGraph.""" + def test_nonlinear_hybrid(self): nlfg = gtsam.HybridNonlinearFactorGraph() - dk = gtsam.DiscreteKeys() - dk.push_back((10, 2)) - nlfg.push_back(BetweenFactorPoint3(1, 2, Point3( - 1, 2, 3), noiseModel.Diagonal.Variances([1, 1, 1]))) + nlfg.push_back( + BetweenFactorPoint3(1, 2, Point3(1, 2, 3), + noiseModel.Diagonal.Variances([1, 1, 1]))) nlfg.push_back( PriorFactorPoint3(2, Point3(1, 2, 3), noiseModel.Diagonal.Variances([0.5, 0.5, 0.5]))) - nlfg.push_back( - gtsam.MixtureFactor([1], dk, [ - PriorFactorPoint3(1, Point3(0, 0, 0), - noiseModel.Unit.Create(3)), - PriorFactorPoint3(1, Point3(1, 2, 1), - noiseModel.Unit.Create(3)) - ])) - nlfg.push_back(gtsam.DecisionTreeFactor((10, 2), "1 3")) + + factors = [(PriorFactorPoint3(1, Point3(0, 0, 0), + noiseModel.Unit.Create(3)), 0.0), + (PriorFactorPoint3(1, Point3(1, 2, 1), + noiseModel.Unit.Create(3)), 0.0)] + mode = (10, 2) + nlfg.push_back(gtsam.HybridNonlinearFactor(mode, factors)) + nlfg.push_back(gtsam.DecisionTreeFactor(mode, "1 3")) values = gtsam.Values() values.insert_point3(1, Point3(0, 0, 0)) values.insert_point3(2, Point3(2, 3, 1)) diff --git a/python/gtsam/tests/test_HybridValues.py b/python/gtsam/tests/test_HybridValues.py index 8a54d518c..73c27a4cd 100644 --- 
a/python/gtsam/tests/test_HybridValues.py +++ b/python/gtsam/tests/test_HybridValues.py @@ -14,11 +14,12 @@ from __future__ import print_function import unittest -import gtsam import numpy as np from gtsam.symbol_shorthand import C, X from gtsam.utils.test_case import GtsamTestCase +import gtsam + class TestHybridValues(GtsamTestCase): """Unit tests for HybridValues.""" diff --git a/python/gtsam/tests/test_NonlinearOptimizer.py b/python/gtsam/tests/test_NonlinearOptimizer.py index a47c5ad62..1b4209509 100644 --- a/python/gtsam/tests/test_NonlinearOptimizer.py +++ b/python/gtsam/tests/test_NonlinearOptimizer.py @@ -14,15 +14,16 @@ from __future__ import print_function import unittest -import gtsam -from gtsam import ( - DoglegOptimizer, DoglegParams, DummyPreconditionerParameters, GaussNewtonOptimizer, - GaussNewtonParams, GncLMParams, GncLossType, GncLMOptimizer, LevenbergMarquardtOptimizer, - LevenbergMarquardtParams, NonlinearFactorGraph, Ordering, PCGSolverParameters, Point2, - PriorFactorPoint2, Values -) from gtsam.utils.test_case import GtsamTestCase +import gtsam +from gtsam import (DoglegOptimizer, DoglegParams, + DummyPreconditionerParameters, GaussNewtonOptimizer, + GaussNewtonParams, GncLMOptimizer, GncLMParams, GncLossType, + LevenbergMarquardtOptimizer, LevenbergMarquardtParams, + NonlinearFactorGraph, Ordering, PCGSolverParameters, Point2, + PriorFactorPoint2, Values) + KEY1 = 1 KEY2 = 2 @@ -136,7 +137,7 @@ class TestScenario(GtsamTestCase): # Test optimizer params optimizer = GncLMOptimizer(self.fg, self.initial_values, params) for ict_factor in (0.9, 1.1): - new_ict = ict_factor * optimizer.getInlierCostThresholds() + new_ict = ict_factor * optimizer.getInlierCostThresholds().item() optimizer.setInlierCostThresholds(new_ict) self.assertAlmostEqual(optimizer.getInlierCostThresholds(), new_ict) for w_factor in (0.8, 0.9): diff --git a/python/gtsam/tests/test_Pose3.py b/python/gtsam/tests/test_Pose3.py index 32b3ad47f..86c5bc9a4 100644 --- 
a/python/gtsam/tests/test_Pose3.py +++ b/python/gtsam/tests/test_Pose3.py @@ -17,63 +17,7 @@ from gtsam.utils.test_case import GtsamTestCase import gtsam from gtsam import Point3, Pose3, Rot3 - - -def numerical_derivative_pose(pose, method, delta=1e-5): - jacobian = np.zeros((6, 6)) - for idx in range(6): - xplus = np.zeros(6) - xplus[idx] = delta - xminus = np.zeros(6) - xminus[idx] = -delta - pose_plus = pose.retract(xplus).__getattribute__(method)() - pose_minus = pose.retract(xminus).__getattribute__(method)() - jacobian[:, idx] = pose_minus.localCoordinates(pose_plus) / (2 * delta) - return jacobian - - -def numerical_derivative_2_poses(pose, other_pose, method, delta=1e-5, inputs=()): - jacobian = np.zeros((6, 6)) - other_jacobian = np.zeros((6, 6)) - for idx in range(6): - xplus = np.zeros(6) - xplus[idx] = delta - xminus = np.zeros(6) - xminus[idx] = -delta - - pose_plus = pose.retract(xplus).__getattribute__(method)(*inputs, other_pose) - pose_minus = pose.retract(xminus).__getattribute__(method)(*inputs, other_pose) - jacobian[:, idx] = pose_minus.localCoordinates(pose_plus) / (2 * delta) - - other_pose_plus = pose.__getattribute__(method)(*inputs, other_pose.retract(xplus)) - other_pose_minus = pose.__getattribute__(method)(*inputs, other_pose.retract(xminus)) - other_jacobian[:, idx] = other_pose_minus.localCoordinates(other_pose_plus) / (2 * delta) - return jacobian, other_jacobian - - -def numerical_derivative_pose_point(pose, point, method, delta=1e-5): - jacobian = np.zeros((3, 6)) - point_jacobian = np.zeros((3, 3)) - for idx in range(6): - xplus = np.zeros(6) - xplus[idx] = delta - xminus = np.zeros(6) - xminus[idx] = -delta - - point_plus = pose.retract(xplus).__getattribute__(method)(point) - point_minus = pose.retract(xminus).__getattribute__(method)(point) - jacobian[:, idx] = (point_plus - point_minus) / (2 * delta) - - if idx < 3: - xplus = np.zeros(3) - xplus[idx] = delta - xminus = np.zeros(3) - xminus[idx] = -delta - point_plus = 
pose.__getattribute__(method)(point + xplus) - point_minus = pose.__getattribute__(method)(point + xminus) - point_jacobian[:, idx] = (point_plus - point_minus) / (2 * delta) - return jacobian, point_jacobian - +from gtsam.utils.numerical_derivative import numericalDerivative11, numericalDerivative21, numericalDerivative22 class TestPose3(GtsamTestCase): """Test selected Pose3 methods.""" @@ -90,7 +34,8 @@ class TestPose3(GtsamTestCase): jacobian = np.zeros((6, 6), order='F') jacobian_other = np.zeros((6, 6), order='F') T2.between(T3, jacobian, jacobian_other) - jacobian_numerical, jacobian_numerical_other = numerical_derivative_2_poses(T2, T3, 'between') + jacobian_numerical = numericalDerivative21(Pose3.between, T2, T3) + jacobian_numerical_other = numericalDerivative22(Pose3.between, T2, T3) self.gtsamAssertEquals(jacobian, jacobian_numerical) self.gtsamAssertEquals(jacobian_other, jacobian_numerical_other) @@ -104,7 +49,7 @@ class TestPose3(GtsamTestCase): #test jacobians jacobian = np.zeros((6, 6), order='F') pose.inverse(jacobian) - jacobian_numerical = numerical_derivative_pose(pose, 'inverse') + jacobian_numerical = numericalDerivative11(Pose3.inverse, pose) self.gtsamAssertEquals(jacobian, jacobian_numerical) def test_slerp(self): @@ -123,7 +68,8 @@ class TestPose3(GtsamTestCase): jacobian = np.zeros((6, 6), order='F') jacobian_other = np.zeros((6, 6), order='F') pose0.slerp(0.5, pose1, jacobian, jacobian_other) - jacobian_numerical, jacobian_numerical_other = numerical_derivative_2_poses(pose0, pose1, 'slerp', inputs=[0.5]) + jacobian_numerical = numericalDerivative11(lambda x: x.slerp(0.5, pose1), pose0) + jacobian_numerical_other = numericalDerivative11(lambda x: pose0.slerp(0.5, x), pose1) self.gtsamAssertEquals(jacobian, jacobian_numerical) self.gtsamAssertEquals(jacobian_other, jacobian_numerical_other) @@ -139,7 +85,8 @@ class TestPose3(GtsamTestCase): jacobian_pose = np.zeros((3, 6), order='F') jacobian_point = np.zeros((3, 3), order='F') 
pose.transformTo(point, jacobian_pose, jacobian_point) - jacobian_numerical_pose, jacobian_numerical_point = numerical_derivative_pose_point(pose, point, 'transformTo') + jacobian_numerical_pose = numericalDerivative21(Pose3.transformTo, pose, point) + jacobian_numerical_point = numericalDerivative22(Pose3.transformTo, pose, point) self.gtsamAssertEquals(jacobian_pose, jacobian_numerical_pose) self.gtsamAssertEquals(jacobian_point, jacobian_numerical_point) @@ -162,7 +109,8 @@ class TestPose3(GtsamTestCase): jacobian_pose = np.zeros((3, 6), order='F') jacobian_point = np.zeros((3, 3), order='F') pose.transformFrom(point, jacobian_pose, jacobian_point) - jacobian_numerical_pose, jacobian_numerical_point = numerical_derivative_pose_point(pose, point, 'transformFrom') + jacobian_numerical_pose = numericalDerivative21(Pose3.transformFrom, pose, point) + jacobian_numerical_point = numericalDerivative22(Pose3.transformFrom, pose, point) self.gtsamAssertEquals(jacobian_pose, jacobian_numerical_pose) self.gtsamAssertEquals(jacobian_point, jacobian_numerical_point) diff --git a/python/gtsam/tests/test_numerical_derivative.py b/python/gtsam/tests/test_numerical_derivative.py new file mode 100644 index 000000000..47d259585 --- /dev/null +++ b/python/gtsam/tests/test_numerical_derivative.py @@ -0,0 +1,125 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Unit tests for IMU numerical_derivative module. 
+Author: Frank Dellaert & Joel Truher +""" +# pylint: disable=invalid-name, no-name-in-module + +import unittest +import numpy as np + +from gtsam import Pose3, Rot3, Point3 +from gtsam.utils.numerical_derivative import numericalDerivative11, numericalDerivative21, numericalDerivative22, numericalDerivative33 + + +class TestNumericalDerivatives(unittest.TestCase): + def test_numericalDerivative11_scalar(self): + # Test function of one variable + def h(x): + return x ** 2 + + x = np.array([3.0]) + # Analytical derivative: dh/dx = 2x + analytical_derivative = np.array([[2.0 * x[0]]]) + + # Compute numerical derivative + numerical_derivative = numericalDerivative11(h, x) + + # Check if numerical derivative is close to analytical derivative + np.testing.assert_allclose( + numerical_derivative, analytical_derivative, rtol=1e-5 + ) + + def test_numericalDerivative11_vector(self): + # Test function of one vector variable + def h(x): + return x ** 2 + + x = np.array([1.0, 2.0, 3.0]) + # Analytical derivative: dh/dx = 2x + analytical_derivative = np.diag(2.0 * x) + + numerical_derivative = numericalDerivative11(h, x) + + np.testing.assert_allclose( + numerical_derivative, analytical_derivative, rtol=1e-5 + ) + + def test_numericalDerivative21(self): + # Test function of two variables, derivative with respect to first variable + def h(x1, x2): + return x1 * np.sin(x2) + + x1 = np.array([2.0]) + x2 = np.array([np.pi / 4]) + # Analytical derivative: dh/dx1 = sin(x2) + analytical_derivative = np.array([[np.sin(x2[0])]]) + + numerical_derivative = numericalDerivative21(h, x1, x2) + + np.testing.assert_allclose( + numerical_derivative, analytical_derivative, rtol=1e-5 + ) + + def test_numericalDerivative22(self): + # Test function of two variables, derivative with respect to second variable + def h(x1, x2): + return x1 * np.sin(x2) + + x1 = np.array([2.0]) + x2 = np.array([np.pi / 4]) + # Analytical derivative: dh/dx2 = x1 * cos(x2) + analytical_derivative = np.array([[x1[0] * 
np.cos(x2[0])]]) + + numerical_derivative = numericalDerivative22(h, x1, x2) + + np.testing.assert_allclose( + numerical_derivative, analytical_derivative, rtol=1e-5 + ) + + def test_numericalDerivative33(self): + # Test function of three variables, derivative with respect to third variable + def h(x1, x2, x3): + return x1 * x2 + np.exp(x3) + + x1 = np.array([1.0]) + x2 = np.array([2.0]) + x3 = np.array([0.5]) + # Analytical derivative: dh/dx3 = exp(x3) + analytical_derivative = np.array([[np.exp(x3[0])]]) + + numerical_derivative = numericalDerivative33(h, x1, x2, x3) + + np.testing.assert_allclose( + numerical_derivative, analytical_derivative, rtol=1e-5 + ) + + def test_numericalDerivative_with_pose(self): + # Test function with manifold and vector inputs + + def h(pose:Pose3, point:np.ndarray): + return pose.transformFrom(point) + + # Values from testPose3.cpp + P = Point3(0.2,0.7,-2) + R = Rot3.Rodrigues(0.3,0,0) + P2 = Point3(3.5,-8.2,4.2) + T = Pose3(R,P2) + + analytic_H1 = np.zeros((3,6), order='F', dtype=float) + analytic_H2 = np.zeros((3,3), order='F', dtype=float) + y = T.transformFrom(P, analytic_H1, analytic_H2) + + numerical_H1 = numericalDerivative21(h, T, P) + numerical_H2 = numericalDerivative22(h, T, P) + + np.testing.assert_allclose(numerical_H1, analytic_H1, rtol=1e-5) + np.testing.assert_allclose(numerical_H2, analytic_H2, rtol=1e-5) + +if __name__ == "__main__": + unittest.main() \ No newline at end of file diff --git a/python/gtsam/utils/numerical_derivative.py b/python/gtsam/utils/numerical_derivative.py new file mode 100644 index 000000000..3b52f5a5f --- /dev/null +++ b/python/gtsam/utils/numerical_derivative.py @@ -0,0 +1,228 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Numerical derivative functions. 
+Author: Joel Truher & Frank Dellaert +""" + +# pylint: disable=C0103,C0114,C0116,E0611,R0913 +# mypy: disable-error-code="import-untyped" +# see numericalDerivative.h + +# pybind wants to wrap concrete types, which would have been +# a whole lot of them, so i reimplemented the part of this that +# I needed, using the python approach to "generic" typing. + +from typing import Callable, TypeVar +import numpy as np + +Y = TypeVar("Y") +X = TypeVar("X") +X1 = TypeVar("X1") +X2 = TypeVar("X2") +X3 = TypeVar("X3") +X4 = TypeVar("X4") +X5 = TypeVar("X5") +X6 = TypeVar("X6") + + +def local(a: Y, b: Y) -> np.ndarray: + if type(a) is not type(b): + raise TypeError(f"a {type(a)} b {type(b)}") + if isinstance(a, np.ndarray): + return b - a + if isinstance(a, (float, int)): + return np.array([[b - a]]) # type:ignore + # there is no common superclass for Y + return a.localCoordinates(b) # type:ignore + + +def retract(a, xi: np.ndarray): + if isinstance(a, (np.ndarray, float, int)): + return a + xi + return a.retract(xi) + + +def numericalDerivative11(h: Callable[[X], Y], x: X, delta=1e-5) -> np.ndarray: + hx: Y = h(x) + zeroY = local(hx, hx) + m = zeroY.shape[0] + zeroX = local(x, x) + N = zeroX.shape[0] + dx = np.zeros(N) + H = np.zeros((m, N)) + factor: float = 1.0 / (2.0 * delta) + for j in range(N): + dx[j] = delta + dy1 = local(hx, h(retract(x, dx))) + dx[j] = -delta + dy2 = local(hx, h(retract(x, dx))) + dx[j] = 0 + H[:, j] = (dy1 - dy2) * factor + return H + + +def numericalDerivative21( + h: Callable[[X1, X2], Y], x1: X1, x2: X2, delta=1e-5 +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x, x2), x1, delta) + + +def numericalDerivative22( + h: Callable[[X1, X2], Y], x1: X1, x2: X2, delta=1e-5 +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x1, x), x2, delta) + + +def numericalDerivative31( + h: Callable[[X1, X2, X3], Y], x1: X1, x2: X2, x3: X3, delta=1e-5 +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x, x2, x3), x1, delta) + + 
+def numericalDerivative32( + h: Callable[[X1, X2, X3], Y], x1: X1, x2: X2, x3: X3, delta=1e-5 +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x1, x, x3), x2, delta) + + +def numericalDerivative33( + h: Callable[[X1, X2, X3], Y], x1: X1, x2: X2, x3: X3, delta=1e-5 +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x1, x2, x), x3, delta) + + +def numericalDerivative41( + h: Callable[[X1, X2, X3, X4], Y], x1: X1, x2: X2, x3: X3, x4: X4, delta=1e-5 +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x, x2, x3, x4), x1, delta) + + +def numericalDerivative42( + h: Callable[[X1, X2, X3, X4], Y], x1: X1, x2: X2, x3: X3, x4: X4, delta=1e-5 +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x1, x, x3, x4), x2, delta) + + +def numericalDerivative43( + h: Callable[[X1, X2, X3, X4], Y], x1: X1, x2: X2, x3: X3, x4: X4, delta=1e-5 +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x1, x2, x, x4), x3, delta) + + +def numericalDerivative44( + h: Callable[[X1, X2, X3, X4], Y], x1: X1, x2: X2, x3: X3, x4: X4, delta=1e-5 +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x1, x2, x3, x), x4, delta) + + +def numericalDerivative51( + h: Callable[[X1, X2, X3, X4, X5], Y], x1: X1, x2: X2, x3: X3, x4: X4, x5: X5, delta=1e-5 +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x, x2, x3, x4, x5), x1, delta) + + +def numericalDerivative52( + h: Callable[[X1, X2, X3, X4, X5], Y], x1: X1, x2: X2, x3: X3, x4: X4, x5: X5, delta=1e-5 +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x1, x, x3, x4, x5), x2, delta) + + +def numericalDerivative53( + h: Callable[[X1, X2, X3, X4, X5], Y], x1: X1, x2: X2, x3: X3, x4: X4, x5: X5, delta=1e-5 +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x1, x2, x, x4, x5), x3, delta) + + +def numericalDerivative54( + h: Callable[[X1, X2, X3, X4, X5], Y], x1: X1, x2: X2, x3: X3, x4: X4, x5: X5, delta=1e-5 +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x1, x2, x3, 
x, x5), x4, delta) + + +def numericalDerivative55( + h: Callable[[X1, X2, X3, X4, X5], Y], x1: X1, x2: X2, x3: X3, x4: X4, x5: X5, delta=1e-5 +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x1, x2, x3, x4, x), x5, delta) + + +def numericalDerivative61( + h: Callable[[X1, X2, X3, X4, X5, X6], Y], + x1: X1, + x2: X2, + x3: X3, + x4: X4, + x5: X5, + x6: X6, + delta=1e-5, +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x, x2, x3, x4, x5, x6), x1, delta) + + +def numericalDerivative62( + h: Callable[[X1, X2, X3, X4, X5, X6], Y], + x1: X1, + x2: X2, + x3: X3, + x4: X4, + x5: X5, + x6: X6, + delta=1e-5, +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x1, x, x3, x4, x5, x6), x2, delta) + + +def numericalDerivative63( + h: Callable[[X1, X2, X3, X4, X5, X6], Y], + x1: X1, + x2: X2, + x3: X3, + x4: X4, + x5: X5, + x6: X6, + delta=1e-5, +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x1, x2, x, x4, x5, x6), x3, delta) + + +def numericalDerivative64( + h: Callable[[X1, X2, X3, X4, X5, X6], Y], + x1: X1, + x2: X2, + x3: X3, + x4: X4, + x5: X5, + x6: X6, + delta=1e-5, +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x1, x2, x3, x, x5, x6), x4, delta) + + +def numericalDerivative65( + h: Callable[[X1, X2, X3, X4, X5, X6], Y], + x1: X1, + x2: X2, + x3: X3, + x4: X4, + x5: X5, + x6: X6, + delta=1e-5, +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x1, x2, x3, x4, x, x6), x5, delta) + + +def numericalDerivative66( + h: Callable[[X1, X2, X3, X4, X5, X6], Y], + x1: X1, + x2: X2, + x3: X3, + x4: X4, + x5: X5, + x6: X6, + delta=1e-5, +) -> np.ndarray: + return numericalDerivative11(lambda x: h(x1, x2, x3, x4, x5, x), x6, delta) diff --git a/python/gtsam_unstable/examples/LocalizationExample.py b/python/gtsam_unstable/examples/LocalizationExample.py new file mode 100644 index 000000000..ad8266d6d --- /dev/null +++ b/python/gtsam_unstable/examples/LocalizationExample.py @@ -0,0 +1,68 @@ +""" +A simple 2D pose slam 
example with "GPS" measurements + - The robot moves forward 2 meter each iteration + - The robot initially faces along the X axis (horizontal, to the right in 2D) + - We have full odometry between pose + - We have "GPS-like" measurements implemented with a custom factor +""" +import numpy as np + +import gtsam +from gtsam import BetweenFactorPose2, Pose2, noiseModel +from gtsam_unstable import PartialPriorFactorPose2 + + +def main(): + # 1. Create a factor graph container and add factors to it. + graph = gtsam.NonlinearFactorGraph() + + # 2a. Add odometry factors + # For simplicity, we will use the same noise model for each odometry factor + odometryNoise = noiseModel.Diagonal.Sigmas(np.asarray([0.2, 0.2, 0.1])) + + # Create odometry (Between) factors between consecutive poses + graph.push_back( + BetweenFactorPose2(1, 2, Pose2(2.0, 0.0, 0.0), odometryNoise)) + graph.push_back( + BetweenFactorPose2(2, 3, Pose2(2.0, 0.0, 0.0), odometryNoise)) + + # 2b. Add "GPS-like" measurements + # We will use PartialPrior factor for this. + unaryNoise = noiseModel.Diagonal.Sigmas(np.array([0.1, + 0.1])) # 10cm std on x,y + + graph.push_back( + PartialPriorFactorPose2(1, [0, 1], np.asarray([0.0, 0.0]), unaryNoise)) + graph.push_back( + PartialPriorFactorPose2(2, [0, 1], np.asarray([2.0, 0.0]), unaryNoise)) + graph.push_back( + PartialPriorFactorPose2(3, [0, 1], np.asarray([4.0, 0.0]), unaryNoise)) + graph.print("\nFactor Graph:\n") + + # 3. Create the data structure to hold the initialEstimate estimate to the solution + # For illustrative purposes, these have been deliberately set to incorrect values + initialEstimate = gtsam.Values() + initialEstimate.insert(1, Pose2(0.5, 0.0, 0.2)) + initialEstimate.insert(2, Pose2(2.3, 0.1, -0.2)) + initialEstimate.insert(3, Pose2(4.1, 0.1, 0.1)) + initialEstimate.print("\nInitial Estimate:\n") + + # 4. Optimize using Levenberg-Marquardt optimization. 
The optimizer + # accepts an optional set of configuration parameters, controlling + # things like convergence criteria, the type of linear system solver + # to use, and the amount of information displayed during optimization. + # Here we will use the default set of parameters. See the + # documentation for the full set of parameters. + optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate) + result = optimizer.optimize() + result.print("Final Result:\n") + + # 5. Calculate and print marginal covariances for all variables + marginals = gtsam.Marginals(graph, result) + print("x1 covariance:\n", marginals.marginalCovariance(1)) + print("x2 covariance:\n", marginals.marginalCovariance(2)) + print("x3 covariance:\n", marginals.marginalCovariance(3)) + + +if __name__ == "__main__": + main() diff --git a/python/gtsam_unstable/gtsam_unstable.tpl b/python/gtsam_unstable/gtsam_unstable.tpl index b98872c46..006ca7fc8 100644 --- a/python/gtsam_unstable/gtsam_unstable.tpl +++ b/python/gtsam_unstable/gtsam_unstable.tpl @@ -9,6 +9,7 @@ #include #include +#include #include #include #include diff --git a/python/setup.py.in b/python/setup.py.in index 824a6656e..b9d7392c7 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -13,6 +13,7 @@ package_data = { "./*.so", "./*.dll", "./*.pyd", + "*.pyi", "**/*.pyi", # Add the type hints ] } diff --git a/tests/testDoglegOptimizer.cpp b/tests/testDoglegOptimizer.cpp index e64e5ab7a..6fbc522d3 100644 --- a/tests/testDoglegOptimizer.cpp +++ b/tests/testDoglegOptimizer.cpp @@ -24,10 +24,9 @@ #include #include #include -#include -#include -#include -#include +#include +#include +#include "examples/SFMdata.h" #include @@ -36,7 +35,6 @@ using namespace gtsam; // Convenience for named keys using symbol_shorthand::X; -using symbol_shorthand::L; /* ************************************************************************* */ TEST(DoglegOptimizer, ComputeBlend) { @@ -185,6 +183,128 @@ TEST(DoglegOptimizer, Constraint) { #endif } 
+/* ************************************************************************* */ +/** + * Test created to fix issue in ISAM2 when using the DogLegOptimizer. + * Originally reported by kvmanohar22 in issue #301 + * https://github.com/borglab/gtsam/issues/301 + * + * This test is based on a script provided by kvmanohar22 + * to help reproduce the issue. + */ +TEST(DogLegOptimizer, VariableUpdate) { + // Make the typename short so it looks much cleaner + typedef SmartProjectionPoseFactor SmartFactor; + + // create a typedef to the camera type + typedef PinholePose Camera; + // Define the camera calibration parameters + Cal3_S2::shared_ptr K(new Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)); + + // Define the camera observation noise model + noiseModel::Isotropic::shared_ptr measurementNoise = + noiseModel::Isotropic::Sigma(2, 1.0); // one pixel in u and v + + // Create the set of ground-truth landmarks and poses + vector points = createPoints(); + vector poses = createPoses(); + + // Create a factor graph + NonlinearFactorGraph graph; + + ISAM2DoglegParams doglegparams = ISAM2DoglegParams(); + doglegparams.verbose = false; + ISAM2Params isam2_params; + isam2_params.evaluateNonlinearError = true; + isam2_params.relinearizeThreshold = 0.0; + isam2_params.enableRelinearization = true; + isam2_params.optimizationParams = doglegparams; + isam2_params.relinearizeSkip = 1; + ISAM2 isam2(isam2_params); + + // Simulated measurements from each camera pose, adding them to the factor + // graph + unordered_map smart_factors; + for (size_t j = 0; j < points.size(); ++j) { + // every landmark represent a single landmark, we use shared pointer to init + // the factor, and then insert measurements. 
+ SmartFactor::shared_ptr smartfactor(new SmartFactor(measurementNoise, K)); + + for (size_t i = 0; i < poses.size(); ++i) { + // generate the 2D measurement + Camera camera(poses[i], K); + Point2 measurement = camera.project(points[j]); + + // call add() function to add measurement into a single factor, here we + // need to add: + // 1. the 2D measurement + // 2. the corresponding camera's key + // 3. camera noise model + // 4. camera calibration + + // add only first 3 measurements and update the later measurements + // incrementally + if (i < 3) smartfactor->add(measurement, i); + } + + // insert the smart factor in the graph + smart_factors[j] = smartfactor; + graph.push_back(smartfactor); + } + + // Add a prior on pose x0. This indirectly specifies where the origin is. + // 30cm std on x,y,z 0.1 rad on roll,pitch,yaw + noiseModel::Diagonal::shared_ptr noise = noiseModel::Diagonal::Sigmas( + (Vector(6) << Vector3::Constant(0.3), Vector3::Constant(0.1)).finished()); + graph.emplace_shared >(0, poses[0], noise); + + // Because the structure-from-motion problem has a scale ambiguity, the + // problem is still under-constrained. Here we add a prior on the second pose + // x1, so this will fix the scale by indicating the distance between x0 and + // x1. Because these two are fixed, the rest of the poses will be also be + // fixed. 
+ graph.emplace_shared >(1, poses[1], + noise); // add directly to graph + + // Create the initial estimate to the solution + // Intentionally initialize the variables off from the ground truth + Values initialEstimate; + Pose3 delta(Rot3::Rodrigues(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20)); + for (size_t i = 0; i < 3; ++i) + initialEstimate.insert(i, poses[i].compose(delta)); + // initialEstimate.print("Initial Estimates:\n"); + + // Optimize the graph and print results + isam2.update(graph, initialEstimate); + Values result = isam2.calculateEstimate(); + // result.print("Results:\n"); + + // we add new measurements from this pose + size_t pose_idx = 3; + + // Now update existing smart factors with new observations + for (size_t j = 0; j < points.size(); ++j) { + SmartFactor::shared_ptr smartfactor = smart_factors[j]; + + // add the 4th measurement + Camera camera(poses[pose_idx], K); + Point2 measurement = camera.project(points[j]); + smartfactor->add(measurement, pose_idx); + } + + graph.resize(0); + initialEstimate.clear(); + + // update initial estimate for the new pose + initialEstimate.insert(pose_idx, poses[pose_idx].compose(delta)); + + // this should break the system + isam2.update(graph, initialEstimate); + result = isam2.calculateEstimate(); + EXPECT(std::find(result.keys().begin(), result.keys().end(), pose_idx) != + result.keys().end()); +} + /* ************************************************************************* */ int main() { TestResult tr; return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ diff --git a/tests/testGaussianISAM2.cpp b/tests/testGaussianISAM2.cpp index 1ea60dac9..721ed51c0 100644 --- a/tests/testGaussianISAM2.cpp +++ b/tests/testGaussianISAM2.cpp @@ -994,6 +994,56 @@ TEST(ISAM2, calculate_nnz) EXPECT_LONGS_EQUAL(expected, actual); } +class FixActiveFactor : public NoiseModelFactorN { + using Base = NoiseModelFactorN; + bool is_active_; + +public: + 
FixActiveFactor(const gtsam::Key& key, const bool active) + : Base(nullptr, key), is_active_(active) {} + + virtual bool active(const gtsam::Values &values) const override { + return is_active_; + } + + virtual Vector + evaluateError(const Vector2& x, + Base::OptionalMatrixTypeT H = nullptr) const override { + if (H) { + *H = Vector2::Identity(); + } + return Vector2::Zero(); + } +}; + +TEST(ActiveFactorTesting, Issue1596) { + // Issue1596: When a derived Nonlinear Factor is not active, the linearization returns a nullptr (NonlinearFactor.cpp:156), which + // causes an error when `EliminateSymbolic` is called (SymbolicFactor-inst.h:45) due to not checking if the factor is nullptr. + const gtsam::Key key{Symbol('x', 0)}; + + ISAM2 isam; + Values values; + NonlinearFactorGraph graph; + + // Insert an active factor + values.insert(key, Vector2::Zero()); + graph.emplace_shared(key, true); + + // No problem here + isam.update(graph, values); + + graph = NonlinearFactorGraph(); + + // Inserting a factor that is never active + graph.emplace_shared(key, false); + + // This call throws the error if the pointer is not validated on (SymbolicFactor-inst.h:45) + isam.update(graph); + + // If the bug is fixed, this line is reached. 
+ EXPECT(isam.getFactorsUnsafe().size() == 2); +} + /* ************************************************************************* */ int main() { TestResult tr; return TestRegistry::runAllTests(tr);} /* ************************************************************************* */ diff --git a/tests/testTranslationRecovery.cpp b/tests/testTranslationRecovery.cpp index 15f1caa1b..6e9c3ed24 100644 --- a/tests/testTranslationRecovery.cpp +++ b/tests/testTranslationRecovery.cpp @@ -353,6 +353,340 @@ TEST(TranslationRecovery, NodeWithBetweenFactorAndNoMeasurements) { EXPECT(assert_equal(Point3(1, 2, 1), result.at(4), 1e-4)); } +/* ************************************************************************* +* Repeat all tests, but with the Bilinear Angle Translation Factor. +* ************************************************************************* */ + + +/* ************************************************************************* */ +// We read the BAL file, which has 3 cameras in it, with poses. We then assume +// the rotations are correct, but translations have to be estimated from +// translation directions only. Since we have 3 cameras, A, B, and C, we can at +// most create three relative measurements, let's call them w_aZb, w_aZc, and +// bZc. These will be of type Unit3. We then call `recoverTranslations` which +// sets up an optimization problem for the three unknown translations. +TEST(TranslationRecovery, BALBATA) { + const string filename = findExampleDataFile("dubrovnik-3-7-pre"); + SfmData db = SfmData::FromBalFile(filename); + + // Get camera poses, as Values + size_t j = 0; + Values poses; + for (auto camera : db.cameras) { + poses.insert(j++, camera.pose()); + } + + // Simulate measurements + const auto relativeTranslations = TranslationRecovery::SimulateMeasurements( + poses, {{0, 1}, {0, 2}, {1, 2}}); + + // Check simulated measurements. 
+ for (auto& unitTranslation : relativeTranslations) { + EXPECT(assert_equal(GetDirectionFromPoses(poses, unitTranslation), + unitTranslation.measured())); + } + + LevenbergMarquardtParams params; + TranslationRecovery algorithm(params, true); + const auto graph = algorithm.buildGraph(relativeTranslations); + EXPECT_LONGS_EQUAL(3, graph.size()); + + // Run translation recovery + const double scale = 2.0; + const auto result = algorithm.run(relativeTranslations, scale); + + // Check result for first two translations, determined by prior + EXPECT(assert_equal(Point3(0, 0, 0), result.at(0))); + EXPECT(assert_equal( + Point3(2 * GetDirectionFromPoses(poses, relativeTranslations[0])), + result.at(1))); + + // Check that the third translations is correct + Point3 Ta = poses.at(0).translation(); + Point3 Tb = poses.at(1).translation(); + Point3 Tc = poses.at(2).translation(); + Point3 expected = (Tc - Ta) * (scale / (Tb - Ta).norm()); + EXPECT(assert_equal(expected, result.at(2), 1e-4)); + + // TODO(frank): how to get stats back? + // EXPECT_DOUBLES_EQUAL(0.0199833, actualError, 1e-5); +} + +TEST(TranslationRecovery, TwoPoseTestBATA) { + // Create a dataset with 2 poses. + // __ __ + // \/ \/ + // 0 _____ 1 + // + // 0 and 1 face in the same direction but have a translation offset. + Values poses; + poses.insert(0, Pose3(Rot3(), Point3(0, 0, 0))); + poses.insert(1, Pose3(Rot3(), Point3(2, 0, 0))); + + auto relativeTranslations = + TranslationRecovery::SimulateMeasurements(poses, {{0, 1}}); + + // Check simulated measurements. 
+ for (auto& unitTranslation : relativeTranslations) { + EXPECT(assert_equal(GetDirectionFromPoses(poses, unitTranslation), + unitTranslation.measured())); + } + + LevenbergMarquardtParams params; + TranslationRecovery algorithm(params, true); + const auto graph = algorithm.buildGraph(relativeTranslations); + EXPECT_LONGS_EQUAL(1, graph.size()); + + // Run translation recovery + const auto result = algorithm.run(relativeTranslations, /*scale=*/3.0); + + // Check result for first two translations, determined by prior + EXPECT(assert_equal(Point3(0, 0, 0), result.at(0), 1e-8)); + EXPECT(assert_equal(Point3(3, 0, 0), result.at(1), 1e-8)); +} + +TEST(TranslationRecovery, ThreePoseTestBATA) { + // Create a dataset with 3 poses. + // __ __ + // \/ \/ + // 0 _____ 1 + // \ __ / + // \\// + // 3 + // + // 0 and 1 face in the same direction but have a translation offset. 3 is in + // the same direction as 0 and 1, in between 0 and 1, with some Y axis offset. + + Values poses; + poses.insert(0, Pose3(Rot3(), Point3(0, 0, 0))); + poses.insert(1, Pose3(Rot3(), Point3(2, 0, 0))); + poses.insert(3, Pose3(Rot3(), Point3(1, -1, 0))); + + auto relativeTranslations = TranslationRecovery::SimulateMeasurements( + poses, {{0, 1}, {1, 3}, {3, 0}}); + + // Check simulated measurements. 
+ for (auto& unitTranslation : relativeTranslations) { + EXPECT(assert_equal(GetDirectionFromPoses(poses, unitTranslation), + unitTranslation.measured())); + } + + LevenbergMarquardtParams params; + TranslationRecovery algorithm(params, true); + const auto graph = algorithm.buildGraph(relativeTranslations); + EXPECT_LONGS_EQUAL(3, graph.size()); + + const auto result = algorithm.run(relativeTranslations, /*scale=*/3.0); + + // Check result + EXPECT(assert_equal(Point3(0, 0, 0), result.at(0), 1e-8)); + EXPECT(assert_equal(Point3(3, 0, 0), result.at(1), 1e-8)); + EXPECT(assert_equal(Point3(1.5, -1.5, 0), result.at(3), 1e-8)); +} + +TEST(TranslationRecovery, ThreePosesIncludingZeroTranslationBATA) { + // Create a dataset with 3 poses. + // __ __ + // \/ \/ + // 0 _____ 1 + // 2 <| + // + // 0 and 1 face in the same direction but have a translation offset. 2 is at + // the same point as 1 but is rotated, with little FOV overlap. + Values poses; + poses.insert(0, Pose3(Rot3(), Point3(0, 0, 0))); + poses.insert(1, Pose3(Rot3(), Point3(2, 0, 0))); + poses.insert(2, Pose3(Rot3::RzRyRx(-M_PI / 2, 0, 0), Point3(2, 0, 0))); + + auto relativeTranslations = + TranslationRecovery::SimulateMeasurements(poses, {{0, 1}, {1, 2}}); + + // Check simulated measurements. + for (auto& unitTranslation : relativeTranslations) { + EXPECT(assert_equal(GetDirectionFromPoses(poses, unitTranslation), + unitTranslation.measured())); + } + + LevenbergMarquardtParams params; + TranslationRecovery algorithm(params, true); + // Run translation recovery + const auto result = algorithm.run(relativeTranslations, /*scale=*/3.0); + + // Check result + EXPECT(assert_equal(Point3(0, 0, 0), result.at(0), 1e-8)); + EXPECT(assert_equal(Point3(3, 0, 0), result.at(1), 1e-8)); + EXPECT(assert_equal(Point3(3, 0, 0), result.at(2), 1e-8)); +} + +TEST(TranslationRecovery, FourPosesIncludingZeroTranslationBATA) { + // Create a dataset with 4 poses. 
+ // __ __ + // \/ \/ + // 0 _____ 1 + // \ __ 2 <| + // \\// + // 3 + // + // 0 and 1 face in the same direction but have a translation offset. 2 is at + // the same point as 1 but is rotated, with very little FOV overlap. 3 is in + // the same direction as 0 and 1, in between 0 and 1, with some Y axis offset. + + Values poses; + poses.insert(0, Pose3(Rot3(), Point3(0, 0, 0))); + poses.insert(1, Pose3(Rot3(), Point3(2, 0, 0))); + poses.insert(2, Pose3(Rot3::RzRyRx(-M_PI / 2, 0, 0), Point3(2, 0, 0))); + poses.insert(3, Pose3(Rot3(), Point3(1, -1, 0))); + + auto relativeTranslations = TranslationRecovery::SimulateMeasurements( + poses, {{0, 1}, {1, 2}, {1, 3}, {3, 0}}); + + // Check simulated measurements. + for (auto& unitTranslation : relativeTranslations) { + EXPECT(assert_equal(GetDirectionFromPoses(poses, unitTranslation), + unitTranslation.measured())); + } + + LevenbergMarquardtParams params; + TranslationRecovery algorithm(params, true); + + // Run translation recovery + const auto result = algorithm.run(relativeTranslations, /*scale=*/4.0); + + // Check result + EXPECT(assert_equal(Point3(0, 0, 0), result.at(0), 1e-8)); + EXPECT(assert_equal(Point3(4, 0, 0), result.at(1), 1e-8)); + EXPECT(assert_equal(Point3(4, 0, 0), result.at(2), 1e-8)); + EXPECT(assert_equal(Point3(2, -2, 0), result.at(3), 1e-8)); +} + +TEST(TranslationRecovery, ThreePosesWithZeroTranslationBATA) { + Values poses; + poses.insert(0, Pose3(Rot3::RzRyRx(-M_PI / 6, 0, 0), Point3(0, 0, 0))); + poses.insert(1, Pose3(Rot3(), Point3(0, 0, 0))); + poses.insert(2, Pose3(Rot3::RzRyRx(M_PI / 6, 0, 0), Point3(0, 0, 0))); + + auto relativeTranslations = TranslationRecovery::SimulateMeasurements( + poses, {{0, 1}, {1, 2}, {2, 0}}); + + // Check simulated measurements. 
+ for (auto& unitTranslation : relativeTranslations) { + EXPECT(assert_equal(GetDirectionFromPoses(poses, unitTranslation), + unitTranslation.measured())); + } + + LevenbergMarquardtParams params; + TranslationRecovery algorithm(params, true); + // Run translation recovery + const auto result = algorithm.run(relativeTranslations, /*scale=*/4.0); + + // Check result + EXPECT(assert_equal(Point3(0, 0, 0), result.at(0), 1e-8)); + EXPECT(assert_equal(Point3(0, 0, 0), result.at(1), 1e-8)); + EXPECT(assert_equal(Point3(0, 0, 0), result.at(2), 1e-8)); +} + +TEST(TranslationRecovery, ThreePosesWithOneSoftConstraintBATA) { + // Create a dataset with 3 poses. + // __ __ + // \/ \/ + // 0 _____ 1 + // \ __ / + // \\// + // 3 + // + // 0 and 1 face in the same direction but have a translation offset. 3 is in + // the same direction as 0 and 1, in between 0 and 1, with some Y axis offset. + + Values poses; + poses.insert(0, Pose3(Rot3(), Point3(0, 0, 0))); + poses.insert(1, Pose3(Rot3(), Point3(2, 0, 0))); + poses.insert(3, Pose3(Rot3(), Point3(1, -1, 0))); + + auto relativeTranslations = TranslationRecovery::SimulateMeasurements( + poses, {{0, 1}, {0, 3}, {1, 3}}); + + std::vector> betweenTranslations; + betweenTranslations.emplace_back(0, 3, Point3(1, -1, 0), + noiseModel::Isotropic::Sigma(3, 1e-2)); + + LevenbergMarquardtParams params; + TranslationRecovery algorithm(params, true); + auto result = + algorithm.run(relativeTranslations, /*scale=*/0.0, betweenTranslations); + + // Check result + EXPECT(assert_equal(Point3(0, 0, 0), result.at(0), 1e-4)); + EXPECT(assert_equal(Point3(2, 0, 0), result.at(1), 1e-4)); + EXPECT(assert_equal(Point3(1, -1, 0), result.at(3), 1e-4)); +} + +TEST(TranslationRecovery, ThreePosesWithOneHardConstraintBATA) { + // Create a dataset with 3 poses. + // __ __ + // \/ \/ + // 0 _____ 1 + // \ __ / + // \\// + // 3 + // + // 0 and 1 face in the same direction but have a translation offset. 
3 is in + // the same direction as 0 and 1, in between 0 and 1, with some Y axis offset. + + Values poses; + poses.insert(0, Pose3(Rot3(), Point3(0, 0, 0))); + poses.insert(1, Pose3(Rot3(), Point3(2, 0, 0))); + poses.insert(3, Pose3(Rot3(), Point3(1, -1, 0))); + + auto relativeTranslations = TranslationRecovery::SimulateMeasurements( + poses, {{0, 1}, {0, 3}, {1, 3}}); + + std::vector> betweenTranslations; + betweenTranslations.emplace_back(0, 1, Point3(2, 0, 0), + noiseModel::Constrained::All(3, 1e2)); + + LevenbergMarquardtParams params; + TranslationRecovery algorithm(params, true); + auto result = + algorithm.run(relativeTranslations, /*scale=*/0.0, betweenTranslations); + + // Check result + EXPECT(assert_equal(Point3(0, 0, 0), result.at(0), 1e-4)); + EXPECT(assert_equal(Point3(2, 0, 0), result.at(1), 1e-4)); + EXPECT(assert_equal(Point3(1, -1, 0), result.at(3), 1e-4)); +} + +TEST(TranslationRecovery, NodeWithBetweenFactorAndNoMeasurementsBATA) { + // Checks that valid results are obtained when a between translation edge is + // provided with a node that does not have any other relative translations. + Values poses; + poses.insert(0, Pose3(Rot3(), Point3(0, 0, 0))); + poses.insert(1, Pose3(Rot3(), Point3(2, 0, 0))); + poses.insert(3, Pose3(Rot3(), Point3(1, -1, 0))); + poses.insert(4, Pose3(Rot3(), Point3(1, 2, 1))); + + auto relativeTranslations = TranslationRecovery::SimulateMeasurements( + poses, {{0, 1}, {0, 3}, {1, 3}}); + + std::vector> betweenTranslations; + betweenTranslations.emplace_back(0, 1, Point3(2, 0, 0), + noiseModel::Constrained::All(3, 1e2)); + // Node 4 only has this between translation prior, no relative translations. 
+ betweenTranslations.emplace_back(0, 4, Point3(1, 2, 1)); + + LevenbergMarquardtParams params; + TranslationRecovery algorithm(params, true); + auto result = + algorithm.run(relativeTranslations, /*scale=*/0.0, betweenTranslations); + + // Check result + EXPECT(assert_equal(Point3(0, 0, 0), result.at(0), 1e-4)); + EXPECT(assert_equal(Point3(2, 0, 0), result.at(1), 1e-4)); + EXPECT(assert_equal(Point3(1, -1, 0), result.at(3), 1e-4)); + EXPECT(assert_equal(Point3(1, 2, 1), result.at(4), 1e-4)); +} + + + /* ************************************************************************* */ int main() { TestResult tr;